diff --git a/.travis.yml b/.travis.yml index 808a6f383d..21b2404d37 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: go os: linux go: - - 1.9.x + - 1.10.x go_import_path: github.com/GoogleContainerTools/kaniko script: diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000..6b9a15a625 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,35 @@ +# Copyright 2018 Google, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Builds the static Go image to execute in a Kubernetes job + +FROM golang:1.10 +WORKDIR /go/src/github.com/GoogleContainerTools/kaniko +COPY . . +RUN make + +# need to have `cat` +FROM alpine + +# need to have `nohup` +RUN apk add --no-cache coreutils + +COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor +COPY files/ca-certificates.crt /kaniko/ssl/certs/ +COPY files/config.json /root/.docker/ +ENV HOME /root +ENV USER /root +ENV PATH /usr/local/bin +ENV SSL_CERT_DIR=/kaniko/ssl/certs +ENTRYPOINT ["/kaniko/executor"] diff --git a/Gopkg.lock b/Gopkg.lock index f10ecacc3d..d8c5ddd483 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -12,8 +12,17 @@ "internal/version", "storage" ] - revision = "29f476ffa9c4cd4fd14336b6043090ac1ad76733" - version = "v0.21.0" + revision = "056a55f54a6cc77b440b31a56a5e7c3982d32811" + version = "v0.22.0" + +[[projects]] + branch = "master" + name = "github.com/Azure/go-ansiterm" + packages = [ + ".", + "winterm" + ] + revision = "d6e3b3328b783f23731bc4d058875b0371ff8109" [[projects]] name = "github.com/BurntSushi/toml" @@ -22,36 +31,34 @@ version = "v0.3.0" [[projects]] - name = "github.com/GoogleCloudPlatform/container-diff" - packages = ["cmd/util/output"] - revision = "9eb30146e04ed241c8d5be4e0b9383efaa60a395" - version = "v0.9.0" + name = "github.com/Microsoft/go-winio" + packages = ["."] + revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" + version = "v0.4.7" + +[[projects]] + name = "github.com/Microsoft/hcsshim" + packages = ["."] + revision = "800683ae704ac360b2f3f47fa88f3a6c8c9091b5" + version = "v0.6.11" [[projects]] branch = "master" - name = "github.com/GoogleContainerTools/container-diff" - packages = [ - "pkg/image", - "pkg/util" - ] - revision = "859166bbd7810e3c3fc072f1c33ad57b9f4acbd0" - source = "github.com/GoogleContainerTools/container-diff" + name = "github.com/Nvveen/Gotty" + packages = ["."] + revision = "cd527374f1e5bff4938207604a14f2e38a9cf512" [[projects]] - name = "github.com/Microsoft/go-winio" - packages = [ - ".", - "archive/tar", - "backuptar" - ] - revision = "7da180ee92d8bd8bb8c37fc560e673e6557c392f" - version = "v0.4.7" + branch = "master" + name = "github.com/armon/go-metrics" + packages = ["."] + revision = "783273d703149aaeb9897cf58613d5af48861c25" [[projects]] - name = "github.com/Microsoft/hcsshim" + branch = "master" + name = "github.com/armon/go-radix" packages = ["."] - revision = "45ef15484298b76abeb9513ea0ea0abd2b5b84b3" - version = "v0.6.8" + revision = "1fca145dffbcaa8fe914309b1ec0cfc67500fe61" [[projects]] branch = 
"master" @@ -59,101 +66,71 @@ packages = ["quantile"] revision = "3a771d992973f24aa725d07868b467d1ddfceafb" +[[projects]] + name = "github.com/boltdb/bolt" + packages = ["."] + revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8" + version = "v1.3.1" + +[[projects]] + name = "github.com/containerd/containerd" + packages = [ + "cio", + "defaults", + "log" + ] + revision = "209a7fc3e4a32ef71a8c7b50c68fc8398415badf" + version = "v1.1.0" + [[projects]] branch = "master" name = "github.com/containerd/continuity" - packages = ["pathdriver"] - revision = "3e8f2ea4b190484acb976a5b378d373429639a1a" + packages = [ + "devices", + "driver", + "fs", + "pathdriver", + "sysx" + ] + revision = "c6cef34830231743494fe2969284df7b82cc0ad0" [[projects]] - name = "github.com/containers/image" + branch = "master" + name = "github.com/containerd/fifo" + packages = ["."] + revision = "3d5202aec260678c48179c56f40e6f38a095738c" + +[[projects]] + name = "github.com/coreos/etcd" packages = [ - "copy", - "directory", - "directory/explicitfilepath", - "docker", - "docker/archive", - "docker/daemon", - "docker/policyconfiguration", - "docker/reference", - "docker/tarfile", - "image", - "internal/tmpdir", - "manifest", - "oci/archive", - "oci/internal", - "oci/layout", - "openshift", - "ostree", - "pkg/compression", - "pkg/docker/config", - "pkg/strslice", - "pkg/tlsclientconfig", - "signature", - "storage", - "tarball", - "transports", - "transports/alltransports", - "types", + "client", + "pkg/pathutil", + "pkg/srv", + "pkg/types", + "raft/raftpb", "version" ] - revision = "495da41bd26c50be62fa07ae903ea2ee54c00283" + revision = "70c8726202dd91e482fb4029fd14af1d4ed1d5af" + version = "v3.3.5" [[projects]] - name = "github.com/containers/storage" - packages = [ - ".", - "drivers", - "drivers/aufs", - "drivers/btrfs", - "drivers/devmapper", - "drivers/overlay", - "drivers/overlayutils", - "drivers/quota", - "drivers/register", - "drivers/vfs", - "drivers/windows", - "drivers/zfs", - "pkg/archive", - "pkg/chrootarchive", - "pkg/devicemapper", - "pkg/directory", - "pkg/dmesg", - "pkg/fileutils", - "pkg/fsutils", - "pkg/homedir", - "pkg/idtools", - "pkg/ioutils", - "pkg/locker", - "pkg/longpath", - "pkg/loopback", - "pkg/mount", - "pkg/parsers", - "pkg/parsers/kernel", - "pkg/pools", - "pkg/promise", - "pkg/reexec", - "pkg/stringid", - "pkg/system", - "pkg/truncindex" - ] - revision = "1e5ce40cdb84ab66e26186435b1273e04b879fef" - source = "github.com/containers/storage" + name = "github.com/coreos/go-semver" + packages = ["semver"] + revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6" + version = "v0.2.0" + +[[projects]] + name = "github.com/deckarep/golang-set" + packages = ["."] + revision = "1d4478f51bed434f1dadf96dcd9b43aabac66795" + version = "v1.7" [[projects]] name = "github.com/docker/distribution" packages = [ ".", "digestset", - "metrics", - "reference", - "registry/api/errcode", - "registry/api/v2", - "registry/client", - "registry/client/auth/challenge", - "registry/client/transport", - "registry/storage/cache", - "registry/storage/cache/memory" + "reference" ] revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" source = "github.com/docker/distribution" @@ -163,47 +140,92 @@ packages = [ "api", "api/types", + "api/types/backend", "api/types/blkiodev", "api/types/container", - "api/types/events", "api/types/filters", - "api/types/image", "api/types/mount", "api/types/network", + "api/types/plugins/logdriver", "api/types/registry", "api/types/strslice", "api/types/swarm", "api/types/swarm/runtime", - 
"api/types/time", "api/types/versions", - "api/types/volume", + "builder", + "builder/dockerfile", "builder/dockerfile/command", "builder/dockerfile/instructions", "builder/dockerfile/parser", "builder/dockerfile/shell", - "client", + "builder/dockerignore", + "builder/fscache", + "builder/remotecontext", + "builder/remotecontext/git", + "container", + "container/stream", + "daemon/cluster/provider", + "daemon/exec", + "daemon/graphdriver", + "daemon/logger", + "daemon/logger/jsonfilelog", + "daemon/logger/jsonfilelog/jsonlog", + "daemon/logger/loggerutils", + "daemon/logger/loggerutils/multireader", + "daemon/logger/templates", + "daemon/network", + "dockerversion", + "errdefs", + "image", + "layer", + "oci", + "opts", "pkg/archive", + "pkg/broadcaster", + "pkg/chrootarchive", + "pkg/containerfs", + "pkg/directory", + "pkg/discovery", + "pkg/discovery/kv", + "pkg/filenotify", "pkg/fileutils", - "pkg/homedir", "pkg/idtools", "pkg/ioutils", + "pkg/jsonmessage", + "pkg/locker", "pkg/longpath", "pkg/mount", + "pkg/parsers", + "pkg/parsers/kernel", + "pkg/plugingetter", + "pkg/plugins", + "pkg/plugins/transport", "pkg/pools", - "pkg/system" + "pkg/progress", + "pkg/pubsub", + "pkg/reexec", + "pkg/signal", + "pkg/streamformatter", + "pkg/stringid", + "pkg/symlink", + "pkg/sysinfo", + "pkg/system", + "pkg/tailfile", + "pkg/tarsum", + "pkg/term", + "pkg/term/windows", + "pkg/urlutil", + "pkg/useragent", + "plugin/v2", + "restartmanager", + "runconfig", + "runconfig/opts", + "volume", + "volume/mounts" ] - revision = "b1a1234c60cf87048814aa37da523b03a7b0d344" + revision = "dfde597fbbb5de4a7559a68980401c8c405aa9af" source = "github.com/docker/docker" -[[projects]] - name = "github.com/docker/docker-credential-helpers" - packages = [ - "client", - "credentials" - ] - revision = "d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1" - version = "v0.6.0" - [[projects]] name = "github.com/docker/go-connections" packages = [ @@ -214,6 +236,12 @@ revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" version = "v0.3.0" +[[projects]] + branch = "master" + name = "github.com/docker/go-events" + packages = ["."] + revision = "9461782956ad83b30282bf90e31fa6a70c255ba9" + [[projects]] branch = "master" name = "github.com/docker/go-metrics" @@ -226,11 +254,90 @@ revision = "47565b4f722fb6ceae66b95f853feed578a4a51c" version = "v0.3.3" +[[projects]] + name = "github.com/docker/libkv" + packages = [ + ".", + "store", + "store/boltdb", + "store/consul", + "store/etcd", + "store/zookeeper" + ] + revision = "aabc039ad04deb721e234f99cd1b4aa28ac71a40" + version = "v0.2.1" + +[[projects]] + branch = "master" + name = "github.com/docker/libnetwork" + packages = [ + ".", + "bitseq", + "cluster", + "common", + "config", + "datastore", + "diagnostic", + "discoverapi", + "driverapi", + "drivers/bridge", + "drivers/host", + "drivers/ipvlan", + "drivers/macvlan", + "drivers/null", + "drivers/overlay", + "drivers/remote", + "drivers/remote/api", + "drivers/windows", + "drivers/windows/overlay", + "drvregistry", + "etchosts", + "hostdiscovery", + "idm", + "ipam", + "ipamapi", + "ipams/builtin", + "ipams/null", + "ipams/remote", + "ipams/remote/api", + "ipams/windowsipam", + "ipamutils", + "iptables", + "ipvs", + "netlabel", + "netutils", + "networkdb", + "ns", + "options", + "osl", + "portallocator", + "portmapper", + "resolvconf", + "resolvconf/dns", + "types" + ] + revision = "d5818e7204d3886a246f3ea4ce39fb133067b194" + [[projects]] branch = "master" - name = "github.com/docker/libtrust" + name = "github.com/docker/swarmkit" + packages = [ 
+ "agent/exec", + "api", + "api/deepcopy", + "api/equality", + "log", + "manager/raftselector", + "protobuf/plugin", + "protobuf/ptypes" + ] + revision = "8aa9c33bcdff9ea38fc79e0b1d054199917513f3" + +[[projects]] + name = "github.com/fsnotify/fsnotify" packages = ["."] - revision = "aabc10ec26b754e797f9028f4589c5b7bd90dc20" + revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" + version = "v1.4.7" [[projects]] name = "github.com/genuinetools/amicontained" @@ -239,14 +346,20 @@ version = "v0.4.0" [[projects]] - name = "github.com/ghodss/yaml" + name = "github.com/godbus/dbus" packages = ["."] - revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" - version = "v1.0.0" + revision = "a389bdde4dd695d414e47b755e95e72b7826432c" + version = "v4.1.0" [[projects]] name = "github.com/gogo/protobuf" - packages = ["proto"] + packages = [ + "gogoproto", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types" + ] revision = "1adfc126b41513cc696b209667c8656ea7aac67c" version = "v1.0.0" @@ -260,8 +373,27 @@ "ptypes/duration", "ptypes/timestamp" ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/google/go-containerregistry" + packages = [ + "authn", + "name", + "v1", + "v1/empty", + "v1/mutate", + "v1/partial", + "v1/random", + "v1/remote", + "v1/remote/transport", + "v1/tarball", + "v1/types", + "v1/v1util" + ] + revision = "ee5a6c257df843b47a2666ff0fff3d31d484ebda" [[projects]] name = "github.com/googleapis/gax-go" @@ -270,22 +402,85 @@ version = "v2.0.0" [[projects]] - name = "github.com/gorilla/context" + branch = "master" + name = "github.com/grpc-ecosystem/grpc-opentracing" + packages = ["go/otgrpc"] + revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746" + +[[projects]] + name = "github.com/hashicorp/consul" + packages = ["api"] + revision = "fb848fc48818f58690db09d14640513aa6bf3c02" + version = "v1.0.7" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/errwrap" packages = ["."] - revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" - version = "v1.1" + revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" [[projects]] - name = "github.com/gorilla/mux" + branch = "master" + name = "github.com/hashicorp/go-cleanhttp" packages = ["."] - revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" - version = "v1.6.1" + revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" [[projects]] - name = "github.com/imdario/mergo" + branch = "master" + name = "github.com/hashicorp/go-immutable-radix" packages = ["."] - revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c" - version = "v0.3.4" + revision = "7f3cd4390caab3250a57f30efdb2a65dd7649ecf" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-memdb" + packages = ["."] + revision = "1289e7fffe71d8fd4d4d491ba9a412c50f244c44" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-msgpack" + packages = ["codec"] + revision = "fa3f63826f7c23912c15263591e65d54d080b458" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-multierror" + packages = ["."] + revision = "b7773ae218740a7be65057fc60b366a49b538a44" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-rootcerts" + packages = ["."] + revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-sockaddr" + packages = ["."] + revision = "6d291a969b86c4b633730bfc6b8b9d64c3aafed9" + +[[projects]] + 
branch = "master" + name = "github.com/hashicorp/golang-lru" + packages = ["simplelru"] + revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" + +[[projects]] + name = "github.com/hashicorp/memberlist" + packages = ["."] + revision = "ce8abaa0c60c2d6bee7219f5ddf500e0a1457b28" + version = "v0.1.0" + +[[projects]] + name = "github.com/hashicorp/serf" + packages = [ + "coordinate", + "serf" + ] + revision = "d6574a5bb1226678d7010325fb6c985db20ee458" + version = "v0.8.1" [[projects]] name = "github.com/inconshreveable/mousetrap" @@ -294,10 +489,10 @@ version = "v1.0" [[projects]] - name = "github.com/mattn/go-runewidth" + name = "github.com/ishidawataru/sctp" packages = ["."] - revision = "9e777a8366cce605130a531d2cd6363d07ad7317" - version = "v0.0.2" + revision = "07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb" + source = "github.com/ishidawataru/sctp" [[projects]] name = "github.com/mattn/go-shellwords" @@ -312,16 +507,26 @@ version = "v1.0.0" [[projects]] - name = "github.com/mistifyio/go-zfs" + name = "github.com/miekg/dns" packages = ["."] - revision = "cdc0f941c4d0e0e94d85348285568d921891e138" - version = "v2.1.1" + revision = "83c435cc65d2862736428b9b4d07d0ab10ad3e4d" + version = "v1.0.5" [[projects]] branch = "master" - name = "github.com/mtrmac/gpgme" + name = "github.com/mitchellh/go-homedir" packages = ["."] - revision = "b2432428689ca58c2b8e8dea9449d3295cf96fc9" + revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" + +[[projects]] + branch = "master" + name = "github.com/moby/buildkit" + packages = [ + "identity", + "session", + "session/filesync" + ] + revision = "b6fee5e09d7aa62d1cd950b6bbee2d09d049e3fd" [[projects]] name = "github.com/opencontainers/go-digest" @@ -341,11 +546,20 @@ [[projects]] name = "github.com/opencontainers/runc" packages = [ + "libcontainer/cgroups", + "libcontainer/configs", + "libcontainer/devices", "libcontainer/system", "libcontainer/user" ] - revision = "baf6536d6259209c3edfa2b22237af82942d3dfa" - version = "v0.1.1" + revision = "4fc53a81fb7c994640722ac585fa9ca548971871" + source = "github.com/opencontainers/runc" + +[[projects]] + name = "github.com/opencontainers/runtime-spec" + packages = ["specs-go"] + revision = "4e3b9264a330d094b0386c3703c5f379119711e8" + version = "v1.0.1" [[projects]] name = "github.com/opencontainers/selinux" @@ -357,13 +571,14 @@ version = "v1.0.0-rc1" [[projects]] - branch = "master" - name = "github.com/ostreedev/ostree-go" + name = "github.com/opentracing/opentracing-go" packages = [ - "pkg/glibobject", - "pkg/otbuiltin" + ".", + "ext", + "log" ] - revision = "cb6250d5a6a240b509609915842f763fd87b819d" + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" [[projects]] name = "github.com/pkg/errors" @@ -371,15 +586,6 @@ revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" -[[projects]] - branch = "master" - name = "github.com/pquerna/ffjson" - packages = [ - "fflib/v1", - "fflib/v1/internal" - ] - revision = "d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac" - [[projects]] name = "github.com/prometheus/client_golang" packages = [ @@ -403,7 +609,7 @@ "internal/bitbucket.org/ww/goautoneg", "model" ] - revision = "38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a" + revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c" [[projects]] branch = "master" @@ -416,6 +622,18 @@ ] revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e" +[[projects]] + branch = "master" + name = "github.com/samuel/go-zookeeper" + packages = ["zk"] + revision = "c4fab1ac1bec58281ad0667dc3f0907a9476ac47" + +[[projects]] + branch = 
"master" + name = "github.com/sean-/seed" + packages = ["."] + revision = "e2103e2c35297fb7e17febb81e49b312087a2372" + [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] @@ -441,10 +659,16 @@ revision = "33e07d32887e1e06b7c025f27ce52f62c7990bc0" [[projects]] - name = "github.com/tchap/go-patricia" - packages = ["patricia"] - revision = "666120de432aea38ab06bd5c818f04f4129882c9" - version = "v2.2.6" + branch = "master" + name = "github.com/tonistiigi/fsutil" + packages = ["."] + revision = "93a0fd10b669d389e349ff54c48f13829708c9b0" + +[[projects]] + name = "github.com/ugorji/go" + packages = ["codec"] + revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab" + version = "v1.1.1" [[projects]] name = "github.com/vbatts/tar-split" @@ -456,6 +680,21 @@ revision = "38ec4ddb06dedbea0a895c4848b248eb38af221b" version = "v0.10.2" +[[projects]] + name = "github.com/vishvananda/netlink" + packages = [ + ".", + "nl" + ] + revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" + source = "github.com/vishvananda/netlink" + +[[projects]] + branch = "master" + name = "github.com/vishvananda/netns" + packages = ["."] + revision = "be1fbeda19366dea804f00efff2dd73a1642fdcc" + [[projects]] name = "go.opencensus.io" packages = [ @@ -472,40 +711,40 @@ "trace/internal", "trace/propagation" ] - revision = "0095aec66ae14801c6711210f6f0716411cefdd3" - version = "v0.8.0" + revision = "10cec2c05ea2cfb8b0d856711daedc49d8a45c56" + version = "v0.9.0" [[projects]] branch = "master" name = "golang.org/x/crypto" packages = [ - "cast5", - "openpgp", - "openpgp/armor", - "openpgp/elgamal", - "openpgp/errors", - "openpgp/packet", - "openpgp/s2k", + "ed25519", + "ed25519/internal/edwards25519", "ssh/terminal" ] - revision = "d6449816ce06963d9d136eee5a56fca5b0616e7e" + revision = "21052ae46654ecf18dfdba0f7c12701a1e2b3164" [[projects]] branch = "master" name = "golang.org/x/net" packages = [ + "bpf", "context", "context/ctxhttp", + "http/httpguts", "http2", "http2/hpack", "idna", + "internal/iana", + "internal/socket", "internal/socks", "internal/timeseries", - "lex/httplex", + "ipv4", + "ipv6", "proxy", "trace" ] - revision = "61147c48b25b599e5b561d2e9c4f3e1ef489ca41" + revision = "f73e4c9ed3b7ebdd5f699a16a880c2b1994e50dd" [[projects]] branch = "master" @@ -517,16 +756,27 @@ "jws", "jwt" ] - revision = "921ae394b9430ed4fb549668d7b087601bd60a81" + revision = "cdc340f7c179dbbfa4afd43b7614e8fcadde4269" + +[[projects]] + branch = "master" + name = "golang.org/x/sync" + packages = [ + "errgroup", + "singleflight", + "syncmap" + ] + revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" [[projects]] branch = "master" name = "golang.org/x/sys" packages = [ "unix", - "windows" + "windows", + "windows/registry" ] - revision = "3b87a42e500a6dc65dae1a55d0b641295971163e" + revision = "7db1c3b1a98089d0071c84f646ff5c96aad43682" [[projects]] name = "golang.org/x/text" @@ -549,6 +799,12 @@ revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" +[[projects]] + branch = "master" + name = "golang.org/x/time" + packages = ["rate"] + revision = "fbb02b2291d28baffd63558aa44b4b56f178d650" + [[projects]] branch = "master" name = "google.golang.org/api" @@ -563,7 +819,7 @@ "storage/v1", "transport/http" ] - revision = "dde16f2e838706995ab9946df55f5dc7c5564cc6" + revision = "4bd7f4beb291148443ed4553071c4e0697ff4afb" [[projects]] name = "google.golang.org/appengine" @@ -591,7 +847,7 @@ "googleapis/rpc/code", "googleapis/rpc/status" ] - revision = "51d0944304c3cbce4afe9e5247e21100037bff78" + revision = 
"86e600f69ee4704c6efbf6a2a40a5c10700e76c2" [[projects]] name = "google.golang.org/grpc" @@ -607,6 +863,8 @@ "encoding/proto", "grpclb/grpc_lb_v1/messages", "grpclog", + "health", + "health/grpc_health_v1", "internal", "keepalive", "metadata", @@ -620,14 +878,8 @@ "tap", "transport" ] - revision = "d11072e7ca9811b1100b80ca0269ac831f06d024" - version = "v1.11.3" - -[[projects]] - name = "gopkg.in/cheggaaa/pb.v1" - packages = ["."] - revision = "72b964305fba1230d3d818711138195f22b9ceea" - version = "v1.0.22" + revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0" + source = "google.golang.org/grpc" [[projects]] name = "gopkg.in/yaml.v2" @@ -635,15 +887,9 @@ revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" version = "v2.2.1" -[[projects]] - name = "k8s.io/client-go" - packages = ["util/homedir"] - revision = "23781f4d6632d88e869066eaebb743857aa1ef9b" - version = "v7.0.0" - [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "6d64e71de2909767e347f36c90936a5f77da94f74fe2dbf84d6d44fdbcdf4fba" + inputs-digest = "7dc0b12e9e50b1e09cfb6bf10cbeca2299a3c12ef5dd1074666215f683969b10" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 47af20cd96..eac774ddc3 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -18,16 +18,31 @@ revision = "83389a148052d74ac602f5f1d62f86ff2f3c4aa5" source = "github.com/docker/distribution" -[[constraint]] - name = "github.com/GoogleContainerTools/container-diff" - branch = "master" - source = "github.com/GoogleContainerTools/container-diff" - [[constraint]] name = "github.com/docker/docker" - revision = "b1a1234c60cf87048814aa37da523b03a7b0d344" + revision = "dfde597fbbb5de4a7559a68980401c8c405aa9af" source = "github.com/docker/docker" +[[override]] + name = "github.com/opencontainers/runc" + revision = "4fc53a81fb7c994640722ac585fa9ca548971871" + source = "github.com/opencontainers/runc" + +[[override]] + name = "github.com/ishidawataru/sctp" + revision = "07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb" + source = "github.com/ishidawataru/sctp" + +[[override]] + name = "github.com/vishvananda/netlink" + revision = "b2de5d10e38ecce8607e6b438b6d174f389a004e" + source = "github.com/vishvananda/netlink" + +[[override]] + name = "google.golang.org/grpc" + revision = "8124abf74e7633d82a5b96585b0da487d0e6eed0" + source = "google.golang.org/grpc" + [[constraint]] name = "github.com/genuinetools/amicontained" version = "0.4.0" diff --git a/Makefile b/Makefile index a37e20622f..a00b2d386a 100644 --- a/Makefile +++ b/Makefile @@ -34,22 +34,20 @@ GO_LDFLAGS += -X $(VERSION_PACKAGE).version=$(VERSION) GO_LDFLAGS += -w -s # Drop debugging symbols. GO_LDFLAGS += ' -GO_BUILD_TAGS := "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs exclude_graphdriver_overlay" - EXECUTOR_PACKAGE = $(REPOPATH)/cmd/executor KANIKO_PROJECT = $(REPOPATH)/kaniko out/executor: $(GO_FILES) - GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -tags $(GO_BUILD_TAGS) -o $@ $(EXECUTOR_PACKAGE) + GOARCH=$(GOARCH) GOOS=linux CGO_ENABLED=0 go build -ldflags $(GO_LDFLAGS) -o $@ $(EXECUTOR_PACKAGE) .PHONY: test test: out/executor @ ./test.sh .PHONY: integration-test -integration-test: out/executor +integration-test: @ ./integration-test.sh .PHONY: images -images: out/executor +images: docker build -t $(REGISTRY)/executor:latest -f deploy/Dockerfile . 
diff --git a/README.md b/README.md index 8084161d9b..a63d1a6453 100644 --- a/README.md +++ b/README.md @@ -3,21 +3,23 @@ kaniko is a tool to build container images from a Dockerfile, inside a container or Kubernetes cluster. kaniko doesn't depend on a Docker daemon and executes each command within a Dockerfile completely in userspace. -This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster. +This enables building container images in environments that can't easily or securely run a Docker daemon, such as a standard Kubernetes cluster. We're currently in the process of building kaniko, so as of now it isn't production ready. Please let us know if you have any feature requests or find any bugs! - [Kaniko](#kaniko) - - [How does kaniko work?](#how-does-kaniko-work?) + - [How does kaniko work?](#how-does-kaniko-work) - [Known Issues](#known-issues) +- [Demo](#demo) - [Development](#development) - - [kaniko Build Contexts](#kaniko-build-contexts) + - [kaniko Build Contexts](#kaniko-build-contexts) - [Running kaniko in a Kubernetes cluster](#running-kaniko-in-a-kubernetes-cluster) - [Running kaniko in Google Container Builder](#running-kaniko-in-google-container-builder) - [Running kaniko locally](#running-kaniko-locally) - [Pushing to Different Registries](#pushing-to-different-registries) + - [Debug Image](#debug-image) - [Security](#security) - [Comparison with Other Tools](#comparison-with-other-tools) - [Community](#community) @@ -30,21 +32,17 @@ We then execute the commands in the Dockerfile, snapshotting the filesystem in u After each command, we append a layer of changed files to the base image (if there are any) and update image metadata. ### Known Issues +kaniko does not support building Windows containers. -The majority of Dockerfile commands can be executed with kaniko, but we're still working on supporting the following commands: +## Demo -* SHELL -* HEALTHCHECK -* STOPSIGNAL -* ARG - -Multi-Stage Dockerfiles are also unsupported currently, but will be ready soon. +![Demo](/docs/demo.gif) ## Development ### kaniko Build Contexts kaniko supports local directories and GCS buckets as build contexts. To specify a local directory, pass in the `--context` flag as an argument to the executor image. To specify a GCS bucket, pass in the `--bucket` flag. -The GCS bucket should contain a compressed tar of the build context called `context.tar.gz`, which kaniko will unpack and use as the build context. +The GCS bucket should contain a compressed tar of the build context called `context.tar.gz`, which kaniko will unpack and use as the build context. To create `context.tar.gz`, run the following command: ```shell @@ -68,7 +66,7 @@ Requirements: * Standard Kubernetes cluster * Kubernetes Secret -To run kaniko in a Kubernetes cluster, you will need a standard running Kubernetes cluster and a Kubernetes secret, which contains the auth required to push the final image. +To run kaniko in a Kubernetes cluster, you will need a standard running Kubernetes cluster and a Kubernetes secret, which contains the auth required to push the final image. To create the secret, first you will need to create a service account in the Google Cloud Console project you want to push the final image to, with `Storage Admin` permissions. You can download a JSON key for this service account, and rename it `kaniko-secret.json`. @@ -108,7 +106,7 @@ spec: This example pulls the build context from a GCS bucket. 
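For example, staging a context for the `--bucket` flag might look like the following sketch (the context path and bucket name are placeholders, and `gsutil` is assumed to be authenticated):

```shell
# compress the build context and upload it to the bucket kaniko will read from
tar -C <path to build context> -zcvf context.tar.gz .
gsutil cp context.tar.gz gs://<bucket name>/
```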
 To use a local directory build context, you could consider using configMaps to mount in small build contexts.
 
-### Running kaniko in Google Container Builder 
+### Running kaniko in Google Container Builder
 To run kaniko in GCB, add it to your build config as a build step:
 
 ```yaml
@@ -141,22 +139,105 @@ To run kaniko in Docker, run the following command:
 
 kaniko uses Docker credential helpers to push images to a registry.
 
-kaniko comes with support for GCR, but configuring another credential helper should allow pushing to a different registry.
+kaniko comes with support for GCR, Docker `config.json`, and Amazon ECR, but configuring another credential helper should allow pushing to a different registry.
+
+#### Pushing to Docker Hub
+
+Get your Docker registry username and password encoded in base64:
+
+    echo USER:PASSWORD | base64
+
+Create a `config.json` file with your Docker registry URL and the previously generated base64 string:
+
+```
+{
+    "auths": {
+        "https://index.docker.io/v1/": {
+            "auth": "xxxxxxxxxxxxxxx"
+        }
+    }
+}
+```
+
+Run kaniko with the `config.json` mounted at `/root/.docker/config.json`:
+
+    docker run -ti --rm -v `pwd`:/workspace -v `pwd`/config.json:/root/.docker/config.json:ro gcr.io/kaniko-project/executor:latest --dockerfile=Dockerfile --destination=yourimagename
+
+#### Pushing to Amazon ECR
+The Amazon ECR [credential helper](https://github.com/awslabs/amazon-ecr-credential-helper) is built into the kaniko executor image.
+To configure credentials, you will need to do the following:
+1. Update the `credHelpers` section of [config.json](https://github.com/GoogleContainerTools/kaniko/blob/master/files/config.json) with the specific URI of your ECR registry:
+```json
+{
+    "credHelpers": {
+        "aws_account_id.dkr.ecr.region.amazonaws.com": "ecr-login"
+    }
+}
+```
+You can mount in the new config as a configMap:
+```shell
+kubectl create configmap docker-config --from-file=
+```
+2. Create a Kubernetes secret for your `~/.aws/credentials` file so that credentials can be accessed within the cluster.
+To create the secret, run:
+
+```shell
+kubectl create secret generic aws-secret --from-file=
+```
+
+The Kubernetes Pod spec should look similar to this, with the args parameters filled in:
+
+```yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kaniko
+spec:
+  containers:
+  - name: kaniko
+    image: gcr.io/kaniko-project/executor:latest
+    args: ["--dockerfile=",
+           "--context=",
+           "--destination="]
+    volumeMounts:
+    - name: aws-secret
+      mountPath: /root/.aws/
+    - name: docker-config
+      mountPath: /root/.docker/
+  restartPolicy: Never
+  volumes:
+  - name: aws-secret
+    secret:
+      secretName: aws-secret
+  - name: docker-config
+    configMap:
+      name: docker-config
+```
+### Debug Image
+The kaniko executor image is based on scratch and doesn't contain a shell.
+We provide `gcr.io/kaniko-project/executor:debug`, a debug image which consists of the kaniko executor image along with a busybox shell to enter.
+You can launch the debug image with a shell entrypoint:
+```shell
+docker run -it --entrypoint=/busybox/sh gcr.io/kaniko-project/executor:debug
+```
 
 ## Security
-
+
 kaniko by itself **does not** make it safe to run untrusted builds inside your cluster, or anywhere else.
-
+
 kaniko relies on the security features of your container runtime to provide build security.
-
+
 The minimum permissions kaniko needs inside your container are governed by a few things:
-
+
 * The permissions required to unpack your base image into its container
 * The permissions required to execute the RUN commands inside the container
-
-If you have a minimal base image (SCRATCH or similar) that doesn't require permissions to unpack, and your Dockerfile doesn't execute any commands as the root user,
-you can run Kaniko without root permissions.
+
+If you have a minimal base image (SCRATCH or similar) that doesn't require
+permissions to unpack, and your Dockerfile doesn't execute any commands as the
+root user, you can run Kaniko without root permissions. It should be noted that
+Docker runs as root by default, so you still require (in a sense) privileges to
+use Kaniko.
 
 You may be able to achieve the same default seccomp profile that Docker uses in your Pod by setting [seccomp](https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp) profiles with annotations on a [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) to create or update security policies on your cluster.
@@ -165,21 +246,37 @@ You may be able to achieve the same default seccomp profile that Docker uses in
 Similar tools include:
 * [img](https://github.com/genuinetools/img)
 * [orca-build](https://github.com/cyphar/orca-build)
+* [umoci](https://github.com/openSUSE/umoci)
 * [buildah](https://github.com/projectatomic/buildah)
-* [FTL](https://github.com/GoogleContainerTools/runtimes-common/tree/master/ftl)
+* [FTL](https://github.com/GoogleCloudPlatform/runtimes-common/tree/master/ftl)
 * [Bazel rules_docker](https://github.com/bazelbuild/rules_docker)
 
 All of these tools build container images with different approaches.
 
-`img` can perform as a non root user from within a container, but requires that the `img` container has `RawProc` access to create nested containers.
-`kaniko` does not actually create nested containers, so it does not require `RawProc` access.
-
-`orca-build` depends on `runC` to build images from Dockerfiles, which can not run inside a container. `kaniko` doesn't use runC so it doesn't require the use of kernel namespacing techniques.
-
-`buildah` requires the same privileges as a Docker daemon does to run, while `kaniko` runs without any special privileges or permissions.
-
-`FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images for a subset of images.
-These can be thought of as a special-case "fast path" that can be used in conjunction with the support for general Dockerfiles kaniko provides.
+`img` can perform as a non-root user from within a container, but requires that
+the `img` container has `RawProc` access to create nested containers. `kaniko`
+does not actually create nested containers, so it does not require `RawProc`
+access.
+
+`orca-build` depends on `runc` to build images from Dockerfiles, which cannot
+run inside a container (for similar reasons to `img` above). `kaniko` doesn't
+use `runc` so it doesn't require the use of kernel namespacing techniques.
+However, `orca-build` does not require Docker or any privileged daemon (so
+builds can be done entirely without privilege).
+
+`umoci` works without any privileges, and also has no restrictions on the root
+filesystem being extracted (though it requires additional handling if your
+filesystem is sufficiently complicated).
+However, it has no `Dockerfile`-like build tooling (it's a slightly
+lower-level tool that can be used to build such builders -- such as
+`orca-build`).
+
+`buildah` requires the same privileges as a Docker daemon does to run, while
+`kaniko` runs without any special privileges or permissions.
+
+`FTL` and `Bazel` aim to achieve the fastest possible creation of Docker images
+for a subset of images. These can be thought of as a special-case "fast path"
+that can be used in conjunction with the support for general Dockerfiles kaniko
+provides.
 
 ## Community
diff --git a/cmd/executor/cmd/args.go b/cmd/executor/cmd/args.go
new file mode 100644
index 0000000000..fdf6eb0c93
--- /dev/null
+++ b/cmd/executor/cmd/args.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+	"strings"
+
+	"github.com/sirupsen/logrus"
+)
+
+// The buildArg type is used to pass in multiple --build-arg flags.
+// It implements the pflag.Value interface (String, Set, and Type below)
+// so that cobra can register it as a repeatable flag.
+type buildArg []string
+
+// String returns the accumulated build args, comma-separated.
+func (b *buildArg) String() string {
+	return strings.Join(*b, ",")
+}
+
+// Set appends each value passed via --build-arg.
+func (b *buildArg) Set(value string) error {
+	logrus.Infof("appending to build args %s", value)
+	*b = append(*b, value)
+	return nil
+}
+
+// Type describes the flag value in usage output.
+func (b *buildArg) Type() string {
+	return "build-arg type"
+}
diff --git a/cmd/executor/cmd/root.go b/cmd/executor/cmd/root.go
index 7a648fb1d2..63e42831d7 100644
--- a/cmd/executor/cmd/root.go
+++ b/cmd/executor/cmd/root.go
@@ -21,24 +21,25 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/genuinetools/amicontained/container"
-
-	"github.com/GoogleContainerTools/kaniko/pkg/executor"
-
 	"github.com/GoogleContainerTools/kaniko/pkg/constants"
+	"github.com/GoogleContainerTools/kaniko/pkg/executor"
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/genuinetools/amicontained/container"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 )
 
 var (
-	dockerfilePath string
-	destination    string
-	srcContext     string
-	snapshotMode   string
-	bucket         string
-	logLevel       string
-	force          bool
+	dockerfilePath              string
+	destination                 string
+	srcContext                  string
+	snapshotMode                string
+	bucket                      string
+	dockerInsecureSkipTLSVerify bool
+	logLevel                    string
+	force                       bool
+	buildArgs                   buildArg
+	tarPath                     string
 )
 
 func init() {
@@ -46,9 +47,13 @@ func init() {
 	RootCmd.PersistentFlags().StringVarP(&srcContext, "context", "c", "/workspace/", "Path to the dockerfile build context.")
 	RootCmd.PersistentFlags().StringVarP(&bucket, "bucket", "b", "", "Name of the GCS bucket from which to access build context as tarball.")
 	RootCmd.PersistentFlags().StringVarP(&destination, "destination", "d", "", "Registry the final image should be pushed to (ex: gcr.io/test/example:latest)")
+	RootCmd.MarkPersistentFlagRequired("destination")
 	RootCmd.PersistentFlags().StringVarP(&snapshotMode, "snapshotMode", "", "full", "Set this flag to change the file attributes inspected during snapshotting")
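+	// The buildArg type (args.go) implements pflag.Value, so --build-arg can be passed repeatedly and the values accumulate.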
+	RootCmd.PersistentFlags().VarP(&buildArgs, "build-arg", "", "This flag allows you to pass in ARG values at build time. Set it repeatedly for multiple values.")
+	RootCmd.PersistentFlags().BoolVarP(&dockerInsecureSkipTLSVerify, "insecure-skip-tls-verify", "", false, "Push to an insecure registry, ignoring TLS verification")
 	RootCmd.PersistentFlags().StringVarP(&logLevel, "verbosity", "v", constants.DefaultLogLevel, "Log level (debug, info, warn, error, fatal, panic)")
 	RootCmd.PersistentFlags().BoolVarP(&force, "force", "", false, "Force building outside of a container")
+	RootCmd.PersistentFlags().StringVarP(&tarPath, "tarPath", "", "", "Path to save the image as a tarball instead of pushing it")
 }
 
 var RootCmd = &cobra.Command{
@@ -70,7 +75,16 @@ var RootCmd = &cobra.Command{
 			}
 			logrus.Warn("kaniko is being run outside of a container. This can have dangerous effects on your system")
 		}
-		if err := executor.DoBuild(dockerfilePath, srcContext, destination, snapshotMode); err != nil {
+		if err := os.Chdir("/"); err != nil {
+			logrus.Error(err)
+			os.Exit(1)
+		}
+		ref, image, err := executor.DoBuild(dockerfilePath, srcContext, snapshotMode, buildArgs)
+		if err != nil {
+			logrus.Error(err)
+			os.Exit(1)
+		}
+		if err := executor.DoPush(ref, image, destination, tarPath); err != nil {
 			logrus.Error(err)
 			os.Exit(1)
 		}
diff --git a/deploy/Dockerfile b/deploy/Dockerfile
index a1b920c5de..ff9eb8af4f 100644
--- a/deploy/Dockerfile
+++ b/deploy/Dockerfile
@@ -14,13 +14,31 @@
 
 # Builds the static Go image to execute in a Kubernetes job
 
-FROM scratch
-ADD out/executor /kaniko/executor
-ADD files/ca-certificates.crt /kaniko/ssl/certs/
-ADD files/docker-credential-gcr /usr/local/bin/
-ADD files/config.json /root/.docker/
+FROM golang:1.10
+WORKDIR /go/src/github.com/GoogleContainerTools/kaniko
+COPY . .
+RUN make
+# Get GCR credential helper
+ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /usr/local/bin/
+RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.4.3.tar.gz
+# Get Amazon ECR credential helper
+RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login
+RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64
+
+# need to have `cat`
+FROM alpine
+
+# need to have `nohup`
+RUN apk add --no-cache coreutils
+
+COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor
+COPY --from=0 /usr/local/bin/docker-credential-gcr /usr/local/bin/docker-credential-gcr
+COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /usr/local/bin/docker-credential-ecr-login
+COPY files/ca-certificates.crt /kaniko/ssl/certs/
+COPY files/config.json /root/.docker/
 RUN ["docker-credential-gcr", "config", "--token-source=env"]
 ENV HOME /root
+ENV USER /root
 ENV PATH /usr/local/bin
 ENV SSL_CERT_DIR=/kaniko/ssl/certs
 ENTRYPOINT ["/kaniko/executor"]
diff --git a/deploy/Dockerfile_debug b/deploy/Dockerfile_debug
new file mode 100644
index 0000000000..4ff4df9a6f
--- /dev/null
+++ b/deploy/Dockerfile_debug
@@ -0,0 +1,48 @@
+# Copyright 2018 Google, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Builds the static Go image to execute in a Kubernetes job + +# Stage 0: Build the executor binary and get credential helpers +FROM golang:1.10 +WORKDIR /go/src/github.com/GoogleContainerTools/kaniko +COPY . . +RUN make +# Get GCR credential helper +ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3-static/docker-credential-gcr_linux_amd64-1.4.3.tar.gz /usr/local/bin/ +RUN tar -C /usr/local/bin/ -xvzf /usr/local/bin/docker-credential-gcr_linux_amd64-1.4.3.tar.gz +# Get Amazon ECR credential helper +RUN go get -u github.com/awslabs/amazon-ecr-credential-helper/ecr-login/cli/docker-credential-ecr-login +RUN make -C /go/src/github.com/awslabs/amazon-ecr-credential-helper linux-amd64 + +# Stage 1: Get the busybox shell +FROM gcr.io/cloud-builders/bazel:latest +RUN git clone https://github.com/GoogleContainerTools/distroless.git +WORKDIR /distroless +RUN bazel build busybox:busybox.tar +RUN tar -C /distroless/bazel-genfiles/busybox/ -xf /distroless/bazel-genfiles/busybox/busybox.tar + +FROM scratch +COPY --from=0 /go/src/github.com/GoogleContainerTools/kaniko/out/executor /kaniko/executor +COPY --from=0 /usr/local/bin/docker-credential-gcr /usr/local/bin/docker-credential-gcr +COPY --from=0 /go/src/github.com/awslabs/amazon-ecr-credential-helper/bin/linux-amd64/docker-credential-ecr-login /usr/local/bin/docker-credential-ecr-login +COPY --from=1 /distroless/bazel-genfiles/busybox/busybox/ /busybox/ +COPY files/ca-certificates.crt /kaniko/ssl/certs/ +COPY files/config.json /root/.docker/ +RUN ["docker-credential-gcr", "config", "--token-source=env"] +ENV HOME /root +ENV USER /root +ENV PATH /usr/local/bin:/busybox +ENV SSL_CERT_DIR=/kaniko/ssl/certs +ENTRYPOINT ["/kaniko/executor"] diff --git a/deploy/executor-release.yaml b/deploy/executor-release.yaml index c733962ba9..5f20877c62 100644 --- a/deploy/executor-release.yaml +++ b/deploy/executor-release.yaml @@ -1,25 +1,11 @@ steps: - # First, install make - - name: "gcr.io/google-appengine/debian9" - args: ["sh", "-c", "apt-get update && apt-get install -y make"] - volumes: - - name: "make" - path: "/usr/bin" - - name: "gcr.io/google-appengine/debian9" - args: ["sh", "-c", "cp -r . 
/kaniko/ && mkdir -p /workspace/go/src/github.com/GoogleContainerTools/ && cp -r /kaniko/ /workspace/go/src/github.com/GoogleContainerTools/"] - volumes: - - name: "make" - path: "/usr/bin" - # Then, build the binary - - name: "gcr.io/google-appengine/golang" - args: ["sh", "-c", "make"] - volumes: - - name: "make" - path: "/usr/bin" - dir: go/src/github.com/GoogleContainerTools/kaniko - env: ["GOPATH=/workspace/go/"] - # Then, build kaniko with kaniko - - name: "gcr.io/kaniko-project/executor:latest" - args: ["--dockerfile=/workspace/deploy/Dockerfile", - "--context=/workspace/go/src/github.com/GoogleContainerTools/kaniko/", - "--destination=gcr.io/kaniko-project/executor:${COMMIT_SHA}"] + # First, build kaniko + - name: "gcr.io/cloud-builders/docker" + args: ["build", "-f", "deploy/Dockerfile", + "-t", "gcr.io/kaniko-project/executor:${COMMIT_SHA}", "."] + # Then, we want to build kaniko:debug + - name: "gcr.io/cloud-builders/docker" + args: ["build", "-f", "deploy/Dockerfile_debug", + "-t", "gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}", "."] +images: ["gcr.io/kaniko-project/executor:${COMMIT_SHA}", + "gcr.io/kaniko-project/executor:debug-${COMMIT_SHA}"] diff --git a/docs/demo.gif b/docs/demo.gif new file mode 100644 index 0000000000..3adbd1d34d Binary files /dev/null and b/docs/demo.gif differ diff --git a/files/docker-credential-gcr b/files/docker-credential-gcr deleted file mode 100755 index cff5a9be17..0000000000 Binary files a/files/docker-credential-gcr and /dev/null differ diff --git a/integration-test.sh b/integration-test.sh index 2ef8d07a63..02dd4b6889 100755 --- a/integration-test.sh +++ b/integration-test.sh @@ -21,8 +21,14 @@ if [ -f "$KOKORO_GFILE_DIR"/common.sh ]; then mkdir -p /usr/local/go/src/github.com/GoogleContainerTools/ cp -r github/kaniko /usr/local/go/src/github.com/GoogleContainerTools/ pushd /usr/local/go/src/github.com/GoogleContainerTools/kaniko + echo "Installing container-diff..." + mv $KOKORO_GFILE_DIR/container-diff-linux-amd64 $KOKORO_GFILE_DIR/container-diff + chmod +x $KOKORO_GFILE_DIR/container-diff + export PATH=$PATH:$KOKORO_GFILE_DIR + cp $KOKORO_ROOT/src/keystore/72508_gcr_application_creds $HOME/.config/gcloud/application_default_credentials.json fi echo "Running integration tests..." make out/executor -go run integration_tests/integration_test_yaml.go | gcloud container builds submit --config /dev/fd/0 . 
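+# run the Go integration tests under integration/ (they require docker and container-diff on PATH)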
+pushd integration +go test diff --git a/integration_tests/context/arr[0].txt b/integration/context/arr[0].txt similarity index 100% rename from integration_tests/context/arr[0].txt rename to integration/context/arr[0].txt diff --git a/integration_tests/context/bar/bam/bat b/integration/context/bar/bam/bat similarity index 100% rename from integration_tests/context/bar/bam/bat rename to integration/context/bar/bam/bat diff --git a/integration_tests/context/bar/bat b/integration/context/bar/bat similarity index 100% rename from integration_tests/context/bar/bat rename to integration/context/bar/bat diff --git a/integration_tests/context/bar/baz b/integration/context/bar/baz similarity index 100% rename from integration_tests/context/bar/baz rename to integration/context/bar/baz diff --git a/integration_tests/context/empty/.gitignore b/integration/context/empty/.gitignore similarity index 100% rename from integration_tests/context/empty/.gitignore rename to integration/context/empty/.gitignore diff --git a/integration_tests/context/foo b/integration/context/foo similarity index 100% rename from integration_tests/context/foo rename to integration/context/foo diff --git a/integration_tests/context/tars/file b/integration/context/tars/file similarity index 100% rename from integration_tests/context/tars/file rename to integration/context/tars/file diff --git a/integration_tests/context/tars/file.bz2 b/integration/context/tars/file.bz2 similarity index 100% rename from integration_tests/context/tars/file.bz2 rename to integration/context/tars/file.bz2 diff --git a/integration_tests/context/tars/file.tar b/integration/context/tars/file.tar similarity index 100% rename from integration_tests/context/tars/file.tar rename to integration/context/tars/file.tar diff --git a/integration_tests/context/tars/file.tar.gz b/integration/context/tars/file.tar.gz similarity index 100% rename from integration_tests/context/tars/file.tar.gz rename to integration/context/tars/file.tar.gz diff --git a/integration_tests/dockerfiles/Dockerfile_onbuild_base b/integration/dockerfiles/Dockerfile_onbuild_base similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_onbuild_base rename to integration/dockerfiles/Dockerfile_onbuild_base diff --git a/integration_tests/dockerfiles/Dockerfile_test_add b/integration/dockerfiles/Dockerfile_test_add similarity index 93% rename from integration_tests/dockerfiles/Dockerfile_test_add rename to integration/dockerfiles/Dockerfile_test_add index 6fb9d73a7c..4b58ee6fa2 100644 --- a/integration_tests/dockerfiles/Dockerfile_test_add +++ b/integration/dockerfiles/Dockerfile_test_add @@ -14,5 +14,9 @@ ADD $contextenv/* /tmp/${contextenv}/ ADD context/tars/fil* /tars/ ADD context/tars/file.tar /tars_again +# Test with ARG +ARG file +COPY $file /arg + # Finally, test adding a remote URL, concurrently with a normal file ADD https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v1.4.3/docker-credential-gcr_linux_386-1.4.3.tar.gz context/foo /test/all/ diff --git a/integration_tests/dockerfiles/Dockerfile_test_copy b/integration/dockerfiles/Dockerfile_test_copy similarity index 94% rename from integration_tests/dockerfiles/Dockerfile_test_copy rename to integration/dockerfiles/Dockerfile_test_copy index 99c179c11f..fd184394d5 100644 --- a/integration_tests/dockerfiles/Dockerfile_test_copy +++ b/integration/dockerfiles/Dockerfile_test_copy @@ -1,4 +1,4 @@ -FROM gcr.io/distroless/base +FROM alpine:3.7 COPY context/foo foo COPY context/foo /foodir/ COPY 
context/bar/b* bar/ diff --git a/integration/dockerfiles/Dockerfile_test_copy_bucket b/integration/dockerfiles/Dockerfile_test_copy_bucket new file mode 100644 index 0000000000..fd184394d5 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_copy_bucket @@ -0,0 +1,20 @@ +FROM alpine:3.7 +COPY context/foo foo +COPY context/foo /foodir/ +COPY context/bar/b* bar/ +COPY context/fo? /foo2 +COPY context/bar/doesnotexist* context/foo hello +COPY ./context/empty /empty +COPY ./ dir/ +COPY . newdir +COPY context/bar /baz/ +COPY ["context/foo", "/tmp/foo" ] +COPY context/b* /baz/ +COPY context/foo context/bar/ba? /test/ +COPY context/arr[[]0].txt /mydir/ +COPY context/bar/bat . + +ENV contextenv ./context +COPY ${contextenv}/foo /tmp/foo2 +COPY $contextenv/foo /tmp/foo3 +COPY $contextenv/* /tmp/${contextenv}/ diff --git a/integration_tests/dockerfiles/Dockerfile_test_env b/integration/dockerfiles/Dockerfile_test_env similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_env rename to integration/dockerfiles/Dockerfile_test_env diff --git a/integration_tests/dockerfiles/Dockerfile_test_extract_fs b/integration/dockerfiles/Dockerfile_test_extract_fs similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_extract_fs rename to integration/dockerfiles/Dockerfile_test_extract_fs diff --git a/integration_tests/dockerfiles/Dockerfile_test_metadata b/integration/dockerfiles/Dockerfile_test_metadata similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_metadata rename to integration/dockerfiles/Dockerfile_test_metadata diff --git a/integration/dockerfiles/Dockerfile_test_mv_add b/integration/dockerfiles/Dockerfile_test_mv_add new file mode 100644 index 0000000000..60200df9cf --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_mv_add @@ -0,0 +1,3 @@ +FROM busybox +ADD context/tars /tmp/tars +RUN mv /tmp/tars /foo diff --git a/integration_tests/dockerfiles/Dockerfile_test_onbuild b/integration/dockerfiles/Dockerfile_test_onbuild similarity index 77% rename from integration_tests/dockerfiles/Dockerfile_test_onbuild rename to integration/dockerfiles/Dockerfile_test_onbuild index 2968c0c622..ab145e69e9 100644 --- a/integration_tests/dockerfiles/Dockerfile_test_onbuild +++ b/integration/dockerfiles/Dockerfile_test_onbuild @@ -1,6 +1,7 @@ FROM gcr.io/kaniko-test/onbuild-base:latest COPY context/foo foo ENV dir /new/workdir/ -ONBUILD RUN echo "onbuild" > /tmp/onbuild +ARG file +ONBUILD RUN echo "onbuild" > $file ONBUILD RUN echo "onbuild 2" > ${dir} ONBUILD WORKDIR /new/workdir diff --git a/integration_tests/dockerfiles/Dockerfile_test_registry b/integration/dockerfiles/Dockerfile_test_registry similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_registry rename to integration/dockerfiles/Dockerfile_test_registry diff --git a/integration_tests/dockerfiles/Dockerfile_test_run b/integration/dockerfiles/Dockerfile_test_run similarity index 85% rename from integration_tests/dockerfiles/Dockerfile_test_run rename to integration/dockerfiles/Dockerfile_test_run index cd225fc038..ee5640f8bb 100644 --- a/integration_tests/dockerfiles/Dockerfile_test_run +++ b/integration/dockerfiles/Dockerfile_test_run @@ -14,6 +14,10 @@ FROM gcr.io/google-appengine/debian9 RUN echo "hey" > /etc/foo -RUN apt-get update && apt-get install -y \ - bzr \ - cvs \ +RUN echo "baz" > /etc/baz +RUN cp /etc/baz /etc/bar +RUN rm /etc/baz + +# Test with ARG +ARG file +RUN echo "run" > $file diff --git 
a/integration_tests/dockerfiles/Dockerfile_test_run_2 b/integration/dockerfiles/Dockerfile_test_run_2 similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_run_2 rename to integration/dockerfiles/Dockerfile_test_run_2 diff --git a/integration/dockerfiles/Dockerfile_test_scratch b/integration/dockerfiles/Dockerfile_test_scratch new file mode 100644 index 0000000000..e2a1383ee1 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_scratch @@ -0,0 +1,14 @@ +FROM scratch +# First, make sure simple arg replacement works +ARG file +COPY $file /foo +# Check that setting a default value works +ARG file2=context/bar/bat +COPY $file2 /bat +# Check that overriding a default value works +ARG file3=context/bar/baz +COPY $file3 /baz +# Check that setting an ENV will override the ARG +ENV file context/bar/bam/bat +COPY $file /env + diff --git a/integration_tests/dockerfiles/Dockerfile_test_user_run b/integration/dockerfiles/Dockerfile_test_user_run similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_user_run rename to integration/dockerfiles/Dockerfile_test_user_run diff --git a/integration_tests/dockerfiles/Dockerfile_test_volume b/integration/dockerfiles/Dockerfile_test_volume similarity index 100% rename from integration_tests/dockerfiles/Dockerfile_test_volume rename to integration/dockerfiles/Dockerfile_test_volume diff --git a/integration/dockerfiles/Dockerfile_test_workdir b/integration/dockerfiles/Dockerfile_test_workdir new file mode 100644 index 0000000000..9c2794f702 --- /dev/null +++ b/integration/dockerfiles/Dockerfile_test_workdir @@ -0,0 +1,17 @@ +FROM gcr.io/google-appengine/debian9@sha256:6b3aa04751aa2ac3b0c7be4ee71148b66d693ad212ce6d3244bd2a2a147f314a +COPY context/foo foo +WORKDIR /test +# Test that this will be appended on to the previous command, to create /test/workdir +WORKDIR workdir +COPY context/foo ./currentfoo +# Test that the RUN command will happen in the correct directory +RUN cp currentfoo newfoo +WORKDIR /new/dir +ENV dir /another/new/dir +WORKDIR $dir/newdir +WORKDIR $dir/$doesntexist +WORKDIR / + +# Test with ARG +ARG workdir +WORKDIR $workdir diff --git a/integration/integration_test.go b/integration/integration_test.go new file mode 100644 index 0000000000..4f31f3c20d --- /dev/null +++ b/integration/integration_test.go @@ -0,0 +1,192 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package integration + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strings" + "testing" + + "github.com/GoogleContainerTools/kaniko/testutil" +) + +const ( + executorImage = "executor-image" + dockerImage = "gcr.io/cloud-builders/docker" + ubuntuImage = "ubuntu" + testRepo = "gcr.io/kaniko-test/" + dockerPrefix = "docker-" + kanikoPrefix = "kaniko-" + daemonPrefix = "daemon://" + kanikoTestBucket = "kaniko-test-bucket" + dockerfilesPath = "dockerfiles" + onbuildBaseImage = testRepo + "onbuild-base:latest" + buildContextPath = "/workspace" + emptyContainerDiff = `[ + { + "Image1": "%s:latest", + "Image2": "%s:latest", + "DiffType": "File", + "Diff": { + "Adds": null, + "Dels": null, + "Mods": null + } + } + ]` +) + +func TestMain(m *testing.M) { + buildKaniko := exec.Command("docker", "build", "-t", executorImage, "-f", "../deploy/Dockerfile", "..") + err := buildKaniko.Run() + if err != nil { + fmt.Print(err) + fmt.Print("Building kaniko failed.") + os.Exit(1) + } + + // Make sure container-diff is on user's PATH + _, err = exec.LookPath("container-diff") + if err != nil { + fmt.Print("Make sure you have container-diff installed and on your PATH") + os.Exit(1) + } + + os.Exit(m.Run()) +} + +func TestRun(t *testing.T) { + dockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile*")) + if err != nil { + t.Error(err) + t.FailNow() + } + + // Map for test Dockerfile to expected ARGs + argsMap := map[string][]string{ + "Dockerfile_test_run": {"file=/file"}, + "Dockerfile_test_workdir": {"workdir=/arg/workdir"}, + "Dockerfile_test_add": {"file=context/foo"}, + "Dockerfile_test_onbuild": {"file=/tmp/onbuild"}, + "Dockerfile_test_scratch": { + "hello=hello-value", + "file=context/foo", + "file3=context/b*", + }, + } + + bucketContextTests := []string{"Dockerfile_test_copy_bucket"} + + _, ex, _, _ := runtime.Caller(0) + cwd := filepath.Dir(ex) + + for _, dockerfile := range dockerfiles { + t.Run("test_"+dockerfile, func(t *testing.T) { + dockerfile = dockerfile[len("dockerfile/")+1:] + t.Logf("%s\n", dockerfile) + + var buildArgs []string + buildArgFlag := "--build-arg" + for _, arg := range argsMap[dockerfile] { + buildArgs = append(buildArgs, buildArgFlag) + buildArgs = append(buildArgs, arg) + } + // build docker image + dockerImage := strings.ToLower(testRepo + dockerPrefix + dockerfile) + dockerCmd := exec.Command("docker", + append([]string{"build", + "-t", dockerImage, + "-f", path.Join(dockerfilesPath, dockerfile), + "."}, + buildArgs...)..., + ) + RunCommand(dockerCmd, t) + + contextFlag := "-c" + contextPath := buildContextPath + for _, d := range bucketContextTests { + if d == dockerfile { + contextFlag = "-b" + contextPath = kanikoTestBucket + break + } + } + + // build kaniko image + kanikoImage := strings.ToLower(testRepo + kanikoPrefix + dockerfile) + kanikoCmd := exec.Command("docker", + append([]string{"run", + "-v", os.Getenv("HOME") + "/.config/gcloud:/root/.config/gcloud", + "-v", cwd + ":/workspace", + executorImage, + "-f", path.Join(buildContextPath, dockerfilesPath, dockerfile), + "-d", kanikoImage, + contextFlag, contextPath}, + buildArgs...)..., + ) + + RunCommand(kanikoCmd, t) + + // container-diff + daemonDockerImage := daemonPrefix + dockerImage + containerdiffCmd := exec.Command("container-diff", "diff", + daemonDockerImage, kanikoImage, + "-q", "--type=file", "--json") + diff := RunCommand(containerdiffCmd, t) + t.Logf("diff = %s", string(diff)) + + expected := fmt.Sprintf(emptyContainerDiff, 
dockerImage, kanikoImage) + + // Let's compare the json objects themselves instead of strings to avoid + // issues with spaces and indents + var diffInt interface{} + var expectedInt interface{} + + err = json.Unmarshal(diff, &diffInt) + if err != nil { + t.Error(err) + t.Fail() + } + + err = json.Unmarshal([]byte(expected), &expectedInt) + if err != nil { + t.Error(err) + t.Fail() + } + + testutil.CheckErrorAndDeepEqual(t, false, nil, expectedInt, diffInt) + }) + } +} + +func RunCommand(cmd *exec.Cmd, t *testing.T) []byte { + output, err := cmd.CombinedOutput() + if err != nil { + t.Log(cmd.Args) + t.Log(string(output)) + t.Error(err) + t.FailNow() + } + + return output +} diff --git a/integration_tests/dockerfiles/Dockerfile_test_multistage b/integration_tests/dockerfiles/Dockerfile_test_multistage new file mode 100644 index 0000000000..78b0a2b0d0 --- /dev/null +++ b/integration_tests/dockerfiles/Dockerfile_test_multistage @@ -0,0 +1,10 @@ +FROM gcr.io/distroless/base:latest +COPY . . + +FROM scratch as second +ENV foopath context/foo +COPY --from=0 $foopath context/b* /foo/ + +FROM gcr.io/distroless/base:latest +ARG file +COPY --from=second /foo $file diff --git a/integration_tests/dockerfiles/Dockerfile_test_scratch b/integration_tests/dockerfiles/Dockerfile_test_scratch deleted file mode 100644 index 0fb22d93b4..0000000000 --- a/integration_tests/dockerfiles/Dockerfile_test_scratch +++ /dev/null @@ -1,4 +0,0 @@ -FROM scratch -ADD context/foo /foo -ENV hello hello -ADD context/foo /$hello diff --git a/integration_tests/dockerfiles/Dockerfile_test_workdir b/integration_tests/dockerfiles/Dockerfile_test_workdir deleted file mode 100644 index 6c89e4c11e..0000000000 --- a/integration_tests/dockerfiles/Dockerfile_test_workdir +++ /dev/null @@ -1,13 +0,0 @@ -FROM gcr.io/google-appengine/debian9:latest -COPY context/foo foo -WORKDIR /test -# Test that this will be appended on to the previous command, to create /test/workdir -WORKDIR workdir -COPY context/foo ./currentfoo -# Test that the RUN command will happen in the correct directory -RUN cp currentfoo newfoo -WORKDIR /new/dir -ENV dir /another/new/dir -WORKDIR $dir/newdir -WORKDIR $dir/$doesntexist -WORKDIR / diff --git a/integration_tests/dockerfiles/config_test_add.json b/integration_tests/dockerfiles/config_test_add.json deleted file mode 100644 index 135cd04a3a..0000000000 --- a/integration_tests/dockerfiles/config_test_add.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-add:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-add:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_bucket_buildcontext.json b/integration_tests/dockerfiles/config_test_bucket_buildcontext.json deleted file mode 100644 index 04c0668317..0000000000 --- a/integration_tests/dockerfiles/config_test_bucket_buildcontext.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-bucket-buildcontext:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-bucket-buildcontext:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_copy.json b/integration_tests/dockerfiles/config_test_copy.json deleted file mode 100644 index c2fcf57d60..0000000000 --- a/integration_tests/dockerfiles/config_test_copy.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - 
"Image1": "gcr.io/kaniko-test/docker-test-copy:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-copy:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_extract_fs.json b/integration_tests/dockerfiles/config_test_extract_fs.json deleted file mode 100644 index 2143ed4924..0000000000 --- a/integration_tests/dockerfiles/config_test_extract_fs.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-extract-filesystem:latest", - "Image2": "gcr.io/kaniko-test/kaniko-extract-filesystem:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_multistage.json b/integration_tests/dockerfiles/config_test_multistage.json new file mode 100644 index 0000000000..9aa0494cb8 --- /dev/null +++ b/integration_tests/dockerfiles/config_test_multistage.json @@ -0,0 +1,12 @@ +[ + { + "Image1": "gcr.io/kaniko-test/docker-test-multistage:latest", + "Image2": "gcr.io/kaniko-test/kaniko-test-multistage:latest", + "DiffType": "File", + "Diff": { + "Adds": null, + "Dels": null, + "Mods": null + } + } +] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_onbuild.json b/integration_tests/dockerfiles/config_test_onbuild.json deleted file mode 100644 index e7fa9a1e4d..0000000000 --- a/integration_tests/dockerfiles/config_test_onbuild.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-onbuild:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-onbuild:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_registry.json b/integration_tests/dockerfiles/config_test_registry.json deleted file mode 100644 index 27e7a402ef..0000000000 --- a/integration_tests/dockerfiles/config_test_registry.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-registry:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-registry:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_run.json b/integration_tests/dockerfiles/config_test_run.json deleted file mode 100644 index ae2b3d8f2a..0000000000 --- a/integration_tests/dockerfiles/config_test_run.json +++ /dev/null @@ -1,48 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-run:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-run:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": [ - { - "Name": "/var/log/dpkg.log", - "Size1": 57481, - "Size2": 57481 - }, - { - "Name": "/var/log/apt/term.log", - "Size1": 23671, - "Size2": 23671 - }, - { - "Name": "/var/cache/ldconfig/aux-cache", - "Size1": 8057, - "Size2": 8057 - }, - { - "Name": "/var/log/apt/history.log", - "Size1": 5661, - "Size2": 5661 - }, - { - "Name": "/var/log/alternatives.log", - "Size1": 2579, - "Size2": 2579 - }, - { - "Name": "/usr/lib/python2.7/dist-packages/keyrings/__init__.pyc", - "Size1": 140, - "Size2": 140 - }, - { - "Name": "/usr/lib/python2.7/dist-packages/lazr/__init__.pyc", - "Size1": 136, - "Size2": 136 - } - ] - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_run_2.json 
b/integration_tests/dockerfiles/config_test_run_2.json deleted file mode 100644 index 11f48cbca9..0000000000 --- a/integration_tests/dockerfiles/config_test_run_2.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-run-2:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-run-2:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_scratch.json b/integration_tests/dockerfiles/config_test_scratch.json deleted file mode 100644 index b9b8930fe4..0000000000 --- a/integration_tests/dockerfiles/config_test_scratch.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-scratch:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-scratch:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_volume.json b/integration_tests/dockerfiles/config_test_volume.json deleted file mode 100644 index 706bda2c34..0000000000 --- a/integration_tests/dockerfiles/config_test_volume.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-volume:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-volume:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/config_test_workdir.json b/integration_tests/dockerfiles/config_test_workdir.json deleted file mode 100644 index 3b3fcefab7..0000000000 --- a/integration_tests/dockerfiles/config_test_workdir.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "Image1": "gcr.io/kaniko-test/docker-test-workdir:latest", - "Image2": "gcr.io/kaniko-test/kaniko-test-workdir:latest", - "DiffType": "File", - "Diff": { - "Adds": null, - "Dels": null, - "Mods": null - } - } -] \ No newline at end of file diff --git a/integration_tests/dockerfiles/test_env.yaml b/integration_tests/dockerfiles/test_env.yaml deleted file mode 100644 index c23c7a5163..0000000000 --- a/integration_tests/dockerfiles/test_env.yaml +++ /dev/null @@ -1,41 +0,0 @@ -schemaVersion: '2.0.0' -metadataTest: - env: - - key: hey - value: hello - - key: PATH - value: something - - key: first - value: foo - - key: second - value: foo2 - - key: third - value: foo2:/third - - key: myName - value: John Doe - - key: myDog - value: Rex The Dog - - key: myCat - value: fluffy - - key: test - value: value value2 - - key: test1 - value: a'b'c - - key: test2 - value: a"b"c - - key: test3 - value: a b - - key: name2 - value: b c - - key: test4 - value: a"b - - key: test5 - value: a\"b - - key: test6 - value: a\'b - - key: atomic - value: two - - key: newatomic - value: one - - key: newenv - value: /newenv diff --git a/integration_tests/dockerfiles/test_metadata.yaml b/integration_tests/dockerfiles/test_metadata.yaml deleted file mode 100644 index cb0024311b..0000000000 --- a/integration_tests/dockerfiles/test_metadata.yaml +++ /dev/null @@ -1,4 +0,0 @@ -schemaVersion: '2.0.0' -metadataTest: - cmd: ["/bin/sh", "-c", "echo \"hello\""] - entrypoint: ["execute", "entrypoint"] diff --git a/integration_tests/dockerfiles/test_user.yaml b/integration_tests/dockerfiles/test_user.yaml deleted file mode 100644 index 9a4bed1dc4..0000000000 --- a/integration_tests/dockerfiles/test_user.yaml +++ /dev/null @@ -1,15 +0,0 @@ -schemaVersion: '2.0.0' -commandTests: -- name: 'whoami' - command: 
'whoami' - expectedOutput: ['testuser'] - excludedOutput: ['root'] -- name: 'file owner' - command: 'ls' - args: ['-l', '/tmp/foo'] - expectedOutput: ['.*testuser.*', '.*testgroup.*'] - excludedOutput: ['.*root.*'] -fileContentTests: -- name: "/tmp/foo" - path: "/tmp/foo" - expectedContent: ["hey"] diff --git a/integration_tests/integration_test_yaml.go b/integration_tests/integration_test_yaml.go index fc01d48485..110a374fe2 100644 --- a/integration_tests/integration_test_yaml.go +++ b/integration_tests/integration_test_yaml.go @@ -26,6 +26,7 @@ const ( executorImage = "executor-image" dockerImage = "gcr.io/cloud-builders/docker" ubuntuImage = "ubuntu" + structureTestImage = "gcr.io/gcp-runtimes/container-structure-test" testRepo = "gcr.io/kaniko-test/" dockerPrefix = "docker-" kanikoPrefix = "kaniko-" @@ -46,6 +47,7 @@ var fileTests = []struct { kanikoContextBucket bool repo string snapshotMode string + args []string }{ { description: "test extract filesystem", @@ -63,6 +65,9 @@ var fileTests = []struct { dockerContext: dockerfilesPath, kanikoContext: dockerfilesPath, repo: "test-run", + args: []string{ + "file=/file", + }, }, { description: "test run no files changed", @@ -98,6 +103,9 @@ var fileTests = []struct { dockerContext: buildcontextPath, kanikoContext: buildcontextPath, repo: "test-workdir", + args: []string{ + "workdir=/arg/workdir", + }, }, { description: "test volume", @@ -114,6 +122,17 @@ var fileTests = []struct { dockerContext: buildcontextPath, kanikoContext: buildcontextPath, repo: "test-add", + args: []string{ + "file=context/foo", + }, + }, + { + description: "test mv add", + dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_mv_add", + configPath: "/workspace/integration_tests/dockerfiles/config_test_mv_add.json", + dockerContext: buildcontextPath, + kanikoContext: buildcontextPath, + repo: "test-mv-add", }, { description: "test registry", @@ -130,6 +149,9 @@ var fileTests = []struct { dockerContext: buildcontextPath, kanikoContext: buildcontextPath, repo: "test-onbuild", + args: []string{ + "file=/tmp/onbuild", + }, }, { description: "test scratch", @@ -138,6 +160,22 @@ var fileTests = []struct { dockerContext: buildcontextPath, kanikoContext: buildcontextPath, repo: "test-scratch", + args: []string{ + "hello=hello-value", + "file=context/foo", + "file3=context/b*", + }, + }, + { + description: "test multistage", + dockerfilePath: "/workspace/integration_tests/dockerfiles/Dockerfile_test_multistage", + configPath: "/workspace/integration_tests/dockerfiles/config_test_multistage.json", + dockerContext: buildcontextPath, + kanikoContext: buildcontextPath, + repo: "test-multistage", + args: []string{ + "file=/foo2", + }, }, } @@ -197,15 +235,6 @@ func main() { Name: ubuntuImage, Args: []string{"chmod", "+x", "container-diff-linux-amd64"}, } - structureTestsStep := step{ - Name: "gcr.io/cloud-builders/gsutil", - Args: []string{"cp", "gs://container-structure-test/latest/container-structure-test", "."}, - } - structureTestPermissions := step{ - Name: ubuntuImage, - Args: []string{"chmod", "+x", "container-structure-test"}, - } - GCSBucketTarBuildContext := step{ Name: ubuntuImage, Args: []string{"tar", "-C", "/workspace/integration_tests/", "-zcvf", "/workspace/context.tar.gz", "."}, @@ -231,18 +260,23 @@ func main() { Args: []string{"push", onbuildBaseImage}, } y := testyaml{ - Steps: []step{containerDiffStep, containerDiffPermissions, structureTestsStep, structureTestPermissions, GCSBucketTarBuildContext, uploadTarBuildContext, 
buildExecutorImage, - buildOnbuildImage, pushOnbuildBase}, + Steps: []step{containerDiffStep, containerDiffPermissions, GCSBucketTarBuildContext, + uploadTarBuildContext, buildExecutorImage, buildOnbuildImage, pushOnbuildBase}, Timeout: "1200s", } for _, test := range fileTests { // First, build the image with docker dockerImageTag := testRepo + dockerPrefix + test.repo + var buildArgs []string + buildArgFlag := "--build-arg" + for _, arg := range test.args { + buildArgs = append(buildArgs, buildArgFlag) + buildArgs = append(buildArgs, arg) + } dockerBuild := step{ Name: dockerImage, - Args: []string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.dockerContext}, + Args: append([]string{"build", "-t", dockerImageTag, "-f", test.dockerfilePath, test.dockerContext}, buildArgs...), } - // Then, buld the image with kaniko kanikoImage := testRepo + kanikoPrefix + test.repo snapshotMode := "" @@ -255,7 +289,7 @@ func main() { } kaniko := step{ Name: executorImage, - Args: []string{"--destination", kanikoImage, "--dockerfile", test.dockerfilePath, contextFlag, test.kanikoContext, snapshotMode}, + Args: append([]string{"--destination", kanikoImage, "--dockerfile", test.dockerfilePath, contextFlag, test.kanikoContext, snapshotMode}, buildArgs...), } // Pull the kaniko image @@ -280,7 +314,7 @@ func main() { } compareOutputs := step{ Name: ubuntuImage, - Args: []string{"cmp", test.configPath, containerDiffOutputFile}, + Args: []string{"cmp", "-b", test.configPath, containerDiffOutputFile}, } y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, containerDiff, catContainerDiffOutput, compareOutputs) @@ -307,20 +341,15 @@ func main() { Args: []string{"pull", kanikoImage}, } // Run structure tests on the kaniko and docker image - args := "container-structure-test -image " + kanikoImage + " " + test.structureTestYamlPath - structureTest := step{ - Name: ubuntuImage, - Args: []string{"sh", "-c", args}, - Env: []string{"PATH=/workspace:/bin"}, + kanikoStructureTest := step{ + Name: structureTestImage, + Args: []string{"test", "--image", kanikoImage, "--config", test.structureTestYamlPath}, } - args = "container-structure-test -image " + dockerImageTag + " " + test.structureTestYamlPath dockerStructureTest := step{ - Name: ubuntuImage, - Args: []string{"sh", "-c", args}, - Env: []string{"PATH=/workspace:/bin"}, + Name: structureTestImage, + Args: []string{"test", "--image", dockerImageTag, "--config", test.structureTestYamlPath}, } - - y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, structureTest, dockerStructureTest) + y.Steps = append(y.Steps, dockerBuild, kaniko, pullKanikoImage, kanikoStructureTest, dockerStructureTest) } d, _ := yaml.Marshal(&y) diff --git a/logo/Kaniko-Logo-Monochrome.png b/logo/Kaniko-Logo-Monochrome.png new file mode 100644 index 0000000000..b6ca283741 Binary files /dev/null and b/logo/Kaniko-Logo-Monochrome.png differ diff --git a/logo/Kaniko-Logo.png b/logo/Kaniko-Logo.png new file mode 100644 index 0000000000..1a37917bf8 Binary files /dev/null and b/logo/Kaniko-Logo.png differ diff --git a/logo/README.md b/logo/README.md new file mode 100644 index 0000000000..2a6907c3e5 --- /dev/null +++ b/logo/README.md @@ -0,0 +1 @@ +Thank you @ggcarlosr for this awesome logo! \ No newline at end of file diff --git a/pkg/commands/add.go b/pkg/commands/add.go index f6aee688c3..6028b4c0a9 100644 --- a/pkg/commands/add.go +++ b/pkg/commands/add.go @@ -17,11 +17,13 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "path/filepath" "strings" + "github.com/google/go-containerregistry/v1" + "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" "github.com/sirupsen/logrus" ) @@ -40,7 +42,7 @@ type AddCommand struct { // - If dest doesn't end with a slash, the filepath is inferred to be / // 2. If is a local tar archive: // -If is a local tar archive, it is unpacked at the dest, as 'tar -x' would -func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (a *AddCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { srcs := a.cmd.SourcesAndDest[:len(a.cmd.SourcesAndDest)-1] dest := a.cmd.SourcesAndDest[len(a.cmd.SourcesAndDest)-1] @@ -48,88 +50,66 @@ func (a *AddCommand) ExecuteCommand(config *manifest.Schema2Config) error { logrus.Infof("dest: %s", dest) // First, resolve any environment replacement - resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, config.Env, true) + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedEnvs, err := util.ResolveEnvironmentReplacementList(a.cmd.SourcesAndDest, replacementEnvs, true) if err != nil { return err } dest = resolvedEnvs[len(resolvedEnvs)-1] - // Get a map of [src]:[files rooted at src] - srcMap, err := util.ResolveSources(resolvedEnvs, a.buildcontext) + // Resolve wildcards and get a list of resolved sources + srcs, err = util.ResolveSources(resolvedEnvs, a.buildcontext) if err != nil { return err } + var unresolvedSrcs []string // If any of the sources are local tar archives: // 1. Unpack them to the specified destination - // 2. Remove it as a source that needs to be copied over // If any of the sources is a remote file URL: // 1. Download and copy it to the specified dest - // 2. Remove it as a source that needs to be copied - for src, files := range srcMap { - for _, file := range files { - // If file is a local tar archive, then we unpack it to dest - filePath := filepath.Join(a.buildcontext, file) - isFilenameSource, err := isFilenameSource(srcMap, file) - if err != nil { + // Else, add to the list of unresolved sources + for _, src := range srcs { + fullPath := filepath.Join(a.buildcontext, src) + if util.IsSrcRemoteFileURL(src) { + urlDest := util.URLDestinationFilepath(src, dest, config.WorkingDir) + logrus.Infof("Adding remote URL %s to %s", src, urlDest) + if err := util.DownloadFileToDest(src, urlDest); err != nil { return err } - if util.IsSrcRemoteFileURL(file) { - urlDest := util.URLDestinationFilepath(file, dest, config.WorkingDir) - logrus.Infof("Adding remote URL %s to %s", file, urlDest) - if err := util.DownloadFileToDest(file, urlDest); err != nil { - return err - } - a.snapshotFiles = append(a.snapshotFiles, urlDest) - delete(srcMap, src) - } else if isFilenameSource && util.IsFileLocalTarArchive(filePath) { - logrus.Infof("Unpacking local tar archive %s to %s", file, dest) - if err := util.UnpackLocalTarArchive(filePath, dest); err != nil { - return err - } - // Add the unpacked files to the snapshotter - filesAdded, err := util.Files(dest) - if err != nil { - return err - } - logrus.Debugf("Added %v from local tar archive %s", filesAdded, file) - a.snapshotFiles = append(a.snapshotFiles, filesAdded...) 
- delete(srcMap, src) + a.snapshotFiles = append(a.snapshotFiles, urlDest) + } else if util.IsFileLocalTarArchive(fullPath) { + logrus.Infof("Unpacking local tar archive %s to %s", src, dest) + if err := util.UnpackLocalTarArchive(fullPath, dest); err != nil { + return err } + // Add the unpacked files to the snapshotter + filesAdded, err := util.Files(dest) + if err != nil { + return err + } + logrus.Debugf("Added %v from local tar archive %s", filesAdded, src) + a.snapshotFiles = append(a.snapshotFiles, filesAdded...) + } else { + unresolvedSrcs = append(unresolvedSrcs, src) } } // With the remaining "normal" sources, create and execute a standard copy command - if len(srcMap) == 0 { + if len(unresolvedSrcs) == 0 { return nil } - var regularSrcs []string - for src := range srcMap { - regularSrcs = append(regularSrcs, src) - } + copyCmd := CopyCommand{ cmd: &instructions.CopyCommand{ - SourcesAndDest: append(regularSrcs, dest), + SourcesAndDest: append(unresolvedSrcs, dest), }, buildcontext: a.buildcontext, } - if err := copyCmd.ExecuteCommand(config); err != nil { + if err := copyCmd.ExecuteCommand(config, buildArgs); err != nil { return err } a.snapshotFiles = append(a.snapshotFiles, copyCmd.snapshotFiles...) return nil } -func isFilenameSource(srcMap map[string][]string, fileName string) (bool, error) { - for src := range srcMap { - matched, err := filepath.Match(src, fileName) - if err != nil { - return false, err - } - if matched || (src == fileName) { - return true, nil - } - } - return false, nil -} - // FilesToSnapshot should return an empty array if still nil; no files were changed func (a *AddCommand) FilesToSnapshot() []string { return a.snapshotFiles diff --git a/pkg/commands/arg.go b/pkg/commands/arg.go new file mode 100644 index 0000000000..cf4dcd525e --- /dev/null +++ b/pkg/commands/arg.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "github.com/sirupsen/logrus" + "strings" +) + +type ArgCommand struct { + cmd *instructions.ArgCommand +} + +// ExecuteCommand only needs to add this ARG key/value as seen +func (r *ArgCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("ARG") + buildArgs.AddArg(r.cmd.Key, r.cmd.Value) + return nil +} + +// FilesToSnapshot returns an empty array since this command only touches metadata. +func (r *ArgCommand) FilesToSnapshot() []string { + return []string{} +} + +// CreatedBy returns some information about the command for the image config history +func (r *ArgCommand) CreatedBy() string { + return strings.Join([]string{r.cmd.Name(), r.cmd.Key}, " ") +} diff --git a/pkg/commands/cmd.go b/pkg/commands/cmd.go index 4d49234b20..9d33385342 100644 --- a/pkg/commands/cmd.go +++ b/pkg/commands/cmd.go @@ -17,10 +17,12 @@ limitations under the License. 
package commands import ( - "github.com/containers/image/manifest" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "strings" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" - "strings" ) type CmdCommand struct { @@ -29,13 +31,18 @@ type CmdCommand struct { // ExecuteCommand executes the CMD command // Argument handling is the same as RUN. -func (c *CmdCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (c *CmdCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: CMD") var newCommand []string if c.cmd.PrependShell { // This is the default shell on Linux - // TODO: Support shell command here - shell := []string{"/bin/sh", "-c"} + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + newCommand = append(shell, strings.Join(c.cmd.CmdLine, " ")) } else { newCommand = c.cmd.CmdLine diff --git a/pkg/commands/cmd_test.go b/pkg/commands/cmd_test.go index 9a0c82e8a2..a969e4e3f5 100644 --- a/pkg/commands/cmd_test.go +++ b/pkg/commands/cmd_test.go @@ -16,34 +16,32 @@ limitations under the License. package commands import ( - "testing" - "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "testing" ) var cmdTests = []struct { prependShell bool cmdLine []string - expectedCmd strslice.StrSlice + expectedCmd []string }{ { prependShell: true, cmdLine: []string{"echo", "cmd1"}, - expectedCmd: strslice.StrSlice{"/bin/sh", "-c", "echo cmd1"}, + expectedCmd: []string{"/bin/sh", "-c", "echo cmd1"}, }, { prependShell: false, cmdLine: []string{"echo", "cmd2"}, - expectedCmd: strslice.StrSlice{"echo", "cmd2"}, + expectedCmd: []string{"echo", "cmd2"}, }, } func TestExecuteCmd(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Cmd: nil, } @@ -56,7 +54,7 @@ func TestExecuteCmd(t *testing.T) { }, }, } - err := cmd.ExecuteCommand(cfg) + err := cmd.ExecuteCommand(cfg, nil) testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedCmd, cfg.Cmd) } } diff --git a/pkg/commands/commands.go b/pkg/commands/commands.go index dd2de8a8ac..09bb19d0b2 100644 --- a/pkg/commands/commands.go +++ b/pkg/commands/commands.go @@ -17,8 +17,9 @@ limitations under the License. package commands import ( - "github.com/containers/image/manifest" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -28,7 +29,7 @@ type DockerCommand interface { // 1. Making required changes to the filesystem (ex. copying files for ADD/COPY or setting ENV variables) // 2. Updating metadata fields in the config // It should not change the config history. 
- ExecuteCommand(*manifest.Schema2Config) error + ExecuteCommand(*v1.Config, *dockerfile.BuildArgs) error // The config history has a "created by" field, should return information about the command CreatedBy() string // A list of files to snapshot, empty for metadata commands or nil if we don't know @@ -61,6 +62,14 @@ func GetCommand(cmd instructions.Command, buildcontext string) (DockerCommand, e return &OnBuildCommand{cmd: c}, nil case *instructions.VolumeCommand: return &VolumeCommand{cmd: c}, nil + case *instructions.StopSignalCommand: + return &StopSignalCommand{cmd: c}, nil + case *instructions.ArgCommand: + return &ArgCommand{cmd: c}, nil + case *instructions.ShellCommand: + return &ShellCommand{cmd: c}, nil + case *instructions.HealthCheckCommand: + return &HealthCheckCommand{cmd: c}, nil case *instructions.MaintainerCommand: logrus.Warnf("%s is deprecated, skipping", cmd.Name()) return nil, nil diff --git a/pkg/commands/copy.go b/pkg/commands/copy.go index 845e7dfcd4..89dc9256a9 100644 --- a/pkg/commands/copy.go +++ b/pkg/commands/copy.go @@ -17,13 +17,15 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/constants" "os" "path/filepath" "strings" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -33,65 +35,68 @@ type CopyCommand struct { snapshotFiles []string } -func (c *CopyCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (c *CopyCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { srcs := c.cmd.SourcesAndDest[:len(c.cmd.SourcesAndDest)-1] dest := c.cmd.SourcesAndDest[len(c.cmd.SourcesAndDest)-1] logrus.Infof("cmd: copy %s", srcs) logrus.Infof("dest: %s", dest) + // Resolve from + if c.cmd.From != "" { + c.buildcontext = filepath.Join(constants.KanikoDir, c.cmd.From) + } + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) // First, resolve any environment replacement - resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.cmd.SourcesAndDest, config.Env, true) + resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.cmd.SourcesAndDest, replacementEnvs, true) if err != nil { return err } dest = resolvedEnvs[len(resolvedEnvs)-1] - // Get a map of [src]:[files rooted at src] - srcMap, err := util.ResolveSources(resolvedEnvs, c.buildcontext) + // Resolve wildcards and get a list of resolved sources + srcs, err = util.ResolveSources(resolvedEnvs, c.buildcontext) if err != nil { return err } - // For each source, iterate through each file within and copy it over - for src, files := range srcMap { - for _, file := range files { - fi, err := os.Lstat(filepath.Join(c.buildcontext, file)) - if err != nil { + // For each source, iterate through and copy it over + for _, src := range srcs { + fullPath := filepath.Join(c.buildcontext, src) + fi, err := os.Lstat(fullPath) + if err != nil { + return err + } + cwd := config.WorkingDir + if cwd == "" { + cwd = constants.RootDir + } + destPath, err := util.DestinationFilepath(src, dest, cwd) + if err != nil { + return err + } + if fi.IsDir() { + if !filepath.IsAbs(dest) { + // we need to add '/' to the end to indicate the destination is a directory + dest = filepath.Join(cwd, dest) + "/" + } + if err := util.CopyDir(fullPath, dest); err != nil { return err } - destPath, err := 
util.DestinationFilepath(file, src, dest, config.WorkingDir, c.buildcontext) + copiedFiles, err := util.Files(dest) if err != nil { return err } - // If source file is a directory, we want to create a directory ... - if fi.IsDir() { - logrus.Infof("Creating directory %s", destPath) - if err := os.MkdirAll(destPath, fi.Mode()); err != nil { - return err - } - } else if fi.Mode()&os.ModeSymlink != 0 { - // If file is a symlink, we want to create the same relative symlink - link, err := os.Readlink(filepath.Join(c.buildcontext, file)) - if err != nil { - return err - } - linkDst := filepath.Join(destPath, link) - if err := os.Symlink(linkDst, destPath); err != nil { - logrus.Errorf("unable to symlink %s to %s", linkDst, destPath) - return err - } - } else { - // ... Else, we want to copy over a file - logrus.Infof("Copying file %s to %s", file, destPath) - srcFile, err := os.Open(filepath.Join(c.buildcontext, file)) - if err != nil { - return err - } - defer srcFile.Close() - if err := util.CreateFile(destPath, srcFile, fi.Mode()); err != nil { - return err - } + c.snapshotFiles = append(c.snapshotFiles, copiedFiles...) + } else if fi.Mode()&os.ModeSymlink != 0 { + // If file is a symlink, we want to create the same relative symlink + if err := util.CopySymlink(fullPath, destPath); err != nil { + return err + } + c.snapshotFiles = append(c.snapshotFiles, destPath) + } else { + // ... Else, we want to copy over a file + if err := util.CopyFile(fullPath, destPath); err != nil { + return err } - // Append the destination file to the list of files that should be snapshotted later c.snapshotFiles = append(c.snapshotFiles, destPath) } } diff --git a/pkg/commands/entrypoint.go b/pkg/commands/entrypoint.go index a7fd5f3ed3..f4c417ca8f 100644 --- a/pkg/commands/entrypoint.go +++ b/pkg/commands/entrypoint.go @@ -17,10 +17,12 @@ limitations under the License. 
package commands import ( - "github.com/containers/image/manifest" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "strings" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" - "strings" ) type EntrypointCommand struct { @@ -28,13 +30,18 @@ type EntrypointCommand struct { } // ExecuteCommand handles command processing similar to CMD and RUN, -func (e *EntrypointCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (e *EntrypointCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: ENTRYPOINT") var newCommand []string if e.cmd.PrependShell { // This is the default shell on Linux - // TODO: Support shell command here - shell := []string{"/bin/sh", "-c"} + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + newCommand = append(shell, strings.Join(e.cmd.CmdLine, " ")) } else { newCommand = e.cmd.CmdLine diff --git a/pkg/commands/entrypoint_test.go b/pkg/commands/entrypoint_test.go index ae24e8fea2..326bbb0ae9 100644 --- a/pkg/commands/entrypoint_test.go +++ b/pkg/commands/entrypoint_test.go @@ -19,31 +19,30 @@ import ( "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) var entrypointTests = []struct { prependShell bool cmdLine []string - expectedCmd strslice.StrSlice + expectedCmd []string }{ { prependShell: true, cmdLine: []string{"echo", "cmd1"}, - expectedCmd: strslice.StrSlice{"/bin/sh", "-c", "echo cmd1"}, + expectedCmd: []string{"/bin/sh", "-c", "echo cmd1"}, }, { prependShell: false, cmdLine: []string{"echo", "cmd2"}, - expectedCmd: strslice.StrSlice{"echo", "cmd2"}, + expectedCmd: []string{"echo", "cmd2"}, }, } func TestEntrypointExecuteCmd(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Cmd: nil, } @@ -56,7 +55,7 @@ func TestEntrypointExecuteCmd(t *testing.T) { }, }, } - err := cmd.ExecuteCommand(cfg) + err := cmd.ExecuteCommand(cfg, nil) testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedCmd, cfg.Entrypoint) } } diff --git a/pkg/commands/env.go b/pkg/commands/env.go index acb379e23f..9578b60241 100644 --- a/pkg/commands/env.go +++ b/pkg/commands/env.go @@ -17,11 +17,12 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "strings" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -29,59 +30,11 @@ type EnvCommand struct { cmd *instructions.EnvCommand } -func (e *EnvCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (e *EnvCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: ENV") newEnvs := e.cmd.Env - for index, pair := range newEnvs { - expandedKey, err := util.ResolveEnvironmentReplacement(pair.Key, config.Env, false) - if err != nil { - return err - } - expandedValue, err := util.ResolveEnvironmentReplacement(pair.Value, config.Env, false) - if err != nil { - return err - } - newEnvs[index] = instructions.KeyValuePair{ - Key: expandedKey, - Value: expandedValue, - } - } - return updateConfigEnv(newEnvs, config) -} - -func updateConfigEnv(newEnvs []instructions.KeyValuePair, config *manifest.Schema2Config) error { - // First, convert config.Env array to []instruction.KeyValuePair - var kvps []instructions.KeyValuePair - for _, env := range config.Env { - entry := strings.Split(env, "=") - kvps = append(kvps, instructions.KeyValuePair{ - Key: entry[0], - Value: entry[1], - }) - } - // Iterate through new environment variables, and replace existing keys - // We can't use a map because we need to preserve the order of the environment variables -Loop: - for _, newEnv := range newEnvs { - for index, kvp := range kvps { - // If key exists, replace the KeyValuePair... - if kvp.Key == newEnv.Key { - logrus.Debugf("Replacing environment variable %v with %v in config", kvp, newEnv) - kvps[index] = newEnv - continue Loop - } - } - // ... Else, append it as a new env variable - kvps = append(kvps, newEnv) - } - // Convert back to array and set in config - envArray := []string{} - for _, kvp := range kvps { - entry := kvp.Key + "=" + kvp.Value - envArray = append(envArray, entry) - } - config.Env = envArray - return nil + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + return util.UpdateConfigEnv(newEnvs, config, replacementEnvs) } // We know that no files have changed, so return an empty array diff --git a/pkg/commands/env_test.go b/pkg/commands/env_test.go index 346d57e434..16d7b33f70 100644 --- a/pkg/commands/env_test.go +++ b/pkg/commands/env_test.go @@ -16,46 +16,15 @@ limitations under the License. 
package commands import ( - "testing" - + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "testing" ) -func TestUpdateEnvConfig(t *testing.T) { - cfg := &manifest.Schema2Config{ - Env: []string{ - "PATH=/path/to/dir", - "hey=hey", - }, - } - - newEnvs := []instructions.KeyValuePair{ - { - Key: "foo", - Value: "foo2", - }, - { - Key: "PATH", - Value: "/new/path/", - }, - { - Key: "foo", - Value: "newfoo", - }, - } - - expectedEnvArray := []string{ - "PATH=/new/path/", - "hey=hey", - "foo=newfoo", - } - updateConfigEnv(newEnvs, cfg) - testutil.CheckErrorAndDeepEqual(t, false, nil, expectedEnvArray, cfg.Env) -} func Test_EnvExecute(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Env: []string{ "path=/usr/", "home=/root", @@ -77,6 +46,10 @@ func Test_EnvExecute(t *testing.T) { Key: "$path", Value: "$home/", }, + { + Key: "$buildArg1", + Value: "$buildArg2", + }, }, }, } @@ -86,7 +59,20 @@ func Test_EnvExecute(t *testing.T) { "home=/root", "HOME=/root", "/usr/=/root/", + "foo=foo2", } - err := envCmd.ExecuteCommand(cfg) + buildArgs := setUpBuildArgs() + err := envCmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, err, expectedEnvs, cfg.Env) } + +func setUpBuildArgs() *dockerfile.BuildArgs { + buildArgs := dockerfile.NewBuildArgs([]string{ + "buildArg1=foo", + "buildArg2=foo2", + }) + buildArgs.AddArg("buildArg1", nil) + d := "default" + buildArgs.AddArg("buildArg2", &d) + return buildArgs +} diff --git a/pkg/commands/expose.go b/pkg/commands/expose.go index 2dca6a5a55..64c32f13ae 100644 --- a/pkg/commands/expose.go +++ b/pkg/commands/expose.go @@ -18,11 +18,12 @@ package commands import ( "fmt" + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "strings" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -30,17 +31,18 @@ type ExposeCommand struct { cmd *instructions.ExposeCommand } -func (r *ExposeCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (r *ExposeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: EXPOSE") // Grab the currently exposed ports existingPorts := config.ExposedPorts if existingPorts == nil { - existingPorts = make(map[manifest.Schema2Port]struct{}) + existingPorts = make(map[string]struct{}) } + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) // Add any new ones in for _, p := range r.cmd.Ports { // Resolve any environment variables - p, err := util.ResolveEnvironmentReplacement(p, config.Env, false) + p, err := util.ResolveEnvironmentReplacement(p, replacementEnvs, false) if err != nil { return err } @@ -53,8 +55,7 @@ func (r *ExposeCommand) ExecuteCommand(config *manifest.Schema2Config) error { return fmt.Errorf("Invalid protocol: %s", protocol) } logrus.Infof("Adding exposed port: %s", p) - var x struct{} - existingPorts[manifest.Schema2Port(p)] = x + existingPorts[p] = struct{}{} } config.ExposedPorts = existingPorts return nil diff --git a/pkg/commands/expose_test.go b/pkg/commands/expose_test.go index 8bf8184f9f..8e64d47f7c 100644 --- a/pkg/commands/expose_test.go +++ b/pkg/commands/expose_test.go @@ -17,16 +17,17 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) func TestUpdateExposedPorts(t *testing.T) { - cfg := &manifest.Schema2Config{ - ExposedPorts: manifest.Schema2PortSet{ + cfg := &v1.Config{ + ExposedPorts: map[string]struct{}{ "8080/tcp": {}, }, Env: []string{ @@ -51,7 +52,7 @@ func TestUpdateExposedPorts(t *testing.T) { }, } - expectedPorts := manifest.Schema2PortSet{ + expectedPorts := map[string]struct{}{ "8080/tcp": {}, "8081/tcp": {}, "8082/tcp": {}, @@ -60,14 +61,14 @@ func TestUpdateExposedPorts(t *testing.T) { "8085/tcp": {}, "8085/udp": {}, } - - err := exposeCmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + err := exposeCmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, err, expectedPorts, cfg.ExposedPorts) } func TestInvalidProtocol(t *testing.T) { - cfg := &manifest.Schema2Config{ - ExposedPorts: manifest.Schema2PortSet{}, + cfg := &v1.Config{ + ExposedPorts: map[string]struct{}{}, } ports := []string{ @@ -79,7 +80,7 @@ func TestInvalidProtocol(t *testing.T) { Ports: ports, }, } - - err := exposeCmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + err := exposeCmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, true, err, nil, nil) } diff --git a/pkg/commands/healthcheck.go b/pkg/commands/healthcheck.go new file mode 100644 index 0000000000..e5f4924a3b --- /dev/null +++ b/pkg/commands/healthcheck.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package commands + +import ( + "strings" + + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "github.com/sirupsen/logrus" +) + +type HealthCheckCommand struct { + cmd *instructions.HealthCheckCommand +} + +// ExecuteCommand handles command processing similar to CMD and RUN, +func (h *HealthCheckCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { + logrus.Info("cmd: HEALTHCHECK") + + check := v1.HealthConfig(*h.cmd.Health) + config.Healthcheck = &check + + return nil +} + +// FilesToSnapshot returns an empty array since this is a metadata command +func (h *HealthCheckCommand) FilesToSnapshot() []string { + return []string{} +} + +// CreatedBy returns some information about the command for the image config history +func (h *HealthCheckCommand) CreatedBy() string { + entrypoint := []string{"HEALTHCHECK"} + + return strings.Join(append(entrypoint, strings.Join(h.cmd.Health.Test, " ")), " ") +} diff --git a/pkg/commands/label.go b/pkg/commands/label.go index 60577403ab..5b476226c5 100644 --- a/pkg/commands/label.go +++ b/pkg/commands/label.go @@ -17,11 +17,12 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "strings" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -29,24 +30,29 @@ type LabelCommand struct { cmd *instructions.LabelCommand } -func (r *LabelCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (r *LabelCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: LABEL") - return updateLabels(r.cmd.Labels, config) + return updateLabels(r.cmd.Labels, config, buildArgs) } -func updateLabels(labels []instructions.KeyValuePair, config *manifest.Schema2Config) error { +func updateLabels(labels []instructions.KeyValuePair, config *v1.Config, buildArgs *dockerfile.BuildArgs) error { existingLabels := config.Labels if existingLabels == nil { existingLabels = make(map[string]string) } // Let's unescape values before setting the label + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) for index, kvp := range labels { - unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, []string{}, false) + key, err := util.ResolveEnvironmentReplacement(kvp.Key, replacementEnvs, false) + if err != nil { + return err + } + unescaped, err := util.ResolveEnvironmentReplacement(kvp.Value, replacementEnvs, false) if err != nil { return err } labels[index] = instructions.KeyValuePair{ - Key: kvp.Key, + Key: key, Value: unescaped, } } diff --git a/pkg/commands/label_test.go b/pkg/commands/label_test.go index fd91cc5703..4b6d8fff1e 100644 --- a/pkg/commands/label_test.go +++ b/pkg/commands/label_test.go @@ -17,15 +17,15 @@ limitations under the License. package commands import ( - "testing" - + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "testing" ) func TestUpdateLabels(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Labels: map[string]string{ "foo": "bar", }, @@ -48,14 +48,25 @@ func TestUpdateLabels(t *testing.T) { Key: "backslashes", Value: "lots\\\\ of\\\\ words", }, + { + Key: "$label", + Value: "foo", + }, + } + + arguments := []string{ + "label=build_arg_label", } + buildArgs := dockerfile.NewBuildArgs(arguments) + buildArgs.AddArg("label", nil) expectedLabels := map[string]string{ - "foo": "override", - "bar": "baz", - "multiword": "lots of words", - "backslashes": "lots\\ of\\ words", + "foo": "override", + "bar": "baz", + "multiword": "lots of words", + "backslashes": "lots\\ of\\ words", + "build_arg_label": "foo", } - updateLabels(labels, cfg) + updateLabels(labels, cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, nil, expectedLabels, cfg.Labels) } diff --git a/pkg/commands/onbuild.go b/pkg/commands/onbuild.go index d0e1167dea..b78f32a843 100644 --- a/pkg/commands/onbuild.go +++ b/pkg/commands/onbuild.go @@ -17,9 +17,10 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -28,10 +29,11 @@ type OnBuildCommand struct { } //ExecuteCommand adds the specified expression in Onbuild to the config -func (o *OnBuildCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (o *OnBuildCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: ONBUILD") logrus.Infof("args: %s", o.cmd.Expression) - resolvedExpression, err := util.ResolveEnvironmentReplacement(o.cmd.Expression, config.Env, false) + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedExpression, err := util.ResolveEnvironmentReplacement(o.cmd.Expression, replacementEnvs, false) if err != nil { return err } diff --git a/pkg/commands/onbuild_test.go b/pkg/commands/onbuild_test.go index a67450960d..44f304184b 100644 --- a/pkg/commands/onbuild_test.go +++ b/pkg/commands/onbuild_test.go @@ -17,11 +17,12 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) var onbuildTests = []struct { @@ -50,7 +51,7 @@ var onbuildTests = []struct { func TestExecuteOnbuild(t *testing.T) { for _, test := range onbuildTests { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Env: []string{ "dir=/some/dir", }, @@ -62,8 +63,8 @@ func TestExecuteOnbuild(t *testing.T) { Expression: test.expression, }, } - - err := onbuildCmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + err := onbuildCmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedArray, cfg.OnBuild) } diff --git a/pkg/commands/run.go b/pkg/commands/run.go index 699b23dfed..612e5fa9b8 100644 --- a/pkg/commands/run.go +++ b/pkg/commands/run.go @@ -17,27 +17,32 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "github.com/sirupsen/logrus" "os" "os/exec" "strconv" "strings" "syscall" - - "github.com/containers/image/manifest" - "github.com/docker/docker/builder/dockerfile/instructions" - "github.com/sirupsen/logrus" ) type RunCommand struct { cmd *instructions.RunCommand } -func (r *RunCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (r *RunCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { var newCommand []string if r.cmd.PrependShell { // This is the default shell on Linux - // TODO: Support shell command here - shell := []string{"/bin/sh", "-c"} + var shell []string + if len(config.Shell) > 0 { + shell = config.Shell + } else { + shell = append(shell, "/bin/sh", "-c") + } + newCommand = append(shell, strings.Join(r.cmd.CmdLine, " ")) } else { newCommand = r.cmd.CmdLine @@ -49,7 +54,8 @@ func (r *RunCommand) ExecuteCommand(config *manifest.Schema2Config) error { cmd := exec.Command(newCommand[0], newCommand[1:]...) 
 	cmd.Dir = config.WorkingDir
 	cmd.Stdout = os.Stdout
-	cmd.Env = config.Env
+	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
+	cmd.Env = replacementEnvs
 
 	// If specified, run the command as a specific user
 	if config.User != "" {
diff --git a/pkg/commands/shell.go b/pkg/commands/shell.go
new file mode 100644
index 0000000000..93c37716ea
--- /dev/null
+++ b/pkg/commands/shell.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package commands
+
+import (
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/google/go-containerregistry/v1"
+	"github.com/sirupsen/logrus"
+	"strings"
+)
+
+type ShellCommand struct {
+	cmd *instructions.ShellCommand
+}
+
+// ExecuteCommand records the new default shell in the config; subsequent RUN commands use it
+func (s *ShellCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
+	logrus.Info("cmd: SHELL")
+	newShell := s.cmd.Shell
+	logrus.Infof("Replacing Shell in config with %v", newShell)
+	config.Shell = newShell
+	return nil
+}
+
+// FilesToSnapshot returns an empty array since this is a metadata command
+func (s *ShellCommand) FilesToSnapshot() []string {
+	return []string{}
+}
+
+// CreatedBy returns some information about the command for the image config history
+func (s *ShellCommand) CreatedBy() string {
+	entrypoint := []string{"SHELL"}
+	cmdLine := strings.Join(s.cmd.Shell, " ")
+
+	return strings.Join(append(entrypoint, cmdLine), " ")
+}
diff --git a/pkg/commands/shell_test.go b/pkg/commands/shell_test.go
new file mode 100644
index 0000000000..05d977553e
--- /dev/null
+++ b/pkg/commands/shell_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package commands
+
+import (
+	"testing"
+
+	"github.com/GoogleContainerTools/kaniko/testutil"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/google/go-containerregistry/v1"
+)
+
+var shellTests = []struct {
+	cmdLine       []string
+	expectedShell []string
+}{
+	{
+		cmdLine:       []string{"/bin/bash", "-c"},
+		expectedShell: []string{"/bin/bash", "-c"},
+	},
+	{
+		cmdLine:       []string{"/bin/bash"},
+		expectedShell: []string{"/bin/bash"},
+	},
+}
+
+func TestShellExecuteCmd(t *testing.T) {
+
+	cfg := &v1.Config{
+		Shell: nil,
+	}
+
+	for _, test := range shellTests {
+		cmd := ShellCommand{
+			&instructions.ShellCommand{
+				Shell: test.cmdLine,
+			},
+		}
+		err := cmd.ExecuteCommand(cfg, nil)
+		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedShell, cfg.Shell)
+	}
+}
diff --git a/pkg/commands/stopsignal.go b/pkg/commands/stopsignal.go
new file mode 100644
index 0000000000..03182c8c6c
--- /dev/null
+++ b/pkg/commands/stopsignal.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package commands
+
+import (
+	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
+	"github.com/GoogleContainerTools/kaniko/pkg/util"
+	"github.com/docker/docker/builder/dockerfile/instructions"
+	"github.com/docker/docker/pkg/signal"
+	"github.com/google/go-containerregistry/v1"
+	"github.com/sirupsen/logrus"
+	"strings"
+)
+
+type StopSignalCommand struct {
+	cmd *instructions.StopSignalCommand
+}
+
+// ExecuteCommand resolves and validates the signal, then sets it as the image's stop signal
+func (s *StopSignalCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error {
+	logrus.Info("cmd: STOPSIGNAL")
+
+	// resolve possible environment variables
+	replacementEnvs := buildArgs.ReplacementEnvs(config.Env)
+	resolvedEnvs, err := util.ResolveEnvironmentReplacementList([]string{s.cmd.Signal}, replacementEnvs, false)
+	if err != nil {
+		return err
+	}
+	stopsignal := resolvedEnvs[0]
+
+	// validate stopsignal
+	_, err = signal.ParseSignal(stopsignal)
+	if err != nil {
+		return err
+	}
+
+	logrus.Infof("Replacing StopSignal in config with %v", stopsignal)
+	config.StopSignal = stopsignal
+	return nil
+}
+
+// FilesToSnapshot returns an empty array since this is a metadata command
+func (s *StopSignalCommand) FilesToSnapshot() []string {
+	return []string{}
+}
+
+// CreatedBy returns some information about the command for the image config history
+func (s *StopSignalCommand) CreatedBy() string {
+	entrypoint := []string{"STOPSIGNAL"}
+
+	return strings.Join(append(entrypoint, s.cmd.Signal), " ")
+}
diff --git a/pkg/commands/stopsignal_test.go b/pkg/commands/stopsignal_test.go
new file mode 100644
index 0000000000..cab40077d1
--- /dev/null
+++ b/pkg/commands/stopsignal_test.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2018 Google LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package commands + +import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" + "github.com/GoogleContainerTools/kaniko/testutil" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" + "testing" +) + +var stopsignalTests = []struct { + signal string + expectedSignal string +}{ + { + signal: "SIGKILL", + expectedSignal: "SIGKILL", + }, + { + signal: "${STOPSIG}", + expectedSignal: "SIGKILL", + }, + { + signal: "1", + expectedSignal: "1", + }, +} + +func TestStopsignalExecuteCmd(t *testing.T) { + + cfg := &v1.Config{ + StopSignal: "", + Env: []string{"STOPSIG=SIGKILL"}, + } + + for _, test := range stopsignalTests { + cmd := StopSignalCommand{ + &instructions.StopSignalCommand{ + Signal: test.signal, + }, + } + b := dockerfile.NewBuildArgs([]string{}) + err := cmd.ExecuteCommand(cfg, b) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedSignal, cfg.StopSignal) + } +} diff --git a/pkg/commands/user.go b/pkg/commands/user.go index 5ca5687dc3..c40b6b9211 100644 --- a/pkg/commands/user.go +++ b/pkg/commands/user.go @@ -17,30 +17,31 @@ limitations under the License. package commands import ( - "os/user" - "strings" - + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" + "os/user" + "strings" ) type UserCommand struct { cmd *instructions.UserCommand } -func (r *UserCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (r *UserCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: USER") u := r.cmd.User userAndGroup := strings.Split(u, ":") - userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], config.Env, false) + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + userStr, err := util.ResolveEnvironmentReplacement(userAndGroup[0], replacementEnvs, false) if err != nil { return err } var groupStr string if len(userAndGroup) > 1 { - groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], config.Env, false) + groupStr, err = util.ResolveEnvironmentReplacement(userAndGroup[1], replacementEnvs, false) if err != nil { return err } diff --git a/pkg/commands/user_test.go b/pkg/commands/user_test.go index 27f19ba54d..f4e1759f38 100644 --- a/pkg/commands/user_test.go +++ b/pkg/commands/user_test.go @@ -16,11 +16,12 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) var userTests = []struct { @@ -82,7 +83,7 @@ var userTests = []struct { func TestUpdateUser(t *testing.T) { for _, test := range userTests { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Env: []string{ "envuser=root", "envgroup=root", @@ -93,7 +94,8 @@ func TestUpdateUser(t *testing.T) { User: test.user, }, } - err := cmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + err := cmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, test.shouldError, err, test.expectedUid, cfg.User) } } diff --git a/pkg/commands/volume.go b/pkg/commands/volume.go index 31fb476a92..bce7fb0460 100644 --- a/pkg/commands/volume.go +++ b/pkg/commands/volume.go @@ -17,12 +17,13 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "os" "strings" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -31,10 +32,11 @@ type VolumeCommand struct { snapshotFiles []string } -func (v *VolumeCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (v *VolumeCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: VOLUME") volumes := v.cmd.Volumes - resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, config.Env, true) + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedVolumes, err := util.ResolveEnvironmentReplacementList(volumes, replacementEnvs, true) if err != nil { return err } diff --git a/pkg/commands/volume_test.go b/pkg/commands/volume_test.go index 07b6b1d75e..9045a531b0 100644 --- a/pkg/commands/volume_test.go +++ b/pkg/commands/volume_test.go @@ -16,15 +16,16 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) func TestUpdateVolume(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ Env: []string{ "VOLUME=/etc", }, @@ -49,7 +50,7 @@ func TestUpdateVolume(t *testing.T) { "/var/lib": {}, "/etc": {}, } - - err := volumeCmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + err := volumeCmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, err, expectedVolumes, cfg.Volumes) } diff --git a/pkg/commands/workdir.go b/pkg/commands/workdir.go index 8972efecb7..7c52720a89 100644 --- a/pkg/commands/workdir.go +++ b/pkg/commands/workdir.go @@ -17,12 +17,13 @@ limitations under the License. 
package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "os" "path/filepath" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" "github.com/sirupsen/logrus" ) @@ -31,10 +32,11 @@ type WorkdirCommand struct { snapshotFiles []string } -func (w *WorkdirCommand) ExecuteCommand(config *manifest.Schema2Config) error { +func (w *WorkdirCommand) ExecuteCommand(config *v1.Config, buildArgs *dockerfile.BuildArgs) error { logrus.Info("cmd: workdir") workdirPath := w.cmd.Path - resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, config.Env, true) + replacementEnvs := buildArgs.ReplacementEnvs(config.Env) + resolvedWorkingDir, err := util.ResolveEnvironmentReplacement(workdirPath, replacementEnvs, true) if err != nil { return err } diff --git a/pkg/commands/workdir_test.go b/pkg/commands/workdir_test.go index 67187c2aed..26fe8f578b 100644 --- a/pkg/commands/workdir_test.go +++ b/pkg/commands/workdir_test.go @@ -16,11 +16,12 @@ limitations under the License. package commands import ( + "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" "testing" "github.com/GoogleContainerTools/kaniko/testutil" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/google/go-containerregistry/v1" ) // Each test here changes the same WorkingDir field in the config @@ -63,7 +64,7 @@ var workdirTests = []struct { func TestWorkdirCommand(t *testing.T) { - cfg := &manifest.Schema2Config{ + cfg := &v1.Config{ WorkingDir: "/", Env: []string{ "path=usr/", @@ -78,7 +79,8 @@ func TestWorkdirCommand(t *testing.T) { }, snapshotFiles: []string{}, } - cmd.ExecuteCommand(cfg) + buildArgs := dockerfile.NewBuildArgs([]string{}) + cmd.ExecuteCommand(cfg, buildArgs) testutil.CheckErrorAndDeepEqual(t, false, nil, test.expectedPath, cfg.WorkingDir) } } diff --git a/pkg/dockerfile/buildargs.go b/pkg/dockerfile/buildargs.go new file mode 100644 index 0000000000..d948025e1b --- /dev/null +++ b/pkg/dockerfile/buildargs.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockerfile + +import ( + d "github.com/docker/docker/builder/dockerfile" + "strings" +) + +type BuildArgs struct { + d.BuildArgs +} + +func NewBuildArgs(args []string) *BuildArgs { + argsFromOptions := make(map[string]*string) + for _, a := range args { + s := strings.Split(a, "=") + if len(s) == 1 { + argsFromOptions[s[0]] = nil + } else { + argsFromOptions[s[0]] = &s[1] + } + } + return &BuildArgs{ + *d.NewBuildArgs(argsFromOptions), + } +} + +func (b *BuildArgs) Clone() *BuildArgs { + clone := b.BuildArgs.Clone() + return &BuildArgs{ + *clone, + } +} + +// ReplacementEnvs returns a list of filtered environment variables +func (b *BuildArgs) ReplacementEnvs(envs []string) []string { + filtered := b.FilterAllowed(envs) + return append(envs, filtered...) 
+} diff --git a/pkg/dockerfile/dockerfile.go b/pkg/dockerfile/dockerfile.go index f755a547e8..973120d138 100644 --- a/pkg/dockerfile/dockerfile.go +++ b/pkg/dockerfile/dockerfile.go @@ -18,10 +18,19 @@ package dockerfile import ( "bytes" - "strings" - + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "github.com/GoogleContainerTools/kaniko/pkg/util" "github.com/docker/docker/builder/dockerfile/instructions" "github.com/docker/docker/builder/dockerfile/parser" + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/empty" + "github.com/google/go-containerregistry/v1/remote" + "net/http" + "path/filepath" + "strconv" + "strings" ) // Parse parses the contents of a Dockerfile and returns a list of commands @@ -37,6 +46,28 @@ func Parse(b []byte) ([]instructions.Stage, error) { return stages, err } +// ResolveStages resolves any calls to previous stages with names to indices +// Ex. --from=second_stage should be --from=1 for easier processing later on +func ResolveStages(stages []instructions.Stage) { + nameToIndex := make(map[string]string) + for i, stage := range stages { + index := strconv.Itoa(i) + if stage.Name != index { + nameToIndex[stage.Name] = index + } + for _, cmd := range stage.Commands { + switch c := cmd.(type) { + case *instructions.CopyCommand: + if c.From != "" { + if val, ok := nameToIndex[c.From]; ok { + c.From = val + } + } + } + } + } +} + // ParseCommands parses an array of commands into an array of instructions.Command; used for onbuild func ParseCommands(cmdArray []string) ([]instructions.Command, error) { var cmds []instructions.Command @@ -54,3 +85,69 @@ func ParseCommands(cmdArray []string) ([]instructions.Command, error) { } return cmds, nil } + +// Dependencies returns a list of files in this stage that will be needed in later stages +func Dependencies(index int, stages []instructions.Stage, buildArgs *BuildArgs) ([]string, error) { + var dependencies []string + for stageIndex, stage := range stages { + if stageIndex <= index { + continue + } + var sourceImage v1.Image + if stage.BaseName == constants.NoBaseImage { + sourceImage = empty.Image + } else { + // Initialize source image + ref, err := name.ParseReference(stage.BaseName, name.WeakValidation) + if err != nil { + return nil, err + + } + auth, err := authn.DefaultKeychain.Resolve(ref.Context().Registry) + if err != nil { + return nil, err + } + sourceImage, err = remote.Image(ref, auth, http.DefaultTransport) + if err != nil { + return nil, err + } + } + imageConfig, err := sourceImage.ConfigFile() + if err != nil { + return nil, err + } + for _, cmd := range stage.Commands { + switch c := cmd.(type) { + case *instructions.EnvCommand: + replacementEnvs := buildArgs.ReplacementEnvs(imageConfig.Config.Env) + if err := util.UpdateConfigEnv(c.Env, &imageConfig.Config, replacementEnvs); err != nil { + return nil, err + } + case *instructions.ArgCommand: + buildArgs.AddArg(c.Key, c.Value) + case *instructions.CopyCommand: + if c.From != strconv.Itoa(index) { + continue + } + // First, resolve any environment replacement + replacementEnvs := buildArgs.ReplacementEnvs(imageConfig.Config.Env) + resolvedEnvs, err := util.ResolveEnvironmentReplacementList(c.SourcesAndDest, replacementEnvs, true) + if err != nil { + return nil, err + } + // Resolve wildcards and get a list of resolved sources + srcs, err := util.ResolveSources(resolvedEnvs, constants.RootDir) + if err != nil { + 
return nil, err + } + for index, src := range srcs { + if !filepath.IsAbs(src) { + srcs[index] = filepath.Join(constants.RootDir, src) + } + } + dependencies = append(dependencies, srcs...) + } + } + } + return dependencies, nil +} diff --git a/pkg/dockerfile/dockerfile_test.go b/pkg/dockerfile/dockerfile_test.go new file mode 100644 index 0000000000..526bd3efd7 --- /dev/null +++ b/pkg/dockerfile/dockerfile_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dockerfile + +import ( + "fmt" + "github.com/GoogleContainerTools/kaniko/testutil" + "github.com/docker/docker/builder/dockerfile/instructions" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "testing" +) + +func Test_ResolveStages(t *testing.T) { + dockerfile := ` + FROM scratch + RUN echo hi > /hi + + FROM scratch AS second + COPY --from=0 /hi /hi2 + + FROM scratch + COPY --from=second /hi2 /hi3 + ` + stages, err := Parse([]byte(dockerfile)) + if err != nil { + t.Fatal(err) + } + ResolveStages(stages) + for index, stage := range stages { + if index == 0 { + continue + } + copyCmd := stage.Commands[0].(*instructions.CopyCommand) + expectedStage := strconv.Itoa(index - 1) + if copyCmd.From != expectedStage { + t.Fatalf("unexpected copy command: %s resolved to stage %s, expected %s", copyCmd.String(), copyCmd.From, expectedStage) + } + } +} + +func Test_Dependencies(t *testing.T) { + testDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + helloPath := filepath.Join(testDir, "hello") + if err := os.Mkdir(helloPath, 0755); err != nil { + t.Fatal(err) + } + + dockerfile := fmt.Sprintf(` + FROM scratch + COPY %s %s + + FROM scratch AS second + ENV hienv %s + COPY a b + COPY --from=0 /$hienv %s /hi2/ + `, helloPath, helloPath, helloPath, testDir) + + stages, err := Parse([]byte(dockerfile)) + if err != nil { + t.Fatal(err) + } + + expectedDependencies := [][]string{ + { + helloPath, + testDir, + }, + nil, + } + + for index := range stages { + buildArgs := NewBuildArgs([]string{}) + actualDeps, err := Dependencies(index, stages, buildArgs) + testutil.CheckErrorAndDeepEqual(t, false, err, expectedDependencies[index], actualDeps) + } +} + +func Test_DependenciesWithArg(t *testing.T) { + testDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + helloPath := filepath.Join(testDir, "hello") + if err := os.Mkdir(helloPath, 0755); err != nil { + t.Fatal(err) + } + + dockerfile := fmt.Sprintf(` + FROM scratch + COPY %s %s + + FROM scratch AS second + ARG hienv + COPY a b + COPY --from=0 /$hienv %s /hi2/ + `, helloPath, helloPath, testDir) + + stages, err := Parse([]byte(dockerfile)) + if err != nil { + t.Fatal(err) + } + + expectedDependencies := [][]string{ + { + helloPath, + testDir, + }, + nil, + } + buildArgs := NewBuildArgs([]string{fmt.Sprintf("hienv=%s", helloPath)}) + + for index := range stages { + actualDeps, err := Dependencies(index, stages, buildArgs) + testutil.CheckErrorAndDeepEqual(t, false, err, expectedDependencies[index], actualDeps) + } +} 
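A minimal usage sketch (illustrative only, not part of the patch) of how the new multi-stage helpers compose, using only the signatures added above. The Dockerfile path and the myarg build arg are invented for illustration, and Dependencies will contact the registry to resolve any remote base images:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/GoogleContainerTools/kaniko/pkg/dockerfile"
)

func main() {
	contents, err := ioutil.ReadFile("Dockerfile") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	stages, err := dockerfile.Parse(contents)
	if err != nil {
		log.Fatal(err)
	}
	// Rewrite COPY --from=<stage name> references to numeric indices up front,
	// so later passes only have to compare against the stage index.
	dockerfile.ResolveStages(stages)

	buildArgs := dockerfile.NewBuildArgs([]string{"myarg=myvalue"}) // hypothetical build arg
	for index := range stages {
		// Files this stage must keep around because later stages COPY --from it.
		deps, err := dockerfile.Dependencies(index, stages, buildArgs.Clone())
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stage %d dependencies: %v\n", index, deps)
	}
}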
diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index ee7c87e452..b3c697859d 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -17,103 +17,215 @@ limitations under the License. package executor import ( + "bytes" "fmt" - "io/ioutil" + "io" + "net/http" "os" + "path/filepath" + "strconv" + + "github.com/GoogleContainerTools/kaniko/pkg/snapshot" + + "github.com/google/go-containerregistry/v1/empty" + + "github.com/google/go-containerregistry/v1/tarball" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/mutate" + "github.com/google/go-containerregistry/v1/remote" + + "io/ioutil" "github.com/GoogleContainerTools/kaniko/pkg/commands" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/dockerfile" - "github.com/GoogleContainerTools/kaniko/pkg/image" - "github.com/GoogleContainerTools/kaniko/pkg/snapshot" "github.com/GoogleContainerTools/kaniko/pkg/util" - "github.com/containers/image/manifest" "github.com/docker/docker/builder/dockerfile/instructions" "github.com/sirupsen/logrus" ) -func DoBuild(dockerfilePath, srcContext, destination, snapshotMode string) error { +func DoBuild(dockerfilePath, srcContext, snapshotMode string, args []string) (name.Reference, v1.Image, error) { // Parse dockerfile and unpack base image to root d, err := ioutil.ReadFile(dockerfilePath) if err != nil { - return err + return nil, nil, err } stages, err := dockerfile.Parse(d) if err != nil { - return err - } - baseImage := stages[0].BaseName - - // Unpack file system to root - logrus.Infof("Unpacking filesystem of %s...", baseImage) - if err := util.ExtractFileSystemFromImage(baseImage); err != nil { - return err + return nil, nil, err } + dockerfile.ResolveStages(stages) hasher, err := getHasher(snapshotMode) if err != nil { - return err - } - l := snapshot.NewLayeredMap(hasher) - snapshotter := snapshot.NewSnapshotter(l, constants.RootDir) - - // Take initial snapshot - if err := snapshotter.Init(); err != nil { - return err - } - - // Initialize source image - sourceImage, err := image.NewSourceImage(baseImage) - if err != nil { - return err + return nil, nil, err } - - // Set environment variables within the image - if err := image.SetEnvVariables(sourceImage); err != nil { - return err - } - - imageConfig := sourceImage.Config() - // Currently only supports single stage builds - for _, stage := range stages { - if err := resolveOnBuild(&stage, imageConfig); err != nil { - return err + for index, stage := range stages { + baseImage := stage.BaseName + finalStage := index == len(stages)-1 + // Unpack file system to root + logrus.Infof("Unpacking filesystem of %s...", baseImage) + var sourceImage v1.Image + var ref name.Reference + if baseImage == constants.NoBaseImage { + logrus.Info("No base image, nothing to extract") + sourceImage = empty.Image + } else { + // Initialize source image + ref, err = name.ParseReference(baseImage, name.WeakValidation) + if err != nil { + return nil, nil, err + } + auth, err := authn.DefaultKeychain.Resolve(ref.Context().Registry) + if err != nil { + return nil, nil, err + } + sourceImage, err = remote.Image(ref, auth, http.DefaultTransport) + if err != nil { + return nil, nil, err + } } + if err := util.GetFSFromImage(sourceImage); err != nil { + return nil, nil, err + } + l := snapshot.NewLayeredMap(hasher) + snapshotter := snapshot.NewSnapshotter(l, 
constants.RootDir) + // Take initial snapshot + if err := snapshotter.Init(); err != nil { + return nil, nil, err + } + imageConfig, err := sourceImage.ConfigFile() + if err != nil { + return nil, nil, err + } + if err := resolveOnBuild(&stage, &imageConfig.Config); err != nil { + return nil, nil, err + } + buildArgs := dockerfile.NewBuildArgs(args) for _, cmd := range stage.Commands { dockerCommand, err := commands.GetCommand(cmd, srcContext) if err != nil { - return err + return nil, nil, err } if dockerCommand == nil { continue } - if err := dockerCommand.ExecuteCommand(imageConfig); err != nil { - return err + if err := dockerCommand.ExecuteCommand(&imageConfig.Config, buildArgs); err != nil { + return nil, nil, err + } + if !finalStage { + continue } // Now, we get the files to snapshot from this command and take the snapshot snapshotFiles := dockerCommand.FilesToSnapshot() contents, err := snapshotter.TakeSnapshot(snapshotFiles) if err != nil { - return err + return nil, nil, err } util.MoveVolumeWhitelistToWhitelist() if contents == nil { logrus.Info("No files were changed, appending empty layer to config.") - sourceImage.AppendConfigHistory(constants.Author, true) continue } // Append the layer to the image - if err := sourceImage.AppendLayer(contents, constants.Author); err != nil { - return err + opener := func() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(contents)), nil + } + layer, err := tarball.LayerFromOpener(opener) + if err != nil { + return nil, nil, err + } + sourceImage, err = mutate.Append(sourceImage, + mutate.Addendum{ + Layer: layer, + History: v1.History{ + Author: constants.Author, + CreatedBy: dockerCommand.CreatedBy(), + }, + }, + ) + if err != nil { + return nil, nil, err } } + if finalStage { + sourceImage, err = mutate.Config(sourceImage, imageConfig.Config) + if err != nil { + return nil, nil, err + } + return ref, sourceImage, nil + } + if err := saveStageDependencies(index, stages, buildArgs.Clone()); err != nil { + return nil, nil, err + } + // Delete the filesystem + if err := util.DeleteFilesystem(); err != nil { + return nil, nil, err + } } + return nil, nil, err +} + +func DoPush(ref name.Reference, image v1.Image, destination, tarPath string) error { // Push the image - if err := setDefaultEnv(); err != nil { + destRef, err := name.NewTag(destination, name.WeakValidation) + if err != nil { return err } - return image.PushImage(sourceImage, destination) + + if tarPath != "" { + return tarball.WriteToFile(tarPath, destRef, image, nil) + } + + wo := remote.WriteOptions{} + if ref != nil { + wo.MountPaths = []name.Repository{ref.Context()} + } + pushAuth, err := authn.DefaultKeychain.Resolve(destRef.Context().Registry) + if err != nil { + return err + } + return remote.Write(destRef, image, pushAuth, http.DefaultTransport, wo) +} +func saveStageDependencies(index int, stages []instructions.Stage, buildArgs *dockerfile.BuildArgs) error { + // First, get the files in this stage later stages will need + dependencies, err := dockerfile.Dependencies(index, stages, buildArgs) + logrus.Infof("saving dependencies %s", dependencies) + if err != nil { + return err + } + // Then, create the directory they will exist in + i := strconv.Itoa(index) + dependencyDir := filepath.Join(constants.KanikoDir, i) + if err := os.MkdirAll(dependencyDir, 0755); err != nil { + return err + } + // Now, copy over dependencies to this dir + for _, d := range dependencies { + fi, err := os.Lstat(d) + if err != nil { + return err + } + dest := 
filepath.Join(dependencyDir, d) + if fi.IsDir() { + if err := util.CopyDir(d, dest); err != nil { + return err + } + } else if fi.Mode()&os.ModeSymlink != 0 { + if err := util.CopySymlink(d, dest); err != nil { + return err + } + } else { + if err := util.CopyFile(d, dest); err != nil { + return err + } + } + } + return nil } func getHasher(snapshotMode string) (func(string) (string, error), error) { @@ -127,7 +239,7 @@ func getHasher(snapshotMode string) (func(string) (string, error), error) { return nil, fmt.Errorf("%s is not a valid snapshot mode", snapshotMode) } -func resolveOnBuild(stage *instructions.Stage, config *manifest.Schema2Config) error { +func resolveOnBuild(stage *instructions.Stage, config *v1.Config) error { if config.OnBuild == nil { return nil } @@ -141,18 +253,3 @@ func resolveOnBuild(stage *instructions.Stage, config *manifest.Schema2Config) e logrus.Infof("Executing %v build triggers", len(cmds)) return nil } - -// setDefaultEnv sets default values for HOME and PATH so that -// config.json and docker-credential-gcr can be accessed -func setDefaultEnv() error { - defaultEnvs := map[string]string{ - "HOME": "/root", - "PATH": "/usr/local/bin/", - } - for key, val := range defaultEnvs { - if err := os.Setenv(key, val); err != nil { - return err - } - } - return nil -} diff --git a/pkg/image/image.go b/pkg/image/image.go index 52035ef693..989decce26 100644 --- a/pkg/image/image.go +++ b/pkg/image/image.go @@ -17,79 +17,27 @@ limitations under the License. package image import ( - "fmt" "os" + "strings" - "github.com/GoogleContainerTools/kaniko/pkg/version" - "github.com/containers/image/types" + "github.com/google/go-containerregistry/v1" - img "github.com/GoogleContainerTools/container-diff/pkg/image" - "github.com/GoogleContainerTools/kaniko/pkg/constants" - "github.com/containers/image/copy" - "github.com/containers/image/docker" - "github.com/containers/image/signature" - "github.com/containers/image/transports/alltransports" "github.com/sirupsen/logrus" ) -// sourceImage is the image that will be modified by the executor - -// NewSourceImage initializes the source image with the base image -func NewSourceImage(srcImg string) (*img.MutableSource, error) { - if srcImg == constants.NoBaseImage { - return img.NewMutableSource(nil) - } - logrus.Infof("Initializing source image %s", srcImg) - ref, err := docker.ParseReference("//" + srcImg) - if err != nil { - return nil, err - } - return img.NewMutableSource(ref) -} - -// PushImage pushes the final image -func PushImage(ms *img.MutableSource, destImg string) error { - srcRef := &img.ProxyReference{ - ImageReference: nil, - Src: ms, - } - destRef, err := alltransports.ParseImageName("docker://" + destImg) - if err != nil { - return err - } - policyContext, err := getPolicyContext() +// SetEnvVariables sets environment variables as specified in the image +func SetEnvVariables(img v1.Image) error { + cfg, err := img.ConfigFile() if err != nil { return err } - logrus.Infof("Pushing image to %s", destImg) - - opts := ©.Options{ - DestinationCtx: &types.SystemContext{ - DockerRegistryUserAgent: fmt.Sprintf("kaniko/executor-%s", version.Version()), - }, - } - return copy.Image(policyContext, destRef, srcRef, opts) -} - -// SetEnvVariables sets environment variables as specified in the image -func SetEnvVariables(ms *img.MutableSource) error { - envVars := ms.Env() - for key, val := range envVars { - if err := os.Setenv(key, val); err != nil { + envVars := cfg.Config.Env + for _, envVar := range envVars { + split := 
strings.SplitN(envVar, "=", 2)
+		if err := os.Setenv(split[0], split[1]); err != nil {
 			return err
 		}
-		logrus.Debugf("Setting environment variable %s=%s", key, val)
+		logrus.Infof("Setting environment variable %s", envVar)
 	}
 	return nil
 }
-
-func getPolicyContext() (*signature.PolicyContext, error) {
-	policyContext, err := signature.NewPolicyContext(&signature.Policy{
-		Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()},
-	})
-	if err != nil {
-		logrus.Debugf("Error retrieving policy context: %s", err)
-		return nil, err
-	}
-	return policyContext, nil
-}
diff --git a/pkg/snapshot/layered_map.go b/pkg/snapshot/layered_map.go
index 608808ac68..e8574e9136 100644
--- a/pkg/snapshot/layered_map.go
+++ b/pkg/snapshot/layered_map.go
@@ -16,6 +16,11 @@ limitations under the License.
 
 package snapshot
 
+import (
+	"path/filepath"
+	"strings"
+)
+
 type LayeredMap struct {
 	layers []map[string]string
 	hasher func(string) (string, error)
@@ -33,6 +38,22 @@
 	l.layers = append(l.layers, map[string]string{})
 }
 
+// GetFlattenedPathsForWhiteOut returns every path recorded across all layers,
+// with paths that were subsequently whited out removed.
+func (l *LayeredMap) GetFlattenedPathsForWhiteOut() map[string]struct{} {
+	paths := map[string]struct{}{}
+	for _, layer := range l.layers {
+		for p := range layer {
+			if strings.HasPrefix(filepath.Base(p), ".wh.") {
+				delete(paths, p)
+			} else {
+				paths[p] = struct{}{}
+			}
+		}
+	}
+	return paths
+}
+
 func (l *LayeredMap) Get(s string) (string, bool) {
 	for i := len(l.layers) - 1; i >= 0; i-- {
 		if v, ok := l.layers[i][s]; ok {
diff --git a/pkg/snapshot/snapshot.go b/pkg/snapshot/snapshot.go
index 1966537fb8..478095afc5 100644
--- a/pkg/snapshot/snapshot.go
+++ b/pkg/snapshot/snapshot.go
@@ -19,10 +19,8 @@ package snapshot
 import (
 	"archive/tar"
 	"bytes"
-
 	"github.com/GoogleContainerTools/kaniko/pkg/util"
 	"github.com/sirupsen/logrus"
-
 	"io"
 	"io/ioutil"
 	"os"
@@ -33,6 +31,7 @@ import (
 type Snapshotter struct {
 	l         *LayeredMap
 	directory string
+	hardlinks map[uint64]string
 }
 
 // NewSnapshotter creates a new snapshotter rooted at d
@@ -51,85 +50,125 @@ func (s *Snapshotter) Init() error {
 // TakeSnapshot takes a snapshot of the filesystem, avoiding directories in the whitelist, and creates
 // a tarball of the changed files.
Return contents of the tarball, and whether or not any files were changed func (s *Snapshotter) TakeSnapshot(files []string) ([]byte, error) { - if files != nil { - return s.TakeSnapshotOfFiles(files) - } - logrus.Info("Taking snapshot of full filesystem...") buf := bytes.NewBuffer([]byte{}) - filesAdded, err := s.snapShotFS(buf) - if err != nil { - return nil, err + var filesAdded bool + var err error + if files == nil { + filesAdded, err = s.snapShotFS(buf) + } else { + filesAdded, err = s.snapshotFiles(buf, files) } - contents, err := ioutil.ReadAll(buf) if err != nil { return nil, err } + contents := buf.Bytes() if !filesAdded { return nil, nil } return contents, err } -// TakeSnapshotOfFiles takes a snapshot of specific files +// snapshotFiles takes a snapshot of specific files // Used for ADD/COPY commands, when we know which files have changed -func (s *Snapshotter) TakeSnapshotOfFiles(files []string) ([]byte, error) { - logrus.Infof("Taking snapshot of files %v...", files) +func (s *Snapshotter) snapshotFiles(f io.Writer, files []string) (bool, error) { + s.hardlinks = map[uint64]string{} s.l.Snapshot() if len(files) == 0 { logrus.Info("No files changed in this command, skipping snapshotting.") - return nil, nil + return false, nil + } + logrus.Infof("Taking snapshot of files %v...", files) + snapshottedFiles := make(map[string]bool) + for _, file := range files { + parentDirs := util.ParentDirectories(file) + files = append(parentDirs, files...) } - buf := bytes.NewBuffer([]byte{}) - w := tar.NewWriter(buf) - defer w.Close() filesAdded := false + w := tar.NewWriter(f) + defer w.Close() + + // Now create the tar. for _, file := range files { - info, err := os.Lstat(file) - if err != nil { - return nil, err + file = filepath.Clean(file) + if val, ok := snapshottedFiles[file]; ok && val { + continue } if util.PathInWhitelist(file, s.directory) { - logrus.Debugf("Not adding %s to layer, as it is whitelisted", file) + logrus.Debugf("Not adding %s to layer, as it's whitelisted", file) continue } + snapshottedFiles[file] = true + info, err := os.Lstat(file) + if err != nil { + return false, err + } // Only add to the tar if we add it to the layeredmap. maybeAdd, err := s.l.MaybeAdd(file) if err != nil { - return nil, err + return false, err } if maybeAdd { filesAdded = true - util.AddToTar(file, info, w) + if err := util.AddToTar(file, info, s.hardlinks, w); err != nil { + return false, err + } } } - if !filesAdded { - return nil, nil - } - return ioutil.ReadAll(buf) + return filesAdded, nil } func (s *Snapshotter) snapShotFS(f io.Writer) (bool, error) { + logrus.Info("Taking snapshot of full filesystem...") + s.hardlinks = map[uint64]string{} s.l.Snapshot() + existingPaths := s.l.GetFlattenedPathsForWhiteOut() filesAdded := false w := tar.NewWriter(f) defer w.Close() - err := filepath.Walk(s.directory, func(path string, info os.FileInfo, err error) error { + // Save the fs state in a map to iterate over later. + memFs := map[string]os.FileInfo{} + filepath.Walk(s.directory, func(path string, info os.FileInfo, err error) error { + memFs[path] = info + return nil + }) + + // First handle whiteouts + for p := range memFs { + delete(existingPaths, p) + } + for path := range existingPaths { + // Only add the whiteout if the directory for the file still exists. + dir := filepath.Dir(path) + if _, ok := memFs[dir]; ok { + logrus.Infof("Adding whiteout for %s", path) + filesAdded = true + if err := util.Whiteout(path, w); err != nil { + return false, err + } + } + } + + // Now create the tar. 
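+	// memFs reflects the filesystem after this command ran. Paths recorded in
+	// earlier snapshots but missing from memFs were deleted, and got whiteout
+	// entries above (when their parent directory survived); paths still on
+	// disk are added below only if the LayeredMap reports them as changed.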
+ for path, info := range memFs { if util.PathInWhitelist(path, s.directory) { logrus.Debugf("Not adding %s to layer, as it's whitelisted", path) - return nil + continue } // Only add to the tar if we add it to the layeredmap. maybeAdd, err := s.l.MaybeAdd(path) if err != nil { - return err + return false, err } if maybeAdd { + logrus.Debugf("Adding %s to layer, because it was changed.", path) filesAdded = true - return util.AddToTar(path, info, w) + if err := util.AddToTar(path, info, s.hardlinks, w); err != nil { + return false, err + } } - return nil - }) - return filesAdded, err + } + + return filesAdded, nil } diff --git a/pkg/snapshot/snapshot_test.go b/pkg/snapshot/snapshot_test.go index 2def85db0e..7cee2ad118 100644 --- a/pkg/snapshot/snapshot_test.go +++ b/pkg/snapshot/snapshot_test.go @@ -149,30 +149,23 @@ func TestSnapshotFiles(t *testing.T) { if err != nil { t.Fatal(err) } - expectedContents := map[string]string{ - filepath.Join(testDir, "foo"): "newbaz1", - } + expectedFiles := []string{"/tmp", filepath.Join(testDir, "foo")} + // Check contents of the snapshot, make sure contents is equivalent to snapshotFiles reader := bytes.NewReader(contents) tr := tar.NewReader(reader) - numFiles := 0 + var actualFiles []string for { hdr, err := tr.Next() if err == io.EOF { break } - numFiles = numFiles + 1 - if _, isFile := expectedContents[hdr.Name]; !isFile { - t.Fatalf("File %s unexpectedly in tar", hdr.Name) - } - contents, _ := ioutil.ReadAll(tr) - if string(contents) != expectedContents[hdr.Name] { - t.Fatalf("Contents of %s incorrect, expected: %s, actual: %s", hdr.Name, expectedContents[hdr.Name], string(contents)) + if err != nil { + t.Fatal(err) } + actualFiles = append(actualFiles, hdr.Name) } - if numFiles != 1 { - t.Fatalf("%s was not added.", filepath.Join(testDir, "foo")) - } + testutil.CheckErrorAndDeepEqual(t, false, nil, expectedFiles, actualFiles) } func TestEmptySnapshot(t *testing.T) { diff --git a/pkg/util/command_util.go b/pkg/util/command_util.go index e74745796c..0547718aa7 100644 --- a/pkg/util/command_util.go +++ b/pkg/util/command_util.go @@ -20,6 +20,7 @@ import ( "github.com/docker/docker/builder/dockerfile/instructions" "github.com/docker/docker/builder/dockerfile/parser" "github.com/docker/docker/builder/dockerfile/shell" + "github.com/google/go-containerregistry/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "net/http" @@ -83,8 +84,8 @@ func ContainsWildcards(paths []string) bool { } // ResolveSources resolves the given sources if the sources contains wildcards -// It returns a map of [src]:[files rooted at src] -func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) (map[string][]string, error) { +// It returns a list of resolved sources +func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) ([]string, error) { srcs := srcsAndDest[:len(srcsAndDest)-1] // If sources contain wildcards, we first need to resolve them to actual paths if ContainsWildcards(srcs) { @@ -99,13 +100,8 @@ func ResolveSources(srcsAndDest instructions.SourcesAndDest, root string) (map[s } logrus.Debugf("Resolved sources to %v", srcs) } - // Now, get a map of [src]:[files rooted at src] - srcMap, err := SourcesToFilesMap(srcs, root) - if err != nil { - return nil, err - } // Check to make sure the sources are valid - return srcMap, IsSrcsValid(srcsAndDest, srcMap) + return srcs, IsSrcsValid(srcsAndDest, srcs, root) } // matchSources returns a list of sources that match wildcards @@ -141,24 +137,9 @@ func IsDestDir(path string) bool { // 
If source is a dir: // Assume dest is also a dir, and copy to dest/relpath // If dest is not an absolute filepath, add /cwd to the beginning -func DestinationFilepath(filename, srcName, dest, cwd, buildcontext string) (string, error) { - fi, err := os.Lstat(filepath.Join(buildcontext, filename)) - if err != nil { - return "", err - } - src, err := os.Lstat(filepath.Join(buildcontext, srcName)) - if err != nil { - return "", err - } - if src.IsDir() || IsDestDir(dest) { - relPath, err := filepath.Rel(srcName, filename) - if err != nil { - return "", err - } - if relPath == "." && !fi.IsDir() { - relPath = filepath.Base(filename) - } - destPath := filepath.Join(dest, relPath) +func DestinationFilepath(src, dest, cwd string) (string, error) { + if IsDestDir(dest) { + destPath := filepath.Join(dest, filepath.Base(src)) if filepath.IsAbs(dest) { return destPath, nil } @@ -187,45 +168,42 @@ func URLDestinationFilepath(rawurl, dest, cwd string) string { return destPath } -// SourcesToFilesMap returns a map of [src]:[files rooted at source] -func SourcesToFilesMap(srcs []string, root string) (map[string][]string, error) { - srcMap := make(map[string][]string) - for _, src := range srcs { +func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, resolvedSources []string, root string) error { + srcs := srcsAndDest[:len(srcsAndDest)-1] + dest := srcsAndDest[len(srcsAndDest)-1] + + if !ContainsWildcards(srcs) { + if len(srcs) > 1 && !IsDestDir(dest) { + return errors.New("when specifying multiple sources in a COPY command, destination must be a directory and end in '/'") + } + } + + if len(resolvedSources) == 1 { + fi, err := os.Lstat(filepath.Join(root, resolvedSources[0])) + if err != nil { + return err + } + if fi.IsDir() { + return nil + } + } + + totalFiles := 0 + for _, src := range resolvedSources { if IsSrcRemoteFileURL(src) { - srcMap[src] = []string{src} + totalFiles++ continue } src = filepath.Clean(src) files, err := RelativeFiles(src, root) if err != nil { - return nil, err + return err } - srcMap[src] = files - } - return srcMap, nil -} - -// IsSrcsValid returns an error if the sources provided are invalid, or nil otherwise -func IsSrcsValid(srcsAndDest instructions.SourcesAndDest, srcMap map[string][]string) error { - srcs := srcsAndDest[:len(srcsAndDest)-1] - dest := srcsAndDest[len(srcsAndDest)-1] - - totalFiles := 0 - for _, files := range srcMap { totalFiles += len(files) } if totalFiles == 0 { return errors.New("copy failed: no source files specified") } - - if !ContainsWildcards(srcs) { - // If multiple sources and destination isn't a directory, return an error - if len(srcs) > 1 && !IsDestDir(dest) { - return errors.New("when specifying multiple sources in a COPY command, destination must be a directory and end in '/'") - } - return nil - } - // If there are wildcards, and the destination is a file, there must be exactly one file to copy over, // Otherwise, return an error if !IsDestDir(dest) && totalFiles > 1 { @@ -245,3 +223,53 @@ func IsSrcRemoteFileURL(rawurl string) bool { } return true } + +func UpdateConfigEnv(newEnvs []instructions.KeyValuePair, config *v1.Config, replacementEnvs []string) error { + for index, pair := range newEnvs { + expandedKey, err := ResolveEnvironmentReplacement(pair.Key, replacementEnvs, false) + if err != nil { + return err + } + expandedValue, err := ResolveEnvironmentReplacement(pair.Value, replacementEnvs, false) + if err != nil { + return err + } + newEnvs[index] = instructions.KeyValuePair{ + Key: expandedKey, + Value: expandedValue, + } + } 
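+	// Every entry of newEnvs now has its $VAR / ${VAR} references expanded;
+	// the rest of this function merges them into config.Env while preserving
+	// the existing order of the environment variables.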
+
+	// First, convert config.Env array to []instructions.KeyValuePair
+	var kvps []instructions.KeyValuePair
+	for _, env := range config.Env {
+		entry := strings.SplitN(env, "=", 2)
+		kvps = append(kvps, instructions.KeyValuePair{
+			Key:   entry[0],
+			Value: entry[1],
+		})
+	}
+	// Iterate through new environment variables, and replace existing keys
+	// We can't use a map because we need to preserve the order of the environment variables
+Loop:
+	for _, newEnv := range newEnvs {
+		for index, kvp := range kvps {
+			// If key exists, replace the KeyValuePair...
+			if kvp.Key == newEnv.Key {
+				logrus.Debugf("Replacing environment variable %v with %v in config", kvp, newEnv)
+				kvps[index] = newEnv
+				continue Loop
+			}
+		}
+		// ... Else, append it as a new env variable
+		kvps = append(kvps, newEnv)
+	}
+	// Convert back to array and set in config
+	envArray := []string{}
+	for _, kvp := range kvps {
+		entry := kvp.Key + "=" + kvp.Value
+		envArray = append(envArray, entry)
+	}
+	config.Env = envArray
+	return nil
+}
diff --git a/pkg/util/command_util_test.go b/pkg/util/command_util_test.go
index 242a1a3057..6ddf8c3b9c 100644
--- a/pkg/util/command_util_test.go
+++ b/pkg/util/command_util_test.go
@@ -107,96 +107,64 @@ func Test_EnvReplacement(t *testing.T) {
 	}
 }
 
-var buildContextPath = "../../integration_tests/"
+var buildContextPath = "../../integration/"
 
 var destinationFilepathTests = []struct {
-	srcName          string
-	filename         string
+	src              string
 	dest             string
 	cwd              string
-	buildcontext     string
 	expectedFilepath string
 }{
 	{
-		srcName:          "context/foo",
-		filename:         "context/foo",
+		src:              "context/foo",
 		dest:             "/foo",
 		cwd:              "/",
 		expectedFilepath: "/foo",
 	},
 	{
-		srcName:          "context/foo",
-		filename:         "context/foo",
+		src:              "context/foo",
 		dest:             "/foodir/",
 		cwd:              "/",
 		expectedFilepath: "/foodir/foo",
 	},
 	{
-		srcName:  "context/foo",
-		filename: "./context/foo",
+		src:  "context/foo",
 		cwd:  "/",
 		dest: "foo",
 		expectedFilepath: "/foo",
 	},
 	{
-		srcName:  "context/bar/",
-		filename: "context/bar/bam/bat",
+		src:  "context/bar/",
 		cwd:  "/",
 		dest: "pkg/",
-		expectedFilepath: "/pkg/bam/bat",
+		expectedFilepath: "/pkg/bar",
 	},
 	{
-		srcName:  "context/bar/",
-		filename: "context/bar/bam/bat",
+		src:  "context/bar/",
 		cwd:  "/newdir",
 		dest: "pkg/",
-		expectedFilepath: "/newdir/pkg/bam/bat",
+		expectedFilepath: "/newdir/pkg/bar",
 	},
 	{
-		srcName:  "./context/empty",
-		filename: "context/empty",
+		src:  "./context/empty",
 		cwd:  "/",
 		dest: "/empty",
 		expectedFilepath: "/empty",
 	},
 	{
-		srcName:  "./context/empty",
-		filename: "context/empty",
+		src:  "./context/empty",
 		cwd:  "/dir",
 		dest: "/empty",
 		expectedFilepath: "/empty",
 	},
 	{
-		srcName:  "./",
-		filename: "./",
+		src:  "./",
 		cwd:  "/",
 		dest: "/dir",
 		expectedFilepath: "/dir",
 	},
 	{
-		srcName:          "./",
-		filename:         "context/foo",
-		cwd:              "/",
-		dest:             "/dir",
-		expectedFilepath: "/dir/context/foo",
-	},
-	{
-		srcName:          ".",
-		filename:         "context/bar",
-		cwd:              "/",
-		dest:             "/dir",
-		expectedFilepath: "/dir/context/bar",
-	},
-	{
-		srcName:          ".",
-		filename:         "context/bar",
-		cwd:              "/",
-		dest:             "/dir",
-		expectedFilepath: "/dir/context/bar",
-	},
-	{
-		srcName:  "context/foo",
-		filename: "context/foo",
+		src:  "context/foo",
 		cwd:  "/test",
 		dest: ".",
 		expectedFilepath: "/test/foo",
@@ -205,7 +173,7 @@ var destinationFilepathTests = []struct {
 
 func Test_DestinationFilepath(t *testing.T) {
 	for _, test := range destinationFilepathTests {
-		actualFilepath, err := DestinationFilepath(test.filename, test.srcName, test.dest, test.cwd, buildContextPath)
+		actualFilepath, err := DestinationFilepath(test.src, test.dest, test.cwd)
testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedFilepath, actualFilepath) } } @@ -278,141 +246,111 @@ func Test_MatchSources(t *testing.T) { } var isSrcValidTests = []struct { - srcsAndDest []string - files map[string][]string - shouldErr bool + srcsAndDest []string + resolvedSources []string + shouldErr bool }{ { srcsAndDest: []string{ - "src1", - "src2", + "context/foo", + "context/bar", "dest", }, - files: map[string][]string{ - "src1": { - "file1", - }, - "src2:": { - "file2", - }, + resolvedSources: []string{ + "context/foo", + "context/bar", }, shouldErr: true, }, { srcsAndDest: []string{ - "src1", - "src2", + "context/foo", + "context/bar", "dest/", }, - files: map[string][]string{ - "src1": { - "file1", - }, - "src2:": { - "file2", - }, + resolvedSources: []string{ + "context/foo", + "context/bar", }, shouldErr: false, }, { srcsAndDest: []string{ - "src2/", + "context/bar/bam", "dest", }, - files: map[string][]string{ - "src1": { - "file1", - }, - "src2:": { - "file2", - }, + resolvedSources: []string{ + "context/bar/bam", }, shouldErr: false, }, { srcsAndDest: []string{ - "src2", + "context/foo", "dest", }, - files: map[string][]string{ - "src1": { - "file1", - }, - "src2:": { - "file2", - }, + resolvedSources: []string{ + "context/foo", }, shouldErr: false, }, { srcsAndDest: []string{ - "src2", - "src*", + "context/foo", + "context/b*", "dest/", }, - files: map[string][]string{ - "src1": { - "file1", - }, - "src2:": { - "file2", - }, + resolvedSources: []string{ + "context/foo", + "context/bar", }, shouldErr: false, }, { srcsAndDest: []string{ - "src2", - "src*", + "context/foo", + "context/b*", "dest", }, - files: map[string][]string{ - "src2": { - "src2/a", - "src2/b", - }, - "src*": {}, + resolvedSources: []string{ + "context/foo", + "context/bar", }, shouldErr: true, }, { srcsAndDest: []string{ - "src2", - "src*", + "context/foo", + "context/doesntexist*", "dest", }, - files: map[string][]string{ - "src2": { - "src2/a", - }, - "src*": {}, + resolvedSources: []string{ + "context/foo", }, shouldErr: false, }, { srcsAndDest: []string{ - "src2", - "src*", + "context/", "dest", }, - files: map[string][]string{ - "src2": {}, - "src*": {}, + resolvedSources: []string{ + "context/", }, - shouldErr: true, + shouldErr: false, }, } func Test_IsSrcsValid(t *testing.T) { for _, test := range isSrcValidTests { - err := IsSrcsValid(test.srcsAndDest, test.files) + err := IsSrcsValid(test.srcsAndDest, test.resolvedSources, buildContextPath) testutil.CheckError(t, test.shouldErr, err) } } var testResolveSources = []struct { - srcsAndDest []string - expectedMap map[string][]string + srcsAndDest []string + expectedList []string }{ { srcsAndDest: []string{ @@ -421,28 +359,18 @@ var testResolveSources = []struct { testUrl, "dest/", }, - expectedMap: map[string][]string{ - "context/foo": { - "context/foo", - }, - "context/bar": { - "context/bar", - "context/bar/bam", - "context/bar/bam/bat", - "context/bar/bat", - "context/bar/baz", - }, - testUrl: { - testUrl, - }, + expectedList: []string{ + "context/foo", + "context/bar", + testUrl, }, }, } func Test_ResolveSources(t *testing.T) { for _, test := range testResolveSources { - actualMap, err := ResolveSources(test.srcsAndDest, buildContextPath) - testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedMap, actualMap) + actualList, err := ResolveSources(test.srcsAndDest, buildContextPath) + testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedList, actualList) } } diff --git a/pkg/util/fs_util.go b/pkg/util/fs_util.go index 
e2d5f588fd..613a1043a6 100644 --- a/pkg/util/fs_util.go +++ b/pkg/util/fs_util.go @@ -17,6 +17,7 @@ limitations under the License. package util import ( + "archive/tar" "bufio" "io" "net/http" @@ -25,38 +26,196 @@ import ( "strings" "time" - pkgutil "github.com/GoogleContainerTools/container-diff/pkg/util" + "github.com/google/go-containerregistry/v1" + "github.com/GoogleContainerTools/kaniko/pkg/constants" - "github.com/containers/image/docker" "github.com/sirupsen/logrus" ) -var whitelist = []string{"/kaniko"} +var whitelist = []string{ + "/kaniko", + // /var/run is a special case. It's common to mount in /var/run/docker.sock or something similar + // which leads to a special mount on the /var/run/docker.sock file itself, but the directory to exist + // in the image with no way to tell if it came from the base image or not. + "/var/run", +} var volumeWhitelist = []string{} -// ExtractFileSystemFromImage pulls an image and unpacks it to a file system at root -func ExtractFileSystemFromImage(img string) error { +func GetFSFromImage(img v1.Image) error { whitelist, err := fileSystemWhitelist(constants.WhitelistPath) if err != nil { return err } - logrus.Infof("Whitelisted directories are %s", whitelist) - if img == constants.NoBaseImage { - logrus.Info("No base image, nothing to extract") - return nil - } - ref, err := docker.ParseReference("//" + img) + logrus.Infof("Mounted directories: %v", whitelist) + layers, err := img.Layers() if err != nil { return err } - imgSrc, err := ref.NewImageSource(nil) - if err != nil { - return err + + fs := map[string]struct{}{} + whiteouts := map[string]struct{}{} + + for i := len(layers) - 1; i >= 0; i-- { + logrus.Infof("Unpacking layer: %d", i) + l := layers[i] + r, err := l.Uncompressed() + if err != nil { + return err + } + tr := tar.NewReader(r) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + path := filepath.Join("/", filepath.Clean(hdr.Name)) + base := filepath.Base(path) + dir := filepath.Dir(path) + if strings.HasPrefix(base, ".wh.") { + logrus.Infof("Whiting out %s", path) + name := strings.TrimPrefix(base, ".wh.") + whiteouts[filepath.Join(dir, name)] = struct{}{} + continue + } + + if checkWhiteouts(path, whiteouts) { + logrus.Infof("Not adding %s because it is whited out", path) + continue + } + if _, ok := fs[path]; ok { + logrus.Infof("Not adding %s because it was added by a prior layer", path) + continue + } + + if checkWhitelist(path, whitelist) { + logrus.Infof("Not adding %s because it is whitelisted", path) + continue + } + if hdr.Typeflag == tar.TypeSymlink { + if checkWhitelist(hdr.Linkname, whitelist) { + logrus.Debugf("skipping symlink from %s to %s because %s is whitelisted", hdr.Linkname, path, hdr.Linkname) + continue + } + } + fs[path] = struct{}{} + + if err := extractFile("/", hdr, tr); err != nil { + return err + } + } } - return pkgutil.GetFileSystemFromReference(ref, imgSrc, constants.RootDir, whitelist) + return nil +} + +// DeleteFilesystem deletes the extracted image file system +func DeleteFilesystem() error { + logrus.Info("Deleting filesystem...") + err := filepath.Walk(constants.RootDir, func(path string, info os.FileInfo, err error) error { + if PathInWhitelist(path, constants.RootDir) || ChildDirInWhitelist(path, constants.RootDir) { + logrus.Debugf("Not deleting %s, as it's whitelisted", path) + return nil + } + if path == constants.RootDir { + return nil + } + return os.RemoveAll(path) + }) + return err +} + +// ChildDirInWhitelist returns true if there is 
a child file or directory of the path in the whitelist +func ChildDirInWhitelist(path, directory string) bool { + for _, d := range whitelist { + dirPath := filepath.Join(directory, d) + if HasFilepathPrefix(dirPath, path) { + return true + } + } + return false +} + +func unTar(r io.Reader, dest string) error { + tr := tar.NewReader(r) + for { + hdr, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := extractFile(dest, hdr, tr); err != nil { + return err + } + } + return nil +} + +func extractFile(dest string, hdr *tar.Header, tr io.Reader) error { + path := filepath.Join(dest, filepath.Clean(hdr.Name)) + base := filepath.Base(path) + dir := filepath.Dir(path) + mode := hdr.FileInfo().Mode() + switch hdr.Typeflag { + case tar.TypeReg: + logrus.Debugf("creating file %s", path) + // It's possible a file is in the tar before it's directory. + if _, err := os.Stat(dir); os.IsNotExist(err) { + logrus.Debugf("base %s for file %s does not exist. Creating.", base, path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + } + currFile, err := os.Create(path) + if err != nil { + return err + } + // manually set permissions on file, since the default umask (022) will interfere + if err = os.Chmod(path, mode); err != nil { + return err + } + if _, err = io.Copy(currFile, tr); err != nil { + return err + } + currFile.Close() + + case tar.TypeDir: + logrus.Debugf("creating dir %s", path) + if err := os.MkdirAll(path, mode); err != nil { + return err + } + // In some cases, MkdirAll doesn't change the permissions, so run Chmod + if err := os.Chmod(path, mode); err != nil { + return err + } + + case tar.TypeLink: + logrus.Debugf("link from %s to %s", hdr.Linkname, path) + // The base directory for a link may not exist before it is created. + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + if err := os.Symlink(filepath.Clean(filepath.Join("/", hdr.Linkname)), path); err != nil { + return err + } + case tar.TypeSymlink: + logrus.Debugf("symlink from %s to %s", hdr.Linkname, path) + // The base directory for a symlink may not exist before it is created. + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + return err + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + } + return nil } -// PathInWhitelist returns true if the path is whitelisted func PathInWhitelist(path, directory string) bool { for _, c := range constants.KanikoBuildFiles { if path == c { @@ -65,7 +224,30 @@ func PathInWhitelist(path, directory string) bool { } for _, d := range whitelist { dirPath := filepath.Join(directory, d) - if pkgutil.HasFilepathPrefix(path, dirPath) { + if HasFilepathPrefix(path, dirPath) { + return true + } + } + return false +} + +func checkWhiteouts(path string, whiteouts map[string]struct{}) bool { + // Don't add the file if it or it's directory are whited out. 
+	if _, ok := whiteouts[path]; ok {
+		return true
+	}
+	for wd := range whiteouts {
+		if HasFilepathPrefix(path, wd) {
+			logrus.Infof("Not adding %s because its directory is whited out", path)
+			return true
+		}
+	}
+	return false
+}
+
+func checkWhitelist(path string, whitelist []string) bool {
+	for _, wl := range whitelist {
+		if HasFilepathPrefix(path, wl) {
 			return true
 		}
 	}
@@ -117,6 +299,9 @@ func RelativeFiles(fp string, root string) ([]string, error) {
 	fullPath := filepath.Join(root, fp)
 	logrus.Debugf("Getting files and contents at root %s", fullPath)
 	err := filepath.Walk(fullPath, func(path string, info os.FileInfo, err error) error {
+		if PathInWhitelist(path, root) {
+			return nil
+		}
 		if err != nil {
 			return err
 		}
@@ -135,12 +320,32 @@ func Files(root string) ([]string, error) {
 	var files []string
 	logrus.Debugf("Getting files and contents at root %s", root)
 	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
+		if PathInWhitelist(path, root) {
+			return nil
+		}
 		files = append(files, path)
 		return err
 	})
 	return files, err
 }
 
+// ParentDirectories returns a list of paths to all parent directories
+// Ex. /some/temp/dir -> [/some, /some/temp]
+func ParentDirectories(path string) []string {
+	path = filepath.Clean(path)
+	dirs := strings.Split(path, "/")
+	dirPath := constants.RootDir
+	var paths []string
+	for index, dir := range dirs {
+		if dir == "" || index == (len(dirs)-1) {
+			continue
+		}
+		dirPath = filepath.Join(dirPath, dir)
+		paths = append(paths, dirPath)
+	}
+	return paths
+}
+
 // FilepathExists returns true if the path exists
 func FilepathExists(path string) bool {
 	_, err := os.Lstat(path)
@@ -210,3 +415,80 @@ func DownloadFileToDest(rawurl, dest string) error {
 	}
 	return os.Chtimes(dest, mTime, mTime)
 }
+
+// CopyDir copies the file or directory at src to dest
+func CopyDir(src, dest string) error {
+	files, err := RelativeFiles("", src)
+	if err != nil {
+		return err
+	}
+	for _, file := range files {
+		fullPath := filepath.Join(src, file)
+		fi, err := os.Lstat(fullPath)
+		if err != nil {
+			return err
+		}
+		destPath := filepath.Join(dest, file)
+		if fi.IsDir() {
+			logrus.Infof("Creating directory %s", destPath)
+			if err := os.MkdirAll(destPath, fi.Mode()); err != nil {
+				return err
+			}
+		} else if fi.Mode()&os.ModeSymlink != 0 {
+			// If the file is a symlink, create the same relative symlink
+			if err := CopySymlink(fullPath, destPath); err != nil {
+				return err
+			}
+		} else {
+			// Otherwise, copy over the file
+			if err := CopyFile(fullPath, destPath); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// CopySymlink copies the symlink at src to dest
+func CopySymlink(src, dest string) error {
+	link, err := os.Readlink(src)
+	if err != nil {
+		return err
+	}
+	linkDst := filepath.Join(dest, link)
+	return os.Symlink(linkDst, dest)
+}
+
+// CopyFile copies the file at src to dest
+func CopyFile(src, dest string) error {
+	fi, err := os.Stat(src)
+	if err != nil {
+		return err
+	}
+	logrus.Infof("Copying file %s to %s", src, dest)
+	srcFile, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+	return CreateFile(dest, srcFile, fi.Mode())
+}
+
+// HasFilepathPrefix checks if the given file path begins with prefix
+func HasFilepathPrefix(path, prefix string) bool {
+	path = filepath.Clean(path)
+	prefix = filepath.Clean(prefix)
+	pathArray := strings.Split(path, "/")
+	prefixArray := strings.Split(prefix, "/")
+
+	if len(pathArray) < len(prefixArray) {
+		return false
+	}
+	for index := range prefixArray {
+		if prefixArray[index] == pathArray[index] {
+			continue
+		}
+		return false
+	}
+	return true
+}
diff --git a/pkg/util/fs_util_test.go b/pkg/util/fs_util_test.go
index 36583564c1..0feefd0ebe 100644
--- a/pkg/util/fs_util_test.go
+++ b/pkg/util/fs_util_test.go
@@ -17,9 +17,12 @@ limitations under the License.
 package util
 
 import (
+	"archive/tar"
+	"bytes"
 	"io/ioutil"
 	"os"
 	"path/filepath"
+	"reflect"
 	"sort"
 	"testing"
 
@@ -47,7 +50,7 @@ func Test_fileSystemWhitelist(t *testing.T) {
 	}
 
 	actualWhitelist, err := fileSystemWhitelist(path)
-	expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys"}
+	expectedWhitelist := []string{"/kaniko", "/proc", "/dev", "/dev/pts", "/sys", "/var/run"}
 	sort.Strings(actualWhitelist)
 	sort.Strings(expectedWhitelist)
 	testutil.CheckErrorAndDeepEqual(t, false, err, expectedWhitelist, actualWhitelist)
@@ -100,16 +103,13 @@ var tests = []struct {
 		files: map[string]string{
 			"/workspace/foo/a": "baz1",
 			"/workspace/foo/b": "baz2",
-			"/kaniko/file":     "file",
 		},
 		directory: "",
 		expectedFiles: []string{
 			"workspace/foo/a",
 			"workspace/foo/b",
-			"kaniko/file",
 			"workspace",
 			"workspace/foo",
-			"kaniko",
 			".",
 		},
 	},
@@ -131,3 +131,387 @@ func Test_RelativeFiles(t *testing.T) {
 		testutil.CheckErrorAndDeepEqual(t, false, err, test.expectedFiles, actualFiles)
 	}
 }
+
+func Test_ParentDirectories(t *testing.T) {
+	tests := []struct {
+		name     string
+		path     string
+		expected []string
+	}{
+		{
+			name: "regular path",
+			path: "/path/to/dir",
+			expected: []string{
+				"/path",
+				"/path/to",
+			},
+		},
+		{
+			name:     "current directory",
+			path:     ".",
+			expected: nil,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			actual := ParentDirectories(tt.path)
+			testutil.CheckErrorAndDeepEqual(t, false, nil, tt.expected, actual)
+		})
+	}
+}
+
+func Test_checkWhiteouts(t *testing.T) {
+	type args struct {
+		path      string
+		whiteouts map[string]struct{}
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "file whited out",
+			args: args{
+				path:      "/foo",
+				whiteouts: map[string]struct{}{"/foo": {}},
+			},
+			want: true,
+		},
+		{
+			name: "directory whited out",
+			args: args{
+				path:      "/foo/bar",
+				whiteouts: map[string]struct{}{"/foo": {}},
+			},
+			want: true,
+		},
+		{
+			name: "grandparent whited out",
+			args: args{
+				path:      "/foo/bar/baz",
+				whiteouts: map[string]struct{}{"/foo": {}},
+			},
+			want: true,
+		},
+		{
+			name: "sibling whited out",
+			args: args{
+				path:      "/foo/bar/baz",
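+				// /foo/bat is a sibling of /foo/bar, so it must not white out /foo/bar/baz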
+				whiteouts: map[string]struct{}{"/foo/bat": {}},
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := checkWhiteouts(tt.args.path, tt.args.whiteouts); got != tt.want {
+				t.Errorf("checkWhiteouts() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func Test_checkWhitelist(t *testing.T) {
+	type args struct {
+		path      string
+		whitelist []string
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "file whitelisted",
+			args: args{
+				path:      "/foo",
+				whitelist: []string{"/foo"},
+			},
+			want: true,
+		},
+		{
+			name: "directory whitelisted",
+			args: args{
+				path:      "/foo/bar",
+				whitelist: []string{"/foo"},
+			},
+			want: true,
+		},
+		{
+			name: "grandparent whitelisted",
+			args: args{
+				path:      "/foo/bar/baz",
+				whitelist: []string{"/foo"},
+			},
+			want: true,
+		},
+		{
+			name: "sibling whitelisted",
+			args: args{
+				path:      "/foo/bar/baz",
+				whitelist: []string{"/foo/bat"},
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := checkWhitelist(tt.args.path, tt.args.whitelist); got != tt.want {
+				t.Errorf("checkWhitelist() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestHasFilepathPrefix(t *testing.T) {
+	type args struct {
+		path   string
+		prefix string
+	}
+	tests := []struct {
+		name string
+		args args
+		want bool
+	}{
+		{
+			name: "parent",
+			args: args{
+				path:   "/foo/bar",
+				prefix: "/foo",
+			},
+			want: true,
+		},
+		{
+			name: "nested parent",
+			args: args{
+				path:   "/foo/bar/baz",
+				prefix: "/foo/bar",
+			},
+			want: true,
+		},
+		{
+			name: "sibling",
+			args: args{
+				path:   "/foo/bar",
+				prefix: "/bar",
+			},
+			want: false,
+		},
+		{
+			name: "nested sibling",
+			args: args{
+				path:   "/foo/bar/baz",
+				prefix: "/foo/bar",
+			},
+			want: true,
+		},
+		{
+			name: "name prefix",
+			args: args{
+				path:   "/foo2/bar",
+				prefix: "/foo",
+			},
+			want: false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			if got := HasFilepathPrefix(tt.args.path, tt.args.prefix); got != tt.want {
+				t.Errorf("HasFilepathPrefix() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+type checker func(root string, t *testing.T)
+
+func fileExists(p string) checker {
+	return func(root string, t *testing.T) {
+		_, err := os.Stat(filepath.Join(root, p))
+		if err != nil {
+			t.Fatalf("File does not exist")
+		}
+	}
+}
+
+func fileMatches(p string, c []byte) checker {
+	return func(root string, t *testing.T) {
+		actual, err := ioutil.ReadFile(filepath.Join(root, p))
+		if err != nil {
+			t.Fatalf("error reading file: %s", p)
+		}
+		if !reflect.DeepEqual(actual, c) {
+			t.Errorf("file contents do not match. %v!=%v", actual, c)
+		}
+	}
+}
+
+func permissionsMatch(p string, perms os.FileMode) checker {
+	return func(root string, t *testing.T) {
+		fi, err := os.Stat(filepath.Join(root, p))
+		if err != nil {
+			t.Fatalf("error statting file %s", p)
+		}
+		if fi.Mode() != perms {
+			t.Errorf("Permissions do not match. %s != %s", fi.Mode(), perms)
+		}
+	}
+}
+
+func linkPointsTo(src, dst string) checker {
+	return func(root string, t *testing.T) {
+		link := filepath.Join(root, src)
+		got, err := os.Readlink(link)
+		if err != nil {
+			t.Fatalf("error reading link %s: %s", link, err)
+		}
+		if got != dst {
+			t.Errorf("link destination does not match: %s != %s", got, dst)
+		}
+	}
+}
+
+func fileHeader(name string, contents string, mode int64) *tar.Header {
+	return &tar.Header{
+		Name:     name,
+		Size:     int64(len(contents)),
+		Mode:     mode,
+		Typeflag: tar.TypeReg,
+	}
+}
+
+func linkHeader(name, linkname string) *tar.Header {
+	return &tar.Header{
+		Name:     name,
+		Size:     0,
+		Typeflag: tar.TypeSymlink,
+		Linkname: linkname,
+	}
+}
+
+func hardlinkHeader(name, linkname string) *tar.Header {
+	return &tar.Header{
+		Name:     name,
+		Size:     0,
+		Typeflag: tar.TypeLink,
+		Linkname: linkname,
+	}
+}
+
+func dirHeader(name string, mode int64) *tar.Header {
+	return &tar.Header{
+		Name:     name,
+		Size:     0,
+		Typeflag: tar.TypeDir,
+		Mode:     mode,
+	}
+}
+
+func TestExtractFile(t *testing.T) {
+	type tc struct {
+		name     string
+		hdrs     []*tar.Header
+		contents []byte
+		checkers []checker
+	}
+
+	tcs := []tc{
+		{
+			name:     "normal file",
+			contents: []byte("helloworld"),
+			hdrs:     []*tar.Header{fileHeader("./bar", "helloworld", 0644)},
+			checkers: []checker{
+				fileExists("/bar"),
+				fileMatches("/bar", []byte("helloworld")),
+				permissionsMatch("/bar", 0644),
+			},
+		},
+		{
+			name:     "normal file, directory does not exist",
+			contents: []byte("helloworld"),
+			hdrs:     []*tar.Header{fileHeader("./foo/bar", "helloworld", 0644)},
+			checkers: []checker{
+				fileExists("/foo/bar"),
+				fileMatches("/foo/bar", []byte("helloworld")),
+				permissionsMatch("/foo/bar", 0644),
+				permissionsMatch("/foo", 0755|os.ModeDir),
+			},
+		},
+		{
+			name:     "normal file, directory is created after",
+			contents: []byte("helloworld"),
+			hdrs: []*tar.Header{
+				fileHeader("./foo/bar", "helloworld", 0644),
+				dirHeader("./foo", 0722),
+			},
+			checkers: []checker{
+				fileExists("/foo/bar"),
+				fileMatches("/foo/bar", []byte("helloworld")),
+				permissionsMatch("/foo/bar", 0644),
+				permissionsMatch("/foo", 0722|os.ModeDir),
+			},
+		},
+		{
+			name: "symlink",
+			hdrs: []*tar.Header{linkHeader("./bar", "bar/bat")},
+			checkers: []checker{
+				linkPointsTo("/bar", "bar/bat"),
+			},
+		},
+		{
+			name: "symlink relative path",
+			hdrs: []*tar.Header{linkHeader("./bar", "./foo/bar/baz")},
+			checkers: []checker{
+				linkPointsTo("/bar", "./foo/bar/baz"),
+			},
+		},
+		{
+			name: "symlink parent does not exist",
+			hdrs: []*tar.Header{linkHeader("./foo/bar/baz", "../../bat")},
+			checkers: []checker{
+				linkPointsTo("/foo/bar/baz", "../../bat"),
+			},
+		},
+		{
+			name: "symlink parent does not exist 2",
+			hdrs: []*tar.Header{linkHeader("./foo/bar/baz", "../../bat")},
+			checkers: []checker{
+				linkPointsTo("/foo/bar/baz", "../../bat"),
+				permissionsMatch("/foo", 0755|os.ModeDir),
+				permissionsMatch("/foo/bar", 0755|os.ModeDir),
+			},
+		},
+		{
+			name: "hardlink",
+			hdrs: []*tar.Header{
+				fileHeader("/bin/gzip", "gzip-binary", 0751),
+				hardlinkHeader("/bin/uncompress", "/bin/gzip"),
+			},
+			checkers: []checker{
+				linkPointsTo("/bin/uncompress", "/bin/gzip"),
+			},
+		},
+	}
+
+	for _, tc := range tcs {
+		t.Run(tc.name, func(t *testing.T) {
+			tc := tc
+			t.Parallel()
+			r, err := ioutil.TempDir("", "")
+			if err != nil {
+				t.Fatal(err)
+			}
+			defer os.RemoveAll(r)
+			for _, hdr := range tc.hdrs {
+				if err := extractFile(r, hdr, bytes.NewReader(tc.contents)); err != nil {
+					t.Fatal(err)
+				}
+			}
+			for _, checker := range tc.checkers {
+				checker(r, t)
+			}
+		})
+	}
+}
diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go
index 707ce6c294..baf15a60c8 100644
--- a/pkg/util/tar_util.go
+++ b/pkg/util/tar_util.go
@@ -23,18 +23,16 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"syscall"
 
-	pkgutil "github.com/GoogleContainerTools/container-diff/pkg/util"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
-var hardlinks = make(map[uint64]string)
-
 // AddToTar adds the file i to tar w at path p
-func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
+func AddToTar(p string, i os.FileInfo, hardlinks map[uint64]string, w *tar.Writer) error {
 	linkDst := ""
 	if i.Mode()&os.ModeSymlink != 0 {
 		var err error
@@ -49,7 +47,7 @@ func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
 	}
 	hdr.Name = p
 
-	hardlink, linkDst := checkHardlink(p, i)
+	hardlink, linkDst := checkHardlink(p, hardlinks, i)
 	if hardlink {
 		hdr.Linkname = linkDst
 		hdr.Typeflag = tar.TypeLink
@@ -72,8 +70,23 @@ func AddToTar(p string, i os.FileInfo, w *tar.Writer) error {
 	return nil
 }
 
+func Whiteout(p string, w *tar.Writer) error {
+	dir := filepath.Dir(p)
+	name := ".wh." + filepath.Base(p)
+
+	th := &tar.Header{
+		Name: filepath.Join(dir, name),
+		Size: 0,
+	}
+	if err := w.WriteHeader(th); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // Returns true if path is a hardlink, and the link destination
-func checkHardlink(p string, i os.FileInfo) (bool, string) {
+func checkHardlink(p string, hardlinks map[uint64]string, i os.FileInfo) (bool, string) {
 	hardlink := false
 	linkDst := ""
 	if sys := i.Sys(); sys != nil {
@@ -108,7 +121,7 @@ func UnpackLocalTarArchive(path, dest string) error {
 		return UnpackCompressedTar(path, dest)
 	} else if compressionLevel == archive.Bzip2 {
 		bzr := bzip2.NewReader(file)
-		return pkgutil.UnTar(bzr, dest, nil)
+		return unTar(bzr, dest)
 	}
 	if fileIsUncompressedTar(path) {
 		file, err := os.Open(path)
@@ -117,7 +130,7 @@ func UnpackLocalTarArchive(path, dest string) error {
 			return err
 		}
 		defer file.Close()
-		return pkgutil.UnTar(file, dest, nil)
+		return unTar(file, dest)
 	}
 	return errors.New("path does not lead to local tar archive")
 }
@@ -181,5 +194,5 @@ func UnpackCompressedTar(path, dir string) error {
 		return err
 	}
 	defer gzr.Close()
-	return pkgutil.UnTar(gzr, dir, nil)
+	return unTar(gzr, dir)
 }
diff --git a/pkg/util/tar_util_test.go b/pkg/util/tar_util_test.go
index 053eaf7aea..1762ea6f29 100644
--- a/pkg/util/tar_util_test.go
+++ b/pkg/util/tar_util_test.go
@@ -101,7 +101,7 @@ func createTar(testdir string, writer io.Writer) error {
 		if err != nil {
 			return err
 		}
-		if err := AddToTar(filePath, fi, w); err != nil {
+		if err := AddToTar(filePath, fi, map[uint64]string{}, w); err != nil {
 			return err
 		}
 	}
diff --git a/test.sh b/test.sh
index 1ca29155d2..c332c8f298 100755
--- a/test.sh
+++ b/test.sh
@@ -21,7 +21,7 @@ GREEN='\033[0;32m'
 RESET='\033[0m'
 
 echo "Running go tests..."
-go test -cover -v -tags "containers_image_ostree_stub containers_image_openpgp exclude_graphdriver_devicemapper exclude_graphdriver_btrfs" -timeout 60s `go list ./... | grep -v vendor` | sed ''/PASS/s//$(printf "${GREEN}PASS${RESET}")/'' | sed ''/FAIL/s//$(printf "${RED}FAIL${RESET}")/''
+go test -cover -v -timeout 60s `go list ./...
| grep -v vendor | grep -v integration` | sed ''/PASS/s//$(printf "${GREEN}PASS${RESET}")/'' | sed ''/FAIL/s//$(printf "${RED}FAIL${RESET}")/'' GO_TEST_EXIT_CODE=${PIPESTATUS[0]} if [[ $GO_TEST_EXIT_CODE -ne 0 ]]; then exit $GO_TEST_EXIT_CODE diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go index 41e5d42b79..d484e421aa 100644 --- a/vendor/cloud.google.com/go/storage/bucket.go +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -269,6 +269,9 @@ type BucketAttrs struct { // The bucket's Cross-Origin Resource Sharing (CORS) configuration. CORS []CORS + + // The encryption configuration used by default for newly inserted objects. + Encryption *BucketEncryption } // Lifecycle is the lifecycle configuration for objects in the bucket. @@ -406,6 +409,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) { Lifecycle: toLifecycle(b.Lifecycle), RetentionPolicy: rp, CORS: toCORS(b.Cors), + Encryption: toBucketEncryption(b.Encryption), } acl := make([]ACLRule, len(b.Acl)) for i, rule := range b.Acl { @@ -470,10 +474,11 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket { Lifecycle: toRawLifecycle(b.Lifecycle), RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(), Cors: toRawCORS(b.CORS), + Encryption: b.Encryption.toRawBucketEncryption(), } } -// The bucket's Cross-Origin Resource Sharing (CORS) configuration. +// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration. type CORS struct { // MaxAge is the value to return in the Access-Control-Max-Age // header used in preflight responses. @@ -495,14 +500,23 @@ type CORS struct { ResponseHeaders []string } +// BucketEncryption is a bucket's encryption configuration. +type BucketEncryption struct { + // A Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt + // objects inserted into this bucket, if no encryption method is specified. + // The key's location must be the same as the bucket's. + DefaultKMSKeyName string +} + type BucketAttrsToUpdate struct { - // VersioningEnabled, if set, updates whether the bucket uses versioning. + // If set, updates whether the bucket uses versioning. VersioningEnabled optional.Bool - // RequesterPays, if set, updates whether the bucket is a Requester Pays bucket. + // If set, updates whether the bucket is a Requester Pays bucket. RequesterPays optional.Bool - // RetentionPolicy, if set, updates the retention policy of the bucket. Using + // If set, updates the retention policy of the bucket. Using // RetentionPolicy.RetentionPeriod = 0 will delete the existing policy. // // This feature is in private alpha release. It is not currently available to @@ -510,11 +524,15 @@ type BucketAttrsToUpdate struct { // subject to any SLA or deprecation policy. RetentionPolicy *RetentionPolicy - // CORS, if set, replaces the CORS configuration with a new configuration. - // When an empty slice is provided, all CORS policies are removed; when nil - // is provided, the value is ignored in the update. + // If set, replaces the CORS configuration with a new configuration. + // An empty (rather than nil) slice causes all CORS policies to be removed. CORS []CORS + // If set, replaces the encryption configuration of the bucket. Using + // BucketEncryption.DefaultKMSKeyName = "" will delete the existing + // configuration. 
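+	// (toRawBucket below maps the empty key name to a NullFields entry, which
+	// clears the server-side setting rather than leaving it untouched.)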
+ Encryption *BucketEncryption + setLabels map[string]string deleteLabels map[string]bool } @@ -563,6 +581,14 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket { ForceSendFields: []string{"RequesterPays"}, } } + if ua.Encryption != nil { + if ua.Encryption.DefaultKMSKeyName == "" { + rb.NullFields = append(rb.NullFields, "Encryption") + rb.Encryption = nil + } else { + rb.Encryption = ua.Encryption.toRawBucketEncryption() + } + } if ua.setLabels != nil || ua.deleteLabels != nil { rb.Labels = map[string]string{} for k, v := range ua.setLabels { @@ -788,6 +814,22 @@ func toLifecycle(rl *raw.BucketLifecycle) Lifecycle { return l } +func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption { + if e == nil { + return nil + } + return &raw.BucketEncryption{ + DefaultKmsKeyName: e.DefaultKMSKeyName, + } +} + +func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption { + if e == nil { + return nil + } + return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName} +} + // Objects returns an iterator over the objects in the bucket that match the Query q. // If q is nil, no filtering is done. func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { @@ -869,8 +911,6 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) return resp.NextPageToken, nil } -// TODO(jbd): Add storage.buckets.update. - // Buckets returns an iterator over the buckets in the project. You may // optionally set the iterator's Prefix field to restrict the list to buckets // whose names begin with the prefix. By default, all buckets in the project diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go index c9fb0271ad..50589e0e75 100644 --- a/vendor/cloud.google.com/go/storage/copy.go +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -60,6 +60,15 @@ type Copier struct { // ProgressFunc should return quickly without blocking. ProgressFunc func(copiedBytes, totalBytes uint64) + // The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K, + // that will be used to encrypt the object. Overrides the object's KMSKeyName, if + // any. + // + // Providing both a DestinationKMSKeyName and a customer-supplied encryption key + // (via ObjectHandle.Key) on the destination object will result in an error when + // Run is called. + DestinationKMSKeyName string + dst, src *ObjectHandle } @@ -74,6 +83,9 @@ func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) { if err := c.dst.validate(); err != nil { return nil, err } + if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil { + return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key") + } // Convert destination attributes to raw form, omitting the bucket. // If the bucket is included but name or content-type aren't, the service // returns a 400 with "Required" as the only message. 
Omitting the bucket @@ -100,6 +112,9 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr if c.RewriteToken != "" { call.RewriteToken(c.RewriteToken) } + if c.DestinationKMSKeyName != "" { + call.DestinationKmsKeyName(c.DestinationKMSKeyName) + } if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { return nil, err } diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go index 9040ac2af0..1740708e38 100644 --- a/vendor/cloud.google.com/go/storage/doc.go +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -19,6 +19,9 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket More information about Google Cloud Storage is available at https://cloud.google.com/storage/docs. +See https://godoc.org/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. + All of the methods of this package use exponential backoff to retry calls that fail with certain errors, as described in https://cloud.google.com/storage/docs/exponential-backoff. @@ -158,10 +161,5 @@ SignedURL for details. // TODO: Handle error. } fmt.Println(url) - -Authentication - -See examples of authorization and authentication at -https://godoc.org/cloud.google.com/go#pkg-examples. */ package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go index 6af5a94e0a..68d9ca11ab 100644 --- a/vendor/cloud.google.com/go/storage/reader.go +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "net/http" "net/url" + "reflect" "strconv" "strings" @@ -74,11 +75,6 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) return nil, err } req = withContext(req, ctx) - if length < 0 && offset > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } else if length > 0 { - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - } if o.userProject != "" { req.Header.Set("X-Goog-User-Project", o.userProject) } @@ -88,39 +84,57 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil { return nil, err } - var res *http.Response - err = runWithRetry(ctx, func() error { - res, err = o.c.hc.Do(req) - if err != nil { - return err - } - if res.StatusCode == http.StatusNotFound { - res.Body.Close() - return ErrObjectNotExist + + // Define a function that initiates a Read with offset and length, assuming we + // have already read seen bytes. + reopen := func(seen int64) (*http.Response, error) { + start := offset + seen + if length < 0 && start > 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start)) + } else if length > 0 { + // The end character isn't affected by how many bytes we've seen. 
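+			// e.g. offset=100, length=50, seen=10 yields "Range: bytes=110-149".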
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1)) } - if res.StatusCode < 200 || res.StatusCode > 299 { - body, _ := ioutil.ReadAll(res.Body) - res.Body.Close() - return &googleapi.Error{ - Code: res.StatusCode, - Header: res.Header, - Body: string(body), + var res *http.Response + err = runWithRetry(ctx, func() error { + res, err = o.c.hc.Do(req) + if err != nil { + return err } + if res.StatusCode == http.StatusNotFound { + res.Body.Close() + return ErrObjectNotExist + } + if res.StatusCode < 200 || res.StatusCode > 299 { + body, _ := ioutil.ReadAll(res.Body) + res.Body.Close() + return &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + Body: string(body), + } + } + if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { + res.Body.Close() + return errors.New("storage: partial request not satisfied") + } + return nil + }) + if err != nil { + return nil, err } - if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent { - res.Body.Close() - return errors.New("storage: partial request not satisfied") - } - return nil - }) + return res, nil + } + + res, err := reopen(0) if err != nil { return nil, err } - var size int64 // total size of object, even if a range was requested. if res.StatusCode == http.StatusPartialContent { cr := strings.TrimSpace(res.Header.Get("Content-Range")) if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") { + return nil, fmt.Errorf("storage: invalid Content-Range %q", cr) } size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64) @@ -155,6 +169,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) cacheControl: res.Header.Get("Cache-Control"), wantCRC: crc, checkCRC: checkCRC, + reopen: reopen, }, nil } @@ -180,15 +195,16 @@ var emptyBody = ioutil.NopCloser(strings.NewReader("")) // the stored CRC, returning an error from Read if there is a mismatch. This integrity check // is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding. type Reader struct { - body io.ReadCloser - remain, size int64 - contentType string - contentEncoding string - cacheControl string - checkCRC bool // should we check the CRC? - wantCRC uint32 // the CRC32c value the server sent in the header - gotCRC uint32 // running crc - checkedCRC bool // did we check the CRC? (For tests.) + body io.ReadCloser + seen, remain, size int64 + contentType string + contentEncoding string + cacheControl string + checkCRC bool // should we check the CRC? + wantCRC uint32 // the CRC32c value the server sent in the header + gotCRC uint32 // running crc + checkedCRC bool // did we check the CRC? (For tests.) + reopen func(seen int64) (*http.Response, error) } // Close closes the Reader. It must be called when done reading. @@ -197,7 +213,7 @@ func (r *Reader) Close() error { } func (r *Reader) Read(p []byte) (int, error) { - n, err := r.body.Read(p) + n, err := r.readWithRetry(p) if r.remain != -1 { r.remain -= int64(n) } @@ -217,6 +233,35 @@ func (r *Reader) Read(p []byte) (int, error) { return n, err } +func (r *Reader) readWithRetry(p []byte) (int, error) { + n := 0 + for len(p[n:]) > 0 { + m, err := r.body.Read(p[n:]) + n += m + r.seen += int64(m) + if !shouldRetryRead(err) { + return n, err + } + // Read failed, but we will try again. Send a ranged read request that takes + // into account the number of bytes we've already seen. 
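+		// reopen re-issues the request starting at offset+seen, so no byte is
+		// re-read or skipped across the retry.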
+ res, err := r.reopen(r.seen) + if err != nil { + // reopen already retries + return n, err + } + r.body.Close() + r.body = res.Body + } + return n, nil +} + +func shouldRetryRead(err error) bool { + if err == nil { + return false + } + return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2") +} + // Size returns the size of the object in bytes. // The returned value is always the same and is not affected by // calls to Read or Close. diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index c6a2704ce1..df58bb74ec 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -722,6 +722,14 @@ type ObjectAttrs struct { // encryption in Google Cloud Storage. CustomerKeySHA256 string + // Cloud KMS key name, in the form + // projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object, + // if the object is encrypted by such a key. + // + // Providing both a KMSKeyName and a customer-supplied encryption key (via + // ObjectHandle.Key) will result in an error when writing an object. + KMSKeyName string + // Prefix is set only for ObjectAttrs which represent synthetic "directory // entries" when iterating over buckets using Query.Delimiter. See // ObjectIterator.Next. When set, no other fields in ObjectAttrs will be @@ -779,6 +787,7 @@ func newObject(o *raw.Object) *ObjectAttrs { Metageneration: o.Metageneration, StorageClass: o.StorageClass, CustomerKeySHA256: sha256, + KMSKeyName: o.KmsKeyName, Created: convertTime(o.TimeCreated), Deleted: convertTime(o.TimeDeleted), Updated: convertTime(o.Updated), diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go index 21d146f0f6..3e9709b8fd 100644 --- a/vendor/cloud.google.com/go/storage/writer.go +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -88,6 +88,9 @@ func (w *Writer) open() error { if !utf8.ValidString(attrs.Name) { return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) } + if attrs.KMSKeyName != "" && w.o.encryptionKey != nil { + return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key") + } pr, pw := io.Pipe() w.pw = pw w.opened = true @@ -119,6 +122,9 @@ func (w *Writer) open() error { if w.ProgressFunc != nil { call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) }) } + if attrs.KMSKeyName != "" { + call.KmsKeyName(attrs.KMSKeyName) + } if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { w.mu.Lock() w.err = err diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 0000000000..e3d9a64d1d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 0000000000..96504a33bc --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = 
'(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
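+	// 0x90 (DCS_ENTRY), 0x9B (CSI_ENTRY) and 0x9D (OSC_STRING) are among the
+	// bytes deliberately excluded here: they transition to their own states.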
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 0000000000..8d66e777c0 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 0000000000..bcbe00d0c5 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 0000000000..7ed5e01c34 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 0000000000..1c719db9e4 
--- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 0000000000..6390abd231 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 0000000000..98087b38c2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + 
ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 0000000000..52451e9469 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 0000000000..593b10ab69 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 0000000000..03cec7ada6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+ } + } else { + ap.logf = logger.Printf + } + } + + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + ap.logf("WARNING: newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState state) error { + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 0000000000..de0a1f9cde --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,99 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + 
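+	// Parameters that are zero or fail to parse fall back to dflt; the result
+	// is then padded with dflt up to minCount entries.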
ints := []int{} + + for _, v := range params { + i, _ := strconv.Atoi(v) + // Zero is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 0000000000..0bb5e51e9a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,119 @@ +package ansiterm + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + ap.logf("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + ap.logf("collectInter %#x", currChar) + ap.context.paramBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) + + switch cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) + + ap.logf("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return 
ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 0000000000..f2ea1fcd12 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
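+		// mirrors getToGroundBytes minus 0x9C (ST): C0/C1 codes that must still
+		// be executed on the way to ground.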
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 0000000000..392114493a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 0000000000..a673279726 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,182 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) + if err 
!= nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 0000000000..6055e33b91 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. +// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. 
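// For illustration, the pattern described above looks like this (editor's
// sketch, not part of the original note; assumes a valid console handle):
//
//	var info CONSOLE_SCREEN_BUFFER_INFO
//	r1, r2, err := getConsoleScreenBufferInfoProc.Call(
//		handle, uintptr(unsafe.Pointer(&info)), 0)
//	use(info) // hint that info must remain live until the call returns
//
// Newer Go code expresses the same hint with runtime.KeepAlive(&info).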
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
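// (Editor's note, not in the vendored file: INPUT_RECORD is a C union and
// EventType selects which member is valid; the INPUT_RECORD struct defined
// below only decodes the KEY_EVENT case.)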
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
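// For example (editor's sketch, not part of the vendored file; assumes a
// console handle obtained via GetStdFile), virtual terminal processing
// could be enabled with:
//
//	mode, err := GetConsoleMode(handle)
//	if err == nil {
//		err = SetConsoleMode(handle, mode|ENABLE_VIRTUAL_TERMINAL_PROCESSING)
//	}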
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
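// A minimal read loop over this API might look like (editor's sketch, not
// part of the vendored file; assumes a console input handle):
//
//	records := make([]INPUT_RECORD, 16)
//	var read uint32
//	if err := ReadConsoleInput(handle, records, &read); err == nil {
//		for _, rec := range records[:read] {
//			if rec.EventType == KEY_EVENT && rec.KeyEvent.KeyDown != 0 {
//				// rec.KeyEvent.UnicodeChar holds the typed character
//			}
//		}
//	}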
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 0000000000..cbec8f728f --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
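// For example (editor's sketch, not part of the vendored file): applying
// the SGR sequence ESC[1;31m (bold red) to a console's default attributes:
//
//	attrs := info.Attributes // e.g. plain white on black
//	attrs, inverted := collectAnsiIntoWindowsAttributes(attrs, false, info.Attributes, ansiterm.ANSI_SGR_BOLD)
//	attrs, inverted = collectAnsiIntoWindowsAttributes(attrs, inverted, info.Attributes, ansiterm.ANSI_SGR_FOREGROUND_RED)
//	// attrs now carries FOREGROUND_RED | FOREGROUND_INTENSITY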
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 0000000000..3ee06ea728 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 0000000000..244b5fa25e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, 
coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, &region) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 0000000000..2d27fa1d02 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
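// For example (editor's note, not in the vendored file): with
// sr = scrollRegion{top: 5, bottom: 20}, scroll(3, sr, info) copies rows
// 5..20 to a destination origin of Y = 2, moving the text up three lines;
// Windows fills the vacated rows with blanks in the current attributes.
// A negative param scrolls down instead.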
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 0000000000..afa7635d77 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
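// For example (editor's note): addInRange(5, 10, 0, 12) == 12 (clamped to
// max) and addInRange(5, -10, 0, 12) == 0 (clamped to min).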
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 0000000000..2d40fb75ad --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. 
+ h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
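// (Editor's example, not in the vendored file: with the cursor at column 10,
// ESC[K reaches this method as EL(0) and clears from column 10 to the end of
// the row, ESC[1K as EL(1), and ESC[2K as EL(2).)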
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
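// (Editor's example, not in the vendored file: ESC[5;20r arrives here as
// DECSTBM(5, 20) and stores the zero-indexed region {top: 4, bottom: 19}
// before homing the cursor.)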
+ h.clearWrap() + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) RI() error { + if err := h.Flush(); err != nil { + return err + } + h.logf("RI: []") + h.clearWrap() + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + if info.CursorPosition.Y == sr.top { + return h.scrollDown(1) + } + + return h.moveCursorVertical(-1) +} + +func (h *windowsAnsiEventHandler) IND() error { + h.logf("IND: []") + return h.executeLF() +} + +func (h *windowsAnsiEventHandler) Flush() error { + h.curInfo = nil + if h.buffer.Len() > 0 { + h.logf("Flush: [%s]", h.buffer.Bytes()) + if _, err := h.buffer.WriteTo(h.file); err != nil { + return err + } + } + + if h.wrapNext && !h.drewMarginByte { + h.logf("Flush: drawing margin byte '%c'", h.marginByte) + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} + size := COORD{1, 1} + position := COORD{0, 0} + region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} + if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil { + return err + } + h.drewMarginByte = true + } + return nil +} + +// cacheConsoleInfo ensures that the current console screen information has been queried +// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. +func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { + if h.curInfo == nil { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return COORD{}, nil, err + } + h.curInfo = info + h.curPos = info.CursorPosition + } + return h.curPos, h.curInfo, nil +} + +func (h *windowsAnsiEventHandler) updatePos(pos COORD) { + if h.curInfo == nil { + panic("failed to call getCurrentInfo before calling updatePos") + } + h.curPos = pos +} + +// clearWrap clears the state where the cursor is in the margin
// waiting for the next character before wrapping the line. This must
// be done before most operations that act on the cursor. +func (h *windowsAnsiEventHandler) clearWrap() { + h.wrapNext = false + h.drewMarginByte = false +} diff --git a/vendor/github.com/GoogleCloudPlatform/container-diff/cmd/util/output/output.go b/vendor/github.com/GoogleCloudPlatform/container-diff/cmd/util/output/output.go deleted file mode 100644 index c7c1623d65..0000000000 --- a/vendor/github.com/GoogleCloudPlatform/container-diff/cmd/util/output/output.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package output - -import ( - "fmt" - "github.com/spf13/cobra" - "os" -) - -var quiet bool - -// PrintToStdErr prints to stderr if quiet flag isn't enabled -func PrintToStdErr(output string, vars ...interface{}) { - if !quiet { - fmt.Fprintf(os.Stderr, output, vars...)
- } -} - -// AddFlags adds quiet flag to suppress output to stderr -func AddFlags(cmd *cobra.Command) { - cmd.Flags().BoolVarP(&quiet, "quiet", "q", false, "Suppress output to stderr.") -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/mutable_source.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/mutable_source.go deleted file mode 100644 index 5e1cce25ea..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/mutable_source.go +++ /dev/null @@ -1,259 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package image - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "github.com/containers/image/docker" - img "github.com/containers/image/image" - "github.com/containers/image/types" - "io" - "io/ioutil" - "strings" - "time" - - "github.com/containers/image/manifest" - digest "github.com/opencontainers/go-digest" -) - -type MutableSource struct { - ProxySource - mfst *manifest.Schema2 - cfg *manifest.Schema2Image - extraBlobs map[string][]byte - extraLayers []digest.Digest -} - -func NewMutableSource(r types.ImageReference) (*MutableSource, error) { - if r == nil { - return MutableSourceFromScratch() - } - src, err := r.NewImageSource(nil) - if err != nil { - return nil, err - } - img, err := r.NewImage(nil) - if err != nil { - return nil, err - } - - ms := &MutableSource{ - ProxySource: ProxySource{ - Ref: r, - ImageSource: src, - img: img, - }, - extraBlobs: make(map[string][]byte), - } - if err := ms.populateManifestAndConfig(); err != nil { - return nil, err - } - return ms, nil -} - -func MutableSourceFromScratch() (*MutableSource, error) { - config := &manifest.Schema2Image{ - Schema2V1Image: manifest.Schema2V1Image{ - Config: &manifest.Schema2Config{}, - }, - RootFS: &manifest.Schema2RootFS{}, - History: []manifest.Schema2History{}, - } - ref, err := docker.ParseReference("//scratch") - if err != nil { - return nil, err - } - src, err := ref.NewImageSource(nil) - if err != nil { - return nil, err - } - ms := &MutableSource{ - ProxySource: ProxySource{ - Ref: &ProxyReference{ - ImageReference: ref, - }, - ImageSource: src, - }, - extraBlobs: make(map[string][]byte), - cfg: config, - mfst: &manifest.Schema2{}, - } - return ms, nil -} - -// Manifest marshals the stored manifest to the byte format. -func (m *MutableSource) GetManifest(_ *digest.Digest) ([]byte, string, error) { - if err := m.saveConfig(); err != nil { - return nil, "", err - } - s, err := json.Marshal(m.mfst) - if err != nil { - return nil, "", err - } - return s, manifest.DockerV2Schema2MediaType, err -} - -// populateManifestAndConfig parses the raw manifest and configs, storing them on the struct. 
-func (m *MutableSource) populateManifestAndConfig() error { - context := &types.SystemContext{ - OSChoice: "linux", - ArchitectureChoice: "amd64", - } - image, err := m.ProxySource.Ref.NewImage(context) - if err != nil { - return err - } - defer image.Close() - // First get manifest - mfstBytes, mfstType, err := image.Manifest() - if err != nil { - return err - } - - if mfstType == manifest.DockerV2ListMediaType { - // We need to select a manifest digest from the manifest list - unparsedImage := img.UnparsedInstance(m.ImageSource, nil) - - mfstDigest, err := img.ChooseManifestInstanceFromManifestList(context, unparsedImage) - if err != nil { - return err - } - mfstBytes, _, err = m.ProxySource.GetManifest(&mfstDigest) - if err != nil { - return err - } - } - - m.mfst, err = manifest.Schema2FromManifest(mfstBytes) - if err != nil { - return err - } - - // Now, get config - configBlob, err := image.ConfigBlob() - if err != nil { - return err - } - return json.Unmarshal(configBlob, &m.cfg) -} - -// GetBlob first checks the stored "extra" blobs, then proxies the call to the original source. -func (m *MutableSource) GetBlob(bi types.BlobInfo) (io.ReadCloser, int64, error) { - if b, ok := m.extraBlobs[bi.Digest.String()]; ok { - return ioutil.NopCloser(bytes.NewReader(b)), int64(len(b)), nil - } - return m.ImageSource.GetBlob(bi) -} - -func gzipBytes(b []byte) ([]byte, error) { - buf := bytes.NewBuffer([]byte{}) - w := gzip.NewWriter(buf) - _, err := w.Write(b) - w.Close() - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// appendLayer appends an uncompressed blob to the image, preserving the invariants required across the config and manifest. -func (m *MutableSource) AppendLayer(content []byte, author string) error { - compressedBlob, err := gzipBytes(content) - if err != nil { - return err - } - - dgst := digest.FromBytes(compressedBlob) - - // Add the layer to the manifest. - descriptor := manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2LayerMediaType, - Size: int64(len(content)), - Digest: dgst, - } - m.mfst.LayersDescriptors = append(m.mfst.LayersDescriptors, descriptor) - - m.extraBlobs[dgst.String()] = compressedBlob - m.extraLayers = append(m.extraLayers, dgst) - - // Also add it to the config. - diffID := digest.FromBytes(content) - m.cfg.RootFS.DiffIDs = append(m.cfg.RootFS.DiffIDs, diffID) - m.AppendConfigHistory(author, false) - return nil -} - -// saveConfig marshals the stored image config, and updates the references to it in the manifest. 
-func (m *MutableSource) saveConfig() error { - cfgBlob, err := json.Marshal(m.cfg) - if err != nil { - return err - } - - cfgDigest := digest.FromBytes(cfgBlob) - m.extraBlobs[cfgDigest.String()] = cfgBlob - m.mfst.ConfigDescriptor = manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - Size: int64(len(cfgBlob)), - Digest: cfgDigest, - } - return nil -} - -// Env returns a map of environment variables stored in the image config -// Converts each variable from a string of the form KEY=VALUE to a map of KEY:VALUE -func (m *MutableSource) Env() map[string]string { - envArray := m.cfg.Schema2V1Image.Config.Env - envMap := make(map[string]string) - for _, env := range envArray { - entry := strings.Split(env, "=") - envMap[entry[0]] = entry[1] - } - return envMap -} - -// SetEnv takes a map of environment variables, and converts them to an array of strings -// in the form KEY=VALUE, and then sets the image config -func (m *MutableSource) SetEnv(envMap map[string]string, author string) { - envArray := []string{} - for key, value := range envMap { - entry := key + "=" + value - envArray = append(envArray, entry) - } - m.cfg.Schema2V1Image.Config.Env = envArray - m.AppendConfigHistory(author, true) -} - -func (m *MutableSource) Config() *manifest.Schema2Config { - return m.cfg.Schema2V1Image.Config -} - -func (m *MutableSource) SetConfig(config *manifest.Schema2Config, author string, emptyLayer bool) { - m.cfg.Schema2V1Image.Config = config - m.AppendConfigHistory(author, emptyLayer) -} - -func (m *MutableSource) AppendConfigHistory(author string, emptyLayer bool) { - history := manifest.Schema2History{ - Created: time.Now(), - Author: author, - EmptyLayer: emptyLayer, - } - m.cfg.History = append(m.cfg.History, history) -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/proxy_types.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/proxy_types.go deleted file mode 100644 index 36e13b4b3b..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/image/proxy_types.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package image - -import ( - "github.com/containers/image/types" -) - -// ProxySource is a type that implements types.ImageSource by proxying all calls to an underlying implementation. 
-type ProxySource struct { - Ref types.ImageReference - types.ImageSource - img types.Image -} - -func NewProxySource(ref types.ImageReference) (*ProxySource, error) { - src, err := ref.NewImageSource(nil) - if err != nil { - return nil, err - } - defer src.Close() - img, err := ref.NewImage(nil) - if err != nil { - return nil, err - } - - return &ProxySource{ - Ref: ref, - img: img, - ImageSource: src, - }, nil -} - -func (p *ProxySource) Reference() types.ImageReference { - return p.Ref -} - -func (p *ProxySource) LayerInfosForCopy() []types.BlobInfo { - return nil -} - -// ProxyReference implements types.Reference by proxying calls to an underlying implementation. -type ProxyReference struct { - types.ImageReference - Src types.ImageSource -} - -func (p *ProxyReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return p.Src, nil -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/cloud_prepper.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/cloud_prepper.go deleted file mode 100644 index e77655111e..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/cloud_prepper.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "io/ioutil" - "strings" - - "github.com/containers/image/docker" - "github.com/containers/image/types" - "github.com/docker/docker/client" -) - -// CloudPrepper prepares images sourced from a Cloud registry -type CloudPrepper struct { - Source string - Client *client.Client - ImageSource types.ImageSource -} - -func (p *CloudPrepper) Name() string { - return "Cloud Registry" -} - -func (p *CloudPrepper) GetSource() string { - return p.Source -} - -func (p *CloudPrepper) SetSource(source string) { - p.Source = source -} - -func (p *CloudPrepper) GetImage() (Image, error) { - image, err := getImage(p) - image.Type = ImageTypeCloud - return image, err -} - -func (p *CloudPrepper) GetFileSystem() (string, error) { - ref, err := docker.ParseReference("//" + p.Source) - if err != nil { - return "", err - } - sanitizedName := strings.Replace(p.Source, ":", "", -1) - sanitizedName = strings.Replace(sanitizedName, "/", "", -1) - - path, err := ioutil.TempDir("", sanitizedName) - if err != nil { - return "", err - } - - return path, GetFileSystemFromReference(ref, p.ImageSource, path, nil) -} - -func (p *CloudPrepper) GetConfig() (ConfigSchema, error) { - ref, err := docker.ParseReference("//" + p.Source) - if err != nil { - return ConfigSchema{}, err - } - - return getConfigFromReference(ref, p.Source) -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/daemon_prepper.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/daemon_prepper.go deleted file mode 100644 index b3269b3ca4..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/daemon_prepper.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 Google, Inc. 
All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "context" - "io/ioutil" - "strings" - - "github.com/containers/image/docker/daemon" - - "github.com/docker/docker/client" - "github.com/sirupsen/logrus" -) - -type DaemonPrepper struct { - Source string - Client *client.Client -} - -func (p *DaemonPrepper) Name() string { - return "Local Daemon" -} - -func (p *DaemonPrepper) GetSource() string { - return p.Source -} - -func (p *DaemonPrepper) SetSource(source string) { - p.Source = source -} - -func (p *DaemonPrepper) GetImage() (Image, error) { - image, err := getImage(p) - image.Type = ImageTypeDaemon - return image, err -} - -func (p *DaemonPrepper) GetFileSystem() (string, error) { - ref, err := daemon.ParseReference(p.Source) - if err != nil { - return "", err - } - - src, err := ref.NewImageSource(nil) - if err != nil { - return "", err - } - defer src.Close() - - sanitizedName := strings.Replace(p.Source, ":", "", -1) - sanitizedName = strings.Replace(sanitizedName, "/", "", -1) - - path, err := ioutil.TempDir("", sanitizedName) - if err != nil { - return "", err - } - return path, GetFileSystemFromReference(ref, src, path, nil) -} - -func (p *DaemonPrepper) GetConfig() (ConfigSchema, error) { - ref, err := daemon.ParseReference(p.Source) - if err != nil { - return ConfigSchema{}, err - } - return getConfigFromReference(ref, p.Source) -} - -func (p *DaemonPrepper) GetHistory() []ImageHistoryItem { - history, err := p.Client.ImageHistory(context.Background(), p.Source) - if err != nil { - logrus.Errorf("Could not obtain image history for %s: %s", p.Source, err) - } - historyItems := []ImageHistoryItem{} - for _, item := range history { - historyItems = append(historyItems, ImageHistoryItem{CreatedBy: item.CreatedBy}) - } - return historyItems -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/docker_utils.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/docker_utils.go deleted file mode 100644 index eca0e53cd8..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/docker_utils.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/docker/docker/client" -) - -type Event struct { - Status string `json:"status"` - Error string `json:"error"` - Progress string `json:"progress"` - ProgressDetail struct { - Current int `json:"current"` - Total int `json:"total"` - } `json:"progressDetail"` -} - -func NewClient() (*client.Client, error) { - cli, err := client.NewEnvClient() - if err != nil { - return nil, fmt.Errorf("Error getting docker client: %s", err) - } - cli.NegotiateAPIVersion(context.Background()) - - return cli, nil -} - -func getLayersFromManifest(r io.Reader) ([]string, error) { - type Manifest struct { - Layers []string - } - - manifestJSON, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - - var imageManifest []Manifest - if err := json.Unmarshal(manifestJSON, &imageManifest); err != nil { - return []string{}, fmt.Errorf("Could not unmarshal manifest to get layer order: %s", err) - } - return imageManifest[0].Layers, nil -} - -func unpackDockerSave(tarPath string, target string) error { - if _, ok := os.Stat(target); ok != nil { - os.MkdirAll(target, 0775) - } - f, err := os.Open(tarPath) - if err != nil { - return err - } - - tr := tar.NewReader(f) - - // Unpack the layers into a map, since we need to sort out the order later. - var layers []string - layerMap := map[string][]byte{} - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - - // Docker save contains files and directories. Ignore the directories. - // We care about the layers and the manifest. The layers look like: - // $SHA/layer.tar - // and they are referenced that way in the manifest. - switch t := hdr.Typeflag; t { - case tar.TypeReg: - if hdr.Name == "manifest.json" { - layers, err = getLayersFromManifest(tr) - if err != nil { - return err - } - } else if strings.HasSuffix(hdr.Name, ".tar") { - layerMap[hdr.Name], err = ioutil.ReadAll(tr) - if err != nil { - return err - } - } - case tar.TypeDir: - continue - default: - return fmt.Errorf("unsupported file type %v found in file %s tar %s", t, hdr.Name, tarPath) - } - } - - for _, layer := range layers { - if err = UnTar(bytes.NewReader(layerMap[layer]), target, nil); err != nil { - return fmt.Errorf("Could not unpack layer %s: %s", layer, err) - } - } - return nil -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/fs_utils.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/fs_utils.go deleted file mode 100644 index e7d7541d23..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/fs_utils.go +++ /dev/null @@ -1,181 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/sirupsen/logrus" -) - -// Directory stores a representation of a file directory. 
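unpackDockerSave above depends on the layout of a docker save tarball: a top-level manifest.json holds a JSON array whose Layers field lists the layer tarballs (named "<sha>/layer.tar") in application order, which is why the loop buffers layers into a map and only untars them once the manifest has fixed the ordering. A minimal decode of that shape, with made-up file names:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	manifestJSON := []byte(`[{"Config":"cfg.json","RepoTags":["demo:latest"],"Layers":["aaa/layer.tar","bbb/layer.tar"]}]`)

	var manifest []struct {
		Config string
		Layers []string
	}
	if err := json.Unmarshal(manifestJSON, &manifest); err != nil {
		panic(err)
	}
	// Layers are listed base first; they must be applied in this order.
	for i, layer := range manifest[0].Layers {
		fmt.Printf("layer %d: %s\n", i, layer)
	}
}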
-type Directory struct { - Root string - Content []string -} - -type DirectoryEntry struct { - Name string - Size int64 -} - -func GetSize(path string) int64 { - stat, err := os.Stat(path) - if err != nil { - logrus.Errorf("Could not obtain size for %s: %s", path, err) - return -1 - } - if stat.IsDir() { - size, err := getDirectorySize(path) - if err != nil { - logrus.Errorf("Could not obtain directory size for %s: %s", path, err) - } - return size - } - return stat.Size() -} - -//GetFileContents returns the contents of a file at the specified path -func GetFileContents(path string) (*string, error) { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, err - } - - contents, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - - strContents := string(contents) - //If file is empty, return nil - if strContents == "" { - return nil, nil - } - return &strContents, nil -} - -func getDirectorySize(path string) (int64, error) { - var size int64 - err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { - if !info.IsDir() { - size += info.Size() - } - return err - }) - return size, err -} - -// GetDirectoryContents converts the directory starting at the provided path into a Directory struct. -func GetDirectory(path string, deep bool) (Directory, error) { - var directory Directory - directory.Root = path - var err error - if deep { - walkFn := func(currPath string, info os.FileInfo, err error) error { - newContent := strings.TrimPrefix(currPath, directory.Root) - if newContent != "" { - directory.Content = append(directory.Content, newContent) - } - return nil - } - - err = filepath.Walk(path, walkFn) - } else { - contents, err := ioutil.ReadDir(path) - if err != nil { - return directory, err - } - - for _, file := range contents { - fileName := "/" + file.Name() - directory.Content = append(directory.Content, fileName) - } - } - return directory, err -} - -func GetDirectoryEntries(d Directory) []DirectoryEntry { - return CreateDirectoryEntries(d.Root, d.Content) -} - -func CreateDirectoryEntries(root string, entryNames []string) (entries []DirectoryEntry) { - for _, name := range entryNames { - entryPath := filepath.Join(root, name) - size := GetSize(entryPath) - - entry := DirectoryEntry{ - Name: name, - Size: size, - } - entries = append(entries, entry) - } - return entries -} - -func CheckSameFile(f1name, f2name string) (bool, error) { - // Check first if files differ in size and immediately return - f1stat, err := os.Stat(f1name) - if err != nil { - return false, err - } - f2stat, err := os.Stat(f2name) - if err != nil { - return false, err - } - - if f1stat.Size() != f2stat.Size() { - return false, nil - } - - // Next, check file contents - f1, err := ioutil.ReadFile(f1name) - if err != nil { - return false, err - } - f2, err := ioutil.ReadFile(f2name) - if err != nil { - return false, err - } - - if !bytes.Equal(f1, f2) { - return false, nil - } - return true, nil -} - -// HasFilepathPrefix checks if the given file path begins with prefix -func HasFilepathPrefix(path, prefix string) bool { - path = filepath.Clean(path) - prefix = filepath.Clean(prefix) - pathArray := strings.Split(path, "/") - prefixArray := strings.Split(prefix, "/") - - if len(pathArray) < len(prefixArray) { - return false - } - for index := range prefixArray { - if prefixArray[index] == pathArray[index] { - continue - } - return false - } - return true -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_prep_utils.go 
b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_prep_utils.go deleted file mode 100644 index cf4b5ad50b..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_prep_utils.go +++ /dev/null @@ -1,288 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "archive/tar" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "os" - "sort" - "strings" - - "github.com/GoogleCloudPlatform/container-diff/cmd/util/output" - "github.com/containers/image/docker" - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -type Prepper interface { - Name() string - GetConfig() (ConfigSchema, error) - GetFileSystem() (string, error) - GetImage() (Image, error) - GetSource() string - SetSource(string) -} - -type ImageType int - -const ( - ImageTypeTar ImageType = iota - ImageTypeDaemon - ImageTypeCloud -) - -type Image struct { - Source string - FSPath string - Config ConfigSchema - Type ImageType -} - -func (i *Image) IsTar() bool { - return i.Type == ImageTypeTar -} - -func (i *Image) IsDaemon() bool { - return i.Type == ImageTypeDaemon -} - -func (i *Image) IsCloud() bool { - return i.Type == ImageTypeCloud -} - -func (i *Image) GetRemoteDigest() (string, error) { - ref, err := docker.ParseReference("//" + i.Source) - if err != nil { - return "", err - } - return getDigestFromReference(ref, i.Source) -} - -func (i *Image) GetName() string { - return strings.Split(i.Source, ":")[0] -} - -type ImageHistoryItem struct { - CreatedBy string `json:"created_by"` -} - -type ConfigObject struct { - Hostname string - Domainname string - User string - AttachStdin bool - AttachStdout bool - AttachStderr bool - ExposedPorts map[string]struct{} `json:"ExposedPorts"` - Tty bool - OpenStdin bool - StdinOnce bool - Env []string `json:"Env"` - Cmd []string `json:"Cmd"` - // Healthcheck *HealthConfig - ArgsEscaped bool `json:",omitempty"` - Image string - Volumes map[string]struct{} `json:"Volumes"` - Workdir string `json:"WorkingDir"` - Entrypoint []string `json:"Entrypoint"` - NetworkDisabled bool `json:",omitempty"` - MacAddress string `json:",omitempty"` - OnBuild []string - Labels map[string]string `json:"Labels"` - StopSignal string `json:",omitempty"` - StopTimeout *int `json:",omitempty"` - Shell []string `json:",omitempty"` -} - -func (c ConfigObject) AsList() []string { - return []string{ - fmt.Sprintf("Hostname: %s", c.Hostname), - fmt.Sprintf("Domainname: %s", c.Domainname), - fmt.Sprintf("User: %s", c.User), - fmt.Sprintf("AttachStdin: %t", c.AttachStdin), - fmt.Sprintf("AttachStdout: %t", c.AttachStdout), - fmt.Sprintf("AttachStderr: %t", c.AttachStderr), - fmt.Sprintf("ExposedPorts: %v", sortMap(c.ExposedPorts)), - fmt.Sprintf("Tty: %t", c.Tty), - fmt.Sprintf("OpenStdin: %t", c.OpenStdin), - fmt.Sprintf("StdinOnce: %t", c.StdinOnce), - fmt.Sprintf("Env: %s", strings.Join(c.Env, ",")), - 
fmt.Sprintf("Cmd: %s", strings.Join(c.Cmd, ",")), - fmt.Sprintf("ArgsEscaped: %t", c.ArgsEscaped), - fmt.Sprintf("Image: %s", c.Image), - fmt.Sprintf("Volumes: %v", sortMap(c.Volumes)), - fmt.Sprintf("Workdir: %s", c.Workdir), - fmt.Sprintf("Entrypoint: %s", strings.Join(c.Entrypoint, ",")), - fmt.Sprintf("NetworkDisabled: %t", c.NetworkDisabled), - fmt.Sprintf("MacAddress: %s", c.MacAddress), - fmt.Sprintf("OnBuild: %s", strings.Join(c.OnBuild, ",")), - fmt.Sprintf("Labels: %v", c.Labels), - fmt.Sprintf("StopSignal: %s", c.StopSignal), - fmt.Sprintf("StopTimeout: %d", c.StopTimeout), - fmt.Sprintf("Shell: %s", strings.Join(c.Shell, ",")), - } -} - -type ConfigSchema struct { - Config ConfigObject `json:"config"` - History []ImageHistoryItem `json:"history"` -} - -func getImage(p Prepper) (Image, error) { - // see if the image name has tag provided, if not add latest as tag - if !IsTar(p.GetSource()) && !HasTag(p.GetSource()) { - p.SetSource(p.GetSource() + LatestTag) - } - output.PrintToStdErr("Retrieving image %s from source %s\n", p.GetSource(), p.Name()) - imgPath, err := p.GetFileSystem() - if err != nil { - // return image with FSPath so it can be cleaned up - return Image{ - FSPath: imgPath, - }, err - } - - config, err := p.GetConfig() - if err != nil { - logrus.Error("Error retrieving History: ", err) - } - - logrus.Infof("Finished prepping image %s", p.GetSource()) - return Image{ - Source: p.GetSource(), - FSPath: imgPath, - Config: config, - }, nil -} - -func getImageFromTar(tarPath string) (string, error) { - logrus.Info("Extracting image tar to obtain image file system") - tempPath, err := ioutil.TempDir("", ".container-diff") - if err != nil { - return "", err - } - return tempPath, unpackDockerSave(tarPath, tempPath) -} - -func GetFileSystemFromReference(ref types.ImageReference, imgSrc types.ImageSource, path string, whitelist []string) error { - var err error - if imgSrc == nil { - imgSrc, err = ref.NewImageSource(nil) - } - if err != nil { - return err - } - defer imgSrc.Close() - img, err := ref.NewImage(nil) - if err != nil { - return err - } - defer img.Close() - for _, b := range img.LayerInfos() { - bi, _, err := imgSrc.GetBlob(b) - if err != nil { - return err - } - defer bi.Close() - f, reader, err := compression.DetectCompression(bi) - if err != nil { - return err - } - // Decompress if necessary. 
- if f != nil { - reader, err = f(reader) - if err != nil { - return err - } - } - tr := tar.NewReader(reader) - if err := unpackTar(tr, path, whitelist); err != nil { - return err - } - } - return nil -} - -func getDigestFromReference(ref types.ImageReference, source string) (string, error) { - img, err := ref.NewImage(nil) - if err != nil { - logrus.Errorf("Error referencing image %s from registry: %s", source, err) - return "", errors.New("Could not obtain image digest") - } - defer img.Close() - - rawManifest, _, err := img.Manifest() - if err != nil { - logrus.Errorf("Error referencing image %s from registry: %s", source, err) - return "", errors.New("Could not obtain image digest") - } - - digest, err := manifest.Digest(rawManifest) - if err != nil { - logrus.Errorf("Error referencing image %s from registry: %s", source, err) - return "", errors.New("Could not obtain image digest") - } - - return digest.String(), nil -} - -func getConfigFromReference(ref types.ImageReference, source string) (ConfigSchema, error) { - img, err := ref.NewImage(nil) - if err != nil { - logrus.Errorf("Error referencing image %s from registry: %s", source, err) - return ConfigSchema{}, errors.New("Could not obtain image config") - } - defer img.Close() - - configBlob, err := img.ConfigBlob() - if err != nil { - logrus.Errorf("Error obtaining config blob for image %s from registry: %s", source, err) - return ConfigSchema{}, errors.New("Could not obtain image config") - } - - var config ConfigSchema - err = json.Unmarshal(configBlob, &config) - if err != nil { - logrus.Errorf("Error with config file struct for image %s: %s", source, err) - return ConfigSchema{}, errors.New("Could not obtain image config") - } - return config, nil -} - -func CleanupImage(image Image) { - if image.FSPath != "" { - logrus.Infof("Removing image filesystem directory %s from system", image.FSPath) - if err := os.RemoveAll(image.FSPath); err != nil { - logrus.Warn(err.Error()) - } - } -} - -func sortMap(m map[string]struct{}) string { - pairs := make([]string, 0) - for key := range m { - pairs = append(pairs, fmt.Sprintf("%s:%s", key, m[key])) - } - sort.Strings(pairs) - return strings.Join(pairs, " ") -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_utils.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_utils.go deleted file mode 100644 index 95901d7844..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/image_utils.go +++ /dev/null @@ -1,78 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - - "github.com/docker/docker/pkg/system" - "github.com/sirupsen/logrus" -) - -const LatestTag string = ":latest" - -func GetImageLayers(pathToImage string) []string { - layers := []string{} - contents, err := ioutil.ReadDir(pathToImage) - if err != nil { - logrus.Error(err.Error()) - } - - for _, file := range contents { - if file.IsDir() { - layers = append(layers, file.Name()) - } - } - return layers -} - -// copyToFile writes the content of the reader to the specified file -func copyToFile(outfile string, r io.Reader) error { - // We use sequential file access here to avoid depleting the standby list - // on Windows. On Linux, this is a call directly to ioutil.TempFile - tmpFile, err := system.TempFileSequential(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// checks to see if an image string contains a tag. -func HasTag(image string) bool { - tagRegex := regexp.MustCompile(".*:[^/]+$") - return tagRegex.MatchString(image) -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_prepper.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_prepper.go deleted file mode 100644 index 948916cd68..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_prepper.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "encoding/json" - "errors" - "github.com/containers/image/docker/tarfile" - "github.com/docker/docker/client" - "github.com/sirupsen/logrus" - "io/ioutil" - "os" - "path/filepath" -) - -type TarPrepper struct { - Source string - Client *client.Client -} - -func (p *TarPrepper) Name() string { - return "Tar Archive" -} - -func (p *TarPrepper) GetSource() string { - return p.Source -} - -func (p *TarPrepper) SetSource(source string) { - p.Source = source -} - -func (p *TarPrepper) GetImage() (Image, error) { - image, err := getImage(p) - image.Type = ImageTypeTar - return image, err -} - -func (p *TarPrepper) GetFileSystem() (string, error) { - return getImageFromTar(p.Source) -} - -func (p *TarPrepper) GetConfig() (ConfigSchema, error) { - tempDir, err := ioutil.TempDir("", ".container-diff") - if err != nil { - return ConfigSchema{}, nil - } - defer os.RemoveAll(tempDir) - f, err := os.Open(p.Source) - if err != nil { - return ConfigSchema{}, err - } - defer f.Close() - if err := UnTar(f, tempDir, nil); err != nil { - return ConfigSchema{}, err - } - - var config ConfigSchema - // First open the manifest, then find the referenced config. 
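HasTag, a few functions back, decides whether an image reference already names a tag, and the [^/] in its pattern is what keeps a registry port from being read as one. A quick demonstration:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same pattern as HasTag: the run after the final ':' may not
	// contain '/', so "host:5000/repo" does not count as tagged.
	tagRegex := regexp.MustCompile(".*:[^/]+$")
	for _, img := range []string{
		"ubuntu:16.04",          // tagged
		"ubuntu",                // untagged; getImage appends ":latest"
		"localhost:5000/ubuntu", // port, not a tag
	} {
		fmt.Printf("%-24s hasTag=%v\n", img, tagRegex.MatchString(img))
	}
}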
- manifestPath := filepath.Join(tempDir, "manifest.json") - contents, err := ioutil.ReadFile(manifestPath) - if err != nil { - return ConfigSchema{}, err - } - - manifests := []tarfile.ManifestItem{} - if err := json.Unmarshal(contents, &manifests); err != nil { - return ConfigSchema{}, err - } - - if len(manifests) != 1 { - return ConfigSchema{}, errors.New("specified tar file contains multiple images") - } - - cfgFilename := filepath.Join(tempDir, manifests[0].Config) - file, err := ioutil.ReadFile(cfgFilename) - if err != nil { - logrus.Errorf("Could not read config file %s: %s", cfgFilename, err) - return ConfigSchema{}, errors.New("Could not obtain image config") - } - err = json.Unmarshal(file, &config) - if err != nil { - logrus.Errorf("Could not marshal config file %s: %s", cfgFilename, err) - return ConfigSchema{}, errors.New("Could not obtain image config") - } - - return config, nil -} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_utils.go b/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_utils.go deleted file mode 100644 index 1cb9cc09dc..0000000000 --- a/vendor/github.com/GoogleContainerTools/container-diff/pkg/util/tar_utils.go +++ /dev/null @@ -1,224 +0,0 @@ -/* -Copyright 2018 Google, Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package util - -import ( - "archive/tar" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Map of target:linkname -var hardlinks = make(map[string]string) - -type OriginalPerm struct { - path string - perm os.FileMode -} - -func unpackTar(tr *tar.Reader, path string, whitelist []string) error { - originalPerms := make([]OriginalPerm, 0) - for { - header, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - logrus.Error("Error getting next tar header") - return err - } - if strings.Contains(header.Name, ".wh.") { - rmPath := filepath.Clean(filepath.Join(path, header.Name)) - // Remove the .wh file if it was extracted. - if _, err := os.Stat(rmPath); !os.IsNotExist(err) { - if err := os.Remove(rmPath); err != nil { - logrus.Error(err) - } - } - - // Remove the whited-out path. 
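The .wh. handling above implements AUFS-style whiteouts: a layer records the deletion of a path by shipping a marker entry named .wh.<name>, so extraction must delete <name> (and any marker file that was itself extracted) instead of keeping the marker. A sketch of the name mapping the next line performs; like the original, it would also rewrite a path that merely contains ".wh." somewhere else:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// whiteoutTarget (my name) maps a whiteout entry in a layer tar to the
// path it deletes in the layers below.
func whiteoutTarget(root, name string) string {
	p := filepath.Clean(filepath.Join(root, name))
	return strings.Replace(p, ".wh.", "", 1)
}

func main() {
	fmt.Println(whiteoutTarget("/unpacked", "etc/.wh.hostname"))
	// /unpacked/etc/hostname: extraction removes this path rather than
	// creating /unpacked/etc/.wh.hostname
}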
- newName := strings.Replace(rmPath, ".wh.", "", 1) - if err = os.RemoveAll(newName); err != nil { - logrus.Error(err) - } - continue - } - target := filepath.Clean(filepath.Join(path, header.Name)) - // Make sure the target isn't part of the whitelist - if checkWhitelist(target, whitelist) { - continue - } - mode := header.FileInfo().Mode() - switch header.Typeflag { - - // if its a dir and it doesn't exist create it - case tar.TypeDir: - if _, err := os.Stat(target); os.IsNotExist(err) { - if mode.Perm()&(1<<(uint(7))) == 0 { - logrus.Debugf("Write permission bit not set on %s by default; setting manually", target) - originalMode := mode - mode = mode | (1 << uint(7)) - // keep track of original file permission to reset later - originalPerms = append(originalPerms, OriginalPerm{ - path: target, - perm: originalMode, - }) - } - logrus.Debugf("Creating directory %s with permissions %v", target, mode) - if err := os.MkdirAll(target, mode); err != nil { - return err - } - // In some cases, MkdirAll doesn't change the permissions, so run Chmod - if err := os.Chmod(target, mode); err != nil { - return err - } - } - - // if it's a file create it - case tar.TypeReg: - // It's possible for a file to be included before the directory it's in is created. - baseDir := filepath.Dir(target) - if _, err := os.Stat(baseDir); os.IsNotExist(err) { - logrus.Debugf("baseDir %s for file %s does not exist. Creating.", baseDir, target) - if err := os.MkdirAll(baseDir, 0755); err != nil { - return err - } - } - // It's possible we end up creating files that can't be overwritten based on their permissions. - // Explicitly delete an existing file before continuing. - if _, err := os.Stat(target); !os.IsNotExist(err) { - logrus.Debugf("Removing %s for overwrite.", target) - if err := os.Remove(target); err != nil { - return err - } - } - - logrus.Debugf("Creating file %s with permissions %v", target, mode) - currFile, err := os.Create(target) - if err != nil { - logrus.Errorf("Error creating file %s %s", target, err) - return err - } - // manually set permissions on file, since the default umask (022) will interfere - if err = os.Chmod(target, mode); err != nil { - logrus.Errorf("Error updating file permissions on %s", target) - return err - } - _, err = io.Copy(currFile, tr) - if err != nil { - return err - } - currFile.Close() - case tar.TypeSymlink: - // It's possible we end up creating files that can't be overwritten based on their permissions. - // Explicitly delete an existing file before continuing. 
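The 1<<7 tests above manipulate the owner-write bit (octal 0200): a directory shipped in a layer as r-x could not receive the entries extracted into it afterwards, so unpackTar forces the bit on, records the original mode, and restores it once the whole archive is unpacked. In miniature:

package main

import (
	"fmt"
	"os"
)

func main() {
	mode := os.FileMode(0555) // directory mode as recorded in the layer
	if mode.Perm()&(1<<7) == 0 {
		original := mode
		mode |= 1 << 7 // 0200: owner write
		fmt.Printf("extract with %v, restore %v when done\n", mode, original)
	}
}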
- if _, err := os.Stat(target); !os.IsNotExist(err) { - logrus.Debugf("Removing %s to create symlink.", target) - if err := os.RemoveAll(target); err != nil { - logrus.Debugf("Unable to remove %s: %s", target, err) - } - } - - if err = os.Symlink(header.Linkname, target); err != nil { - logrus.Errorf("Failed to create symlink between %s and %s: %s", header.Linkname, target, err) - } - case tar.TypeLink: - linkname := filepath.Clean(filepath.Join(path, header.Linkname)) - // Check if the linkname already exists - if _, err := os.Stat(linkname); !os.IsNotExist(err) { - // If it exists, create the hard link - resolveHardlink(linkname, target) - } else { - hardlinks[target] = linkname - } - } - } - - for target, linkname := range hardlinks { - logrus.Info("Resolving hard links.") - if _, err := os.Stat(linkname); !os.IsNotExist(err) { - // If it exists, create the hard link - if err := resolveHardlink(linkname, target); err != nil { - return errors.Wrap(err, fmt.Sprintf("Unable to create hard link from %s to %s", linkname, target)) - } - } - } - - // reset all original file - for _, perm := range originalPerms { - if err := os.Chmod(perm.path, perm.perm); err != nil { - return err - } - } - return nil -} - -func resolveHardlink(linkname, target string) error { - if err := os.Link(linkname, target); err != nil { - return err - } - logrus.Debugf("Created hard link from %s to %s", linkname, target) - return nil -} - -func checkWhitelist(target string, whitelist []string) bool { - for _, w := range whitelist { - if HasFilepathPrefix(target, w) { - logrus.Debugf("Not extracting %s, as it has prefix %s which is whitelisted", target, w) - return true - } - } - return false -} - -// UnTar takes in a path to a tar file and writes the untarred version to the provided target. -// Only untars one level, does not untar nested tars. -func UnTar(r io.Reader, target string, whitelist []string) error { - if _, ok := os.Stat(target); ok != nil { - os.MkdirAll(target, 0775) - } - - tr := tar.NewReader(r) - if err := unpackTar(tr, target, whitelist); err != nil { - return err - } - return nil -} - -func IsTar(path string) bool { - return filepath.Ext(path) == ".tar" || - filepath.Ext(path) == ".tar.gz" || - filepath.Ext(path) == ".tgz" -} - -func CheckTar(image string) bool { - if strings.TrimSuffix(image, ".tar") == image { - return false - } - if _, err := os.Stat(image); err != nil { - logrus.Errorf("%s does not exist", image) - return false - } - return true -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/github.com/Microsoft/go-winio/archive/tar/common.go deleted file mode 100644 index 0378401c0d..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/common.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tar implements access to tar archives. -// It aims to cover most of the variations, including those produced -// by GNU and BSD tars. 
-// -// References: -// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 -// http://www.gnu.org/software/tar/manual/html_node/Standard.html -// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html -package tar - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "time" -) - -const ( - blockSize = 512 - - // Types - TypeReg = '0' // regular file - TypeRegA = '\x00' // regular file - TypeLink = '1' // hard link - TypeSymlink = '2' // symbolic link - TypeChar = '3' // character device node - TypeBlock = '4' // block device node - TypeDir = '5' // directory - TypeFifo = '6' // fifo node - TypeCont = '7' // reserved - TypeXHeader = 'x' // extended header - TypeXGlobalHeader = 'g' // global extended header - TypeGNULongName = 'L' // Next file has a long name - TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name - TypeGNUSparse = 'S' // sparse file -) - -// A Header represents a single header in a tar archive. -// Some fields may not be populated. -type Header struct { - Name string // name of header file entry - Mode int64 // permission and mode bits - Uid int // user id of owner - Gid int // group id of owner - Size int64 // length in bytes - ModTime time.Time // modified time - Typeflag byte // type of header entry - Linkname string // target name of link - Uname string // user name of owner - Gname string // group name of owner - Devmajor int64 // major number of character or block device - Devminor int64 // minor number of character or block device - AccessTime time.Time // access time - ChangeTime time.Time // status change time - CreationTime time.Time // creation time - Xattrs map[string]string - Winheaders map[string]string -} - -// File name constants from the tar spec. -const ( - fileNameSize = 100 // Maximum number of bytes in a standard tar name. - fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. -) - -// FileInfo returns an os.FileInfo for the Header. -func (h *Header) FileInfo() os.FileInfo { - return headerFileInfo{h} -} - -// headerFileInfo implements os.FileInfo. -type headerFileInfo struct { - h *Header -} - -func (fi headerFileInfo) Size() int64 { return fi.h.Size } -func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } -func (fi headerFileInfo) Sys() interface{} { return fi.h } - -// Name returns the base name of the file. -func (fi headerFileInfo) Name() string { - if fi.IsDir() { - return path.Base(path.Clean(fi.h.Name)) - } - return path.Base(fi.h.Name) -} - -// Mode returns the permission and mode bits for the headerFileInfo. -func (fi headerFileInfo) Mode() (mode os.FileMode) { - // Set file permission bits. - mode = os.FileMode(fi.h.Mode).Perm() - - // Set setuid, setgid and sticky bits. - if fi.h.Mode&c_ISUID != 0 { - // setuid - mode |= os.ModeSetuid - } - if fi.h.Mode&c_ISGID != 0 { - // setgid - mode |= os.ModeSetgid - } - if fi.h.Mode&c_ISVTX != 0 { - // sticky - mode |= os.ModeSticky - } - - // Set file mode bits. - // clear perm, setuid, setgid and sticky bits. 
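Mode() above first maps the tar spec's setuid (04000), setgid (02000) and sticky (01000) bits onto Go's separate FileMode flags before turning to the file-type bits handled next. The translation in miniature:

package main

import (
	"fmt"
	"os"
)

func main() {
	const tarMode = 04755 // a setuid root binary: rwsr-xr-x
	mode := os.FileMode(tarMode).Perm()
	if tarMode&04000 != 0 {
		mode |= os.ModeSetuid
	}
	if tarMode&02000 != 0 {
		mode |= os.ModeSetgid
	}
	if tarMode&01000 != 0 {
		mode |= os.ModeSticky
	}
	fmt.Println(mode) // urwxr-xr-x
}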
- m := os.FileMode(fi.h.Mode) &^ 07777 - if m == c_ISDIR { - // directory - mode |= os.ModeDir - } - if m == c_ISFIFO { - // named pipe (FIFO) - mode |= os.ModeNamedPipe - } - if m == c_ISLNK { - // symbolic link - mode |= os.ModeSymlink - } - if m == c_ISBLK { - // device file - mode |= os.ModeDevice - } - if m == c_ISCHR { - // Unix character device - mode |= os.ModeDevice - mode |= os.ModeCharDevice - } - if m == c_ISSOCK { - // Unix domain socket - mode |= os.ModeSocket - } - - switch fi.h.Typeflag { - case TypeSymlink: - // symbolic link - mode |= os.ModeSymlink - case TypeChar: - // character device node - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case TypeBlock: - // block device node - mode |= os.ModeDevice - case TypeDir: - // directory - mode |= os.ModeDir - case TypeFifo: - // fifo node - mode |= os.ModeNamedPipe - } - - return mode -} - -// sysStat, if non-nil, populates h from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, h *Header) error - -// Mode constants from the tar spec. -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -// Keywords for the PAX Extended Header -const ( - paxAtime = "atime" - paxCharset = "charset" - paxComment = "comment" - paxCtime = "ctime" // please note that ctime is not a valid pax header. - paxCreationTime = "LIBARCHIVE.creationtime" - paxGid = "gid" - paxGname = "gname" - paxLinkpath = "linkpath" - paxMtime = "mtime" - paxPath = "path" - paxSize = "size" - paxUid = "uid" - paxUname = "uname" - paxXattr = "SCHILY.xattr." - paxWindows = "MSWINDOWS." - paxNone = "" -) - -// FileInfoHeader creates a partially-populated Header from fi. -// If fi describes a symlink, FileInfoHeader records link as the link target. -// If fi describes a directory, a slash is appended to the name. -// Because os.FileInfo's Name method returns only the base name of -// the file it describes, it may be necessary to modify the Name field -// of the returned header to provide the full path name of the file. -func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { - if fi == nil { - return nil, errors.New("tar: FileInfo is nil") - } - fm := fi.Mode() - h := &Header{ - Name: fi.Name(), - ModTime: fi.ModTime(), - Mode: int64(fm.Perm()), // or'd with c_IS* constants later - } - switch { - case fm.IsRegular(): - h.Mode |= c_ISREG - h.Typeflag = TypeReg - h.Size = fi.Size() - case fi.IsDir(): - h.Typeflag = TypeDir - h.Mode |= c_ISDIR - h.Name += "/" - case fm&os.ModeSymlink != 0: - h.Typeflag = TypeSymlink - h.Mode |= c_ISLNK - h.Linkname = link - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - h.Mode |= c_ISCHR - h.Typeflag = TypeChar - } else { - h.Mode |= c_ISBLK - h.Typeflag = TypeBlock - } - case fm&os.ModeNamedPipe != 0: - h.Typeflag = TypeFifo - h.Mode |= c_ISFIFO - case fm&os.ModeSocket != 0: - h.Mode |= c_ISSOCK - default: - return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) - } - if fm&os.ModeSetuid != 0 { - h.Mode |= c_ISUID - } - if fm&os.ModeSetgid != 0 { - h.Mode |= c_ISGID - } - if fm&os.ModeSticky != 0 { - h.Mode |= c_ISVTX - } - // If possible, populate additional fields from OS-specific - // FileInfo fields. 
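The caveat in FileInfoHeader's doc comment above is shared by the standard library's archive/tar, of which this vendored file is a fork: os.FileInfo only exposes the base name, so archivers have to rewrite hdr.Name to a full path themselves. For instance, with the stdlib package:

package main

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	path := os.Args[0] // any existing file works here
	fi, err := os.Stat(path)
	if err != nil {
		panic(err)
	}
	hdr, err := tar.FileInfoHeader(fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Println("before:", hdr.Name) // base name only
	hdr.Name = filepath.ToSlash(path)
	fmt.Println("after: ", hdr.Name) // full path inside the archive
}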
- if sys, ok := fi.Sys().(*Header); ok { - // This FileInfo came from a Header (not the OS). Use the - // original Header to populate all remaining fields. - h.Uid = sys.Uid - h.Gid = sys.Gid - h.Uname = sys.Uname - h.Gname = sys.Gname - h.AccessTime = sys.AccessTime - h.ChangeTime = sys.ChangeTime - if sys.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range sys.Xattrs { - h.Xattrs[k] = v - } - } - if sys.Typeflag == TypeLink { - // hard link - h.Typeflag = TypeLink - h.Size = 0 - h.Linkname = sys.Linkname - } - } - if sysStat != nil { - return h, sysStat(fi, h) - } - return h, nil -} - -var zeroBlock = make([]byte, blockSize) - -// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. -// We compute and return both. -func checksum(header []byte) (unsigned int64, signed int64) { - for i := 0; i < len(header); i++ { - if i == 148 { - // The chksum field (header[148:156]) is special: it should be treated as space bytes. - unsigned += ' ' * 8 - signed += ' ' * 8 - i += 7 - continue - } - unsigned += int64(header[i]) - signed += int64(int8(header[i])) - } - return -} - -type slicer []byte - -func (sp *slicer) next(n int) (b []byte) { - s := *sp - b, *sp = s[0:n], s[n:] - return -} - -func isASCII(s string) bool { - for _, c := range s { - if c >= 0x80 { - return false - } - } - return true -} - -func toASCII(s string) string { - if isASCII(s) { - return s - } - var buf bytes.Buffer - for _, c := range s { - if c < 0x80 { - buf.WriteByte(byte(c)) - } - } - return buf.String() -} - -// isHeaderOnlyType checks if the given type flag is of the type that has no -// data section even if a size is specified. -func isHeaderOnlyType(flag byte) bool { - switch flag { - case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: - return true - default: - return false - } -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go deleted file mode 100644 index e210c618a1..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/reader.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -// TODO(dsymonds): -// - pax extensions - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "math" - "os" - "strconv" - "strings" - "time" -) - -var ( - ErrHeader = errors.New("archive/tar: invalid tar header") -) - -const maxNanoSecondIntSize = 9 - -// A Reader provides sequential access to the contents of a tar archive. -// A tar archive consists of a sequence of files. -// The Next method advances to the next file in the archive (including the first), -// and then it can be treated as an io.Reader to access the file's data. -type Reader struct { - r io.Reader - err error - pad int64 // amount of padding (ignored) after current file entry - curr numBytesReader // reader for current file entry - hdrBuff [blockSize]byte // buffer to use in readHeader -} - -type parser struct { - err error // Last error seen -} - -// A numBytesReader is an io.Reader with a numBytes method, returning the number -// of bytes remaining in the underlying encoded data. -type numBytesReader interface { - io.Reader - numBytes() int64 -} - -// A regFileReader is a numBytesReader for reading file data from a tar archive. 
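The checksum convention above is worth spelling out: the 8-byte chksum field at header[148:156] is summed as if it held ASCII spaces (it cannot contain its own value), and both the POSIX unsigned sum and the old Sun signed sum are computed so verification can accept either. Reproduced standalone:

package main

import "fmt"

// tarChecksums (my name) mirrors checksum above.
func tarChecksums(header []byte) (unsigned, signed int64) {
	for i, b := range header {
		if i >= 148 && i < 156 {
			b = ' ' // the chksum field counts as spaces
		}
		unsigned += int64(b)
		signed += int64(int8(b))
	}
	return
}

func main() {
	header := make([]byte, 512)
	copy(header, "demo.txt")
	u, s := tarChecksums(header)
	fmt.Println(u, s) // equal here: no byte has the high bit set
}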
-type regFileReader struct { - r io.Reader // underlying reader - nb int64 // number of unread bytes for current file entry -} - -// A sparseFileReader is a numBytesReader for reading sparse file data from a -// tar archive. -type sparseFileReader struct { - rfr numBytesReader // Reads the sparse-encoded file data - sp []sparseEntry // The sparse map for the file - pos int64 // Keeps track of file position - total int64 // Total size of the file -} - -// A sparseEntry holds a single entry in a sparse file's sparse map. -// -// Sparse files are represented using a series of sparseEntrys. -// Despite the name, a sparseEntry represents an actual data fragment that -// references data found in the underlying archive stream. All regions not -// covered by a sparseEntry are logically filled with zeros. -// -// For example, if the underlying raw file contains the 10-byte data: -// var compactData = "abcdefgh" -// -// And the sparse map has the following entries: -// var sp = []sparseEntry{ -// {offset: 2, numBytes: 5} // Data fragment for [2..7] -// {offset: 18, numBytes: 3} // Data fragment for [18..21] -// } -// -// Then the content of the resulting sparse file with a "real" size of 25 is: -// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 -type sparseEntry struct { - offset int64 // Starting position of the fragment - numBytes int64 // Length of the fragment -} - -// Keywords for GNU sparse files in a PAX extended header -const ( - paxGNUSparseNumBlocks = "GNU.sparse.numblocks" - paxGNUSparseOffset = "GNU.sparse.offset" - paxGNUSparseNumBytes = "GNU.sparse.numbytes" - paxGNUSparseMap = "GNU.sparse.map" - paxGNUSparseName = "GNU.sparse.name" - paxGNUSparseMajor = "GNU.sparse.major" - paxGNUSparseMinor = "GNU.sparse.minor" - paxGNUSparseSize = "GNU.sparse.size" - paxGNUSparseRealSize = "GNU.sparse.realsize" -) - -// Keywords for old GNU sparse headers -const ( - oldGNUSparseMainHeaderOffset = 386 - oldGNUSparseMainHeaderIsExtendedOffset = 482 - oldGNUSparseMainHeaderNumEntries = 4 - oldGNUSparseExtendedHeaderIsExtendedOffset = 504 - oldGNUSparseExtendedHeaderNumEntries = 21 - oldGNUSparseOffsetSize = 12 - oldGNUSparseNumBytesSize = 12 -) - -// NewReader creates a new Reader reading from r. -func NewReader(r io.Reader) *Reader { return &Reader{r: r} } - -// Next advances to the next entry in the tar archive. -// -// io.EOF is returned at the end of the input. -func (tr *Reader) Next() (*Header, error) { - if tr.err != nil { - return nil, tr.err - } - - var hdr *Header - var extHdrs map[string]string - - // Externally, Next iterates through the tar archive as if it is a series of - // files. Internally, the tar format often uses fake "files" to add meta - // data that describes the next file. These meta data "files" should not - // normally be visible to the outside. As such, this loop iterates through - // one or more "header files" until it finds a "normal file". -loop: - for { - tr.err = tr.skipUnread() - if tr.err != nil { - return nil, tr.err - } - - hdr = tr.readHeader() - if tr.err != nil { - return nil, tr.err - } - - // Check for PAX/GNU special headers and files. - switch hdr.Typeflag { - case TypeXHeader: - extHdrs, tr.err = parsePAX(tr) - if tr.err != nil { - return nil, tr.err - } - continue loop // This is a meta header affecting the next header - case TypeGNULongName, TypeGNULongLink: - var realname []byte - realname, tr.err = ioutil.ReadAll(tr) - if tr.err != nil { - return nil, tr.err - } - - // Convert GNU extensions to use PAX headers. 
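The sparseEntry comment above includes a worked example ("10-byte" there appears to be a typo for the 8-byte "abcdefgh"). Expanding a sparse map into the logical file contents makes the layout concrete; expand is my helper, not part of this package:

package main

import "fmt"

type sparseEntry struct {
	offset   int64
	numBytes int64
}

// expand zero-fills everything not covered by the sparse map and copies
// each data fragment to its offset.
func expand(compact []byte, sp []sparseEntry, realSize int64) []byte {
	out := make([]byte, realSize)
	var consumed int64
	for _, e := range sp {
		copy(out[e.offset:e.offset+e.numBytes], compact[consumed:consumed+e.numBytes])
		consumed += e.numBytes
	}
	return out
}

func main() {
	compact := []byte("abcdefgh")
	sp := []sparseEntry{{offset: 2, numBytes: 5}, {offset: 18, numBytes: 3}}
	fmt.Printf("%q\n", expand(compact, sp, 25))
	// "\x00\x00abcde\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00fgh\x00\x00\x00\x00"
}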
- if extHdrs == nil { - extHdrs = make(map[string]string) - } - var p parser - switch hdr.Typeflag { - case TypeGNULongName: - extHdrs[paxPath] = p.parseString(realname) - case TypeGNULongLink: - extHdrs[paxLinkpath] = p.parseString(realname) - } - if p.err != nil { - tr.err = p.err - return nil, tr.err - } - continue loop // This is a meta header affecting the next header - default: - mergePAX(hdr, extHdrs) - - // Check for a PAX format sparse file - sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) - if err != nil { - tr.err = err - return nil, err - } - if sp != nil { - // Current file is a PAX format GNU sparse file. - // Set the current file reader to a sparse file reader. - tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) - if tr.err != nil { - return nil, tr.err - } - } - break loop // This is a file, so stop - } - } - return hdr, nil -} - -// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then -// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to -// be treated as a regular file. -func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { - var sparseFormat string - - // Check for sparse format indicators - major, majorOk := headers[paxGNUSparseMajor] - minor, minorOk := headers[paxGNUSparseMinor] - sparseName, sparseNameOk := headers[paxGNUSparseName] - _, sparseMapOk := headers[paxGNUSparseMap] - sparseSize, sparseSizeOk := headers[paxGNUSparseSize] - sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] - - // Identify which, if any, sparse format applies from which PAX headers are set - if majorOk && minorOk { - sparseFormat = major + "." + minor - } else if sparseNameOk && sparseMapOk { - sparseFormat = "0.1" - } else if sparseSizeOk { - sparseFormat = "0.0" - } else { - // Not a PAX format GNU sparse file. - return nil, nil - } - - // Check for unknown sparse format - if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { - return nil, nil - } - - // Update hdr from GNU sparse PAX headers - if sparseNameOk { - hdr.Name = sparseName - } - if sparseSizeOk { - realSize, err := strconv.ParseInt(sparseSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } else if sparseRealSizeOk { - realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } - - // Set up the sparse map, according to the particular sparse format in use - var sp []sparseEntry - var err error - switch sparseFormat { - case "0.0", "0.1": - sp, err = readGNUSparseMap0x1(headers) - case "1.0": - sp, err = readGNUSparseMap1x0(tr.curr) - } - return sp, err -} - -// mergePAX merges well known headers according to PAX standard. -// In general headers with the same name as those found -// in the header struct overwrite those found in the header -// struct with higher precision or longer values. Esp. useful -// for name and linkname fields. 
-func mergePAX(hdr *Header, headers map[string]string) error { - for k, v := range headers { - switch k { - case paxPath: - hdr.Name = v - case paxLinkpath: - hdr.Linkname = v - case paxGname: - hdr.Gname = v - case paxUname: - hdr.Uname = v - case paxUid: - uid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Uid = int(uid) - case paxGid: - gid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Gid = int(gid) - case paxAtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.AccessTime = t - case paxMtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ModTime = t - case paxCtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ChangeTime = t - case paxCreationTime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.CreationTime = t - case paxSize: - size, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Size = int64(size) - default: - if strings.HasPrefix(k, paxXattr) { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) - } - hdr.Xattrs[k[len(paxXattr):]] = v - } else if strings.HasPrefix(k, paxWindows) { - if hdr.Winheaders == nil { - hdr.Winheaders = make(map[string]string) - } - hdr.Winheaders[k[len(paxWindows):]] = v - } - } - } - return nil -} - -// parsePAXTime takes a string of the form %d.%d as described in -// the PAX specification. -func parsePAXTime(t string) (time.Time, error) { - buf := []byte(t) - pos := bytes.IndexByte(buf, '.') - var seconds, nanoseconds int64 - var err error - if pos == -1 { - seconds, err = strconv.ParseInt(t, 10, 0) - if err != nil { - return time.Time{}, err - } - } else { - seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) - if err != nil { - return time.Time{}, err - } - nano_buf := string(buf[pos+1:]) - // Pad as needed before converting to a decimal. - // For example .030 -> .030000000 -> 30000000 nanoseconds - if len(nano_buf) < maxNanoSecondIntSize { - // Right pad - nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) - } else if len(nano_buf) > maxNanoSecondIntSize { - // Right truncate - nano_buf = nano_buf[:maxNanoSecondIntSize] - } - nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) - if err != nil { - return time.Time{}, err - } - } - ts := time.Unix(seconds, nanoseconds) - return ts, nil -} - -// parsePAX parses PAX headers. -// If an extended header (type 'x') is invalid, ErrHeader is returned -func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - sbuf := string(buf) - - // For GNU PAX sparse format 0.0 support. - // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. - var sparseMap bytes.Buffer - - headers := make(map[string]string) - // Each record is constructed as - // "%d %s=%s\n", length, keyword, value - for len(sbuf) > 0 { - key, value, residual, err := parsePAXRecord(sbuf) - if err != nil { - return nil, ErrHeader - } - sbuf = residual - - keyStr := string(key) - if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { - // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. - sparseMap.WriteString(value) - sparseMap.Write([]byte{','}) - } else { - // Normal key. Set the value in the headers map. 
- headers[keyStr] = string(value) - } - } - if sparseMap.Len() != 0 { - // Add sparse info to headers, chopping off the extra comma - sparseMap.Truncate(sparseMap.Len() - 1) - headers[paxGNUSparseMap] = sparseMap.String() - } - return headers, nil -} - -// parsePAXRecord parses the input PAX record string into a key-value pair. -// If parsing is successful, it will slice off the currently read record and -// return the remainder as r. -// -// A PAX record is of the following form: -// "%d %s=%s\n" % (size, key, value) -func parsePAXRecord(s string) (k, v, r string, err error) { - // The size field ends at the first space. - sp := strings.IndexByte(s, ' ') - if sp == -1 { - return "", "", s, ErrHeader - } - - // Parse the first token as a decimal integer. - n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int - if perr != nil || n < 5 || int64(len(s)) < n { - return "", "", s, ErrHeader - } - - // Extract everything between the space and the final newline. - rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] - if nl != "\n" { - return "", "", s, ErrHeader - } - - // The first equals separates the key from the value. - eq := strings.IndexByte(rec, '=') - if eq == -1 { - return "", "", s, ErrHeader - } - return rec[:eq], rec[eq+1:], rem, nil -} - -// parseString parses bytes as a NUL-terminated C-style string. -// If a NUL byte is not found then the whole slice is returned as a string. -func (*parser) parseString(b []byte) string { - n := 0 - for n < len(b) && b[n] != 0 { - n++ - } - return string(b[0:n]) -} - -// parseNumeric parses the input as being encoded in either base-256 or octal. -// This function may return negative numbers. -// If parsing fails or an integer overflow occurs, err will be set. -func (p *parser) parseNumeric(b []byte) int64 { - // Check for base-256 (binary) format first. - // If the first bit is set, then all following bits constitute a two's - // complement encoded number in big-endian byte order. - if len(b) > 0 && b[0]&0x80 != 0 { - // Handling negative numbers relies on the following identity: - // -a-1 == ^a - // - // If the number is negative, we use an inversion mask to invert the - // data bytes and treat the value as an unsigned number. - var inv byte // 0x00 if positive or zero, 0xff if negative - if b[0]&0x40 != 0 { - inv = 0xff - } - - var x uint64 - for i, c := range b { - c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing - if i == 0 { - c &= 0x7f // Ignore signal bit in first byte - } - if (x >> 56) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - x = x<<8 | uint64(c) - } - if (x >> 63) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - if inv == 0xff { - return ^int64(x) - } - return int64(x) - } - - // Normal case is base-8 (octal) format. - return p.parseOctal(b) -} - -func (p *parser) parseOctal(b []byte) int64 { - // Because unused fields are filled with NULs, we need - // to skip leading NULs. Fields may also be padded with - // spaces or NULs. - // So we remove leading and trailing NULs and spaces to - // be sure. - b = bytes.Trim(b, " \x00") - - if len(b) == 0 { - return 0 - } - x, perr := strconv.ParseUint(p.parseString(b), 8, 64) - if perr != nil { - p.err = ErrHeader - } - return int64(x) -} - -// skipUnread skips any unread bytes in the existing file entry, as well as any -// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is -// encountered in the data portion; it is okay to hit io.EOF in the padding. 
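parseNumeric above handles the GNU base-256 extension: an 11-digit octal size field tops out just under 8GiB, so for larger values the first byte's high bit flags a big-endian two's-complement encoding. A positive-values-only sketch (the vendored code also decodes negatives via an inversion mask and checks for overflow):

package main

import "fmt"

// base256 strips the flag bit and accumulates big-endian bytes.
func base256(b []byte) uint64 {
	x := uint64(b[0] & 0x7f)
	for _, c := range b[1:] {
		x = x<<8 | uint64(c)
	}
	return x
}

func main() {
	// An 8GiB file size written into the 12-byte size field.
	field := []byte{0x80, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0}
	fmt.Println(base256(field)) // 8589934592
}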
-// -// Note that this function still works properly even when sparse files are being -// used since numBytes returns the bytes remaining in the underlying io.Reader. -func (tr *Reader) skipUnread() error { - dataSkip := tr.numBytes() // Number of data bytes to skip - totalSkip := dataSkip + tr.pad // Total number of bytes to skip - tr.curr, tr.pad = nil, 0 - - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the tar stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - var seekSkipped int64 // Number of bytes skipped via Seek - if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, os.SEEK_CUR) - if err == nil { - // Seek seems supported, so perform the real Seek. - pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) - if err != nil { - tr.err = err - return tr.err - } - seekSkipped = pos2 - pos1 - } - } - - var copySkipped int64 // Number of bytes skipped via CopyN - copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) - if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { - tr.err = io.ErrUnexpectedEOF - } - return tr.err -} - -func (tr *Reader) verifyChecksum(header []byte) bool { - if tr.err != nil { - return false - } - - var p parser - given := p.parseOctal(header[148:156]) - unsigned, signed := checksum(header) - return p.err == nil && (given == unsigned || given == signed) -} - -// readHeader reads the next block header and assumes that the underlying reader -// is already aligned to a block boundary. -// -// The err will be set to io.EOF only when one of the following occurs: -// * Exactly 0 bytes are read and EOF is hit. -// * Exactly 1 block of zeros is read and EOF is hit. -// * At least 2 blocks of zeros are read. -func (tr *Reader) readHeader() *Header { - header := tr.hdrBuff[:] - copy(header, zeroBlock) - - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil // io.EOF is okay here - } - - // Two blocks of zero bytes marks the end of the archive. - if bytes.Equal(header, zeroBlock[0:blockSize]) { - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil // io.EOF is okay here - } - if bytes.Equal(header, zeroBlock[0:blockSize]) { - tr.err = io.EOF - } else { - tr.err = ErrHeader // zero block and then non-zero block - } - return nil - } - - if !tr.verifyChecksum(header) { - tr.err = ErrHeader - return nil - } - - // Unpack - var p parser - hdr := new(Header) - s := slicer(header) - - hdr.Name = p.parseString(s.next(100)) - hdr.Mode = p.parseNumeric(s.next(8)) - hdr.Uid = int(p.parseNumeric(s.next(8))) - hdr.Gid = int(p.parseNumeric(s.next(8))) - hdr.Size = p.parseNumeric(s.next(12)) - hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) - s.next(8) // chksum - hdr.Typeflag = s.next(1)[0] - hdr.Linkname = p.parseString(s.next(100)) - - // The remainder of the header depends on the value of magic. - // The original (v7) version of tar had no explicit magic field, - // so its magic bytes, like the rest of the block, are NULs. - magic := string(s.next(8)) // contains version field as well. 
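verifyChecksum above compares the stored octal value against two sums produced by a checksum helper defined elsewhere in this file (not visible in this hunk). The rule itself is the standard tar one: add up all 512 header bytes with the eight checksum bytes counted as ASCII spaces, accepting either the unsigned sum or the historical signed-byte sum. A minimal sketch of that computation:

    package main

    import "fmt"

    // headerChecksum sums a 512-byte tar header with the checksum field
    // (offsets 148-155) treated as spaces. Old tar writers summed signed
    // bytes, so readers accept both variants.
    func headerChecksum(header []byte) (unsigned, signed int64) {
        for i, c := range header {
            if 148 <= i && i < 156 {
                c = ' '
            }
            unsigned += int64(c)
            signed += int64(int8(c))
        }
        return unsigned, signed
    }

    func main() {
        blk := make([]byte, 512)
        copy(blk, "somefile.txt")
        u, s := headerChecksum(blk)
        fmt.Println(u, s) // equal here: no byte has the high bit set
    }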
- var format string
- switch {
- case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988)
- if string(header[508:512]) == "tar\x00" {
- format = "star"
- } else {
- format = "posix"
- }
- case magic == "ustar  \x00": // old GNU tar
- format = "gnu"
- }
-
- switch format {
- case "posix", "gnu", "star":
- hdr.Uname = p.parseString(s.next(32))
- hdr.Gname = p.parseString(s.next(32))
- devmajor := s.next(8)
- devminor := s.next(8)
- if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
- hdr.Devmajor = p.parseNumeric(devmajor)
- hdr.Devminor = p.parseNumeric(devminor)
- }
- var prefix string
- switch format {
- case "posix", "gnu":
- prefix = p.parseString(s.next(155))
- case "star":
- prefix = p.parseString(s.next(131))
- hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0)
- }
- if len(prefix) > 0 {
- hdr.Name = prefix + "/" + hdr.Name
- }
- }
-
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- nb := hdr.Size
- if isHeaderOnlyType(hdr.Typeflag) {
- nb = 0
- }
- if nb < 0 {
- tr.err = ErrHeader
- return nil
- }
-
- // Set the current file reader.
- tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
- tr.curr = &regFileReader{r: tr.r, nb: nb}
-
- // Check for old GNU sparse format entry.
- if hdr.Typeflag == TypeGNUSparse {
- // Get the real size of the file.
- hdr.Size = p.parseNumeric(header[483:495])
- if p.err != nil {
- tr.err = p.err
- return nil
- }
-
- // Read the sparse map.
- sp := tr.readOldGNUSparseMap(header)
- if tr.err != nil {
- return nil
- }
-
- // Current file is a GNU sparse file. Update the current file reader.
- tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size)
- if tr.err != nil {
- return nil
- }
- }
-
- return hdr
-}
-
-// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format.
-// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries,
-// then one or more extension headers are used to store the rest of the sparse map.
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry {
- var p parser
- isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0
- spCap := oldGNUSparseMainHeaderNumEntries
- if isExtended {
- spCap += oldGNUSparseExtendedHeaderNumEntries
- }
- sp := make([]sparseEntry, 0, spCap)
- s := slicer(header[oldGNUSparseMainHeaderOffset:])
-
- // Read the four entries from the main tar header
- for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ {
- offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize))
- numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize))
- if p.err != nil {
- tr.err = p.err
- return nil
- }
- if offset == 0 && numBytes == 0 {
- break
- }
- sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
- }
-
- for isExtended {
- // There are more entries. Read an extension header and parse its entries.
- sparseHeader := make([]byte, blockSize) - if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { - return nil - } - isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 - s = slicer(sparseHeader) - for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { - offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) - numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) - if p.err != nil { - tr.err = p.err - return nil - } - if offset == 0 && numBytes == 0 { - break - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - } - return sp -} - -// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format -// version 1.0. The format of the sparse map consists of a series of -// newline-terminated numeric fields. The first field is the number of entries -// and is always present. Following this are the entries, consisting of two -// fields (offset, numBytes). This function must stop reading at the end -// boundary of the block containing the last newline. -// -// Note that the GNU manual says that numeric values should be encoded in octal -// format. However, the GNU tar utility itself outputs these values in decimal. -// As such, this library treats values as being encoded in decimal. -func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { - var cntNewline int64 - var buf bytes.Buffer - var blk = make([]byte, blockSize) - - // feedTokens copies data in numBlock chunks from r into buf until there are - // at least cnt newlines in buf. It will not read more blocks than needed. - var feedTokens = func(cnt int64) error { - for cntNewline < cnt { - if _, err := io.ReadFull(r, blk); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - buf.Write(blk) - for _, c := range blk { - if c == '\n' { - cntNewline++ - } - } - } - return nil - } - - // nextToken gets the next token delimited by a newline. This assumes that - // at least one newline exists in the buffer. - var nextToken = func() string { - cntNewline-- - tok, _ := buf.ReadString('\n') - return tok[:len(tok)-1] // Cut off newline - } - - // Parse for the number of entries. - // Use integer overflow resistant math to check this. - if err := feedTokens(1); err != nil { - return nil, err - } - numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. - if err := feedTokens(2 * numEntries); err != nil { - return nil, err - } - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - numBytes, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - return sp, nil -} - -// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format -// version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { - // Get number of entries. - // Use integer overflow resistant math to check this. 
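The GNU sparse 1.0 map consumed by readGNUSparseMap1x0 above is a run of newline-terminated decimal fields: an entry count followed by an offset/length pair per entry. The sketch below parses that shape from a string; the vendored reader's extra bookkeeping (it must consume whole 512-byte blocks, hence feedTokens) is deliberately left out.

    package main

    import (
        "bufio"
        "fmt"
        "strconv"
        "strings"
    )

    type sparseEntry struct{ offset, numBytes int64 }

    // parseSparse1x0 reads an entry count and then offset/length pairs,
    // all decimal and newline-terminated, mirroring the 1.0 format above.
    func parseSparse1x0(s string) ([]sparseEntry, error) {
        sc := bufio.NewScanner(strings.NewReader(s))
        next := func() (int64, error) {
            if !sc.Scan() {
                return 0, fmt.Errorf("sparse map: short input")
            }
            return strconv.ParseInt(sc.Text(), 10, 64)
        }
        n, err := next()
        if err != nil || n < 0 {
            return nil, fmt.Errorf("sparse map: bad entry count")
        }
        sp := make([]sparseEntry, 0, n)
        for i := int64(0); i < n; i++ {
            off, err := next()
            if err != nil {
                return nil, err
            }
            size, err := next()
            if err != nil {
                return nil, err
            }
            sp = append(sp, sparseEntry{off, size})
        }
        return sp, nil
    }

    func main() {
        // Two fragments: 1KiB at offset 0 and 1KiB at offset 10MiB.
        sp, err := parseSparse1x0("2\n0\n1024\n10485760\n1024\n")
        fmt.Println(sp, err) // [{0 1024} {10485760 1024}] <nil>
    }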
- numEntriesStr := extHdrs[paxGNUSparseNumBlocks] - numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") - if int64(len(sparseMap)) != 2*numEntries { - return nil, ErrHeader - } - - // Loop through the entries in the sparse map. - // numEntries is trusted now. - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) - if err != nil { - return nil, ErrHeader - } - numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - return sp, nil -} - -// numBytes returns the number of bytes left to read in the current file's entry -// in the tar archive, or 0 if there is no current file. -func (tr *Reader) numBytes() int64 { - if tr.curr == nil { - // No current file, so no bytes - return 0 - } - return tr.curr.numBytes() -} - -// Read reads from the current entry in the tar archive. -// It returns 0, io.EOF when it reaches the end of that entry, -// until Next is called to advance to the next entry. -// -// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what -// the Header.Size claims. -func (tr *Reader) Read(b []byte) (n int, err error) { - if tr.err != nil { - return 0, tr.err - } - if tr.curr == nil { - return 0, io.EOF - } - - n, err = tr.curr.Read(b) - if err != nil && err != io.EOF { - tr.err = err - } - return -} - -func (rfr *regFileReader) Read(b []byte) (n int, err error) { - if rfr.nb == 0 { - // file consumed - return 0, io.EOF - } - if int64(len(b)) > rfr.nb { - b = b[0:rfr.nb] - } - n, err = rfr.r.Read(b) - rfr.nb -= int64(n) - - if err == io.EOF && rfr.nb > 0 { - err = io.ErrUnexpectedEOF - } - return -} - -// numBytes returns the number of bytes left to read in the file's data in the tar archive. -func (rfr *regFileReader) numBytes() int64 { - return rfr.nb -} - -// newSparseFileReader creates a new sparseFileReader, but validates all of the -// sparse entries before doing so. -func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { - if total < 0 { - return nil, ErrHeader // Total size cannot be negative - } - - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. - for i, s := range sp { - switch { - case s.offset < 0 || s.numBytes < 0: - return nil, ErrHeader // Negative values are never okay - case s.offset > math.MaxInt64-s.numBytes: - return nil, ErrHeader // Integer overflow with large length - case s.offset+s.numBytes > total: - return nil, ErrHeader // Region extends beyond the "real" size - case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: - return nil, ErrHeader // Regions can't overlap and must be in order - } - } - return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil -} - -// readHole reads a sparse hole ending at endOffset. 
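To make the sparse representation concrete: the archive stores only the data fragments back to back, the map records where each fragment belongs, and everything between fragments is a hole of zeros. This standalone sketch expands a whole map in one allocation, which is what sparseFileReader.Read below produces incrementally.

    package main

    import "fmt"

    type sparseEntry struct{ offset, numBytes int64 }

    // expandSparse lays each fragment down at its offset; the gaps stay
    // zero. Entries are assumed validated as in newSparseFileReader above.
    func expandSparse(frags []byte, sp []sparseEntry, total int64) []byte {
        out := make([]byte, total) // holes remain zero
        pos := int64(0)
        for _, s := range sp {
            copy(out[s.offset:s.offset+s.numBytes], frags[pos:pos+s.numBytes])
            pos += s.numBytes
        }
        return out
    }

    func main() {
        sp := []sparseEntry{{offset: 2, numBytes: 3}, {offset: 8, numBytes: 2}}
        file := expandSparse([]byte("ABCDE"), sp, 12)
        fmt.Printf("%q\n", file) // "\x00\x00ABC\x00\x00\x00DE\x00\x00"
    }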
-func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { - n64 := endOffset - sfr.pos - if n64 > int64(len(b)) { - n64 = int64(len(b)) - } - n := int(n64) - for i := 0; i < n; i++ { - b[i] = 0 - } - sfr.pos += n64 - return n -} - -// Read reads the sparse file data in expanded form. -func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { - // Skip past all empty fragments. - for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { - sfr.sp = sfr.sp[1:] - } - - // If there are no more fragments, then it is possible that there - // is one last sparse hole. - if len(sfr.sp) == 0 { - // This behavior matches the BSD tar utility. - // However, GNU tar stops returning data even if sfr.total is unmet. - if sfr.pos < sfr.total { - return sfr.readHole(b, sfr.total), nil - } - return 0, io.EOF - } - - // In front of a data fragment, so read a hole. - if sfr.pos < sfr.sp[0].offset { - return sfr.readHole(b, sfr.sp[0].offset), nil - } - - // In a data fragment, so read from it. - // This math is overflow free since we verify that offset and numBytes can - // be safely added when creating the sparseFileReader. - endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment - bytesLeft := endPos - sfr.pos // Bytes left in fragment - if int64(len(b)) > bytesLeft { - b = b[:bytesLeft] - } - - n, err = sfr.rfr.Read(b) - sfr.pos += int64(n) - if err == io.EOF { - if sfr.pos < endPos { - err = io.ErrUnexpectedEOF // There was supposed to be more data - } else if sfr.pos < sfr.total { - err = nil // There is still an implicit sparse hole at the end - } - } - - if sfr.pos == endPos { - sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it - } - return n, err -} - -// numBytes returns the number of bytes left to read in the sparse file's -// sparse-encoded data in the tar archive. -func (sfr *sparseFileReader) numBytes() int64 { - return sfr.rfr.numBytes() -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go deleted file mode 100644 index cf9cc79c59..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atim.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux dragonfly openbsd solaris - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctim.Unix()) -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go deleted file mode 100644 index 6f17dbe307..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build darwin freebsd netbsd - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atimespec.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctimespec.Unix()) -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go deleted file mode 100644 index cb843db4cf..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/stat_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "os" - "syscall" -) - -func init() { - sysStat = statUnix -} - -func statUnix(fi os.FileInfo, h *Header) error { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - h.Uid = int(sys.Uid) - h.Gid = int(sys.Gid) - // TODO(bradfitz): populate username & group. os/user - // doesn't cache LookupId lookups, and lacks group - // lookup functions. - h.AccessTime = statAtime(sys) - h.ChangeTime = statCtime(sys) - // TODO(bradfitz): major/minor device numbers? - return nil -} diff --git a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go deleted file mode 100644 index 30d7e606d6..0000000000 --- a/vendor/github.com/Microsoft/go-winio/archive/tar/writer.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -// TODO(dsymonds): -// - catch more errors (no first header, etc.) - -import ( - "bytes" - "errors" - "fmt" - "io" - "path" - "sort" - "strconv" - "strings" - "time" -) - -var ( - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") -) - -// A Writer provides sequential writing of a tar archive in POSIX.1 format. -// A tar archive consists of a sequence of files. -// Call WriteHeader to begin a new file, and then call Write to supply that file's data, -// writing at most hdr.Size bytes in total. -type Writer struct { - w io.Writer - err error - nb int64 // number of unwritten bytes for current file entry - pad int64 // amount of padding to write after current file entry - closed bool - usedBinary bool // whether the binary numeric field extension was used - preferPax bool // use pax header instead of binary numeric header - hdrBuff [blockSize]byte // buffer to use in writeHeader when writing a regular header - paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header -} - -type formatter struct { - err error // Last error seen -} - -// NewWriter creates a new Writer writing to w. -func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} } - -// Flush finishes writing the current file (optional). 
-func (tw *Writer) Flush() error {
- if tw.nb > 0 {
- tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
- return tw.err
- }
-
- n := tw.nb + tw.pad
- for n > 0 && tw.err == nil {
- nr := n
- if nr > blockSize {
- nr = blockSize
- }
- var nw int
- nw, tw.err = tw.w.Write(zeroBlock[0:nr])
- n -= int64(nw)
- }
- tw.nb = 0
- tw.pad = 0
- return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-func (f *formatter) formatString(b []byte, s string) {
- if len(s) > len(b) {
- f.err = ErrFieldTooLong
- return
- }
- ascii := toASCII(s)
- copy(b, ascii)
- if len(ascii) < len(b) {
- b[len(ascii)] = 0
- }
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (f *formatter) formatOctal(b []byte, x int64) {
- s := strconv.FormatInt(x, 8)
- // leading zeros, but leave room for a NUL.
- for len(s)+1 < len(b) {
- s = "0" + s
- }
- f.formatString(b, s)
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode; which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
- var binBits = uint(n-1) * 8
- return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// Write x into b, as binary (GNUtar/star extension).
-func (f *formatter) formatNumeric(b []byte, x int64) {
- if fitsInBase256(len(b), x) {
- for i := len(b) - 1; i >= 0; i-- {
- b[i] = byte(x)
- x >>= 8
- }
- b[0] |= 0x80 // Highest bit indicates binary format
- return
- }
-
- f.formatOctal(b, 0) // Last resort, just write zero
- f.err = ErrFieldTooLong
-}
-
-var (
- minTime = time.Unix(0, 0)
- // There is room for 11 octal digits (33 bits) of mtime.
- maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
- return tw.writeHeader(hdr, true)
-}
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// As this method is called internally by writePax header to allow it to
-// suppress writing the pax header.
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
- if tw.closed {
- return ErrWriteAfterClose
- }
- if tw.err == nil {
- tw.Flush()
- }
- if tw.err != nil {
- return tw.err
- }
-
- // a map to hold pax header records, if any are needed
- paxHeaders := make(map[string]string)
-
- // TODO(shanemhansen): we might want to use PAX headers for
- // subsecond time resolution, but for now let's just capture
- // too long fields or non ascii characters
-
- var f formatter
- var header []byte
-
- // We need to select which scratch buffer to use carefully,
- // since this method is called recursively to write PAX headers.
- // If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
- // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
- // already being used by the non-recursive call, so we must use paxHdrBuff.
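The binary branch of formatNumeric above is the inverse of the base-256 decoding sketched earlier in this file: for example, a size of 1<<33 needs twelve octal digits, one more than the 12-byte size field can hold once a terminating NUL is reserved, so the writer switches to base-256. A sketch of just that branch (the octal-first attempt and the error handling are omitted):

    package main

    import "fmt"

    // formatBase256 writes x big-endian into the field and sets the top
    // bit of the first byte to flag the binary (base-256) format.
    func formatBase256(b []byte, x int64) {
        for i := len(b) - 1; i >= 0; i-- {
            b[i] = byte(x)
            x >>= 8
        }
        b[0] |= 0x80 // flag byte: base-256, not octal
    }

    func main() {
        field := make([]byte, 12)
        formatBase256(field, 1<<33)
        fmt.Printf("% x\n", field) // 80 00 00 00 00 00 00 02 00 00 00 00
    }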
- header = tw.hdrBuff[:] - if !allowPax { - header = tw.paxHdrBuff[:] - } - copy(header, zeroBlock) - s := slicer(header) - - // Wrappers around formatter that automatically sets paxHeaders if the - // argument extends beyond the capacity of the input byte slice. - var formatString = func(b []byte, s string, paxKeyword string) { - needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) - if needsPaxHeader { - paxHeaders[paxKeyword] = s - return - } - f.formatString(b, s) - } - var formatNumeric = func(b []byte, x int64, paxKeyword string) { - // Try octal first. - s := strconv.FormatInt(x, 8) - if len(s) < len(b) { - f.formatOctal(b, x) - return - } - - // If it is too long for octal, and PAX is preferred, use a PAX header. - if paxKeyword != paxNone && tw.preferPax { - f.formatOctal(b, 0) - s := strconv.FormatInt(x, 10) - paxHeaders[paxKeyword] = s - return - } - - tw.usedBinary = true - f.formatNumeric(b, x) - } - var formatTime = func(b []byte, t time.Time, paxKeyword string) { - var unixTime int64 - if !t.Before(minTime) && !t.After(maxTime) { - unixTime = t.Unix() - } - formatNumeric(b, unixTime, paxNone) - - // Write a PAX header if the time didn't fit precisely. - if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) { - paxHeaders[paxKeyword] = formatPAXTime(t) - } - } - - // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax - pathHeaderBytes := s.next(fileNameSize) - - formatString(pathHeaderBytes, hdr.Name, paxPath) - - f.formatOctal(s.next(8), hdr.Mode) // 100:108 - formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 - formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 - formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 - formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148 - s.next(8) // chksum (148:156) - s.next(1)[0] = hdr.Typeflag // 156:157 - - formatString(s.next(100), hdr.Linkname, paxLinkpath) - - copy(s.next(8), []byte("ustar\x0000")) // 257:265 - formatString(s.next(32), hdr.Uname, paxUname) // 265:297 - formatString(s.next(32), hdr.Gname, paxGname) // 297:329 - formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 - formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 - - // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax - prefixHeaderBytes := s.next(155) - formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix - - // Use the GNU magic instead of POSIX magic if we used any GNU extensions. - if tw.usedBinary { - copy(header[257:265], []byte("ustar \x00")) - } - - _, paxPathUsed := paxHeaders[paxPath] - // try to use a ustar header when only the name is too long - if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { - prefix, suffix, ok := splitUSTARPath(hdr.Name) - if ok { - // Since we can encode in USTAR format, disable PAX header. - delete(paxHeaders, paxPath) - - // Update the path fields - formatString(pathHeaderBytes, suffix, paxNone) - formatString(prefixHeaderBytes, prefix, paxNone) - } - } - - // The chksum field is terminated by a NUL and a space. - // This is different from the other octal fields. - chksum, _ := checksum(header) - f.formatOctal(header[148:155], chksum) // Never fails - header[155] = ' ' - - // Check if there were any formatting errors. 
- if f.err != nil { - tw.err = f.err - return tw.err - } - - if allowPax { - if !hdr.AccessTime.IsZero() { - paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime) - } - if !hdr.ChangeTime.IsZero() { - paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime) - } - if !hdr.CreationTime.IsZero() { - paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime) - } - for k, v := range hdr.Xattrs { - paxHeaders[paxXattr+k] = v - } - for k, v := range hdr.Winheaders { - paxHeaders[paxWindows+k] = v - } - } - - if len(paxHeaders) > 0 { - if !allowPax { - return errInvalidHeader - } - if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { - return err - } - } - tw.nb = int64(hdr.Size) - tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize - - _, tw.err = tw.w.Write(header) - return tw.err -} - -func formatPAXTime(t time.Time) string { - sec := t.Unix() - usec := t.Nanosecond() - s := strconv.FormatInt(sec, 10) - if usec != 0 { - s = fmt.Sprintf("%s.%09d", s, usec) - } - return s -} - -// splitUSTARPath splits a path according to USTAR prefix and suffix rules. -// If the path is not splittable, then it will return ("", "", false). -func splitUSTARPath(name string) (prefix, suffix string, ok bool) { - length := len(name) - if length <= fileNameSize || !isASCII(name) { - return "", "", false - } else if length > fileNamePrefixSize+1 { - length = fileNamePrefixSize + 1 - } else if name[length-1] == '/' { - length-- - } - - i := strings.LastIndex(name[:length], "/") - nlen := len(name) - i - 1 // nlen is length of suffix - plen := i // plen is length of prefix - if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { - return "", "", false - } - return name[:i], name[i+1:], true -} - -// writePaxHeader writes an extended pax header to the -// archive. -func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { - // Prepare extended header - ext := new(Header) - ext.Typeflag = TypeXHeader - // Setting ModTime is required for reader parsing to - // succeed, and seems harmless enough. - ext.ModTime = hdr.ModTime - // The spec asks that we namespace our pseudo files - // with the current pid. However, this results in differing outputs - // for identical inputs. As such, the constant 0 is now used instead. - // golang.org/issue/12358 - dir, file := path.Split(hdr.Name) - fullName := path.Join(dir, "PaxHeaders.0", file) - - ascii := toASCII(fullName) - if len(ascii) > 100 { - ascii = ascii[:100] - } - ext.Name = ascii - // Construct the body - var buf bytes.Buffer - - // Keys are sorted before writing to body to allow deterministic output. - var keys []string - for k := range paxHeaders { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) - } - - ext.Size = int64(len(buf.Bytes())) - if err := tw.writeHeader(ext, false); err != nil { - return err - } - if _, err := tw.Write(buf.Bytes()); err != nil { - return err - } - if err := tw.Flush(); err != nil { - return err - } - return nil -} - -// formatPAXRecord formats a single PAX record, prefixing it with the -// appropriate length. -func formatPAXRecord(k, v string) string { - const padding = 3 // Extra padding for ' ', '=', and '\n' - size := len(k) + len(v) + padding - size += len(strconv.Itoa(size)) - record := fmt.Sprintf("%d %s=%s\n", size, k, v) - - // Final adjustment if adding size field increased the record size. 
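splitUSTARPath above lets a long name avoid a PAX header entirely when a '/' can be found such that the tail fits the 100-byte name field and the head fits the 155-byte prefix field; the reader rejoins the two with a '/'. A runnable copy of that rule with a worked example (the isASCII guard from the vendored code is dropped in this sketch):

    package main

    import (
        "fmt"
        "strings"
    )

    const (
        fileNameSize       = 100 // ustar name field
        fileNamePrefixSize = 155 // ustar prefix field
    )

    func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
        length := len(name)
        if length <= fileNameSize {
            return "", "", false // already fits in the name field
        } else if length > fileNamePrefixSize+1 {
            length = fileNamePrefixSize + 1
        } else if name[length-1] == '/' {
            length--
        }
        i := strings.LastIndex(name[:length], "/")
        nlen := len(name) - i - 1
        plen := i
        if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize {
            return "", "", false
        }
        return name[:i], name[i+1:], true
    }

    func main() {
        long := strings.Repeat("d/", 60) + "file.txt" // 128 bytes, over 100
        prefix, suffix, ok := splitUSTARPath(long)
        fmt.Println(len(prefix), len(suffix), ok) // 119 8 true
    }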
- if len(record) != size { - size = len(record) - record = fmt.Sprintf("%d %s=%s\n", size, k, v) - } - return record -} - -// Write writes to the current entry in the tar archive. -// Write returns the error ErrWriteTooLong if more than -// hdr.Size bytes are written after WriteHeader. -func (tw *Writer) Write(b []byte) (n int, err error) { - if tw.closed { - err = ErrWriteAfterClose - return - } - overwrite := false - if int64(len(b)) > tw.nb { - b = b[0:tw.nb] - overwrite = true - } - n, err = tw.w.Write(b) - tw.nb -= int64(n) - if err == nil && overwrite { - err = ErrWriteTooLong - return - } - tw.err = err - return -} - -// Close closes the tar archive, flushing any unwritten -// data to the underlying writer. -func (tw *Writer) Close() error { - if tw.err != nil || tw.closed { - return tw.err - } - tw.Flush() - tw.closed = true - if tw.err != nil { - return tw.err - } - - // trailer: two zero blocks - for i := 0; i < 2; i++ { - _, tw.err = tw.w.Write(zeroBlock) - if tw.err != nil { - break - } - } - return tw.err -} diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/noop.go deleted file mode 100644 index d39eccf023..0000000000 --- a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go +++ /dev/null @@ -1,4 +0,0 @@ -// +build !windows -// This file only exists to allow go get on non-Windows platforms. - -package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go deleted file mode 100644 index 53da908f14..0000000000 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ /dev/null @@ -1,439 +0,0 @@ -// +build windows - -package backuptar - -import ( - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface -) - -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -const ( - hdrFileAttributes = "fileattr" - hdrSecurityDescriptor = "sd" - hdrRawSecurityDescriptor = "rawsd" - hdrMountPoint = "mountpoint" - hdrEaPrefix = "xattr." -) - -func writeZeroes(w io.Writer, count int64) error { - buf := make([]byte, 8192) - c := len(buf) - for i := int64(0); i < count; i += int64(c) { - if int64(c) > count-i { - c = int(count - i) - } - _, err := w.Write(buf[:c]) - if err != nil { - return err - } - } - return nil -} - -func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { - curOffset := int64(0) - for { - bhdr, err := br.Next() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if bhdr.Id != winio.BackupSparseBlock { - return fmt.Errorf("unexpected stream %d", bhdr.Id) - } - - // archive/tar does not support writing sparse files - // so just write zeroes to catch up to the current offset. - err = writeZeroes(t, bhdr.Offset-curOffset) - if bhdr.Size == 0 { - break - } - n, err := io.Copy(t, br) - if err != nil { - return err - } - curOffset = bhdr.Offset + n - } - return nil -} - -// BasicInfoHeader creates a tar header from basic file information. 
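The decimal length prefix written by formatPAXRecord above counts its own digits, so the size is estimated once and the record re-formatted when appending the digits changes the total (an estimate of 100, say, yields a 101-byte record and triggers the second pass). A runnable copy with a worked value:

    package main

    import (
        "fmt"
        "strconv"
    )

    func formatPAXRecord(k, v string) string {
        const padding = 3 // ' ', '=', and '\n'
        size := len(k) + len(v) + padding
        size += len(strconv.Itoa(size))
        record := fmt.Sprintf("%d %s=%s\n", size, k, v)
        if len(record) != size { // the size digits grew the record
            size = len(record)
            record = fmt.Sprintf("%d %s=%s\n", size, k, v)
        }
        return record
    }

    func main() {
        r := formatPAXRecord("mtime", "1350244992.023960108")
        fmt.Printf("%q len=%d\n", r, len(r)) // "30 mtime=...\n" len=30
    }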
-func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { - hdr := &tar.Header{ - Name: filepath.ToSlash(name), - Size: size, - Typeflag: tar.TypeReg, - ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), - ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), - AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), - CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()), - Winheaders: make(map[string]string), - } - hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR - hdr.Size = 0 - hdr.Typeflag = tar.TypeDir - } - return hdr -} - -// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. -// -// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. -// -// The additional Win32 metadata is: -// -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) -func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { - name = filepath.ToSlash(name) - hdr := BasicInfoHeader(name, size, fileInfo) - - // If r can be seeked, then this function is two-pass: pass 1 collects the - // tar header data, and pass 2 copies the data stream. If r cannot be - // seeked, then some header data (in particular EAs) will be silently lost. - var ( - restartPos int64 - err error - ) - sr, readTwice := r.(io.Seeker) - if readTwice { - if restartPos, err = sr.Seek(0, io.SeekCurrent); err != nil { - readTwice = false - } - } - - br := winio.NewBackupStreamReader(r) - var dataHdr *winio.BackupHeader - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupData: - hdr.Mode |= c_ISREG - if !readTwice { - dataHdr = bhdr - } - case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) - if err != nil { - return err - } - hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) - - case winio.BackupReparseData: - hdr.Mode |= c_ISLNK - hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) - rp, err := winio.DecodeReparsePoint(reparseBuffer) - if err != nil { - return err - } - if rp.IsMountPoint { - hdr.Winheaders[hdrMountPoint] = "1" - } - hdr.Linkname = rp.Target - - case winio.BackupEaData: - eab, err := ioutil.ReadAll(br) - if err != nil { - return err - } - eas, err := winio.DecodeExtendedAttributes(eab) - if err != nil { - return err - } - for _, ea := range eas { - // Use base64 encoding for the binary value. Note that there - // is no way to encode the EA's flags, since their use doesn't - // make any sense for persisted EAs. - hdr.Winheaders[hdrEaPrefix+ea.Name] = base64.StdEncoding.EncodeToString(ea.Value) - } - - case winio.BackupAlternateData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) - } - } - - err = t.WriteHeader(hdr) - if err != nil { - return err - } - - if readTwice { - // Get back to the data stream. 
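WriteTarFileFromBackupStream above carries the Win32 metadata in PAX vendor records: the keys stored in hdr.Winheaders receive the MSWINDOWS. prefix when the forked tar writer emits them. A small sketch of the resulting on-wire keys and value encodings; the attribute value and the EA name below are made up for illustration.

    package main

    import (
        "encoding/base64"
        "fmt"
        "strconv"
    )

    func main() {
        // FILE_ATTRIBUTE_READONLY|FILE_ATTRIBUTE_ARCHIVE = 0x21; attributes
        // travel as a decimal string, EA values as base64, per the code above.
        records := map[string]string{
            "MSWINDOWS.fileattr":        strconv.FormatUint(0x21, 10),
            "MSWINDOWS.xattr.SAMPLE.EA": base64.StdEncoding.EncodeToString([]byte{0xde, 0xad}),
        }
        for k, v := range records {
            fmt.Printf("%s=%s\n", k, v)
        }
    }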
- if _, err = sr.Seek(restartPos, io.SeekStart); err != nil { - return err - } - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if bhdr.Id == winio.BackupData { - dataHdr = bhdr - } - } - } - - if dataHdr != nil { - // A data stream was found. Copy the data. - if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { - if size != dataHdr.Size { - return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - } else { - err = copySparse(t, br) - if err != nil { - return err - } - } - } - - // Look for streams after the data stream. The only ones we handle are alternate data streams. - // Other streams may have metadata that could be serialized, but the tar header has already - // been written. In practice, this means that we don't get EA or TXF metadata. - for { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { - // Unsupported for now, since the size of the alternate stream is not present - // in the backup stream until after the data has been read. - return errors.New("tar of sparse alternate data streams is unsupported") - } - case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) - } - } - return nil -} - -// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by -// WriteTarFileFromBackupStream. -func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) { - name = hdr.Name - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - size = hdr.Size - } - fileInfo = &winio.FileBasicInfo{ - LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()), - LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()), - ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()), - CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()), - } - if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok { - attr, err := strconv.ParseUint(attrStr, 10, 32) - if err != nil { - return "", 0, nil, err - } - fileInfo.FileAttributes = uintptr(attr) - } else { - if hdr.Typeflag == tar.TypeDir { - fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY - } - } - return -} - -// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple -// tar file entries in order to collect all the alternate data streams for the file, it returns the next -// tar file that was not processed, or io.EOF is there are no more. 
-func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) { - bw := winio.NewBackupStreamWriter(w) - var sd []byte - var err error - // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written - // by this library will have raw binary for the security descriptor. - if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } - if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) - if err != nil { - return nil, err - } - } - if len(sd) != 0 { - bhdr := winio.BackupHeader{ - Id: winio.BackupSecurity, - Size: int64(len(sd)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(sd) - if err != nil { - return nil, err - } - } - var eas []winio.ExtendedAttribute - for k, v := range hdr.Winheaders { - if !strings.HasPrefix(k, hdrEaPrefix) { - continue - } - data, err := base64.StdEncoding.DecodeString(v) - if err != nil { - return nil, err - } - eas = append(eas, winio.ExtendedAttribute{ - Name: k[len(hdrEaPrefix):], - Value: data, - }) - } - if len(eas) != 0 { - eadata, err := winio.EncodeExtendedAttributes(eas) - if err != nil { - return nil, err - } - bhdr := winio.BackupHeader{ - Id: winio.BackupEaData, - Size: int64(len(eadata)), - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(eadata) - if err != nil { - return nil, err - } - } - if hdr.Typeflag == tar.TypeSymlink { - _, isMountPoint := hdr.Winheaders[hdrMountPoint] - rp := winio.ReparsePoint{ - Target: filepath.FromSlash(hdr.Linkname), - IsMountPoint: isMountPoint, - } - reparse := winio.EncodeReparsePoint(&rp) - bhdr := winio.BackupHeader{ - Id: winio.BackupReparseData, - Size: int64(len(reparse)), - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = bw.Write(reparse) - if err != nil { - return nil, err - } - } - if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { - bhdr := winio.BackupHeader{ - Id: winio.BackupData, - Size: hdr.Size, - } - err := bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } - // Copy all the alternate data streams and return the next non-ADS header. - for { - ahdr, err := t.Next() - if err != nil { - return nil, err - } - if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { - return ahdr, nil - } - bhdr := winio.BackupHeader{ - Id: winio.BackupAlternateData, - Size: ahdr.Size, - Name: ahdr.Name[len(hdr.Name):] + ":$DATA", - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } -} diff --git a/vendor/github.com/Microsoft/hcsshim/baselayer.go b/vendor/github.com/Microsoft/hcsshim/baselayer.go index 9babd4e18a..860185c357 100644 --- a/vendor/github.com/Microsoft/hcsshim/baselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/baselayer.go @@ -10,7 +10,7 @@ import ( ) type baseLayerWriter struct { - root string + root *os.File f *os.File bw *winio.BackupFileWriter err error @@ -26,10 +26,10 @@ type dirInfo struct { // reapplyDirectoryTimes reapplies directory modification, creation, etc. times // after processing of the directory tree has completed. The times are expected // to be ordered such that parent directories come before child directories. 
-func reapplyDirectoryTimes(dis []dirInfo) error { +func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { for i := range dis { di := &dis[len(dis)-i-1] // reverse order: process child directories first - f, err := winio.OpenForBackup(di.path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING) + f, err := openRelative(di.path, root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_OPEN, _FILE_DIRECTORY_FILE) if err != nil { return err } @@ -75,12 +75,6 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e w.hasUtilityVM = true } - path := filepath.Join(w.root, name) - path, err = makeLongAbsPath(path) - if err != nil { - return err - } - var f *os.File defer func() { if f != nil { @@ -88,27 +82,23 @@ func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err e } }() - createmode := uint32(syscall.CREATE_NEW) + extraFlags := uint32(0) if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - err := os.Mkdir(path, 0) - if err != nil && !os.IsExist(err) { - return err - } - createmode = syscall.OPEN_EXISTING + extraFlags |= _FILE_DIRECTORY_FILE if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.dirInfo = append(w.dirInfo, dirInfo{path, *fileInfo}) + w.dirInfo = append(w.dirInfo, dirInfo{name, *fileInfo}) } } mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode) + f, err = openRelative(name, w.root, mode, syscall.FILE_SHARE_READ, _FILE_CREATE, extraFlags) if err != nil { - return makeError(err, "Failed to OpenForBackup", path) + return makeError(err, "Failed to openRelative", name) } err = winio.SetFileBasicInfo(f, fileInfo) if err != nil { - return makeError(err, "Failed to SetFileBasicInfo", path) + return makeError(err, "Failed to SetFileBasicInfo", name) } w.f = f @@ -129,17 +119,7 @@ func (w *baseLayerWriter) AddLink(name string, target string) (err error) { return err } - linkpath, err := makeLongAbsPath(filepath.Join(w.root, name)) - if err != nil { - return err - } - - linktarget, err := makeLongAbsPath(filepath.Join(w.root, target)) - if err != nil { - return err - } - - return os.Link(linktarget, linkpath) + return linkRelative(target, w.root, name, w.root) } func (w *baseLayerWriter) Remove(name string) error { @@ -155,6 +135,10 @@ func (w *baseLayerWriter) Write(b []byte) (int, error) { } func (w *baseLayerWriter) Close() error { + defer func() { + w.root.Close() + w.root = nil + }() err := w.closeCurrentFile() if err != nil { return err @@ -162,18 +146,22 @@ func (w *baseLayerWriter) Close() error { if w.err == nil { // Restore the file times of all the directories, since they may have // been modified by creating child directories. 
- err = reapplyDirectoryTimes(w.dirInfo) + err = reapplyDirectoryTimes(w.root, w.dirInfo) if err != nil { return err } - err = ProcessBaseLayer(w.root) + err = ProcessBaseLayer(w.root.Name()) if err != nil { return err } if w.hasUtilityVM { - err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM")) + err := ensureNotReparsePointRelative("UtilityVM", w.root) + if err != nil { + return err + } + err = ProcessUtilityVMImage(filepath.Join(w.root.Name(), "UtilityVM")) if err != nil { return err } diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go index 236ba1fa30..b65953191c 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcsshim.go +++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go @@ -11,7 +11,7 @@ import ( "github.com/sirupsen/logrus" ) -//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go +//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go safeopen.go //sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree //sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 7e516f8a2e..90689cb1ee 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -1,323 +1,323 @@ -package hcsshim - -import ( - "encoding/json" - "net" - - "github.com/sirupsen/logrus" -) - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - VirtualNetwork string `json:",omitempty"` - VirtualNetworkName string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress net.IP `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - EnableInternalDNS bool `json:",omitempty"` - DisableICC bool `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` - IsRemoteEndpoint bool `json:",omitempty"` -} - -//SystemType represents the type of the system on which actions are done -type SystemType string - -// SystemType const -const ( - ContainerType SystemType = "Container" - VirtualMachineType SystemType = "VirtualMachine" - HostType SystemType = "Host" -) - -// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system -// Supported resource types are Network and Request Types are Add/Remove -type EndpointAttachDetachRequest struct { - ContainerID string `json:"ContainerId,omitempty"` - SystemType SystemType `json:"SystemType"` - CompartmentID uint16 `json:"CompartmentId,omitempty"` - VirtualNICName string `json:"VirtualNicName,omitempty"` -} - -// EndpointResquestResponse is object to get the endpoint request response -type EndpointResquestResponse struct { - Success bool - Error string -} - -// HNSEndpointRequest makes a HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - endpoint := &HNSEndpoint{} - err := hnsCall(method, "/endpoints/"+path, request, &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// HNSListEndpointRequest makes a HNS call to query the list of available endpoints -func HNSListEndpointRequest() ([]HNSEndpoint, error) { - var endpoint []HNSEndpoint - err := 
hnsCall("GET", "/endpoints/", "", &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} - -// HotAttachEndpoint makes a HCS Call to attach the endpoint to the container -func HotAttachEndpoint(containerID string, endpointID string) error { - return modifyNetworkEndpoint(containerID, endpointID, Add) -} - -// HotDetachEndpoint makes a HCS Call to detach the endpoint from the container -func HotDetachEndpoint(containerID string, endpointID string) error { - return modifyNetworkEndpoint(containerID, endpointID, Remove) -} - -// ModifyContainer corresponding to the container id, by sending a request -func modifyContainer(id string, request *ResourceModificationRequestResponse) error { - container, err := OpenContainer(id) - if err != nil { - if IsNotExist(err) { - return ErrComputeSystemDoesNotExist - } - return getInnerError(err) - } - defer container.Close() - err = container.Modify(request) - if err != nil { - if IsNotSupported(err) { - return ErrPlatformNotSupported - } - return getInnerError(err) - } - - return nil -} - -func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error { - requestMessage := &ResourceModificationRequestResponse{ - Resource: Network, - Request: request, - Data: endpointID, - } - err := modifyContainer(containerID, requestMessage) - - if err != nil { - return err - } - - return nil -} - -// GetHNSEndpointByID get the Endpoint by ID -func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) { - return HNSEndpointRequest("GET", endpointID, "") -} - -// GetHNSEndpointByName gets the endpoint filtered by Name -func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) { - hnsResponse, err := HNSListEndpointRequest() - if err != nil { - return nil, err - } - for _, hnsEndpoint := range hnsResponse { - if hnsEndpoint.Name == endpointName { - return &hnsEndpoint, nil - } - } - return nil, EndpointNotFoundError{EndpointName: endpointName} -} - -// Create Endpoint by sending EndpointRequest to HNS. 
TODO: Create a separate HNS interface to place all these methods -func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { - operation := "Create" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - return HNSEndpointRequest("POST", "", string(jsonString)) -} - -// Delete Endpoint by sending EndpointRequest to HNS -func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { - operation := "Delete" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - return HNSEndpointRequest("DELETE", endpoint.Id, "") -} - -// Update Endpoint -func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { - operation := "Update" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - jsonString, err := json.Marshal(endpoint) - if err != nil { - return nil, err - } - err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) - - return endpoint, err -} - -// ContainerHotAttach attaches an endpoint to a running container -func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error { - operation := "ContainerHotAttach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) - - return modifyNetworkEndpoint(containerID, endpoint.Id, Add) -} - -// ContainerHotDetach detaches an endpoint from a running container -func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error { - operation := "ContainerHotDetach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) - - return modifyNetworkEndpoint(containerID, endpoint.Id, Remove) -} - -// ApplyACLPolicy applies a set of ACL Policies on the Endpoint -func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { - operation := "ApplyACLPolicy" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - for _, policy := range policies { - if policy == nil { - continue - } - jsonString, err := json.Marshal(policy) - if err != nil { - return err - } - endpoint.Policies = append(endpoint.Policies, jsonString) - } - - _, err := endpoint.Update() - return err -} - -// ContainerAttach attaches an endpoint to container -func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { - operation := "ContainerAttach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - CompartmentID: compartmentID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// ContainerDetach detaches an endpoint from container -func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { - operation := "ContainerDetach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - ContainerID: containerID, - SystemType: ContainerType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", 
"/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// HostAttach attaches a nic on the host -func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { - operation := "HostAttach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - CompartmentID: compartmentID, - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) - -} - -// HostDetach detaches a nic on the host -func (endpoint *HNSEndpoint) HostDetach() error { - operation := "HostDetach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - SystemType: HostType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} - -// VirtualMachineNICAttach attaches a endpoint to a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { - operation := "VirtualMachineNicAttach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - requestMessage := &EndpointAttachDetachRequest{ - VirtualNICName: virtualMachineNICName, - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) -} - -// VirtualMachineNICDetach detaches a endpoint from a virtual machine -func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { - operation := "VirtualMachineNicDetach" - title := "HCSShim::HNSEndpoint::" + operation - logrus.Debugf(title+" id=%s", endpoint.Id) - - requestMessage := &EndpointAttachDetachRequest{ - SystemType: VirtualMachineType, - } - response := &EndpointResquestResponse{} - - jsonString, err := json.Marshal(requestMessage) - if err != nil { - return err - } - return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) -} +package hcsshim + +import ( + "encoding/json" + "net" + + "github.com/sirupsen/logrus" +) + +// HNSEndpoint represents a network endpoint in HNS +type HNSEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + VirtualNetwork string `json:",omitempty"` + VirtualNetworkName string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + MacAddress string `json:",omitempty"` + IPAddress net.IP `json:",omitempty"` + DNSSuffix string `json:",omitempty"` + DNSServerList string `json:",omitempty"` + GatewayAddress string `json:",omitempty"` + EnableInternalDNS bool `json:",omitempty"` + DisableICC bool `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` + IsRemoteEndpoint bool `json:",omitempty"` +} + +//SystemType represents the type of the system on which actions are done +type SystemType string + +// SystemType const +const ( + ContainerType SystemType = "Container" + VirtualMachineType SystemType = "VirtualMachine" + HostType SystemType = "Host" +) + +// EndpointAttachDetachRequest is the structure used to send request to the container to modify the system +// Supported resource types 
are Network, and the request types are Add/Remove
+type EndpointAttachDetachRequest struct {
+ ContainerID string `json:"ContainerId,omitempty"`
+ SystemType SystemType `json:"SystemType"`
+ CompartmentID uint16 `json:"CompartmentId,omitempty"`
+ VirtualNICName string `json:"VirtualNicName,omitempty"`
+}
+
+// EndpointResquestResponse is the object used to carry an endpoint request response
+type EndpointResquestResponse struct {
+ Success bool
+ Error string
+}
+
+// HNSEndpointRequest makes an HNS call to modify/query a network endpoint
+func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) {
+ endpoint := &HNSEndpoint{}
+ err := hnsCall(method, "/endpoints/"+path, request, &endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return endpoint, nil
+}
+
+// HNSListEndpointRequest makes an HNS call to query the list of available endpoints
+func HNSListEndpointRequest() ([]HNSEndpoint, error) {
+ var endpoint []HNSEndpoint
+ err := hnsCall("GET", "/endpoints/", "", &endpoint)
+ if err != nil {
+ return nil, err
+ }
+
+ return endpoint, nil
+}
+
+// HotAttachEndpoint makes an HCS call to attach the endpoint to the container
+func HotAttachEndpoint(containerID string, endpointID string) error {
+ return modifyNetworkEndpoint(containerID, endpointID, Add)
+}
+
+// HotDetachEndpoint makes an HCS call to detach the endpoint from the container
+func HotDetachEndpoint(containerID string, endpointID string) error {
+ return modifyNetworkEndpoint(containerID, endpointID, Remove)
+}
+
+// modifyContainer modifies the container with the given ID by sending it the supplied request
+func modifyContainer(id string, request *ResourceModificationRequestResponse) error {
+ container, err := OpenContainer(id)
+ if err != nil {
+ if IsNotExist(err) {
+ return ErrComputeSystemDoesNotExist
+ }
+ return getInnerError(err)
+ }
+ defer container.Close()
+ err = container.Modify(request)
+ if err != nil {
+ if IsNotSupported(err) {
+ return ErrPlatformNotSupported
+ }
+ return getInnerError(err)
+ }
+
+ return nil
+}
+
+func modifyNetworkEndpoint(containerID string, endpointID string, request RequestType) error {
+ requestMessage := &ResourceModificationRequestResponse{
+ Resource: Network,
+ Request: request,
+ Data: endpointID,
+ }
+ err := modifyContainer(containerID, requestMessage)
+
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// GetHNSEndpointByID gets the endpoint by ID
+func GetHNSEndpointByID(endpointID string) (*HNSEndpoint, error) {
+ return HNSEndpointRequest("GET", endpointID, "")
+}
+
+// GetHNSEndpointByName gets the endpoint filtered by Name
+func GetHNSEndpointByName(endpointName string) (*HNSEndpoint, error) {
+ hnsResponse, err := HNSListEndpointRequest()
+ if err != nil {
+ return nil, err
+ }
+ for _, hnsEndpoint := range hnsResponse {
+ if hnsEndpoint.Name == endpointName {
+ return &hnsEndpoint, nil
+ }
+ }
+ return nil, EndpointNotFoundError{EndpointName: endpointName}
+}
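For orientation, a minimal consumer-side sketch of the helpers above (not part of the vendored code; the endpoint name and container ID are hypothetical, and HNS exists only on Windows hosts):

package main

import (
    "log"

    "github.com/Microsoft/hcsshim"
)

func main() {
    // Look up an existing endpoint by its HNS name (hypothetical name).
    endpoint, err := hcsshim.GetHNSEndpointByName("my-endpoint")
    if err != nil {
        log.Fatal(err)
    }
    // Hot-add the endpoint to a running container...
    if err := hcsshim.HotAttachEndpoint("my-container-id", endpoint.Id); err != nil {
        log.Fatal(err)
    }
    // ...and hot-remove it again.
    if err := hcsshim.HotDetachEndpoint("my-container-id", endpoint.Id); err != nil {
        log.Fatal(err)
    }
}

+// Create Endpoint by sending EndpointRequest to HNS.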
TODO: Create a separate HNS interface to place all these methods +func (endpoint *HNSEndpoint) Create() (*HNSEndpoint, error) { + operation := "Create" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + return HNSEndpointRequest("POST", "", string(jsonString)) +} + +// Delete Endpoint by sending EndpointRequest to HNS +func (endpoint *HNSEndpoint) Delete() (*HNSEndpoint, error) { + operation := "Delete" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + return HNSEndpointRequest("DELETE", endpoint.Id, "") +} + +// Update Endpoint +func (endpoint *HNSEndpoint) Update() (*HNSEndpoint, error) { + operation := "Update" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + err = hnsCall("POST", "/endpoints/"+endpoint.Id, string(jsonString), &endpoint) + + return endpoint, err +} + +// ContainerHotAttach attaches an endpoint to a running container +func (endpoint *HNSEndpoint) ContainerHotAttach(containerID string) error { + operation := "ContainerHotAttach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) + + return modifyNetworkEndpoint(containerID, endpoint.Id, Add) +} + +// ContainerHotDetach detaches an endpoint from a running container +func (endpoint *HNSEndpoint) ContainerHotDetach(containerID string) error { + operation := "ContainerHotDetach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s, containerId=%s", endpoint.Id, containerID) + + return modifyNetworkEndpoint(containerID, endpoint.Id, Remove) +} + +// ApplyACLPolicy applies a set of ACL Policies on the Endpoint +func (endpoint *HNSEndpoint) ApplyACLPolicy(policies ...*ACLPolicy) error { + operation := "ApplyACLPolicy" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + for _, policy := range policies { + if policy == nil { + continue + } + jsonString, err := json.Marshal(policy) + if err != nil { + return err + } + endpoint.Policies = append(endpoint.Policies, jsonString) + } + + _, err := endpoint.Update() + return err +} + +// ContainerAttach attaches an endpoint to container +func (endpoint *HNSEndpoint) ContainerAttach(containerID string, compartmentID uint16) error { + operation := "ContainerAttach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + CompartmentID: compartmentID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// ContainerDetach detaches an endpoint from container +func (endpoint *HNSEndpoint) ContainerDetach(containerID string) error { + operation := "ContainerDetach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + ContainerID: containerID, + SystemType: ContainerType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", 
"/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// HostAttach attaches a nic on the host +func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { + operation := "HostAttach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + CompartmentID: compartmentID, + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) + +} + +// HostDetach detaches a nic on the host +func (endpoint *HNSEndpoint) HostDetach() error { + operation := "HostDetach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + SystemType: HostType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} + +// VirtualMachineNICAttach attaches a endpoint to a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICAttach(virtualMachineNICName string) error { + operation := "VirtualMachineNicAttach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + requestMessage := &EndpointAttachDetachRequest{ + VirtualNICName: virtualMachineNICName, + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) +} + +// VirtualMachineNICDetach detaches a endpoint from a virtual machine +func (endpoint *HNSEndpoint) VirtualMachineNICDetach() error { + operation := "VirtualMachineNicDetach" + title := "HCSShim::HNSEndpoint::" + operation + logrus.Debugf(title+" id=%s", endpoint.Id) + + requestMessage := &EndpointAttachDetachRequest{ + SystemType: VirtualMachineType, + } + response := &EndpointResquestResponse{} + + jsonString, err := json.Marshal(requestMessage) + if err != nil { + return err + } + return hnsCall("POST", "/endpoints/"+endpoint.Id+"/detach", string(jsonString), &response) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go index 04c1b59196..398583a4e4 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go @@ -1,141 +1,141 @@ -package hcsshim - -import ( - "encoding/json" - "net" - - "github.com/sirupsen/logrus" -) - -// Subnet is assoicated with a network and represents a list -// of subnets available to the network -type Subnet struct { - AddressPrefix string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` -} - -// MacPool is assoicated with a network and represents a list -// of macaddresses available to the network -type MacPool struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// HNSNetwork represents a network in HNS -type HNSNetwork struct { - Id string `json:"ID,omitempty"` - Name string `json:",omitempty"` - Type string `json:",omitempty"` - NetworkAdapterName string `json:",omitempty"` - SourceMac string `json:",omitempty"` - Policies []json.RawMessage 
`json:",omitempty"` - MacPools []MacPool `json:",omitempty"` - Subnets []Subnet `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - DNSServerCompartment uint32 `json:",omitempty"` - ManagementIP string `json:",omitempty"` - AutomaticDNS bool `json:",omitempty"` -} - -type hnsNetworkResponse struct { - Success bool - Error string - Output HNSNetwork -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - var network HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return &network, nil -} - -// HNSListNetworkRequest makes a HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - var network []HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return network, nil -} - -// GetHNSNetworkByID -func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) { - return HNSNetworkRequest("GET", networkID, "") -} - -// GetHNSNetworkName filtered by Name -func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) { - hsnnetworks, err := HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - for _, hnsnetwork := range hsnnetworks { - if hnsnetwork.Name == networkName { - return &hnsnetwork, nil - } - } - return nil, NetworkNotFoundError{NetworkName: networkName} -} - -// Create Network by sending NetworkRequest to HNS. -func (network *HNSNetwork) Create() (*HNSNetwork, error) { - operation := "Create" - title := "HCSShim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - jsonString, err := json.Marshal(network) - if err != nil { - return nil, err - } - return HNSNetworkRequest("POST", "", string(jsonString)) -} - -// Delete Network by sending NetworkRequest to HNS -func (network *HNSNetwork) Delete() (*HNSNetwork, error) { - operation := "Delete" - title := "HCSShim::HNSNetwork::" + operation - logrus.Debugf(title+" id=%s", network.Id) - - return HNSNetworkRequest("DELETE", network.Id, "") -} - -// Creates an endpoint on the Network. 
-func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint {
- return &HNSEndpoint{
- VirtualNetwork: network.Id,
- IPAddress: ipAddress,
- MacAddress: string(macAddress),
- }
-}
-
-func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
- operation := "CreateEndpoint"
- title := "HCSShim::HNSNetwork::" + operation
- logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id)
-
- endpoint.VirtualNetwork = network.Id
- return endpoint.Create()
-}
-
-func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) {
- operation := "CreateRemoteEndpoint"
- title := "HCSShim::HNSNetwork::" + operation
- logrus.Debugf(title+" id=%s", network.Id)
- endpoint.IsRemoteEndpoint = true
- return network.CreateEndpoint(endpoint)
-}
+package hcsshim
+
+import (
+ "encoding/json"
+ "net"
+
+ "github.com/sirupsen/logrus"
+)
+
+// Subnet is associated with a network and represents a list
+// of subnets available to the network
+type Subnet struct {
+ AddressPrefix string `json:",omitempty"`
+ GatewayAddress string `json:",omitempty"`
+ Policies []json.RawMessage `json:",omitempty"`
+}
+
+// MacPool is associated with a network and represents a list
+// of MAC addresses available to the network
+type MacPool struct {
+ StartMacAddress string `json:",omitempty"`
+ EndMacAddress string `json:",omitempty"`
+}
+
+// HNSNetwork represents a network in HNS
+type HNSNetwork struct {
+ Id string `json:"ID,omitempty"`
+ Name string `json:",omitempty"`
+ Type string `json:",omitempty"`
+ NetworkAdapterName string `json:",omitempty"`
+ SourceMac string `json:",omitempty"`
+ Policies []json.RawMessage `json:",omitempty"`
+ MacPools []MacPool `json:",omitempty"`
+ Subnets []Subnet `json:",omitempty"`
+ DNSSuffix string `json:",omitempty"`
+ DNSServerList string `json:",omitempty"`
+ DNSServerCompartment uint32 `json:",omitempty"`
+ ManagementIP string `json:",omitempty"`
+ AutomaticDNS bool `json:",omitempty"`
+}
+
+type hnsNetworkResponse struct {
+ Success bool
+ Error string
+ Output HNSNetwork
+}
+
+type hnsResponse struct {
+ Success bool
+ Error string
+ Output json.RawMessage
+}
+
+// HNSNetworkRequest makes a call into HNS to update/query a single network
+func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) {
+ var network HNSNetwork
+ err := hnsCall(method, "/networks/"+path, request, &network)
+ if err != nil {
+ return nil, err
+ }
+
+ return &network, nil
+}
+
+// HNSListNetworkRequest makes an HNS call to query the list of available networks
+func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) {
+ var network []HNSNetwork
+ err := hnsCall(method, "/networks/"+path, request, &network)
+ if err != nil {
+ return nil, err
+ }
+
+ return network, nil
+}
+
+// GetHNSNetworkByID gets a network by its ID
+func GetHNSNetworkByID(networkID string) (*HNSNetwork, error) {
+ return HNSNetworkRequest("GET", networkID, "")
+}
+
+// GetHNSNetworkByName gets the network filtered by Name
+func GetHNSNetworkByName(networkName string) (*HNSNetwork, error) {
+ hsnnetworks, err := HNSListNetworkRequest("GET", "", "")
+ if err != nil {
+ return nil, err
+ }
+ for _, hnsnetwork := range hsnnetworks {
+ if hnsnetwork.Name == networkName {
+ return &hnsnetwork, nil
+ }
+ }
+ return nil, NetworkNotFoundError{NetworkName: networkName}
+}
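A quick consumer-side sketch of the query helpers above (not part of the vendored code; "nat" is a hypothetical network name, and this only works on Windows hosts running HNS):

package main

import (
    "fmt"
    "log"

    "github.com/Microsoft/hcsshim"
)

func main() {
    // Enumerate every HNS network via "GET /networks/".
    networks, err := hcsshim.HNSListNetworkRequest("GET", "", "")
    if err != nil {
        log.Fatal(err)
    }
    for _, n := range networks {
        fmt.Printf("%s (%s, type %s)\n", n.Name, n.Id, n.Type)
    }
    // Fetch a single network by name.
    natNetwork, err := hcsshim.GetHNSNetworkByName("nat")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("management IP:", natNetwork.ManagementIP)
}

+// Create Network by sending NetworkRequest to HNS.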
+func (network *HNSNetwork) Create() (*HNSNetwork, error) { + operation := "Create" + title := "HCSShim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + return HNSNetworkRequest("POST", "", string(jsonString)) +} + +// Delete Network by sending NetworkRequest to HNS +func (network *HNSNetwork) Delete() (*HNSNetwork, error) { + operation := "Delete" + title := "HCSShim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + + return HNSNetworkRequest("DELETE", network.Id, "") +} + +// Creates an endpoint on the Network. +func (network *HNSNetwork) NewEndpoint(ipAddress net.IP, macAddress net.HardwareAddr) *HNSEndpoint { + return &HNSEndpoint{ + VirtualNetwork: network.Id, + IPAddress: ipAddress, + MacAddress: string(macAddress), + } +} + +func (network *HNSNetwork) CreateEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateEndpoint" + title := "HCSShim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s, endpointId=%s", network.Id, endpoint.Id) + + endpoint.VirtualNetwork = network.Id + return endpoint.Create() +} + +func (network *HNSNetwork) CreateRemoteEndpoint(endpoint *HNSEndpoint) (*HNSEndpoint, error) { + operation := "CreateRemoteEndpoint" + title := "HCSShim::HNSNetwork::" + operation + logrus.Debugf(title+" id=%s", network.Id) + endpoint.IsRemoteEndpoint = true + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go index 65b8e93d9b..bf860e9387 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicy.go @@ -1,94 +1,94 @@ -package hcsshim - -// Type of Request Support in ModifySystem -type PolicyType string - -// RequestType const -const ( - Nat PolicyType = "NAT" - ACL PolicyType = "ACL" - PA PolicyType = "PA" - VLAN PolicyType = "VLAN" - VSID PolicyType = "VSID" - VNet PolicyType = "VNET" - L2Driver PolicyType = "L2Driver" - Isolation PolicyType = "Isolation" - QOS PolicyType = "QOS" - OutboundNat PolicyType = "OutBoundNAT" - ExternalLoadBalancer PolicyType = "ELB" - Route PolicyType = "ROUTE" -) - -type NatPolicy struct { - Type PolicyType `json:"Type"` - Protocol string - InternalPort uint16 - ExternalPort uint16 -} - -type QosPolicy struct { - Type PolicyType `json:"Type"` - MaximumOutgoingBandwidthInBytes uint64 -} - -type IsolationPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint - VSID uint - InDefaultIsolation bool -} - -type VlanPolicy struct { - Type PolicyType `json:"Type"` - VLAN uint -} - -type VsidPolicy struct { - Type PolicyType `json:"Type"` - VSID uint -} - -type PaPolicy struct { - Type PolicyType `json:"Type"` - PA string `json:"PA"` -} - -type OutboundNatPolicy struct { - Policy - VIP string `json:"VIP,omitempty"` - Exceptions []string `json:"ExceptionList,omitempty"` -} - -type ActionType string -type DirectionType string -type RuleType string - -const ( - Allow ActionType = "Allow" - Block ActionType = "Block" - - In DirectionType = "In" - Out DirectionType = "Out" - - Host RuleType = "Host" - Switch RuleType = "Switch" -) - -type ACLPolicy struct { - Type PolicyType `json:"Type"` - Protocol uint16 - InternalPort uint16 - Action ActionType - Direction DirectionType - LocalAddresses string - RemoteAddresses string - LocalPort uint16 - RemotePort uint16 - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 - ServiceName string -} 
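Tying the pieces together, a hedged sketch that creates an endpoint on a network and applies the ACL schema shown in this hunk through HNSEndpoint.ApplyACLPolicy from earlier in the diff (the network and endpoint names and the rule values are invented; Windows-only):

package main

import (
    "log"

    "github.com/Microsoft/hcsshim"
)

func main() {
    network, err := hcsshim.GetHNSNetworkByName("nat") // hypothetical network
    if err != nil {
        log.Fatal(err)
    }
    // Create a fresh endpoint on the network, then block inbound TCP/8080 on it.
    endpoint, err := network.CreateEndpoint(&hcsshim.HNSEndpoint{Name: "web-ep"})
    if err != nil {
        log.Fatal(err)
    }
    acl := &hcsshim.ACLPolicy{
        Type:      hcsshim.ACL,
        Action:    hcsshim.Block,
        Direction: hcsshim.In,
        Protocol:  6, // TCP
        LocalPort: 8080,
    }
    if err := endpoint.ApplyACLPolicy(acl); err != nil {
        log.Fatal(err)
    }
}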
- -type Policy struct { - Type PolicyType `json:"Type"` -} +package hcsshim + +// Type of Request Support in ModifySystem +type PolicyType string + +// RequestType const +const ( + Nat PolicyType = "NAT" + ACL PolicyType = "ACL" + PA PolicyType = "PA" + VLAN PolicyType = "VLAN" + VSID PolicyType = "VSID" + VNet PolicyType = "VNET" + L2Driver PolicyType = "L2Driver" + Isolation PolicyType = "Isolation" + QOS PolicyType = "QOS" + OutboundNat PolicyType = "OutBoundNAT" + ExternalLoadBalancer PolicyType = "ELB" + Route PolicyType = "ROUTE" +) + +type NatPolicy struct { + Type PolicyType `json:"Type"` + Protocol string + InternalPort uint16 + ExternalPort uint16 +} + +type QosPolicy struct { + Type PolicyType `json:"Type"` + MaximumOutgoingBandwidthInBytes uint64 +} + +type IsolationPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint + VSID uint + InDefaultIsolation bool +} + +type VlanPolicy struct { + Type PolicyType `json:"Type"` + VLAN uint +} + +type VsidPolicy struct { + Type PolicyType `json:"Type"` + VSID uint +} + +type PaPolicy struct { + Type PolicyType `json:"Type"` + PA string `json:"PA"` +} + +type OutboundNatPolicy struct { + Policy + VIP string `json:"VIP,omitempty"` + Exceptions []string `json:"ExceptionList,omitempty"` +} + +type ActionType string +type DirectionType string +type RuleType string + +const ( + Allow ActionType = "Allow" + Block ActionType = "Block" + + In DirectionType = "In" + Out DirectionType = "Out" + + Host RuleType = "Host" + Switch RuleType = "Switch" +) + +type ACLPolicy struct { + Type PolicyType `json:"Type"` + Protocol uint16 + InternalPort uint16 + Action ActionType + Direction DirectionType + LocalAddresses string + RemoteAddresses string + LocalPort uint16 + RemotePort uint16 + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 + ServiceName string +} + +type Policy struct { + Type PolicyType `json:"Type"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go index bbd7e1edb0..ef1ccab16e 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go @@ -1,200 +1,200 @@ -package hcsshim - -import ( - "encoding/json" - - "github.com/sirupsen/logrus" -) - -// RoutePolicy is a structure defining schema for Route based Policy -type RoutePolicy struct { - Policy - DestinationPrefix string `json:"DestinationPrefix,omitempty"` - NextHop string `json:"NextHop,omitempty"` - EncapEnabled bool `json:"NeedEncap,omitempty"` -} - -// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy -type ELBPolicy struct { - LBPolicy - SourceVIP string `json:"SourceVIP,omitempty"` - VIPs []string `json:"VIPs,omitempty"` - ILB bool `json:"ILB,omitempty"` -} - -// LBPolicy is a structure defining schema for LoadBalancing based Policy -type LBPolicy struct { - Policy - Protocol uint16 `json:"Protocol,omitempty"` - InternalPort uint16 - ExternalPort uint16 -} - -// PolicyList is a structure defining schema for Policy list request -type PolicyList struct { - ID string `json:"ID,omitempty"` - EndpointReferences []string `json:"References,omitempty"` - Policies []json.RawMessage `json:"Policies,omitempty"` -} - -// HNSPolicyListRequest makes a call into HNS to update/query a single network -func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) { - var policy PolicyList - err := hnsCall(method, "/policylists/"+path, request, &policy) - if err != nil { - return nil, err - } - - return 
&policy, nil -} - -// HNSListPolicyListRequest gets all the policy list -func HNSListPolicyListRequest() ([]PolicyList, error) { - var plist []PolicyList - err := hnsCall("GET", "/policylists/", "", &plist) - if err != nil { - return nil, err - } - - return plist, nil -} - -// PolicyListRequest makes a HNS call to modify/query a network policy list -func PolicyListRequest(method, path, request string) (*PolicyList, error) { - policylist := &PolicyList{} - err := hnsCall(method, "/policylists/"+path, request, &policylist) - if err != nil { - return nil, err - } - - return policylist, nil -} - -// GetPolicyListByID get the policy list by ID -func GetPolicyListByID(policyListID string) (*PolicyList, error) { - return PolicyListRequest("GET", policyListID, "") -} - -// Create PolicyList by sending PolicyListRequest to HNS. -func (policylist *PolicyList) Create() (*PolicyList, error) { - operation := "Create" - title := "HCSShim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - jsonString, err := json.Marshal(policylist) - if err != nil { - return nil, err - } - return PolicyListRequest("POST", "", string(jsonString)) -} - -// Delete deletes PolicyList -func (policylist *PolicyList) Delete() (*PolicyList, error) { - operation := "Delete" - title := "HCSShim::PolicyList::" + operation - logrus.Debugf(title+" id=%s", policylist.ID) - - return PolicyListRequest("DELETE", policylist.ID, "") -} - -// AddEndpoint add an endpoint to a Policy List -func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "AddEndpoint" - title := "HCSShim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - // Add Endpoint to the Existing List - policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) - - return policylist.Create() -} - -// RemoveEndpoint removes an endpoint from the Policy List -func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) { - operation := "RemoveEndpoint" - title := "HCSShim::PolicyList::" + operation - logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id) - - _, err := policylist.Delete() - if err != nil { - return nil, err - } - - elementToRemove := "/endpoints/" + endpoint.Id - - var references []string - - for _, endpointReference := range policylist.EndpointReferences { - if endpointReference == elementToRemove { - continue - } - references = append(references, endpointReference) - } - policylist.EndpointReferences = references - return policylist.Create() -} - -// AddLoadBalancer policy list for the specified endpoints -func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) { - operation := "AddLoadBalancer" - title := "HCSShim::PolicyList::" + operation - logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort) - - policylist := &PolicyList{} - - elbPolicy := &ELBPolicy{ - SourceVIP: sourceVIP, - ILB: isILB, - } - - if len(vip) > 0 { - elbPolicy.VIPs = []string{vip} - } - elbPolicy.Type = ExternalLoadBalancer - elbPolicy.Protocol = protocol - elbPolicy.InternalPort = internalPort - elbPolicy.ExternalPort = externalPort - - for _, endpoint := range endpoints { - 
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
- }
-
- jsonString, err := json.Marshal(elbPolicy)
- if err != nil {
- return nil, err
- }
- policylist.Policies = append(policylist.Policies, jsonString)
- return policylist.Create()
-}
-
-// AddRoute adds route policy list for the specified endpoints
-func AddRoute(endpoints []HNSEndpoint, destinationPrefix string, nextHop string, encapEnabled bool) (*PolicyList, error) {
- operation := "AddRoute"
- title := "HCSShim::PolicyList::" + operation
- logrus.Debugf(title+" destinationPrefix:%s", destinationPrefix)
-
- policylist := &PolicyList{}
-
- rPolicy := &RoutePolicy{
- DestinationPrefix: destinationPrefix,
- NextHop: nextHop,
- EncapEnabled: encapEnabled,
- }
- rPolicy.Type = Route
-
- for _, endpoint := range endpoints {
- policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
- }
-
- jsonString, err := json.Marshal(rPolicy)
- if err != nil {
- return nil, err
- }
-
- policylist.Policies = append(policylist.Policies, jsonString)
- return policylist.Create()
-}
+package hcsshim
+
+import (
+ "encoding/json"
+
+ "github.com/sirupsen/logrus"
+)
+
+// RoutePolicy is a structure defining schema for Route based Policy
+type RoutePolicy struct {
+ Policy
+ DestinationPrefix string `json:"DestinationPrefix,omitempty"`
+ NextHop string `json:"NextHop,omitempty"`
+ EncapEnabled bool `json:"NeedEncap,omitempty"`
+}
+
+// ELBPolicy is a structure defining schema for ELB LoadBalancing based Policy
+type ELBPolicy struct {
+ LBPolicy
+ SourceVIP string `json:"SourceVIP,omitempty"`
+ VIPs []string `json:"VIPs,omitempty"`
+ ILB bool `json:"ILB,omitempty"`
+}
+
+// LBPolicy is a structure defining schema for LoadBalancing based Policy
+type LBPolicy struct {
+ Policy
+ Protocol uint16 `json:"Protocol,omitempty"`
+ InternalPort uint16
+ ExternalPort uint16
+}
+
+// PolicyList is a structure defining schema for Policy list request
+type PolicyList struct {
+ ID string `json:"ID,omitempty"`
+ EndpointReferences []string `json:"References,omitempty"`
+ Policies []json.RawMessage `json:"Policies,omitempty"`
+}
+
+// HNSPolicyListRequest makes a call into HNS to update/query a single policy list
+func HNSPolicyListRequest(method, path, request string) (*PolicyList, error) {
+ var policy PolicyList
+ err := hnsCall(method, "/policylists/"+path, request, &policy)
+ if err != nil {
+ return nil, err
+ }
+
+ return &policy, nil
+}
+
+// HNSListPolicyListRequest gets all of the policy lists
+func HNSListPolicyListRequest() ([]PolicyList, error) {
+ var plist []PolicyList
+ err := hnsCall("GET", "/policylists/", "", &plist)
+ if err != nil {
+ return nil, err
+ }
+
+ return plist, nil
+}
+
+// PolicyListRequest makes an HNS call to modify/query a network policy list
+func PolicyListRequest(method, path, request string) (*PolicyList, error) {
+ policylist := &PolicyList{}
+ err := hnsCall(method, "/policylists/"+path, request, &policylist)
+ if err != nil {
+ return nil, err
+ }
+
+ return policylist, nil
+}
+
+// GetPolicyListByID gets the policy list by ID
+func GetPolicyListByID(policyListID string) (*PolicyList, error) {
+ return PolicyListRequest("GET", policyListID, "")
+}
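Before the Create/Delete plumbing below, a usage sketch of the exported policy-list helpers in this file (the endpoint name, VIPs, and ports are invented; AddLoadBalancer and AddRoute are defined further down):

package main

import (
    "log"

    "github.com/Microsoft/hcsshim"
)

func main() {
    ep, err := hcsshim.GetHNSEndpointByName("web-ep") // hypothetical endpoint
    if err != nil {
        log.Fatal(err)
    }
    // Publish the endpoint behind an external load-balancer VIP:
    // external TCP port 8080 forwards to port 80 on the endpoint.
    pl, err := hcsshim.AddLoadBalancer([]hcsshim.HNSEndpoint{*ep}, false, "10.0.0.10", "10.0.0.100", 6, 80, 8080)
    if err != nil {
        log.Fatal(err)
    }
    defer pl.Delete()
    // Steer a remote prefix through a next hop for the same endpoint.
    if _, err := hcsshim.AddRoute([]hcsshim.HNSEndpoint{*ep}, "192.168.2.0/24", "10.0.0.1", false); err != nil {
        log.Fatal(err)
    }
}

+// Create PolicyList by sending PolicyListRequest to HNS.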
+func (policylist *PolicyList) Create() (*PolicyList, error) {
+ operation := "Create"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s", policylist.ID)
+ jsonString, err := json.Marshal(policylist)
+ if err != nil {
+ return nil, err
+ }
+ return PolicyListRequest("POST", "", string(jsonString))
+}
+
+// Delete deletes the PolicyList
+func (policylist *PolicyList) Delete() (*PolicyList, error) {
+ operation := "Delete"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s", policylist.ID)
+
+ return PolicyListRequest("DELETE", policylist.ID, "")
+}
+
+// AddEndpoint adds an endpoint to a Policy List
+func (policylist *PolicyList) AddEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+ operation := "AddEndpoint"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+ _, err := policylist.Delete()
+ if err != nil {
+ return nil, err
+ }
+
+ // Add Endpoint to the Existing List
+ policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id)
+
+ return policylist.Create()
+}
+
+// RemoveEndpoint removes an endpoint from the Policy List
+func (policylist *PolicyList) RemoveEndpoint(endpoint *HNSEndpoint) (*PolicyList, error) {
+ operation := "RemoveEndpoint"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" id=%s, endpointId:%s", policylist.ID, endpoint.Id)
+
+ _, err := policylist.Delete()
+ if err != nil {
+ return nil, err
+ }
+
+ elementToRemove := "/endpoints/" + endpoint.Id
+
+ var references []string
+
+ for _, endpointReference := range policylist.EndpointReferences {
+ if endpointReference == elementToRemove {
+ continue
+ }
+ references = append(references, endpointReference)
+ }
+ policylist.EndpointReferences = references
+ return policylist.Create()
+}
+
+// AddLoadBalancer adds a load balancer policy list for the specified endpoints
+func AddLoadBalancer(endpoints []HNSEndpoint, isILB bool, sourceVIP, vip string, protocol uint16, internalPort uint16, externalPort uint16) (*PolicyList, error) {
+ operation := "AddLoadBalancer"
+ title := "HCSShim::PolicyList::" + operation
+ logrus.Debugf(title+" endpointId=%v, isILB=%v, sourceVIP=%s, vip=%s, protocol=%v, internalPort=%v, externalPort=%v", endpoints, isILB, sourceVIP, vip, protocol, internalPort, externalPort)
+
+ policylist := &PolicyList{}
+
+ elbPolicy := &ELBPolicy{
+ SourceVIP: sourceVIP,
+ ILB: isILB,
+ }
+
+ if len(vip) > 0 {
+ elbPolicy.VIPs = []string{vip}
+ }
+ elbPolicy.Type = ExternalLoadBalancer
+ elbPolicy.Protocol = protocol
+ elbPolicy.InternalPort = internalPort
+ elbPolicy.ExternalPort = externalPort
+
+ for _, endpoint := range endpoints {
policylist.EndpointReferences = append(policylist.EndpointReferences, "/endpoints/"+endpoint.Id) + } + + jsonString, err := json.Marshal(rPolicy) + if err != nil { + return nil, err + } + + policylist.Policies = append(policylist.Policies, jsonString) + return policylist.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/importlayer.go b/vendor/github.com/Microsoft/hcsshim/importlayer.go index 3aed14376a..2742b9f750 100644 --- a/vendor/github.com/Microsoft/hcsshim/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/importlayer.go @@ -129,37 +129,39 @@ type legacyLayerWriterWrapper struct { } func (r *legacyLayerWriterWrapper) Close() error { - defer os.RemoveAll(r.root) + defer os.RemoveAll(r.root.Name()) + defer r.legacyLayerWriter.CloseRoots() err := r.legacyLayerWriter.Close() if err != nil { return err } - // Use the original path here because ImportLayer does not support long paths for the source in TP5. - // But do use a long path for the destination to work around another bug with directories - // with MAX_PATH - 12 < length < MAX_PATH. info := r.info - fullPath, err := makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID)) - if err != nil { - return err - } - info.HomeDir = "" - if err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths); err != nil { + if err = ImportLayer(info, r.destRoot.Name(), r.path, r.parentLayerPaths); err != nil { return err } + for _, name := range r.Tombstones { + if err = removeRelative(name, r.destRoot); err != nil && !os.IsNotExist(err) { + return err + } + } // Add any hard links that were collected. for _, lnk := range r.PendingLinks { - if err = os.Remove(lnk.Path); err != nil && !os.IsNotExist(err) { + if err = removeRelative(lnk.Path, r.destRoot); err != nil && !os.IsNotExist(err) { return err } - if err = os.Link(lnk.Target, lnk.Path); err != nil { + if err = linkRelative(lnk.Target, lnk.TargetRoot, lnk.Path, r.destRoot); err != nil { return err } } // Prepare the utility VM for use if one is present in the layer. if r.HasUtilityVM { - err = ProcessUtilityVMImage(filepath.Join(fullPath, "UtilityVM")) + err := ensureNotReparsePointRelative("UtilityVM", r.destRoot) + if err != nil { + return err + } + err = ProcessUtilityVMImage(filepath.Join(r.destRoot.Name(), "UtilityVM")) if err != nil { return err } @@ -173,8 +175,12 @@ func (r *legacyLayerWriterWrapper) Close() error { func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { if len(parentLayerPaths) == 0 { // This is a base layer. It gets imported differently. 
+ f, err := openRoot(filepath.Join(info.HomeDir, layerID))
+ if err != nil {
+ return nil, err
+ }
 return &baseLayerWriter{
- root: filepath.Join(info.HomeDir, layerID),
+ root: f,
 }, nil
 }

@@ -185,8 +191,12 @@ func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string)
 if err != nil {
 return nil, err
 }
+ w, err := newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID))
+ if err != nil {
+ return nil, err
+ }
 return &legacyLayerWriterWrapper{
- legacyLayerWriter: newLegacyLayerWriter(path, parentLayerPaths, filepath.Join(info.HomeDir, layerID)),
+ legacyLayerWriter: w,
 info: info,
 layerID: layerID,
 path: path,
diff --git a/vendor/github.com/Microsoft/hcsshim/legacy.go b/vendor/github.com/Microsoft/hcsshim/legacy.go
index 673a4f8795..0b23b6c4d0 100644
--- a/vendor/github.com/Microsoft/hcsshim/legacy.go
+++ b/vendor/github.com/Microsoft/hcsshim/legacy.go
@@ -127,7 +127,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error {
 // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat
 // which is called by filepath.Walk will fail when a filename contains
 // unicode characters. Skip the recycle bin regardless which is goodness.
- if strings.HasPrefix(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) {
+ if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() {
 return filepath.SkipDir
 }

@@ -336,59 +336,79 @@ func (r *legacyLayerReader) Close() error {
 type pendingLink struct {
 Path, Target string
+ TargetRoot *os.File
+}
+
+type pendingDir struct {
+ Path string
+ Root *os.File
 }

 type legacyLayerWriter struct {
- root string
- parentRoots []string
- destRoot string
- currentFile *os.File
- backupWriter *winio.BackupFileWriter
- tombstones []string
- pathFixed bool
- HasUtilityVM bool
- uvmDi []dirInfo
- addedFiles map[string]bool
- PendingLinks []pendingLink
+ root *os.File
+ destRoot *os.File
+ parentRoots []*os.File
+ currentFile *os.File
+ currentFileName string
+ currentFileRoot *os.File
+ backupWriter *winio.BackupFileWriter
+ Tombstones []string
+ HasUtilityVM bool
+ uvmDi []dirInfo
+ addedFiles map[string]bool
+ PendingLinks []pendingLink
+ pendingDirs []pendingDir
+ currentIsDir bool
 }
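A consumer-side sketch of the exported NewLayerWriter entry point whose internals this hunk reworks (the driver info, layer paths, file name, and metadata are invented; the payload must be in Win32 backup-stream format, produced here with winio; Windows-only):

package main

import (
    "log"

    winio "github.com/Microsoft/go-winio"
    "github.com/Microsoft/hcsshim"
)

func main() {
    info := hcsshim.DriverInfo{HomeDir: `C:\layers`} // hypothetical layer store
    w, err := hcsshim.NewLayerWriter(info, "layer-2", []string{`C:\layers\layer-1`})
    if err != nil {
        log.Fatal(err)
    }
    // Announce one file in the layer, then stream its contents as backup data.
    if err := w.Add(`Files\hello.txt`, &winio.FileBasicInfo{}); err != nil {
        log.Fatal(err)
    }
    bw := winio.NewBackupStreamWriter(w)
    payload := []byte("hello\n")
    if err := bw.WriteHeader(&winio.BackupHeader{Id: winio.BackupData, Size: int64(len(payload))}); err != nil {
        log.Fatal(err)
    }
    if _, err := bw.Write(payload); err != nil {
        log.Fatal(err)
    }
    if err := w.Close(); err != nil {
        log.Fatal(err)
    }
}

 // newLegacyLayerWriter returns a LayerWriter that can write the container layer
 // transport format to disk.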
-func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) *legacyLayerWriter { - return &legacyLayerWriter{ - root: root, - parentRoots: parentRoots, - destRoot: destRoot, - addedFiles: make(map[string]bool), +func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { + w = &legacyLayerWriter{ + addedFiles: make(map[string]bool), } -} - -func (w *legacyLayerWriter) init() error { - if !w.pathFixed { - path, err := makeLongAbsPath(w.root) + defer func() { if err != nil { - return err + w.CloseRoots() + w = nil } - for i, p := range w.parentRoots { - w.parentRoots[i], err = makeLongAbsPath(p) - if err != nil { - return err - } - } - destPath, err := makeLongAbsPath(w.destRoot) + }() + w.root, err = openRoot(root) + if err != nil { + return + } + w.destRoot, err = openRoot(destRoot) + if err != nil { + return + } + for _, r := range parentRoots { + f, err := openRoot(r) if err != nil { - return err + return w, err } - w.root = path - w.destRoot = destPath - w.pathFixed = true + w.parentRoots = append(w.parentRoots, f) } - return nil + return +} + +func (w *legacyLayerWriter) CloseRoots() { + if w.root != nil { + w.root.Close() + w.root = nil + } + if w.destRoot != nil { + w.destRoot.Close() + w.destRoot = nil + } + for i := range w.parentRoots { + w.parentRoots[i].Close() + } + w.parentRoots = nil } func (w *legacyLayerWriter) initUtilityVM() error { if !w.HasUtilityVM { - err := os.Mkdir(filepath.Join(w.destRoot, utilityVMPath), 0) + err := mkdirRelative(utilityVMPath, w.destRoot) if err != nil { return err } @@ -396,7 +416,7 @@ func (w *legacyLayerWriter) initUtilityVM() error { // clone the utility VM from the parent layer into this layer. Use hard // links to avoid unnecessary copying, since most of the files are // immutable. - err = cloneTree(filepath.Join(w.parentRoots[0], utilityVMFilesPath), filepath.Join(w.destRoot, utilityVMFilesPath), mutatedUtilityVMFiles) + err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) if err != nil { return fmt.Errorf("cloning the parent utility VM image failed: %s", err) } @@ -405,7 +425,40 @@ func (w *legacyLayerWriter) initUtilityVM() error { return nil } -func (w *legacyLayerWriter) reset() { +func (w *legacyLayerWriter) reset() error { + if w.currentIsDir { + r := w.currentFile + br := winio.NewBackupStreamReader(r) + // Seek to the beginning of the backup stream, skipping the fileattrs + if _, err := r.Seek(4, io.SeekStart); err != nil { + return err + } + + for { + bhdr, err := br.Next() + if err == io.EOF { + // end of backupstream data + break + } + if err != nil { + return err + } + switch bhdr.Id { + case winio.BackupReparseData: + // The current file is a `.$wcidirs$` metadata file that + // describes a directory reparse point. 
Delete the placeholder + // directory to prevent future files being added into the + // destination of the reparse point during the ImportLayer call + if err := removeRelative(w.currentFileName, w.currentFileRoot); err != nil { + return err + } + w.pendingDirs = append(w.pendingDirs, pendingDir{Path: w.currentFileName, Root: w.currentFileRoot}) + default: + // ignore all other stream types, as we only care about directory reparse points + } + } + w.currentIsDir = false + } if w.backupWriter != nil { w.backupWriter.Close() w.backupWriter = nil @@ -413,21 +466,21 @@ func (w *legacyLayerWriter) reset() { if w.currentFile != nil { w.currentFile.Close() w.currentFile = nil + w.currentFileName = "" + w.currentFileRoot = nil } + return nil } // copyFileWithMetadata copies a file using the backup/restore APIs in order to preserve metadata -func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { - createDisposition := uint32(syscall.CREATE_NEW) - if isDir { - err = os.Mkdir(destPath, 0) - if err != nil { - return nil, err - } - createDisposition = syscall.OPEN_EXISTING - } - - src, err := openFileOrDir(srcPath, syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, syscall.OPEN_EXISTING) +func copyFileWithMetadata(srcRoot, destRoot *os.File, subPath string, isDir bool) (fileInfo *winio.FileBasicInfo, err error) { + src, err := openRelative( + subPath, + srcRoot, + syscall.GENERIC_READ|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + _FILE_OPEN, + _FILE_OPEN_REPARSE_POINT) if err != nil { return nil, err } @@ -440,7 +493,17 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio return nil, err } - dest, err := openFileOrDir(destPath, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition) + extraFlags := uint32(0) + if isDir { + extraFlags |= _FILE_DIRECTORY_FILE + } + dest, err := openRelative( + subPath, + destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + _FILE_CREATE, + extraFlags) if err != nil { return nil, err } @@ -469,18 +532,21 @@ func copyFileWithMetadata(srcPath, destPath string, isDir bool) (fileInfo *winio // cloneTree clones a directory tree using hard links. It skips hard links for // the file names in the provided map and just copies those files. 
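Stepping back from the hunk for a moment: the reset logic above detects that a captured `.$wcidirs$` metadata file describes a directory reparse point by scanning its Win32 backup stream (after seeking past the 4-byte attribute prefix the writer stores). A standalone sketch of that scanning pattern using the public winio API (the input path is hypothetical, and a plain backup stream with no prefix is assumed):

package main

import (
    "io"
    "log"
    "os"

    winio "github.com/Microsoft/go-winio"
)

func main() {
    f, err := os.Open(`C:\tmp\stream.bin`) // hypothetical backup-stream capture
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()
    br := winio.NewBackupStreamReader(f)
    for {
        hdr, err := br.Next()
        if err == io.EOF {
            break // end of backup-stream data
        }
        if err != nil {
            log.Fatal(err)
        }
        if hdr.Id == winio.BackupReparseData {
            log.Println("found a reparse-point stream")
        }
    }
}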
-func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error { +func cloneTree(srcRoot *os.File, destRoot *os.File, subPath string, mutatedFiles map[string]bool) error { var di []dirInfo - err := filepath.Walk(srcPath, func(srcFilePath string, info os.FileInfo, err error) error { + err := ensureNotReparsePointRelative(subPath, srcRoot) + if err != nil { + return err + } + err = filepath.Walk(filepath.Join(srcRoot.Name(), subPath), func(srcFilePath string, info os.FileInfo, err error) error { if err != nil { return err } - relPath, err := filepath.Rel(srcPath, srcFilePath) + relPath, err := filepath.Rel(srcRoot.Name(), srcFilePath) if err != nil { return err } - destFilePath := filepath.Join(destPath, relPath) fileAttributes := info.Sys().(*syscall.Win32FileAttributeData).FileAttributes // Directories, reparse points, and files that will be mutated during @@ -492,15 +558,15 @@ func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error { isDir := fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 if isDir || isReparsePoint || mutatedFiles[relPath] { - fi, err := copyFileWithMetadata(srcFilePath, destFilePath, isDir) + fi, err := copyFileWithMetadata(srcRoot, destRoot, relPath, isDir) if err != nil { return err } if isDir && !isReparsePoint { - di = append(di, dirInfo{path: destFilePath, fileInfo: *fi}) + di = append(di, dirInfo{path: relPath, fileInfo: *fi}) } } else { - err = os.Link(srcFilePath, destFilePath) + err = linkRelative(relPath, srcRoot, relPath, destRoot) if err != nil { return err } @@ -518,13 +584,11 @@ func cloneTree(srcPath, destPath string, mutatedFiles map[string]bool) error { return err } - return reapplyDirectoryTimes(di) + return reapplyDirectoryTimes(destRoot, di) } func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - w.reset() - err := w.init() - if err != nil { + if err := w.reset(); err != nil { return err } @@ -532,6 +596,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return w.initUtilityVM() } + name = filepath.Clean(name) if hasPathPrefix(name, utilityVMPath) { if !w.HasUtilityVM { return errors.New("missing UtilityVM directory") @@ -539,10 +604,9 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { return errors.New("invalid UtilityVM layer") } - path := filepath.Join(w.destRoot, name) - createDisposition := uint32(syscall.OPEN_EXISTING) + createDisposition := uint32(_FILE_OPEN) if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - st, err := os.Lstat(path) + st, err := lstatRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { return err } @@ -550,37 +614,44 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro // Delete the existing file/directory if it is not the same type as this directory. 
existingAttr := st.Sys().(*syscall.Win32FileAttributeData).FileAttributes if (uint32(fileInfo.FileAttributes)^existingAttr)&(syscall.FILE_ATTRIBUTE_DIRECTORY|syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { - if err = os.RemoveAll(path); err != nil { + if err = removeAllRelative(name, w.destRoot); err != nil { return err } st = nil } } if st == nil { - if err = os.Mkdir(path, 0); err != nil { + if err = mkdirRelative(name, w.destRoot); err != nil { return err } } if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - w.uvmDi = append(w.uvmDi, dirInfo{path: path, fileInfo: *fileInfo}) + w.uvmDi = append(w.uvmDi, dirInfo{path: name, fileInfo: *fileInfo}) } } else { // Overwrite any existing hard link. - err = os.Remove(path) + err := removeRelative(name, w.destRoot) if err != nil && !os.IsNotExist(err) { return err } - createDisposition = syscall.CREATE_NEW + createDisposition = _FILE_CREATE } - f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, createDisposition) + f, err := openRelative( + name, + w.destRoot, + syscall.GENERIC_READ|syscall.GENERIC_WRITE|winio.WRITE_DAC|winio.WRITE_OWNER|winio.ACCESS_SYSTEM_SECURITY, + syscall.FILE_SHARE_READ, + createDisposition, + _FILE_OPEN_REPARSE_POINT, + ) if err != nil { return err } defer func() { if f != nil { f.Close() - os.Remove(path) + removeRelative(name, w.destRoot) } }() @@ -591,28 +662,31 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro w.backupWriter = winio.NewBackupFileWriter(f, true) w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.destRoot w.addedFiles[name] = true f = nil return nil } - path := filepath.Join(w.root, name) + fname := name if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - err := os.Mkdir(path, 0) + err := mkdirRelative(name, w.root) if err != nil { return err } - path += ".$wcidirs$" + fname += ".$wcidirs$" + w.currentIsDir = true } - f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.CREATE_NEW) + f, err := openRelative(fname, w.root, syscall.GENERIC_READ|syscall.GENERIC_WRITE, syscall.FILE_SHARE_READ, _FILE_CREATE, 0) if err != nil { return err } defer func() { if f != nil { f.Close() - os.Remove(path) + removeRelative(fname, w.root) } }() @@ -634,19 +708,20 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro } w.currentFile = f + w.currentFileName = name + w.currentFileRoot = w.root w.addedFiles[name] = true f = nil return nil } func (w *legacyLayerWriter) AddLink(name string, target string) error { - w.reset() - err := w.init() - if err != nil { + if err := w.reset(); err != nil { return err } - var roots []string + target = filepath.Clean(target) + var roots []*os.File if hasPathPrefix(target, filesPath) { // Look for cross-layer hard link targets in the parent layers, since // nothing is in the destination path yet. @@ -655,7 +730,7 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { // Since the utility VM is fully cloned into the destination path // already, look for cross-layer hard link targets directly in the // destination path. - roots = []string{w.destRoot} + roots = []*os.File{w.destRoot} } if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { @@ -664,12 +739,12 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { // Find to try the target of the link in a previously added file. 
If that // fails, search in parent layers. - var selectedRoot string + var selectedRoot *os.File if _, ok := w.addedFiles[target]; ok { selectedRoot = w.destRoot } else { for _, r := range roots { - if _, err = os.Lstat(filepath.Join(r, target)); err != nil { + if _, err := lstatRelative(target, r); err != nil { if !os.IsNotExist(err) { return err } @@ -678,22 +753,25 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { break } } - if selectedRoot == "" { + if selectedRoot == nil { return fmt.Errorf("failed to find link target for '%s' -> '%s'", name, target) } } + // The link can't be written until after the ImportLayer call. w.PendingLinks = append(w.PendingLinks, pendingLink{ - Path: filepath.Join(w.destRoot, name), - Target: filepath.Join(selectedRoot, target), + Path: name, + Target: target, + TargetRoot: selectedRoot, }) w.addedFiles[name] = true return nil } func (w *legacyLayerWriter) Remove(name string) error { + name = filepath.Clean(name) if hasPathPrefix(name, filesPath) { - w.tombstones = append(w.tombstones, name[len(filesPath)+1:]) + w.Tombstones = append(w.Tombstones, name) } else if hasPathPrefix(name, utilityVMFilesPath) { err := w.initUtilityVM() if err != nil { @@ -702,11 +780,10 @@ func (w *legacyLayerWriter) Remove(name string) error { // Make sure the path exists; os.RemoveAll will not fail if the file is // already gone, and this needs to be a fatal error for diagnostics // purposes. - path := filepath.Join(w.destRoot, name) - if _, err := os.Lstat(path); err != nil { + if _, err := lstatRelative(name, w.destRoot); err != nil { return err } - err = os.RemoveAll(path) + err = removeAllRelative(name, w.destRoot) if err != nil { return err } @@ -728,28 +805,20 @@ func (w *legacyLayerWriter) Write(b []byte) (int, error) { } func (w *legacyLayerWriter) Close() error { - w.reset() - err := w.init() - if err != nil { + if err := w.reset(); err != nil { return err } - tf, err := os.Create(filepath.Join(w.root, "tombstones.txt")) - if err != nil { - return err - } - defer tf.Close() - _, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n")) - if err != nil { + if err := removeRelative("tombstones.txt", w.root); err != nil && !os.IsNotExist(err) { return err } - for _, t := range w.tombstones { - _, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n")) + for _, pd := range w.pendingDirs { + err := mkdirRelative(pd.Path, pd.Root) if err != nil { return err } } if w.HasUtilityVM { - err = reapplyDirectoryTimes(w.uvmDi) + err := reapplyDirectoryTimes(w.destRoot, w.uvmDi) if err != nil { return err } diff --git a/vendor/github.com/Microsoft/hcsshim/legacy18.go b/vendor/github.com/Microsoft/hcsshim/legacy18.go index 578552f913..0f593e8aba 100644 --- a/vendor/github.com/Microsoft/hcsshim/legacy18.go +++ b/vendor/github.com/Microsoft/hcsshim/legacy18.go @@ -1,7 +1,7 @@ -// +build !go1.9 - -package hcsshim - -// Due to a bug in go1.8 and before, directory reparse points need to be skipped -// during filepath.Walk. This is fixed in go1.9 -var shouldSkipDirectoryReparse = true +// +build !go1.9 + +package hcsshim + +// Due to a bug in go1.8 and before, directory reparse points need to be skipped +// during filepath.Walk. 
This is fixed in go1.9
+var shouldSkipDirectoryReparse = true
diff --git a/vendor/github.com/Microsoft/hcsshim/legacy19.go b/vendor/github.com/Microsoft/hcsshim/legacy19.go
index 6aa1dc0584..fb0b7644fb 100644
--- a/vendor/github.com/Microsoft/hcsshim/legacy19.go
+++ b/vendor/github.com/Microsoft/hcsshim/legacy19.go
@@ -1,7 +1,7 @@
-// +build go1.9
-
-package hcsshim
-
-// Due to a bug in go1.8 and before, directory reparse points need to be skipped
-// during filepath.Walk. This is fixed in go1.9
-var shouldSkipDirectoryReparse = false
+// +build go1.9
+
+package hcsshim
+
+// Due to a bug in go1.8 and before, directory reparse points need to be skipped
+// during filepath.Walk. This is fixed in go1.9
+var shouldSkipDirectoryReparse = false
diff --git a/vendor/github.com/Microsoft/hcsshim/safeopen.go b/vendor/github.com/Microsoft/hcsshim/safeopen.go
new file mode 100644
index 0000000000..5356456b90
--- /dev/null
+++ b/vendor/github.com/Microsoft/hcsshim/safeopen.go
@@ -0,0 +1,427 @@
+package hcsshim
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+
+ winio "github.com/Microsoft/go-winio"
+)
+
+//sys ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile
+//sys ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile
+//sys rtlNtStatusToDosError(status uint32) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys localAlloc(flags uint32, size int) (ptr uintptr) = kernel32.LocalAlloc
+//sys localFree(ptr uintptr) = kernel32.LocalFree
+
+type ioStatusBlock struct {
+ Status, Information uintptr
+}
+
+type objectAttributes struct {
+ Length uintptr
+ RootDirectory uintptr
+ ObjectName uintptr
+ Attributes uintptr
+ SecurityDescriptor uintptr
+ SecurityQoS uintptr
+}
+
+type unicodeString struct {
+ Length uint16
+ MaximumLength uint16
+ Buffer uintptr
+}
+
+type fileLinkInformation struct {
+ ReplaceIfExists bool
+ RootDirectory uintptr
+ FileNameLength uint32
+ FileName [1]uint16
+}
+
+type fileDispositionInformationEx struct {
+ Flags uintptr
+}
+
+const (
+ _FileLinkInformation = 11
+ _FileDispositionInformationEx = 64
+
+ _FILE_READ_ATTRIBUTES = 0x0080
+ _FILE_WRITE_ATTRIBUTES = 0x0100
+ _DELETE = 0x10000
+
+ _FILE_OPEN = 1
+ _FILE_CREATE = 2
+
+ _FILE_DIRECTORY_FILE = 0x00000001
+ _FILE_SYNCHRONOUS_IO_NONALERT = 0x00000020
+ _FILE_DELETE_ON_CLOSE = 0x00001000
+ _FILE_OPEN_FOR_BACKUP_INTENT = 0x00004000
+ _FILE_OPEN_REPARSE_POINT = 0x00200000
+
+ _FILE_DISPOSITION_DELETE = 0x00000001
+
+ _OBJ_DONT_REPARSE = 0x1000
+
+ _STATUS_REPARSE_POINT_ENCOUNTERED = 0xC000050B
+)
+
+func openRoot(path string) (*os.File, error) {
+ longpath, err := makeLongAbsPath(path)
+ if err != nil {
+ return nil, err
+ }
+ return winio.OpenForBackup(longpath, syscall.GENERIC_READ, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, syscall.OPEN_EXISTING)
+}
+
+func ntRelativePath(path string) ([]uint16, error) {
+ path = filepath.Clean(path)
+ if strings.Contains(path, ":") {
+ // Since alternate data streams must follow the file they
+ // are attached to, finding one here (out of order) is invalid.
+ return nil, errors.New("path contains invalid character `:`") + } + fspath := filepath.FromSlash(path) + if len(fspath) > 0 && fspath[0] == '\\' { + return nil, errors.New("expected relative path") + } + + path16 := utf16.Encode(([]rune)(fspath)) + if len(path16) > 32767 { + return nil, syscall.ENAMETOOLONG + } + + return path16, nil +} + +// openRelativeInternal opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelativeInternal(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + var ( + h uintptr + iosb ioStatusBlock + oa objectAttributes + ) + + path16, err := ntRelativePath(path) + if err != nil { + return nil, err + } + + if root == nil || root.Fd() == 0 { + return nil, errors.New("missing root directory") + } + + upathBuffer := localAlloc(0, int(unsafe.Sizeof(unicodeString{}))+len(path16)*2) + defer localFree(upathBuffer) + + upath := (*unicodeString)(unsafe.Pointer(upathBuffer)) + upath.Length = uint16(len(path16) * 2) + upath.MaximumLength = upath.Length + upath.Buffer = upathBuffer + unsafe.Sizeof(*upath) + copy((*[32768]uint16)(unsafe.Pointer(upath.Buffer))[:], path16) + + oa.Length = unsafe.Sizeof(oa) + oa.ObjectName = upathBuffer + oa.RootDirectory = uintptr(root.Fd()) + oa.Attributes = _OBJ_DONT_REPARSE + status := ntCreateFile( + &h, + accessMask|syscall.SYNCHRONIZE, + &oa, + &iosb, + nil, + 0, + shareFlags, + createDisposition, + _FILE_OPEN_FOR_BACKUP_INTENT|_FILE_SYNCHRONOUS_IO_NONALERT|flags, + nil, + 0, + ) + if status != 0 { + return nil, rtlNtStatusToDosError(status) + } + + fullPath, err := makeLongAbsPath(filepath.Join(root.Name(), path)) + if err != nil { + syscall.Close(syscall.Handle(h)) + return nil, err + } + + return os.NewFile(h, fullPath), nil +} + +// openRelative opens a relative path from the given root, failing if +// any of the intermediate path components are reparse points. +func openRelative(path string, root *os.File, accessMask uint32, shareFlags uint32, createDisposition uint32, flags uint32) (*os.File, error) { + f, err := openRelativeInternal(path, root, accessMask, shareFlags, createDisposition, flags) + if err != nil { + err = &os.PathError{Op: "open", Path: filepath.Join(root.Name(), path), Err: err} + } + return f, err +} + +// linkRelative creates a hard link from oldname to newname (relative to oldroot +// and newroot), failing if any of the intermediate path components are reparse +// points. +func linkRelative(oldname string, oldroot *os.File, newname string, newroot *os.File) error { + // Open the old file. + oldf, err := openRelativeInternal( + oldname, + oldroot, + syscall.FILE_WRITE_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_OPEN, + 0, + ) + if err != nil { + return &os.LinkError{Op: "link", Old: filepath.Join(oldroot.Name(), oldname), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer oldf.Close() + + // Open the parent of the new file. + var parent *os.File + parentPath := filepath.Dir(newname) + if parentPath != "." 
{ + parent, err = openRelativeInternal( + parentPath, + newroot, + syscall.GENERIC_READ, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_OPEN, + _FILE_DIRECTORY_FILE) + if err != nil { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: err} + } + defer parent.Close() + + fi, err := winio.GetFileBasicInfo(parent) + if err != nil { + return err + } + if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: rtlNtStatusToDosError(_STATUS_REPARSE_POINT_ENCOUNTERED)} + } + + } else { + parent = newroot + } + + // Issue an NT call to create the link. This will be safe because NT will + // not open any more directories to create the link, so it cannot walk any + // more reparse points. + newbase := filepath.Base(newname) + newbase16, err := ntRelativePath(newbase) + if err != nil { + return err + } + + size := int(unsafe.Offsetof(fileLinkInformation{}.FileName)) + len(newbase16)*2 + linkinfoBuffer := localAlloc(0, size) + defer localFree(linkinfoBuffer) + linkinfo := (*fileLinkInformation)(unsafe.Pointer(linkinfoBuffer)) + linkinfo.RootDirectory = parent.Fd() + linkinfo.FileNameLength = uint32(len(newbase16) * 2) + copy((*[32768]uint16)(unsafe.Pointer(&linkinfo.FileName[0]))[:], newbase16) + + var iosb ioStatusBlock + status := ntSetInformationFile( + oldf.Fd(), + &iosb, + linkinfoBuffer, + uint32(size), + _FileLinkInformation, + ) + if status != 0 { + return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(parent.Name(), newbase), Err: rtlNtStatusToDosError(status)} + } + + return nil +} + +// deleteOnClose marks a file to be deleted when the handle is closed. +func deleteOnClose(f *os.File) error { + disposition := fileDispositionInformationEx{Flags: _FILE_DISPOSITION_DELETE} + var iosb ioStatusBlock + status := ntSetInformationFile( + f.Fd(), + &iosb, + uintptr(unsafe.Pointer(&disposition)), + uint32(unsafe.Sizeof(disposition)), + _FileDispositionInformationEx, + ) + if status != 0 { + return rtlNtStatusToDosError(status) + } + return nil +} + +// clearReadOnly clears the readonly attribute on a file. +func clearReadOnly(f *os.File) error { + bi, err := winio.GetFileBasicInfo(f) + if err != nil { + return err + } + if bi.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY == 0 { + return nil + } + sbi := winio.FileBasicInfo{ + FileAttributes: bi.FileAttributes &^ syscall.FILE_ATTRIBUTE_READONLY, + } + if sbi.FileAttributes == 0 { + sbi.FileAttributes = syscall.FILE_ATTRIBUTE_NORMAL + } + return winio.SetFileBasicInfo(f, &sbi) +} + +// removeRelative removes a file or directory relative to a root, failing if any +// intermediate path components are reparse points. +func removeRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + _FILE_READ_ATTRIBUTES|_FILE_WRITE_ATTRIBUTES|_DELETE, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_OPEN, + _FILE_OPEN_REPARSE_POINT) + if err == nil { + defer f.Close() + err = deleteOnClose(f) + if err == syscall.ERROR_ACCESS_DENIED { + // Maybe the file is marked readonly. Clear the bit and retry. 
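The retry in removeRelative continues below: NT refuses FILE_DISPOSITION_DELETE on a read-only file, so the code strips the attribute and tries once more. A portable analogue of that clear-and-retry pattern, with a helper name of my choosing:

```go
package main

import (
	"fmt"
	"os"
)

// removeRetryReadonly deletes path; if that fails with a permission
// error (e.g. the read-only attribute on Windows), it clears the
// attribute via Chmod and retries once. On Unix the first Remove
// usually succeeds anyway, since directory permissions govern deletes.
func removeRetryReadonly(path string) error {
	err := os.Remove(path)
	if err == nil || !os.IsPermission(err) {
		return err
	}
	if chErr := os.Chmod(path, 0o644); chErr != nil {
		return err // report the original failure
	}
	return os.Remove(path)
}

func main() {
	f, _ := os.CreateTemp("", "demo")
	f.Close()
	os.Chmod(f.Name(), 0o444) // simulate a read-only file
	fmt.Println(removeRetryReadonly(f.Name()))
}
```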
+ clearReadOnly(f) + err = deleteOnClose(f) + } + } + if err != nil { + return &os.PathError{Op: "remove", Path: filepath.Join(root.Name(), path), Err: err} + } + return nil +} + +// removeAllRelative removes a directory tree relative to a root, failing if any +// intermediate path components are reparse points. +func removeAllRelative(path string, root *os.File) error { + fi, err := lstatRelative(path, root) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + fileAttributes := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + if fileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY == 0 || fileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + // If this is a reparse point, it can't have children. Simple remove will do. + err := removeRelative(path, root) + if err == nil || os.IsNotExist(err) { + return nil + } + return err + } + + // It is necessary to use os.Open as Readdirnames does not work with + // openRelative. This is safe because the above lstatrelative fails + // if the target is outside the root, and we know this is not a + // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. + fd, err := os.Open(filepath.Join(root.Name(), path)) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + return err + } + + // Remove contents & return first error. + for { + names, err1 := fd.Readdirnames(100) + for _, name := range names { + err1 := removeAllRelative(path+string(os.PathSeparator)+name, root) + if err == nil { + err = err1 + } + } + if err1 == io.EOF { + break + } + // If Readdirnames returned an error, use it. + if err == nil { + err = err1 + } + if len(names) == 0 { + break + } + } + fd.Close() + + // Remove directory. + err1 := removeRelative(path, root) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + if err == nil { + err = err1 + } + return err +} + +// mkdirRelative creates a directory relative to a root, failing if any +// intermediate path components are reparse points. +func mkdirRelative(path string, root *os.File) error { + f, err := openRelativeInternal( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_CREATE, + _FILE_DIRECTORY_FILE) + if err == nil { + f.Close() + } else { + err = &os.PathError{Op: "mkdir", Path: filepath.Join(root.Name(), path), Err: err} + } + return err +} + +// lstatRelative performs a stat operation on a file relative to a root, failing +// if any intermediate path components are reparse points. +func lstatRelative(path string, root *os.File) (os.FileInfo, error) { + f, err := openRelativeInternal( + path, + root, + _FILE_READ_ATTRIBUTES, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_OPEN, + _FILE_OPEN_REPARSE_POINT) + if err != nil { + return nil, &os.PathError{Op: "stat", Path: filepath.Join(root.Name(), path), Err: err} + } + defer f.Close() + return f.Stat() +} + +// ensureNotReparsePointRelative validates that a given file (relative to a +// root) and all intermediate path components are not a reparse points. +func ensureNotReparsePointRelative(path string, root *os.File) error { + // Perform an open with OBJ_DONT_REPARSE but without specifying FILE_OPEN_REPARSE_POINT. 
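The removeAllRelative function above follows the shape of os.RemoveAll in the standard library: lstat first, short-circuit non-directories and reparse points, then delete children in Readdirnames batches of 100 so a huge directory never has to be listed in one call. A portable sketch of that batching loop, without the reparse-point guards:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// removeTree deletes path recursively, reading directory entries in
// batches of 100 the way removeAllRelative does.
func removeTree(path string) error {
	fi, err := os.Lstat(path)
	if err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	if !fi.IsDir() {
		return os.Remove(path) // files and symlinks have no children
	}
	fd, err := os.Open(path)
	if err != nil {
		return err
	}
	for {
		names, rdErr := fd.Readdirnames(100)
		for _, name := range names {
			if err := removeTree(filepath.Join(path, name)); err != nil {
				fd.Close()
				return err
			}
		}
		if rdErr == io.EOF || len(names) == 0 {
			break
		}
		if rdErr != nil {
			fd.Close()
			return rdErr
		}
	}
	fd.Close()
	return os.Remove(path) // directory is empty now
}

func main() {
	dir, _ := os.MkdirTemp("", "tree")
	os.MkdirAll(filepath.Join(dir, "a", "b"), 0o755)
	os.WriteFile(filepath.Join(dir, "a", "b", "f.txt"), []byte("x"), 0o644)
	fmt.Println(removeTree(dir))
}
```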
+ f, err := openRelative( + path, + root, + 0, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + _FILE_OPEN, + 0) + if err != nil { + return err + } + f.Close() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/zhcsshim.go b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go index 5d1a851ae8..5123e8d8e8 100644 --- a/vendor/github.com/Microsoft/hcsshim/zhcsshim.go +++ b/vendor/github.com/Microsoft/hcsshim/zhcsshim.go @@ -41,6 +41,8 @@ var ( modole32 = windows.NewLazySystemDLL("ole32.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") @@ -94,6 +96,11 @@ var ( procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") procHNSCall = modvmcompute.NewProc("HNSCall") + procNtCreateFile = modntdll.NewProc("NtCreateFile") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") ) func coTaskMemFree(buffer unsafe.Pointer) { @@ -1040,3 +1047,34 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) } return } + +func ntCreateFile(handle *uintptr, accessMask uint32, oa *objectAttributes, iosb *ioStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) { + r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(accessMask), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(fileAttributes), uintptr(shareAccess), uintptr(createDisposition), uintptr(createOptions), uintptr(unsafe.Pointer(eaBuffer)), uintptr(eaLength), 0) + status = uint32(r0) + return +} + +func ntSetInformationFile(handle uintptr, iosb *ioStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + +func rtlNtStatusToDosError(status uint32) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func localAlloc(flags uint32, size int) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(size), 0) + ptr = uintptr(r0) + return +} + +func localFree(ptr uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(ptr), 0, 0) + return +} diff --git a/vendor/github.com/Nvveen/Gotty/LICENSE b/vendor/github.com/Nvveen/Gotty/LICENSE new file mode 100644 index 0000000000..0b71c97360 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/LICENSE @@ -0,0 +1,26 @@ +Copyright (c) 2012, Neal van Veen (nealvanveen@gmail.com) +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those +of the authors and should not be interpreted as representing official policies, +either expressed or implied, of the FreeBSD Project. diff --git a/vendor/github.com/Nvveen/Gotty/attributes.go b/vendor/github.com/Nvveen/Gotty/attributes.go new file mode 100644 index 0000000000..a4c005fae5 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/attributes.go @@ -0,0 +1,514 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package gotty + +// Boolean capabilities +var BoolAttr = [...]string{ + "auto_left_margin", "bw", + "auto_right_margin", "am", + "no_esc_ctlc", "xsb", + "ceol_standout_glitch", "xhp", + "eat_newline_glitch", "xenl", + "erase_overstrike", "eo", + "generic_type", "gn", + "hard_copy", "hc", + "has_meta_key", "km", + "has_status_line", "hs", + "insert_null_glitch", "in", + "memory_above", "da", + "memory_below", "db", + "move_insert_mode", "mir", + "move_standout_mode", "msgr", + "over_strike", "os", + "status_line_esc_ok", "eslok", + "dest_tabs_magic_smso", "xt", + "tilde_glitch", "hz", + "transparent_underline", "ul", + "xon_xoff", "nxon", + "needs_xon_xoff", "nxon", + "prtr_silent", "mc5i", + "hard_cursor", "chts", + "non_rev_rmcup", "nrrmc", + "no_pad_char", "npc", + "non_dest_scroll_region", "ndscr", + "can_change", "ccc", + "back_color_erase", "bce", + "hue_lightness_saturation", "hls", + "col_addr_glitch", "xhpa", + "cr_cancels_micro_mode", "crxm", + "has_print_wheel", "daisy", + "row_addr_glitch", "xvpa", + "semi_auto_right_margin", "sam", + "cpi_changes_res", "cpix", + "lpi_changes_res", "lpix", + "backspaces_with_bs", "", + "crt_no_scrolling", "", + "no_correctly_working_cr", "", + "gnu_has_meta_key", "", + "linefeed_is_newline", "", + "has_hardware_tabs", "", + "return_does_clr_eol", "", +} + +// Numerical capabilities +var NumAttr = [...]string{ + "columns", "cols", + "init_tabs", "it", + "lines", "lines", + "lines_of_memory", "lm", + "magic_cookie_glitch", "xmc", + "padding_baud_rate", "pb", + "virtual_terminal", "vt", + "width_status_line", "wsl", + "num_labels", "nlab", + "label_height", "lh", + "label_width", "lw", + "max_attributes", "ma", + "maximum_windows", "wnum", + "max_colors", "colors", + "max_pairs", "pairs", + "no_color_video", "ncv", + "buffer_capacity", "bufsz", + "dot_vert_spacing", "spinv", + "dot_horz_spacing", "spinh", + "max_micro_address", "maddr", + "max_micro_jump", "mjump", + "micro_col_size", "mcs", + "micro_line_size", "mls", + "number_of_pins", "npins", + "output_res_char", "orc", + "output_res_line", "orl", + "output_res_horz_inch", "orhi", + "output_res_vert_inch", "orvi", + "print_rate", "cps", + "wide_char_size", "widcs", + "buttons", "btns", + "bit_image_entwining", "bitwin", + "bit_image_type", "bitype", + "magic_cookie_glitch_ul", "", + "carriage_return_delay", "", + "new_line_delay", "", + "backspace_delay", "", + "horizontal_tab_delay", "", + "number_of_function_keys", "", +} + +// String capabilities +var StrAttr = [...]string{ + "back_tab", "cbt", + "bell", "bel", + "carriage_return", "cr", + "change_scroll_region", "csr", + "clear_all_tabs", "tbc", + "clear_screen", "clear", + "clr_eol", "el", + "clr_eos", "ed", + "column_address", "hpa", + "command_character", "cmdch", + "cursor_address", "cup", + "cursor_down", "cud1", + "cursor_home", "home", + "cursor_invisible", "civis", + "cursor_left", "cub1", + "cursor_mem_address", "mrcup", + "cursor_normal", "cnorm", + "cursor_right", "cuf1", + "cursor_to_ll", "ll", + "cursor_up", "cuu1", + "cursor_visible", "cvvis", + "delete_character", "dch1", + "delete_line", "dl1", + "dis_status_line", "dsl", + "down_half_line", "hd", + "enter_alt_charset_mode", "smacs", + "enter_blink_mode", "blink", + "enter_bold_mode", "bold", + "enter_ca_mode", "smcup", + "enter_delete_mode", "smdc", + "enter_dim_mode", "dim", + "enter_insert_mode", "smir", + "enter_secure_mode", "invis", + "enter_protected_mode", "prot", + "enter_reverse_mode", "rev", + "enter_standout_mode", "smso", + "enter_underline_mode", "smul", + 
"erase_chars", "ech", + "exit_alt_charset_mode", "rmacs", + "exit_attribute_mode", "sgr0", + "exit_ca_mode", "rmcup", + "exit_delete_mode", "rmdc", + "exit_insert_mode", "rmir", + "exit_standout_mode", "rmso", + "exit_underline_mode", "rmul", + "flash_screen", "flash", + "form_feed", "ff", + "from_status_line", "fsl", + "init_1string", "is1", + "init_2string", "is2", + "init_3string", "is3", + "init_file", "if", + "insert_character", "ich1", + "insert_line", "il1", + "insert_padding", "ip", + "key_backspace", "kbs", + "key_catab", "ktbc", + "key_clear", "kclr", + "key_ctab", "kctab", + "key_dc", "kdch1", + "key_dl", "kdl1", + "key_down", "kcud1", + "key_eic", "krmir", + "key_eol", "kel", + "key_eos", "ked", + "key_f0", "kf0", + "key_f1", "kf1", + "key_f10", "kf10", + "key_f2", "kf2", + "key_f3", "kf3", + "key_f4", "kf4", + "key_f5", "kf5", + "key_f6", "kf6", + "key_f7", "kf7", + "key_f8", "kf8", + "key_f9", "kf9", + "key_home", "khome", + "key_ic", "kich1", + "key_il", "kil1", + "key_left", "kcub1", + "key_ll", "kll", + "key_npage", "knp", + "key_ppage", "kpp", + "key_right", "kcuf1", + "key_sf", "kind", + "key_sr", "kri", + "key_stab", "khts", + "key_up", "kcuu1", + "keypad_local", "rmkx", + "keypad_xmit", "smkx", + "lab_f0", "lf0", + "lab_f1", "lf1", + "lab_f10", "lf10", + "lab_f2", "lf2", + "lab_f3", "lf3", + "lab_f4", "lf4", + "lab_f5", "lf5", + "lab_f6", "lf6", + "lab_f7", "lf7", + "lab_f8", "lf8", + "lab_f9", "lf9", + "meta_off", "rmm", + "meta_on", "smm", + "newline", "_glitch", + "pad_char", "npc", + "parm_dch", "dch", + "parm_delete_line", "dl", + "parm_down_cursor", "cud", + "parm_ich", "ich", + "parm_index", "indn", + "parm_insert_line", "il", + "parm_left_cursor", "cub", + "parm_right_cursor", "cuf", + "parm_rindex", "rin", + "parm_up_cursor", "cuu", + "pkey_key", "pfkey", + "pkey_local", "pfloc", + "pkey_xmit", "pfx", + "print_screen", "mc0", + "prtr_off", "mc4", + "prtr_on", "mc5", + "repeat_char", "rep", + "reset_1string", "rs1", + "reset_2string", "rs2", + "reset_3string", "rs3", + "reset_file", "rf", + "restore_cursor", "rc", + "row_address", "mvpa", + "save_cursor", "row_address", + "scroll_forward", "ind", + "scroll_reverse", "ri", + "set_attributes", "sgr", + "set_tab", "hts", + "set_window", "wind", + "tab", "s_magic_smso", + "to_status_line", "tsl", + "underline_char", "uc", + "up_half_line", "hu", + "init_prog", "iprog", + "key_a1", "ka1", + "key_a3", "ka3", + "key_b2", "kb2", + "key_c1", "kc1", + "key_c3", "kc3", + "prtr_non", "mc5p", + "char_padding", "rmp", + "acs_chars", "acsc", + "plab_norm", "pln", + "key_btab", "kcbt", + "enter_xon_mode", "smxon", + "exit_xon_mode", "rmxon", + "enter_am_mode", "smam", + "exit_am_mode", "rmam", + "xon_character", "xonc", + "xoff_character", "xoffc", + "ena_acs", "enacs", + "label_on", "smln", + "label_off", "rmln", + "key_beg", "kbeg", + "key_cancel", "kcan", + "key_close", "kclo", + "key_command", "kcmd", + "key_copy", "kcpy", + "key_create", "kcrt", + "key_end", "kend", + "key_enter", "kent", + "key_exit", "kext", + "key_find", "kfnd", + "key_help", "khlp", + "key_mark", "kmrk", + "key_message", "kmsg", + "key_move", "kmov", + "key_next", "knxt", + "key_open", "kopn", + "key_options", "kopt", + "key_previous", "kprv", + "key_print", "kprt", + "key_redo", "krdo", + "key_reference", "kref", + "key_refresh", "krfr", + "key_replace", "krpl", + "key_restart", "krst", + "key_resume", "kres", + "key_save", "ksav", + "key_suspend", "kspd", + "key_undo", "kund", + "key_sbeg", "kBEG", + "key_scancel", "kCAN", + "key_scommand", "kCMD", 
+ "key_scopy", "kCPY", + "key_screate", "kCRT", + "key_sdc", "kDC", + "key_sdl", "kDL", + "key_select", "kslt", + "key_send", "kEND", + "key_seol", "kEOL", + "key_sexit", "kEXT", + "key_sfind", "kFND", + "key_shelp", "kHLP", + "key_shome", "kHOM", + "key_sic", "kIC", + "key_sleft", "kLFT", + "key_smessage", "kMSG", + "key_smove", "kMOV", + "key_snext", "kNXT", + "key_soptions", "kOPT", + "key_sprevious", "kPRV", + "key_sprint", "kPRT", + "key_sredo", "kRDO", + "key_sreplace", "kRPL", + "key_sright", "kRIT", + "key_srsume", "kRES", + "key_ssave", "kSAV", + "key_ssuspend", "kSPD", + "key_sundo", "kUND", + "req_for_input", "rfi", + "key_f11", "kf11", + "key_f12", "kf12", + "key_f13", "kf13", + "key_f14", "kf14", + "key_f15", "kf15", + "key_f16", "kf16", + "key_f17", "kf17", + "key_f18", "kf18", + "key_f19", "kf19", + "key_f20", "kf20", + "key_f21", "kf21", + "key_f22", "kf22", + "key_f23", "kf23", + "key_f24", "kf24", + "key_f25", "kf25", + "key_f26", "kf26", + "key_f27", "kf27", + "key_f28", "kf28", + "key_f29", "kf29", + "key_f30", "kf30", + "key_f31", "kf31", + "key_f32", "kf32", + "key_f33", "kf33", + "key_f34", "kf34", + "key_f35", "kf35", + "key_f36", "kf36", + "key_f37", "kf37", + "key_f38", "kf38", + "key_f39", "kf39", + "key_f40", "kf40", + "key_f41", "kf41", + "key_f42", "kf42", + "key_f43", "kf43", + "key_f44", "kf44", + "key_f45", "kf45", + "key_f46", "kf46", + "key_f47", "kf47", + "key_f48", "kf48", + "key_f49", "kf49", + "key_f50", "kf50", + "key_f51", "kf51", + "key_f52", "kf52", + "key_f53", "kf53", + "key_f54", "kf54", + "key_f55", "kf55", + "key_f56", "kf56", + "key_f57", "kf57", + "key_f58", "kf58", + "key_f59", "kf59", + "key_f60", "kf60", + "key_f61", "kf61", + "key_f62", "kf62", + "key_f63", "kf63", + "clr_bol", "el1", + "clear_margins", "mgc", + "set_left_margin", "smgl", + "set_right_margin", "smgr", + "label_format", "fln", + "set_clock", "sclk", + "display_clock", "dclk", + "remove_clock", "rmclk", + "create_window", "cwin", + "goto_window", "wingo", + "hangup", "hup", + "dial_phone", "dial", + "quick_dial", "qdial", + "tone", "tone", + "pulse", "pulse", + "flash_hook", "hook", + "fixed_pause", "pause", + "wait_tone", "wait", + "user0", "u0", + "user1", "u1", + "user2", "u2", + "user3", "u3", + "user4", "u4", + "user5", "u5", + "user6", "u6", + "user7", "u7", + "user8", "u8", + "user9", "u9", + "orig_pair", "op", + "orig_colors", "oc", + "initialize_color", "initc", + "initialize_pair", "initp", + "set_color_pair", "scp", + "set_foreground", "setf", + "set_background", "setb", + "change_char_pitch", "cpi", + "change_line_pitch", "lpi", + "change_res_horz", "chr", + "change_res_vert", "cvr", + "define_char", "defc", + "enter_doublewide_mode", "swidm", + "enter_draft_quality", "sdrfq", + "enter_italics_mode", "sitm", + "enter_leftward_mode", "slm", + "enter_micro_mode", "smicm", + "enter_near_letter_quality", "snlq", + "enter_normal_quality", "snrmq", + "enter_shadow_mode", "sshm", + "enter_subscript_mode", "ssubm", + "enter_superscript_mode", "ssupm", + "enter_upward_mode", "sum", + "exit_doublewide_mode", "rwidm", + "exit_italics_mode", "ritm", + "exit_leftward_mode", "rlm", + "exit_micro_mode", "rmicm", + "exit_shadow_mode", "rshm", + "exit_subscript_mode", "rsubm", + "exit_superscript_mode", "rsupm", + "exit_upward_mode", "rum", + "micro_column_address", "mhpa", + "micro_down", "mcud1", + "micro_left", "mcub1", + "micro_right", "mcuf1", + "micro_row_address", "mvpa", + "micro_up", "mcuu1", + "order_of_pins", "porder", + "parm_down_micro", "mcud", + 
"parm_left_micro", "mcub", + "parm_right_micro", "mcuf", + "parm_up_micro", "mcuu", + "select_char_set", "scs", + "set_bottom_margin", "smgb", + "set_bottom_margin_parm", "smgbp", + "set_left_margin_parm", "smglp", + "set_right_margin_parm", "smgrp", + "set_top_margin", "smgt", + "set_top_margin_parm", "smgtp", + "start_bit_image", "sbim", + "start_char_set_def", "scsd", + "stop_bit_image", "rbim", + "stop_char_set_def", "rcsd", + "subscript_characters", "subcs", + "superscript_characters", "supcs", + "these_cause_cr", "docr", + "zero_motion", "zerom", + "char_set_names", "csnm", + "key_mouse", "kmous", + "mouse_info", "minfo", + "req_mouse_pos", "reqmp", + "get_mouse", "getm", + "set_a_foreground", "setaf", + "set_a_background", "setab", + "pkey_plab", "pfxl", + "device_type", "devt", + "code_set_init", "csin", + "set0_des_seq", "s0ds", + "set1_des_seq", "s1ds", + "set2_des_seq", "s2ds", + "set3_des_seq", "s3ds", + "set_lr_margin", "smglr", + "set_tb_margin", "smgtb", + "bit_image_repeat", "birep", + "bit_image_newline", "binel", + "bit_image_carriage_return", "bicr", + "color_names", "colornm", + "define_bit_image_region", "defbi", + "end_bit_image_region", "endbi", + "set_color_band", "setcolor", + "set_page_length", "slines", + "display_pc_char", "dispc", + "enter_pc_charset_mode", "smpch", + "exit_pc_charset_mode", "rmpch", + "enter_scancode_mode", "smsc", + "exit_scancode_mode", "rmsc", + "pc_term_options", "pctrm", + "scancode_escape", "scesc", + "alt_scancode_esc", "scesa", + "enter_horizontal_hl_mode", "ehhlm", + "enter_left_hl_mode", "elhlm", + "enter_low_hl_mode", "elohlm", + "enter_right_hl_mode", "erhlm", + "enter_top_hl_mode", "ethlm", + "enter_vertical_hl_mode", "evhlm", + "set_a_attributes", "sgr1", + "set_pglen_inch", "slength", + "termcap_init2", "", + "termcap_reset", "", + "linefeed_if_not_lf", "", + "backspace_if_not_bs", "", + "other_non_function_keys", "", + "arrow_key_map", "", + "acs_ulcorner", "", + "acs_llcorner", "", + "acs_urcorner", "", + "acs_lrcorner", "", + "acs_ltee", "", + "acs_rtee", "", + "acs_btee", "", + "acs_ttee", "", + "acs_hline", "", + "acs_vline", "", + "acs_plus", "", + "memory_lock", "", + "memory_unlock", "", + "box_chars_1", "", +} diff --git a/vendor/github.com/Nvveen/Gotty/gotty.go b/vendor/github.com/Nvveen/Gotty/gotty.go new file mode 100644 index 0000000000..093cbf37e1 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/gotty.go @@ -0,0 +1,238 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Gotty is a Go-package for reading and parsing the terminfo database +package gotty + +// TODO add more concurrency to name lookup, look for more opportunities. + +import ( + "encoding/binary" + "errors" + "fmt" + "os" + "reflect" + "strings" + "sync" +) + +// Open a terminfo file by the name given and construct a TermInfo object. +// If something went wrong reading the terminfo database file, an error is +// returned. 
+func OpenTermInfo(termName string) (*TermInfo, error) { + var term *TermInfo + var err error + // Find the environment variables + termloc := os.Getenv("TERMINFO") + if len(termloc) == 0 { + // Search like ncurses + locations := []string{os.Getenv("HOME") + "/.terminfo/", "/etc/terminfo/", + "/lib/terminfo/", "/usr/share/terminfo/"} + var path string + for _, str := range locations { + // Construct path + path = str + string(termName[0]) + "/" + termName + // Check if path can be opened + file, _ := os.Open(path) + if file != nil { + // Path can open, fall out and use current path + file.Close() + break + } + } + if len(path) > 0 { + term, err = readTermInfo(path) + } else { + err = errors.New(fmt.Sprintf("No terminfo file(-location) found")) + } + } + return term, err +} + +// Open a terminfo file from the environment variable containing the current +// terminal name and construct a TermInfo object. If something went wrong +// reading the terminfo database file, an error is returned. +func OpenTermInfoEnv() (*TermInfo, error) { + termenv := os.Getenv("TERM") + return OpenTermInfo(termenv) +} + +// Return an attribute by the name attr provided. If none can be found, +// an error is returned. +func (term *TermInfo) GetAttribute(attr string) (stacker, error) { + // Channel to store the main value in. + var value stacker + // Add a blocking WaitGroup + var block sync.WaitGroup + // Keep track of variable being written. + written := false + // Function to put into goroutine. + f := func(ats interface{}) { + var ok bool + var v stacker + // Switch on type of map to use and assign value to it. + switch reflect.TypeOf(ats).Elem().Kind() { + case reflect.Bool: + v, ok = ats.(map[string]bool)[attr] + case reflect.Int16: + v, ok = ats.(map[string]int16)[attr] + case reflect.String: + v, ok = ats.(map[string]string)[attr] + } + // If ok, a value is found, so we can write. + if ok { + value = v + written = true + } + // Goroutine is done + block.Done() + } + block.Add(3) + // Go for all 3 attribute lists. + go f(term.boolAttributes) + go f(term.numAttributes) + go f(term.strAttributes) + // Wait until every goroutine is done. + block.Wait() + // If a value has been written, return it. + if written { + return value, nil + } + // Otherwise, error. + return nil, fmt.Errorf("Error finding attribute") +} + +// Return an attribute by the name attr provided. If none can be found, +// an error is returned. A name is first converted to its termcap value. +func (term *TermInfo) GetAttributeName(name string) (stacker, error) { + tc := GetTermcapName(name) + return term.GetAttribute(tc) +} + +// A utility function that finds and returns the termcap equivalent of a +// variable name. +func GetTermcapName(name string) string { + // Termcap name + var tc string + // Blocking group + var wait sync.WaitGroup + // Function to put into a goroutine + f := func(attrs []string) { + // Find the string corresponding to the name + for i, s := range attrs { + if s == name { + tc = attrs[i+1] + } + } + // Goroutine is finished + wait.Done() + } + wait.Add(3) + // Go for all 3 attribute lists + go f(BoolAttr[:]) + go f(NumAttr[:]) + go f(StrAttr[:]) + // Wait until every goroutine is done + wait.Wait() + // Return the termcap name + return tc +} + +// This function takes a path to a terminfo file and reads it in binary +// form to construct the actual TermInfo file.
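readTermInfo, defined next, parses the legacy compiled-terminfo layout: a six-short little-endian header led by the octal magic 0432 (0x011A), then the names, the booleans (plus an alignment byte when their count is odd), the numbers, the string offsets, and the string table. Reading just the header, assuming a compiled entry exists at the usual path:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"os"
)

func main() {
	f, err := os.Open("/usr/share/terminfo/x/xterm") // any compiled entry
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	// magic, name size, bool count, number count, string count, string table size
	var header [6]int16
	if err := binary.Read(f, binary.LittleEndian, &header); err != nil {
		fmt.Println(err)
		return
	}
	if header[0] != 0432 { // newer ncurses entries use 01036 for 32-bit numbers
		fmt.Printf("unexpected magic %#o\n", header[0])
		return
	}
	fmt.Printf("names=%d bools=%d nums=%d strs=%d table=%d\n",
		header[1], header[2], header[3], header[4], header[5])
}
```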
+func readTermInfo(path string) (*TermInfo, error) { + // Open the terminfo file + file, err := os.Open(path) + defer file.Close() + if err != nil { + return nil, err + } + + // magic, nameSize, boolSize, nrSNum, nrOffsetsStr, strSize + // Header is composed of the magic 0432 octal number, size of the name + // section, size of the boolean section, the amount of number values, + // the number of offsets of strings, and the size of the string section. + var header [6]int16 + // Byte array is used to read in byte values + var byteArray []byte + // Short array is used to read in short values + var shArray []int16 + // TermInfo object to store values + var term TermInfo + + // Read in the header + err = binary.Read(file, binary.LittleEndian, &header) + if err != nil { + return nil, err + } + // If magic number isn't there or isn't correct, we have the wrong filetype + if header[0] != 0432 { + return nil, errors.New(fmt.Sprintf("Wrong filetype")) + } + + // Read in the names + byteArray = make([]byte, header[1]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.Names = strings.Split(string(byteArray), "|") + + // Read in the booleans + byteArray = make([]byte, header[2]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.boolAttributes = make(map[string]bool) + for i, b := range byteArray { + if b == 1 { + term.boolAttributes[BoolAttr[i*2+1]] = true + } + } + // If the number of bytes read is not even, a byte for alignment is added + if len(byteArray)%2 != 0 { + err = binary.Read(file, binary.LittleEndian, make([]byte, 1)) + if err != nil { + return nil, err + } + } + + // Read in shorts + shArray = make([]int16, header[3]) + err = binary.Read(file, binary.LittleEndian, &shArray) + if err != nil { + return nil, err + } + term.numAttributes = make(map[string]int16) + for i, n := range shArray { + if n != 0377 && n > -1 { + term.numAttributes[NumAttr[i*2+1]] = n + } + } + + // Read the offsets into the short array + shArray = make([]int16, header[4]) + err = binary.Read(file, binary.LittleEndian, &shArray) + if err != nil { + return nil, err + } + // Read the actual strings in the byte array + byteArray = make([]byte, header[5]) + err = binary.Read(file, binary.LittleEndian, &byteArray) + if err != nil { + return nil, err + } + term.strAttributes = make(map[string]string) + // We get an offset, and then iterate until the string is null-terminated + for i, offset := range shArray { + if offset > -1 { + r := offset + for ; byteArray[r] != 0; r++ { + } + term.strAttributes[StrAttr[i*2+1]] = string(byteArray[offset:r]) + } + } + return &term, nil +} diff --git a/vendor/github.com/Nvveen/Gotty/parser.go b/vendor/github.com/Nvveen/Gotty/parser.go new file mode 100644 index 0000000000..a9d5d23c54 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/parser.go @@ -0,0 +1,362 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
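parser.go, which follows, evaluates terminfo's stack-based parameter language: %p pushes a parameter, %d pops and prints decimal, and %i increments the first two parameters for 1-based cursor addressing. A toy evaluator for just that subset shows what a typical cup capability expands to:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// evalCup handles only %%, %i, %pN and %d, which is enough for a
// common cup capability such as "\x1b[%i%p1%d;%p2%dH".
func evalCup(spec string, params ...int) string {
	var stack []int
	var out strings.Builder
	for i := 0; i < len(spec); i++ {
		if spec[i] != '%' {
			out.WriteByte(spec[i])
			continue
		}
		i++
		switch spec[i] {
		case '%':
			out.WriteByte('%')
		case 'i': // increment the first two parameters (1-based addressing)
			params[0]++
			params[1]++
		case 'p': // %pN: push parameter N
			i++
			stack = append(stack, params[spec[i]-'1'])
		case 'd': // pop and print as decimal
			v := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			out.WriteString(strconv.Itoa(v))
		}
	}
	return out.String()
}

func main() {
	// Move the cursor to row 3, column 7 (0-based): prints "\x1b[4;8H".
	fmt.Printf("%q\n", evalCup("\x1b[%i%p1%d;%p2%dH", 3, 7))
}
```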
+ +package gotty + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +var exp = [...]string{ + "%%", + "%c", + "%s", + "%p(\\d)", + "%P([A-z])", + "%g([A-z])", + "%'(.)'", + "%{([0-9]+)}", + "%l", + "%\\+|%-|%\\*|%/|%m", + "%&|%\\||%\\^", + "%=|%>|%<", + "%A|%O", + "%!|%~", + "%i", + "%(:[\\ #\\-\\+]{0,4})?(\\d+\\.\\d+|\\d+)?[doxXs]", + "%\\?(.*?);", +} + +var regex *regexp.Regexp +var staticVar map[byte]stacker + +// Parses the attribute that is received with name attr and parameters params. +func (term *TermInfo) Parse(attr string, params ...interface{}) (string, error) { + // Get the attribute name first. + iface, err := term.GetAttribute(attr) + str, ok := iface.(string) + if err != nil { + return "", err + } + if !ok { + return str, errors.New("Only string capabilities can be parsed.") + } + // Construct the hidden parser struct so we can use a recursive stack based + // parser. + ps := &parser{} + // Dynamic variables only exist in this context. + ps.dynamicVar = make(map[byte]stacker, 26) + ps.parameters = make([]stacker, len(params)) + // Convert the parameters to insert them into the parser struct. + for i, x := range params { + ps.parameters[i] = x + } + // Recursively walk and return. + result, err := ps.walk(str) + return result, err +} + +// Parses the attribute that is received with name attr and parameters params. +// Only works on full name of a capability that is given, which it uses to +// search for the termcap name. +func (term *TermInfo) ParseName(attr string, params ...interface{}) (string, error) { + tc := GetTermcapName(attr) + return term.Parse(tc, params) +} + +// Identify each token in a stack based manner and do the actual parsing. +func (ps *parser) walk(attr string) (string, error) { + // We use a buffer to get the modified string. + var buf bytes.Buffer + // Next, find and identify all tokens by their indices and strings. + tokens := regex.FindAllStringSubmatch(attr, -1) + if len(tokens) == 0 { + return attr, nil + } + indices := regex.FindAllStringIndex(attr, -1) + q := 0 // q counts the matches of one token + // Iterate through the string per character. + for i := 0; i < len(attr); i++ { + // If the current position is an identified token, execute the following + // steps. + if q < len(indices) && i >= indices[q][0] && i < indices[q][1] { + // Switch on token. + switch { + case tokens[q][0][:2] == "%%": + // Literal percentage character. + buf.WriteByte('%') + case tokens[q][0][:2] == "%c": + // Pop a character. + c, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + buf.WriteByte(c.(byte)) + case tokens[q][0][:2] == "%s": + // Pop a string. + str, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + if _, ok := str.(string); !ok { + return buf.String(), errors.New("Stack head is not a string") + } + buf.WriteString(str.(string)) + case tokens[q][0][:2] == "%p": + // Push a parameter on the stack. + index, err := strconv.ParseInt(tokens[q][1], 10, 8) + index-- + if err != nil { + return buf.String(), err + } + if int(index) >= len(ps.parameters) { + return buf.String(), errors.New("Parameters index out of bound") + } + ps.st.push(ps.parameters[index]) + case tokens[q][0][:2] == "%P": + // Pop a variable from the stack as a dynamic or static variable. 
+ val, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + index := tokens[q][2] + if len(index) > 1 { + errorStr := fmt.Sprintf("%s is not a valid dynamic variables index", + index) + return buf.String(), errors.New(errorStr) + } + // Specify either dynamic or static. + if index[0] >= 'a' && index[0] <= 'z' { + ps.dynamicVar[index[0]] = val + } else if index[0] >= 'A' && index[0] <= 'Z' { + staticVar[index[0]] = val + } + case tokens[q][0][:2] == "%g": + // Push a variable from the stack as a dynamic or static variable. + index := tokens[q][3] + if len(index) > 1 { + errorStr := fmt.Sprintf("%s is not a valid static variables index", + index) + return buf.String(), errors.New(errorStr) + } + var val stacker + if index[0] >= 'a' && index[0] <= 'z' { + val = ps.dynamicVar[index[0]] + } else if index[0] >= 'A' && index[0] <= 'Z' { + val = staticVar[index[0]] + } + ps.st.push(val) + case tokens[q][0][:2] == "%'": + // Push a character constant. + con := tokens[q][4] + if len(con) > 1 { + errorStr := fmt.Sprintf("%s is not a valid character constant", con) + return buf.String(), errors.New(errorStr) + } + ps.st.push(con[0]) + case tokens[q][0][:2] == "%{": + // Push an integer constant. + con, err := strconv.ParseInt(tokens[q][5], 10, 32) + if err != nil { + return buf.String(), err + } + ps.st.push(con) + case tokens[q][0][:2] == "%l": + // Push the length of the string that is popped from the stack. + popStr, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + if _, ok := popStr.(string); !ok { + errStr := fmt.Sprintf("Stack head is not a string") + return buf.String(), errors.New(errStr) + } + ps.st.push(len(popStr.(string))) + case tokens[q][0][:2] == "%?": + // If-then-else construct. First, the whole string is identified and + // then inside this substring, we can specify which parts to switch on. + ifReg, _ := regexp.Compile("%\\?(.*)%t(.*)%e(.*);|%\\?(.*)%t(.*);") + ifTokens := ifReg.FindStringSubmatch(tokens[q][0]) + var ( + ifStr string + err error + ) + // Parse the if-part to determine if-else. + if len(ifTokens[1]) > 0 { + ifStr, err = ps.walk(ifTokens[1]) + } else { // else + ifStr, err = ps.walk(ifTokens[4]) + } + // Return any errors + if err != nil { + return buf.String(), err + } else if len(ifStr) > 0 { + // Self-defined limitation, not sure if this is correct, but didn't + // seem like it. + return buf.String(), errors.New("If-clause cannot print statements") + } + var thenStr string + // Pop the first value that is set by parsing the if-clause. + choose, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + // Switch to if or else. + if choose.(int) == 0 && len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[3]) + } else if choose.(int) != 0 { + if len(ifTokens[1]) > 0 { + thenStr, err = ps.walk(ifTokens[2]) + } else { + thenStr, err = ps.walk(ifTokens[5]) + } + } + if err != nil { + return buf.String(), err + } + buf.WriteString(thenStr) + case tokens[q][0][len(tokens[q][0])-1] == 'd': // Fallthrough for printing + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'o': // digits. + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'x': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 'X': + fallthrough + case tokens[q][0][len(tokens[q][0])-1] == 's': + token := tokens[q][0] + // Remove the : that comes before a flag. 
+ if token[1] == ':' { + token = token[:1] + token[2:] + } + digit, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + // The rest is determined like the normal formatted prints. + digitStr := fmt.Sprintf(token, digit.(int)) + buf.WriteString(digitStr) + case tokens[q][0][:2] == "%i": + // Increment the parameters by one. + if len(ps.parameters) < 2 { + return buf.String(), errors.New("Not enough parameters to increment.") + } + val1, val2 := ps.parameters[0].(int), ps.parameters[1].(int) + val1++ + val2++ + ps.parameters[0], ps.parameters[1] = val1, val2 + default: + // The rest of the tokens is a special case, where two values are + // popped and then operated on by the token that comes after them. + op1, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + op2, err := ps.st.pop() + if err != nil { + return buf.String(), err + } + var result stacker + switch tokens[q][0][:2] { + case "%+": + // Addition + result = op2.(int) + op1.(int) + case "%-": + // Subtraction + result = op2.(int) - op1.(int) + case "%*": + // Multiplication + result = op2.(int) * op1.(int) + case "%/": + // Division + result = op2.(int) / op1.(int) + case "%m": + // Modulo + result = op2.(int) % op1.(int) + case "%&": + // Bitwise AND + result = op2.(int) & op1.(int) + case "%|": + // Bitwise OR + result = op2.(int) | op1.(int) + case "%^": + // Bitwise XOR + result = op2.(int) ^ op1.(int) + case "%=": + // Equals + result = op2 == op1 + case "%>": + // Greater-than + result = op2.(int) > op1.(int) + case "%<": + // Lesser-than + result = op2.(int) < op1.(int) + case "%A": + // Logical AND + result = op2.(bool) && op1.(bool) + case "%O": + // Logical OR + result = op2.(bool) || op1.(bool) + case "%!": + // Logical complement + result = !op1.(bool) + case "%~": + // Bitwise complement + result = ^(op1.(int)) + } + ps.st.push(result) + } + + i = indices[q][1] - 1 + q++ + } else { + // We are not "inside" a token, so just skip until the end or the next + // token, and add all characters to the buffer. + j := i + if q != len(indices) { + for !(j >= indices[q][0] && j < indices[q][1]) { + j++ + } + } else { + j = len(attr) + } + buf.WriteString(string(attr[i:j])) + i = j + } + } + // Return the buffer as a string. + return buf.String(), nil +} + +// Push a stacker-value onto the stack. +func (st *stack) push(s stacker) { + *st = append(*st, s) +} + +// Pop a stacker-value from the stack. +func (st *stack) pop() (stacker, error) { + if len(*st) == 0 { + return nil, errors.New("Stack is empty.") + } + newStack := make(stack, len(*st)-1) + val := (*st)[len(*st)-1] + copy(newStack, (*st)[:len(*st)-1]) + *st = newStack + return val, nil +} + +// Initialize regexes and the static vars (that don't get changed between +// calls. +func init() { + // Initialize the main regex. + expStr := strings.Join(exp[:], "|") + regex, _ = regexp.Compile(expStr) + // Initialize the static variables. + staticVar = make(map[byte]stacker, 26) +} diff --git a/vendor/github.com/Nvveen/Gotty/types.go b/vendor/github.com/Nvveen/Gotty/types.go new file mode 100644 index 0000000000..9bcc65e9b8 --- /dev/null +++ b/vendor/github.com/Nvveen/Gotty/types.go @@ -0,0 +1,23 @@ +// Copyright 2012 Neal van Veen. All rights reserved. +// Usage of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package gotty + +type TermInfo struct { + boolAttributes map[string]bool + numAttributes map[string]int16 + strAttributes map[string]string + // The various names of the TermInfo file. 
+ Names []string +} + +type stacker interface { +} +type stack []stacker + +type parser struct { + st stack + parameters []stacker + dynamicVar map[byte]stacker +} diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE new file mode 100644 index 0000000000..106569e542 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go new file mode 100644 index 0000000000..31098dd57e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + DefaultSignal = syscall.SIGUSR1 +) diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go new file mode 100644 index 0000000000..38136af3e4 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/const_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package metrics + +import ( + "syscall" +) + +const ( + // DefaultSignal is used with DefaultInmemSignal + // Windows has no SIGUSR1, use SIGBREAK + DefaultSignal = syscall.Signal(21) +) diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go new file mode 100644 index 0000000000..4e2d6a709e --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem.go @@ -0,0 +1,348 @@ +package metrics + +import ( + "bytes" + "fmt" + "math" + "net/url" + "strings" + "sync" + "time" +) + +// InmemSink provides a MetricSink that does in-memory aggregation +// without sending metrics over a network. It can be embedded within +// an application to provide profiling information. +type InmemSink struct { + // How long is each aggregation interval + interval time.Duration + + // Retain controls how many metrics interval we keep + retain time.Duration + + // maxIntervals is the maximum length of intervals. + // It is retain / interval. 
+ maxIntervals int + + // intervals is a slice of the retained intervals + intervals []*IntervalMetrics + intervalLock sync.RWMutex + + rateDenom float64 +} + +// IntervalMetrics stores the aggregated metrics +// for a specific interval +type IntervalMetrics struct { + sync.RWMutex + + // The start time of the interval + Interval time.Time + + // Gauges maps the key to the last set value + Gauges map[string]GaugeValue + + // Points maps the string to the list of emitted values + // from EmitKey + Points map[string][]float32 + + // Counters maps the string key to a sum of the counter + // values + Counters map[string]SampledValue + + // Samples maps the key to an AggregateSample, + // which has the rolled up view of a sample + Samples map[string]SampledValue +} + +// NewIntervalMetrics creates a new IntervalMetrics for a given interval +func NewIntervalMetrics(intv time.Time) *IntervalMetrics { + return &IntervalMetrics{ + Interval: intv, + Gauges: make(map[string]GaugeValue), + Points: make(map[string][]float32), + Counters: make(map[string]SampledValue), + Samples: make(map[string]SampledValue), + } +} + +// AggregateSample is used to hold aggregate metrics +// about a sample +type AggregateSample struct { + Count int // The count of emitted pairs + Rate float64 // The values rate per time unit (usually 1 second) + Sum float64 // The sum of values + SumSq float64 `json:"-"` // The sum of squared values + Min float64 // Minimum value + Max float64 // Maximum value + LastUpdated time.Time `json:"-"` // When value was last updated +} + +// Computes a Stddev of the values +func (a *AggregateSample) Stddev() float64 { + num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) + div := float64(a.Count * (a.Count - 1)) + if div == 0 { + return 0 + } + return math.Sqrt(num / div) +} + +// Computes a mean of the values +func (a *AggregateSample) Mean() float64 { + if a.Count == 0 { + return 0 + } + return a.Sum / float64(a.Count) +} + +// Ingest is used to update a sample +func (a *AggregateSample) Ingest(v float64, rateDenom float64) { + a.Count++ + a.Sum += v + a.SumSq += (v * v) + if v < a.Min || a.Count == 1 { + a.Min = v + } + if v > a.Max || a.Count == 1 { + a.Max = v + } + a.Rate = float64(a.Sum) / rateDenom + a.LastUpdated = time.Now() +} + +func (a *AggregateSample) String() string { + if a.Count == 0 { + return "Count: 0" + } else if a.Stddev() == 0 { + return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated) + } else { + return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s", + a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated) + } +} + +// NewInmemSinkFromURL creates an InmemSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) { + params := u.Query() + + interval, err := time.ParseDuration(params.Get("interval")) + if err != nil { + return nil, fmt.Errorf("Bad 'interval' param: %s", err) + } + + retain, err := time.ParseDuration(params.Get("retain")) + if err != nil { + return nil, fmt.Errorf("Bad 'retain' param: %s", err) + } + + return NewInmemSink(interval, retain), nil +} + +// NewInmemSink is used to construct a new in-memory sink. +// Uses an aggregation interval and maximum retention period. 
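NewInmemSink below derives maxIntervals as retain/interval, so the sink keeps a bounded ring of aggregation windows. A typical embedding of the vendored API:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Aggregate into 10s windows and keep one minute of history,
	// i.e. retain/interval = 6 intervals.
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)

	sink.SetGauge([]string{"worker", "queue_depth"}, 42)
	sink.IncrCounter([]string{"requests"}, 1)
	sink.IncrCounter([]string{"requests"}, 1)
	sink.AddSample([]string{"request", "latency_ms"}, 12.7)

	// Data returns a snapshot of the retained intervals.
	for _, intv := range sink.Data() {
		fmt.Println(intv.Interval, intv.Counters["requests"])
	}
}
```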
+func NewInmemSink(interval, retain time.Duration) *InmemSink { + rateTimeUnit := time.Second + i := &InmemSink{ + interval: interval, + retain: retain, + maxIntervals: int(retain / interval), + rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()), + } + i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) + return i +} + +func (i *InmemSink) SetGauge(key []string, val float32) { + i.SetGaugeWithLabels(key, val, nil) +} + +func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels} +} + +func (i *InmemSink) EmitKey(key []string, val float32) { + k := i.flattenKey(key) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + vals := intv.Points[k] + intv.Points[k] = append(vals, val) +} + +func (i *InmemSink) IncrCounter(key []string, val float32) { + i.IncrCounterWithLabels(key, val, nil) +} + +func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Counters[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Counters[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +func (i *InmemSink) AddSample(key []string, val float32) { + i.AddSampleWithLabels(key, val, nil) +} + +func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + k, name := i.flattenKeyLabels(key, labels) + intv := i.getInterval() + + intv.Lock() + defer intv.Unlock() + + agg, ok := intv.Samples[k] + if !ok { + agg = SampledValue{ + Name: name, + AggregateSample: &AggregateSample{}, + Labels: labels, + } + intv.Samples[k] = agg + } + agg.Ingest(float64(val), i.rateDenom) +} + +// Data is used to retrieve all the aggregated metrics +// Intervals may be in use, and a read lock should be acquired +func (i *InmemSink) Data() []*IntervalMetrics { + // Get the current interval, forces creation + i.getInterval() + + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + intervals := make([]*IntervalMetrics, n) + + copy(intervals[:n-1], i.intervals[:n-1]) + current := i.intervals[n-1] + + // make its own copy for current interval + intervals[n-1] = &IntervalMetrics{} + copyCurrent := intervals[n-1] + current.RLock() + *copyCurrent = *current + + copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges)) + for k, v := range current.Gauges { + copyCurrent.Gauges[k] = v + } + // saved values will be not change, just copy its link + copyCurrent.Points = make(map[string][]float32, len(current.Points)) + for k, v := range current.Points { + copyCurrent.Points[k] = v + } + copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters)) + for k, v := range current.Counters { + copyCurrent.Counters[k] = v + } + copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples)) + for k, v := range current.Samples { + copyCurrent.Samples[k] = v + } + current.RUnlock() + + return intervals +} + +func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { + i.intervalLock.RLock() + defer i.intervalLock.RUnlock() + + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + return nil +} + +func (i *InmemSink) createInterval(intv 
time.Time) *IntervalMetrics { + i.intervalLock.Lock() + defer i.intervalLock.Unlock() + + // Check for an existing interval + n := len(i.intervals) + if n > 0 && i.intervals[n-1].Interval == intv { + return i.intervals[n-1] + } + + // Add the current interval + current := NewIntervalMetrics(intv) + i.intervals = append(i.intervals, current) + n++ + + // Truncate the intervals if they are too long + if n >= i.maxIntervals { + copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) + i.intervals = i.intervals[:i.maxIntervals] + } + return current +} + +// getInterval returns the current interval to write to +func (i *InmemSink) getInterval() *IntervalMetrics { + intv := time.Now().Truncate(i.interval) + if m := i.getExistingInterval(intv); m != nil { + return m + } + return i.createInterval(intv) +} + +// Flattens the key for formatting, removes spaces +func (i *InmemSink) flattenKey(parts []string) string { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + return buf.String() +} + +// Flattens the key for formatting along with its labels, removes spaces +func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) { + buf := &bytes.Buffer{} + replacer := strings.NewReplacer(" ", "_") + + if len(parts) > 0 { + replacer.WriteString(buf, parts[0]) + } + for _, part := range parts[1:] { + replacer.WriteString(buf, ".") + replacer.WriteString(buf, part) + } + + key := buf.String() + + for _, label := range labels { + replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value)) + } + + return buf.String(), key +} diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go new file mode 100644 index 0000000000..504f1b3748 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_endpoint.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "fmt" + "net/http" + "sort" + "time" +) + +// MetricsSummary holds a roll-up of metrics info for a given interval +type MetricsSummary struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +type GaugeValue struct { + Name string + Hash string `json:"-"` + Value float32 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +type PointValue struct { + Name string + Points []float32 +} + +type SampledValue struct { + Name string + Hash string `json:"-"` + *AggregateSample + Mean float64 + Stddev float64 + + Labels []Label `json:"-"` + DisplayLabels map[string]string `json:"Labels"` +} + +// DisplayMetrics returns a summary of the metrics from the most recent finished interval. 
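DisplayMetrics, defined next, returns the summary instead of writing it, leaving the encoding to the caller. One plausible way to expose it over HTTP; the route and the JSON encoding here are my choices, not part of the package:

```go
package main

import (
	"encoding/json"
	"log"
	"net/http"
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	sink := metrics.NewInmemSink(10*time.Second, time.Minute)
	sink.IncrCounter([]string{"requests"}, 1)

	http.HandleFunc("/debug/metrics", func(w http.ResponseWriter, r *http.Request) {
		summary, err := sink.DisplayMetrics(w, r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusNotFound)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(summary)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```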
+func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) { + data := i.Data() + + var interval *IntervalMetrics + n := len(data) + switch { + case n == 0: + return nil, fmt.Errorf("no metric intervals have been initialized yet") + case n == 1: + // Show the current interval if it's all we have + interval = data[0] + default: + // Show the most recent finished interval if we have one + interval = data[n-2] + } + + summary := MetricsSummary{ + Timestamp: interval.Interval.Round(time.Second).UTC().String(), + Gauges: make([]GaugeValue, 0, len(interval.Gauges)), + Points: make([]PointValue, 0, len(interval.Points)), + } + + // Format and sort the output of each metric type, so it gets displayed in a + // deterministic order. + for name, points := range interval.Points { + summary.Points = append(summary.Points, PointValue{name, points}) + } + sort.Slice(summary.Points, func(i, j int) bool { + return summary.Points[i].Name < summary.Points[j].Name + }) + + for hash, value := range interval.Gauges { + value.Hash = hash + value.DisplayLabels = make(map[string]string) + for _, label := range value.Labels { + value.DisplayLabels[label.Name] = label.Value + } + value.Labels = nil + + summary.Gauges = append(summary.Gauges, value) + } + sort.Slice(summary.Gauges, func(i, j int) bool { + return summary.Gauges[i].Hash < summary.Gauges[j].Hash + }) + + summary.Counters = formatSamples(interval.Counters) + summary.Samples = formatSamples(interval.Samples) + + return summary, nil +} + +func formatSamples(source map[string]SampledValue) []SampledValue { + output := make([]SampledValue, 0, len(source)) + for hash, sample := range source { + displayLabels := make(map[string]string) + for _, label := range sample.Labels { + displayLabels[label.Name] = label.Value + } + + output = append(output, SampledValue{ + Name: sample.Name, + Hash: hash, + AggregateSample: sample.AggregateSample, + Mean: sample.AggregateSample.Mean(), + Stddev: sample.AggregateSample.Stddev(), + DisplayLabels: displayLabels, + }) + } + sort.Slice(output, func(i, j int) bool { + return output[i].Hash < output[j].Hash + }) + + return output +} diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go new file mode 100644 index 0000000000..0937f4aedf --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal.go @@ -0,0 +1,117 @@ +package metrics + +import ( + "bytes" + "fmt" + "io" + "os" + "os/signal" + "strings" + "sync" + "syscall" +) + +// InmemSignal is used to listen for a given signal, and when received, +// to dump the current metrics from the InmemSink to an io.Writer +type InmemSignal struct { + signal syscall.Signal + inm *InmemSink + w io.Writer + sigCh chan os.Signal + + stop bool + stopCh chan struct{} + stopLock sync.Mutex +} + +// NewInmemSignal creates a new InmemSignal which listens for a given signal, +// and dumps the current metrics out to a writer +func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { + i := &InmemSignal{ + signal: sig, + inm: inmem, + w: w, + sigCh: make(chan os.Signal, 1), + stopCh: make(chan struct{}), + } + signal.Notify(i.sigCh, sig) + go i.run() + return i +} + +// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 +// and writes output to stderr.
+// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
+// and writes output to stderr. Windows uses SIGBREAK instead.
+func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
+	return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
+}
+
+// Stop is used to stop the InmemSignal from listening
+func (i *InmemSignal) Stop() {
+	i.stopLock.Lock()
+	defer i.stopLock.Unlock()
+
+	if i.stop {
+		return
+	}
+	i.stop = true
+	close(i.stopCh)
+	signal.Stop(i.sigCh)
+}
+
+// run is a long running routine that handles signals
+func (i *InmemSignal) run() {
+	for {
+		select {
+		case <-i.sigCh:
+			i.dumpStats()
+		case <-i.stopCh:
+			return
+		}
+	}
+}
+
+// dumpStats is used to dump the data to the output writer
+func (i *InmemSignal) dumpStats() {
+	buf := bytes.NewBuffer(nil)
+
+	data := i.inm.Data()
+	// Skip the last period which is still being aggregated
+	for j := 0; j < len(data)-1; j++ {
+		intv := data[j]
+		intv.RLock()
+		for _, val := range intv.Gauges {
+			name := i.flattenLabels(val.Name, val.Labels)
+			fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
+		}
+		for name, vals := range intv.Points {
+			for _, val := range vals {
+				fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
+			}
+		}
+		for _, agg := range intv.Counters {
+			name := i.flattenLabels(agg.Name, agg.Labels)
+			fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
+		}
+		for _, agg := range intv.Samples {
+			name := i.flattenLabels(agg.Name, agg.Labels)
+			fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
+		}
+		intv.RUnlock()
+	}
+
+	// Write out the bytes
+	i.w.Write(buf.Bytes())
+}
+
+// flattenLabels flattens the key for formatting along with its labels, removing spaces
+func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
+	buf := bytes.NewBufferString(name)
+	replacer := strings.NewReplacer(" ", "_", ":", "_")
+
+	for _, label := range labels {
+		replacer.WriteString(buf, ".")
+		replacer.WriteString(buf, label.Value)
+	}
+
+	return buf.String()
+}
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
new file mode 100644
index 0000000000..d260bd4b29
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/metrics.go
@@ -0,0 +1,216 @@
+package metrics
+
+import (
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-immutable-radix"
+)
+
+type Label struct {
+	Name  string
+	Value string
+}
+
+func (m *Metrics) SetGauge(key []string, val float32) {
+	m.SetGaugeWithLabels(key, val, nil)
+}
+
+func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	if m.HostName != "" {
+		if m.EnableHostnameLabel {
+			labels = append(labels, Label{"host", m.HostName})
+		} else if m.EnableHostname {
+			key = insert(0, m.HostName, key)
+		}
+	}
+	if m.EnableTypePrefix {
+		key = insert(0, "gauge", key)
+	}
+	if m.ServiceName != "" {
+		if m.EnableServiceLabel {
+			labels = append(labels, Label{"service", m.ServiceName})
+		} else {
+			key = insert(0, m.ServiceName, key)
+		}
+	}
+	if !m.allowMetric(key) {
+		return
+	}
+	m.sink.SetGaugeWithLabels(key, val, labels)
+}
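SetGaugeWithLabels above shows how the final key is assembled: the service name, optional type prefix, and optional hostname are prepended before the sink sees it. A short sketch of the resulting key shapes (names are illustrative; imports as in the previous sketch):

```go
conf := metrics.DefaultConfig("api")
conf.EnableHostname = false  // drop the hostname prefix
conf.EnableTypePrefix = true // prefix keys with "gauge", "counter", ...

inm := metrics.NewInmemSink(10*time.Second, time.Minute)
m, _ := metrics.New(conf, inm)

// Flattens to the key "api.gauge.queue_depth" in the sink.
m.SetGauge([]string{"queue_depth"}, 12)

// Labels travel alongside the key instead of being folded into it.
m.SetGaugeWithLabels([]string{"queue_depth"}, 12,
	[]metrics.Label{{Name: "shard", Value: "eu-1"}})
```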
+func (m *Metrics) EmitKey(key []string, val float32) {
+	if m.EnableTypePrefix {
+		key = insert(0, "kv", key)
+	}
+	if m.ServiceName != "" {
+		key = insert(0, m.ServiceName, key)
+	}
+	if !m.allowMetric(key) {
+		return
+	}
+	m.sink.EmitKey(key, val)
+}
+
+func (m *Metrics) IncrCounter(key []string, val float32) {
+	m.IncrCounterWithLabels(key, val, nil)
+}
+
+func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	if m.HostName != "" && m.EnableHostnameLabel {
+		labels = append(labels, Label{"host", m.HostName})
+	}
+	if m.EnableTypePrefix {
+		key = insert(0, "counter", key)
+	}
+	if m.ServiceName != "" {
+		if m.EnableServiceLabel {
+			labels = append(labels, Label{"service", m.ServiceName})
+		} else {
+			key = insert(0, m.ServiceName, key)
+		}
+	}
+	if !m.allowMetric(key) {
+		return
+	}
+	m.sink.IncrCounterWithLabels(key, val, labels)
+}
+
+func (m *Metrics) AddSample(key []string, val float32) {
+	m.AddSampleWithLabels(key, val, nil)
+}
+
+func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	if m.HostName != "" && m.EnableHostnameLabel {
+		labels = append(labels, Label{"host", m.HostName})
+	}
+	if m.EnableTypePrefix {
+		key = insert(0, "sample", key)
+	}
+	if m.ServiceName != "" {
+		if m.EnableServiceLabel {
+			labels = append(labels, Label{"service", m.ServiceName})
+		} else {
+			key = insert(0, m.ServiceName, key)
+		}
+	}
+	if !m.allowMetric(key) {
+		return
+	}
+	m.sink.AddSampleWithLabels(key, val, labels)
+}
+
+func (m *Metrics) MeasureSince(key []string, start time.Time) {
+	m.MeasureSinceWithLabels(key, start, nil)
+}
+
+func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
+	if m.HostName != "" && m.EnableHostnameLabel {
+		labels = append(labels, Label{"host", m.HostName})
+	}
+	if m.EnableTypePrefix {
+		key = insert(0, "timer", key)
+	}
+	if m.ServiceName != "" {
+		if m.EnableServiceLabel {
+			labels = append(labels, Label{"service", m.ServiceName})
+		} else {
+			key = insert(0, m.ServiceName, key)
+		}
+	}
+	if !m.allowMetric(key) {
+		return
+	}
+	now := time.Now()
+	elapsed := now.Sub(start)
+	msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
+	m.sink.AddSampleWithLabels(key, msec, labels)
+}
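MeasureSinceWithLabels converts the elapsed time into units of TimerGranularity (milliseconds under DefaultConfig) and records it as a sample, which makes the defer idiom the natural way to time a function. A sketch, assuming a configured *metrics.Metrics value as in the previous sketch:

```go
func handleBuild(m *metrics.Metrics) {
	// With EnableTypePrefix and service "api", this records elapsed
	// milliseconds under "api.timer.build" as a timing sample.
	defer m.MeasureSince([]string{"build"}, time.Now())

	// ... do the actual work ...
}
```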
+// UpdateFilter overwrites the existing filter with the given rules.
+func (m *Metrics) UpdateFilter(allow, block []string) {
+	m.filterLock.Lock()
+	defer m.filterLock.Unlock()
+
+	m.AllowedPrefixes = allow
+	m.BlockedPrefixes = block
+
+	m.filter = iradix.New()
+	for _, prefix := range m.AllowedPrefixes {
+		m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
+	}
+	for _, prefix := range m.BlockedPrefixes {
+		m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
+	}
+}
+
+// allowMetric returns whether the metric should be allowed based on configured prefix filters
+func (m *Metrics) allowMetric(key []string) bool {
+	m.filterLock.RLock()
+	defer m.filterLock.RUnlock()
+
+	if m.filter == nil || m.filter.Len() == 0 {
+		return m.Config.FilterDefault
+	}
+
+	_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
+	if !ok {
+		return m.Config.FilterDefault
+	}
+	return allowed.(bool)
+}
+
+// collectStats periodically collects runtime stats to publish
+func (m *Metrics) collectStats() {
+	for {
+		time.Sleep(m.ProfileInterval)
+		m.emitRuntimeStats()
+	}
+}
+
+// emitRuntimeStats emits various runtime statistics
+func (m *Metrics) emitRuntimeStats() {
+	// Export number of Goroutines
+	numRoutines := runtime.NumGoroutine()
+	m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
+
+	// Export memory stats
+	var stats runtime.MemStats
+	runtime.ReadMemStats(&stats)
+	m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
+	m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
+	m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
+	m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
+	m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
+	m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
+	m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
+
+	// Export info about the last few GC runs
+	num := stats.NumGC
+
+	// Handle wrap around
+	if num < m.lastNumGC {
+		m.lastNumGC = 0
+	}
+
+	// Ensure we don't scan more than 256
+	if num-m.lastNumGC >= 256 {
+		m.lastNumGC = num - 255
+	}
+
+	for i := m.lastNumGC; i < num; i++ {
+		pause := stats.PauseNs[i%256]
+		m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
+	}
+	m.lastNumGC = num
+}
+
+// insert inserts a string value at an index into the slice
+func insert(i int, v string, s []string) []string {
+	s = append(s, "")
+	copy(s[i+1:], s[i:])
+	s[i] = v
+	return s
+}
diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go
new file mode 100644
index 0000000000..0b7d6e4be4
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/sink.go
@@ -0,0 +1,115 @@
+package metrics
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// The MetricSink interface is used to transmit metrics information
+// to an external system
+type MetricSink interface {
+	// A Gauge should retain the last value it is set to
+	SetGauge(key []string, val float32)
+	SetGaugeWithLabels(key []string, val float32, labels []Label)
+
+	// Should emit a Key/Value pair for each call
+	EmitKey(key []string, val float32)
+
+	// Counters should accumulate values
+	IncrCounter(key []string, val float32)
+	IncrCounterWithLabels(key []string, val float32, labels []Label)
+
+	// Samples are for timing information, where quantiles are used
+	AddSample(key []string, val float32)
+	AddSampleWithLabels(key []string, val float32, labels []Label)
+}
+
+// BlackholeSink is used to just blackhole messages
+type BlackholeSink struct{}
+
+func (*BlackholeSink) SetGauge(key []string, val float32)                              {}
+func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label)    {}
+func (*BlackholeSink) EmitKey(key []string, val float32)                               {}
+func (*BlackholeSink) IncrCounter(key []string, val float32)                           {}
+func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
+func (*BlackholeSink) AddSample(key []string, val float32)                             {}
+func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label)   {}
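The MetricSink interface above is the whole integration surface, so adding a destination means implementing these seven methods. A hypothetical stdout sink, purely illustrative and not part of the vendored code (fmt and strings imports assumed):

```go
// printSink logs every metric; handy when debugging sink wiring.
type printSink struct{}

func (printSink) SetGauge(key []string, val float32) { emit("gauge", key, val) }
func (printSink) SetGaugeWithLabels(key []string, val float32, _ []metrics.Label) {
	emit("gauge", key, val)
}
func (printSink) EmitKey(key []string, val float32)     { emit("kv", key, val) }
func (printSink) IncrCounter(key []string, val float32) { emit("counter", key, val) }
func (printSink) IncrCounterWithLabels(key []string, val float32, _ []metrics.Label) {
	emit("counter", key, val)
}
func (printSink) AddSample(key []string, val float32) { emit("sample", key, val) }
func (printSink) AddSampleWithLabels(key []string, val float32, _ []metrics.Label) {
	emit("sample", key, val)
}

func emit(kind string, key []string, val float32) {
	fmt.Printf("%s %s = %f\n", kind, strings.Join(key, "."), val)
}
```

Such a sink can be passed to metrics.New directly, or combined with others via the FanoutSink defined next.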
+// FanoutSink is used to fan out values to multiple sinks
+type FanoutSink []MetricSink
+
+func (fh FanoutSink) SetGauge(key []string, val float32) {
+	fh.SetGaugeWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.SetGaugeWithLabels(key, val, labels)
+	}
+}
+
+func (fh FanoutSink) EmitKey(key []string, val float32) {
+	for _, s := range fh {
+		s.EmitKey(key, val)
+	}
+}
+
+func (fh FanoutSink) IncrCounter(key []string, val float32) {
+	fh.IncrCounterWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.IncrCounterWithLabels(key, val, labels)
+	}
+}
+
+func (fh FanoutSink) AddSample(key []string, val float32) {
+	fh.AddSampleWithLabels(key, val, nil)
+}
+
+func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
+	for _, s := range fh {
+		s.AddSampleWithLabels(key, val, labels)
+	}
+}
+
+// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
+// by each sink type
+type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
+
+// sinkRegistry supports the generic NewMetricSink function by mapping URL
+// schemes to metric sink factory functions
+var sinkRegistry = map[string]sinkURLFactoryFunc{
+	"statsd":   NewStatsdSinkFromURL,
+	"statsite": NewStatsiteSinkFromURL,
+	"inmem":    NewInmemSinkFromURL,
+}
+
+// NewMetricSinkFromURL allows a generic URL input to configure any of the
+// supported sinks. The scheme of the URL identifies the type of the sink,
+// and query parameters are used to set options.
+//
+// "statsd://" - Initializes a StatsdSink. The host and port are passed through
+// as the "addr" of the sink
+//
+// "statsite://" - Initializes a StatsiteSink. The host and port become the
+// "addr" of the sink
+//
+// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
+// "interval" and "duration" query parameters must be specified with valid
+// durations, see NewInmemSink for details.
+func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
+	u, err := url.Parse(urlStr)
+	if err != nil {
+		return nil, err
+	}
+
+	sinkURLFactoryFunc := sinkRegistry[u.Scheme]
+	if sinkURLFactoryFunc == nil {
+		return nil, fmt.Errorf(
+			"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
+	}
+
+	return sinkURLFactoryFunc(u)
+}
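NewMetricSinkFromURL makes the sink choice configuration-driven, e.g. from a flag or environment variable. A sketch of typical use (the address and service name are illustrative):

```go
sink, err := metrics.NewMetricSinkFromURL("statsd://127.0.0.1:8125")
if err != nil {
	log.Fatal(err)
}
if _, err := metrics.NewGlobal(metrics.DefaultConfig("demo"), sink); err != nil {
	log.Fatal(err)
}
```

diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
new file mode 100644
index 0000000000..dd41861c90
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/start.go
@@ -0,0 +1,129 @@
+package metrics
+
+import (
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/hashicorp/go-immutable-radix"
+)
+
+// Config is used to configure metrics settings
+type Config struct {
+	ServiceName string // Prefixed with keys to separate services
+	HostName    string // Hostname to use.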
If not provided and EnableHostname, it will be os.Hostname + EnableHostname bool // Enable prefixing gauge values with hostname + EnableHostnameLabel bool // Enable adding hostname to labels + EnableServiceLabel bool // Enable adding service to labels + EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) + EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") + TimerGranularity time.Duration // Granularity of timers. + ProfileInterval time.Duration // Interval to profile runtime metrics + + AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator + BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator + FilterDefault bool // Whether to allow metrics by default +} + +// Metrics represents an instance of a metrics sink that can +// be used to emit +type Metrics struct { + Config + lastNumGC uint32 + sink MetricSink + filter *iradix.Tree + filterLock sync.RWMutex +} + +// Shared global metrics instance +var globalMetrics atomic.Value // *Metrics + +func init() { + // Initialize to a blackhole sink to avoid errors + globalMetrics.Store(&Metrics{sink: &BlackholeSink{}}) +} + +// DefaultConfig provides a sane default configuration +func DefaultConfig(serviceName string) *Config { + c := &Config{ + ServiceName: serviceName, // Use client provided service + HostName: "", + EnableHostname: true, // Enable hostname prefix + EnableRuntimeMetrics: true, // Enable runtime profiling + EnableTypePrefix: false, // Disable type prefix + TimerGranularity: time.Millisecond, // Timers are in milliseconds + ProfileInterval: time.Second, // Poll runtime every second + FilterDefault: true, // Don't filter metrics by default + } + + // Try to get the hostname + name, _ := os.Hostname() + c.HostName = name + return c +} + +// New is used to create a new instance of Metrics +func New(conf *Config, sink MetricSink) (*Metrics, error) { + met := &Metrics{} + met.Config = *conf + met.sink = sink + met.UpdateFilter(conf.AllowedPrefixes, conf.BlockedPrefixes) + + // Start the runtime collector + if conf.EnableRuntimeMetrics { + go met.collectStats() + } + return met, nil +} + +// NewGlobal is the same as New, but it assigns the metrics object to be +// used globally as well as returning it. 
+func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
+	metrics, err := New(conf, sink)
+	if err == nil {
+		globalMetrics.Store(metrics)
+	}
+	return metrics, err
+}
+
+// Proxy all the methods to the globalMetrics instance
+func SetGauge(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).SetGauge(key, val)
+}
+
+func SetGaugeWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
+}
+
+func EmitKey(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).EmitKey(key, val)
+}
+
+func IncrCounter(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).IncrCounter(key, val)
+}
+
+func IncrCounterWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
+}
+
+func AddSample(key []string, val float32) {
+	globalMetrics.Load().(*Metrics).AddSample(key, val)
+}
+
+func AddSampleWithLabels(key []string, val float32, labels []Label) {
+	globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
+}
+
+func MeasureSince(key []string, start time.Time) {
+	globalMetrics.Load().(*Metrics).MeasureSince(key, start)
+}
+
+func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
+	globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
+}
+
+func UpdateFilter(allow, block []string) {
+	globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
+}
diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go
new file mode 100644
index 0000000000..1bfffce46e
--- /dev/null
+++ b/vendor/github.com/armon/go-metrics/statsd.go
@@ -0,0 +1,184 @@
+package metrics
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"net"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	// statsdMaxLen is the maximum size of a packet
+	// to send to statsd
+	statsdMaxLen = 1400
+)
+
+// StatsdSink provides a MetricSink that can be used
+// with a statsite or statsd metrics server. It uses
+// only UDP packets, while StatsiteSink uses TCP.
+type StatsdSink struct {
+	addr        string
+	metricQueue chan string
+}
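NewGlobal and the package-level proxies above let libraries emit metrics without threading a *Metrics value around. A sketch combining them with the FanoutSink (addresses and names are illustrative; imports as in the earlier sketches):

```go
statsd, err := metrics.NewStatsdSink("127.0.0.1:8125")
if err != nil {
	log.Fatal(err)
}
inm := metrics.NewInmemSink(10*time.Second, time.Minute)

// Every metric goes to both sinks; package-level helpers such as
// metrics.IncrCounter now route through this global instance.
fan := metrics.FanoutSink{statsd, inm}
if _, err := metrics.NewGlobal(metrics.DefaultConfig("demo"), fan); err != nil {
	log.Fatal(err)
}
metrics.IncrCounter([]string{"started"}, 1)
```

+// NewStatsdSinkFromURL creates a StatsdSink from a URL. It is used
+// (and tested) from NewMetricSinkFromURL.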
+func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsdSink(u.Host) +} + +// NewStatsdSink is used to create a new StatsdSink +func NewStatsdSink(addr string) (*StatsdSink, error) { + s := &StatsdSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsd +func (s *StatsdSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsdSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsdSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsdSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsdSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsdSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsdSink) flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Create a buffer + buf := bytes.NewBuffer(nil) + + // Attempt to connect + sock, err = net.Dial("udp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsd! Err: %s", err) + goto WAIT + } + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Check if this would overflow the packet size + if len(metric)+buf.Len() > statsdMaxLen { + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error writing to statsd! Err: %s", err) + goto WAIT + } + } + + // Append to the buffer + buf.WriteString(metric) + + case <-ticker.C: + if buf.Len() == 0 { + continue + } + + _, err := sock.Write(buf.Bytes()) + buf.Reset() + if err != nil { + log.Printf("[ERR] Error flushing to statsd! 
Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go new file mode 100644 index 0000000000..6c0d284d2d --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite.go @@ -0,0 +1,172 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "net/url" + "strings" + "time" +) + +const ( + // We force flush the statsite metrics after this period of + // inactivity. Prevents stats from getting stuck in a buffer + // forever. + flushInterval = 100 * time.Millisecond +) + +// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used +// (and tested) from NewMetricSinkFromURL. +func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) { + return NewStatsiteSink(u.Host) +} + +// StatsiteSink provides a MetricSink that can be used with a +// statsite metrics server +type StatsiteSink struct { + addr string + metricQueue chan string +} + +// NewStatsiteSink is used to create a new StatsiteSink +func NewStatsiteSink(addr string) (*StatsiteSink, error) { + s := &StatsiteSink{ + addr: addr, + metricQueue: make(chan string, 4096), + } + go s.flushMetrics() + return s, nil +} + +// Close is used to stop flushing to statsite +func (s *StatsiteSink) Shutdown() { + close(s.metricQueue) +} + +func (s *StatsiteSink) SetGauge(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) +} + +func (s *StatsiteSink) EmitKey(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounter(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSample(key []string, val float32) { + flatKey := s.flattenKey(key) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) { + flatKey := s.flattenKeyLabels(key, labels) + s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) +} + +// Flattens the key for formatting, removes spaces +func (s *StatsiteSink) flattenKey(parts []string) string { + joined := strings.Join(parts, ".") + return strings.Map(func(r rune) rune { + switch r { + case ':': + fallthrough + case ' ': + return '_' + default: + return r + } + }, joined) +} + +// Flattens the key along with labels for formatting, removes spaces +func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string { + for _, label := range labels { + parts = append(parts, label.Value) + } + return s.flattenKey(parts) +} + +// Does a non-blocking push to the metrics queue +func (s *StatsiteSink) pushMetric(m string) { + select { + case s.metricQueue <- m: + default: + } +} + +// Flushes metrics +func (s *StatsiteSink) 
flushMetrics() { + var sock net.Conn + var err error + var wait <-chan time.Time + var buffered *bufio.Writer + ticker := time.NewTicker(flushInterval) + defer ticker.Stop() + +CONNECT: + // Attempt to connect + sock, err = net.Dial("tcp", s.addr) + if err != nil { + log.Printf("[ERR] Error connecting to statsite! Err: %s", err) + goto WAIT + } + + // Create a buffered writer + buffered = bufio.NewWriter(sock) + + for { + select { + case metric, ok := <-s.metricQueue: + // Get a metric from the queue + if !ok { + goto QUIT + } + + // Try to send to statsite + _, err := buffered.Write([]byte(metric)) + if err != nil { + log.Printf("[ERR] Error writing to statsite! Err: %s", err) + goto WAIT + } + case <-ticker.C: + if err := buffered.Flush(); err != nil { + log.Printf("[ERR] Error flushing to statsite! Err: %s", err) + goto WAIT + } + } + } + +WAIT: + // Wait for a while + wait = time.After(time.Duration(5) * time.Second) + for { + select { + // Dequeue the messages to avoid backlog + case _, ok := <-s.metricQueue: + if !ok { + goto QUIT + } + case <-wait: + goto CONNECT + } + } +QUIT: + s.metricQueue = nil +} diff --git a/vendor/github.com/armon/go-radix/LICENSE b/vendor/github.com/armon/go-radix/LICENSE new file mode 100644 index 0000000000..a5df10e675 --- /dev/null +++ b/vendor/github.com/armon/go-radix/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-radix/radix.go b/vendor/github.com/armon/go-radix/radix.go new file mode 100644 index 0000000000..f9655a126b --- /dev/null +++ b/vendor/github.com/armon/go-radix/radix.go @@ -0,0 +1,543 @@ +package radix + +import ( + "sort" + "strings" +) + +// WalkFn is used when walking the tree. Takes a +// key and value, returning if iteration should +// be terminated. +type WalkFn func(s string, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + key string + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *node +} + +type node struct { + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix string + + // Edges should be stored in-order for iteration. 
+	// We avoid a fully materialized slice to save memory,
+	// since in most cases we expect to be sparse
+	edges edges
+}
+
+func (n *node) isLeaf() bool {
+	return n.leaf != nil
+}
+
+func (n *node) addEdge(e edge) {
+	n.edges = append(n.edges, e)
+	n.edges.Sort()
+}
+
+func (n *node) replaceEdge(e edge) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= e.label
+	})
+	if idx < num && n.edges[idx].label == e.label {
+		n.edges[idx].node = e.node
+		return
+	}
+	panic("replacing missing edge")
+}
+
+func (n *node) getEdge(label byte) *node {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		return n.edges[idx].node
+	}
+	return nil
+}
+
+func (n *node) delEdge(label byte) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	if idx < num && n.edges[idx].label == label {
+		copy(n.edges[idx:], n.edges[idx+1:])
+		n.edges[len(n.edges)-1] = edge{}
+		n.edges = n.edges[:len(n.edges)-1]
+	}
+}
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
+
+// Tree implements a radix tree. This can be treated as a
+// Dictionary abstract data type. The main advantage over
+// a standard hash map is prefix-based lookups and
+// ordered iteration.
+type Tree struct {
+	root *node
+	size int
+}
+
+// New returns an empty Tree
+func New() *Tree {
+	return NewFromMap(nil)
+}
+
+// NewFromMap returns a new tree containing the keys
+// from an existing map
+func NewFromMap(m map[string]interface{}) *Tree {
+	t := &Tree{root: &node{}}
+	for k, v := range m {
+		t.Insert(k, v)
+	}
+	return t
+}
+
+// Len is used to return the number of elements in the tree
+func (t *Tree) Len() int {
+	return t.size
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 string) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
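A quick sketch of the public Tree API defined in this file (keys and values are illustrative; assumes the usual imports, fmt and radix "github.com/armon/go-radix"):

```go
t := radix.New()
t.Insert("api", 1)
t.Insert("api/debug", 2)
t.Insert("web", 3)

if v, ok := t.Get("api/debug"); ok {
	fmt.Println(v) // 2
}

// Longest-prefix match finds the closest registered ancestor.
k, v, _ := t.LongestPrefix("api/debug/trace")
fmt.Println(k, v) // "api/debug" 2
```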
+// Insert is used to add a new entry or update
+// an existing entry. Returns if updated.
+func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) {
+	var parent *node
+	n := t.root
+	search := s
+	for {
+		// Handle key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				old := n.leaf.val
+				n.leaf.val = v
+				return old, true
+			}
+
+			n.leaf = &leafNode{
+				key: s,
+				val: v,
+			}
+			t.size++
+			return nil, false
+		}
+
+		// Look for the edge
+		parent = n
+		n = n.getEdge(search[0])
+
+		// No edge, create one
+		if n == nil {
+			e := edge{
+				label: search[0],
+				node: &node{
+					leaf: &leafNode{
+						key: s,
+						val: v,
+					},
+					prefix: search,
+				},
+			}
+			parent.addEdge(e)
+			t.size++
+			return nil, false
+		}
+
+		// Determine longest prefix of the search key on match
+		commonPrefix := longestPrefix(search, n.prefix)
+		if commonPrefix == len(n.prefix) {
+			search = search[commonPrefix:]
+			continue
+		}
+
+		// Split the node
+		t.size++
+		child := &node{
+			prefix: search[:commonPrefix],
+		}
+		parent.replaceEdge(edge{
+			label: search[0],
+			node:  child,
+		})
+
+		// Restore the existing node
+		child.addEdge(edge{
+			label: n.prefix[commonPrefix],
+			node:  n,
+		})
+		n.prefix = n.prefix[commonPrefix:]
+
+		// Create a new leaf node
+		leaf := &leafNode{
+			key: s,
+			val: v,
+		}
+
+		// If the new key is a subset, add it to this node
+		search = search[commonPrefix:]
+		if len(search) == 0 {
+			child.leaf = leaf
+			return nil, false
+		}
+
+		// Create a new edge for the node
+		child.addEdge(edge{
+			label: search[0],
+			node: &node{
+				leaf:   leaf,
+				prefix: search,
+			},
+		})
+		return nil, false
+	}
+}
+
+// Delete is used to delete a key, returning the previous
+// value and if it was deleted
+func (t *Tree) Delete(s string) (interface{}, bool) {
+	var parent *node
+	var label byte
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if !n.isLeaf() {
+				break
+			}
+			goto DELETE
+		}
+
+		// Look for an edge
+		parent = n
+		label = search[0]
+		n = n.getEdge(label)
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+
+DELETE:
+	// Delete the leaf
+	leaf := n.leaf
+	n.leaf = nil
+	t.size--
+
+	// Check if we should delete this node from the parent
+	if parent != nil && len(n.edges) == 0 {
+		parent.delEdge(label)
+	}
+
+	// Check if we should merge this node
+	if n != t.root && len(n.edges) == 1 {
+		n.mergeChild()
+	}
+
+	// Check if we should merge the parent's other child
+	if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+		parent.mergeChild()
+	}
+
+	return leaf.val, true
+}
+
+// DeletePrefix is used to delete the subtree under a prefix.
+// Returns how many nodes were deleted.
+// Use this to delete large subtrees efficiently.
+func (t *Tree) DeletePrefix(s string) int {
+	return t.deletePrefix(nil, t.root, s)
+}
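Continuing the sketch above, DeletePrefix removes a whole subtree in one call and reports how many entries went away:

```go
t := radix.NewFromMap(map[string]interface{}{
	"job/1": "a", "job/2": "b", "cron/1": "c",
})
n := t.DeletePrefix("job/") // drops both "job/..." entries
fmt.Println(n, t.Len())     // 2 1
```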
+// deletePrefix does a recursive deletion
+func (t *Tree) deletePrefix(parent, n *node, prefix string) int {
+	// Check for key exhaustion
+	if len(prefix) == 0 {
+		// Remove the leaf node
+		subTreeSize := 0
+		// recursively walk from all edges of the node to be deleted
+		recursiveWalk(n, func(s string, v interface{}) bool {
+			subTreeSize++
+			return false
+		})
+		if n.isLeaf() {
+			n.leaf = nil
+		}
+		n.edges = nil // deletes the entire subtree
+
+		// Check if we should merge the parent's other child
+		if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
+			parent.mergeChild()
+		}
+		t.size -= subTreeSize
+		return subTreeSize
+	}
+
+	// Look for an edge
+	label := prefix[0]
+	child := n.getEdge(label)
+	if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
+		return 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(prefix) {
+		prefix = prefix[len(prefix):]
+	} else {
+		prefix = prefix[len(child.prefix):]
+	}
+	return t.deletePrefix(n, child, prefix)
+}
+
+func (n *node) mergeChild() {
+	e := n.edges[0]
+	child := e.node
+	n.prefix = n.prefix + child.prefix
+	n.leaf = child.leaf
+	n.edges = child.edges
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(s string) (interface{}, bool) {
+	n := t.root
+	search := s
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			if n.isLeaf() {
+				return n.leaf.val, true
+			}
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	return nil, false
+}
+
+// LongestPrefix is like Get, but instead of an
+// exact match, it will return the longest prefix match.
+func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) {
+	var last *leafNode
+	n := t.root
+	search := s
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return "", nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (t *Tree) Minimum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return "", nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (t *Tree) Maximum() (string, interface{}, bool) {
+	n := t.root
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		break
+	}
+	return "", nil, false
+}
+
+// Walk is used to walk the tree
+func (t *Tree) Walk(fn WalkFn) {
+	recursiveWalk(t.root, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (t *Tree) WalkPrefix(prefix string, fn WalkFn) {
+	n := t.root
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else if strings.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
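The distinction drawn by the next function's comment is easiest to see side by side; a sketch using the two walks (keys illustrative):

```go
t := radix.NewFromMap(map[string]interface{}{
	"": 0, "api": 1, "api/v1": 2, "api/v1/users": 3,
})

// Entries *under* the prefix: api/v1, api/v1/users
t.WalkPrefix("api/v1", func(k string, v interface{}) bool {
	fmt.Println("under:", k)
	return false
})

// Entries *above* (on the path to) the key: "", api, api/v1, api/v1/users
t.WalkPath("api/v1/users", func(k string, v interface{}) bool {
	fmt.Println("above:", k)
	return false
})
```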
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (t *Tree) WalkPath(path string, fn WalkFn) {
+	n := t.root
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if strings.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively. Returns true if the walk should be aborted
+func recursiveWalk(n *node, fn WalkFn) bool {
+	// Visit the leaf values if any
+	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+		return true
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		if recursiveWalk(e.node, fn) {
+			return true
+		}
+	}
+	return false
+}
+
+// ToMap is used to walk the tree and convert it into a map
+func (t *Tree) ToMap() map[string]interface{} {
+	out := make(map[string]interface{}, t.size)
+	t.Walk(func(k string, v interface{}) bool {
+		out[k] = v
+		return false
+	})
+	return out
+}
diff --git a/vendor/github.com/boltdb/bolt/LICENSE b/vendor/github.com/boltdb/bolt/LICENSE
new file mode 100644
index 0000000000..004e77fe5d
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Ben Johnson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/boltdb/bolt/bolt_386.go b/vendor/github.com/boltdb/bolt/bolt_386.go
new file mode 100644
index 0000000000..820d533c15
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_386.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0x7FFFFFFF // 2GB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0xFFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false
diff --git a/vendor/github.com/boltdb/bolt/bolt_amd64.go b/vendor/github.com/boltdb/bolt/bolt_amd64.go
new file mode 100644
index 0000000000..98fafdb47d
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/bolt_amd64.go
@@ -0,0 +1,10 @@
+package bolt
+
+// maxMapSize represents the largest mmap size supported by Bolt.
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// maxAllocSize is the size used when creating array pointers.
+const maxAllocSize = 0x7FFFFFFF
+
+// Are unaligned load/stores broken on this arch?
+var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_arm.go b/vendor/github.com/boltdb/bolt/bolt_arm.go new file mode 100644 index 0000000000..7e5cb4b941 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm.go @@ -0,0 +1,28 @@ +package bolt + +import "unsafe" + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0xFFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned bool + +func init() { + // Simple check to see whether this arch handles unaligned load/stores + // correctly. + + // ARM9 and older devices require load/stores to be from/to aligned + // addresses. If not, the lower 2 bits are cleared and that address is + // read in a jumbled up order. + + // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html + + raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} + val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) + + brokenUnaligned = val != 0x11222211 +} diff --git a/vendor/github.com/boltdb/bolt/bolt_arm64.go b/vendor/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 0000000000..b26d84f91b --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,12 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_linux.go b/vendor/github.com/boltdb/bolt/bolt_linux.go new file mode 100644 index 0000000000..2b67666140 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_linux.go @@ -0,0 +1,10 @@ +package bolt + +import ( + "syscall" +) + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return syscall.Fdatasync(int(db.file.Fd())) +} diff --git a/vendor/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/github.com/boltdb/bolt/bolt_openbsd.go new file mode 100644 index 0000000000..7058c3d734 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_openbsd.go @@ -0,0 +1,27 @@ +package bolt + +import ( + "syscall" + "unsafe" +) + +const ( + msAsync = 1 << iota // perform asynchronous writes + msSync // perform synchronous writes + msInvalidate // invalidate cached data +) + +func msync(db *DB) error { + _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) + if errno != 0 { + return errno + } + return nil +} + +func fdatasync(db *DB) error { + if db.data != nil { + return msync(db) + } + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc.go b/vendor/github.com/boltdb/bolt/bolt_ppc.go new file mode 100644 index 0000000000..645ddc3edc --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc.go @@ -0,0 +1,9 @@ +// +build ppc + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0x7FFFFFFF // 2GB + +// maxAllocSize is the size used when creating array pointers. 
+const maxAllocSize = 0xFFFFFFF diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/github.com/boltdb/bolt/bolt_ppc64.go new file mode 100644 index 0000000000..9331d9771e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64.go @@ -0,0 +1,12 @@ +// +build ppc64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 0000000000..8c143bc5d1 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_s390x.go b/vendor/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 0000000000..d7c39af925 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF + +// Are unaligned load/stores broken on this arch? +var brokenUnaligned = false diff --git a/vendor/github.com/boltdb/bolt/bolt_unix.go b/vendor/github.com/boltdb/bolt/bolt_unix.go new file mode 100644 index 0000000000..cad62dda1e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix.go @@ -0,0 +1,89 @@ +// +build !windows,!plan9,!solaris + +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + flag := syscall.LOCK_SH + if exclusive { + flag = syscall.LOCK_EX + } + + // Otherwise attempt to obtain an exclusive lock. + err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) + if err == nil { + return nil + } else if err != syscall.EWOULDBLOCK { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. 
+ db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := syscall.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} + +// NOTE: This function is copied from stdlib because it is not available on darwin. +func madvise(b []byte, advice int) (err error) { + _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go new file mode 100644 index 0000000000..307bf2b3ee --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -0,0 +1,90 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Pid = 0 + lock.Whence = 0 + lock.Pid = 0 + if exclusive { + lock.Type = syscall.F_WRLCK + } else { + lock.Type = syscall.F_RDLCK + } + err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/github.com/boltdb/bolt/bolt_windows.go b/vendor/github.com/boltdb/bolt/bolt_windows.go new file mode 100644 index 0000000000..b00fb0720a --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bolt_windows.go @@ -0,0 +1,144 @@ +package bolt + +import ( + "fmt" + "os" + "syscall" + "time" + "unsafe" +) + +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + lockExt = ".lock" + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { + // Create a separate lock file on windows because a process + // cannot share an exclusive lock on the same file. This is + // needed during Tx.WriteTo(). + f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) + if err != nil { + return err + } + db.lockfile = f + + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) + db.lockfile.Close() + os.Remove(db.path + lockExt) + return err +} + +// mmap memory maps a DB's data file. +// Based on: https://github.com/edsrzf/mmap-go +func mmap(db *DB, sz int) error { + if !db.readOnly { + // Truncate the database to the size of the mmap. + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("truncate: %s", err) + } + } + + // Open a file mapping handle. 
+ sizelo := uint32(sz >> 32) + sizehi := uint32(sz) & 0xffffffff + h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) + if h == 0 { + return os.NewSyscallError("CreateFileMapping", errno) + } + + // Create the memory map. + addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) + if addr == 0 { + return os.NewSyscallError("MapViewOfFile", errno) + } + + // Close mapping handle. + if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { + return os.NewSyscallError("CloseHandle", err) + } + + // Convert to a byte array. + db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.datasz = sz + + return nil +} + +// munmap unmaps a pointer from a file. +// Based on: https://github.com/edsrzf/mmap-go +func munmap(db *DB) error { + if db.data == nil { + return nil + } + + addr := (uintptr)(unsafe.Pointer(&db.data[0])) + if err := syscall.UnmapViewOfFile(addr); err != nil { + return os.NewSyscallError("UnmapViewOfFile", err) + } + return nil +} diff --git a/vendor/github.com/boltdb/bolt/boltsync_unix.go b/vendor/github.com/boltdb/bolt/boltsync_unix.go new file mode 100644 index 0000000000..f50442523c --- /dev/null +++ b/vendor/github.com/boltdb/bolt/boltsync_unix.go @@ -0,0 +1,8 @@ +// +build !windows,!plan9,!linux,!openbsd + +package bolt + +// fdatasync flushes written data to a file descriptor. +func fdatasync(db *DB) error { + return db.file.Sync() +} diff --git a/vendor/github.com/boltdb/bolt/bucket.go b/vendor/github.com/boltdb/bolt/bucket.go new file mode 100644 index 0000000000..0c5bf27463 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/bucket.go @@ -0,0 +1,777 @@ +package bolt + +import ( + "bytes" + "fmt" + "unsafe" +) + +const ( + // MaxKeySize is the maximum length of a key, in bytes. + MaxKeySize = 32768 + + // MaxValueSize is the maximum length of a value, in bytes. + MaxValueSize = (1 << 31) - 2 +) + +const ( + maxUint = ^uint(0) + minUint = 0 + maxInt = int(^uint(0) >> 1) + minInt = -maxInt - 1 +) + +const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) + +const ( + minFillPercent = 0.1 + maxFillPercent = 1.0 +) + +// DefaultFillPercent is the percentage that split pages are filled. +// This value can be changed by setting Bucket.FillPercent. +const DefaultFillPercent = 0.5 + +// Bucket represents a collection of key/value pairs inside the database. +type Bucket struct { + *bucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[pgid]*node // node cache + + // Sets the threshold for filling nodes when they split. By default, + // the bucket will fill to 50% but it can be useful to increase this + // amount if you know that your write workloads are mostly append-only. + // + // This is non-persisted across transactions so it must be set in every Tx. + FillPercent float64 +} + +// bucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type bucket struct { + root pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +// newBucket returns a new bucket associated with a transaction. 
+func newBucket(tx *Tx) Bucket { + var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} + if tx.writable { + b.buckets = make(map[string]*Bucket) + b.nodes = make(map[pgid]*node) + } + return b +} + +// Tx returns the tx of the bucket. +func (b *Bucket) Tx() *Tx { + return b.tx +} + +// Root returns the root of the bucket. +func (b *Bucket) Root() pgid { + return b.root +} + +// Writable returns whether the bucket is writable. +func (b *Bucket) Writable() bool { + return b.tx.writable +} + +// Cursor creates a cursor associated with the bucket. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (b *Bucket) Cursor() *Cursor { + // Update transaction statistics. + b.tx.stats.CursorCount++ + + // Allocate and return a cursor. + return &Cursor{ + bucket: b, + stack: make([]elemRef, 0), + } +} + +// Bucket retrieves a nested bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) Bucket(name []byte) *Bucket { + if b.buckets != nil { + if child := b.buckets[string(name)]; child != nil { + return child + } + } + + // Move cursor to key. + c := b.Cursor() + k, v, flags := c.seek(name) + + // Return nil if the key doesn't exist or it is not a bucket. + if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + return nil + } + + // Otherwise create a bucket and cache it. + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(name)] = child + } + + return child +} + +// Helper method that re-interprets a sub-bucket value +// from a parent into a Bucket +func (b *Bucket) openBucket(value []byte) *Bucket { + var child = newBucket(b.tx) + + // If unaligned load/stores are broken on this arch and value is + // unaligned simply clone to an aligned byte array. + unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 + + if unaligned { + value = cloneBytes(value) + } + + // If this is a writable transaction then we need to copy the bucket entry. + // Read-only transactions can point directly at the mmap entry. + if b.tx.writable && !unaligned { + child.bucket = &bucket{} + *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + } else { + child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + } + + // Save a reference to the inline page if the bucket is inline. + if child.root == 0 { + child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + } + + return &child +} + +// CreateBucket creates a new bucket at the given key and returns the new bucket. +// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { + if b.tx.db == nil { + return nil, ErrTxClosed + } else if !b.tx.writable { + return nil, ErrTxNotWritable + } else if len(key) == 0 { + return nil, ErrBucketNameRequired + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key. + if bytes.Equal(key, k) { + if (flags & bucketLeafFlag) != 0 { + return nil, ErrBucketExists + } + return nil, ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + bucket: &bucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, + } + var value = bucket.write() + + // Insert into node. 
+ key = cloneBytes(key) + c.node().put(key, key, value, 0, bucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { + child, err := b.CreateBucket(key) + if err == ErrBucketExists { + return b.Bucket(key), nil + } else if err != nil { + return nil, err + } + return child, nil +} + +// DeleteBucket deletes a bucket at the given key. +// Returns an error if the bucket does not exists, or if the key represents a non-bucket value. +func (b *Bucket) DeleteBucket(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return ErrBucketNotFound + } else if (flags & bucketLeafFlag) == 0 { + return ErrIncompatibleValue + } + + // Recursively delete all child buckets. + child := b.Bucket(key) + err := child.ForEach(func(k, v []byte) error { + if v == nil { + if err := child.DeleteBucket(k); err != nil { + return fmt.Errorf("delete bucket: %s", err) + } + } + return nil + }) + if err != nil { + return err + } + + // Remove cached copy. + delete(b.buckets, string(key)) + + // Release all bucket pages to freelist. + child.nodes = nil + child.rootNode = nil + child.free() + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Get retrieves the value for a key in the bucket. +// Returns a nil value if the key does not exist or if the key is a nested bucket. +// The returned value is only valid for the life of the transaction. +func (b *Bucket) Get(key []byte) []byte { + k, v, flags := b.Cursor().seek(key) + + // Return nil if this is a bucket. + if (flags & bucketLeafFlag) != 0 { + return nil + } + + // If our target node isn't the same key as what's passed in then return nil. + if !bytes.Equal(key, k) { + return nil + } + return v +} + +// Put sets the value for a key in the bucket. +// If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. +// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. +func (b *Bucket) Put(key []byte, value []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } else if len(key) == 0 { + return ErrKeyRequired + } else if len(key) > MaxKeySize { + return ErrKeyTooLarge + } else if int64(len(value)) > MaxValueSize { + return ErrValueTooLarge + } + + // Move cursor to correct position. + c := b.Cursor() + k, _, flags := c.seek(key) + + // Return an error if there is an existing key with a bucket value. + if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Insert into node. 
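+	// The key is cloned so the reference held by the node stays valid for
+	// the rest of the transaction. Note the value is stored as-is, which
+	// is why the doc comment above requires it to remain valid for the
+	// life of the transaction.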
+ key = cloneBytes(key) + c.node().put(key, key, value, 0, 0) + + return nil +} + +// Delete removes a key from the bucket. +// If the key does not exist then nothing is done and a nil error is returned. +// Returns an error if the bucket was created from a read-only transaction. +func (b *Bucket) Delete(key []byte) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + _, _, flags := c.seek(key) + + // Return an error if there is already existing bucket value. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + + // Delete the node if we have a matching key. + c.node().del(key) + + return nil +} + +// Sequence returns the current integer for the bucket without incrementing it. +func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } + +// SetSequence updates the sequence number for the bucket. +func (b *Bucket) SetSequence(v uint64) error { + if b.tx.db == nil { + return ErrTxClosed + } else if !b.Writable() { + return ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence = v + return nil +} + +// NextSequence returns an autoincrementing integer for the bucket. +func (b *Bucket) NextSequence() (uint64, error) { + if b.tx.db == nil { + return 0, ErrTxClosed + } else if !b.Writable() { + return 0, ErrTxNotWritable + } + + // Materialize the root node if it hasn't been already so that the + // bucket will be saved during commit. + if b.rootNode == nil { + _ = b.node(b.root, nil) + } + + // Increment and return the sequence. + b.bucket.sequence++ + return b.bucket.sequence, nil +} + +// ForEach executes a function for each key/value pair in a bucket. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. +func (b *Bucket) ForEach(fn func(k, v []byte) error) error { + if b.tx.db == nil { + return ErrTxClosed + } + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + if err := fn(k, v); err != nil { + return err + } + } + return nil +} + +// Stat returns stats on a bucket. +func (b *Bucket) Stats() BucketStats { + var s, subStats BucketStats + pageSize := b.tx.db.pageSize + s.BucketN += 1 + if b.root == 0 { + s.InlineBucketN += 1 + } + b.forEachPage(func(p *page, depth int) { + if (p.flags & leafPageFlag) != 0 { + s.KeyN += int(p.count) + + // used totals the used bytes for the page + used := pageHeaderSize + + if p.count != 0 { + // If page has any elements, add all element headers. + used += leafPageElementSize * int(p.count-1) + + // Add all element key, value sizes. + // The computation takes advantage of the fact that the position + // of the last element's key/value equals to the total of the sizes + // of all previous elements' keys and values. + // It also includes the last element's header. 
+ lastElement := p.leafPageElement(p.count - 1) + used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) + } + + if b.root == 0 { + // For inlined bucket just update the inline stats + s.InlineBucketInuse += used + } else { + // For non-inlined bucket update all the leaf stats + s.LeafPageN++ + s.LeafInuse += used + s.LeafOverflowN += int(p.overflow) + + // Collect stats from sub-buckets. + // Do that by iterating over all element headers + // looking for the ones with the bucketLeafFlag. + for i := uint16(0); i < p.count; i++ { + e := p.leafPageElement(i) + if (e.flags & bucketLeafFlag) != 0 { + // For any bucket element, open the element value + // and recursively call Stats on the contained bucket. + subStats.Add(b.openBucket(e.value()).Stats()) + } + } + } + } else if (p.flags & branchPageFlag) != 0 { + s.BranchPageN++ + lastElement := p.branchPageElement(p.count - 1) + + // used totals the used bytes for the page + // Add header and all element headers. + used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) + + // Add size of all keys and values. + // Again, use the fact that last element's position equals to + // the total of key, value sizes of all previous elements. + used += int(lastElement.pos + lastElement.ksize) + s.BranchInuse += used + s.BranchOverflowN += int(p.overflow) + } + + // Keep track of maximum page depth. + if depth+1 > s.Depth { + s.Depth = (depth + 1) + } + }) + + // Alloc stats can be computed from page counts and pageSize. + s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize + s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize + + // Add the max depth of sub-buckets to get total nested depth. + s.Depth += subStats.Depth + // Add the stats for all sub-buckets + s.Add(subStats) + return s +} + +// forEachPage iterates over every page in a bucket, including inline pages. +func (b *Bucket) forEachPage(fn func(*page, int)) { + // If we have an inline page then just use that. + if b.page != nil { + fn(b.page, 0) + return + } + + // Otherwise traverse the page hierarchy. + b.tx.forEachPage(b.root, 0, fn) +} + +// forEachPageNode iterates over every page (or node) in a bucket. +// This also includes inline pages. +func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { + // If we have an inline page or root node then just use that. + if b.page != nil { + fn(b.page, nil, 0) + return + } + b._forEachPageNode(b.root, 0, fn) +} + +func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { + var p, n = b.pageNode(pgid) + + // Execute function. + fn(p, n, depth) + + // Recursively loop over children. + if p != nil { + if (p.flags & branchPageFlag) != 0 { + for i := 0; i < int(p.count); i++ { + elem := p.branchPageElement(uint16(i)) + b._forEachPageNode(elem.pgid, depth+1, fn) + } + } + } else { + if !n.isLeaf { + for _, inode := range n.inodes { + b._forEachPageNode(inode.pgid, depth+1, fn) + } + } + } +} + +// spill writes all the nodes for this bucket to dirty pages. +func (b *Bucket) spill() error { + // Spill all child buckets first. + for name, child := range b.buckets { + // If the child bucket is small enough and it has no child buckets then + // write it inline into the parent bucket's page. Otherwise spill it + // like a normal bucket and make the parent value a pointer to the page. + var value []byte + if child.inlineable() { + child.free() + value = child.write() + } else { + if err := child.spill(); err != nil { + return err + } + + // Update the child bucket header in this bucket. 
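+			// The value written for a spilled child is just the bucket
+			// header (root pgid plus sequence); its root field now points
+			// at the child's spilled root page instead of an inline page.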
+ value = make([]byte, unsafe.Sizeof(bucket{})) + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *child.bucket + } + + // Skip writing the bucket if there are no materialized nodes. + if child.rootNode == nil { + continue + } + + // Update parent node. + var c = b.Cursor() + k, _, flags := c.seek([]byte(name)) + if !bytes.Equal([]byte(name), k) { + panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) + } + if flags&bucketLeafFlag == 0 { + panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) + } + c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + } + + // Ignore if there's not a materialized root node. + if b.rootNode == nil { + return nil + } + + // Spill nodes. + if err := b.rootNode.spill(); err != nil { + return err + } + b.rootNode = b.rootNode.root() + + // Update the root node for this bucket. + if b.rootNode.pgid >= b.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + } + b.root = b.rootNode.pgid + + return nil +} + +// inlineable returns true if a bucket is small enough to be written inline +// and if it contains no subbuckets. Otherwise returns false. +func (b *Bucket) inlineable() bool { + var n = b.rootNode + + // Bucket must only contain a single leaf node. + if n == nil || !n.isLeaf { + return false + } + + // Bucket is not inlineable if it contains subbuckets or if it goes beyond + // our threshold for inline bucket size. + var size = pageHeaderSize + for _, inode := range n.inodes { + size += leafPageElementSize + len(inode.key) + len(inode.value) + + if inode.flags&bucketLeafFlag != 0 { + return false + } else if size > b.maxInlineBucketSize() { + return false + } + } + + return true +} + +// Returns the maximum total size of a bucket to make it a candidate for inlining. +func (b *Bucket) maxInlineBucketSize() int { + return b.tx.db.pageSize / 4 +} + +// write allocates and writes a bucket to a byte slice. +func (b *Bucket) write() []byte { + // Allocate the appropriate size. + var n = b.rootNode + var value = make([]byte, bucketHeaderSize+n.size()) + + // Write a bucket header. + var bucket = (*bucket)(unsafe.Pointer(&value[0])) + *bucket = *b.bucket + + // Convert byte slice to a fake page and write the root node. + var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + n.write(p) + + return value +} + +// rebalance attempts to balance all nodes. +func (b *Bucket) rebalance() { + for _, n := range b.nodes { + n.rebalance() + } + for _, child := range b.buckets { + child.rebalance() + } +} + +// node creates a node from a page and associates it with a given parent. +func (b *Bucket) node(pgid pgid, parent *node) *node { + _assert(b.nodes != nil, "nodes map expected") + + // Retrieve node if it's already been created. + if n := b.nodes[pgid]; n != nil { + return n + } + + // Otherwise create a node and cache it. + n := &node{bucket: b, parent: parent} + if parent == nil { + b.rootNode = n + } else { + parent.children = append(parent.children, n) + } + + // Use the inline page if this is an inline bucket. + var p = b.page + if p == nil { + p = b.tx.page(pgid) + } + + // Read the page into the node and cache it. + n.read(p) + b.nodes[pgid] = n + + // Update statistics. + b.tx.stats.NodeCount++ + + return n +} + +// free recursively frees all pages in the bucket. 
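+// Freed pages land on the freelist's pending list under the current
+// transaction id and only become reusable once no open read-only
+// transaction can still observe them (see freelist.release).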
+func (b *Bucket) free() { + if b.root == 0 { + return + } + + var tx = b.tx + b.forEachPageNode(func(p *page, n *node, _ int) { + if p != nil { + tx.db.freelist.free(tx.meta.txid, p) + } else { + n.free() + } + }) + b.root = 0 +} + +// dereference removes all references to the old mmap. +func (b *Bucket) dereference() { + if b.rootNode != nil { + b.rootNode.root().dereference() + } + + for _, child := range b.buckets { + child.dereference() + } +} + +// pageNode returns the in-memory node, if it exists. +// Otherwise returns the underlying page. +func (b *Bucket) pageNode(id pgid) (*page, *node) { + // Inline buckets have a fake page embedded in their value so treat them + // differently. We'll return the rootNode (if available) or the fake page. + if b.root == 0 { + if id != 0 { + panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) + } + if b.rootNode != nil { + return nil, b.rootNode + } + return b.page, nil + } + + // Check the node cache for non-inline buckets. + if b.nodes != nil { + if n := b.nodes[id]; n != nil { + return nil, n + } + } + + // Finally lookup the page from the transaction if no node is materialized. + return b.tx.page(id), nil +} + +// BucketStats records statistics about resources used by a bucket. +type BucketStats struct { + // Page count statistics. + BranchPageN int // number of logical branch pages + BranchOverflowN int // number of physical branch overflow pages + LeafPageN int // number of logical leaf pages + LeafOverflowN int // number of physical leaf overflow pages + + // Tree statistics. + KeyN int // number of keys/value pairs + Depth int // number of levels in B+tree + + // Page size utilization. + BranchAlloc int // bytes allocated for physical branch pages + BranchInuse int // bytes actually used for branch data + LeafAlloc int // bytes allocated for physical leaf pages + LeafInuse int // bytes actually used for leaf data + + // Bucket statistics + BucketN int // total number of buckets including the top bucket + InlineBucketN int // total number on inlined buckets + InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse) +} + +func (s *BucketStats) Add(other BucketStats) { + s.BranchPageN += other.BranchPageN + s.BranchOverflowN += other.BranchOverflowN + s.LeafPageN += other.LeafPageN + s.LeafOverflowN += other.LeafOverflowN + s.KeyN += other.KeyN + if s.Depth < other.Depth { + s.Depth = other.Depth + } + s.BranchAlloc += other.BranchAlloc + s.BranchInuse += other.BranchInuse + s.LeafAlloc += other.LeafAlloc + s.LeafInuse += other.LeafInuse + + s.BucketN += other.BucketN + s.InlineBucketN += other.InlineBucketN + s.InlineBucketInuse += other.InlineBucketInuse +} + +// cloneBytes returns a copy of a given slice. +func cloneBytes(v []byte) []byte { + var clone = make([]byte, len(v)) + copy(clone, v) + return clone +} diff --git a/vendor/github.com/boltdb/bolt/cursor.go b/vendor/github.com/boltdb/bolt/cursor.go new file mode 100644 index 0000000000..1be9f35e3e --- /dev/null +++ b/vendor/github.com/boltdb/bolt/cursor.go @@ -0,0 +1,400 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" +) + +// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order. +// Cursors see nested buckets with value == nil. +// Cursors can be obtained from a transaction and are valid as long as the transaction is open. +// +// Keys and values returned from the cursor are only valid for the life of the transaction. 
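+//
+// A minimal full-scan sketch (bucket name illustrative; assumes it exists):
+//
+//	db.View(func(tx *bolt.Tx) error {
+//		c := tx.Bucket([]byte("widgets")).Cursor()
+//		for k, v := c.First(); k != nil; k, v = c.Next() {
+//			fmt.Printf("key=%s, value=%s\n", k, v)
+//		}
+//		return nil
+//	})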
+// +// Changing data while traversing with a cursor may cause it to be invalidated +// and return unexpected keys and/or values. You must reposition your cursor +// after mutating data. +type Cursor struct { + bucket *Bucket + stack []elemRef +} + +// Bucket returns the bucket that this cursor was created from. +func (c *Cursor) Bucket() *Bucket { + return c.bucket +} + +// First moves the cursor to the first item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) First() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v + +} + +// Last moves the cursor to the last item in the bucket and returns its key and value. +// If the bucket is empty then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Last() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + c.stack = c.stack[:0] + p, n := c.bucket.pageNode(c.bucket.root) + ref := elemRef{page: p, node: n} + ref.index = ref.count() - 1 + c.stack = append(c.stack, ref) + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Next moves the cursor to the next item in the bucket and returns its key and value. +// If the cursor is at the end of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Next() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + k, v, flags := c.next() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Prev moves the cursor to the previous item in the bucket and returns its key and value. +// If the cursor is at the beginning of the bucket then a nil key and value are returned. +// The returned key and value are only valid for the life of the transaction. +func (c *Cursor) Prev() (key []byte, value []byte) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Attempt to move back one element until we're successful. + // Move up the stack as we hit the beginning of each page in our stack. + for i := len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index > 0 { + elem.index-- + break + } + c.stack = c.stack[:i] + } + + // If we've hit the end then return nil. + if len(c.stack) == 0 { + return nil, nil + } + + // Move down the stack to find the last element of the last leaf under this branch. + c.last() + k, v, flags := c.keyValue() + if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. If no keys +// follow, a nil key is returned. +// The returned key and value are only valid for the life of the transaction. 
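+//
+// A typical use is a prefix scan (sketch; bucket name illustrative):
+//
+//	c := tx.Bucket([]byte("widgets")).Cursor()
+//	prefix := []byte("user:")
+//	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+//		fmt.Printf("%s: %s\n", k, v)
+//	}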
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { + k, v, flags := c.seek(seek) + + // If we ended up after the last element of a page then move to the next one. + if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { + k, v, flags = c.next() + } + + if k == nil { + return nil, nil + } else if (flags & uint32(bucketLeafFlag)) != 0 { + return k, nil + } + return k, v +} + +// Delete removes the current key/value under the cursor from the bucket. +// Delete fails if current key/value is a bucket or if the transaction is not writable. +func (c *Cursor) Delete() error { + if c.bucket.tx.db == nil { + return ErrTxClosed + } else if !c.bucket.Writable() { + return ErrTxNotWritable + } + + key, _, flags := c.keyValue() + // Return an error if current value is a bucket. + if (flags & bucketLeafFlag) != 0 { + return ErrIncompatibleValue + } + c.node().del(key) + + return nil +} + +// seek moves the cursor to a given key and returns it. +// If the key does not exist then the next key is used. +func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { + _assert(c.bucket.tx.db != nil, "tx closed") + + // Start from root page/node and traverse to correct page. + c.stack = c.stack[:0] + c.search(seek, c.bucket.root) + ref := &c.stack[len(c.stack)-1] + + // If the cursor is pointing to the end of page/node then return nil. + if ref.index >= ref.count() { + return nil, nil, 0 + } + + // If this is a bucket then return a nil value. + return c.keyValue() +} + +// first moves the cursor to the first leaf element under the last page in the stack. +func (c *Cursor) first() { + for { + // Exit when we hit a leaf page. + var ref = &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the first element to the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) + } +} + +// last moves the cursor to the last leaf element under the last page in the stack. +func (c *Cursor) last() { + for { + // Exit when we hit a leaf page. + ref := &c.stack[len(c.stack)-1] + if ref.isLeaf() { + break + } + + // Keep adding pages pointing to the last element in the stack. + var pgid pgid + if ref.node != nil { + pgid = ref.node.inodes[ref.index].pgid + } else { + pgid = ref.page.branchPageElement(uint16(ref.index)).pgid + } + p, n := c.bucket.pageNode(pgid) + + var nextRef = elemRef{page: p, node: n} + nextRef.index = nextRef.count() - 1 + c.stack = append(c.stack, nextRef) + } +} + +// next moves to the next leaf element and returns the key and value. +// If the cursor is at the last leaf element then it stays there and returns nil. +func (c *Cursor) next() (key []byte, value []byte, flags uint32) { + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } + } + + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } + + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. 
+ c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } +} + +// search recursively performs a binary search against a given page/node until it finds a given key. +func (c *Cursor) search(key []byte, pgid pgid) { + p, n := c.bucket.pageNode(pgid) + if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + } + e := elemRef{page: p, node: n} + c.stack = append(c.stack, e) + + // If we're on a leaf page/node then find the specific node. + if e.isLeaf() { + c.nsearch(key) + return + } + + if n != nil { + c.searchNode(key, n) + return + } + c.searchPage(key, p) +} + +func (c *Cursor) searchNode(key []byte, n *node) { + var exact bool + index := sort.Search(len(n.inodes), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(n.inodes[i].key, key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, n.inodes[index].pgid) +} + +func (c *Cursor) searchPage(key []byte, p *page) { + // Binary search for the correct range. + inodes := p.branchPageElements() + + var exact bool + index := sort.Search(int(p.count), func(i int) bool { + // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. + // sort.Search() finds the lowest index where f() != -1 but we need the highest index. + ret := bytes.Compare(inodes[i].key(), key) + if ret == 0 { + exact = true + } + return ret != -1 + }) + if !exact && index > 0 { + index-- + } + c.stack[len(c.stack)-1].index = index + + // Recursively search to the next page. + c.search(key, inodes[index].pgid) +} + +// nsearch searches the leaf node on the top of the stack for a key. +func (c *Cursor) nsearch(key []byte) { + e := &c.stack[len(c.stack)-1] + p, n := e.page, e.node + + // If we have a node then search its inodes. + if n != nil { + index := sort.Search(len(n.inodes), func(i int) bool { + return bytes.Compare(n.inodes[i].key, key) != -1 + }) + e.index = index + return + } + + // If we have a page then search its leaf elements. + inodes := p.leafPageElements() + index := sort.Search(int(p.count), func(i int) bool { + return bytes.Compare(inodes[i].key(), key) != -1 + }) + e.index = index +} + +// keyValue returns the key and value of the current leaf element. +func (c *Cursor) keyValue() ([]byte, []byte, uint32) { + ref := &c.stack[len(c.stack)-1] + if ref.count() == 0 || ref.index >= ref.count() { + return nil, nil, 0 + } + + // Retrieve value from node. + if ref.node != nil { + inode := &ref.node.inodes[ref.index] + return inode.key, inode.value, inode.flags + } + + // Or retrieve value from page. + elem := ref.page.leafPageElement(uint16(ref.index)) + return elem.key(), elem.value(), elem.flags +} + +// node returns the node that the cursor is currently positioned on. +func (c *Cursor) node() *node { + _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + + // If the top of the stack is a leaf node then just return it. 
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { + return ref.node + } + + // Start from root and traverse down the hierarchy. + var n = c.stack[0].node + if n == nil { + n = c.bucket.node(c.stack[0].page.id, nil) + } + for _, ref := range c.stack[:len(c.stack)-1] { + _assert(!n.isLeaf, "expected branch node") + n = n.childAt(int(ref.index)) + } + _assert(n.isLeaf, "expected leaf node") + return n +} + +// elemRef represents a reference to an element on a given page/node. +type elemRef struct { + page *page + node *node + index int +} + +// isLeaf returns whether the ref is pointing at a leaf page/node. +func (r *elemRef) isLeaf() bool { + if r.node != nil { + return r.node.isLeaf + } + return (r.page.flags & leafPageFlag) != 0 +} + +// count returns the number of inodes or page elements. +func (r *elemRef) count() int { + if r.node != nil { + return len(r.node.inodes) + } + return int(r.page.count) +} diff --git a/vendor/github.com/boltdb/bolt/db.go b/vendor/github.com/boltdb/bolt/db.go new file mode 100644 index 0000000000..f352ff14fe --- /dev/null +++ b/vendor/github.com/boltdb/bolt/db.go @@ -0,0 +1,1039 @@ +package bolt + +import ( + "errors" + "fmt" + "hash/fnv" + "log" + "os" + "runtime" + "runtime/debug" + "strings" + "sync" + "time" + "unsafe" +) + +// The largest step that can be taken when remapping the mmap. +const maxMmapStep = 1 << 30 // 1GB + +// The data file format version. +const version = 2 + +// Represents a marker value to indicate that a file is a Bolt DB. +const magic uint32 = 0xED0CDAED + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// default page size for db is set to the OS page size. +var defaultPageSize = os.Getpagesize() + +// DB represents a collection of buckets persisted to a file on disk. +// All data access is performed through transactions which can be obtained through the DB. +// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. +type DB struct { + // When enabled, the database will perform a Check() after every commit. + // A panic is issued if the database is in an inconsistent state. This + // flag has a large performance impact so it should only be used for + // debugging purposes. + StrictMode bool + + // Setting the NoSync flag will cause the database to skip fsync() + // calls after each commit. This can be useful when bulk loading data + // into a database and you can restart the bulk load in the event of + // a system failure or database corruption. Do not set this flag for + // normal use. + // + // If the package global IgnoreNoSync constant is true, this value is + // ignored. See the comment on that constant for more details. + // + // THIS IS UNSAFE. PLEASE USE WITH CAUTION. + NoSync bool + + // When true, skips the truncate call when growing the database. + // Setting this to true is only safe on non-ext3/ext4 systems. + // Skipping truncation avoids preallocation of hard drive space and + // bypasses a truncate() and fsync() syscall on remapping. 
+ // + // https://github.com/boltdb/bolt/issues/284 + NoGrowSync bool + + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + + // MaxBatchSize is the maximum size of a batch. Default value is + // copied from DefaultMaxBatchSize in Open. + // + // If <=0, disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchSize int + + // MaxBatchDelay is the maximum delay before a batch starts. + // Default value is copied from DefaultMaxBatchDelay in Open. + // + // If <=0, effectively disables batching. + // + // Do not change concurrently with calls to Batch. + MaxBatchDelay time.Duration + + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + + path string + file *os.File + lockfile *os.File // windows only + dataref []byte // mmap'ed readonly, write throws SEGV + data *[maxMapSize]byte + datasz int + filesz int // current on disk file size + meta0 *meta + meta1 *meta + pageSize int + opened bool + rwtx *Tx + txs []*Tx + freelist *freelist + stats Stats + + pagePool sync.Pool + + batchMu sync.Mutex + batch *batch + + rwlock sync.Mutex // Allows only one writer at a time. + metalock sync.Mutex // Protects meta page access. + mmaplock sync.RWMutex // Protects mmap access during remapping. + statlock sync.RWMutex // Protects stats access. + + ops struct { + writeAt func(b []byte, off int64) (n int, err error) + } + + // Read only mode. + // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. + readOnly bool +} + +// Path returns the path to currently open database file. +func (db *DB) Path() string { + return db.path +} + +// GoString returns the Go string representation of the database. +func (db *DB) GoString() string { + return fmt.Sprintf("bolt.DB{path:%q}", db.path) +} + +// String returns the string representation of the database. +func (db *DB) String() string { + return fmt.Sprintf("DB<%q>", db.path) +} + +// Open creates and opens a database at the given path. +// If the file does not exist then it will be created automatically. +// Passing in nil options will cause Bolt to open the database with the default options. +func Open(path string, mode os.FileMode, options *Options) (*DB, error) { + var db = &DB{opened: true} + + // Set default options if no options are provided. + if options == nil { + options = DefaultOptions + } + db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags + + // Set default values for later DB operations. + db.MaxBatchSize = DefaultMaxBatchSize + db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize + + flag := os.O_RDWR + if options.ReadOnly { + flag = os.O_RDONLY + db.readOnly = true + } + + // Open data file and separate sync handler for metadata writes. + db.path = path + var err error + if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { + _ = db.close() + return nil, err + } + + // Lock file so that other processes using Bolt in read-write mode cannot + // use the database at the same time. This would cause corruption since + // the two processes would write meta pages and free pages separately. + // The database file is locked exclusively (only one process can grab the lock) + // if !options.ReadOnly. 
+ // The database file is locked using the shared lock (more than one process may + // hold a lock at the same time) otherwise (options.ReadOnly is set). + if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { + _ = db.close() + return nil, err + } + + // Default values for test hooks + db.ops.writeAt = db.file.WriteAt + + // Initialize the database if it doesn't exist. + if info, err := db.file.Stat(); err != nil { + return nil, err + } else if info.Size() == 0 { + // Initialize new files with meta pages. + if err := db.init(); err != nil { + return nil, err + } + } else { + // Read the first meta page to determine the page size. + var buf [0x1000]byte + if _, err := db.file.ReadAt(buf[:], 0); err == nil { + m := db.pageInBuffer(buf[:], 0).meta() + if err := m.validate(); err != nil { + // If we can't read the page size, we can assume it's the same + // as the OS -- since that's how the page size was chosen in the + // first place. + // + // If the first page is invalid and this OS uses a different + // page size than what the database was created with then we + // are out of luck and cannot access the database. + db.pageSize = os.Getpagesize() + } else { + db.pageSize = int(m.pageSize) + } + } + } + + // Initialize page pool. + db.pagePool = sync.Pool{ + New: func() interface{} { + return make([]byte, db.pageSize) + }, + } + + // Memory map the data file. + if err := db.mmap(options.InitialMmapSize); err != nil { + _ = db.close() + return nil, err + } + + // Read in the freelist. + db.freelist = newFreelist() + db.freelist.read(db.page(db.meta().freelist)) + + // Mark the database as opened and return. + return db, nil +} + +// mmap opens the underlying memory-mapped file and initializes the meta references. +// minsz is the minimum size that the new mmap can be. +func (db *DB) mmap(minsz int) error { + db.mmaplock.Lock() + defer db.mmaplock.Unlock() + + info, err := db.file.Stat() + if err != nil { + return fmt.Errorf("mmap stat error: %s", err) + } else if int(info.Size()) < db.pageSize*2 { + return fmt.Errorf("file size too small") + } + + // Ensure the size is at least the minimum size. + var size = int(info.Size()) + if size < minsz { + size = minsz + } + size, err = db.mmapSize(size) + if err != nil { + return err + } + + // Dereference all mmap references before unmapping. + if db.rwtx != nil { + db.rwtx.root.dereference() + } + + // Unmap existing data before continuing. + if err := db.munmap(); err != nil { + return err + } + + // Memory-map the data file as a byte slice. + if err := mmap(db, size); err != nil { + return err + } + + // Save references to the meta pages. + db.meta0 = db.page(0).meta() + db.meta1 = db.page(1).meta() + + // Validate the meta pages. We only return an error if both meta pages fail + // validation, since meta0 failing validation means that it wasn't saved + // properly -- but we can recover using meta1. And vice-versa. + err0 := db.meta0.validate() + err1 := db.meta1.validate() + if err0 != nil && err1 != nil { + return err0 + } + + return nil +} + +// munmap unmaps the data file from memory. +func (db *DB) munmap() error { + if err := munmap(db); err != nil { + return fmt.Errorf("unmap error: " + err.Error()) + } + return nil +} + +// mmapSize determines the appropriate size for the mmap given the current size +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. +// Returns an error if the new mmap size is greater than the max allowed. 
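+// For example, a 100KB database maps to the next power of two, 128KB, while
+// (on 64-bit platforms, where maxMapSize allows it) a 2.5GB database is
+// rounded up to the next 1GB step, 3GB.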
+func (db *DB) mmapSize(size int) (int, error) {
+	// Double the size from 32KB until 1GB.
+	for i := uint(15); i <= 30; i++ {
+		if size <= 1<<i {
+			return 1 << i, nil
+		}
+	}
+
+	// Verify the requested size is not above the maximum allowed.
+	if size > maxMapSize {
+		return 0, fmt.Errorf("mmap too large")
+	}
+
+	// If larger than 1GB then grow by 1GB at a time.
+	sz := int64(size)
+	if remainder := sz % int64(maxMmapStep); remainder > 0 {
+		sz += int64(maxMmapStep) - remainder
+	}
+
+	// Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in MBs.
+	pageSize := int64(db.pageSize)
+	if (sz % pageSize) != 0 {
+		sz = ((sz / pageSize) + 1) * pageSize
+	}
+
+	// If we've exceeded the max size then only grow up to the max size.
+	if sz > maxMapSize {
+		sz = maxMapSize
+	}
+
+	return int(sz), nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+	// Set the page size to the OS page size.
+	db.pageSize = os.Getpagesize()
+
+	// Create two meta pages on a buffer.
+	buf := make([]byte, db.pageSize*4)
+	for i := 0; i < 2; i++ {
+		p := db.pageInBuffer(buf[:], pgid(i))
+		p.id = pgid(i)
+		p.flags = metaPageFlag
+
+		// Initialize the meta page.
+		m := p.meta()
+		m.magic = magic
+		m.version = version
+		m.pageSize = uint32(db.pageSize)
+		m.freelist = 2
+		m.root = bucket{root: 3}
+		m.pgid = 4
+		m.txid = txid(i)
+		m.checksum = m.sum64()
+	}
+
+	// Write an empty freelist at page 3.
+	p := db.pageInBuffer(buf[:], pgid(2))
+	p.id = pgid(2)
+	p.flags = freelistPageFlag
+	p.count = 0
+
+	// Write an empty leaf page at page 4.
+	p = db.pageInBuffer(buf[:], pgid(3))
+	p.id = pgid(3)
+	p.flags = leafPageFlag
+	p.count = 0
+
+	// Write the buffer to our data file.
+	if _, err := db.ops.writeAt(buf, 0); err != nil {
+		return err
+	}
+	if err := fdatasync(db); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Close releases all database resources.
+// All transactions must be closed before closing the database.
+func (db *DB) Close() error {
+	db.rwlock.Lock()
+	defer db.rwlock.Unlock()
+
+	db.metalock.Lock()
+	defer db.metalock.Unlock()
+
+	db.mmaplock.RLock()
+	defer db.mmaplock.RUnlock()
+
+	return db.close()
+}
+
+func (db *DB) close() error {
+	if !db.opened {
+		return nil
+	}
+
+	db.opened = false
+
+	db.freelist = nil
+
+	// Clear ops.
+	db.ops.writeAt = nil
+
+	// Close the mmap.
+	if err := db.munmap(); err != nil {
+		return err
+	}
+
+	// Close file handles.
+	if db.file != nil {
+		// No need to unlock read-only file.
+		if !db.readOnly {
+			// Unlock the file.
+			if err := funlock(db); err != nil {
+				log.Printf("bolt.Close(): funlock error: %s", err)
+			}
+		}
+
+		// Close the file descriptor.
+		if err := db.file.Close(); err != nil {
+			return fmt.Errorf("db file close: %s", err)
+		}
+		db.file = nil
+	}
+
+	db.path = ""
+	return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
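+//
+// Manually managed transactions must end in Commit or Rollback on every
+// path; a common shape (sketch) is:
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//
+//	// ... use tx ...
+//
+//	return tx.Commit()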
+// +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// +// IMPORTANT: You must close read-only transactions after you are finished or +// else the database will not reclaim old pages. +func (db *DB) Begin(writable bool) (*Tx, error) { + if writable { + return db.beginRWTx() + } + return db.beginTx() +} + +func (db *DB) beginTx() (*Tx, error) { + // Lock the meta pages while we initialize the transaction. We obtain + // the meta lock before the mmap lock because that's the order that the + // write transaction will obtain them. + db.metalock.Lock() + + // Obtain a read-only lock on the mmap. When the mmap is remapped it will + // obtain a write lock so all transactions must finish before it can be + // remapped. + db.mmaplock.RLock() + + // Exit if the database is not open yet. + if !db.opened { + db.mmaplock.RUnlock() + db.metalock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{} + t.init(db) + + // Keep track of transaction until it closes. + db.txs = append(db.txs, t) + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Update the transaction stats. + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN = n + db.statlock.Unlock() + + return t, nil +} + +func (db *DB) beginRWTx() (*Tx, error) { + // If the database was opened with Options.ReadOnly, return an error. + if db.readOnly { + return nil, ErrDatabaseReadOnly + } + + // Obtain writer lock. This is released by the transaction when it closes. + // This enforces only one writer transaction at a time. + db.rwlock.Lock() + + // Once we have the writer lock then we can lock the meta pages so that + // we can set up the transaction. + db.metalock.Lock() + defer db.metalock.Unlock() + + // Exit if the database is not open yet. + if !db.opened { + db.rwlock.Unlock() + return nil, ErrDatabaseNotOpen + } + + // Create a transaction associated with the database. + t := &Tx{writable: true} + t.init(db) + db.rwtx = t + + // Free any pages associated with closed read-only transactions. + var minid txid = 0xFFFFFFFFFFFFFFFF + for _, t := range db.txs { + if t.meta.txid < minid { + minid = t.meta.txid + } + } + if minid > 0 { + db.freelist.release(minid - 1) + } + + return t, nil +} + +// removeTx removes a transaction from the database. +func (db *DB) removeTx(tx *Tx) { + // Release the read lock on the mmap. + db.mmaplock.RUnlock() + + // Use the meta lock to restrict access to the DB object. + db.metalock.Lock() + + // Remove the transaction. + for i, t := range db.txs { + if t == tx { + last := len(db.txs) - 1 + db.txs[i] = db.txs[last] + db.txs[last] = nil + db.txs = db.txs[:last] + break + } + } + n := len(db.txs) + + // Unlock the meta pages. + db.metalock.Unlock() + + // Merge statistics. + db.statlock.Lock() + db.stats.OpenTxN = n + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() +} + +// Update executes a function within the context of a read-write managed transaction. +// If no error is returned from the function then the transaction is committed. +// If an error is returned then the entire transaction is rolled back. +// Any error that is returned from the function or returned from the commit is +// returned from the Update() method. +// +// Attempting to manually commit or rollback within the function will cause a panic. 
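+//
+// A minimal usage sketch (bucket name illustrative):
+//
+//	err := db.Update(func(tx *bolt.Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err
+//		}
+//		return b.Put([]byte("answer"), []byte("42"))
+//	})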
+func (db *DB) Update(fn func(*Tx) error) error { + t, err := db.Begin(true) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually commit. + t.managed = true + + // If an error is returned from the function then rollback and return error. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + return t.Commit() +} + +// View executes a function within the context of a managed read-only transaction. +// Any error that is returned from the function is returned from the View() method. +// +// Attempting to manually rollback within the function will cause a panic. +func (db *DB) View(fn func(*Tx) error) error { + t, err := db.Begin(false) + if err != nil { + return err + } + + // Make sure the transaction rolls back in the event of a panic. + defer func() { + if t.db != nil { + t.rollback() + } + }() + + // Mark as a managed tx so that the inner function cannot manually rollback. + t.managed = true + + // If an error is returned from the function then pass it through. + err = fn(t) + t.managed = false + if err != nil { + _ = t.Rollback() + return err + } + + if err := t.Rollback(); err != nil { + return err + } + + return nil +} + +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. +func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. 
it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + +// Sync executes fdatasync() against the database file handle. +// +// This is not necessary under normal operation, however, if you use NoSync +// then it allows you to force the database file to sync against the disk. +func (db *DB) Sync() error { return fdatasync(db) } + +// Stats retrieves ongoing performance stats for the database. +// This is only updated when a transaction closes. +func (db *DB) Stats() Stats { + db.statlock.RLock() + defer db.statlock.RUnlock() + return db.stats +} + +// This is for internal access to the raw data bytes from the C cursor, use +// carefully, or not at all. +func (db *DB) Info() *Info { + return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} +} + +// page retrieves a page reference from the mmap based on the current page size. +func (db *DB) page(id pgid) *page { + pos := id * pgid(db.pageSize) + return (*page)(unsafe.Pointer(&db.data[pos])) +} + +// pageInBuffer retrieves a page reference from a given byte array based on the current page size. +func (db *DB) pageInBuffer(b []byte, id pgid) *page { + return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +} + +// meta retrieves the current meta page reference. +func (db *DB) meta() *meta { + // We have to return the meta with the highest txid which doesn't fail + // validation. Otherwise, we can cause errors when in fact the database is + // in a consistent state. metaA is the one with the higher txid. + metaA := db.meta0 + metaB := db.meta1 + if db.meta1.txid > db.meta0.txid { + metaA = db.meta1 + metaB = db.meta0 + } + + // Use higher meta page if valid. Otherwise fallback to previous, if valid. + if err := metaA.validate(); err == nil { + return metaA + } else if err := metaB.validate(); err == nil { + return metaB + } + + // This should never be reached, because both meta1 and meta0 were validated + // on mmap() and we do fsync() on every write. + panic("bolt.DB.meta(): invalid meta pages") +} + +// allocate returns a contiguous block of memory starting at a given page. +func (db *DB) allocate(count int) (*page, error) { + // Allocate a temporary buffer for the page. + var buf []byte + if count == 1 { + buf = db.pagePool.Get().([]byte) + } else { + buf = make([]byte, count*db.pageSize) + } + p := (*page)(unsafe.Pointer(&buf[0])) + p.overflow = uint32(count - 1) + + // Use pages from the freelist if they are available. 
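+	// (freelist.allocate returns 0 when no contiguous run of count pages
+	// is free; in that case the pages are carved out past the current
+	// high water mark below.)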
+ if p.id = db.freelist.allocate(count); p.id != 0 { + return p, nil + } + + // Resize mmap() if we're at the end. + p.id = db.rwtx.meta.pgid + var minsz = int((p.id+pgid(count))+1) * db.pageSize + if minsz >= db.datasz { + if err := db.mmap(minsz); err != nil { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } + } + + // Move the page id high water mark. + db.rwtx.meta.pgid += pgid(count) + + return p, nil +} + +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if runtime.GOOS != "windows" { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + +func (db *DB) IsReadOnly() bool { + return db.readOnly +} + +// Options represents the options that can be set when opening a database. +type Options struct { + // Timeout is the amount of time to wait to obtain a file lock. + // When set to zero it will wait indefinitely. This option is only + // available on Darwin and Linux. + Timeout time.Duration + + // Sets the DB.NoGrowSync flag before memory mapping the file. + NoGrowSync bool + + // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to + // grab a shared lock (UNIX). + ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. + InitialMmapSize int +} + +// DefaultOptions represent the options used if nil options are passed into Open(). +// No timeout is used which will cause Bolt to wait indefinitely for a lock. +var DefaultOptions = &Options{ + Timeout: 0, + NoGrowSync: false, +} + +// Stats represents statistics about the database. +type Stats struct { + // Freelist stats + FreePageN int // total number of free pages on the freelist + PendingPageN int // total number of pending pages on the freelist + FreeAlloc int // total bytes allocated in free pages + FreelistInuse int // total bytes used by the freelist + + // Transaction stats + TxN int // total number of started read transactions + OpenTxN int // number of currently open read transactions + + TxStats TxStats // global, ongoing stats. +} + +// Sub calculates and returns the difference between two sets of database stats. +// This is useful when obtaining stats at two different points and time and +// you need the performance counters that occurred within that time span. 
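+//
+// For example (sketch), to report read transactions started over a
+// ten-second window:
+//
+//	prev := db.Stats()
+//	time.Sleep(10 * time.Second)
+//	diff := db.Stats().Sub(&prev)
+//	fmt.Printf("read txs started: %d\n", diff.TxN)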
+func (s *Stats) Sub(other *Stats) Stats { + if other == nil { + return *s + } + var diff Stats + diff.FreePageN = s.FreePageN + diff.PendingPageN = s.PendingPageN + diff.FreeAlloc = s.FreeAlloc + diff.FreelistInuse = s.FreelistInuse + diff.TxN = s.TxN - other.TxN + diff.TxStats = s.TxStats.Sub(&other.TxStats) + return diff +} + +func (s *Stats) add(other *Stats) { + s.TxStats.add(&other.TxStats) +} + +type Info struct { + Data uintptr + PageSize int +} + +type meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root bucket + freelist pgid + pgid pgid + txid txid + checksum uint64 +} + +// validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *meta) validate() error { + if m.magic != magic { + return ErrInvalid + } else if m.version != version { + return ErrVersionMismatch + } else if m.checksum != 0 && m.checksum != m.sum64() { + return ErrChecksum + } + return nil +} + +// copy copies one meta object to another. +func (m *meta) copy(dest *meta) { + *dest = *m +} + +// write writes the meta onto a page. +func (m *meta) write(p *page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid { + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = pgid(m.txid % 2) + p.flags |= metaPageFlag + + // Calculate the checksum. + m.checksum = m.sum64() + + m.copy(p.meta()) +} + +// generates the checksum for the meta. +func (m *meta) sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +// _assert will panic with a given formatted message if the given condition is false. +func _assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } +func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } + +func printstack() { + stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") + fmt.Fprintln(os.Stderr, stack) +} diff --git a/vendor/github.com/boltdb/bolt/doc.go b/vendor/github.com/boltdb/bolt/doc.go new file mode 100644 index 0000000000..cc937845db --- /dev/null +++ b/vendor/github.com/boltdb/bolt/doc.go @@ -0,0 +1,44 @@ +/* +Package bolt implements a low-level key/value store in pure Go. It supports +fully serializable transactions, ACID semantics, and lock-free MVCC with +multiple readers and a single writer. Bolt can be used for projects that +want a simple data store without the need to add large dependencies such as +Postgres or MySQL. + +Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is +optimized for fast read access and does not require recovery in the event of a +system crash. Transactions which have not finished committing will simply be +rolled back in the event of a crash. + +The design of Bolt is based on Howard Chu's LMDB database project. + +Bolt currently works on Windows, Mac OS X, and Linux. + + +Basics + +There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is +a collection of buckets and is represented by a single file on disk. A bucket is +a collection of unique keys that are associated with values. 
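+
+A minimal sketch of opening a database (path and file mode illustrative):
+
+	db, err := bolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()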
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
+
+
+Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database; however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory, which will cause a panic.
+
+
+*/
+package bolt
diff --git a/vendor/github.com/boltdb/bolt/errors.go b/vendor/github.com/boltdb/bolt/errors.go
new file mode 100644
index 0000000000..a3620a3ebb
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/errors.go
@@ -0,0 +1,71 @@
+package bolt
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+	// is opened or after it is closed.
+	ErrDatabaseNotOpen = errors.New("database not open")
+
+	// ErrDatabaseOpen is returned when opening a database that is
+	// already open.
+	ErrDatabaseOpen = errors.New("database already open")
+
+	// ErrInvalid is returned when both meta pages on a database are invalid.
+	// This typically occurs when a file is not a bolt database.
+	ErrInvalid = errors.New("invalid database")
+
+	// ErrVersionMismatch is returned when the data file was created with a
+	// different version of Bolt.
+	ErrVersionMismatch = errors.New("version mismatch")
+
+	// ErrChecksum is returned when either meta page checksum does not match.
+	ErrChecksum = errors.New("checksum error")
+
+	// ErrTimeout is returned when a database cannot obtain an exclusive lock
+	// on the data file after the timeout passed to Open().
+	ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+	// ErrTxNotWritable is returned when performing a write operation on a
+	// read-only transaction.
+	ErrTxNotWritable = errors.New("tx not writable")
+
+	// ErrTxClosed is returned when committing or rolling back a transaction
+	// that has already been committed or rolled back.
+	ErrTxClosed = errors.New("tx closed")
+
+	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+	// read-only database.
+	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+	// ErrBucketNotFound is returned when trying to access a bucket that has
+	// not been created yet.
+	ErrBucketNotFound = errors.New("bucket not found")
+
+	// ErrBucketExists is returned when creating a bucket that already exists.
+	ErrBucketExists = errors.New("bucket already exists")
+
+	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
+	ErrBucketNameRequired = errors.New("bucket name required")
+
+	// ErrKeyRequired is returned when inserting a zero-length key.
+	ErrKeyRequired = errors.New("key required")
+
+	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
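+	// (MaxKeySize is defined in bucket.go as 32768 bytes.)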
+	ErrKeyTooLarge = errors.New("key too large")
+
+	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+	ErrValueTooLarge = errors.New("value too large")
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+)
diff --git a/vendor/github.com/boltdb/bolt/freelist.go b/vendor/github.com/boltdb/bolt/freelist.go
new file mode 100644
index 0000000000..aba48f58c6
--- /dev/null
+++ b/vendor/github.com/boltdb/bolt/freelist.go
@@ -0,0 +1,252 @@
+package bolt
+
+import (
+	"fmt"
+	"sort"
+	"unsafe"
+)
+
+// freelist represents a list of all pages that are available for allocation.
+// It also tracks pages that have been freed but are still in use by open transactions.
+type freelist struct {
+	ids     []pgid          // all free and available free page ids.
+	pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
+	cache   map[pgid]bool   // fast lookup of all free and pending page ids.
+}
+
+// newFreelist returns an empty, initialized freelist.
+func newFreelist() *freelist {
+	return &freelist{
+		pending: make(map[txid][]pgid),
+		cache:   make(map[pgid]bool),
+	}
+}
+
+// size returns the size of the page after serialization.
+func (f *freelist) size() int {
+	n := f.count()
+	if n >= 0xFFFF {
+		// The first element will be used to store the count. See freelist.write.
+		n++
+	}
+	return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
+}
+
+// count returns count of pages on the freelist
+func (f *freelist) count() int {
+	return f.free_count() + f.pending_count()
+}
+
+// free_count returns count of free pages
+func (f *freelist) free_count() int {
+	return len(f.ids)
+}
+
+// pending_count returns count of pending pages
+func (f *freelist) pending_count() int {
+	var count int
+	for _, list := range f.pending {
+		count += len(list)
+	}
+	return count
+}
+
+// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
+// f.count returns the minimum length required for dst.
+func (f *freelist) copyall(dst []pgid) {
+	m := make(pgids, 0, f.pending_count())
+	for _, list := range f.pending {
+		m = append(m, list...)
+	}
+	sort.Sort(m)
+	mergepgids(dst, f.ids, m)
+}
+
+// allocate returns the starting page id of a contiguous list of pages of a given size.
+// If a contiguous block cannot be found then 0 is returned.
+func (f *freelist) allocate(n int) pgid {
+	if len(f.ids) == 0 {
+		return 0
+	}
+
+	var initial, previd pgid
+	for i, id := range f.ids {
+		if id <= 1 {
+			panic(fmt.Sprintf("invalid page allocation: %d", id))
+		}
+
+		// Reset initial page if this is not contiguous.
+		if previd == 0 || id-previd != 1 {
+			initial = id
+		}
+
+		// If we found a contiguous block then remove it and return it.
+		if (id-initial)+1 == pgid(n) {
+			// If we're allocating off the beginning then take the fast path
+			// and just adjust the existing slice. This will use extra memory
+			// temporarily but the append() in free() will realloc the slice
+			// as is necessary.
+			if (i + 1) == n {
+				f.ids = f.ids[i+1:]
+			} else {
+				copy(f.ids[i-n+1:], f.ids[i+1:])
+				f.ids = f.ids[:len(f.ids)-n]
+			}
+
+			// Remove from the free cache.
+			for i := pgid(0); i < pgid(n); i++ {
+				delete(f.cache, initial+i)
+			}
+
+			return initial
+		}
+
+		previd = id
+	}
+	return 0
+}
+
+// free releases a page and its overflow for a given transaction id.
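+// Freed pages are queued on the pending list for txid and only become
+// reusable after release() is called, once no open transaction can still
+// observe them.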
+// If the page is already free then a panic will occur.
+func (f *freelist) free(txid txid, p *page) {
+	if p.id <= 1 {
+		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
+	}
+
+	// Free page and all its overflow pages.
+	var ids = f.pending[txid]
+	for id := p.id; id <= p.id+pgid(p.overflow); id++ {
+		// Verify that page is not already free.
+		if f.cache[id] {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
+
+		// Add to the freelist and cache.
+		ids = append(ids, id)
+		f.cache[id] = true
+	}
+	f.pending[txid] = ids
+}
+
+// release moves all page ids for a transaction id (or older) to the freelist.
+func (f *freelist) release(txid txid) {
+	m := make(pgids, 0)
+	for tid, ids := range f.pending {
+		if tid <= txid {
+			// Move transaction's pending pages to the available freelist.
+			// Don't remove from the cache since the page is still free.
+			m = append(m, ids...)
+			delete(f.pending, tid)
+		}
+	}
+	sort.Sort(m)
+	f.ids = pgids(f.ids).merge(m)
+}
+
+// rollback removes the pages from a given pending tx.
+func (f *freelist) rollback(txid txid) {
+	// Remove page ids from cache.
+	for _, id := range f.pending[txid] {
+		delete(f.cache, id)
+	}
+
+	// Remove pages from pending list.
+	delete(f.pending, txid)
+}
+
+// freed returns whether a given page is in the free list.
+func (f *freelist) freed(pgid pgid) bool {
+	return f.cache[pgid]
+}
+
+// read initializes the freelist from a freelist page.
+func (f *freelist) read(p *page) {
+	// If the page.count is at the max uint16 value (64k) then it's considered
+	// an overflow and the size of the freelist is stored as the first element.
+	idx, count := 0, int(p.count)
+	if count == 0xFFFF {
+		idx = 1
+		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
+	}
+
+	// Copy the list of page ids from the freelist.
+	if count == 0 {
+		f.ids = nil
+	} else {
+		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
+		f.ids = make([]pgid, len(ids))
+		copy(f.ids, ids)
+
+		// Make sure they're sorted.
+		sort.Sort(pgids(f.ids))
+	}
+
+	// Rebuild the page cache.
+	f.reindex()
+}
+
+// write writes the page ids onto a freelist page. All free and pending ids are
+// saved to disk since in the event of a program crash, all pending ids will
+// become free.
+func (f *freelist) write(p *page) error {
+	// Combine the old free pgids and pgids waiting on an open transaction.
+
+	// Update the header flag.
+	p.flags |= freelistPageFlag
+
+	// The page.count can only hold up to 64k elements so if we overflow that
+	// number then we handle it by putting the size in the first element.
+	lenids := f.count()
+	if lenids == 0 {
+		p.count = uint16(lenids)
+	} else if lenids < 0xFFFF {
+		p.count = uint16(lenids)
+		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
+	} else {
+		p.count = 0xFFFF
+		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
+	}
+
+	return nil
+}
+
+// reload reads the freelist from a page and filters out pending items.
+func (f *freelist) reload(p *page) {
+	f.read(p)
+
+	// Build a cache of only pending pages.
+	pcache := make(map[pgid]bool)
+	for _, pendingIDs := range f.pending {
+		for _, pendingID := range pendingIDs {
+			pcache[pendingID] = true
+		}
+	}
+
+	// Check each page in the freelist and build a new available freelist
+	// with any pages not in the pending lists.
+ var a []pgid + for _, id := range f.ids { + if !pcache[id] { + a = append(a, id) + } + } + f.ids = a + + // Once the available list is rebuilt then rebuild the free cache so that + // it includes the available and pending free pages. + f.reindex() +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (f *freelist) reindex() { + f.cache = make(map[pgid]bool, len(f.ids)) + for _, id := range f.ids { + f.cache[id] = true + } + for _, pendingIDs := range f.pending { + for _, pendingID := range pendingIDs { + f.cache[pendingID] = true + } + } +} diff --git a/vendor/github.com/boltdb/bolt/node.go b/vendor/github.com/boltdb/bolt/node.go new file mode 100644 index 0000000000..159318b229 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/node.go @@ -0,0 +1,604 @@ +package bolt + +import ( + "bytes" + "fmt" + "sort" + "unsafe" +) + +// node represents an in-memory, deserialized page. +type node struct { + bucket *Bucket + isLeaf bool + unbalanced bool + spilled bool + key []byte + pgid pgid + parent *node + children nodes + inodes inodes +} + +// root returns the top-level node this node is attached to. +func (n *node) root() *node { + if n.parent == nil { + return n + } + return n.parent.root() +} + +// minKeys returns the minimum number of inodes this node should have. +func (n *node) minKeys() int { + if n.isLeaf { + return 1 + } + return 2 +} + +// size returns the size of the node after serialization. +func (n *node) size() int { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + } + return sz +} + +// sizeLessThan returns true if the node is less than a given size. +// This is an optimization to avoid calculating a large node when we only need +// to know if it fits inside a certain page size. +func (n *node) sizeLessThan(v int) bool { + sz, elsz := pageHeaderSize, n.pageElementSize() + for i := 0; i < len(n.inodes); i++ { + item := &n.inodes[i] + sz += elsz + len(item.key) + len(item.value) + if sz >= v { + return false + } + } + return true +} + +// pageElementSize returns the size of each page element based on the type of node. +func (n *node) pageElementSize() int { + if n.isLeaf { + return leafPageElementSize + } + return branchPageElementSize +} + +// childAt returns the child node at a given index. +func (n *node) childAt(index int) *node { + if n.isLeaf { + panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) + } + return n.bucket.node(n.inodes[index].pgid, n) +} + +// childIndex returns the index of a given child node. +func (n *node) childIndex(child *node) int { + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + return index +} + +// numChildren returns the number of children. +func (n *node) numChildren() int { + return len(n.inodes) +} + +// nextSibling returns the next node with the same parent. +func (n *node) nextSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index >= n.parent.numChildren()-1 { + return nil + } + return n.parent.childAt(index + 1) +} + +// prevSibling returns the previous node with the same parent. +func (n *node) prevSibling() *node { + if n.parent == nil { + return nil + } + index := n.parent.childIndex(n) + if index == 0 { + return nil + } + return n.parent.childAt(index - 1) +} + +// put inserts a key/value. 
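+// oldKey locates the existing entry (or the insertion point) while newKey is
+// what actually gets stored; the two differ when spill() re-keys a node by
+// its first inode.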
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { + if pgid >= n.bucket.tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) + } else if len(oldKey) <= 0 { + panic("put: zero-length old key") + } else if len(newKey) <= 0 { + panic("put: zero-length new key") + } + + // Find insertion index. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + + // Add capacity and shift nodes if we don't have an exact match and need to insert. + exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + if !exact { + n.inodes = append(n.inodes, inode{}) + copy(n.inodes[index+1:], n.inodes[index:]) + } + + inode := &n.inodes[index] + inode.flags = flags + inode.key = newKey + inode.value = value + inode.pgid = pgid + _assert(len(inode.key) > 0, "put: zero-length inode key") +} + +// del removes a key from the node. +func (n *node) del(key []byte) { + // Find index of key. + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + + // Exit if the key isn't found. + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + return + } + + // Delete inode from the node. + n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) + + // Mark the node as needing rebalancing. + n.unbalanced = true +} + +// read initializes the node from a page. +func (n *node) read(p *page) { + n.pgid = p.id + n.isLeaf = ((p.flags & leafPageFlag) != 0) + n.inodes = make(inodes, int(p.count)) + + for i := 0; i < int(p.count); i++ { + inode := &n.inodes[i] + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + inode.flags = elem.flags + inode.key = elem.key() + inode.value = elem.value() + } else { + elem := p.branchPageElement(uint16(i)) + inode.pgid = elem.pgid + inode.key = elem.key() + } + _assert(len(inode.key) > 0, "read: zero-length inode key") + } + + // Save first key so we can find the node in the parent when we spill. + if len(n.inodes) > 0 { + n.key = n.inodes[0].key + _assert(len(n.key) > 0, "read: zero-length node key") + } else { + n.key = nil + } +} + +// write writes the items onto one or more pages. +func (n *node) write(p *page) { + // Initialize page. + if n.isLeaf { + p.flags |= leafPageFlag + } else { + p.flags |= branchPageFlag + } + + if len(n.inodes) >= 0xFFFF { + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + } + p.count = uint16(len(n.inodes)) + + // Stop here if there are no items to write. + if p.count == 0 { + return + } + + // Loop over each item and write it to the page. + b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] + for i, item := range n.inodes { + _assert(len(item.key) > 0, "write: zero-length inode key") + + // Write the page element. + if n.isLeaf { + elem := p.leafPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.flags = item.flags + elem.ksize = uint32(len(item.key)) + elem.vsize = uint32(len(item.value)) + } else { + elem := p.branchPageElement(uint16(i)) + elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) + elem.ksize = uint32(len(item.key)) + elem.pgid = item.pgid + _assert(elem.pgid != p.id, "write: circular dependency occurred") + } + + // If the length of key+value is larger than the max allocation size + // then we need to reallocate the byte array pointer. 
+		//
+		// See: https://github.com/boltdb/bolt/pull/335
+		klen, vlen := len(item.key), len(item.value)
+		if len(b) < klen+vlen {
+			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
+		}
+
+		// Write data for the element to the end of the page.
+		copy(b[0:], item.key)
+		b = b[klen:]
+		copy(b[0:], item.value)
+		b = b[vlen:]
+	}
+
+	// DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize int) []*node {
+	var nodes []*node
+
+	node := n
+	for {
+		// Split node into two.
+		a, b := node.splitTwo(pageSize)
+		nodes = append(nodes, a)
+
+		// If we can't split then exit the loop.
+		if b == nil {
+			break
+		}
+
+		// Set node to b so it gets split on the next iteration.
+		node = b
+	}
+
+	return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize int) (*node, *node) {
+	// Ignore the split if the page doesn't have at least enough nodes for
+	// two pages or if the nodes can fit in a single page.
+	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
+		return n, nil
+	}
+
+	// Determine the threshold before starting a new node.
+	var fillPercent = n.bucket.FillPercent
+	if fillPercent < minFillPercent {
+		fillPercent = minFillPercent
+	} else if fillPercent > maxFillPercent {
+		fillPercent = maxFillPercent
+	}
+	threshold := int(float64(pageSize) * fillPercent)
+
+	// Determine split position and sizes of the two pages.
+	splitIndex, _ := n.splitIndex(threshold)
+
+	// Split node into two separate nodes.
+	// If there's no parent then we'll need to create one.
+	if n.parent == nil {
+		n.parent = &node{bucket: n.bucket, children: []*node{n}}
+	}
+
+	// Create a new node and add it to the parent.
+	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+	n.parent.children = append(n.parent.children, next)
+
+	// Split inodes across two nodes.
+	next.inodes = n.inodes[splitIndex:]
+	n.inodes = n.inodes[:splitIndex]
+
+	// Update the statistics.
+	n.bucket.tx.stats.Split++
+
+	return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This is only called from split().
+func (n *node) splitIndex(threshold int) (index, sz int) {
+	sz = pageHeaderSize
+
+	// Loop until we only have the minimum number of keys required for the second page.
+	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
+		index = i
+		inode := n.inodes[i]
+		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
+
+		// If we have at least the minimum number of keys and adding another
+		// node would put us over the threshold then exit and return.
+		if i >= minKeysPerPage && sz+elsize > threshold {
+			break
+		}
+
+		// Add the element size to the total size.
+		sz += elsize
+	}
+
+	return
+}
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+	var tx = n.bucket.tx
+	if n.spilled {
+		return nil
+	}
+
+	// Spill child nodes first. Child nodes can materialize sibling nodes in
+	// the case of split-merge so we cannot use a range loop. We have to check
+	// the children size on every loop iteration.
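+	// Children are sorted by their first key so they spill in key order.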
+ sort.Sort(n.children) + for i := 0; i < len(n.children); i++ { + if err := n.children[i].spill(); err != nil { + return err + } + } + + // We no longer need the child list because it's only used for spill tracking. + n.children = nil + + // Split nodes into appropriate sizes. The first node will always be n. + var nodes = n.split(tx.db.pageSize) + for _, node := range nodes { + // Add node's page to the freelist if it's not new. + if node.pgid > 0 { + tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + node.pgid = 0 + } + + // Allocate contiguous space for the node. + p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) + if err != nil { + return err + } + + // Write the node. + if p.id >= tx.meta.pgid { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + } + node.pgid = p.id + node.write(p) + node.spilled = true + + // Insert into parent inodes. + if node.parent != nil { + var key = node.key + if key == nil { + key = node.inodes[0].key + } + + node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) + node.key = node.inodes[0].key + _assert(len(node.key) > 0, "spill: zero-length node key") + } + + // Update the statistics. + tx.stats.Spill++ + } + + // If the root node split and created a new root then we need to spill that + // as well. We'll clear out the children to make sure it doesn't try to respill. + if n.parent != nil && n.parent.pgid == 0 { + n.children = nil + return n.parent.spill() + } + + return nil +} + +// rebalance attempts to combine the node with sibling nodes if the node fill +// size is below a threshold or if there are not enough keys. +func (n *node) rebalance() { + if !n.unbalanced { + return + } + n.unbalanced = false + + // Update statistics. + n.bucket.tx.stats.Rebalance++ + + // Ignore if node is above threshold (25%) and has enough keys. + var threshold = n.bucket.tx.db.pageSize / 4 + if n.size() > threshold && len(n.inodes) > n.minKeys() { + return + } + + // Root node has special handling. + if n.parent == nil { + // If root node is a branch and only has one node then collapse it. + if !n.isLeaf && len(n.inodes) == 1 { + // Move root's child up. + child := n.bucket.node(n.inodes[0].pgid, n) + n.isLeaf = child.isLeaf + n.inodes = child.inodes[:] + n.children = child.children + + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent = n + } + } + + // Remove old child. + child.parent = nil + delete(n.bucket.nodes, child.pgid) + child.free() + } + + return + } + + // If node has no keys then just remove it. + if n.numChildren() == 0 { + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + n.parent.rebalance() + return + } + + _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + + // Destination node is right sibling if idx == 0, otherwise left sibling. + var target *node + var useNextSibling = (n.parent.childIndex(n) == 0) + if useNextSibling { + target = n.nextSibling() + } else { + target = n.prevSibling() + } + + // If both this node and the target node are too small then merge them. + if useNextSibling { + // Reparent all child nodes being moved. + for _, inode := range target.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = n + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes from target and remove target. + n.inodes = append(n.inodes, target.inodes...) 
+ n.parent.del(target.key) + n.parent.removeChild(target) + delete(n.bucket.nodes, target.pgid) + target.free() + } else { + // Reparent all child nodes being moved. + for _, inode := range n.inodes { + if child, ok := n.bucket.nodes[inode.pgid]; ok { + child.parent.removeChild(child) + child.parent = target + child.parent.children = append(child.parent.children, child) + } + } + + // Copy over inodes to target and remove node. + target.inodes = append(target.inodes, n.inodes...) + n.parent.del(n.key) + n.parent.removeChild(n) + delete(n.bucket.nodes, n.pgid) + n.free() + } + + // Either this node or the target node was deleted from the parent so rebalance it. + n.parent.rebalance() +} + +// removes a node from the list of in-memory children. +// This does not affect the inodes. +func (n *node) removeChild(target *node) { + for i, child := range n.children { + if child == target { + n.children = append(n.children[:i], n.children[i+1:]...) + return + } + } +} + +// dereference causes the node to copy all its inode key/value references to heap memory. +// This is required when the mmap is reallocated so inodes are not pointing to stale data. +func (n *node) dereference() { + if n.key != nil { + key := make([]byte, len(n.key)) + copy(key, n.key) + n.key = key + _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + } + + for i := range n.inodes { + inode := &n.inodes[i] + + key := make([]byte, len(inode.key)) + copy(key, inode.key) + inode.key = key + _assert(len(inode.key) > 0, "dereference: zero-length inode key") + + value := make([]byte, len(inode.value)) + copy(value, inode.value) + inode.value = value + } + + // Recursively dereference children. + for _, child := range n.children { + child.dereference() + } + + // Update statistics. + n.bucket.tx.stats.NodeDeref++ +} + +// free adds the node's underlying page to the freelist. +func (n *node) free() { + if n.pgid != 0 { + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.pgid = 0 + } +} + +// dump writes the contents of the node to STDERR for debugging purposes. +/* +func (n *node) dump() { + // Write node header. + var typ = "branch" + if n.isLeaf { + typ = "leaf" + } + warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) + + // Write out abbreviated version of each item. + for _, item := range n.inodes { + if n.isLeaf { + if item.flags&bucketLeafFlag != 0 { + bucket := (*bucket)(unsafe.Pointer(&item.value[0])) + warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) + } else { + warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) + } + } else { + warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) + } + } + warn("") +} +*/ + +type nodes []*node + +func (s nodes) Len() int { return len(s) } +func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } + +// inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. 
+type inode struct { + flags uint32 + pgid pgid + key []byte + value []byte +} + +type inodes []inode diff --git a/vendor/github.com/boltdb/bolt/page.go b/vendor/github.com/boltdb/bolt/page.go new file mode 100644 index 0000000000..cde403ae86 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/page.go @@ -0,0 +1,197 @@ +package bolt + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) + +const minKeysPerPage = 2 + +const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) +const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) + +const ( + branchPageFlag = 0x01 + leafPageFlag = 0x02 + metaPageFlag = 0x04 + freelistPageFlag = 0x10 +) + +const ( + bucketLeafFlag = 0x01 +) + +type pgid uint64 + +type page struct { + id pgid + flags uint16 + count uint16 + overflow uint32 + ptr uintptr +} + +// typ returns a human readable page type string used for debugging. +func (p *page) typ() string { + if (p.flags & branchPageFlag) != 0 { + return "branch" + } else if (p.flags & leafPageFlag) != 0 { + return "leaf" + } else if (p.flags & metaPageFlag) != 0 { + return "meta" + } else if (p.flags & freelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// meta returns a pointer to the metadata section of the page. +func (p *page) meta() *meta { + return (*meta)(unsafe.Pointer(&p.ptr)) +} + +// leafPageElement retrieves the leaf node by index +func (p *page) leafPageElement(index uint16) *leafPageElement { + n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] + return n +} + +// leafPageElements retrieves a list of leaf nodes. +func (p *page) leafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// branchPageElement retrieves the branch node by index +func (p *page) branchPageElement(index uint16) *branchPageElement { + return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] +} + +// branchPageElements retrieves a list of branch nodes. +func (p *page) branchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] +} + +// dump writes n bytes of the page to STDERR as hex output. +func (p *page) hexdump(n int) { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +type pages []*page + +func (s pages) Len() int { return len(s) } +func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid pgid +} + +// key returns a byte slice of the node key. +func (n *branchPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +// key returns a byte slice of the node key. +func (n *leafPageElement) key() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] +} + +// value returns a byte slice of the node value. 
+func (n *leafPageElement) value() []byte { + buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) + return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type pgids []pgid + +func (s pgids) Len() int { return len(s) } +func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s pgids) Less(i, j int) bool { return s[i] < s[j] } + +// merge returns the sorted union of a and b. +func (a pgids) merge(b pgids) pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(pgids, len(a)+len(b)) + mergepgids(merged, a, b) + return merged +} + +// mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func mergepgids(dst, a, b pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/github.com/boltdb/bolt/tx.go b/vendor/github.com/boltdb/bolt/tx.go new file mode 100644 index 0000000000..6700308a29 --- /dev/null +++ b/vendor/github.com/boltdb/bolt/tx.go @@ -0,0 +1,684 @@ +package bolt + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + "time" + "unsafe" +) + +// txid represents the internal transaction identifier. +type txid uint64 + +// Tx represents a read-only or read/write transaction on the database. +// Read-only transactions can be used for retrieving values for keys and creating cursors. +// Read/write transactions can create and remove buckets and create and remove keys. +// +// IMPORTANT: You must commit or rollback transactions when you are done with +// them. Pages can not be reclaimed by the writer until no more transactions +// are using them. A long running read transaction can cause the database to +// quickly grow. +type Tx struct { + writable bool + managed bool + db *DB + meta *meta + root Bucket + pages map[pgid]*page + stats TxStats + commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int +} + +// init initializes the transaction. +func (tx *Tx) init(db *DB) { + tx.db = db + tx.pages = nil + + // Copy the meta page since it can be changed by the writer. + tx.meta = &meta{} + db.meta().copy(tx.meta) + + // Copy over the root bucket. 
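+	// The bucket header is copied by value so the transaction owns its own
+	// copy of the root rather than sharing the meta's.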
+ tx.root = newBucket(tx) + tx.root.bucket = &bucket{} + *tx.root.bucket = tx.meta.root + + // Increment the transaction id and add a page cache for writable transactions. + if tx.writable { + tx.pages = make(map[pgid]*page) + tx.meta.txid += txid(1) + } +} + +// ID returns the transaction id. +func (tx *Tx) ID() int { + return int(tx.meta.txid) +} + +// DB returns a reference to the database that created the transaction. +func (tx *Tx) DB() *DB { + return tx.db +} + +// Size returns current database size in bytes as seen by this transaction. +func (tx *Tx) Size() int64 { + return int64(tx.meta.pgid) * int64(tx.db.pageSize) +} + +// Writable returns whether the transaction can perform write operations. +func (tx *Tx) Writable() bool { + return tx.writable +} + +// Cursor creates a cursor associated with the root bucket. +// All items in the cursor will return a nil value because all root bucket keys point to buckets. +// The cursor is only valid as long as the transaction is open. +// Do not use a cursor after the transaction is closed. +func (tx *Tx) Cursor() *Cursor { + return tx.root.Cursor() +} + +// Stats retrieves a copy of the current transaction statistics. +func (tx *Tx) Stats() TxStats { + return tx.stats +} + +// Bucket retrieves a bucket by name. +// Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) Bucket(name []byte) *Bucket { + return tx.root.Bucket(name) +} + +// CreateBucket creates a new bucket. +// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { + return tx.root.CreateBucket(name) +} + +// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. +// Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. +func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { + return tx.root.CreateBucketIfNotExists(name) +} + +// DeleteBucket deletes a bucket. +// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. +func (tx *Tx) DeleteBucket(name []byte) error { + return tx.root.DeleteBucket(name) +} + +// ForEach executes a function for each bucket in the root. +// If the provided function returns an error then the iteration is stopped and +// the error is returned to the caller. +func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { + return tx.root.ForEach(func(k, v []byte) error { + if err := fn(k, tx.root.Bucket(k)); err != nil { + return err + } + return nil + }) +} + +// OnCommit adds a handler function to be executed after the transaction successfully commits. +func (tx *Tx) OnCommit(fn func()) { + tx.commitHandlers = append(tx.commitHandlers, fn) +} + +// Commit writes all changes to disk and updates the meta page. +// Returns an error if a disk write error occurs, or if Commit is +// called on a read-only transaction. +func (tx *Tx) Commit() error { + _assert(!tx.managed, "managed tx commit not allowed") + if tx.db == nil { + return ErrTxClosed + } else if !tx.writable { + return ErrTxNotWritable + } + + // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. + + // Rebalance nodes which have had deletions. 
+ var startTime = time.Now() + tx.root.rebalance() + if tx.stats.Rebalance > 0 { + tx.stats.RebalanceTime += time.Since(startTime) + } + + // spill data onto dirty pages. + startTime = time.Now() + if err := tx.root.spill(); err != nil { + tx.rollback() + return err + } + tx.stats.SpillTime += time.Since(startTime) + + // Free the old root bucket. + tx.meta.root.root = tx.root.root + + opgid := tx.meta.pgid + + // Free the freelist and allocate new pages for it. This will overestimate + // the size of the freelist but not underestimate the size (which would be bad). + tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + if err != nil { + tx.rollback() + return err + } + if err := tx.db.freelist.write(p); err != nil { + tx.rollback() + return err + } + tx.meta.freelist = p.id + + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + + // Write dirty pages to disk. + startTime = time.Now() + if err := tx.write(); err != nil { + tx.rollback() + return err + } + + // If strict mode is enabled then perform a consistency check. + // Only the first consistency error is reported in the panic. + if tx.db.StrictMode { + ch := tx.Check() + var errs []string + for { + err, ok := <-ch + if !ok { + break + } + errs = append(errs, err.Error()) + } + if len(errs) > 0 { + panic("check fail: " + strings.Join(errs, "\n")) + } + } + + // Write meta to disk. + if err := tx.writeMeta(); err != nil { + tx.rollback() + return err + } + tx.stats.WriteTime += time.Since(startTime) + + // Finalize the transaction. + tx.close() + + // Execute commit handlers now that the locks have been removed. + for _, fn := range tx.commitHandlers { + fn() + } + + return nil +} + +// Rollback closes the transaction and ignores all previous updates. Read-only +// transactions must be rolled back and not committed. +func (tx *Tx) Rollback() error { + _assert(!tx.managed, "managed tx rollback not allowed") + if tx.db == nil { + return ErrTxClosed + } + tx.rollback() + return nil +} + +func (tx *Tx) rollback() { + if tx.db == nil { + return + } + if tx.writable { + tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + } + tx.close() +} + +func (tx *Tx) close() { + if tx.db == nil { + return + } + if tx.writable { + // Grab freelist stats. + var freelistFreeN = tx.db.freelist.free_count() + var freelistPendingN = tx.db.freelist.pending_count() + var freelistAlloc = tx.db.freelist.size() + + // Remove transaction ref & writer lock. + tx.db.rwtx = nil + tx.db.rwlock.Unlock() + + // Merge statistics. + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } else { + tx.db.removeTx(tx) + } + + // Clear all references. + tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil +} + +// Copy writes the entire database to a writer. +// This function exists for backwards compatibility. Use WriteTo() instead. +func (tx *Tx) Copy(w io.Writer) error { + _, err := tx.WriteTo(w) + return err +} + +// WriteTo writes the entire database to a writer. 
+// If err == nil then exactly tx.Size() bytes will be written into the writer. +func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err + } + defer func() { _ = f.Close() }() + + // Generate a meta page. We use the same page data for both meta pages. + buf := make([]byte, tx.db.pageSize) + page := (*page)(unsafe.Pointer(&buf[0])) + page.flags = metaPageFlag + *page.meta() = *tx.meta + + // Write meta 0. + page.id = 0 + page.meta().checksum = page.meta().sum64() + nn, err := w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 0 copy: %s", err) + } + + // Write meta 1 with a lower transaction id. + page.id = 1 + page.meta().txid -= 1 + page.meta().checksum = page.meta().sum64() + nn, err = w.Write(buf) + n += int64(nn) + if err != nil { + return n, fmt.Errorf("meta 1 copy: %s", err) + } + + // Move past the meta pages in the file. + if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { + return n, fmt.Errorf("seek: %s", err) + } + + // Copy data pages. + wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + n += wn + if err != nil { + return n, err + } + + return n, f.Close() +} + +// CopyFile copies the entire database to file at the given path. +// A reader transaction is maintained during the copy so it is safe to continue +// using the database while a copy is in progress. +func (tx *Tx) CopyFile(path string, mode os.FileMode) error { + f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) + if err != nil { + return err + } + + err = tx.Copy(f) + if err != nil { + _ = f.Close() + return err + } + return f.Close() +} + +// Check performs several consistency checks on the database for this transaction. +// An error is returned if any inconsistency is found. +// +// It can be safely run concurrently on a writable transaction. However, this +// incurs a high cost for large databases and databases with a lot of subbuckets +// because of caching. This overhead can be removed if running on a read-only +// transaction, however, it is not safe to execute other writer transactions at +// the same time. +func (tx *Tx) Check() <-chan error { + ch := make(chan error) + go tx.check(ch) + return ch +} + +func (tx *Tx) check(ch chan error) { + // Check if any pages are double freed. + freed := make(map[pgid]bool) + all := make([]pgid, tx.db.freelist.count()) + tx.db.freelist.copyall(all) + for _, id := range all { + if freed[id] { + ch <- fmt.Errorf("page %d: already freed", id) + } + freed[id] = true + } + + // Track every reachable page. + reachable := make(map[pgid]*page) + reachable[0] = tx.page(0) // meta0 + reachable[1] = tx.page(1) // meta1 + for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { + reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + } + + // Recursively check buckets. + tx.checkBucket(&tx.root, reachable, freed, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := pgid(0); i < tx.meta.pgid; i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + + // Close the channel to signal completion. + close(ch) +} + +func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { + // Ignore inline buckets. + if b.root == 0 { + return + } + + // Check every page used by this bucket. 
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) { + if p.id > tx.meta.pgid { + ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) + } + + // Ensure each page is only referenced once. + for i := pgid(0); i <= pgid(p.overflow); i++ { + var id = p.id + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references", int(id)) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.id] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) + } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) + } + }) + + // Check each bucket within this bucket. + _ = b.ForEach(func(k, v []byte) error { + if child := b.Bucket(k); child != nil { + tx.checkBucket(child, reachable, freed, ch) + } + return nil + }) +} + +// allocate returns a contiguous block of memory starting at a given page. +func (tx *Tx) allocate(count int) (*page, error) { + p, err := tx.db.allocate(count) + if err != nil { + return nil, err + } + + // Save to our page cache. + tx.pages[p.id] = p + + // Update statistics. + tx.stats.PageCount++ + tx.stats.PageAlloc += count * tx.db.pageSize + + return p, nil +} + +// write writes any dirty pages to disk. +func (tx *Tx) write() error { + // Sort pages by id. + pages := make(pages, 0, len(tx.pages)) + for _, p := range tx.pages { + pages = append(pages, p) + } + // Clear out page cache early. + tx.pages = make(map[pgid]*page) + sort.Sort(pages) + + // Write pages to disk in order. + for _, p := range pages { + size := (int(p.overflow) + 1) * tx.db.pageSize + offset := int64(p.id) * int64(tx.db.pageSize) + + // Write out page in "max allocation" sized chunks. + ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) + for { + // Limit our write to our max allocation size. + sz := size + if sz > maxAllocSize-1 { + sz = maxAllocSize - 1 + } + + // Write chunk to disk. + buf := ptr[:sz] + if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + return err + } + + // Update statistics. + tx.stats.Write++ + + // Exit inner for loop if we've written all the chunks. + size -= sz + if size == 0 { + break + } + + // Otherwise move offset forward and move pointer to next chunk. + offset += int64(sz) + ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) + } + } + + // Ignore file sync if flag is set on DB. + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Put small pages back to page pool. + for _, p := range pages { + // Ignore page sizes over 1 page. + // These are allocated using make() instead of the page pool. + if int(p.overflow) != 0 { + continue + } + + buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] + + // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 + for i := range buf { + buf[i] = 0 + } + tx.db.pagePool.Put(buf) + } + + return nil +} + +// writeMeta writes the meta to the disk. +func (tx *Tx) writeMeta() error { + // Create a temporary buffer for the meta page. + buf := make([]byte, tx.db.pageSize) + p := tx.db.pageInBuffer(buf, 0) + tx.meta.write(p) + + // Write the meta page to file. + if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + return err + } + if !tx.db.NoSync || IgnoreNoSync { + if err := fdatasync(tx.db); err != nil { + return err + } + } + + // Update statistics. 
+	tx.stats.Write++
+
+	return nil
+}
+
+// page returns a reference to the page with a given id.
+// If page has been written to then a temporary buffered page is returned.
+func (tx *Tx) page(id pgid) *page {
+	// Check the dirty pages first.
+	if tx.pages != nil {
+		if p, ok := tx.pages[id]; ok {
+			return p
+		}
+	}
+
+	// Otherwise return directly from the mmap.
+	return tx.db.page(id)
+}
+
+// forEachPage iterates over every page within a given page and executes a function.
+func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
+	p := tx.page(pgid)
+
+	// Execute function.
+	fn(p, depth)
+
+	// Recursively loop over children.
+	if (p.flags & branchPageFlag) != 0 {
+		for i := 0; i < int(p.count); i++ {
+			elem := p.branchPageElement(uint16(i))
+			tx.forEachPage(elem.pgid, depth+1, fn)
+		}
+	}
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*PageInfo, error) {
+	if tx.db == nil {
+		return nil, ErrTxClosed
+	} else if pgid(id) >= tx.meta.pgid {
+		return nil, nil
+	}
+
+	// Build the page info.
+	p := tx.db.page(pgid(id))
+	info := &PageInfo{
+		ID:            id,
+		Count:         int(p.count),
+		OverflowCount: int(p.overflow),
+	}
+
+	// Determine the type (or if it's free).
+	if tx.db.freelist.freed(pgid(id)) {
+		info.Type = "free"
+	} else {
+		info.Type = p.typ()
+	}
+
+	return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+	// Page statistics.
+	PageCount int // number of page allocations
+	PageAlloc int // total bytes allocated
+
+	// Cursor statistics.
+	CursorCount int // number of cursors created
+
+	// Node statistics
+	NodeCount int // number of node allocations
+	NodeDeref int // number of node dereferences
+
+	// Rebalance statistics.
+	Rebalance     int           // number of node rebalances
+	RebalanceTime time.Duration // total time spent rebalancing
+
+	// Split/Spill statistics.
+	Split     int           // number of nodes split
+	Spill     int           // number of nodes spilled
+	SpillTime time.Duration // total time spent spilling
+
+	// Write statistics.
+	Write     int           // number of writes performed
+	WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+	s.PageCount += other.PageCount
+	s.PageAlloc += other.PageAlloc
+	s.CursorCount += other.CursorCount
+	s.NodeCount += other.NodeCount
+	s.NodeDeref += other.NodeDeref
+	s.Rebalance += other.Rebalance
+	s.RebalanceTime += other.RebalanceTime
+	s.Split += other.Split
+	s.Spill += other.Spill
+	s.SpillTime += other.SpillTime
+	s.Write += other.Write
+	s.WriteTime += other.WriteTime
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
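+//
+// For example (a sketch assuming a *DB value named db; DB.Stats is defined
+// in db.go and returns a Stats snapshot containing a TxStats):
+//
+//	prev := db.Stats().TxStats
+//	// ... run some transactions ...
+//	cur := db.Stats().TxStats
+//	diff := cur.Sub(&prev)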
+func (s *TxStats) Sub(other *TxStats) TxStats { + var diff TxStats + diff.PageCount = s.PageCount - other.PageCount + diff.PageAlloc = s.PageAlloc - other.PageAlloc + diff.CursorCount = s.CursorCount - other.CursorCount + diff.NodeCount = s.NodeCount - other.NodeCount + diff.NodeDeref = s.NodeDeref - other.NodeDeref + diff.Rebalance = s.Rebalance - other.Rebalance + diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime + diff.Split = s.Split - other.Split + diff.Spill = s.Spill - other.Spill + diff.SpillTime = s.SpillTime - other.SpillTime + diff.Write = s.Write - other.Write + diff.WriteTime = s.WriteTime - other.WriteTime + return diff +} diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 0000000000..584149b6ee --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000000..8915f02773 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go new file mode 100644 index 0000000000..a49c11735b --- /dev/null +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -0,0 +1,215 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cio + +import ( + "context" + "fmt" + "io" + "os" + "sync" + + "github.com/containerd/containerd/defaults" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32<<10) + return &buffer + }, +} + +// Config holds the IO configurations. +type Config struct { + // Terminal is true if one has been allocated + Terminal bool + // Stdin path + Stdin string + // Stdout path + Stdout string + // Stderr path + Stderr string +} + +// IO holds the io information for a task or process +type IO interface { + // Config returns the IO configuration. + Config() Config + // Cancel aborts all current io operations. + Cancel() + // Wait blocks until all io copy operations have completed. + Wait() + // Close cleans up all open io resources. 
Cancel() is always called before
+	// Close()
+	Close() error
+}
+
+// Creator creates new IO sets for a task
+type Creator func(id string) (IO, error)
+
+// Attach allows callers to reattach to running tasks
+//
+// There should only be one reader for a task's IO set
+// because fifos can only be read by one reader, or the output
+// will be sent only to the first reader
+type Attach func(*FIFOSet) (IO, error)
+
+// FIFOSet is a set of file paths to FIFOs for a task's standard IO streams
+type FIFOSet struct {
+	Config
+	close func() error
+}
+
+// Close the FIFOSet
+func (f *FIFOSet) Close() error {
+	if f.close != nil {
+		return f.close()
+	}
+	return nil
+}
+
+// NewFIFOSet returns a new FIFOSet from a Config and a close function
+func NewFIFOSet(config Config, close func() error) *FIFOSet {
+	return &FIFOSet{Config: config, close: close}
+}
+
+// Streams used to configure a Creator or Attach
+type Streams struct {
+	Stdin    io.Reader
+	Stdout   io.Writer
+	Stderr   io.Writer
+	Terminal bool
+	FIFODir  string
+}
+
+// Opt customizes options for creating a Creator or Attach
+type Opt func(*Streams)
+
+// WithStdio sets stream options to the standard input/output streams
+func WithStdio(opt *Streams) {
+	WithStreams(os.Stdin, os.Stdout, os.Stderr)(opt)
+}
+
+// WithTerminal sets the terminal option
+func WithTerminal(opt *Streams) {
+	opt.Terminal = true
+}
+
+// WithStreams sets the stream options to the specified Reader and Writers
+func WithStreams(stdin io.Reader, stdout, stderr io.Writer) Opt {
+	return func(opt *Streams) {
+		opt.Stdin = stdin
+		opt.Stdout = stdout
+		opt.Stderr = stderr
+	}
+}
+
+// WithFIFODir sets the fifo directory.
+// e.g. "/run/containerd/fifo", "/run/users/1001/containerd/fifo"
+func WithFIFODir(dir string) Opt {
+	return func(opt *Streams) {
+		opt.FIFODir = dir
+	}
+}
+
+// NewCreator returns an IO creator from the options
+func NewCreator(opts ...Opt) Creator {
+	streams := &Streams{}
+	for _, opt := range opts {
+		opt(streams)
+	}
+	if streams.FIFODir == "" {
+		streams.FIFODir = defaults.DefaultFIFODir
+	}
+	return func(id string) (IO, error) {
+		fifos, err := NewFIFOSetInDir(streams.FIFODir, id, streams.Terminal)
+		if err != nil {
+			return nil, err
+		}
+		return copyIO(fifos, streams)
+	}
+}
+
+// NewAttach attaches the existing io for a task to the provided io.Reader/Writers
+func NewAttach(opts ...Opt) Attach {
+	streams := &Streams{}
+	for _, opt := range opts {
+		opt(streams)
+	}
+	return func(fifos *FIFOSet) (IO, error) {
+		if fifos == nil {
+			return nil, fmt.Errorf("cannot attach, missing fifos")
+		}
+		return copyIO(fifos, streams)
+	}
+}
+
+// NullIO redirects the container's IO into /dev/null
+func NullIO(_ string) (IO, error) {
+	return &cio{}, nil
+}
+
+// cio is a basic container IO implementation.
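+// In practice a client rarely builds one of these by hand. As an editor's
+// sketch (not upstream code), a typical containerd caller would hand a
+// Creator to the task API and let it construct the cio, where "container"
+// is an assumed containerd.Container value:
+//
+//	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio))
+//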
+type cio struct { + config Config + wg *sync.WaitGroup + closers []io.Closer + cancel context.CancelFunc +} + +func (c *cio) Config() Config { + return c.config +} + +func (c *cio) Wait() { + if c.wg != nil { + c.wg.Wait() + } +} + +func (c *cio) Close() error { + var lastErr error + for _, closer := range c.closers { + if closer == nil { + continue + } + if err := closer.Close(); err != nil { + lastErr = err + } + } + return lastErr +} + +func (c *cio) Cancel() { + if c.cancel != nil { + c.cancel() + } +} + +type pipes struct { + Stdin io.WriteCloser + Stdout io.ReadCloser + Stderr io.ReadCloser +} + +// DirectIO allows task IO to be handled externally by the caller +type DirectIO struct { + pipes + cio +} + +var _ IO = &DirectIO{} diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go new file mode 100644 index 0000000000..3ab2a30b0c --- /dev/null +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -0,0 +1,158 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cio + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + + "github.com/containerd/fifo" + "github.com/pkg/errors" +) + +// NewFIFOSetInDir returns a new FIFOSet with paths in a temporary directory under root +func NewFIFOSetInDir(root, id string, terminal bool) (*FIFOSet, error) { + if root != "" { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + } + dir, err := ioutil.TempDir(root, "") + if err != nil { + return nil, err + } + closer := func() error { + return os.RemoveAll(dir) + } + return NewFIFOSet(Config{ + Stdin: filepath.Join(dir, id+"-stdin"), + Stdout: filepath.Join(dir, id+"-stdout"), + Stderr: filepath.Join(dir, id+"-stderr"), + Terminal: terminal, + }, closer), nil +} + +func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { + var ctx, cancel = context.WithCancel(context.Background()) + pipes, err := openFifos(ctx, fifos) + if err != nil { + cancel() + return nil, err + } + + if fifos.Stdin != "" { + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(pipes.Stdin, ioset.Stdin, *p) + pipes.Stdin.Close() + }() + } + + var wg = &sync.WaitGroup{} + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + + if !fifos.Terminal { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) + + io.CopyBuffer(ioset.Stderr, pipes.Stderr, *p) + pipes.Stderr.Close() + wg.Done() + }() + } + return &cio{ + config: fifos.Config, + wg: wg, + closers: append(pipes.closers(), fifos), + cancel: cancel, + }, nil +} + +func openFifos(ctx context.Context, fifos *FIFOSet) (pipes, error) { + var err error + defer func() { + if err != nil { + fifos.Close() + } + }() + + var f pipes + if fifos.Stdin != "" { + if f.Stdin, err = fifo.OpenFifo(ctx, fifos.Stdin, 
syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stdin fifo") + } + defer func() { + if err != nil && f.Stdin != nil { + f.Stdin.Close() + } + }() + } + if fifos.Stdout != "" { + if f.Stdout, err = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stdout fifo") + } + defer func() { + if err != nil && f.Stdout != nil { + f.Stdout.Close() + } + }() + } + if fifos.Stderr != "" { + if f.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil { + return f, errors.Wrapf(err, "failed to open stderr fifo") + } + } + return f, nil +} + +// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser +// and io.WriteCloser. +func NewDirectIO(ctx context.Context, fifos *FIFOSet) (*DirectIO, error) { + ctx, cancel := context.WithCancel(ctx) + pipes, err := openFifos(ctx, fifos) + return &DirectIO{ + pipes: pipes, + cio: cio{ + config: fifos.Config, + closers: append(pipes.closers(), fifos), + cancel: cancel, + }, + }, err +} + +func (p *pipes) closers() []io.Closer { + return []io.Closer{p.Stdin, p.Stdout, p.Stderr} +} diff --git a/vendor/github.com/containerd/containerd/cio/io_windows.go b/vendor/github.com/containerd/containerd/cio/io_windows.go new file mode 100644 index 0000000000..fa9532a3bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/cio/io_windows.go @@ -0,0 +1,152 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package cio
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	winio "github.com/Microsoft/go-winio"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+const pipeRoot = `\\.\pipe`
+
+// NewFIFOSetInDir returns a new set of fifos for the task
+func NewFIFOSetInDir(_, id string, terminal bool) (*FIFOSet, error) {
+	return NewFIFOSet(Config{
+		Terminal: terminal,
+		Stdin:    fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
+		Stdout:   fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
+		Stderr:   fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
+	}, nil), nil
+}
+
+func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
+	var (
+		wg  sync.WaitGroup
+		set []io.Closer
+	)
+
+	if fifos.Stdin != "" {
+		l, err := winio.ListenPipe(fifos.Stdin, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.Stdin)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		go func() {
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stdin connection on %s", fifos.Stdin)
+				return
+			}
+
+			p := bufPool.Get().(*[]byte)
+			defer bufPool.Put(p)
+
+			io.CopyBuffer(c, ioset.Stdin, *p)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	if fifos.Stdout != "" {
+		l, err := winio.ListenPipe(fifos.Stdout, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Stdout)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stdout connection on %s", fifos.Stdout)
+				return
+			}
+
+			p := bufPool.Get().(*[]byte)
+			defer bufPool.Put(p)
+
+			io.CopyBuffer(ioset.Stdout, c, *p)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	if !fifos.Terminal && fifos.Stderr != "" {
+		l, err := winio.ListenPipe(fifos.Stderr, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Stderr)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stderr connection on %s", fifos.Stderr)
+				return
+			}
+
+			p := bufPool.Get().(*[]byte)
+			defer bufPool.Put(p)
+
+			io.CopyBuffer(ioset.Stderr, c, *p)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	return &cio{config: fifos.Config, closers: set}, nil
+}
+
+// NewDirectIO returns an IO implementation that exposes the IO streams as io.ReadCloser
+// and io.WriteCloser.
+func NewDirectIO(stdin io.WriteCloser, stdout, stderr io.ReadCloser, terminal bool) *DirectIO {
+	return &DirectIO{
+		pipes: pipes{
+			Stdin:  stdin,
+			Stdout: stdout,
+			Stderr: stderr,
+		},
+		cio: cio{
+			config: Config{Terminal: terminal},
+		},
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go
new file mode 100644
index 0000000000..7040f5b85a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/defaults/defaults.go
@@ -0,0 +1,26 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultMaxRecvMsgSize defines the default maximum message size for + // receiving protobufs passed over the GRPC API. + DefaultMaxRecvMsgSize = 16 << 20 + // DefaultMaxSendMsgSize defines the default maximum message size for + // sending protobufs passed over the GRPC API. + DefaultMaxSendMsgSize = 16 << 20 +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go new file mode 100644 index 0000000000..30ed42235e --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package defaults + +const ( + // DefaultRootDir is the default location used by containerd to store + // persistent data + DefaultRootDir = "/var/lib/containerd" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/run/containerd" + // DefaultAddress is the default unix socket address + DefaultAddress = "/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/run/containerd/fifo" +) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go new file mode 100644 index 0000000000..983bf762f7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -0,0 +1,43 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package defaults
+
+import (
+	"os"
+	"path/filepath"
+)
+
+var (
+	// DefaultRootDir is the default location used by containerd to store
+	// persistent data
+	DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
+	// DefaultStateDir is the default location used by containerd to store
+	// transient data
+	DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
+)
+
+const (
+	// DefaultAddress is the default winpipe address
+	DefaultAddress = `\\.\pipe\containerd-containerd`
+	// DefaultDebugAddress is the default winpipe address for pprof data
+	DefaultDebugAddress = `\\.\pipe\containerd-debug`
+	// DefaultFIFODir is the default location used by client-side cio library
+	// to store FIFOs. Unused on Windows.
+	DefaultFIFODir = ""
+)
diff --git a/vendor/github.com/containerd/containerd/defaults/doc.go b/vendor/github.com/containerd/containerd/defaults/doc.go
new file mode 100644
index 0000000000..6da863ce2e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/defaults/doc.go
@@ -0,0 +1,19 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+// Package defaults provides several common defaults for interacting with
+// containerd. These can be used on the client-side or server-side.
+package defaults
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
new file mode 100644
index 0000000000..f40603b17b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/context.go
@@ -0,0 +1,86 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package log
+
+import (
+	"context"
+	"sync/atomic"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// G is an alias for GetLogger.
+	//
+	// We may want to define this locally to a package to get package tagged log
+	// messages.
+	G = GetLogger
+
+	// L is an alias for the standard logger.
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+)
+
+// TraceLevel is the log level for tracing. Trace level is lower than debug level,
+// and is usually used to trace detailed behavior of the program.
+const TraceLevel = logrus.Level(uint32(logrus.DebugLevel + 1))
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+// It supports trace level.
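+//
+// For example (editor's illustration):
+//
+//	lvl, err := ParseLevel("trace") // TraceLevel, nil
+//	lvl, err = ParseLevel("debug") // logrus.DebugLevel, nil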
+func ParseLevel(lvl string) (logrus.Level, error) {
+	if lvl == "trace" {
+		return TraceLevel, nil
+	}
+	return logrus.ParseLevel(lvl)
+}
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		return L
+	}
+
+	return logger.(*logrus.Entry)
+}
+
+// Trace logs a message at level Trace with the log entry passed-in.
+func Trace(e *logrus.Entry, args ...interface{}) {
+	level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
+	if level >= TraceLevel {
+		e.Debug(args...)
+	}
+}
+
+// Tracef logs a message at level Trace with the log entry passed-in.
+func Tracef(e *logrus.Entry, format string, args ...interface{}) {
+	level := logrus.Level(atomic.LoadUint32((*uint32)(&e.Logger.Level)))
+	if level >= TraceLevel {
+		e.Debugf(format, args...)
+	}
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices.go b/vendor/github.com/containerd/continuity/devices/devices.go
new file mode 100644
index 0000000000..7086407047
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices.go
@@ -0,0 +1,5 @@
+package devices
+
+import "fmt"
+
+var ErrNotSupported = fmt.Errorf("not supported")
diff --git a/vendor/github.com/containerd/continuity/devices/devices_unix.go b/vendor/github.com/containerd/continuity/devices/devices_unix.go
new file mode 100644
index 0000000000..97fe6b19d2
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_unix.go
@@ -0,0 +1,58 @@
+// +build linux darwin freebsd solaris
+
+package devices
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	sys, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
+	}
+
+	dev := uint64(sys.Rdev)
+	return uint64(unix.Major(dev)), uint64(unix.Minor(dev)), nil
+}
+
+// Mknod provides a shortcut for syscall.Mknod
+func Mknod(p string, mode os.FileMode, maj, min int) error {
+	var (
+		m   = syscallMode(mode.Perm())
+		dev uint64
+	)
+
+	if mode&os.ModeDevice != 0 {
+		dev = unix.Mkdev(uint32(maj), uint32(min))
+
+		if mode&os.ModeCharDevice != 0 {
+			m |= unix.S_IFCHR
+		} else {
+			m |= unix.S_IFBLK
+		}
+	} else if mode&os.ModeNamedPipe != 0 {
+		m |= unix.S_IFIFO
+	}
+
+	return unix.Mknod(p, m, int(dev))
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
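+// For example (editor's illustration), syscallMode(0755|os.ModeSetuid)
+// yields 0755|unix.S_ISUID.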
+func syscallMode(i os.FileMode) (o uint32) {
+	o |= uint32(i.Perm())
+	if i&os.ModeSetuid != 0 {
+		o |= unix.S_ISUID
+	}
+	if i&os.ModeSetgid != 0 {
+		o |= unix.S_ISGID
+	}
+	if i&os.ModeSticky != 0 {
+		o |= unix.S_ISVTX
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_windows.go b/vendor/github.com/containerd/continuity/devices/devices_windows.go
new file mode 100644
index 0000000000..6099d1d779
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_windows.go
@@ -0,0 +1,11 @@
+package devices
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver.go b/vendor/github.com/containerd/continuity/driver/driver.go
new file mode 100644
index 0000000000..aa1dd7d297
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver.go
@@ -0,0 +1,162 @@
+package driver
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+var ErrNotSupported = fmt.Errorf("not supported")
+
+// Driver provides all of the system-level functions in a common interface.
+// The context should call these with full paths and should never use the `os`
+// package or any other package to access resources on the filesystem. This
+// mechanism lets us carefully control access to the context and maintain
+// path and resource integrity. It also gives us an interface to reason about
+// direct resource access.
+//
+// Implementations don't need to do much other than meet the interface. For
+// example, it is not required to wrap os.FileInfo to return correct paths for
+// the call to Name().
+type Driver interface {
+	// Note that Open() returns a File interface instead of *os.File. This
+	// is because os.File is a struct, so if Open were to return *os.File,
+	// the only way to fulfill the interface would be to call os.Open()
+	Open(path string) (File, error)
+	OpenFile(path string, flag int, perm os.FileMode) (File, error)
+
+	Stat(path string) (os.FileInfo, error)
+	Lstat(path string) (os.FileInfo, error)
+	Readlink(p string) (string, error)
+	Mkdir(path string, mode os.FileMode) error
+	Remove(path string) error
+
+	Link(oldname, newname string) error
+	Lchmod(path string, mode os.FileMode) error
+	Lchown(path string, uid, gid int64) error
+	Symlink(oldname, newname string) error
+
+	MkdirAll(path string, perm os.FileMode) error
+	RemoveAll(path string) error
+
+	// TODO(aaronl): These methods might move outside the main Driver
+	// interface in the future as more platforms are added.
+	Mknod(path string, mode os.FileMode, major int, minor int) error
+	Mkfifo(path string, mode os.FileMode) error
+}
+
+// File is the interface for interacting with files returned by continuity's Open
+// This is needed since os.File is a struct, instead of an interface, so it can't
+// be used.
+type File interface {
+	io.ReadWriteCloser
+	io.Seeker
+	Readdir(n int) ([]os.FileInfo, error)
+}
+
+func NewSystemDriver() (Driver, error) {
+	// TODO(stevvooe): Consider having this take a "hint" path argument, which
+	// would be the context root. The hint could be used to resolve required
+	// filesystem support when assembling the driver to use.
+	return &driver{}, nil
+}
+
+// XAttrDriver should be implemented on operating systems and filesystems that
+// have xattr support for regular files and directories.
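+//
+// Since xattr support is optional, a caller would type-assert for it
+// (editor's sketch; "drv" is an assumed Driver value):
+//
+//	if xd, ok := drv.(XAttrDriver); ok {
+//		attrs, err := xd.Getxattr("/some/path")
+//		// use attrs, handle err
+//	}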
+type XAttrDriver interface {
+	// Getxattr returns all of the extended attributes for the file at path.
+	// Typically, this takes a call to Listxattr and Getxattr.
+	Getxattr(path string) (map[string][]byte, error)
+
+	// Setxattr sets all of the extended attributes on file at path, following
+	// any symbolic links, if necessary. All attributes on the target are
+	// replaced by the values from attr. If the operation fails to set any
+	// attribute, those already applied will not be rolled back.
+	Setxattr(path string, attr map[string][]byte) error
+}
+
+// LXAttrDriver should be implemented by drivers on operating systems and
+// filesystems that support setting and getting extended attributes on
+// symbolic links. If this is not implemented, extended attributes will be
+// ignored on symbolic links.
+type LXAttrDriver interface {
+	// LGetxattr returns all of the extended attributes for the file at path
+	// and does not follow symlinks. Typically, this takes a call to
+	// Llistxattr and Lgetxattr.
+	LGetxattr(path string) (map[string][]byte, error)
+
+	// LSetxattr sets all of the extended attributes on file at path, without
+	// following symbolic links. All attributes on the target are replaced by
+	// the values from attr. If the operation fails to set any attribute,
+	// those already applied will not be rolled back.
+	LSetxattr(path string, attr map[string][]byte) error
+}
+
+type DeviceInfoDriver interface {
+	DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
+}
+
+// driver is a simple default implementation that sends calls out to the "os"
+// package. Extend the "driver" type in system-specific files to add support,
+// such as xattrs, at compile time.
+type driver struct{}
+
+var _ File = &os.File{}
+
+// LocalDriver is the exported Driver struct for convenience.
+var LocalDriver Driver = &driver{}
+
+func (d *driver) Open(p string) (File, error) {
+	return os.Open(p)
+}
+
+func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
+	return os.OpenFile(path, flag, perm)
+}
+
+func (d *driver) Stat(p string) (os.FileInfo, error) {
+	return os.Stat(p)
+}
+
+func (d *driver) Lstat(p string) (os.FileInfo, error) {
+	return os.Lstat(p)
+}
+
+func (d *driver) Readlink(p string) (string, error) {
+	return os.Readlink(p)
+}
+
+func (d *driver) Mkdir(p string, mode os.FileMode) error {
+	return os.Mkdir(p, mode)
+}
+
+// Remove is used to unlink files and remove directories.
+// This follows the golang os package api, which
+// combines the operations into a higher level Remove
+// function. If explicit unlinking or directory removal
+// to mirror system calls is required, they should be
+// split up at that time.
+func (d *driver) Remove(path string) error {
+	return os.Remove(path)
+}
+
+func (d *driver) Link(oldname, newname string) error {
+	return os.Link(oldname, newname)
+}
+
+func (d *driver) Lchown(name string, uid, gid int64) error {
+	// TODO: error out if uid exceeds int bit width?
+	return os.Lchown(name, int(uid), int(gid))
+}
+
+func (d *driver) Symlink(oldname, newname string) error {
+	return os.Symlink(oldname, newname)
+}
+
+func (d *driver) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func (d *driver) RemoveAll(path string) error {
+	return os.RemoveAll(path)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver_unix.go b/vendor/github.com/containerd/continuity/driver/driver_unix.go
new file mode 100644
index 0000000000..d9ab1656c9
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver_unix.go
@@ -0,0 +1,122 @@
+// +build linux darwin freebsd solaris
+
+package driver
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/containerd/continuity/devices"
+	"github.com/containerd/continuity/sysx"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	return devices.Mknod(path, mode, major, minor)
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	if mode&os.ModeNamedPipe == 0 {
+		return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
+	}
+	// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
+	// device.
+	return devices.Mknod(path, mode, 0, 0)
+}
+
+// Lchmod changes the mode of a file, not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
+	if !filepath.IsAbs(path) {
+		path, err = filepath.Abs(path)
+		if err != nil {
+			return
+		}
+	}
+
+	return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow)
+}
+
+// Getxattr returns all of the extended attributes for the file at path p.
+func (d *driver) Getxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.Listxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.Getxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// Setxattr sets all of the extended attributes on file at path, following
+// any symbolic links, if necessary. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.Setxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+// LGetxattr returns all of the extended attributes for the file at path p
+// not following symbolic links.
+func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.LListxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.LGetxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// LSetxattr sets all of the extended attributes on file at path, not
+// following any symbolic links. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
+	return devices.DeviceInfo(fi)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver_windows.go b/vendor/github.com/containerd/continuity/driver/driver_windows.go
new file mode 100644
index 0000000000..e4cfa64fb7
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver_windows.go
@@ -0,0 +1,21 @@
+package driver
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	return errors.Wrap(ErrNotSupported, "cannot create device node on Windows")
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	return errors.Wrap(ErrNotSupported, "cannot create fifo on Windows")
+}
+
+// Lchmod changes the mode of a file, not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
+	// TODO: Use the Windows equivalent
+	return os.Chmod(path, mode)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/utils.go b/vendor/github.com/containerd/continuity/driver/utils.go
new file mode 100644
index 0000000000..9e0edd7bca
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/utils.go
@@ -0,0 +1,74 @@
+package driver
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+)
+
+// ReadFile works the same as ioutil.ReadFile with the Driver abstraction
+func ReadFile(r Driver, filename string) ([]byte, error) {
+	f, err := r.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// WriteFile works the same as ioutil.WriteFile with the Driver abstraction
+func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
+	f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	n, err := f.Write(data)
+	if err != nil {
+		return err
+	} else if n != len(data) {
+		return io.ErrShortWrite
+	}
+
+	return nil
+}
+
+// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
+func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
+	f, err := r.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	dirs, err := f.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Sort(fileInfos(dirs))
+	return dirs, nil
+}
+
+// Simple implementation of the sort.Interface for os.FileInfo
+type fileInfos []os.FileInfo
+
+func (fis fileInfos) Len() int {
+	return len(fis)
+}
+
+func (fis fileInfos) Less(i, j int) bool {
+	return fis[i].Name() < fis[j].Name()
+}
+
+func (fis fileInfos) Swap(i, j int) {
+	fis[i], fis[j] = fis[j], fis[i]
+}
diff --git a/vendor/github.com/containerd/continuity/fs/copy.go b/vendor/github.com/containerd/continuity/fs/copy.go new file mode 100644 index 0000000000..2ac474b926 --- /dev/null +++ 
b/vendor/github.com/containerd/continuity/fs/copy.go
@@ -0,0 +1,121 @@
+package fs
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+var bufferPool = &sync.Pool{
+	New: func() interface{} {
+		buffer := make([]byte, 32*1024)
+		return &buffer
+	},
+}
+
+// CopyDir copies the directory from src to dst.
+// The most efficient means of copying files is attempted.
+func CopyDir(dst, src string) error {
+	inodes := map[uint64]string{}
+	return copyDirectory(dst, src, inodes)
+}
+
+func copyDirectory(dst, src string, inodes map[uint64]string) error {
+	stat, err := os.Stat(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", src)
+	}
+	if !stat.IsDir() {
+		return errors.Errorf("source is not a directory")
+	}
+
+	if st, err := os.Stat(dst); err != nil {
+		if err := os.Mkdir(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", dst)
+		}
+	} else if !st.IsDir() {
+		return errors.Errorf("cannot copy to non-directory: %s", dst)
+	} else {
+		if err := os.Chmod(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod on %s", dst)
+		}
+	}
+
+	fis, err := ioutil.ReadDir(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %s", src)
+	}
+
+	if err := copyFileInfo(stat, dst); err != nil {
+		return errors.Wrapf(err, "failed to copy file info for %s", dst)
+	}
+
+	for _, fi := range fis {
+		source := filepath.Join(src, fi.Name())
+		target := filepath.Join(dst, fi.Name())
+
+		switch {
+		case fi.IsDir():
+			if err := copyDirectory(target, source, inodes); err != nil {
+				return err
+			}
+			continue
+		case (fi.Mode() & os.ModeType) == 0:
+			link, err := getLinkSource(target, fi, inodes)
+			if err != nil {
+				return errors.Wrap(err, "failed to get hardlink")
+			}
+			if link != "" {
+				if err := os.Link(link, target); err != nil {
+					return errors.Wrap(err, "failed to create hard link")
+				}
+			} else if err := CopyFile(target, source); err != nil {
+				return errors.Wrap(err, "failed to copy files")
+			}
+		case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
+			link, err := os.Readlink(source)
+			if err != nil {
+				return errors.Wrapf(err, "failed to read link: %s", source)
+			}
+			if err := os.Symlink(link, target); err != nil {
+				return errors.Wrapf(err, "failed to create symlink: %s", target)
+			}
+		case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
+			if err := copyDevice(target, fi); err != nil {
+				return errors.Wrapf(err, "failed to create device")
+			}
+		default:
+			// TODO: Support pipes and sockets
+			return errors.Errorf("unsupported mode %s", fi.Mode())
+		}
+		if err := copyFileInfo(fi, target); err != nil {
+			return errors.Wrap(err, "failed to copy file info")
+		}
+
+		if err := copyXAttrs(target, source); err != nil {
+			return errors.Wrap(err, "failed to copy xattrs")
+		}
+	}
+
+	return nil
+}
+
+// CopyFile copies the source file to the target.
+// The most efficient means of copying is used for the platform.
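+//
+// Note the destination-first argument order, mirroring CopyDir above
+// (editor's illustration):
+//
+//	if err := fs.CopyFile("/tmp/dst.txt", "/tmp/src.txt"); err != nil {
+//		// handle the copy failure
+//	}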
+func CopyFile(target, source string) error {
+	src, err := os.Open(source)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open source %s", source)
+	}
+	defer src.Close()
+	tgt, err := os.Create(target)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open target %s", target)
+	}
+	defer tgt.Close()
+
+	return copyFileContent(tgt, src)
+}
diff --git a/vendor/github.com/containerd/continuity/fs/copy_linux.go b/vendor/github.com/containerd/continuity/fs/copy_linux.go
new file mode 100644
index 0000000000..cfab6756b8
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy_linux.go
@@ -0,0 +1,95 @@
+package fs
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+	st := fi.Sys().(*syscall.Stat_t)
+	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+		if os.IsPermission(err) {
+			// Normally if uid/gid are the same this would be a no-op, but some
+			// filesystems may still return EPERM... for instance NFS does this.
+			// In such a case, this is not an error.
+			if dstStat, err2 := os.Lstat(name); err2 == nil {
+				st2 := dstStat.Sys().(*syscall.Stat_t)
+				if st.Uid == st2.Uid && st.Gid == st2.Gid {
+					err = nil
+				}
+			}
+		}
+		if err != nil {
+			return errors.Wrapf(err, "failed to chown %s", name)
+		}
+	}
+
+	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+		if err := os.Chmod(name, fi.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod %s", name)
+		}
+	}
+
+	timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		return errors.Wrapf(err, "failed to utime %s", name)
+	}
+
+	return nil
+}
+
+func copyFileContent(dst, src *os.File) error {
+	st, err := src.Stat()
+	if err != nil {
+		return errors.Wrap(err, "unable to stat source")
+	}
+
+	n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, int(st.Size()), 0)
+	if err != nil {
+		if err != unix.ENOSYS && err != unix.EXDEV {
+			return errors.Wrap(err, "copy file range failed")
+		}
+
+		buf := bufferPool.Get().(*[]byte)
+		_, err = io.CopyBuffer(dst, src, *buf)
+		bufferPool.Put(buf)
+		return err
+	}
+
+	if int64(n) != st.Size() {
+		return errors.Errorf("short copy: %d of %d", int64(n), st.Size())
+	}
+
+	return nil
+}
+
+func copyXAttrs(dst, src string) error {
+	xattrKeys, err := sysx.LListxattr(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to list xattrs on %s", src)
+	}
+	for _, xattr := range xattrKeys {
+		data, err := sysx.LGetxattr(src, xattr)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+		}
+		if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
+			return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+		}
+	}
+
+	return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+	st, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.New("unsupported stat type")
+	}
+	return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
+}
diff --git a/vendor/github.com/containerd/continuity/fs/copy_unix.go b/vendor/github.com/containerd/continuity/fs/copy_unix.go
new file mode 100644
index 0000000000..29cbb81ed5
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/copy_unix.go
@@ -0,0 +1,80 @@
+// +build solaris darwin freebsd
+
+package fs
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	
"github.com/containerd/continuity/sysx" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. + if dstStat, err2 := os.Lstat(name); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + } + + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)} + if err := syscall.UtimesNano(name, timespec); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyXAttrs(dst, src string) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + return errors.Wrapf(err, "failed to list xattrs on %s", src) + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + } + } + + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/containerd/continuity/fs/copy_windows.go b/vendor/github.com/containerd/continuity/fs/copy_windows.go new file mode 100644 index 0000000000..6fb3de5710 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/copy_windows.go @@ -0,0 +1,33 @@ +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/vendor/github.com/containerd/continuity/fs/diff.go b/vendor/github.com/containerd/continuity/fs/diff.go new file mode 100644 index 0000000000..f2300e845d --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff.go @@ -0,0 +1,310 @@ +package fs + +import ( + "context" + "os" + "path/filepath" + "strings" + + "golang.org/x/sync/errgroup" + + "github.com/sirupsen/logrus" +) + +// ChangeKind is the type of modification that +// a change is making. 
+type ChangeKind int
+
+const (
+	// ChangeKindUnmodified represents an unmodified
+	// file
+	ChangeKindUnmodified = iota
+
+	// ChangeKindAdd represents an addition of
+	// a file
+	ChangeKindAdd
+
+	// ChangeKindModify represents a change to
+	// an existing file
+	ChangeKindModify
+
+	// ChangeKindDelete represents a delete of
+	// a file
+	ChangeKindDelete
+)
+
+func (k ChangeKind) String() string {
+	switch k {
+	case ChangeKindUnmodified:
+		return "unmodified"
+	case ChangeKindAdd:
+		return "add"
+	case ChangeKindModify:
+		return "modify"
+	case ChangeKindDelete:
+		return "delete"
+	default:
+		return ""
+	}
+}
+
+// Change represents a single change between a diff and its parent.
+type Change struct {
+	Kind ChangeKind
+	Path string
+}
+
+// ChangeFunc is the type of function called for each change
+// computed during a directory changes calculation.
+type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
+
+// Changes computes changes between two directories calling the
+// given change function for each computed change. The first
+// directory is intended to be the base directory and the second
+// directory the changed directory.
+//
+// The change callback is called in the order of path names and
+// should be applicable in that order.
+// Due to this apply ordering, the following is true
+// - Removed directory trees only create a single change for the root
+//   directory removed. Remaining changes are implied.
+// - A directory which is modified to become a file will not have
+//   delete entries for sub-path items, their removal is implied
+//   by the removal of the parent directory.
+//
+// Opaque directories will not be treated specially and each file
+// removed from the base directory will show up as a removal.
+//
+// File content comparisons will be done on files which have timestamps
+// which may have been truncated. If either of the files being compared
+// has a zero value nanosecond value, each byte will be compared for
+// differences. If two files have the same seconds value but different
+// nanosecond values where one of those values is zero, the files will
+// be considered unchanged if the content is the same. This behavior
+// is to account for timestamp truncation during archiving.
+func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
+	if a == "" {
+		logrus.Debugf("Using single walk diff for %s", b)
+		return addDirChanges(ctx, changeFn, b)
+	} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
+		logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
+		return diffDirChanges(ctx, changeFn, a, diffOptions)
+	}
+
+	logrus.Debugf("Using double walk diff for %s from %s", b, a)
+	return doubleWalkDiff(ctx, changeFn, a, b)
+}
+
+func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
+	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		return changeFn(ChangeKindAdd, path, f, nil)
+	})
+}
+
+// diffDirOptions is used when the diff can be directly calculated from
+// a diff directory to its base, without walking both trees.
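+//
+// Before the implementation details, an editor's sketch of driving the
+// exported Changes API above, assuming directories "lower" and "upper":
+//
+//	var cs []fs.Change
+//	err := fs.Changes(ctx, "lower", "upper",
+//		func(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
+//			if err != nil {
+//				return err
+//			}
+//			cs = append(cs, fs.Change{Kind: k, Path: p})
+//			return nil
+//		})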
+type diffDirOptions struct { + diffDir string + skipChange func(string) (bool, error) + deleteChange func(string, string, os.FileInfo) (string, error) +} + +// diffDirChanges walks the diff directory and compares changes against the base. +func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error { + changedDirs := make(map[string]struct{}) + return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(o.diffDir, path) + if err != nil { + return err + } + + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + // TODO: handle opaqueness, start new double walker at this + // location to get deletes, and skip tree in single walker + + if o.skipChange != nil { + if skip, err := o.skipChange(path); skip { + return err + } + } + + var kind ChangeKind + + deletedFile, err := o.deleteChange(o.diffDir, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + path = deletedFile + kind = ChangeKindDelete + f = nil + } else { + // Otherwise, the file was added + kind = ChangeKindAdd + + // ...Unless it already existed in a base, in which case, it's a modification + stat, err := os.Stat(filepath.Join(base, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the base, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. + // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + kind = ChangeKindModify + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. 
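+		// Editor's note: for the add/delete cases below, a synthetic
+		// ChangeKindModify is emitted for the parent directory (once)
+		// before the child's own change, so consumers always see the
+		// parent before its new or deleted children.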
+		if f.IsDir() {
+			changedDirs[path] = struct{}{}
+		}
+		if kind == ChangeKindAdd || kind == ChangeKindDelete {
+			parent := filepath.Dir(path)
+			if _, ok := changedDirs[parent]; !ok && parent != "/" {
+				pi, err := os.Stat(filepath.Join(o.diffDir, parent))
+				if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
+					return err
+				}
+				changedDirs[parent] = struct{}{}
+			}
+		}
+
+		return changeFn(kind, path, f, nil)
+	})
+}
+
+// doubleWalkDiff walks both directories to create a diff
+func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
+	g, ctx := errgroup.WithContext(ctx)
+
+	var (
+		c1 = make(chan *currentPath)
+		c2 = make(chan *currentPath)
+
+		f1, f2 *currentPath
+		rmdir  string
+	)
+	g.Go(func() error {
+		defer close(c1)
+		return pathWalk(ctx, a, c1)
+	})
+	g.Go(func() error {
+		defer close(c2)
+		return pathWalk(ctx, b, c2)
+	})
+	g.Go(func() error {
+		for c1 != nil || c2 != nil {
+			if f1 == nil && c1 != nil {
+				f1, err = nextPath(ctx, c1)
+				if err != nil {
+					return err
+				}
+				if f1 == nil {
+					c1 = nil
+				}
+			}
+
+			if f2 == nil && c2 != nil {
+				f2, err = nextPath(ctx, c2)
+				if err != nil {
+					return err
+				}
+				if f2 == nil {
+					c2 = nil
+				}
+			}
+			if f1 == nil && f2 == nil {
+				continue
+			}
+
+			var f os.FileInfo
+			k, p := pathChange(f1, f2)
+			switch k {
+			case ChangeKindAdd:
+				if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f2 = nil
+			case ChangeKindDelete:
+				// Check if this file is already removed by being
+				// under a removed directory
+				if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
+					f1 = nil
+					continue
+				} else if f1.f.IsDir() {
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f1 = nil
+			case ChangeKindModify:
+				same, err := sameFile(f1, f2)
+				if err != nil {
+					return err
+				}
+				if f1.f.IsDir() && !f2.f.IsDir() {
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f1 = nil
+				f2 = nil
+				if same {
+					if !isLinked(f) {
+						continue
+					}
+					k = ChangeKindUnmodified
+				}
+			}
+			if err := changeFn(k, p, f, nil); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	return g.Wait()
+}
diff --git a/vendor/github.com/containerd/continuity/fs/diff_unix.go b/vendor/github.com/containerd/continuity/fs/diff_unix.go
new file mode 100644
index 0000000000..3751814443
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/diff_unix.go
@@ -0,0 +1,58 @@
+// +build !windows
+
+package fs
+
+import (
+	"bytes"
+	"os"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+)
+
+// detectDirDiff returns diff dir options if a directory could
+// be found in the mount info for upper which is the direct
+// diff with the provided lower directory
+func detectDirDiff(upper, lower string) *diffDirOptions {
+	// TODO: get mount options for upper
+	// TODO: detect AUFS
+	// TODO: detect overlay
+	return nil
+}
+
+// compareSysStat returns whether the stats are equivalent, i.e. whether
+// the files are considered the same, and an error if one occurred
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+	ls1, ok := s1.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+	ls2, ok := s2.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+
+	return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
+}
+
+func compareCapabilities(p1, p2 string) (bool, error) {
+	c1, err := sysx.LGetxattr(p1, "security.capability")
+	if err != nil && err != sysx.ENODATA {
+		return false, errors.Wrapf(err, "failed to get xattr for
%s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} + +func isLinked(f os.FileInfo) bool { + s, ok := f.Sys().(*syscall.Stat_t) + if !ok { + return false + } + return !f.IsDir() && s.Nlink > 1 +} diff --git a/vendor/github.com/containerd/continuity/fs/diff_windows.go b/vendor/github.com/containerd/continuity/fs/diff_windows.go new file mode 100644 index 0000000000..8eed36507e --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/diff_windows.go @@ -0,0 +1,32 @@ +package fs + +import ( + "os" + + "golang.org/x/sys/windows" +) + +func detectDirDiff(upper, lower string) *diffDirOptions { + return nil +} + +func compareSysStat(s1, s2 interface{}) (bool, error) { + f1, ok := s1.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + f2, ok := s2.(windows.Win32FileAttributeData) + if !ok { + return false, nil + } + return f1.FileAttributes == f2.FileAttributes, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + // TODO: Use windows equivalent + return true, nil +} + +func isLinked(os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/containerd/continuity/fs/dtype_linux.go b/vendor/github.com/containerd/continuity/fs/dtype_linux.go new file mode 100644 index 0000000000..cc06573f1b --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/dtype_linux.go @@ -0,0 +1,87 @@ +// +build linux + +package fs + +import ( + "fmt" + "io/ioutil" + "os" + "syscall" + "unsafe" +) + +func locateDummyIfEmpty(path string) (string, error) { + children, err := ioutil.ReadDir(path) + if err != nil { + return "", err + } + if len(children) != 0 { + return "", nil + } + dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") + if err != nil { + return "", err + } + name := dummyFile.Name() + err = dummyFile.Close() + return name, err +} + +// SupportsDType returns whether the filesystem mounted on path supports d_type +func SupportsDType(path string) (bool, error) { + // locate dummy so that we have at least one dirent + dummy, err := locateDummyIfEmpty(path) + if err != nil { + return false, err + } + if dummy != "" { + defer os.Remove(dummy) + } + + visited := 0 + supportsDType := true + fn := func(ent *syscall.Dirent) bool { + visited++ + if ent.Type == syscall.DT_UNKNOWN { + supportsDType = false + // stop iteration + return true + } + // continue iteration + return false + } + if err = iterateReadDir(path, fn); err != nil { + return false, err + } + if visited == 0 { + return false, fmt.Errorf("did not hit any dirent during iteration %s", path) + } + return supportsDType, nil +} + +func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error { + d, err := os.Open(path) + if err != nil { + return err + } + defer d.Close() + fd := int(d.Fd()) + buf := make([]byte, 4096) + for { + nbytes, err := syscall.ReadDirent(fd, buf) + if err != nil { + return err + } + if nbytes == 0 { + break + } + for off := 0; off < nbytes; { + ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off])) + if stop := fn(ent); stop { + return nil + } + off += int(ent.Reclen) + } + } + return nil +} diff --git a/vendor/github.com/containerd/continuity/fs/du.go b/vendor/github.com/containerd/continuity/fs/du.go new file mode 100644 index 0000000000..26f5333154 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/du.go @@ -0,0 +1,22 @@ +package fs + +import "context" + +// Usage of disk information +type Usage 
struct {
+	Inodes int64
+	Size   int64
+}
+
+// DiskUsage counts the number of inodes and the disk usage of the resources
+// under the given roots.
+func DiskUsage(roots ...string) (Usage, error) {
+	return diskUsage(roots...)
+}
+
+// DiffUsage counts the number of inodes and the disk usage in the
+// diff between the two directories. The first path is intended
+// as the base directory and the second as the changed directory.
+func DiffUsage(ctx context.Context, a, b string) (Usage, error) {
+	return diffUsage(ctx, a, b)
+}
diff --git a/vendor/github.com/containerd/continuity/fs/du_unix.go b/vendor/github.com/containerd/continuity/fs/du_unix.go
new file mode 100644
index 0000000000..fe3426d278
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/du_unix.go
@@ -0,0 +1,88 @@
+// +build !windows
+
+package fs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+type inode struct {
+	// TODO(stevvooe): Can probably reduce memory usage by not tracking
+	// device, but we can leave this for now.
+	dev, ino uint64
+}
+
+func newInode(stat *syscall.Stat_t) inode {
+	return inode{
+		// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
+		dev: uint64(stat.Dev), // nolint: unconvert
+		// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
+		ino: uint64(stat.Ino), // nolint: unconvert
+	}
+}
+
+func diskUsage(roots ...string) (Usage, error) {
+
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	for _, root := range roots {
+		if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+		}); err != nil {
+			return Usage{}, err
+		}
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
+
+func diffUsage(ctx context.Context, a, b string) (Usage, error) {
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if kind == ChangeKindAdd || kind == ChangeKindModify {
+			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+
+		}
+		return nil
+	}); err != nil {
+		return Usage{}, err
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/continuity/fs/du_windows.go b/vendor/github.com/containerd/continuity/fs/du_windows.go
new file mode 100644
index 0000000000..3f852fc15e
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/du_windows.go
@@ -0,0 +1,60 @@
+// +build windows
+
+package fs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+)
+
+func diskUsage(roots ...string) (Usage, error) {
+	var (
+		size int64
+	)
+
+	// TODO(stevvooe): Support inodes (or equivalent) for windows.
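The exported DiskUsage and DiffUsage wrappers above delegate to these per-platform implementations; a usage sketch under the vendored import path, with hypothetical snapshot directories:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Sketch: total usage of one tree, then the usage a diff adds on top.
	u, err := fs.DiskUsage("/var/lib/example/snapshots/1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d inodes, %d bytes\n", u.Inodes, u.Size)

	d, err := fs.DiffUsage(context.Background(),
		"/var/lib/example/snapshots/1", "/var/lib/example/snapshots/2")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("diff adds %d bytes\n", d.Size)
}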
+ + for _, root := range roots { + if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + size += fi.Size() + return nil + }); err != nil { + return Usage{}, err + } + } + + return Usage{ + Size: size, + }, nil +} + +func diffUsage(ctx context.Context, a, b string) (Usage, error) { + var ( + size int64 + ) + + if err := Changes(ctx, a, b, func(kind ChangeKind, _ string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if kind == ChangeKindAdd || kind == ChangeKindModify { + size += fi.Size() + + return nil + + } + return nil + }); err != nil { + return Usage{}, err + } + + return Usage{ + Size: size, + }, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink.go b/vendor/github.com/containerd/continuity/fs/hardlink.go new file mode 100644 index 0000000000..38da93813c --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink.go @@ -0,0 +1,27 @@ +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. +func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns a path for the given name and +// file info to its link source in the provided inode +// map. If the given file name is not in the map and +// has other links, it is added to the inode map +// to be a source for other link locations. +func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_unix.go b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go new file mode 100644 index 0000000000..a6f99778de --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_unix.go @@ -0,0 +1,18 @@ +// +build !windows + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + // Ino is uint32 on bsd, uint64 on darwin/linux/solaris + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert +} diff --git a/vendor/github.com/containerd/continuity/fs/hardlink_windows.go b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go new file mode 100644 index 0000000000..ad8845a7fb --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/hardlink_windows.go @@ -0,0 +1,7 @@ +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/vendor/github.com/containerd/continuity/fs/path.go b/vendor/github.com/containerd/continuity/fs/path.go new file mode 100644 index 0000000000..13fb826385 --- /dev/null +++ b/vendor/github.com/containerd/continuity/fs/path.go @@ -0,0 +1,276 @@ +package fs + +import ( + "bytes" + "context" + "io" + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +type currentPath struct { + path string + f os.FileInfo + fullPath string +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + // TODO: compare by 
directory
+
+	switch i := strings.Compare(lower.path, upper.path); {
+	case i < 0:
+		// File in lower that is not in upper
+		return ChangeKindDelete, lower.path
+	case i > 0:
+		// File in upper that is not in lower
+		return ChangeKindAdd, upper.path
+	default:
+		return ChangeKindModify, upper.path
+	}
+}
+
+func sameFile(f1, f2 *currentPath) (bool, error) {
+	if os.SameFile(f1.f, f2.f) {
+		return true, nil
+	}
+
+	equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
+	if err != nil || !equalStat {
+		return equalStat, err
+	}
+
+	if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
+		return eq, err
+	}
+
+	// If not a directory, also check size, modtime, and content
+	if !f1.f.IsDir() {
+		if f1.f.Size() != f2.f.Size() {
+			return false, nil
+		}
+		t1 := f1.f.ModTime()
+		t2 := f2.f.ModTime()
+
+		if t1.Unix() != t2.Unix() {
+			return false, nil
+		}
+
+		// If the timestamp may have been truncated in both of the
+		// files, check the content of both files to determine a difference
+		if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
+			var eq bool
+			if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
+				eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
+			} else if f1.f.Size() > 0 {
+				eq, err = compareFileContent(f1.fullPath, f2.fullPath)
+			}
+			if err != nil || !eq {
+				return eq, err
+			}
+		} else if t1.Nanosecond() != t2.Nanosecond() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+func compareSymlinkTarget(p1, p2 string) (bool, error) {
+	t1, err := os.Readlink(p1)
+	if err != nil {
+		return false, err
+	}
+	t2, err := os.Readlink(p2)
+	if err != nil {
+		return false, err
+	}
+	return t1 == t2, nil
+}
+
+const compareChunkSize = 32 * 1024
+
+// compareFileContent compares the content of two same-sized files
+// by comparing each byte.
+func compareFileContent(p1, p2 string) (bool, error) {
+	f1, err := os.Open(p1)
+	if err != nil {
+		return false, err
+	}
+	defer f1.Close()
+	f2, err := os.Open(p2)
+	if err != nil {
+		return false, err
+	}
+	defer f2.Close()
+
+	b1 := make([]byte, compareChunkSize)
+	b2 := make([]byte, compareChunkSize)
+	for {
+		n1, err1 := f1.Read(b1)
+		if err1 != nil && err1 != io.EOF {
+			return false, err1
+		}
+		n2, err2 := f2.Read(b2)
+		if err2 != nil && err2 != io.EOF {
+			return false, err2
+		}
+		if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
+			return false, nil
+		}
+		if err1 == io.EOF && err2 == io.EOF {
+			return true, nil
+		}
+	}
+}
+
+func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
+	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		p := &currentPath{
+			path:     path,
+			f:        f,
+			fullPath: filepath.Join(root, path),
+		}
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case pathC <- p:
+			return nil
+		}
+	})
+}
+
+func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case p := <-pathC:
+		return p, nil
+	}
+}
+
+// RootPath joins a path with a root, evaluating and bounding any
+// symlink to the root directory.
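To make the contract concrete before the implementation: a symlink whose target is an absolute path is re-rooted under root rather than followed out of the tree. A hedged sketch, with a hypothetical root and link layout:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/fs"
)

func main() {
	// Assume /srv/rootfs/etc/alias is a symlink to the absolute
	// path /etc/passwd (layout hypothetical).
	p, err := fs.RootPath("/srv/rootfs", "/etc/alias")
	if err != nil {
		log.Fatal(err)
	}
	// p remains inside the root (here /srv/rootfs/etc/passwd), so the
	// symlink cannot escape /srv/rootfs.
	fmt.Println(p)
}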
+func RootPath(root, path string) (string, error) {
+	if path == "" {
+		return root, nil
+	}
+	var linksWalked int // to protect against cycles
+	for {
+		i := linksWalked
+		newpath, err := walkLinks(root, path, &linksWalked)
+		if err != nil {
+			return "", err
+		}
+		path = newpath
+		if i == linksWalked {
+			newpath = filepath.Join("/", newpath)
+			if path == newpath {
+				return filepath.Join(root, newpath), nil
+			}
+			path = newpath
+		}
+	}
+}
+
+func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
+	if *linksWalked > 255 {
+		return "", false, errTooManyLinks
+	}
+
+	path = filepath.Join("/", path)
+	if path == "/" {
+		return path, false, nil
+	}
+	realPath := filepath.Join(root, path)
+
+	fi, err := os.Lstat(realPath)
+	if err != nil {
+		// If path does not yet exist, treat as non-symlink
+		if os.IsNotExist(err) {
+			return path, false, nil
+		}
+		return "", false, err
+	}
+	if fi.Mode()&os.ModeSymlink == 0 {
+		return path, false, nil
+	}
+	newpath, err = os.Readlink(realPath)
+	if err != nil {
+		return "", false, err
+	}
+	if filepath.IsAbs(newpath) && strings.HasPrefix(newpath, root) {
+		// Strip the root prefix so the absolute target is rebased
+		// under root instead of being taken literally.
+		newpath = newpath[len(root):]
+		if !strings.HasPrefix(newpath, "/") {
+			newpath = "/" + newpath
+		}
+	}
+	*linksWalked++
+	return newpath, true, nil
+}
+
+func walkLinks(root, path string, linksWalked *int) (string, error) {
+	switch dir, file := filepath.Split(path); {
+	case dir == "":
+		newpath, _, err := walkLink(root, file, linksWalked)
+		return newpath, err
+	case file == "":
+		if os.IsPathSeparator(dir[len(dir)-1]) {
+			if dir == "/" {
+				return dir, nil
+			}
+			return walkLinks(root, dir[:len(dir)-1], linksWalked)
+		}
+		newpath, _, err := walkLink(root, dir, linksWalked)
+		return newpath, err
+	default:
+		newdir, err := walkLinks(root, dir, linksWalked)
+		if err != nil {
+			return "", err
+		}
+		newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
+		if err != nil {
+			return "", err
+		}
+		if !islink {
+			return newpath, nil
+		}
+		if filepath.IsAbs(newpath) {
+			return newpath, nil
+		}
+		return filepath.Join(newdir, newpath), nil
+	}
+}
diff --git a/vendor/github.com/containerd/continuity/fs/stat_bsd.go b/vendor/github.com/containerd/continuity/fs/stat_bsd.go
new file mode 100644
index 0000000000..a1b776fdf5
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/stat_bsd.go
@@ -0,0 +1,28 @@
+// +build darwin freebsd
+
+package fs
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the access time from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atimespec
+}
+
+// StatCtime returns the status change time from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctimespec
+}
+
+// StatMtime returns the modified time from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtimespec
+}
+
+// StatATimeAsTime returns the access time as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/continuity/fs/stat_linux.go b/vendor/github.com/containerd/continuity/fs/stat_linux.go
new file mode 100644
index 0000000000..1dbb0212b6
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/stat_linux.go
@@ -0,0 +1,27 @@
+package fs
+
+import (
+	"syscall"
+	"time"
+)
+
+// StatAtime returns the access time (Atim) from a stat struct
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atim
+}
+
+// StatCtime returns the status change time (Ctim) from a stat struct
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctim
+}
+
+// StatMtime returns the modified time (Mtim) from a stat struct
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtim
+}
+
+// StatATimeAsTime returns st.Atim as a time.Time
+func StatATimeAsTime(st *syscall.Stat_t) time.Time {
+	// The int64 conversions ensure the line compiles for 32-bit systems as well.
+	return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) // nolint: unconvert
+}
diff --git a/vendor/github.com/containerd/continuity/fs/time.go b/vendor/github.com/containerd/continuity/fs/time.go
new file mode 100644
index 0000000000..c336f4d881
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/fs/time.go
@@ -0,0 +1,13 @@
+package fs
+
+import "time"
+
+// Gnu tar and the go tar writer don't have sub-second mtime
+// precision, which is problematic when we apply changes via tar
+// files. We handle this by comparing for exact times, *or* the same
+// second count where either a or b has exactly 0 nanoseconds.
+func sameFsTime(a, b time.Time) bool {
+	return a == b ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/asm.s b/vendor/github.com/containerd/continuity/sysx/asm.s
new file mode 100644
index 0000000000..8ed2fdb94b
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/asm.s
@@ -0,0 +1,10 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+TEXT ·use(SB),NOSPLIT,$0
+	RET
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go
new file mode 100644
index 0000000000..e3ae2b7bbf
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go
@@ -0,0 +1,18 @@
+package sysx
+
+const (
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
+	AtSymlinkNofollow = 0x20
+)
+
+const (
+
+	// SYS_FCHMODAT defined from golang.org/sys/unix
+	SYS_FCHMODAT = 467
+)
+
+// These functions will be generated by generate.sh
+// $ GOOS=darwin GOARCH=386 ./generate.sh chmod
+// $ GOOS=darwin GOARCH=amd64 ./generate.sh chmod
+
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go
new file mode 100644
index 0000000000..5a8cf5b57d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go
@@ -0,0 +1,25 @@
+// mksyscall.pl -l32 chmod_darwin.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package sysx
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go
new file mode 100644
index 0000000000..3287d1d579
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go
@@ -0,0 +1,25 @@
+// mksyscall.pl chmod_darwin.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package sysx
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go
new file mode 100644
index 0000000000..b64a708be1
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go
@@ -0,0 +1,17 @@
+package sysx
+
+const (
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
+	AtSymlinkNofollow = 0x200
+)
+
+const (
+
+	// SYS_FCHMODAT defined from golang.org/sys/unix
+	SYS_FCHMODAT = 490
+)
+
+// These functions will be generated by generate.sh
+// $ GOOS=freebsd GOARCH=amd64 ./generate.sh chmod
+
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go
new file mode 100644
index 0000000000..5a271abb1e
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go
@@ -0,0 +1,25 @@
+// mksyscall.pl chmod_freebsd.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package sysx
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+	use(unsafe.Pointer(_p0))
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_linux.go b/vendor/github.com/containerd/continuity/sysx/chmod_linux.go
new file mode 100644
index 0000000000..89df6d38ef
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_linux.go
@@ -0,0 +1,12 @@
+package sysx
+
+import "syscall"
+
+const (
+	// AtSymlinkNofollow defined from AT_SYMLINK_NOFOLLOW in /usr/include/linux/fcntl.h
+	AtSymlinkNofollow = 0x100
+)
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
+	return syscall.Fchmodat(dirfd, path, mode, flags)
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go b/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go
new file mode 100644
index 0000000000..3ba6e5edc8
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go
@@ -0,0 +1,11 @@
+package sysx
+
+import "golang.org/x/sys/unix"
+
+const (
+	AtSymlinkNofollow = unix.AT_SYMLINK_NOFOLLOW
+)
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
+	return unix.Fchmodat(dirfd, path, mode, flags)
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
new file mode 100644
index 0000000000..fc47ddb8dc
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
@@ -0,0 +1,7 @@
+package sysx
+
+import ( + "syscall" +) + +const ENODATA = syscall.ENODATA diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go new file mode 100644 index 0000000000..53cc8e068f --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go @@ -0,0 +1,8 @@ +package sysx + +import ( + "syscall" +) + +// This should actually be a set that contains ENOENT and EPERM +const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go new file mode 100644 index 0000000000..7e6851209f --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go @@ -0,0 +1,9 @@ +// +build darwin freebsd + +package sysx + +import ( + "syscall" +) + +const ENODATA = syscall.ENOATTR diff --git a/vendor/github.com/containerd/continuity/sysx/sys.go b/vendor/github.com/containerd/continuity/sysx/sys.go new file mode 100644 index 0000000000..0bb1676283 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/sys.go @@ -0,0 +1,37 @@ +package sysx + +import ( + "syscall" + "unsafe" +) + +var _zero uintptr + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +//go:noescape +func use(p unsafe.Pointer) + +// Do the interface allocations only once for common +// Errno values. +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return e +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go new file mode 100644 index 0000000000..20937c2d4d --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr.go @@ -0,0 +1,67 @@ +package sysx + +import ( + "bytes" + "fmt" + "syscall" +) + +const defaultXattrBufferSize = 5 + +var ErrNotSupported = fmt.Errorf("not supported") + +type listxattrFunc func(path string, dest []byte) (int, error) + +func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { + var p []byte // nil on first execution + + for { + n, err := listFunc(path, p) // first call gets buffer size. + if err != nil { + return nil, err + } + + if n > len(p) { + p = make([]byte, n) + continue + } + + p = p[:n] + + ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0}) + var entries []string + for _, p := range ps { + s := string(p) + if s != "" { + entries = append(entries, s) + } + } + + return entries, nil + } +} + +type getxattrFunc func(string, string, []byte) (int, error) + +func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { + p := make([]byte, defaultXattrBufferSize) + for { + n, err := getFunc(path, attr, p) + if err != nil { + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE { + p = make([]byte, len(p)*2) // this can't be ideal. + continue // try again! 
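The retry loop in getxattrAll above is deliberately syscall-agnostic: each platform passes its own getter. On Linux (see xattr_linux.go later in this diff) the wiring is getxattrAll(path, attr, unix.Getxattr); a hedged in-package sketch under that assumption, with a hypothetical path and attribute name:

package sysx

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// exampleGetxattr is illustrative only (getxattrAll is unexported, so this
// sketch lives in package sysx): fetch one attribute, letting the loop
// above grow the buffer on ERANGE.
func exampleGetxattr() error {
	data, err := getxattrAll("/etc/hostname", "user.comment", unix.Getxattr)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes\n", len(data))
	return nil
}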
+			}
+
+			return nil, err
+		}
+
+		// realloc to correct size and repeat
+		if n > len(p) {
+			p = make([]byte, n)
+			continue
+		}
+
+		return p[:n], nil
+	}
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go
new file mode 100644
index 0000000000..1164a7d11c
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go
@@ -0,0 +1,71 @@
+package sysx
+
+// These functions will be generated by generate.sh
+// $ GOOS=darwin GOARCH=386 ./generate.sh xattr
+// $ GOOS=darwin GOARCH=amd64 ./generate.sh xattr
+
+//sys getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error)
+//sys setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys removexattr(path string, attr string, options int) (err error)
+//sys listxattr(path string, dest []byte, options int) (sz int, err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+
+const (
+	xattrNoFollow = 0x01
+)
+
+func listxattrFollow(path string, dest []byte) (sz int, err error) {
+	return listxattr(path, dest, 0)
+}
+
+// Listxattr calls syscall listxattr
+func Listxattr(path string) ([]string, error) {
+	return listxattrAll(path, listxattrFollow)
+}
+
+// Removexattr calls syscall removexattr
+func Removexattr(path string, attr string) (err error) {
+	return removexattr(path, attr, 0)
+}
+
+// Setxattr calls syscall setxattr
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+	return setxattr(path, attr, data, flags)
+}
+
+func getxattrFollow(path, attr string, dest []byte) (sz int, err error) {
+	return getxattr(path, attr, dest, 0, 0)
+}
+
+// Getxattr calls syscall getxattr
+func Getxattr(path, attr string) ([]byte, error) {
+	return getxattrAll(path, attr, getxattrFollow)
+}
+
+func listxattrNoFollow(path string, dest []byte) (sz int, err error) {
+	return listxattr(path, dest, xattrNoFollow)
+}
+
+// LListxattr calls syscall listxattr with XATTR_NOFOLLOW
+func LListxattr(path string) ([]string, error) {
+	return listxattrAll(path, listxattrNoFollow)
+}
+
+// LRemovexattr calls syscall removexattr with XATTR_NOFOLLOW
+func LRemovexattr(path string, attr string) (err error) {
+	return removexattr(path, attr, xattrNoFollow)
+}
+
+// LSetxattr calls syscall setxattr with XATTR_NOFOLLOW
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
+	return setxattr(path, attr, data, flags|xattrNoFollow)
+}
+
+func getxattrNoFollow(path, attr string, dest []byte) (sz int, err error) {
+	return getxattr(path, attr, dest, 0, xattrNoFollow)
+}
+
+// LGetxattr calls syscall getxattr with XATTR_NOFOLLOW
+func LGetxattr(path, attr string) ([]byte, error) {
+	return getxattrAll(path, attr, getxattrNoFollow)
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go
new file mode 100644
index 0000000000..aa896b57fc
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go
@@ -0,0 +1,111 @@
+// mksyscall.pl -l32 xattr_darwin.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package sysx
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(path)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = 
syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go new file mode 100644 index 0000000000..6ff27e2703 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go @@ -0,0 +1,111 @@ +// mksyscall.pl xattr_darwin.go +// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT + +package sysx + +import ( + "syscall" + "unsafe" +) + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(dest) > 0 { + _p2 = unsafe.Pointer(&dest[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + sz = 
int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data []byte, flags int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + var _p2 unsafe.Pointer + if len(data) > 0 { + _p2 = unsafe.Pointer(&data[0]) + } else { + _p2 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + use(unsafe.Pointer(_p0)) + use(unsafe.Pointer(_p1)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest []byte, options int) (sz int, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(path) + if err != nil { + return + } + var _p1 unsafe.Pointer + if len(dest) > 0 { + _p1 = unsafe.Pointer(&dest[0]) + } else { + _p1 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0) + use(unsafe.Pointer(_p0)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go b/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go new file mode 100644 index 0000000000..e8017d317f --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go @@ -0,0 +1,12 @@ +package sysx + +import ( + "errors" +) + +// Initial stub version for FreeBSD. FreeBSD has a different +// syscall API from Darwin and Linux for extended attributes; +// it is also not widely used. It is not exposed at all by the +// Go syscall package, so we need to implement directly eventually. 
+ +var unsupported = errors.New("extended attributes unsupported on FreeBSD") diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_linux.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux.go new file mode 100644 index 0000000000..311b896d9e --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_linux.go @@ -0,0 +1,44 @@ +package sysx + +import "golang.org/x/sys/unix" + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Listxattr) +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unix.Removexattr(path, attr) +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Setxattr(path, attr, data, flags) +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Getxattr) +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return listxattrAll(path, unix.Llistxattr) +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unix.Lremovexattr(path, attr) +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unix.Lsetxattr(path, attr, data, flags) +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return getxattrAll(path, attr, unix.Lgetxattr) +} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go b/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go new file mode 100644 index 0000000000..723619977d --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_openbsd.go @@ -0,0 +1,7 @@ +package sysx + +import ( + "errors" +) + +var unsupported = errors.New("extended attributes unsupported on OpenBSD") diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go b/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go new file mode 100644 index 0000000000..fc523fcbbe --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go @@ -0,0 +1,12 @@ +package sysx + +import ( + "errors" +) + +// Initial stub version for Solaris. Solaris has a different +// syscall API from Darwin and Linux for extended attributes; +// it is also not widely used. It is not exposed at all by the +// Go syscall package, so we need to implement directly eventually. 
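Taken together, the Linux bindings above expose symmetric follow/no-follow accessors; a usage sketch under the vendored import path, with a hypothetical link path and attribute names:

package main

import (
	"fmt"
	"log"

	"github.com/containerd/continuity/sysx"
)

func main() {
	// Sketch: inspect a symlink's own xattrs without following it.
	const link = "/var/lib/example/link"
	names, err := sysx.LListxattr(link)
	if err != nil {
		log.Fatal(err)
	}
	for _, name := range names {
		val, err := sysx.LGetxattr(link, name)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s=%q\n", name, val)
	}
}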
+ +var unsupported = errors.New("extended attributes unsupported on Solaris") diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go new file mode 100644 index 0000000000..c8389bc136 --- /dev/null +++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go @@ -0,0 +1,44 @@ +// +build freebsd openbsd solaris + +package sysx + +// Listxattr calls syscall listxattr and reads all content +// and returns a string array +func Listxattr(path string) ([]string, error) { + return []string{}, nil +} + +// Removexattr calls syscall removexattr +func Removexattr(path string, attr string) (err error) { + return unsupported +} + +// Setxattr calls syscall setxattr +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// Getxattr calls syscall getxattr +func Getxattr(path, attr string) ([]byte, error) { + return []byte{}, unsupported +} + +// LListxattr lists xattrs, not following symlinks +func LListxattr(path string) ([]string, error) { + return []string{}, nil +} + +// LRemovexattr removes an xattr, not following symlinks +func LRemovexattr(path string, attr string) (err error) { + return unsupported +} + +// LSetxattr sets an xattr, not following symlinks +func LSetxattr(path string, attr string, data []byte, flags int) (err error) { + return unsupported +} + +// LGetxattr gets an xattr, not following symlinks +func LGetxattr(path, attr string) ([]byte, error) { + return []byte{}, nil +} diff --git a/vendor/github.com/containerd/fifo/LICENSE b/vendor/github.com/containerd/fifo/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/containerd/fifo/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/fifo/fifo.go b/vendor/github.com/containerd/fifo/fifo.go new file mode 100644 index 0000000000..e79813da7d --- /dev/null +++ b/vendor/github.com/containerd/fifo/fifo.go @@ -0,0 +1,236 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package fifo
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"syscall"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type fifo struct {
+	flag        int
+	opened      chan struct{}
+	closed      chan struct{}
+	closing     chan struct{}
+	err         error
+	file        *os.File
+	closingOnce sync.Once // close has been called
+	closedOnce  sync.Once // fifo is closed
+	handle      *handle
+}
+
+var leakCheckWg *sync.WaitGroup
+
+// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
+// The context can be used to cancel this function before open(2) has returned.
+// Accepted flags:
+// - syscall.O_CREAT - create new fifo if one doesn't exist
+// - syscall.O_RDONLY - open fifo only from reader side
+// - syscall.O_WRONLY - open fifo only from writer side
+// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
+// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
+//     fifo isn't open. read/write will be connected after the actual fifo is
+//     open or after fifo is closed.
+func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
+	if _, err := os.Stat(fn); err != nil {
+		if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
+			if err := mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
+				return nil, errors.Wrapf(err, "error creating fifo %v", fn)
+			}
+		} else {
+			return nil, err
+		}
+	}
+
+	block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0
+
+	flag &= ^syscall.O_CREAT
+	flag &= ^syscall.O_NONBLOCK
+
+	h, err := getHandle(fn)
+	if err != nil {
+		return nil, err
+	}
+
+	f := &fifo{
+		handle:  h,
+		flag:    flag,
+		opened:  make(chan struct{}),
+		closed:  make(chan struct{}),
+		closing: make(chan struct{}),
+	}
+
+	wg := leakCheckWg
+	if wg != nil {
+		wg.Add(2)
+	}
+
+	go func() {
+		if wg != nil {
+			defer wg.Done()
+		}
+		select {
+		case <-ctx.Done():
+			select {
+			case <-f.opened:
+			default:
+				f.Close()
+			}
+		case <-f.opened:
+		case <-f.closed:
+		}
+	}()
+	go func() {
+		if wg != nil {
+			defer wg.Done()
+		}
+		var file *os.File
+		fn, err := h.Path()
+		if err == nil {
+			file, err = os.OpenFile(fn, flag, 0)
+		}
+		select {
+		case <-f.closing:
+			if err == nil {
+				select {
+				case <-ctx.Done():
+					err = ctx.Err()
+				default:
+					err = errors.Errorf("fifo %v was closed before opening", h.Name())
+				}
+				if file != nil {
+					file.Close()
+				}
+			}
+		default:
+		}
+		if err != nil {
+			f.closedOnce.Do(func() {
+				f.err = err
+				close(f.closed)
+			})
+			return
+		}
+		f.file = file
+		close(f.opened)
+	}()
+	if block {
+		select {
+		case <-f.opened:
+		case <-f.closed:
+			return nil, f.err
+		}
+	}
+	return f, nil
+}
+
+// Read from a fifo to a byte array.
+func (f *fifo) Read(b []byte) (int, error) {
+	if f.flag&syscall.O_WRONLY > 0 {
+		return 0, errors.New("reading from write-only fifo")
+	}
+	select {
+	case <-f.opened:
+		return f.file.Read(b)
+	default:
+	}
+	select {
+	case <-f.opened:
+		return f.file.Read(b)
+	case <-f.closed:
+		return 0, errors.New("reading from a closed fifo")
+	}
+}
+
+// Write from byte array to a fifo.
+func (f *fifo) Write(b []byte) (int, error) {
+	if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
+		return 0, errors.New("writing to read-only fifo")
+	}
+	select {
+	case <-f.opened:
+		return f.file.Write(b)
+	default:
+	}
+	select {
+	case <-f.opened:
+		return f.file.Write(b)
+	case <-f.closed:
+		return 0, errors.New("writing to a closed fifo")
+	}
+}
+
+// Close the fifo. Next reads/writes will error. This method can also be used
+// before open(2) has returned and the fifo was never opened.
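The flag semantics documented on OpenFifo make the common producer pattern straightforward; a sketch with a hypothetical fifo path:

package main

import (
	"log"
	"syscall"

	"github.com/containerd/fifo"
	"golang.org/x/net/context"
)

func main() {
	// Sketch: create the fifo and open its write side. With O_NONBLOCK,
	// OpenFifo returns immediately; the Write connects once a reader
	// opens the fifo.
	w, err := fifo.OpenFifo(context.Background(), "/run/example/log.fifo",
		syscall.O_CREAT|syscall.O_WRONLY|syscall.O_NONBLOCK, 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()
	if _, err := w.Write([]byte("ready\n")); err != nil {
		log.Fatal(err)
	}
}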
+func (f *fifo) Close() (retErr error) { + for { + select { + case <-f.closed: + f.handle.Close() + return + default: + select { + case <-f.opened: + f.closedOnce.Do(func() { + retErr = f.file.Close() + f.err = retErr + close(f.closed) + }) + default: + if f.flag&syscall.O_RDWR != 0 { + runtime.Gosched() + break + } + f.closingOnce.Do(func() { + close(f.closing) + }) + reverseMode := syscall.O_WRONLY + if f.flag&syscall.O_WRONLY > 0 { + reverseMode = syscall.O_RDONLY + } + fn, err := f.handle.Path() + // if Close() is called concurrently(shouldn't) it may cause error + // because handle is closed + select { + case <-f.closed: + default: + if err != nil { + // Path has become invalid. We will leak a goroutine. + // This case should not happen in linux. + f.closedOnce.Do(func() { + f.err = err + close(f.closed) + }) + <-f.closed + break + } + f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0) + if err == nil { + f.Close() + } + runtime.Gosched() + } + } + } + } +} diff --git a/vendor/github.com/containerd/fifo/handle_linux.go b/vendor/github.com/containerd/fifo/handle_linux.go new file mode 100644 index 0000000000..6ac89b6a4d --- /dev/null +++ b/vendor/github.com/containerd/fifo/handle_linux.go @@ -0,0 +1,97 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package fifo
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+const O_PATH = 010000000 // Linux O_PATH (0x200000): descriptor usable only for path-level operations
+
+type handle struct {
+	f         *os.File
+	fd        uintptr
+	dev       uint64
+	ino       uint64
+	closeOnce sync.Once
+	name      string
+}
+
+func getHandle(fn string) (*handle, error) {
+	f, err := os.OpenFile(fn, O_PATH, 0)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
+	}
+
+	var (
+		stat syscall.Stat_t
+		fd   = f.Fd()
+	)
+	if err := syscall.Fstat(int(fd), &stat); err != nil {
+		f.Close()
+		return nil, errors.Wrapf(err, "failed to stat handle %v", fd)
+	}
+
+	h := &handle{
+		f:    f,
+		name: fn,
+		dev:  uint64(stat.Dev),
+		ino:  stat.Ino,
+		fd:   fd,
+	}
+
+	// sanity-check that the /proc entry for this descriptor exists
+	if _, err := os.Stat(h.procPath()); err != nil {
+		f.Close()
+		return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
+	}
+
+	return h, nil
+}
+
+func (h *handle) procPath() string {
+	return fmt.Sprintf("/proc/self/fd/%d", h.fd)
+}
+
+func (h *handle) Name() string {
+	return h.name
+}
+
+func (h *handle) Path() (string, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(h.procPath(), &stat); err != nil {
+		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
+	}
+	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
+		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
+	}
+	return h.procPath(), nil
+}
+
+func (h *handle) Close() error {
+	h.closeOnce.Do(func() {
+		h.f.Close()
+	})
+	return nil
+}
diff --git a/vendor/github.com/containerd/fifo/handle_nolinux.go b/vendor/github.com/containerd/fifo/handle_nolinux.go
new file mode 100644
index 0000000000..4f2a282b2b
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/handle_nolinux.go
@@ -0,0 +1,65 @@
+// +build !linux
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package fifo + +import ( + "syscall" + + "github.com/pkg/errors" +) + +type handle struct { + fn string + dev uint64 + ino uint64 +} + +func getHandle(fn string) (*handle, error) { + var stat syscall.Stat_t + if err := syscall.Stat(fn, &stat); err != nil { + return nil, errors.Wrapf(err, "failed to stat %v", fn) + } + + h := &handle{ + fn: fn, + dev: uint64(stat.Dev), + ino: uint64(stat.Ino), + } + + return h, nil +} + +func (h *handle) Path() (string, error) { + var stat syscall.Stat_t + if err := syscall.Stat(h.fn, &stat); err != nil { + return "", errors.Wrapf(err, "path %v could not be statted", h.fn) + } + if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino { + return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn) + } + return h.fn, nil +} + +func (h *handle) Name() string { + return h.fn +} + +func (h *handle) Close() error { + return nil +} diff --git a/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go b/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go new file mode 100644 index 0000000000..2799a06d10 --- /dev/null +++ b/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go @@ -0,0 +1,25 @@ +// +build !solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import "syscall" + +func mkfifo(path string, mode uint32) (err error) { + return syscall.Mkfifo(path, mode) +} diff --git a/vendor/github.com/containerd/fifo/mkfifo_solaris.go b/vendor/github.com/containerd/fifo/mkfifo_solaris.go new file mode 100644 index 0000000000..1ecd722ae2 --- /dev/null +++ b/vendor/github.com/containerd/fifo/mkfifo_solaris.go @@ -0,0 +1,27 @@ +// +build solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package fifo + +import ( + "golang.org/x/sys/unix" +) + +func mkfifo(path string, mode uint32) (err error) { + return unix.Mkfifo(path, mode) +} diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE deleted file mode 100644 index 9535635306..0000000000 --- a/vendor/github.com/containers/image/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
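Before the removed containers/image code below, one detail of the handle_linux.go file added above is worth spelling out: a fifo is pinned by its device and inode numbers, and every later access goes through /proc/self/fd and re-checks that identity, so a renamed or replaced path is detected. A rough sketch of the same dev/ino check (an editor's illustration assuming a Linux host, not code from the patch):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

// identity records the (device, inode) pair that names a file on Linux,
// mirroring the dev/ino fields kept by the vendored handle type.
type identity struct {
	dev, ino uint64
}

func statIdentity(path string) (identity, error) {
	var st syscall.Stat_t
	if err := syscall.Stat(path, &st); err != nil {
		return identity{}, err
	}
	return identity{dev: uint64(st.Dev), ino: uint64(st.Ino)}, nil
}

func main() {
	f, err := ioutil.TempFile("", "handle-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	want, err := statIdentity(f.Name())
	if err != nil {
		panic(err)
	}

	// Re-stat later and compare, as handle.Path() does against
	// /proc/self/fd/<fd>; a mismatch means the path was replaced.
	got, err := statIdentity(f.Name())
	if err != nil || got != want {
		fmt.Println("path no longer refers to the original file")
		return
	}
	fmt.Println("verified: same device and inode")
}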
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go deleted file mode 100644 index 29065e031d..0000000000 --- a/vendor/github.com/containers/image/copy/copy.go +++ /dev/null @@ -1,718 +0,0 @@ -package copy - -import ( - "bytes" - "compress/gzip" - "context" - "fmt" - "io" - "io/ioutil" - "reflect" - "runtime" - "strings" - "time" - - "github.com/containers/image/image" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - pb "gopkg.in/cheggaaa/pb.v1" -) - -type digestingReader struct { - source io.Reader - digester digest.Digester - expectedDigest digest.Digest - validationFailed bool -} - -// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error -// and set validationFailed to true if the source stream does not match expectedDigest. -func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { - if err := expectedDigest.Validate(); err != nil { - return nil, errors.Errorf("Invalid digest specification %s", expectedDigest) - } - digestAlgorithm := expectedDigest.Algorithm() - if !digestAlgorithm.Available() { - return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm) - } - return &digestingReader{ - source: source, - digester: digestAlgorithm.Digester(), - expectedDigest: expectedDigest, - validationFailed: false, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 0 { - if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n) - } - } - if err == io.EOF { - actualDigest := d.digester.Digest() - if actualDigest != d.expectedDigest { - d.validationFailed = true - return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest) - } - } - return n, err -} - -// copier allows us to keep track of diffID values for blobs, and other -// data shared across one or more images in a possible manifest list. -type copier struct { - copiedBlobs map[digest.Digest]digest.Digest - cachedDiffIDs map[digest.Digest]digest.Digest - dest types.ImageDestination - rawSource types.ImageSource - reportWriter io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties -} - -// imageCopier tracks state specific to a single image (possibly an item of a manifest list) -type imageCopier struct { - c *copier - manifestUpdates *types.ManifestUpdateOptions - src types.Image - diffIDsAreNeeded bool - canModifyManifest bool -} - -// Options allows supplying non-default configuration modifying the behavior of CopyImage. -type Options struct { - RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. 
- SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), - ReportWriter io.Writer - SourceCtx *types.SystemContext - DestinationCtx *types.SystemContext - ProgressInterval time.Duration // time to wait between reports to signal the progress channel - Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. - // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type - ForceManifestMIMEType string -} - -// Image copies image from srcRef to destRef, using policyContext to validate -// source image admissibility. -func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) { - // NOTE this function uses an output parameter for the error return value. - // Setting this and returning is the ideal way to return an error. - // - // the defers in this routine will wrap the error return with its own errors - // which can be valuable context in the middle of a multi-streamed copy. - if options == nil { - options = &Options{} - } - - reportWriter := ioutil.Discard - - if options.ReportWriter != nil { - reportWriter = options.ReportWriter - } - - dest, err := destRef.NewImageDestination(options.DestinationCtx) - if err != nil { - return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) - } - defer func() { - if err := dest.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (dest: %v)", err) - } - }() - - rawSource, err := srcRef.NewImageSource(options.SourceCtx) - if err != nil { - return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) - } - defer func() { - if err := rawSource.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (src: %v)", err) - } - }() - - c := &copier{ - copiedBlobs: make(map[digest.Digest]digest.Digest), - cachedDiffIDs: make(map[digest.Digest]digest.Digest), - dest: dest, - rawSource: rawSource, - reportWriter: reportWriter, - progressInterval: options.ProgressInterval, - progress: options.Progress, - } - - unparsedToplevel := image.UnparsedInstance(rawSource, nil) - multiImage, err := isMultiImage(unparsedToplevel) - if err != nil { - return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) - } - - if !multiImage { - // The simple case: Just copy a single image. - if err := c.copyOneImage(policyContext, options, unparsedToplevel); err != nil { - return err - } - } else { - // This is a manifest list. Choose a single image and copy it. - // FIXME: Copy to destinations which support manifest lists, one image at a time. 
- instanceDigest, err := image.ChooseManifestInstanceFromManifestList(options.SourceCtx, unparsedToplevel) - if err != nil { - return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) - } - logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) - unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - - if err := c.copyOneImage(policyContext, options, unparsedInstance); err != nil { - return err - } - } - - if err := c.dest.Commit(); err != nil { - return errors.Wrap(err, "Error committing the finished image") - } - - return nil -} - -// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate -// source image admissibility. -func (c *copier) copyOneImage(policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) { - // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. - // Make sure we fail cleanly in such cases. - multiImage, err := isMultiImage(unparsedImage) - if err != nil { - // FIXME FIXME: How to name a reference for the sub-image? - return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) - } - if multiImage { - return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") - } - - // Please keep this policy check BEFORE reading any other information about the image. - // (the multiImage check above only matches the MIME type, which we have received anyway. - // Actual parsing of anything should be deferred.) - if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. - return errors.Wrap(err, "Source image rejected") - } - src, err := image.FromUnparsedImage(options.SourceCtx, unparsedImage) - if err != nil { - return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) - } - - if err := checkImageDestinationForCurrentRuntimeOS(options.DestinationCtx, src, c.dest); err != nil { - return err - } - - var sigs [][]byte - if options.RemoveSignatures { - sigs = [][]byte{} - } else { - c.Printf("Getting image source signatures\n") - s, err := src.Signatures(context.TODO()) - if err != nil { - return errors.Wrap(err, "Error reading signatures") - } - sigs = s - } - if len(sigs) != 0 { - c.Printf("Checking if image destination supports signatures\n") - if err := c.dest.SupportsSignatures(); err != nil { - return errors.Wrap(err, "Can not copy signatures") - } - } - - ic := imageCopier{ - c: c, - manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, - src: src, - // diffIDsAreNeeded is computed later - canModifyManifest: len(sigs) == 0, - } - - if err := ic.updateEmbeddedDockerReference(); err != nil { - return err - } - - // We compute preferredManifestMIMEType only to show it in error messages. - // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. 
- preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) - if err != nil { - return err - } - - // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. - ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) - - if err := ic.copyLayers(); err != nil { - return err - } - - // With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only; - // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support - // without actually trying to upload something and getting a types.ManifestTypeRejectedError. - // So, try the preferred manifest MIME type. If the process succeeds, fine… - manifest, err := ic.copyUpdatedConfigAndManifest() - if err != nil { - logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err) - // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options. - if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 { - // We don’t have other options. - // In principle the code below would handle this as well, but the resulting error message is fairly ugly. - // Don’t bother the user with MIME types if we have no choice. - return err - } - // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. - // So if we are here, we will definitely be trying to convert the manifest. - // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, - // so let’s bail out early and with a better error message. - if !ic.canModifyManifest { - return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") - } - - // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil. - errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} - for _, manifestMIMEType := range otherManifestMIMETypeCandidates { - logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - ic.manifestUpdates.ManifestMIMEType = manifestMIMEType - attemptedManifest, err := ic.copyUpdatedConfigAndManifest() - if err != nil { - logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) - errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err)) - continue - } - - // We have successfully uploaded a manifest. - manifest = attemptedManifest - errs = nil // Mark this as a success so that we don't abort below. - break - } - if errs != nil { - return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", ")) - } - } - - if options.SignBy != "" { - newSig, err := c.createSignature(manifest, options.SignBy) - if err != nil { - return err - } - sigs = append(sigs, newSig) - } - - c.Printf("Storing signatures\n") - if err := c.dest.PutSignatures(sigs); err != nil { - return errors.Wrap(err, "Error writing signatures") - } - - return nil -} - -// Printf writes a formatted string to c.reportWriter. 
-// Note that the method name Printf is not entirely arbitrary: (go tool vet) -// has a built-in list of functions/methods (whatever object they are for) -// which have their format strings checked; for other names we would have -// to pass a parameter to every (go tool vet) invocation. -func (c *copier) Printf(format string, a ...interface{}) { - fmt.Fprintf(c.reportWriter, format, a...) -} - -func checkImageDestinationForCurrentRuntimeOS(ctx *types.SystemContext, src types.Image, dest types.ImageDestination) error { - if dest.MustMatchRuntimeOS() { - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice - } - c, err := src.OCIConfig() - if err != nil { - return errors.Wrapf(err, "Error parsing image configuration") - } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) - if wantedOS == "windows" && c.OS == "linux" { - return osErr - } else if wantedOS != "windows" && c.OS == "windows" { - return osErr - } - } - return nil -} - -// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func (ic *imageCopier) updateEmbeddedDockerReference() error { - destRef := ic.c.dest.Reference().DockerReference() - if destRef == nil { - return nil // Destination does not care about Docker references - } - if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { - return nil // No reference embedded in the manifest, or it matches destRef already. - } - - if !ic.canModifyManifest { - return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(ic.c.dest.Reference()), destRef.String()) - } - ic.manifestUpdates.EmbeddedDockerReference = destRef - return nil -} - -// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. -func (ic *imageCopier) copyLayers() error { - srcInfos := ic.src.LayerInfos() - destInfos := []types.BlobInfo{} - diffIDs := []digest.Digest{} - updatedSrcInfos := ic.src.LayerInfosForCopy() - srcInfosUpdated := false - if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { - if !ic.canModifyManifest { - return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") - } - srcInfos = updatedSrcInfos - srcInfosUpdated = true - } - for _, srcLayer := range srcInfos { - var ( - destInfo types.BlobInfo - diffID digest.Digest - err error - ) - if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { - // DiffIDs are, currently, needed only when converting from schema1. - // In which case src.LayerInfos will not have URLs because schema1 - // does not support them. 
- if ic.diffIDsAreNeeded { - return errors.New("getting DiffID for foreign layers is unimplemented") - } - destInfo = srcLayer - ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) - } else { - destInfo, diffID, err = ic.copyLayer(srcLayer) - if err != nil { - return err - } - } - destInfos = append(destInfos, destInfo) - diffIDs = append(diffIDs, diffID) - } - ic.manifestUpdates.InformationOnly.LayerInfos = destInfos - if ic.diffIDsAreNeeded { - ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs - } - if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { - ic.manifestUpdates.LayerInfos = destInfos - } - return nil -} - -// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields) -func layerDigestsDiffer(a, b []types.BlobInfo) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i].Digest != b[i].Digest { - return true - } - } - return false -} - -// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary, -// stores the resulting config and manifest to the destination, and returns the stored manifest. -func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { - pendingImage := ic.src - if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) { - if !ic.canModifyManifest { - return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden") - } - if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) { - // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. - // So, this can only happen if we are trying to upload using one of the other MIME type candidates. - // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. - // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. - // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. - return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) - } - pi, err := ic.src.UpdatedImage(*ic.manifestUpdates) - if err != nil { - return nil, errors.Wrap(err, "Error creating an updated image manifest") - } - pendingImage = pi - } - manifest, _, err := pendingImage.Manifest() - if err != nil { - return nil, errors.Wrap(err, "Error reading manifest") - } - - if err := ic.c.copyConfig(pendingImage); err != nil { - return nil, err - } - - ic.c.Printf("Writing manifest to image destination\n") - if err := ic.c.dest.PutManifest(manifest); err != nil { - return nil, errors.Wrap(err, "Error writing manifest") - } - return manifest, nil -} - -// copyConfig copies config.json, if any, from src to dest. 
-func (c *copier) copyConfig(src types.Image) error { - srcInfo := src.ConfigInfo() - if srcInfo.Digest != "" { - c.Printf("Copying config %s\n", srcInfo.Digest) - configBlob, err := src.ConfigBlob() - if err != nil { - return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) - } - destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) - if err != nil { - return err - } - if destInfo.Digest != srcInfo.Digest { - return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest) - } - } - return nil -} - -// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine. -// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation. -type diffIDResult struct { - digest digest.Digest - err error -} - -// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded -func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { - // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) - } - // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "") - // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again - if haveBlob && !diffIDIsNeeded { - // Check the blob sizes match, if we were given a size this time - if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize { - return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size) - } - srcInfo.Size = extantBlobSize - // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.c.dest.ReapplyBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) - } - ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err - } - - // Fallback: copy the layer, computing the diffID if we need to do so - ic.c.Printf("Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(srcInfo) - if err != nil { - return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - defer srcStream.Close() - - blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, - diffIDIsNeeded) - if err != nil { - return types.BlobInfo{}, "", err - } - var diffIDResult diffIDResult // = {digest:""} - if diffIDIsNeeded { - diffIDResult = <-diffIDChan - if diffIDResult.err != nil { - return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") - } - logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest - } - return blobInfo, diffIDResult.digest, nil -} - -// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope. -// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps compressing the stream if canCompress, -// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller. -func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo, - diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) { - var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil - var diffIDChan chan diffIDResult - - err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below - if diffIDIsNeeded { - diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block. - pipeReader, pipeWriter := io.Pipe() - defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily. - pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer { - // If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further - // reading from the pipe has failed, we don’t really care. - // We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it, - // the return value includes an error indication, which we do check. - // - // If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be - // closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC. 
- go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader - return pipeWriter - } - } - blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success - return blobInfo, diffIDChan, err - // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan -} - -// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest. -func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) { - result := diffIDResult{ - digest: "", - err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"), - } - defer func() { dest <- result }() - defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead. - - result.digest, result.err = computeDiffID(layerStream, decompressor) -} - -// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest. -func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) { - if decompressor != nil { - s, err := decompressor(stream) - if err != nil { - return "", err - } - stream = s - } - - return digest.Canonical.FromReader(stream) -} - -// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest, -// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, -// perhaps compressing it if canCompress, -// and returns a complete blobInfo of the copied blob. -func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, - getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, - canCompress bool) (types.BlobInfo, error) { - // The copying happens through a pipeline of connected io.Readers. - // === Input: srcStream - - // === Process input through digestingReader to validate against the expected digest. - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that we don't use a stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest) - } - var destStream io.Reader = digestingReader - - // === Detect compression of the input stream. - // This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression. - decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) - } - isCompressed := decompressor != nil - - // === Report progress using a pb.Reader. 
- bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = c.reportWriter - bar.SetMaxWidth(80) - bar.ShowTimeLeft = false - bar.ShowPercent = false - bar.Start() - destStream = bar.NewProxyReader(destStream) - defer bar.Finish() - - // === Send a copy of the original, uncompressed, stream, to a separate path if necessary. - var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so. - if getOriginalLayerCopyWriter != nil { - destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor)) - originalLayerReader = destStream - } - - // === Compress the layer if it is uncompressed and compression is desired - var inputInfo types.BlobInfo - if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() { - logrus.Debugf("Using original blob without modification") - inputInfo = srcInfo - } else { - logrus.Debugf("Compressing blob on the fly") - pipeReader, pipeWriter := io.Pipe() - defer pipeReader.Close() - - // If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise, - // e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed, - // we don’t care. - go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter - destStream = pipeReader - inputInfo.Digest = "" - inputInfo.Size = -1 - } - - // === Report progress using the c.progress channel, if required. - if c.progress != nil && c.progressInterval > 0 { - destStream = &progressReader{ - source: destStream, - channel: c.progress, - interval: c.progressInterval, - artifact: srcInfo, - lastTime: time.Now(), - } - } - - // === Finally, send the layer stream to dest. - uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo) - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") - } - - // This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consumer - // all of the input (to compute DiffIDs), even if dest.PutBlob does not need it. - // So, read everything from originalLayerReader, which will cause the rest to be - // sent there if we are not already at EOF. - if getOriginalLayerCopyWriter != nil { - logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter") - _, err := io.Copy(ioutil.Discard, originalLayerReader) - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest) - } - } - - if digestingReader.validationFailed { // Coverage: This should never happen. - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest) - } - if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest { - return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest) - } - return uploadedInfo, nil -} - -// compressGoroutine reads all input from src and writes its compressed equivalent to dest. -func compressGoroutine(dest *io.PipeWriter, src io.Reader) { - err := errors.New("Internal error: unexpected panic in compressGoroutine") - defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily. - dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close() - }() - - zipper := gzip.NewWriter(dest) - defer zipper.Close() - - _, err = io.Copy(zipper, src) // Sets err to nil, i.e. 
causes dest.Close() -} diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 b/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 deleted file mode 120000 index fc28d6c9ac..0000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.bz2 +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.bz2 \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.gz b/vendor/github.com/containers/image/copy/fixtures/Hello.gz deleted file mode 120000 index 08aa805fcc..0000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.gz +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.gz \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed b/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed deleted file mode 120000 index 49b46625d8..0000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.uncompressed +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.uncompressed \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/fixtures/Hello.xz b/vendor/github.com/containers/image/copy/fixtures/Hello.xz deleted file mode 120000 index 77bcd85587..0000000000 --- a/vendor/github.com/containers/image/copy/fixtures/Hello.xz +++ /dev/null @@ -1 +0,0 @@ -../../pkg/compression/fixtures/Hello.xz \ No newline at end of file diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go deleted file mode 100644 index 7e4cd10ef8..0000000000 --- a/vendor/github.com/containers/image/copy/manifest.go +++ /dev/null @@ -1,115 +0,0 @@ -package copy - -import ( - "strings" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert. -// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location. -// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used. -var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType} - -// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once. -type orderedSet struct { - list []string - included map[string]struct{} -} - -// newOrderedSet creates a correctly initialized orderedSet. -// [Sometimes it would be really nice if Golang had constructors…] -func newOrderedSet() *orderedSet { - return &orderedSet{ - list: []string{}, - included: map[string]struct{}{}, - } -} - -// append adds s to the end of os, only if it is not included already. -func (os *orderedSet) append(s string) { - if _, ok := os.included[s]; !ok { - os.list = append(os.list, s) - os.included[s] = struct{}{} - } -} - -// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. -// Note that the conversion will only happen later, through ic.src.UpdatedImage -// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), -// and a list of other possible alternatives, in order. 
-func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { - _, srcType, err := ic.src.Manifest() - if err != nil { // This should have been cached?! - return "", nil, errors.Wrap(err, "Error reading manifest") - } - - if forceManifestMIMEType != "" { - destSupportedManifestMIMETypes = []string{forceManifestMIMEType} - } - - if len(destSupportedManifestMIMETypes) == 0 { - return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. - } - supportedByDest := map[string]struct{}{} - for _, t := range destSupportedManifestMIMETypes { - supportedByDest[t] = struct{}{} - } - - // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types. - // So, build a list of types to try in order of decreasing preference. - // FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct, - // although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other. - // In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types - // and never attempt the other one. - prioritizedTypes := newOrderedSet() - - // First of all, prefer to keep the original manifest unmodified. - if _, ok := supportedByDest[srcType]; ok { - prioritizedTypes.append(srcType) - } - if !ic.canModifyManifest { - // We could also drop the !ic.canModifyManifest check and have the caller - // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” - // special case in here; the caller can then worry (or not) only about a good UI. - logrus.Debugf("We can't modify the manifest, hoping for the best...") - return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? - } - - // Then use our list of preferred types. - for _, t := range preferredManifestMIMETypes { - if _, ok := supportedByDest[t]; ok { - prioritizedTypes.append(t) - } - } - - // Finally, try anything else the destination supports. - for _, t := range destSupportedManifestMIMETypes { - prioritizedTypes.append(t) - } - - logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", ")) - if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen. - return "", nil, errors.New("Internal error: no candidate MIME types") - } - preferredType := prioritizedTypes.list[0] - if preferredType != srcType { - ic.manifestUpdates.ManifestMIMEType = preferredType - } else { - logrus.Debugf("... 
will first try using the original manifest unmodified") - } - return preferredType, prioritizedTypes.list[1:], nil -} - -// isMultiImage returns true if img is a list of images -func isMultiImage(img types.UnparsedImage) (bool, error) { - _, mt, err := img.Manifest() - if err != nil { - return false, err - } - return manifest.MIMETypeIsMultiImage(mt), nil -} diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go deleted file mode 100644 index b670ee59f1..0000000000 --- a/vendor/github.com/containers/image/copy/progress_reader.go +++ /dev/null @@ -1,28 +0,0 @@ -package copy - -import ( - "io" - "time" - - "github.com/containers/image/types" -) - -// progressReader is a reader that reports its progress on an interval. -type progressReader struct { - source io.Reader - channel chan types.ProgressProperties - interval time.Duration - artifact types.BlobInfo - lastTime time.Time - offset uint64 -} - -func (r *progressReader) Read(p []byte) (int, error) { - n, err := r.source.Read(p) - r.offset += uint64(n) - if time.Since(r.lastTime) > r.interval { - r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset} - r.lastTime = time.Now() - } - return n, err -} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go deleted file mode 100644 index 91394d2b0f..0000000000 --- a/vendor/github.com/containers/image/copy/sign.go +++ /dev/null @@ -1,31 +0,0 @@ -package copy - -import ( - "github.com/containers/image/signature" - "github.com/containers/image/transports" - "github.com/pkg/errors" -) - -// createSignature creates a new signature of manifest using keyIdentity. -func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { - mech, err := signature.NewGPGSigningMechanism() - if err != nil { - return nil, errors.Wrap(err, "Error initializing GPG") - } - defer mech.Close() - if err := mech.SupportsSigning(); err != nil { - return nil, errors.Wrap(err, "Signing not supported") - } - - dockerReference := c.dest.Reference().DockerReference() - if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) - } - - c.Printf("Signing manifest\n") - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) - if err != nil { - return nil, errors.Wrap(err, "Error creating signature") - } - return newSig, nil -} diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go deleted file mode 100644 index 5f7443fa0f..0000000000 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ /dev/null @@ -1,243 +0,0 @@ -package directory - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const version = "Directory Transport Version: 1.0\n" - -// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created -// using the 'dir' transport -var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") - -type dirImageDestination struct { - ref dirReference - compress bool -} - -// newImageDestination returns an ImageDestination for writing to a directory. 
-func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { - d := &dirImageDestination{ref: ref, compress: compress} - - // If directory exists check if it is empty - // if not empty, check whether the contents match that of a container image directory and overwrite the contents - // if the contents don't match throw an error - dirExists, err := pathExists(d.ref.resolvedPath) - if err != nil { - return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) - } - if dirExists { - isEmpty, err := isDirEmpty(d.ref.resolvedPath) - if err != nil { - return nil, err - } - - if !isEmpty { - versionExists, err := pathExists(d.ref.versionPath()) - if err != nil { - return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) - } - if versionExists { - contents, err := ioutil.ReadFile(d.ref.versionPath()) - if err != nil { - return nil, err - } - // check if contents of version file is what we expect it to be - if string(contents) != version { - return nil, ErrNotContainerImageDir - } - } else { - return nil, ErrNotContainerImageDir - } - // delete directory contents so that only one image is in the directory at a time - if err = removeDirContents(d.ref.resolvedPath); err != nil { - return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) - } - logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) - } - } else { - // create directory if it doesn't exist - if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { - return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) - } - } - // create version file - err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0644) - if err != nil { - return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dirImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dirImageDestination) Close() error { - return nil -} - -func (d *dirImageDestination) SupportedManifestMIMETypes() []string { - return nil -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dirImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *dirImageDestination) ShouldCompressLayers() bool { - return d.compress -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dirImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *dirImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). 
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - defer func() { - blobFile.Close() - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - blobPath := d.ref.layerPath(computedDigest) - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest cannot be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf(`Cannot check for a blob with unknown digest`) - } - blobPath := d.ref.layerPath(info.Digest) - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, -1, nil - } - if err != nil { - return false, -1, err - } - return true, finfo.Size(), nil -} - -func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema) -// and may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *dirImageDestination) PutManifest(manifest []byte) error { - return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) -} - -func (d *dirImageDestination) PutSignatures(signatures [][]byte) error { - for i, sig := range signatures { - if err := ioutil.WriteFile(d.ref.signaturePath(i), sig, 0644); err != nil { - return err - } - } - return nil -}
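// PutBlob above illustrates a pattern worth calling out: the blob digest is
// computed while the stream is written to a temporary file, and the file is
// renamed to its digest-derived final path only after the size and sync
// checks pass, so a partially written blob is never visible under its final
// name. A condensed sketch with illustrative names:
package main

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

// writeBlob streams r into dir and returns the canonical digest and size of
// the data written.
func writeBlob(dir string, r io.Reader) (digest.Digest, int64, error) {
	tmp, err := ioutil.TempFile(dir, "blob-")
	if err != nil {
		return "", -1, err
	}
	defer os.Remove(tmp.Name()) // effectively a no-op once the rename succeeds
	defer tmp.Close()

	digester := digest.Canonical.Digester()
	// Everything read from r is also fed into the hash.
	n, err := io.Copy(tmp, io.TeeReader(r, digester.Hash()))
	if err != nil {
		return "", -1, err
	}
	if err := tmp.Sync(); err != nil {
		return "", -1, err
	}
	dgst := digester.Digest()
	// The final name is derived from the content, so readers can locate the
	// blob by digest.
	return dgst, n, os.Rename(tmp.Name(), filepath.Join(dir, dgst.Hex()+".tar"))
}

-// Commit marks the process of storing the image as successful and asks for the image to be persisted.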
-// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dirImageDestination) Commit() error { - return nil -} - -// returns true if path exists -func pathExists(path string) (bool, error) { - _, err := os.Stat(path) - if err == nil { - return true, nil - } - if err != nil && os.IsNotExist(err) { - return false, nil - } - return false, err -} - -// returns true if directory is empty -func isDirEmpty(path string) (bool, error) { - files, err := ioutil.ReadDir(path) - if err != nil { - return false, err - } - return len(files) == 0, nil -} - -// deletes the contents of a directory -func removeDirContents(path string) error { - files, err := ioutil.ReadDir(path) - if err != nil { - return err - } - - for _, file := range files { - if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go deleted file mode 100644 index 0a8acf6bf9..0000000000 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ /dev/null @@ -1,89 +0,0 @@ -package directory - -import ( - "context" - "io" - "io/ioutil" - "os" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type dirImageSource struct { - ref dirReference -} - -// newImageSource returns an ImageSource reading from an existing directory. -// The caller must call .Close() on the returned ImageSource. -func newImageSource(ref dirReference) types.ImageSource { - return &dirImageSource{ref} -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *dirImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dirImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) - } - m, err := ioutil.ReadFile(s.ref.manifestPath()) - if err != nil { - return nil, "", err - } - return m, manifest.GuessMIMEType(m), err -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
-func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - r, err := os.Open(s.ref.layerPath(info.Digest)) - if err != nil { - return nil, 0, err - } - fi, err := r.Stat() - if err != nil { - return nil, 0, err - } - return r, fi.Size(), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.Errorf(`Manifest lists are not supported by "dir:"`) - } - signatures := [][]byte{} - for i := 0; ; i++ { - signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) - if err != nil { - if os.IsNotExist(err) { - break - } - return nil, err - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. -func (s *dirImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go deleted file mode 100644 index c38753087e..0000000000 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ /dev/null @@ -1,187 +0,0 @@ -package directory - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for directory paths. -var Transport = dirTransport{} - -type dirTransport struct{} - -func (t dirTransport) Name() string { - return "dir" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dirTransport) ParseReference(reference string) (types.ImageReference, error) { - return NewReference(reference) } - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: Must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry.
- if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) - } - return nil -} - -// dirReference is an ImageReference for directory paths. -type dirReference struct { - // Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time! - // Either of the paths may point to a different, or no, inode over time. resolvedPath may contain symbolic links, and so on. - - // Generally we follow the intent of the user, and use the "path" member for filesystem operations (e.g. the user can use a relative path to avoid - // being exposed to symlinks and renames in the parent directories to the working directory). - // (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.) - path string // As specified by the user. May be relative, contain symlinks, etc. - resolvedPath string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces. -} - -// There is no directory.ParseReference because it is rather pointless. -// Callers who need a transport-independent interface will go through -// dirTransport.ParseReference; callers who intentionally deal with directories -// can use directory.NewReference. - -// NewReference returns a directory reference for a specified path. -// -// We do not expose an API supplying the resolvedPath; we could, but recomputing it -// is generally cheap enough that we prefer being confident about the properties of resolvedPath. -func NewReference(path string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(path) - if err != nil { - return nil, err - } - return dirReference{path: path, resolvedPath: resolved}, nil -} - -func (ref dirReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dirReference) StringWithinTransport() string { - return ref.path -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dirReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. 
various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref dirReference) PolicyConfigurationIdentity() string { - return ref.resolvedPath -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dirReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedPath - for { - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 || lastSlash == 0 { - break - } - path = path[:lastSlash] - res = append(res, path) - } - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by dirTransport.ValidatePolicyConfigurationScope above. - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dirReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ref) - return image.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - compress := false - if ctx != nil { - compress = ctx.DirForceCompress - } - return newImageDestination(ref, compress) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dirReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for dir: images") -} - -// manifestPath returns a path for the manifest within a directory using our conventions. -func (ref dirReference) manifestPath() string { - return filepath.Join(ref.path, "manifest.json") -} - -// layerPath returns a path for a layer tarball within a directory using our conventions. -func (ref dirReference) layerPath(digest digest.Digest) string { - // FIXME: Should we keep the digest identification? - return filepath.Join(ref.path, digest.Hex()+".tar") -} - -// signaturePath returns a path for a signature within a directory using our conventions. 
-func (ref dirReference) signaturePath(index int) string { - return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) -} - -// versionPath returns a path for the version file within a directory using our conventions. -func (ref dirReference) versionPath() string { - return filepath.Join(ref.path, "version") -} diff --git a/vendor/github.com/containers/image/directory/explicitfilepath/path.go b/vendor/github.com/containers/image/directory/explicitfilepath/path.go deleted file mode 100644 index 71136b8808..0000000000 --- a/vendor/github.com/containers/image/directory/explicitfilepath/path.go +++ /dev/null @@ -1,56 +0,0 @@ -package explicitfilepath - -import ( - "os" - "path/filepath" - - "github.com/pkg/errors" -) - -// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path. -// To do so, all elements of the input path must exist; as a special case, the final component may be -// a non-existent name (but not a symlink pointing to a non-existent name). -// This is intended as a helper for implementations of types.ImageReference.PolicyConfigurationIdentity etc. -func ResolvePathToFullyExplicit(path string) (string, error) { - switch _, err := os.Lstat(path); { - case err == nil: - return resolveExistingPathToFullyExplicit(path) - case os.IsNotExist(err): - parent, file := filepath.Split(path) - resolvedParent, err := resolveExistingPathToFullyExplicit(parent) - if err != nil { - return "", err - } - if file == "." || file == ".." { - // Coverage: This can happen, but very rarely: if we have successfully resolved the parent, both "." and ".." in it should have been resolved as well. - // This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed. - // We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components - // in the resulting path, and especially not at the end. - return "", errors.Errorf("Unexpectedly missing special filename component in %s", path) - } - resolvedPath := filepath.Join(resolvedParent, file) - // As a sanity check, ensure that there are no "." or ".." components. - cleanedResolvedPath := filepath.Clean(resolvedPath) - if cleanedResolvedPath != resolvedPath { - // Coverage: This should never happen. - return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath) - } - return resolvedPath, nil - default: // err != nil, unrecognized - return "", err - } -} - -// resolveExistingPathToFullyExplicit is the same as ResolvePathToFullyExplicit, -// but without the special case for missing final component. -func resolveExistingPathToFullyExplicit(path string) (string, error) { - resolved, err := filepath.Abs(path) - if err != nil { - return "", err // Coverage: This can fail only if os.Getwd() fails.
- } - resolved, err = filepath.EvalSymlinks(resolved) - if err != nil { - return "", err - } - return filepath.Clean(resolved), nil -} diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go deleted file mode 100644 index 9fc85bd85b..0000000000 --- a/vendor/github.com/containers/image/docker/archive/dest.go +++ /dev/null @@ -1,66 +0,0 @@ -package archive - -import ( - "io" - "os" - - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -type archiveImageDestination struct { - *tarfile.Destination // Implements most of types.ImageDestination - ref archiveReference - writer io.Closer -} - -func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) { - if ref.destinationRef == nil { - return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)") - } - - // ref.path can be either a pipe or a regular file - // in the case of a pipe, we require that we can open it for write - // in the case of a regular file, we don't want to overwrite any pre-existing file - // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy, - only in a different way. Either way, it’s up to the user to not have two writers to the same path.) - fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644) - if err != nil { - return nil, errors.Wrapf(err, "error opening file %q", ref.path) - } - - fhStat, err := fh.Stat() - if err != nil { - return nil, errors.Wrapf(err, "error statting file %q", ref.path) - } - - if fhStat.Mode().IsRegular() && fhStat.Size() != 0 { - return nil, errors.New("docker-archive doesn't support modifying existing images") - } - - return &archiveImageDestination{ - Destination: tarfile.NewDestination(fh, ref.destinationRef), - ref: ref, - writer: fh, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *archiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *archiveImageDestination) Close() error { - return d.writer.Close() -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *archiveImageDestination) Commit() error { - return d.Destination.Commit() -} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go deleted file mode 100644 index b2ffd965df..0000000000 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ /dev/null @@ -1,41 +0,0 @@ -package archive - -import ( - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -type archiveImageSource struct { - *tarfile.Source // Implements most of types.ImageSource - ref archiveReference -}
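// newImageDestination above accepts either a pipe or a fresh regular file as
// the archive target: it opens with O_WRONLY|O_CREATE (no O_EXCL, so FIFOs
// still work) and then rejects regular files that already contain data. A
// minimal sketch of that check; openArchiveTarget is an illustrative name,
// not part of the vendored API.
package main

import (
	"fmt"
	"os"
)

// openArchiveTarget opens path for writing, permitting pipes and empty or
// newly created regular files, but refusing to clobber existing content.
func openArchiveTarget(path string) (*os.File, error) {
	f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	st, err := f.Stat()
	if err != nil {
		f.Close()
		return nil, err
	}
	// As in the vendored code, this check is inherently racy; it is up to
	// the caller not to point two writers at the same path.
	if st.Mode().IsRegular() && st.Size() != 0 {
		f.Close()
		return nil, fmt.Errorf("%s already contains data; refusing to modify it", path)
	}
	return f, nil
}

-// newImageSource returns a types.ImageSource for the specified image reference.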
-// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource { - if ref.destinationRef != nil { - logrus.Warnf("docker-archive: references are not supported for sources (ignoring)") - } - src := tarfile.NewSource(ref.path) - return &archiveImageSource{ - Source: src, - ref: ref, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *archiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *archiveImageSource) Close() error { - return nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *archiveImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go deleted file mode 100644 index 047df73db9..0000000000 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ /dev/null @@ -1,154 +0,0 @@ -package archive - -import ( - "fmt" - "strings" - - "github.com/containers/image/docker/reference" - ctrImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for local Docker archives. -var Transport = archiveTransport{} - -type archiveTransport struct{} - -func (t archiveTransport) Name() string { - return "docker-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in archiveReference.PolicyConfigurationIdentity. - return errors.New(`docker-archive: does not support any scopes except the default "" one`) -} - -// archiveReference is an ImageReference for Docker images. -type archiveReference struct { - destinationRef reference.NamedTagged // only used for destinations - path string -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - if refString == "" { - return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString) - } - - parts := strings.SplitN(refString, ":", 2) - path := parts[0] - var destinationRef reference.NamedTagged - - // A :tag was specified, which is only necessary for destinations.
- if len(parts) == 2 { - ref, err := reference.ParseNormalizedNamed(parts[1]) - if err != nil { - return nil, errors.Wrapf(err, "docker-archive parsing reference") - } - ref = reference.TagNameOnly(ref) - - if _, isDigest := ref.(reference.Canonical); isDigest { - return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString) - } - - refTagged, isTagged := ref.(reference.NamedTagged) - if !isTagged { - // Really shouldn't be hit... - return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString) - } - destinationRef = refTagged - } - - return archiveReference{ - destinationRef: destinationRef, - path: path, - }, nil -} - -func (ref archiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref archiveReference) StringWithinTransport() string { - if ref.destinationRef == nil { - return ref.path - } - return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String()) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref archiveReference) DockerReference() reference.Named { - return ref.destinationRef -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref archiveReference) PolicyConfigurationIdentity() string { - // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity. - return "" -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref archiveReference) PolicyConfigurationNamespaces() []string { - // TODO - return []string{} -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. 
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src := newImageSource(ctx, ref) - return ctrImage.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref archiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref), nil -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error { - // Not really supported, for safety reasons. - return errors.New("Deleting images not implemented for docker-archive: images") -} diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go deleted file mode 100644 index 82fab4b19a..0000000000 --- a/vendor/github.com/containers/image/docker/daemon/client.go +++ /dev/null @@ -1,69 +0,0 @@ -package daemon - -import ( - "net/http" - "path/filepath" - - "github.com/containers/image/types" - dockerclient "github.com/docker/docker/client" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - // The default API version to be used in case none is explicitly specified - defaultAPIVersion = "1.22" -) - -// newDockerClient initializes a new API client based on the passed SystemContext. -func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) { - host := dockerclient.DefaultDockerHost - if ctx != nil && ctx.DockerDaemonHost != "" { - host = ctx.DockerDaemonHost - } - - // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. - // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s - // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket - // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. - // - // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client.
- proto, _, _, err := dockerclient.ParseHost(host) - if err != nil { - return nil, err - } - var httpClient *http.Client - if proto != "unix" { - hc, err := tlsConfig(ctx) - if err != nil { - return nil, err - } - httpClient = hc - } - - return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) -} - -func tlsConfig(ctx *types.SystemContext) (*http.Client, error) { - options := tlsconfig.Options{} - if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify { - options.InsecureSkipVerify = true - } - - if ctx != nil && ctx.DockerDaemonCertPath != "" { - options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem") - options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem") - options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem") - } - - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - return &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - CheckRedirect: dockerclient.CheckRedirect, - }, nil -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go deleted file mode 100644 index f73ac23392..0000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ /dev/null @@ -1,135 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/types" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/net/context" -) - -type daemonImageDestination struct { - ref daemonReference - mustMatchRuntimeOS bool - *tarfile.Destination // Implements most of types.ImageDestination - // For talking to imageLoadGoroutine - goroutineCancel context.CancelFunc - statusChannel <-chan error - writer *io.PipeWriter - // Other state - committed bool // writer has been closed -} - -// newImageDestination returns a types.ImageDestination for the specified image reference. -func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { - if ref.ref == nil { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - namedTaggedRef, ok := ref.ref.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) - } - - var mustMatchRuntimeOS = true - if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost { - mustMatchRuntimeOS = false - } - - c, err := newDockerClient(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error initializing docker engine client") - } - - reader, writer := io.Pipe() - // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. 
- statusChannel := make(chan error, 1) - - goroutineContext, goroutineCancel := context.WithCancel(context.Background()) - go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) - - return &daemonImageDestination{ - ref: ref, - mustMatchRuntimeOS: mustMatchRuntimeOS, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, - }, nil -} - -// imageLoadGoroutine accepts a tar stream on reader, sends it to c, and reports error or success by writing to statusChannel -func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) { - err := errors.New("Internal error: unexpected panic in imageLoadGoroutine") - defer func() { - logrus.Debugf("docker-daemon: sending done, status %v", err) - statusChannel <- err - }() - defer func() { - if err == nil { - reader.Close() - } else { - reader.CloseWithError(err) - } - }() - - resp, err := c.ImageLoad(ctx, reader, true) - if err != nil { - err = errors.Wrap(err, "Error saving image to docker engine") - return - } - defer resp.Body.Close() -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return d.mustMatchRuntimeOS -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *daemonImageDestination) Close() error { - if !d.committed { - logrus.Debugf("docker-daemon: Closing tar stream to abort loading") - // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing. - // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including - // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the - // net/http version with native Context support in Go 1.7) do not always actually immediately cancel - // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and - // return early if the context is canceled without terminating the goroutine at all. - // So we need this CloseWithError to terminate sending the HTTP request Body - // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending - // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all. - // Whether that works or not, closing the PipeWriter seems desirable in any case. - d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()")) - } - d.goroutineCancel() - - return nil -} - -func (d *daemonImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *daemonImageDestination) Commit() error { - logrus.Debugf("docker-daemon: Closing tar stream") - if err := d.Destination.Commit(); err != nil { - return err - } - if err := d.writer.Close(); err != nil { - return err - } - d.committed = true // We may still fail, but we are done sending to imageLoadGoroutine.
- - logrus.Debugf("docker-daemon: Waiting for status") - err := <-d.statusChannel - return err -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go deleted file mode 100644 index 5cf7679b1c..0000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ /dev/null @@ -1,88 +0,0 @@ -package daemon - -import ( - "io" - "io/ioutil" - "os" - - "github.com/containers/image/docker/tarfile" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -type daemonImageSource struct { - ref daemonReference - *tarfile.Source // Implements most of types.ImageSource - tarCopyPath string -} - -type layerInfo struct { - path string - size int64 -} - -// newImageSource returns a types.ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -// -// It would be great if we were able to stream the input tar as it is being -// sent; but Docker sends the top-level manifest, which determines which paths -// to look for, at the end, so we will need to seek back and re-read, several times. -// (We could, perhaps, expect an exact sequence, assume that the first plaintext file -// is the config, and that the following len(RootFS) files are the layers, but that feels -// way too brittle.) -func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) { - c, err := newDockerClient(ctx) - if err != nil { - return nil, errors.Wrap(err, "Error initializing docker engine client") - } - // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference. - // Either way ImageSave should create a tarball with exactly one image. - inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()}) - if err != nil { - return nil, errors.Wrap(err, "Error loading image from docker engine") - } - defer inputStream.Close() - - // FIXME: use SystemContext here. - tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-daemon-tar") - if err != nil { - return nil, err - } - defer tarCopyFile.Close() - - succeeded := false - defer func() { - if !succeeded { - os.Remove(tarCopyFile.Name()) - } - }() - - if _, err := io.Copy(tarCopyFile, inputStream); err != nil { - return nil, err - } - - succeeded = true - return &daemonImageSource{ - ref: ref, - Source: tarfile.NewSource(tarCopyFile.Name()), - tarCopyPath: tarCopyFile.Name(), - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *daemonImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *daemonImageSource) Close() error { - return os.Remove(s.tarCopyPath) -}
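// newImageSource above spools the daemon's ImageSave stream into a temporary
// tar file so tarfile.Source can seek within it, and uses a `succeeded` flag
// to turn the deferred cleanup into a no-op once ownership of the file passes
// to the returned object. A condensed sketch of that idiom with illustrative
// names:
package main

import (
	"io"
	"io/ioutil"
	"os"
)

// spoolToTempFile copies r into a temporary file and returns its path; on
// any failure the partially written file is removed.
func spoolToTempFile(r io.Reader) (string, error) {
	f, err := ioutil.TempFile("", "docker-save-")
	if err != nil {
		return "", err
	}
	defer f.Close()

	succeeded := false
	defer func() {
		if !succeeded {
			os.Remove(f.Name()) // roll back on every error path
		}
	}()

	if _, err := io.Copy(f, r); err != nil {
		return "", err
	}
	succeeded = true // the caller now owns the file
	return f.Name(), nil
}

-// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified.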
-func (s *daemonImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go deleted file mode 100644 index 8ad6b521ff..0000000000 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ /dev/null @@ -1,185 +0,0 @@ -package daemon - -import ( - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for images managed by a local Docker daemon. -var Transport = daemonTransport{} - -type daemonTransport struct{} - -// Name returns the name of the transport, which must be unique among other transports. -func (t daemonTransport) Name() string { - return "docker-daemon" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t daemonTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - return errors.New(`docker-daemon: does not support any scopes except the default "" one`) -} - -// daemonReference is an ImageReference for images managed by a local Docker daemon -// Exactly one of id and ref can be set. -// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon) -// For daemonImageDestination, it must be a ref, which is NamedTagged. -// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest. -// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we don’t bother.) -type daemonReference struct { - id digest.Digest - ref reference.Named // !reference.IsNameOnly -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases. - // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars). - - // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag). - // reference.ParseAnyReference interprets such strings as digests. - if dgst, err := digest.Parse(refString); err == nil { - // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name. 
- // Other digest references are ambiguous, so refuse them. - if dgst.Algorithm() != digest.Canonical { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical) - } - return NewReference(dgst, nil) - } - - ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values - if err != nil { - return nil, err - } - if reference.FamiliarName(ref) == digest.Canonical.String() { - return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical) - } - return NewReference("", ref) -} - -// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly) -func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) { - if id != "" && ref != nil { - return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time") - } - if ref != nil { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input. - // This MAY be accepted in the future. - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported") - } - } - return daemonReference{ - id: id, - ref: ref, - }, nil -} - -func (ref daemonReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; -// instead, see transports.ImageName(). -func (ref daemonReference) StringWithinTransport() string { - switch { - case ref.id != "": - return ref.id.String() - case ref.ref != nil: - return reference.FamiliarString(ref.ref) - default: // Coverage: Should never happen, NewReference above should refuse such values. - panic("Internal inconsistency: daemonReference has empty id and nil ref") - } -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref daemonReference) DockerReference() reference.Named { - return ref.ref // May be nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. 
various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref daemonReference) PolicyConfigurationIdentity() string { - // We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible. - // But the existence of image IDs means that we can’t truly well namespace the input; the untagged images would have to fall into the default policy, - // which can be unexpected. So, punt. - return "" // This still allows using the default "" scope to define a policy for this transport. -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref daemonReference) PolicyConfigurationNamespaces() []string { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - return []string{} -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref daemonReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref daemonReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error { - // Should this just untag the image? Should this stop running containers? - // The semantics is not quite as clear as for remote repositories. - // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant. 
- return errors.Errorf("Deleting images not implemented for docker-daemon: images") -} diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go deleted file mode 100644 index 91006e8d24..0000000000 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ /dev/null @@ -1,439 +0,0 @@ -package docker - -import ( - "context" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "path/filepath" - "strings" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/pkg/docker/config" - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/client" - "github.com/docker/go-connections/tlsconfig" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - dockerHostname = "docker.io" - dockerRegistry = "registry-1.docker.io" - - systemPerHostCertDirPath = "/etc/docker/certs.d" - - resolvedPingV2URL = "%s://%s/v2/" - resolvedPingV1URL = "%s://%s/v1/_ping" - tagsPath = "/v2/%s/tags/list" - manifestPath = "/v2/%s/manifests/%s" - blobsPath = "/v2/%s/blobs/%s" - blobUploadPath = "/v2/%s/blobs/uploads/" - extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" - - minimumTokenLifetimeSeconds = 60 - - extensionSignatureSchemaVersion = 2 // extensionSignature.Version - extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type -) - -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - ErrV1NotSupported = errors.New("can't talk to a V1 docker registry") - // ErrUnauthorizedForCredentials is returned when the status code returned is 401 - ErrUnauthorizedForCredentials = errors.New("unable to retrieve auth token: invalid username/password") -) - -// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: -// signature represents a Docker image signature. -type extensionSignature struct { - Version int `json:"schemaVersion"` // Version specifies the schema version - Name string `json:"name"` // Name must be in "sha256:@signatureName" format - Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" - Content []byte `json:"content"` // Content contains the signature -} - -// signatureList represents list of Docker image signatures. -type extensionSignatureList struct { - Signatures []extensionSignature `json:"signatures"` -} - -type bearerToken struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` -} - -// dockerClient is configuration for dealing with a single Docker registry. -type dockerClient struct { - // The following members are set by newDockerClient and do not change afterwards. - ctx *types.SystemContext - registry string - username string - password string - client *http.Client - signatureBase signatureStorageBase - scope authScope - // The following members are detected registry properties: - // They are set after a successful detectProperties(), and never change afterwards. - scheme string // Empty value also used to indicate detectProperties() has not yet succeeded. - challenges []challenge - supportsSignatures bool - // The following members are private state for setupRequestAuth, both are valid if token != nil. 
- token *bearerToken - tokenExpiration time.Time -} - -type authScope struct { - remoteName string - actions string -} - -func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) { - token := new(bearerToken) - if err := json.Unmarshal(blob, &token); err != nil { - return nil, err - } - if token.Token == "" { - token.Token = token.AccessToken - } - if token.ExpiresIn < minimumTokenLifetimeSeconds { - token.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) - } - if token.IssuedAt.IsZero() { - token.IssuedAt = time.Now().UTC() - } - return token, nil -} - -// This is cloned from docker/go-connections because upstream docker has changed -// it and make deps here fails otherwise. -// We'll drop this once we upgrade to docker 1.13.x deps. -func serverDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, - } -} - -// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. -func dockerCertDir(ctx *types.SystemContext, hostPort string) string { - if ctx != nil && ctx.DockerCertPath != "" { - return ctx.DockerCertPath - } - var hostCertDir string - if ctx != nil && ctx.DockerPerHostCertDirPath != "" { - hostCertDir = ctx.DockerPerHostCertDirPath - } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" { - hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath) - } else { - hostCertDir = systemPerHostCertDirPath - } - return filepath.Join(hostCertDir, hostPort) -} - -// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry) -// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) -func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { - registry := reference.Domain(ref.ref) - username, password, err := config.GetAuthentication(ctx, reference.Domain(ref.ref)) - if err != nil { - return nil, errors.Wrapf(err, "error getting username and password") - } - sigBase, err := configuredSignatureStorageBase(ctx, ref, write) - if err != nil { - return nil, err - } - remoteName := reference.Path(ref.ref) - - return newDockerClientWithDetails(ctx, registry, username, password, actions, sigBase, remoteName) -} - -// newDockerClientWithDetails returns a new dockerClient instance for the given parameters -func newDockerClientWithDetails(ctx *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) { - hostName := registry - if registry == dockerHostname { - registry = dockerRegistry - } - tr := tlsclientconfig.NewTransport() - tr.TLSClientConfig = serverDefault() - - // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, - // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible - // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because - // generally the UI hides the existence of the different dockerRegistry.
But note that this behavior is - // undocumented and may change if docker/docker changes. - certDir := dockerCertDir(ctx, hostName) - if err := tlsclientconfig.SetupCertificates(certDir, tr.TLSClientConfig); err != nil { - return nil, err - } - - if ctx != nil && ctx.DockerInsecureSkipTLSVerify { - tr.TLSClientConfig.InsecureSkipVerify = true - } - - return &dockerClient{ - ctx: ctx, - registry: registry, - username: username, - password: password, - client: &http.Client{Transport: tr}, - signatureBase: sigBase, - scope: authScope{ - actions: actions, - remoteName: remoteName, - }, - }, nil -} - -// CheckAuth validates the credentials by attempting to log into the registry -// returns an error if an error occcured while making the http request or the status code received was 401 -func CheckAuth(ctx context.Context, sCtx *types.SystemContext, username, password, registry string) error { - newLoginClient, err := newDockerClientWithDetails(sCtx, registry, username, password, "", nil, "") - if err != nil { - return errors.Wrapf(err, "error creating new docker client") - } - - resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil) - if err != nil { - return err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusOK: - return nil - case http.StatusUnauthorized: - return ErrUnauthorizedForCredentials - default: - return errors.Errorf("error occured with status code %q", resp.StatusCode) - } -} - -// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. -// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. -func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) { - if err := c.detectProperties(ctx); err != nil { - return nil, err - } - - url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) - return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true) -} - -// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. -// streamLen, if not -1, specifies the length of the data expected on stream. -// makeRequest should generally be preferred. -// TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) { - req, err := http.NewRequest(method, url, stream) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it. 
- req.ContentLength = streamLen - } - req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") - for n, h := range headers { - for _, hh := range h { - req.Header.Add(n, hh) - } - } - if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" { - req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent) - } - if sendAuth { - if err := c.setupRequestAuth(req); err != nil { - return nil, err - } - } - logrus.Debugf("%s %s", method, url) - res, err := c.client.Do(req) - if err != nil { - return nil, err - } - return res, nil -} - -// we're using the challenges from the /v2/ ping response and not the one from the destination -// URL in this request because: -// -// 1) docker does that as well -// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request -// -// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up -func (c *dockerClient) setupRequestAuth(req *http.Request) error { - if len(c.challenges) == 0 { - return nil - } - schemeNames := make([]string, 0, len(c.challenges)) - for _, challenge := range c.challenges { - schemeNames = append(schemeNames, challenge.Scheme) - switch challenge.Scheme { - case "basic": - req.SetBasicAuth(c.username, c.password) - return nil - case "bearer": - if c.token == nil || time.Now().After(c.tokenExpiration) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return errors.Errorf("missing realm in bearer auth challenge") - } - service, _ := challenge.Parameters["service"] // Will be "" if not present - var scope string - if c.scope.remoteName != "" && c.scope.actions != "" { - scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions) - } - token, err := c.getBearerToken(req.Context(), realm, service, scope) - if err != nil { - return err - } - c.token = token - c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token)) - return nil - default: - logrus.Debugf("no handler for %s authentication", challenge.Scheme) - } - } - logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) - return nil -} - -func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) { - authReq, err := http.NewRequest("GET", realm, nil) - if err != nil { - return nil, err - } - authReq = authReq.WithContext(ctx) - getParams := authReq.URL.Query() - if service != "" { - getParams.Add("service", service) - } - if scope != "" { - getParams.Add("scope", scope) - } - authReq.URL.RawQuery = getParams.Encode() - if c.username != "" && c.password != "" { - authReq.SetBasicAuth(c.username, c.password) - } - tr := tlsclientconfig.NewTransport() - // TODO(runcom): insecure for now to contact the external token service - tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} - client := &http.Client{Transport: tr} - res, err := client.Do(authReq) - if err != nil { - return nil, err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusUnauthorized: - return nil, ErrUnauthorizedForCredentials - case http.StatusOK: - break - default: - return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL) - } - tokenBlob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - return newBearerTokenFromJSONBlob(tokenBlob) -} - -// detectProperties detects various properties of the registry. 
-// See the dockerClient documentation for members which are affected by this. -func (c *dockerClient) detectProperties(ctx context.Context) error { - if c.scheme != "" { - return nil - } - - ping := func(scheme string) error { - url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true) - logrus.Debugf("Ping %s err %#v", url, err) - if err != nil { - return err - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return errors.Errorf("error pinging repository, response code %d", resp.StatusCode) - } - c.challenges = parseAuthHeader(resp.Header) - c.scheme = scheme - c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" - return nil - } - err := ping("https") - if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { - err = ping("http") - } - if err != nil { - err = errors.Wrap(err, "pinging docker registry returned") - if c.ctx != nil && c.ctx.DockerDisableV1Ping { - return err - } - // best effort to understand if we're talking to a V1 registry - pingV1 := func(scheme string) bool { - url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry) - resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true) - logrus.Debugf("Ping %s err %#v", url, err) - if err != nil { - return false - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", url, resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return false - } - return true - } - isV1 := pingV1("https") - if !isV1 && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify { - isV1 = pingV1("http") - } - if isV1 { - err = ErrV1NotSupported - } - } - return err -} - -// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, -// using the original data structures. -func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) - res, err := c.makeRequest(ctx, "GET", path, nil, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, client.HandleErrorResponse(res) - } - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - - var parsedBody extensionSignatureList - if err := json.Unmarshal(body, &parsedBody); err != nil { - return nil, errors.Wrapf(err, "Error decoding signature list") - } - return &parsedBody, nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go deleted file mode 100644 index 2148ed8bab..0000000000 --- a/vendor/github.com/containers/image/docker/docker_image.go +++ /dev/null @@ -1,63 +0,0 @@ -package docker - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods -// which are specific to Docker. 
-type Image struct { - types.ImageCloser - src *dockerImageSource -} - -// newImage returns a new Image interface type after setting up -// a client to the registry hosting the given image. -// The caller must call .Close() on the returned Image. -func newImage(ctx *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { - s, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, s) - if err != nil { - return nil, err - } - return &Image{ImageCloser: img, src: s}, nil -} - -// SourceRefFullName returns a fully expanded name for the repository this image is in. -func (i *Image) SourceRefFullName() string { - return i.src.ref.ref.Name() -} - -// GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any. -func (i *Image) GetRepositoryTags() ([]string, error) { - path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref)) - // FIXME: Pass the context.Context - res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - // print url also - return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode) - } - type tagsRes struct { - Tags []string - } - tags := &tagsRes{} - if err := json.NewDecoder(res.Body).Decode(tags); err != nil { - return nil, err - } - return tags.Tags, nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go deleted file mode 100644 index 9a6c351378..0000000000 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ /dev/null @@ -1,463 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerImageDestination struct { - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. -func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) { - c, err := newDockerClientFromRef(ctx, ref, true, "pull,push") - if err != nil { - return nil, err - } - return &dockerImageDestination{ - ref: ref, - c: c, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. 
-func (d *dockerImageDestination) Close() error { - return nil -} - -func (d *dockerImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *dockerImageDestination) SupportsSignatures() error { - if err := d.c.detectProperties(context.TODO()); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return nil - case d.c.supportsSignatures: - return nil - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *dockerImageDestination) ShouldCompressLayers() bool { - return true -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *dockerImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if inputInfo.Digest.String() != "" { - haveBlob, size, err := d.HasBlob(inputInfo) - if err != nil { - return types.BlobInfo{}, err - } - if haveBlob { - return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. 
- uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) - logrus.Debugf("Uploading %s", uploadPath) - res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - logrus.Debugf("Error initiating layer upload, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error initiating layer upload to %s", uploadPath) - } - uploadLocation, err := res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - digester := digest.Canonical.Digester() - sizeCounter := &sizeCounter{} - tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter)) - res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true) - if err != nil { - logrus.Debugf("Error uploading layer chunked, response %#v", res) - return types.BlobInfo{}, err - } - defer res.Body.Close() - computedDigest := digester.Digest() - - uploadLocation, err = res.Location() - if err != nil { - return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL") - } - - // FIXME: DELETE uploadLocation on failure - - locationQuery := uploadLocation.Query() - // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717 - locationQuery.Set("digest", computedDigest.String()) - uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true) - if err != nil { - return types.BlobInfo{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading layer, response %#v", *res) - return types.BlobInfo{}, errors.Wrapf(client.HandleErrorResponse(res), "Error uploading layer to %s", uploadLocation) - } - - logrus.Debugf("Upload of layer %s complete", computedDigest) - return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) - } - checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String()) - - logrus.Debugf("Checking %s", checkPath) - res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil) - if err != nil { - return false, -1, err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusOK: - logrus.Debugf("... already exists") - return true, getBlobSize(res), nil - case http.StatusUnauthorized: - logrus.Debugf("... not authorized") - return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref)) - case http.StatusNotFound: - logrus.Debugf("... 
not present") - return false, -1, nil - default: - return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode)) - } -} - -func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *dockerImageDestination) PutManifest(m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - d.manifestDigest = digest - - refTail, err := d.ref.tagOrDigest() - if err != nil { - return err - } - path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail) - - headers := map[string][]string{} - mimeType := manifest.GuessMIMEType(m) - if mimeType != "" { - headers["Content-Type"] = []string{mimeType} - } - res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m)) - if err != nil { - return err - } - defer res.Body.Close() - if !successStatus(res.StatusCode) { - err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path) - if isManifestInvalidError(errors.Cause(err)) { - err = types.ManifestTypeRejectedError{Err: err} - } - return err - } - return nil -} - -// successStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func successStatus(status int) bool { - return status >= 200 && status <= 399 -} - -// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error. -func isManifestInvalidError(err error) bool { - errors, ok := err.(errcode.Errors) - if !ok || len(errors) == 0 { - return false - } - ec, ok := errors[0].(errcode.ErrorCoder) - if !ok { - return false - } - // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false. - // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd) - // when uploading to a tag (because it can’t find a matching tag inside the manifest) - return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid -} - -func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error { - // Do not fail if we don’t really need to support signatures. - if len(signatures) == 0 { - return nil - } - if err := d.c.detectProperties(context.TODO()); err != nil { - return err - } - switch { - case d.c.signatureBase != nil: - return d.putSignaturesToLookaside(signatures) - case d.c.supportsSignatures: - return d.putSignaturesToAPIExtension(signatures) - default: - return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured") - } -} - -// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. -func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error { - // FIXME? This overwrites files one at a time, definitely not atomic. - // A failure when updating signatures with a reordered copy could lose some of them. - - // Skip dealing with the manifest digest if not necessary. 
- if len(signatures) == 0 { - return nil - } - - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i, signature := range signatures { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - err := d.putOneSignature(url, signature) - if err != nil { - return err - } - } - // Remove any other signatures, if present. - // We stop at the first missing signature; if a previous deleting loop aborted - // prematurely, this may not clean up all of them, but one missing signature - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { - url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := d.c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -// putOneSignature stores one signature to url. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error { - switch url.Scheme { - case "file": - logrus.Debugf("Writing to %s", url.Path) - err := os.MkdirAll(filepath.Dir(url.Path), 0755) - if err != nil { - return err - } - err = ioutil.WriteFile(url.Path, signature, 0644) - if err != nil { - return err - } - return nil - - case "http", "https": - return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) - default: - return errors.Errorf("Unsupported scheme when writing signature to %s", url.String()) - } -} - -// deleteOneSignature deletes a signature from url, if it exists. -// If it successfully determines that the signature does not exist, returns (true, nil) -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) { - switch url.Scheme { - case "file": - logrus.Debugf("Deleting %s", url.Path) - err := os.Remove(url.Path) - if err != nil && os.IsNotExist(err) { - return true, nil - } - return false, err - - case "http", "https": - return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String()) - default: - return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String()) - } -} - -// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension. -func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error { - // Skip dealing with the manifest digest, or reading the old state, if not necessary. - if len(signatures) == 0 { - return nil - } - - if d.manifestDigest.String() == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.Errorf("Unknown manifest digest, can't add signatures") - } - - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. 
Eventually we should also allow removing signatures, - // but the X-Registry-Supports-Signatures API extension does not support that yet. - - existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest) - if err != nil { - return err - } - existingSigNames := map[string]struct{}{} - for _, sig := range existingSignatures.Signatures { - existingSigNames[sig.Name] = struct{}{} - } - -sigExists: - for _, newSig := range signatures { - for _, existingSig := range existingSignatures.Signatures { - if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) { - continue sigExists - } - } - - // The API expect us to invent a new unique name. This is racy, but hopefully good enough. - var signatureName string - for { - randBytes := make([]byte, 16) - n, err := rand.Read(randBytes) - if err != nil || n != 16 { - return errors.Wrapf(err, "Error generating random signature len %d", n) - } - signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes) - if _, ok := existingSigNames[signatureName]; !ok { - break - } - } - sig := extensionSignature{ - Version: extensionSignatureSchemaVersion, - Name: signatureName, - Type: extensionSignatureTypeAtomic, - Content: newSig, - } - body, err := json.Marshal(sig) - if err != nil { - return err - } - - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String()) - res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body)) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - body, err := ioutil.ReadAll(res.Body) - if err == nil { - logrus.Debugf("Error body %s", string(body)) - } - logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) - return errors.Wrapf(client.HandleErrorResponse(res), "Error uploading signature to %s", path) - } - } - - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *dockerImageDestination) Commit() error { - return nil -} diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go deleted file mode 100644 index 63bfe8aa43..0000000000 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ /dev/null @@ -1,386 +0,0 @@ -package docker - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "mime" - "net/http" - "net/url" - "os" - "strconv" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/docker/distribution/registry/client" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type dockerImageSource struct { - ref dockerReference - c *dockerClient - // State - cachedManifest []byte // nil if not loaded yet - cachedManifestMIMEType string // Only valid if cachedManifest != nil -} - -// newImageSource creates a new ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. 
-func newImageSource(ctx *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { - c, err := newDockerClientFromRef(ctx, ref, false, "pull") - if err != nil { - return nil, err - } - return &dockerImageSource{ - ref: ref, - c: c, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *dockerImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *dockerImageSource) Close() error { - return nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *dockerImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} - -// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) -// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. -func simplifyContentType(contentType string) string { - if contentType == "" { - return contentType - } - mimeType, _, err := mime.ParseMediaType(contentType) - if err != nil { - return "" - } - return mimeType -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *dockerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return s.fetchManifest(context.TODO(), instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(context.TODO()) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest) - headers := make(map[string][]string) - headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes - res, err := s.c.makeRequest(ctx, "GET", path, headers, nil) - if err != nil { - return nil, "", err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", client.HandleErrorResponse(res) - } - manblob, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest(nil) -// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. 
-func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.ref.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) { - var ( - resp *http.Response - err error - ) - for _, url := range urls { - resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false) - if err == nil { - if resp.StatusCode != http.StatusOK { - err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode) - logrus.Debug(err) - continue - } - break - } - } - if resp.Body != nil && err == nil { - return resp.Body, getBlobSize(resp), nil - } - return nil, 0, err -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(info.URLs) - } - - path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil) - if err != nil { - return nil, 0, err - } - if res.StatusCode != http.StatusOK { - // print url also - return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode) - } - return res.Body, getBlobSize(res), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - switch { - case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx, instanceDigest) - case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx, instanceDigest) - default: - return [][]byte{}, nil - } -} - -// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, -// or finally, from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { - if instanceDigest != nil { - return *instanceDigest, nil - } - if digested, ok := s.ref.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, -// which is not nil. 
-func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return nil, err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - signatures := [][]byte{} - for i := 0; ; i++ { - url := signatureStorageURL(s.c.signatureBase, manifestDigest, i) - if url == nil { - return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - signature, missing, err := s.getOneSignature(ctx, url) - if err != nil { - return nil, err - } - if missing { - break - } - signatures = append(signatures, signature) - } - return signatures, nil -} - -// getOneSignature downloads one signature from url. -// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) { - switch url.Scheme { - case "file": - logrus.Debugf("Reading %s", url.Path) - sig, err := ioutil.ReadFile(url.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, true, nil - } - return nil, false, err - } - return sig, false, nil - - case "http", "https": - logrus.Debugf("GET %s", url) - req, err := http.NewRequest("GET", url.String(), nil) - if err != nil { - return nil, false, err - } - req = req.WithContext(ctx) - res, err := s.c.client.Do(req) - if err != nil { - return nil, false, err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - return nil, true, nil - } else if res.StatusCode != http.StatusOK { - return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode) - } - sig, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, false, err - } - return sig, false, nil - - default: - return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String()) - } -} - -// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. -func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return nil, err - } - - parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest) - if err != nil { - return nil, err - } - - var sigs [][]byte - for _, sig := range parsedBody.Signatures { - if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// deleteImage deletes the named image from the registry, if supported. 
-func deleteImage(ctx *types.SystemContext, ref dockerReference) error { - c, err := newDockerClientFromRef(ctx, ref, true, "push") - if err != nil { - return err - } - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - headers := make(map[string][]string) - headers["Accept"] = []string{manifest.DockerV2Schema2MediaType} - - refTail, err := ref.tagOrDigest() - if err != nil { - return err - } - getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) - get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil) - if err != nil { - return err - } - defer get.Body.Close() - manifestBody, err := ioutil.ReadAll(get.Body) - if err != nil { - return err - } - switch get.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) - default: - return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status) - } - - digest := get.Header.Get("Docker-Content-Digest") - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest) - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil) - if err != nil { - return err - } - defer delete.Body.Close() - - body, err := ioutil.ReadAll(delete.Body) - if err != nil { - return err - } - if delete.StatusCode != http.StatusAccepted { - return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status) - } - - if c.signatureBase != nil { - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return err - } - - for i := 0; ; i++ { - url := signatureStorageURL(c.signatureBase, manifestDigest, i) - if url == nil { - return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil") - } - missing, err := c.deleteOneSignature(url) - if err != nil { - return err - } - if missing { - break - } - } - } - - return nil -} diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go deleted file mode 100644 index cc0aa298ae..0000000000 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ /dev/null @@ -1,161 +0,0 @@ -package docker - -import ( - "fmt" - "strings" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for Docker registry-hosted images. -var Transport = dockerTransport{} - -type dockerTransport struct{} - -func (t dockerTransport) Name() string { - return "docker" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). 
-// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { - // FIXME? We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// dockerReference is an ImageReference for Docker images. -type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - if !strings.HasPrefix(refString, "//") { - return nil, errors.Errorf("docker: image reference %s does not start with //", refString) - } - ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//")) - if err != nil { - return nil, err - } - ref = reference.TagNameOnly(ref) - return NewReference(ref) -} - -// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). -func NewReference(ref reference.Named) (types.ImageReference, error) { - if reference.IsNameOnly(ref) { - return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported") - } - return dockerReference{ - ref: ref, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - return "//" + reference.FamiliarString(ref.ref) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. 
after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref dockerReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.ref) -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx *types.SystemContext) error { - return deleteImage(ctx, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go deleted file mode 100644 index 18e7733b93..0000000000 --- a/vendor/github.com/containers/image/docker/lookaside.go +++ /dev/null @@ -1,202 +0,0 @@ -package docker - -import ( - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/ghodss/yaml" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = "/etc/containers/registries.d" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `json:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `json:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. -type registryNamespace struct { - SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing. - SigStoreStaging string `json:"sigstore-staging"` // For writing only. -} - -// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below. -type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported. - -// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”. -func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) { - // FIXME? Loading and parsing the config could be cached across calls. - dirPath := registriesDirPath(ctx) - logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath) - config, err := loadAndMergeConfig(dirPath) - if err != nil { - return nil, err - } - - topLevel := config.signatureTopLevel(ref, write) - if topLevel == "" { - return nil, nil - } - - url, err := url.Parse(topLevel) - if err != nil { - return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(ref.ref) // Note that this is without a tag or digest. 
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String()) - } - url.Path = url.Path + "/" + repo - return url, nil -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(ctx *types.SystemContext) string { - if ctx != nil { - if ctx.RegistriesDirPath != "" { - return ctx.RegistriesDirPath - } - if ctx.RootForImplicitAbsolutePaths != "" { - return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - } - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := ioutil.ReadFile(configPath) - if err != nil { - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, errors.Wrapf(err, "Error parsing %s", configPath) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Using "docker" namespace %s`, identity) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - - // Look for a match of the possible parent namespaces. 
- for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Using "docker" namespace %s`, name) - if url := ns.signatureTopLevel(write); url != "" { - return url - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Using "default-docker" configuration`) - if url := config.DefaultDocker.signatureTopLevel(write); url != "" { - return url - } - } - logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity()) - return "" -} - -// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. -// or "" if nothing has been configured. -func (ns registryNamespace) signatureTopLevel(write bool) string { - if write && ns.SigStoreStaging != "" { - logrus.Debugf(` Using %s`, ns.SigStoreStaging) - return ns.SigStoreStaging - } - if ns.SigStore != "" { - logrus.Debugf(` Using %s`, ns.SigStore) - return ns.SigStore - } - return "" -} - -// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable. -// Returns nil iff base == nil. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL { - if base == nil { - return nil - } - url := *base - url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1) - return &url -} diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go deleted file mode 100644 index 31bbb544c6..0000000000 --- a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go +++ /dev/null @@ -1,56 +0,0 @@ -package policyconfiguration - -import ( - "strings" - - "github.com/containers/image/docker/reference" - "github.com/pkg/errors" -) - -// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceIdentity(ref reference.Named) (string, error) { - res := ref.Name() - tagged, isTagged := ref.(reference.NamedTagged) - digested, isDigested := ref.(reference.Canonical) - switch { - case isTagged && isDigested: // Note that this CAN actually happen. - return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) - case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() - return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) - case isTagged: - res = res + ":" + tagged.Tag() - case isDigested: - res = res + "@" + digested.Digest().String() - default: // Coverage: The above was supposed to be exhaustive. - return "", errors.New("Internal inconsistency, unexpected default branch") - } - return res, nil -} - -// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceNamespaces(ref reference.Named) []string { - // Look for a match of the repository, and then of the possible parent - // namespaces. 
Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go deleted file mode 100644 index 978df7eabb..0000000000 --- a/vendor/github.com/containers/image/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go deleted file mode 100644 index fcc436a395..0000000000 --- a/vendor/github.com/containers/image/docker/reference/normalize.go +++ /dev/null @@ -1,152 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
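The parent-namespace walk deleted above (shared by the signature-storage lookup in sigstorage.go and by DockerReferenceNamespaces here) is easy to see in isolation. A minimal, dependency-free sketch, assuming plain strings in place of the package's reference types:

package main

import (
	"fmt"
	"strings"
)

// namespaces strips one path component at a time, so the last entry is the
// bare registry host, matching the lookup order described above.
func namespaces(name string) []string {
	res := []string{}
	for {
		res = append(res, name)
		i := strings.LastIndex(name, "/")
		if i == -1 {
			break
		}
		name = name[:i]
	}
	return res
}

func main() {
	// Prints [docker.io/library/busybox docker.io/library docker.io]
	fmt.Println(namespaces("docker.io/library/busybox"))
}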
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. 
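familiarizeName above goes the other way, shortening a normalized name for display. A hedged sketch of the same rule on plain strings (illustrative only; the deleted code carries domain and path as separate fields):

package main

import (
	"fmt"
	"strings"
)

// familiar drops the default "docker.io" domain, and the "library/" prefix
// for official images, mirroring the behavior described above.
func familiar(name string) string {
	name = strings.TrimPrefix(name, "docker.io/")
	if parts := strings.Split(name, "/"); len(parts) == 2 && parts[0] == "library" {
		return parts[1]
	}
	return name
}

func main() {
	fmt.Println(familiar("docker.io/library/redis"))  // redis
	fmt.Println(familiar("docker.io/dmcgowan/myapp")) // dmcgowan/myapp
}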
-func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go deleted file mode 100644 index fd3510e9ee..0000000000 --- a/vendor/github.com/containers/image/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. 
-func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// DEPRECATED: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. 
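Parse, defined below, matches the full grammar from the package comment via ReferenceRegexp. The following standalone sketch uses a deliberately loose pattern (an assumption for illustration, much weaker than the deleted regexp.go) just to show the name[:tag][@digest] capturing-group layout:

package main

import (
	"fmt"
	"regexp"
)

// A simplified stand-in for ReferenceRegexp: group 1 = name, 2 = tag, 3 = digest.
var refRE = regexp.MustCompile(`^([a-z0-9._/-]+?)(?::([\w][\w.-]{0,127}))?(?:@([A-Za-z0-9+.-]+:[0-9a-fA-F]{32,}))?$`)

func main() {
	m := refRE.FindStringSubmatch("docker.io/library/busybox:latest")
	if m == nil {
		panic("no match")
	}
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
}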
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if nameMatch != nil && len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
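For orientation, the four concrete types defined below stringify into these shapes (plain-string sketch; the digest value is made up):

package main

import "fmt"

func main() {
	name := "docker.io/library/busybox"
	tag := "latest"
	dig := "sha256:6d5d9c8a7f3eaf7a5a0a3b1e9c2f4d8b1a2c3d4e5f60718293a4b5c6d7e8f901" // hypothetical digest

	fmt.Println(name)                         // repository: name only
	fmt.Println(name + ":" + tag)             // taggedReference
	fmt.Println(name + "@" + dig)             // canonicalReference
	fmt.Println(name + ":" + tag + "@" + dig) // reference: both tag and digest
}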
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go deleted file mode 100644 index 405e995db9..0000000000 --- a/vendor/github.com/containers/image/docker/reference/regexp.go +++ /dev/null @@ -1,143 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by domainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // domainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. 
- domainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(domainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(domainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) - - // anchoredShortIdentifierRegexp is used to check if a value - // is a possible identifier prefix, anchored at start and end - // of string. - anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. 
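The combinator style used throughout this file composes *regexp.Regexp values by string concatenation. A tiny runnable taste of it, standalone and redefining just a few of the helpers:

package main

import (
	"fmt"
	"regexp"
)

var match = regexp.MustCompile

// expression concatenates the source patterns of its arguments.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	s := ""
	for _, re := range res {
		s += re.String()
	}
	return match(s)
}

// optional wraps the expression in a non-capturing group with a `?`.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)?`)
}

// anchored pins the expression to the start and end of the string.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}

func main() {
	tag := match(`[\w][\w.-]{0,127}`)
	ref := anchored(match(`[a-z0-9/.]+`), optional(match(`:`), tag))
	fmt.Println(ref.String())                      // ^[a-z0-9/.]+(?::[\w][\w.-]{0,127})?$
	fmt.Println(ref.MatchString("busybox:latest")) // true
}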
-func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go deleted file mode 100644 index eb11ca8668..0000000000 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ /dev/null @@ -1,257 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. -type Destination struct { - writer io.Writer - tar *tar.Writer - repoTag string - // Other state. - blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs -} - -// NewDestination returns a tarfile.Destination for the specified io.Writer. -func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination { - // For github.com/docker/docker consumers, this works just as well as - // refString := ref.String() - // because when reading the RepoTags strings, github.com/docker/docker/reference - // normalizes both of them to the same value. - // - // Doing it this way to include the normalized-out `docker.io[/library]` does make - // a difference for github.com/projectatomic/docker consumers, with the - // “Add --add-registry and --block-registry options to docker daemon” patch. - // These consumers treat reference strings which include a hostname and reference - // strings without a hostname differently. - // - // Using the host name here is more explicit about the intent, and it has the same - // effect as (docker pull) in projectatomic/docker, which tags the result using - // a hostname-qualified reference. - // See https://github.com/containers/image/issues/72 for a more detailed - // analysis and explanation. - refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag()) - return &Destination{ - writer: dest, - tar: tar.NewWriter(dest), - repoTag: refString, - blobs: make(map[digest.Digest]types.BlobInfo), - } -} - -// SupportedManifestMIMETypes tells which manifest mime types the destination supports -// If an empty slice or nil it's returned, then any mime type can be tried to upload -func (d *Destination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities. - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. 
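NewDestination above wraps an arbitrary io.Writer in a tar.Writer, so everything sent to the destination becomes a tar member. A minimal standard-library sketch of that setup (the file name and content are stand-ins):

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
)

func main() {
	var out bytes.Buffer
	tw := tar.NewWriter(&out)

	body := []byte(`[]`) // a stand-in for manifest.json content
	if err := tw.WriteHeader(&tar.Header{Name: "manifest.json", Mode: 0444, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
	fmt.Println("archive bytes:", out.Len())
}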
-func (d *Destination) SupportsSignatures() error { - return errors.Errorf("Storing signatures for docker tar files is not supported") -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *Destination) ShouldCompressLayers() bool { - return false -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *Destination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *Destination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. -func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if inputInfo.Digest.String() == "" { - return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile") - } - - ok, size, err := d.HasBlob(inputInfo) - if err != nil { - return types.BlobInfo{}, err - } - if ok { - return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil - } - - if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size. - logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") - streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") - if err != nil { - return types.BlobInfo{}, err - } - defer os.Remove(streamCopy.Name()) - defer streamCopy.Close() - - size, err := io.Copy(streamCopy, stream) - if err != nil { - return types.BlobInfo{}, err - } - _, err = streamCopy.Seek(0, os.SEEK_SET) - if err != nil { - return types.BlobInfo{}, err - } - inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy. - stream = streamCopy - logrus.Debugf("... streaming done") - } - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil { - return types.BlobInfo{}, err - } - d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size} - return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with -// the matching digest which can be reapplied using ReapplyBlob. Unlike -// PutBlob, the digest can not be empty. If HasBlob returns true, the size of -// the blob must also be returned. If the destination does not contain the -// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it -// returns a non-nil error only on an unexpected failure. 
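The core of PutBlob above is the io.TeeReader pattern: hash the stream while copying it, so the digest is known exactly when the copy finishes. Reduced to the standard library (crypto/sha256 standing in for the go-digest package):

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("layer contents")
	h := sha256.New()
	// Every byte read from tee is also written into the hash.
	tee := io.TeeReader(src, h)

	n, err := io.Copy(ioutil.Discard, tee) // stand-in for writing into the tar stream
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes, digest sha256:%s\n", n, hex.EncodeToString(h.Sum(nil)))
}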
-func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf("Can not check for a blob with unknown digest") - } - if blob, ok := d.blobs[info.Digest]; ok { - return true, blob.Size, nil - } - return false, -1, nil -} - -// ReapplyBlob informs the image destination that a blob for which HasBlob -// previously returned true would have been passed to PutBlob if it had -// returned false. Like HasBlob and unlike PutBlob, the digest can not be -// empty. If the blob is a filesystem layer, this signifies that the changes -// it describes need to be applied again when composing a filesystem tree. -func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *Destination) PutManifest(m []byte) error { - // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, - // so the caller trying a different manifest kind would be pointless. - var man manifest.Schema2 - if err := json.Unmarshal(m, &man); err != nil { - return errors.Wrap(err, "Error parsing manifest") - } - if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType { - return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest") - } - - layerPaths := []string{} - for _, l := range man.LayersDescriptors { - layerPaths = append(layerPaths, l.Digest.String()) - } - - items := []ManifestItem{{ - Config: man.ConfigDescriptor.Digest.String(), - RepoTags: []string{d.repoTag}, - Layers: layerPaths, - Parent: "", - LayerSources: nil, - }} - itemsBytes, err := json.Marshal(&items) - if err != nil { - return err - } - - // FIXME? Do we also need to support the legacy format? - return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes)) -} - -type tarFI struct { - path string - size int64 -} - -func (t *tarFI) Name() string { - return t.path -} -func (t *tarFI) Size() int64 { - return t.size -} -func (t *tarFI) Mode() os.FileMode { - return 0444 -} -func (t *tarFI) ModTime() time.Time { - return time.Unix(0, 0) -} -func (t *tarFI) IsDir() bool { - return false -} -func (t *tarFI) Sys() interface{} { - return nil -} - -// sendFile sends a file into the tar stream. -func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error { - hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "") - if err != nil { - return nil - } - logrus.Debugf("Sending as tar file %s", path) - if err := d.tar.WriteHeader(hdr); err != nil { - return err - } - size, err := io.Copy(d.tar, stream) - if err != nil { - return err - } - if size != expectedSize { - return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size) - } - return nil -} - -// PutSignatures adds the given signatures to the docker tarfile (currently not -// supported). 
MUST be called after PutManifest (signatures reference manifest -// contents) -func (d *Destination) PutSignatures(signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Storing signatures for docker tar files is not supported") - } - return nil -} - -// Commit finishes writing data to the underlying io.Writer. -// It is the caller's responsibility to close it, if necessary. -func (d *Destination) Commit() error { - return d.tar.Close() -} diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go deleted file mode 100644 index 4ea5369c05..0000000000 --- a/vendor/github.com/containers/image/docker/tarfile/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package tarfile is an internal implementation detail of some transports. -// Do not use outside of the github.com/containers/image repo! -package tarfile diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go deleted file mode 100644 index a18e210585..0000000000 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ /dev/null @@ -1,366 +0,0 @@ -package tarfile - -import ( - "archive/tar" - "bytes" - "context" - "encoding/json" - "io" - "io/ioutil" - "os" - "path" - - "github.com/containers/image/manifest" - "github.com/containers/image/pkg/compression" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Source is a partial implementation of types.ImageSource for reading from tarPath. -type Source struct { - tarPath string - // The following data is only available after ensureCachedDataIsPresent() succeeds - tarManifest *ManifestItem // nil if not available yet. - configBytes []byte - configDigest digest.Digest - orderedDiffIDList []digest.Digest - knownLayers map[digest.Digest]*layerInfo - // Other state - generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. -} - -type layerInfo struct { - path string - size int64 -} - -// NewSource returns a tarfile.Source for the specified path. -func NewSource(path string) *Source { - // TODO: We could add support for multiple images in a single archive, so - // that people could use docker-archive:opensuse.tar:opensuse:leap as - // the source of an image. - return &Source{ - tarPath: path, - } -} - -// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component. -type tarReadCloser struct { - *tar.Reader - backingFile *os.File -} - -func (t *tarReadCloser) Close() error { - return t.backingFile.Close() -} - -// openTarComponent returns a ReadCloser for the specific file within the archive. -// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers), -// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough. -// The caller should call .Close() on the returned stream. -func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) { - f, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - succeeded := false - defer func() { - if !succeeded { - f.Close() - } - }() - - tarReader, header, err := findTarComponent(f, componentPath) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested - // We follow only one symlink; so no loops are possible. 
- if _, err := f.Seek(0, os.SEEK_SET); err != nil { - return nil, err - } - // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive, - // so we don't care. - tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname)) - if err != nil { - return nil, err - } - if header == nil { - return nil, os.ErrNotExist - } - } - - if !header.FileInfo().Mode().IsRegular() { - return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name) - } - succeeded = true - return &tarReadCloser{Reader: tarReader, backingFile: f}, nil -} - -// findTarComponent returns a header and a reader matching path within inputFile, -// or (nil, nil, nil) if not found. -func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) { - t := tar.NewReader(inputFile) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, nil, err - } - if h.Name == path { - return t, h, nil - } - } - return nil, nil, nil -} - -// readTarComponent returns full contents of componentPath. -func (s *Source) readTarComponent(path string) ([]byte, error) { - file, err := s.openTarComponent(path) - if err != nil { - return nil, errors.Wrapf(err, "Error loading tar component %s", path) - } - defer file.Close() - bytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ensureCachedDataIsPresent loads data necessary for any of the public accessors. -func (s *Source) ensureCachedDataIsPresent() error { - if s.tarManifest != nil { - return nil - } - - // Read and parse manifest.json - tarManifest, err := s.loadTarManifest() - if err != nil { - return err - } - - // Check to make sure length is 1 - if len(tarManifest) != 1 { - return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest)) - } - - // Read and parse config. - configBytes, err := s.readTarComponent(tarManifest[0].Config) - if err != nil { - return err - } - var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. - if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { - return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) - } - - knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig) - if err != nil { - return err - } - - // Success; commit. - s.tarManifest = &tarManifest[0] - s.configBytes = configBytes - s.configDigest = digest.FromBytes(configBytes) - s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs - s.knownLayers = knownLayers - return nil -} - -// loadTarManifest loads and decodes the manifest.json. -func (s *Source) loadTarManifest() ([]ManifestItem, error) { - // FIXME? Do we need to deal with the legacy format? - bytes, err := s.readTarComponent(manifestFileName) - if err != nil { - return nil, err - } - var items []ManifestItem - if err := json.Unmarshal(bytes, &items); err != nil { - return nil, errors.Wrap(err, "Error decoding tar manifest.json") - } - return items, nil -} - -// LoadTarManifest loads and decodes the manifest.json -func (s *Source) LoadTarManifest() ([]ManifestItem, error) { - return s.loadTarManifest() -} - -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { - // Collect layer data available in manifest and config. 
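Both findTarComponent above and the size scan in prepareLayerData below walk the archive headers linearly with archive/tar. A runnable sketch of that scan, using a tiny in-memory archive:

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
)

// findSize walks tar headers until it sees the wanted member name.
func findSize(r io.Reader, name string) (int64, bool) {
	t := tar.NewReader(r)
	for {
		h, err := t.Next()
		if err == io.EOF {
			return 0, false
		}
		if err != nil {
			panic(err)
		}
		if h.Name == name {
			return h.Size, true
		}
	}
}

func main() {
	// Build a one-member archive in memory.
	var buf bytes.Buffer
	w := tar.NewWriter(&buf)
	w.WriteHeader(&tar.Header{Name: "manifest.json", Mode: 0644, Size: 2})
	w.Write([]byte("[]"))
	w.Close()

	size, ok := findSize(&buf, "manifest.json")
	fmt.Println(size, ok) // 2 true
}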
- if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { - return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) - } - knownLayers := map[digest.Digest]*layerInfo{} - unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. - for i, diffID := range parsedConfig.RootFS.DiffIDs { - if _, ok := knownLayers[diffID]; ok { - // Apparently it really can happen that a single image contains the same layer diff more than once. - // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter - // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original. - continue - } - layerPath := tarManifest.Layers[i] - if _, ok := unknownLayerSizes[layerPath]; ok { - return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath) - } - li := &layerInfo{ // A new element in each iteration - path: layerPath, - size: -1, - } - knownLayers[diffID] = li - unknownLayerSizes[layerPath] = li - } - - // Scan the tar file to collect layer sizes. - file, err := os.Open(s.tarPath) - if err != nil { - return nil, err - } - defer file.Close() - t := tar.NewReader(file) - for { - h, err := t.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - if li, ok := unknownLayerSizes[h.Name]; ok { - li.size = h.Size - delete(unknownLayerSizes, h.Name) - } - } - if len(unknownLayerSizes) != 0 { - return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice. - } - - return knownLayers, nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
- return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - if s.generatedManifest == nil { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, "", err - } - m := manifest.Schema2{ - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: manifest.Schema2Descriptor{ - MediaType: manifest.DockerV2Schema2ConfigMediaType, - Size: int64(len(s.configBytes)), - Digest: s.configDigest, - }, - LayersDescriptors: []manifest.Schema2Descriptor{}, - } - for _, diffID := range s.orderedDiffIDList { - li, ok := s.knownLayers[diffID] - if !ok { - return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) - } - m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ - Digest: diffID, // diffID is a digest of the uncompressed tarball - MediaType: manifest.DockerV2Schema2LayerMediaType, - Size: li.size, - }) - } - manifestBytes, err := json.Marshal(&m) - if err != nil { - return nil, "", err - } - s.generatedManifest = manifestBytes - } - return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil -} - -type readCloseWrapper struct { - io.Reader - closeFunc func() error -} - -func (r readCloseWrapper) Close() error { - if r.closeFunc != nil { - return r.closeFunc() - } - return nil -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if err := s.ensureCachedDataIsPresent(); err != nil { - return nil, 0, err - } - - if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256. - return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil - } - - if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, - stream, err := s.openTarComponent(li.path) - if err != nil { - return nil, 0, err - } - - // In order to handle the fact that digests != diffIDs (and thus that a - // caller which is trying to verify the blob will run into problems), - // we need to decompress blobs. This is a bit ugly, but it's a - // consequence of making everything addressable by their DiffID rather - // than by their digest... - // - // In particular, because the v2s2 manifest being generated uses - // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of - // layers not their _actual_ digest. The result is that copy/... will - // be verifing a "digest" which is not the actual layer's digest (but - // is instead the DiffID). - - decompressFunc, reader, err := compression.DetectCompression(stream) - if err != nil { - return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest) - } - - if decompressFunc != nil { - reader, err = decompressFunc(reader) - if err != nil { - return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest) - } - } - - newStream := readCloseWrapper{ - Reader: reader, - closeFunc: stream.Close, - } - - return newStream, li.size, nil - } - - return nil, 0, errors.Errorf("Unknown blob %s", info.Digest) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. - return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) - } - return [][]byte{}, nil -} diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go deleted file mode 100644 index 2aa5675457..0000000000 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ /dev/null @@ -1,28 +0,0 @@ -package tarfile - -import ( - "github.com/containers/image/manifest" - "github.com/opencontainers/go-digest" -) - -// Various data structures. - -// Based on github.com/docker/docker/image/tarexport/tarexport.go -const ( - manifestFileName = "manifest.json" - // legacyLayerFileName = "layer.tar" - // legacyConfigFileName = "json" - // legacyVersionFileName = "VERSION" - // legacyRepositoriesFileName = "repositories" -) - -// ManifestItem is an element of the array stored in the top-level manifest.json file. -type ManifestItem struct { - Config string - RepoTags []string - Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` -} - -type imageID string diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go deleted file mode 100644 index 23664a74a5..0000000000 --- a/vendor/github.com/containers/image/docker/wwwauthenticate.go +++ /dev/null @@ -1,159 +0,0 @@ -package docker - -// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. - -import ( - "net/http" - "strings" -) - -// challenge carries information from a WWW-Authenticate response header. -// See RFC 7235. -type challenge struct { - // Scheme is the auth-scheme according to RFC 7235 - Scheme string - - // Parameters are the auth-params according to RFC 7235 - Parameters map[string]string } - -// Octet types from RFC 7230. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = <any 8-bit sequence of data> - // CHAR = <any US-ASCII character (octets 0 - 127)> - // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR = <US-ASCII CR, carriage return (13)> - // LF = <US-ASCII LF, linefeed (10)> - // SP = <US-ASCII SP, space (32)> - // HT = <US-ASCII HT, horizontal-tab (9)> - // <"> = <US-ASCII double-quote mark (34)> - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1*<any CHAR except CTLs or separators> - // qdtext = <any TEXT except <">> - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func parseAuthHeader(header http.Header) []challenge { - challenges := []challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -// NOTE: This is not a fully compliant parser per RFC 7235: -// Most notably it does not support more than one challenge within a single header -// Some of the whitespace parsing also seems noncompliant.
-// But it is clearly better than what we used to have… -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go deleted file mode 100644 index 412261ddbb..0000000000 --- a/vendor/github.com/containers/image/image/docker_list.go +++ /dev/null @@ -1,93 +0,0 @@ -package image - -import ( - "encoding/json" - "fmt" - "runtime" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type platformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// A manifestDescriptor references a platform-specific manifest. -type manifestDescriptor struct { - manifest.Schema2Descriptor - Platform platformSpec `json:"platform"` -} - -type manifestList struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []manifestDescriptor `json:"manifests"` -} - -// chooseDigestFromManifestList parses blob as a schema2 manifest list, -// and returns the digest of the image appropriate for the current environment. 
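For context on the challenge parser deleted above: a typical registry reply carries a header like the one below, and the caller gets back a scheme plus a parameter map. This crude split-based sketch shows only the shape of the result (it is NOT the RFC 7235 tokenizer above, which handles quoting and escapes properly):

package main

import (
	"fmt"
	"strings"
)

func main() {
	h := `Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`
	scheme := h[:strings.Index(h, " ")]
	params := map[string]string{}
	for _, kv := range strings.Split(h[len(scheme)+1:], ",") {
		parts := strings.SplitN(kv, "=", 2)
		params[parts[0]] = strings.Trim(parts[1], `"`)
	}
	// Prints: Bearer https://auth.docker.io/token registry.docker.io
	fmt.Println(scheme, params["realm"], params["service"])
}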
-func chooseDigestFromManifestList(ctx *types.SystemContext, blob []byte) (digest.Digest, error) { - wantedArch := runtime.GOARCH - if ctx != nil && ctx.ArchitectureChoice != "" { - wantedArch = ctx.ArchitectureChoice - } - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice - } - - list := manifestList{} - if err := json.Unmarshal(blob, &list); err != nil { - return "", err - } - for _, d := range list.Manifests { - if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { - return d.Digest, nil - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) -} - -func manifestSchema2FromManifestList(ctx *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - targetManifestDigest, err := chooseDigestFromManifestList(ctx, manblob) - if err != nil { - return nil, err - } - manblob, mt, err := src.GetManifest(&targetManifestDigest) - if err != nil { - return nil, err - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, src, manblob, mt) -} - -// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate -// for the current system from the manifest available from src. -func ChooseManifestInstanceFromManifestList(ctx *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { - // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, - // probably along with manifest list editing. - blob, mt, err := src.Manifest() - if err != nil { - return "", err - } - if mt != manifest.DockerV2ListMediaType { - return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) - } - return chooseDigestFromManifestList(ctx, blob) -} diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go deleted file mode 100644 index c6a6989dee..0000000000 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ /dev/null @@ -1,201 +0,0 @@ -package image - -import ( - "encoding/json" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
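The architecture/OS selection in chooseDigestFromManifestList above, reduced to a runnable sketch (the JSON blob and digest values are made up):

package main

import (
	"encoding/json"
	"fmt"
	"runtime"
)

type entry struct {
	Digest   string `json:"digest"`
	Platform struct {
		Architecture string `json:"architecture"`
		OS           string `json:"os"`
	} `json:"platform"`
}

func main() {
	blob := []byte(`{"manifests":[
	  {"digest":"sha256:aaa...","platform":{"architecture":"amd64","os":"linux"}},
	  {"digest":"sha256:bbb...","platform":{"architecture":"arm64","os":"linux"}}]}`)

	var list struct {
		Manifests []entry `json:"manifests"`
	}
	if err := json.Unmarshal(blob, &list); err != nil {
		panic(err)
	}
	// Pick the first entry matching the current runtime, as the deleted code does.
	for _, m := range list.Manifests {
		if m.Platform.Architecture == runtime.GOARCH && m.Platform.OS == runtime.GOOS {
			fmt.Println("selected", m.Digest)
			return
		}
	}
	fmt.Println("no match for", runtime.GOARCH, runtime.GOOS)
}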
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { - return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema1) manifestMIMEType() string { - return manifest.DockerV2Schema1SignedMediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema1) ConfigBlob() ([]byte, error) { - return nil, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) { - v2s2, err := m.convertToManifestSchema2(nil, nil) - if err != nil { - return nil, err - } - return v2s2.OCIConfig() -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - // This is a bit convoluted: We can’t just have a "get embedded docker reference" method - // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually - // embed a full docker/distribution reference, but only the repo name and tag (without the host name). - // So we would have to provide a “return repo without host name, and tag” getter for the generic code, - // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the - // generic copy code needs to know about is reference.Named and that a manifest may need updating - // for some destinations. - name := reference.Path(ref) - var tag string - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } else { - tag = "" - } - return m.m.Name != name || m.m.Tag != tag -} - -func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. 
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. - case manifest.DockerV2Schema2MediaType: - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return memoryImageFromManifest(m2), nil - case imgspecv1.MediaTypeImageManifest: - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) - if err != nil { - return nil, err - } - return m2.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: imgspecv1.MediaTypeImageManifest, - InformationOnly: options.InformationOnly, - }) - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { - if len(m.m.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.History) != len(m.m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.History) - 1) - v1Index - - var v1compat manifest.Schema1V1Compatibility - if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { - return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) - } - if !v1compat.ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go deleted file mode 100644 index b43bc17cfd..0000000000 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ /dev/null @@ -1,347 +0,0 @@ -package image - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "io/ioutil" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) -// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is -// a non-zero embedded timestamp; we could zero that, but that would just waste storage space -// in registries, so let’s use the same values. 
-var gzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer -const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - m *manifest.Schema2 -} - -func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema2FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema2{ - src: src, - m: m, - }, nil -} - -// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { - return &manifestSchema2{ - src: src, - configBlob: configBlob, - m: manifest.Schema2FromComponents(config, layers), - } -} - -func (m *manifestSchema2) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestSchema2) manifestMIMEType() string { - return m.m.MediaType -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) { - configBlob, err := m.ConfigBlob() - if err != nil { - return nil, err - } - // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields - // than OCI v1. This unmarshal makes sure we drop docker v2s2 - // fields that aren't needed in OCI v1. - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(configBlob, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. -func (m *manifestSchema2) ConfigBlob() ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") - } - stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.m.ConfigDescriptor.Digest, - Size: m.m.ConfigDescriptor.Size, - URLs: m.m.ConfigDescriptor.URLs, - }) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). 
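The gzippedEmptyLayer bytes above are easy to sanity-check: hashing them with stdlib sha256 should reproduce gzippedEmptyLayerDigest. A quick standalone check:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// The gzip-compressed empty tar from the deleted file, byte for byte.
var gzippedEmptyLayer = []byte{
	31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
	0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
}

func main() {
	sum := sha256.Sum256(gzippedEmptyLayer)
	fmt.Println("sha256:" + hex.EncodeToString(sum[:]))
	// Should print
	// sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4,
	// matching the gzippedEmptyLayerDigest constant above.
}
```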
-// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob() - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. - src: m.src, - configBlob: m.configBlob, - m: manifest.Schema2Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. 
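The ConfigBlob implementation above (the same pattern recurs in the OCI variant later in this diff) downloads the config and refuses it unless its hash matches the digest the manifest promised. The core of that trust check as a dependency-free sketch; fetchVerified is an illustrative helper, not part of the vendored API:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
)

// fetchVerified reads a blob and rejects it if the content hash does not
// match the digest the manifest carried, the same model ConfigBlob applies
// before caching the config.
func fetchVerified(stream io.Reader, expectedHex string) ([]byte, error) {
	blob, err := ioutil.ReadAll(stream)
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(blob)
	if got := hex.EncodeToString(sum[:]); got != expectedHex {
		return nil, fmt.Errorf("downloaded blob digest sha256:%s does not match expected sha256:%s", got, expectedHex)
	}
	return blob, nil
}

func main() {
	data := []byte(`{"architecture":"amd64"}`)
	sum := sha256.Sum256(data)
	blob, err := fetchVerified(bytes.NewReader(data), hex.EncodeToString(sum[:]))
	fmt.Println(string(blob), err)
}
```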
- - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType: - return copy.convertToManifestSchema1(options.InformationOnly.Destination) - case imgspecv1.MediaTypeImageManifest: - return copy.convertToManifestOCI1() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { - configOCI, err := m.OCIConfig() - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable - } else { - // we assume layers are gzip'ed because docker v2s2 only deals with - // gzip'ed layers. However, OCI has non-gzip'ed layers as well. - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - } - } - - m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers) - return memoryImageFromManifest(m1), nil -} - -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) { - configBytes, err := m.ConfigBlob() - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
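The schema2-to-OCI conversion shown above is mostly a media-type translation. A sketch of the layer mapping with the literal wire values written out; the vendored code uses the equivalent named constants from the manifest and image-spec packages:

```go
package main

import "fmt"

const (
	dockerForeignLayer       = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
	ociNonDistributableLayer = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
	ociGzipLayer             = "application/vnd.oci.image.layer.v1.tar+gzip"
)

// ociLayerMediaType reproduces the per-layer rule in convertToManifestOCI1:
// foreign layers become non-distributable OCI layers; everything else is
// assumed gzipped, because docker v2s2 only deals with gzipped layers.
func ociLayerMediaType(dockerMediaType string) string {
	if dockerMediaType == dockerForeignLayer {
		return ociNonDistributableLayer
	}
	return ociGzipLayer
}

func main() {
	fmt.Println(ociLayerMediaType(dockerForeignLayer))
	fmt.Println(ociLayerMediaType("application/vnd.docker.image.rootfs.diff.tar.gzip"))
}
```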
- return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))}) - if err != nil { - return nil, errors.Wrap(err, "Error uploading empty layer") - } - if info.Digest != gzippedEmptyLayerDigest { - return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest) - } - haveGzippedEmptyLayer = true - } - blobDigest = gzippedEmptyLayerDigest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. - if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - return memoryImageFromManifest(m1), nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Hex()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) 
- rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]interface{}{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go deleted file mode 100644 index cdd4233f70..0000000000 --- a/vendor/github.com/containers/image/image/manifest.go +++ /dev/null @@ -1,67 +0,0 @@ -package image - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// Note that the public methods are intended to be a subset of types.Image -// so that embedding a genericManifest into structs works. -// will support v1 one day... -type genericManifest interface { - serialize() ([]byte, error) - manifestMIMEType() string - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() types.BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob() ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned to due how - // old image manifests work (docker v2s1 especially). - OCIConfig() (*imgspecv1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute - // (most importantly it forces us to download the full layers even if they are already present at the destination). 
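The raw-map manipulation above is the standard Go technique for patching a few JSON fields while preserving every field you do not know about. A minimal sketch of the same pattern (patchConfig is an illustrative name):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// patchConfig follows the shape of the deleted v1ConfigFromConfigJSON:
// round-trip unknown fields through map[string]*json.RawMessage so they
// survive untouched, and only add or remove the keys we know about.
func patchConfig(configJSON []byte, id, parent string) ([]byte, error) {
	raw := map[string]*json.RawMessage{}
	if err := json.Unmarshal(configJSON, &raw); err != nil {
		return nil, err
	}
	delete(raw, "rootfs")
	delete(raw, "history")

	updates := map[string]interface{}{"id": id}
	if parent != "" {
		updates["parent"] = parent
	}
	for field, value := range updates {
		encoded, err := json.Marshal(value)
		if err != nil {
			return nil, err
		}
		raw[field] = (*json.RawMessage)(&encoded)
	}
	return json.Marshal(raw)
}

func main() {
	in := []byte(`{"architecture":"amd64","history":[],"custom":"kept as-is"}`)
	out, err := patchConfig(in, "deadbeef", "")
	fmt.Println(string(out), err)
}
```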
- UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // This does not change the state of the original Image object. - UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// inspectManifest is an implementation of types.Image.Inspect -func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { - return m.imageInspectInfo() -} diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go deleted file mode 100644 index 4639c49a32..0000000000 --- a/vendor/github.com/containers/image/image/memory.go +++ /dev/null @@ -1,70 +0,0 @@ -package image - -import ( - "context" - - "github.com/pkg/errors" - - "github.com/containers/image/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. -// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. 
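manifestInstanceFromBlob above is a straight dispatch on the normalized MIME type, with manifest lists resolved to a single image first. A sketch of that dispatch using the literal media-type values; the parser names are returned as strings purely for illustration:

```go
package main

import "fmt"

const (
	v2s1Signed = "application/vnd.docker.distribution.manifest.v1+prettyjws"
	v2s2       = "application/vnd.docker.distribution.manifest.v2+json"
	v2List     = "application/vnd.docker.distribution.manifest.list.v2+json"
	ociImage   = "application/vnd.oci.image.manifest.v1+json"
)

// parserFor picks one parser per MIME type, as the deleted switch does.
func parserFor(mt string) (string, error) {
	switch mt {
	case v2s1Signed:
		return "manifestSchema1FromManifest", nil
	case ociImage:
		return "manifestOCI1FromManifest", nil
	case v2s2:
		return "manifestSchema2FromManifest", nil
	case v2List:
		return "manifestSchema2FromManifestList", nil
	default:
		return "", fmt.Errorf("Unimplemented manifest MIME type %s", mt)
	}
}

func main() {
	p, err := parserFor(v2s2)
	fmt.Println(p, err)
}
```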
-func (i *memoryImage) Manifest() ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { - return inspectManifest(i.genericManifest) -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go deleted file mode 100644 index e7780c5a6f..0000000000 --- a/vendor/github.com/containers/image/image/oci.go +++ /dev/null @@ -1,199 +0,0 @@ -package image - -import ( - "encoding/json" - "io/ioutil" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. - m *manifest.OCI1 -} - -func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { - m, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestOCI1{ - src: src, - m: m, - }, nil -} - -// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { - return &manifestOCI1{ - src: src, - configBlob: configBlob, - m: manifest.OCI1FromComponents(config, layers), - } -} - -func (m *manifestOCI1) serialize() ([]byte, error) { - return m.m.Serialize() -} - -func (m *manifestOCI1) manifestMIMEType() string { - return imgspecv1.MediaTypeImageManifest -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. -func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return m.m.ConfigInfo() -} - -// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. -// The result is cached; it is OK to call this however often you need. 
-func (m *manifestOCI1) ConfigBlob() ([]byte, error) { - if m.configBlob == nil { - if m.src == nil { - return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") - } - stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.m.Config.Digest, - Size: m.m.Config.Size, - URLs: m.m.Config.URLs, - }) - if err != nil { - return nil, err - } - defer stream.Close() - blob, err := ioutil.ReadAll(stream) - if err != nil { - return nil, err - } - computedDigest := digest.FromBytes(blob) - if computedDigest != m.m.Config.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) - } - m.configBlob = blob - } - return m.configBlob, nil -} - -// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about -// layers in the resulting configuration isn't guaranteed to be returned to due how -// old image manifests work (docker v2s1 especially). -func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { - cb, err := m.ConfigBlob() - if err != nil { - return nil, err - } - configOCI := &imgspecv1.Image{} - if err := json.Unmarshal(cb, configOCI); err != nil { - return nil, err - } - return configOCI, nil -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - return m.m.LayerInfos() -} - -// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. -// It returns false if the manifest does not embed a Docker reference. -// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) -func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool { - return false -} - -func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { - getter := func(info types.BlobInfo) ([]byte, error) { - if info.Digest != m.ConfigInfo().Digest { - // Shouldn't ever happen - return nil, errors.New("asked for a different config blob") - } - config, err := m.ConfigBlob() - if err != nil { - return nil, err - } - return config, nil - } - return m.m.Inspect(getter) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return false -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. 
- src: m.src, - configBlob: m.configBlob, - m: manifest.OCI1Clone(m.m), - } - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. - - switch options.ManifestMIMEType { - case "": // No conversion, OK - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - // We can't directly convert to V1, but we can transitively convert via a V2 image - m2, err := copy.convertToManifestSchema2() - if err != nil { - return nil, err - } - return m2.UpdatedImage(types.ManifestUpdateOptions{ - ManifestMIMEType: options.ManifestMIMEType, - InformationOnly: options.InformationOnly, - }) - case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2() - default: - return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType) - } - - return memoryImageFromManifest(©), nil -} - -func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { - return manifest.Schema2Descriptor{ - MediaType: d.MediaType, - Size: d.Size, - Digest: d.Digest, - URLs: d.URLs, - } -} - -func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) - - // The only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src. - m1 := manifestSchema2FromComponents(config, m.src, nil, layers) - return memoryImageFromManifest(m1), nil -} diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go deleted file mode 100644 index 3477f341ea..0000000000 --- a/vendor/github.com/containers/image/image/sourced.go +++ /dev/null @@ -1,106 +0,0 @@ -// Package image consolidates knowledge about various container image formats -// (as opposed to image storage mechanisms, which are handled by types.ImageSource) -// and exposes all of them using an unified interface. -package image - -import ( - "github.com/containers/image/types" -) - -// imageCloser implements types.ImageCloser, perhaps allowing simple users -// to use a single object without having keep a reference to a types.ImageSource -// only to call types.ImageSource.Close(). -type imageCloser struct { - types.Image - src types.ImageSource -} - -// FromSource returns a types.ImageCloser implementation for the default instance of source. -// If source is a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate image instance. -// -// The caller must call .Close() on the returned ImageCloser. -// -// FromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. 
(This does not prevent callers from using both the -// Image and ImageSource objects simultaneously, but it means that they only need to keep a reference to -// the Image.) -// -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { - img, err := FromUnparsedImage(ctx, UnparsedInstance(src, nil)) - if err != nil { - return nil, err - } - return &imageCloser{ - Image: img, - src: src, - }, nil -} - -func (ic *imageCloser) Close() error { - return ic.src.Close() -} - -// sourcedImage is a general set of utilities for working with container images, -// whatever their underlying location (i.e. dockerImageSource-independent). -// Note the existence of skopeo/docker.Image: some instances of a `types.Image` -// may not be a `sourcedImage` directly. However, most users of `types.Image` -// do not care, and those who care about `skopeo/docker.Image` know they do. -type sourcedImage struct { - *UnparsedImage - manifestBlob []byte - manifestMIMEType string - // genericManifest contains data corresponding to manifestBlob. - // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest - // if you want to preserve the original manifest; use manifestBlob directly. - genericManifest -} - -// FromUnparsedImage returns a types.Image implementation for unparsed. -// If unparsed represents a manifest list, .Manifest() still returns the manifest list, -// but other methods transparently return data from an appropriate single image. -// -// The Image must not be used after the underlying ImageSource is Close()d. -func FromUnparsedImage(ctx *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest() - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &sourcedImage{ - UnparsedImage: unparsed, - manifestBlob: manifestBlob, - manifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *sourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
-func (i *sourcedImage) Manifest() ([]byte, string, error) { - return i.manifestBlob, i.manifestMIMEType, nil -} - -func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { - return inspectManifest(i.genericManifest) -} - -func (i *sourcedImage) LayerInfosForCopy() []types.BlobInfo { - return i.UnparsedImage.LayerInfosForCopy() -} diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go deleted file mode 100644 index aff06d8ada..0000000000 --- a/vendor/github.com/containers/image/image/unparsed.go +++ /dev/null @@ -1,102 +0,0 @@ -package image - -import ( - "context" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// UnparsedImage implements types.UnparsedImage . -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -type UnparsedImage struct { - src types.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { - return &UnparsedImage{ - src: src, - instanceDigest: instanceDigest, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *UnparsedImage) Reference() types.ImageReference { - // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. - return i.src.Reference() -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Manifest() ([]byte, string, error) { - if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest(i.instanceDigest) - if err != nil { - return nil, "", err - } - - // ImageSource.GetManifest does not do digest verification, but we do; - // this immediately protects also any user of types.Image. - if digest, haveDigest := i.expectedManifestDigest(); haveDigest { - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } - } - - i.cachedManifest = m - i.cachedManifestMIMEType = mt - } - return i.cachedManifest, i.cachedManifestMIMEType, nil -} - -// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. 
-// The bool return value seems redundant with digest != ""; it is used explicitly -// to refuse (unexpected) situations when the digest exists but is "". -func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { - if i.instanceDigest != nil { - return *i.instanceDigest, true - } - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - return canonical.Digest(), true - } - } - return "", false -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { - if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) - if err != nil { - return nil, err - } - i.cachedSignatures = sigs - } - return i.cachedSignatures, nil -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *UnparsedImage) LayerInfosForCopy() []types.BlobInfo { - return i.src.LayerInfosForCopy() -} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go deleted file mode 100644 index a28020edcc..0000000000 --- a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go +++ /dev/null @@ -1,19 +0,0 @@ -package tmpdir - -import ( - "os" - "runtime" -) - -// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. -// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp -// which on systemd based systems could be the unsuitable tmpfs filesystem. -func TemporaryDirectoryForBigFiles() string { - var temporaryDirectoryForBigFiles string - if runtime.GOOS == "windows" { - temporaryDirectoryForBigFiles = os.TempDir() - } else { - temporaryDirectoryForBigFiles = "/var/tmp" - } - return temporaryDirectoryForBigFiles -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go deleted file mode 100644 index b1c1cfe9ff..0000000000 --- a/vendor/github.com/containers/image/manifest/docker_schema1.go +++ /dev/null @@ -1,310 +0,0 @@ -package manifest - -import ( - "encoding/json" - "regexp" - "strings" - "time" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1. -type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` - SchemaVersion int `json:"schemaVersion"` -} - -// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. 
-type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. -// (NOTE: The instance is not necessary a literal representation of the original blob, -// layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifest []byte) (*Schema1, error) { - s1 := Schema1{} - if err := json.Unmarshal(manifest, &s1); err != nil { - return nil, err - } - if s1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) - } - if len(s1.FSLayers) != len(s1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(s1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - if err := s1.fixManifestLayers(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. -func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &Schema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -// Schema1Clone creates a copy of the supplied Schema1 manifest. -func Schema1Clone(src *Schema1) *Schema1 { - copy := *src - return © -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. 
- if len(m.FSLayers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - for i, info := range layerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). -// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func (m *Schema1) fixManifestLayers() error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(m.FSLayers) == len(m.History) - imgs := make([]*imageV1, len(m.FSLayers)) - for i := range m.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
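fixManifestLayers above removes consecutive duplicate layer IDs with a backwards loop, so the remaining indexes stay valid while entries are deleted, and validates parent links as it goes. The same shape in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

type layer struct{ ID, Parent string }

// dedupeLayers mirrors the deleted fixManifestLayers: walk backwards so
// removals do not shift indexes we have yet to visit, drop consecutive
// duplicate IDs, and reject broken parent links.
func dedupeLayers(imgs []layer) ([]layer, error) {
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID: remove and continue
			imgs = append(imgs[:i], imgs[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return nil, errors.New("invalid parent ID")
		}
	}
	return imgs, nil
}

func main() {
	// Newest first, as in schema1 history; "b" appears twice in a row.
	imgs := []layer{{ID: "c", Parent: "b"}, {ID: "b", Parent: "a"}, {ID: "b", Parent: "a"}, {ID: "a"}}
	fmt.Println(dedupeLayers(imgs))
	// Output: [{c b} {b a} {a }] <nil>
}
```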
-func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema2V1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - Created: s1.Created, - DockerVersion: s1.DockerVersion, - Labels: make(map[string]string), - Architecture: s1.Architecture, - Os: s1.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), - }, nil -} - -// ToSchema2 builds a schema2-style configuration blob using the supplied diffIDs. -func (m *Schema1) ToSchema2(diffIDs []digest.Digest) ([]byte, error) { - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - if len(m.History) == 0 { - return nil, errors.New("image has no layers") - } - s2 := struct { - Schema2Image - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - ParentID string `json:"parent_id,omitempty"` - LayerID string `json:"layer_id,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` - Size int64 `json:",omitempty"` - }{} - config := []byte(m.History[0].V1Compatibility) - err := json.Unmarshal(config, &s2) - if err != nil { - return nil, errors.Wrapf(err, "error decoding configuration") - } - // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, - // adding some fields that aren't "omitempty". - if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { - config, err = json.Marshal(&s2) - if err != nil { - return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s2) - } - } - // Build the history. - convertedHistory := []Schema2History{} - for _, h := range m.History { - compat := Schema1V1Compatibility{} - if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil { - return nil, errors.Wrapf(err, "error decoding history information") - } - hitem := Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Author: compat.Author, - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - convertedHistory = append([]Schema2History{hitem}, convertedHistory...) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. It's expected (but not enforced by us) - // that the number of diffIDs corresponds to the number of non-EmptyLayer - // entries in the history. - rootFS := &Schema2RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s2) - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. - rootfs, err := json.Marshal(rootFS) - if err != nil { - return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(convertedHistory) - if err != nil { - return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result.
- config, err = json.Marshal(raw) - if err != nil { - return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s2, err) - } - return config, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { - image, err := m.ToSchema2(diffIDs) - if err != nil { - return "", err - } - return digest.FromBytes(image).Hex(), nil -} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go deleted file mode 100644 index ef82ffc246..0000000000 --- a/vendor/github.com/containers/image/manifest/docker_schema2.go +++ /dev/null @@ -1,251 +0,0 @@ -package manifest - -import ( - "encoding/json" - "time" - - "github.com/containers/image/pkg/strslice" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - -// Schema2 is a manifest in docker/distribution schema 2. -type Schema2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor Schema2Descriptor `json:"config"` - LayersDescriptors []Schema2Descriptor `json:"layers"` -} - -// Schema2Port is a Port, a string containing port number and protocol in the -// format "80/tcp", from docker/go-connections/nat. -type Schema2Port string - -// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from -// docker/go-connections/nat. -type Schema2PortSet map[Schema2Port]struct{} - -// Schema2HealthConfig is a HealthConfig, which holds configuration settings -// for the HEALTHCHECK feature, from docker/docker/api/types/container. -type Schema2HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Schema2Config is a Config in docker/docker/api/types/container. -type Schema2Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variables to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in which the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Schema2V1Image is a V1Image in docker/docker/image. -type Schema2V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Schema2Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Schema2Config `json:"config,omitempty"` - // Architecture is the hardware that the image is built for and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. -type Schema2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// Schema2History stores build commands that were used to create an image, from docker/docker/image. 
-type Schema2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Schema2Image is an Image in docker/docker/image. -type Schema2Image struct { - Schema2V1Image - Parent digest.Digest `json:"parent,omitempty"` - RootFS *Schema2RootFS `json:"rootfs,omitempty"` - History []Schema2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. - computedID digest.Digest -} - -// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifest []byte) (*Schema2, error) { - s2 := Schema2{} - if err := json.Unmarshal(manifest, &s2); err != nil { - return nil, err - } - return &s2, nil -} - -// Schema2FromComponents creates a Schema2 manifest instance from the supplied data. -func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { - return &Schema2{ - SchemaVersion: 2, - MediaType: DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, - } -} - -// Schema2Clone creates a copy of the supplied Schema2 manifest. -func Schema2Clone(src *Schema2) *Schema2 { - copy := *src - return &copy -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *Schema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} -} 
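As a usage sketch of the constructors above (digests fabricated via digest.FromString purely for illustration), assembling and serializing a schema2 manifest looks like this:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	s2 := manifest.Schema2FromComponents(
		manifest.Schema2Descriptor{
			MediaType: manifest.DockerV2Schema2ConfigMediaType,
			Size:      123, // made-up size
			Digest:    digest.FromString("example config blob"),
		},
		[]manifest.Schema2Descriptor{{
			MediaType: manifest.DockerV2Schema2LayerMediaType,
			Size:      456, // made-up size
			Digest:    digest.FromString("example layer blob"),
		}},
	)

	blob, err := s2.Serialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(blob))
}
```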
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *Schema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - }) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.LayersDescriptors) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) - } - original := m.LayersDescriptors - m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.LayersDescriptors[i].MediaType = original[i].MediaType - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - i := &types.ImageInspectInfo{ - Tag: "", - Created: s2.Created, - DockerVersion: s2.DockerVersion, - Architecture: s2.Architecture, - Os: s2.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), - } - if s2.Config != nil { - i.Labels = s2.Config.Labels - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema2) ImageID([]digest.Digest) (string, error) { - if err := m.ConfigDescriptor.Digest.Validate(); err != nil { - return "", err - } - return m.ConfigDescriptor.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go deleted file mode 100644 index 2bc801d815..0000000000 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ /dev/null @@ -1,238 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - - "github.com/containers/image/types" - "github.com/docker/libtrust" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// FIXME: Should we just use docker/distribution and docker/docker implementations directly? - -// FIXME(runcom, mitr): should we have a mediatype pkg?? -const ( - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json" - // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json" - // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. 
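One more aside before the remaining manifest.go constants: UpdateLayerInfos above keeps each layer's media type from the original descriptor while swapping in the new digest, size, and URLs. A sketch of that behavior (fabricated digests again):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	s2 := manifest.Schema2FromComponents(
		manifest.Schema2Descriptor{MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: 10, Digest: digest.FromString("cfg")},
		[]manifest.Schema2Descriptor{{MediaType: manifest.DockerV2Schema2LayerMediaType, Size: 20, Digest: digest.FromString("layer")}},
	)

	// Pretend the layer was recompressed: digest and size change, but the
	// media type is carried over from the original descriptor.
	infos := s2.LayerInfos()
	infos[0].Digest = digest.FromString("recompressed layer")
	infos[0].Size = -1
	if err := s2.UpdateLayerInfos(infos); err != nil {
		panic(err)
	}
	fmt.Println(s2.LayersDescriptors[0].MediaType, s2.LayersDescriptors[0].Digest)
}
```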
- DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json" - // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" - // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json" - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, -} - -// Manifest is an interface for parsing, modifying image manifests in isolation. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []types.BlobInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // ImageID computes an ID which can uniquely identify this image by its contents, irrespective - // of which (of possibly more than one simultaneously valid) reference was used to locate the - // image, and unchanged by whether or how the layers are compressed. The result takes the form - // of the hexadecimal portion of a digest.Digest. - ImageID(diffIDs []digest.Digest) (string, error) - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob. - Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - -// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. -// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, -// but we may not have such metadata available (e.g. when the manifest is a local file). 
-func GuessMIMEType(manifest []byte) string { - // A subset of manifest fields; the rest is silently ignored by json.Unmarshal. - // Also docker/distribution/manifest.Versioned. - meta := struct { - MediaType string `json:"mediaType"` - SchemaVersion int `json:"schemaVersion"` - Signatures interface{} `json:"signatures"` - }{} - if err := json.Unmarshal(manifest, &meta); err != nil { - return "" - } - - switch meta.MediaType { - case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type. - return meta.MediaType - } - // this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest. - switch meta.SchemaVersion { - case 1: - if meta.Signatures != nil { - return DockerV2Schema1SignedMediaType - } - return DockerV2Schema1MediaType - case 2: - // best effort to understand if this is an OCI image since mediaType - // isn't in the manifest for OCI anymore - // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess. - ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - Layers []imgspecv1.Descriptor `json:"layers"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 { - return imgspecv1.MediaTypeImageManifest - } - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest { - return imgspecv1.MediaTypeImageIndex - } - return DockerV2Schema2MediaType - } - return "" -} - -// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil -} - -// MatchesDigest returns true iff the manifest matches expectedDigest. -// Error may be set if this returns false. -// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. -// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature). 
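GuessMIMEType and Digest above compose naturally: sniff the type, then compute the (signature-stripped) digest. A short sketch with a skeletal, made-up schema2 blob:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)

	fmt.Println(manifest.GuessMIMEType(blob)) // application/vnd.docker.distribution.manifest.v2+json

	// For v2s1 signed manifests, Digest first strips the JWS signatures;
	// here there are none, so this is just a sha256 over the bytes.
	d, err := manifest.Digest(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println(d)
}
```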
-func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err // Coverage: This can fail only if rand.Reader fails. - } - - js, err := libtrust.NewJSONSignature(manifest) - if err != nil { - return nil, err - } - if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. - return nil, err - } - return js.PrettySignature("signatures") -} - -// MIMETypeIsMultiImage returns true if mimeType is a list of images -func MIMETypeIsMultiImage(mimeType string) bool { - return mimeType == DockerV2ListMediaType -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type -func FromBlob(manblob []byte, mt string) (Manifest, error) { - switch NormalizedMIMEType(mt) { - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: - return Schema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return OCI1FromManifest(manblob) - case DockerV2Schema2MediaType: - return Schema2FromManifest(manblob) - case DockerV2ListMediaType: - return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") - default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) - } -} - -// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. 
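The pairing of NormalizedMIMEType and FromBlob above is what lets callers feed in whatever type a server reported. A hedged sketch (the blob is a minimal stand-in, not a real manifest):

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// Odd server-reported types are folded onto the v2s1 signed fallback.
	fmt.Println(manifest.NormalizedMIMEType("application/json"))
	// application/vnd.docker.distribution.manifest.v1+prettyjws

	blob := []byte(`{"schemaVersion": 2, "mediaType": "application/vnd.docker.distribution.manifest.v2+json"}`)
	m, err := manifest.FromBlob(blob, manifest.DockerV2Schema2MediaType)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", m.ConfigInfo())
}
```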
-func LayerInfosToStrings(infos []types.BlobInfo) []string { - layers := make([]string, len(infos)) - for i, info := range infos { - layers[i] = info.Digest.String() - } - return layers -} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go deleted file mode 100644 index 0ffb35b746..0000000000 --- a/vendor/github.com/containers/image/manifest/oci.go +++ /dev/null @@ -1,120 +0,0 @@ -package manifest - -import ( - "encoding/json" - "time" - - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifest []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifest, &oci1); err != nil { - return nil, err - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. -func OCI1Clone(src *OCI1) *OCI1 { - return &OCI1{ - Manifest: src.Manifest, - } -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *OCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations} -} - -// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *OCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.Layers { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) - } - return blobs -} - -// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) -func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { - if len(m.Layers) != len(layerInfos) { - return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) - } - original := m.Layers - m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) - for i, info := range layerInfos { - m.Layers[i].MediaType = original[i].MediaType - m.Layers[i].Digest = info.Digest - m.Layers[i].Size = info.Size - m.Layers[i].Annotations = info.Annotations - m.Layers[i].URLs = info.URLs - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! 
-func (m *OCI1) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - v1 := &imgspecv1.Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - d1 := &Schema2V1Image{} - json.Unmarshal(config, d1) - created := time.Time{} - if v1.Created != nil { - created = *v1.Created - } - i := &types.ImageInspectInfo{ - Tag: "", - Created: created, - DockerVersion: d1.DockerVersion, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - Layers: LayerInfosToStrings(m.LayerInfos()), - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *OCI1) ImageID([]digest.Digest) (string, error) { - if err := m.Config.Digest.Validate(); err != nil { - return "", err - } - return m.Config.Digest.Hex(), nil -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_dest.go b/vendor/github.com/containers/image/oci/archive/oci_dest.go deleted file mode 100644 index 52e99a43dc..0000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_dest.go +++ /dev/null @@ -1,131 +0,0 @@ -package archive - -import ( - "io" - "os" - - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -type ociArchiveImageDestination struct { - ref ociArchiveReference - unpackedDest types.ImageDestination - tempDirRef tempDirOCIRef -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageDestination, error) { - tempDirRef, err := createOCIRef(ref.image) - if err != nil { - return nil, errors.Wrap(err, "error creating oci reference") - } - unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageDestination{ref: ref, - unpackedDest: unpackedDest, - tempDirRef: tempDirRef}, nil -} - -// Reference returns the reference used to set up this destination. 
-func (d *ociArchiveImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any -// Close deletes the temp directory of the oci-archive image -func (d *ociArchiveImageDestination) Close() error { - defer d.tempDirRef.deleteTempDir() - return d.unpackedDest.Close() -} - -func (d *ociArchiveImageDestination) SupportedManifestMIMETypes() []string { - return d.unpackedDest.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures -func (d *ociArchiveImageDestination) SupportsSignatures() error { - return d.unpackedDest.SupportsSignatures() -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination -func (d *ociArchiveImageDestination) ShouldCompressLayers() bool { - return d.unpackedDest.ShouldCompressLayers() -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociArchiveImageDestination) AcceptsForeignLayerURLs() bool { - return d.unpackedDest.AcceptsForeignLayerURLs() -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise -func (d *ociArchiveImageDestination) MustMatchRuntimeOS() bool { - return d.unpackedDest.MustMatchRuntimeOS() -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -func (d *ociArchiveImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - return d.unpackedDest.PutBlob(stream, inputInfo) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob -func (d *ociArchiveImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - return d.unpackedDest.HasBlob(info) -} - -func (d *ociArchiveImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return d.unpackedDest.ReapplyBlob(info) -} - -// PutManifest writes manifest to the destination -func (d *ociArchiveImageDestination) PutManifest(m []byte) error { - return d.unpackedDest.PutManifest(m) -} - -func (d *ociArchiveImageDestination) PutSignatures(signatures [][]byte) error { - return d.unpackedDest.PutSignatures(signatures) -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted -// after the directory is made, it is tarred up into a file and the directory is deleted -func (d *ociArchiveImageDestination) Commit() error { - if err := d.unpackedDest.Commit(); err != nil { - return errors.Wrapf(err, "error storing image %q", d.ref.image) - } - - // path of directory to tar up - src := d.tempDirRef.tempDirectory - // path to save tarred up file - dst := d.ref.resolvedFile - return tarDirectory(src, dst) -} - -// tarDirectory tars the directory at src and saves it to dst -func tarDirectory(src, dst string) error { - // input is a stream of bytes from the archive of the directory at path - input, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - return errors.Wrapf(err, "error retrieving stream of bytes from %q", src) - } - - // creates the tar file - outFile, err := os.Create(dst) - if err != nil { - return errors.Wrapf(err, "error creating tar file %q", dst) - } - defer outFile.Close() - - // copies the contents of the directory to the tar file - _, err = io.Copy(outFile, input) - - return err -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go deleted file mode 100644 index aee5d8d5bf..0000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ /dev/null @@ -1,95 +0,0 @@ -package archive - -import ( - "context" - "io" - - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/types" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociArchiveImageSource struct { - ref ociArchiveReference - unpackedSrc types.ImageSource - tempDirRef tempDirOCIRef -} - -// newImageSource returns an ImageSource for reading from an existing directory. -// newImageSource untars the file and saves it in a temp directory -func newImageSource(ctx *types.SystemContext, ref ociArchiveReference) (types.ImageSource, error) { - tempDirRef, err := createUntarTempDir(ref) - if err != nil { - return nil, errors.Wrap(err, "error creating temp directory") - } - - unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx) - if err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return nil, err - } - return &ociArchiveImageSource{ref: ref, - unpackedSrc: unpackedSrc, - tempDirRef: tempDirRef}, nil -} - -// LoadManifestDescriptor loads the manifest -func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) { - ociArchRef, ok := imgRef.(ociArchiveReference) - if !ok { - return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociArchiveReference") - } - tempDirRef, err := createUntarTempDir(ociArchRef) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error creating temp directory") - } - defer tempDirRef.deleteTempDir() - - descriptor, err := ocilayout.LoadManifestDescriptor(tempDirRef.ociRefExtracted) - if err != nil { - return imgspecv1.Descriptor{}, errors.Wrap(err, "error loading index") - } - return descriptor, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociArchiveImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -// Close deletes the temporary directory at dst -func (s *ociArchiveImageSource) Close() error { - defer s.tempDirRef.deleteTempDir() - return s.unpackedSrc.Close() -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetManifest(instanceDigest) -} - -// GetBlob returns a stream for the specified blob, and the blob's size. 
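The tar helpers used by this transport (archive.Tar in tarDirectory above, archive.UntarPath in createUntarTempDir below) round-trip a directory through a tarball. A sketch under assumed /tmp paths:

```go
package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Stream an uncompressed tarball of a directory, as tarDirectory does.
	input, err := archive.Tar("/tmp/layout", archive.Uncompressed)
	if err != nil {
		panic(err)
	}
	defer input.Close()

	out, err := os.Create("/tmp/layout.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, input); err != nil {
		panic(err)
	}

	// Unpack it again, as createUntarTempDir does into its temp directory.
	if err := archive.UntarPath("/tmp/layout.tar", "/tmp/unpacked"); err != nil {
		panic(err)
	}
}
```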
-func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - return s.unpackedSrc.GetBlob(info) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(ctx, instanceDigest) -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociArchiveImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go deleted file mode 100644 index c4a4fa716e..0000000000 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ /dev/null @@ -1,190 +0,0 @@ -package archive - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/internal/tmpdir" - "github.com/containers/image/oci/internal" - ocilayout "github.com/containers/image/oci/layout" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OCI archive -// it creates an oci-archive tar file by calling into the OCI transport -// tarring the directory created by oci and deleting the directory -var Transport = ociArchiveTransport{} - -type ociArchiveTransport struct{} - -// ociArchiveReference is an ImageReference for OCI Archive paths -type ociArchiveReference struct { - file string - resolvedFile string - image string -} - -func (t ociArchiveTransport) Name() string { - return "oci-archive" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix -// into an ImageReference. -func (t ociArchiveTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - return internal.ValidateScope(scope) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. -func ParseReference(reference string) (types.ImageReference, error) { - file, image := internal.SplitPathAndImage(reference) - return NewReference(file, image) -} - -// NewReference returns an OCI reference for a file and a image. 
-func NewReference(file, image string) (types.ImageReference, error) { - resolved, err := explicitfilepath.ResolvePathToFullyExplicit(file) - if err != nil { - return nil, err - } - - if err := internal.ValidateOCIPath(file); err != nil { - return nil, err - } - - if err := internal.ValidateImageName(image); err != nil { - return nil, err - } - - return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil -} - -func (ref ociArchiveReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -func (ref ociArchiveReference) StringWithinTransport() string { - return fmt.Sprintf("%s:%s", ref.file, ref.image) -} - -// DockerReference returns a Docker reference associated with this reference -func (ref ociArchiveReference) DockerReference() reference.Named { - return nil -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -func (ref ociArchiveReference) PolicyConfigurationIdentity() string { - // NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the - // same image and the two can’t be statically disambiguated. Using at least the repository directory is - // less granular but hopefully still useful. - return fmt.Sprintf("%s", ref.resolvedFile) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set -func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { - res := []string{} - path := ref.resolvedFile - for { - lastSlash := strings.LastIndex(path, "/") - // Note that we do not include "/"; it is redundant with the default "" global default, - // and rejected by ociTransport.ValidatePolicyConfigurationScope above. - if lastSlash == -1 || path == "/" { - break - } - res = append(res, path) - path = path[:lastSlash] - } - return res -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref ociArchiveReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ociArchiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. 
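The loop in PolicyConfigurationNamespaces above strips one path component per iteration, stopping before the root. Extracted into a standalone sketch with an illustrative path:

```go
package main

import (
	"fmt"
	"strings"
)

// namespaces mirrors the walk in PolicyConfigurationNamespaces above.
func namespaces(path string) []string {
	res := []string{}
	for {
		lastSlash := strings.LastIndex(path, "/")
		if lastSlash == -1 || path == "/" {
			break
		}
		res = append(res, path)
		path = path[:lastSlash]
	}
	return res
}

func main() {
	fmt.Println(namespaces("/var/lib/oci/archive.tar"))
	// [/var/lib/oci/archive.tar /var/lib/oci /var/lib /var]
}
```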
-func (ref ociArchiveReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for oci: images") -} - -// struct to store the ociReference and temporary directory returned by createOCIRef -type tempDirOCIRef struct { - tempDirectory string - ociRefExtracted types.ImageReference -} - -// deletes the temporary directory created -func (t *tempDirOCIRef) deleteTempDir() error { - return os.RemoveAll(t.tempDirectory) -} - -// createOCIRef creates the oci reference of the image -func createOCIRef(image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") - if err != nil { - return tempDirOCIRef{}, errors.Wrap(err, "error creating temp directory") - } - ociRef, err := ocilayout.NewReference(dir, image) - if err != nil { - return tempDirOCIRef{}, err - } - - tempDirRef := tempDirOCIRef{tempDirectory: dir, ociRefExtracted: ociRef} - return tempDirRef, nil -} - -// creates the temporary directory and copies the tarred content to it -func createUntarTempDir(ref ociArchiveReference) (tempDirOCIRef, error) { - tempDirRef, err := createOCIRef(ref.image) - if err != nil { - return tempDirOCIRef{}, errors.Wrap(err, "error creating oci reference") - } - src := ref.resolvedFile - dst := tempDirRef.tempDirectory - if err := archive.UntarPath(src, dst); err != nil { - if err := tempDirRef.deleteTempDir(); err != nil { - return tempDirOCIRef{}, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory) - } - return tempDirOCIRef{}, errors.Wrapf(err, "error untarring file %q", tempDirRef.tempDirectory) - } - return tempDirRef, nil -} diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go deleted file mode 100644 index c2012e50e0..0000000000 --- a/vendor/github.com/containers/image/oci/internal/oci_util.go +++ /dev/null @@ -1,126 +0,0 @@ -package internal - -import ( - "github.com/pkg/errors" - "path/filepath" - "regexp" - "runtime" - "strings" -) - -// annotation specs from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) - -// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. -// In any other case an error is returned. -func ValidateImageName(image string) error { - if len(image) == 0 { - return nil - } - - var err error - if !refRegexp.MatchString(image) { - err = errors.Errorf("Invalid image %s", image) - } - return err -} - -// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. -// Neither path nor image parts are validated at this stage. -func SplitPathAndImage(reference string) (string, string) { - if runtime.GOOS == "windows" { - return splitPathAndImageWindows(reference) - } - return splitPathAndImageNonWindows(reference) -} - -func splitPathAndImageWindows(reference string) (string, string) { - groups := windowsRefRegexp.FindStringSubmatch(reference) - // nil group means no match - if groups == nil { - return reference, "" - } - - // we expect three elements: the first one the full match, the second the capture group for the path, and - // the third the capture group for the image - if len(groups) != 3 { - return reference, "" - } - return groups[1], groups[2] -} - -func splitPathAndImageNonWindows(reference string) (string, string) { - sep := strings.SplitN(reference, ":", 2) - path := sep[0] - - var image string - if len(sep) == 2 { - image = sep[1] - } - return path, image -} - -// ValidateOCIPath takes the OCI path and validates it. -func ValidateOCIPath(path string) error { - if runtime.GOOS == "windows" { - // On Windows we must allow for a ':' as part of the path - if strings.Count(path, ":") > 1 { - return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) - } - } else { - if strings.Contains(path, ":") { - return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) - } - } - return nil -} - -// ValidateScope validates a policy configuration scope for an OCI transport. -func ValidateScope(scope string) error { - var err error - if runtime.GOOS == "windows" { - err = validateScopeWindows(scope) - } else { - err = validateScopeNonWindows(scope) - } - if err != nil { - return err - } - - cleaned := filepath.Clean(scope) - if cleaned != scope { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - - return nil -} - -func validateScopeWindows(scope string) error { - matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) - if !matched { - return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) - } - - return nil -} - -func validateScopeNonWindows(scope string) error { - if !strings.HasPrefix(scope, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - - return nil -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go deleted file mode 100644 index e95f65167a..0000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ /dev/null @@ -1,294 +0,0 @@ -package layout - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/pkg/errors" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type ociImageDestination struct { - ref ociReference - index imgspecv1.Index - sharedBlobDir string -} - -// newImageDestination returns an ImageDestination for writing to an existing directory. 
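On non-Windows platforms the reference split above is just a cut at the first colon. A standalone sketch of splitPathAndImageNonWindows with illustrative inputs:

```go
package main

import (
	"fmt"
	"strings"
)

// splitPathAndImage mirrors splitPathAndImageNonWindows above: everything
// after the first colon (possibly empty or absent) is the image name.
func splitPathAndImage(reference string) (string, string) {
	sep := strings.SplitN(reference, ":", 2)
	path := sep[0]

	var image string
	if len(sep) == 2 {
		image = sep[1]
	}
	return path, image
}

func main() {
	fmt.Println(splitPathAndImage("/tmp/layout:latest")) // "/tmp/layout" "latest"
	fmt.Println(splitPathAndImage("/tmp/layout"))        // "/tmp/layout" ""
}
```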
-func newImageDestination(ctx *types.SystemContext, ref ociReference) (types.ImageDestination, error) { - if ref.image == "" { - return nil, errors.Errorf("cannot save image with empty image.ref.name") - } - - var index *imgspecv1.Index - if indexExists(ref) { - var err error - index, err = ref.getIndex() - if err != nil { - return nil, err - } - } else { - index = &imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, - } - } - - d := &ociImageDestination{ref: ref, index: *index} - if ctx != nil { - d.sharedBlobDir = ctx.OCISharedBlobDirPath - } - - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return nil, err - } - // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, - // but it MAY be empty (e.g. if we never end up calling PutBlob) - // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 - if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { - return nil, err - } - return d, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ociImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ociImageDestination) Close() error { - return nil -} - -func (d *ociImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - imgspecv1.MediaTypeImageManifest, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ociImageDestination) SupportsSignatures() error { - return errors.Errorf("Pushing signatures for OCI images is not supported") -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. -func (d *ociImageDestination) ShouldCompressLayers() bool { - return true -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ociImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ociImageDestination) MustMatchRuntimeOS() bool { - return false -} - -// PutBlob writes contents of stream and returns data representing the result (with all data filled in). -// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. -// inputInfo.Size is the expected length of stream, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
-func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") - if err != nil { - return types.BlobInfo{}, err - } - succeeded := false - explicitClosed := false - defer func() { - if !explicitClosed { - blobFile.Close() - } - if !succeeded { - os.Remove(blobFile.Name()) - } - }() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - - // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. - // On Windows, the “permissions of newly created files” argument to syscall.Open is - // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, - // always fails on Windows. - if runtime.GOOS != "windows" { - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err - } - } - - blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) - if err != nil { - return types.BlobInfo{}, err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return types.BlobInfo{}, err - } - - // need to explicitly close the file, since a rename won't otherwise work on Windows - blobFile.Close() - explicitClosed = true - if err := os.Rename(blobFile.Name(), blobPath); err != nil { - return types.BlobInfo{}, err - } - succeeded = true - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - if info.Digest == "" { - return false, -1, errors.Errorf("Can not check for a blob with unknown digest") - } - blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) - if err != nil { - return false, -1, err - } - finfo, err := os.Stat(blobPath) - if err != nil && os.IsNotExist(err) { - return false, -1, nil - } - if err != nil { - return false, -1, err - } - return true, finfo.Size(), nil -} - -func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} 
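The heart of PutBlob above is digest-while-writing: io.TeeReader feeds every byte both to the destination file and to a digester, so the blob is verified against exactly what was stored. The pattern in isolation (writing to stdout as a stand-in for the temp blob file):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	stream := strings.NewReader("blob contents")

	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	n, err := io.Copy(os.Stdout, tee)
	if err != nil {
		panic(err)
	}
	fmt.Printf("\nwrote %d bytes, digest %s\n", n, digester.Digest())
}
```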
-// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. -func (d *ociImageDestination) PutManifest(m []byte) error { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - desc := imgspecv1.Descriptor{} - desc.Digest = digest - // TODO(runcom): be aware and add support for OCI manifest list - desc.MediaType = imgspecv1.MediaTypeImageManifest - desc.Size = int64(len(m)) - - blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) - if err != nil { - return err - } - if err := ensureParentDirectoryExists(blobPath); err != nil { - return err - } - if err := ioutil.WriteFile(blobPath, m, 0644); err != nil { - return err - } - - if d.ref.image == "" { - return errors.Errorf("cannot save image with empty image.ref.name") - } - - annotations := make(map[string]string) - annotations["org.opencontainers.image.ref.name"] = d.ref.image - desc.Annotations = annotations - desc.Platform = &imgspecv1.Platform{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - } - d.addManifest(&desc) - - return nil -} - -func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { - for i, manifest := range d.index.Manifests { - if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { - // TODO Should there first be a cleanup based on the descriptor we are going to replace? - d.index.Manifests[i] = *desc - return - } - } - d.index.Manifests = append(d.index.Manifests, *desc) -} - -func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { - if len(signatures) != 0 { - return errors.Errorf("Pushing signatures for OCI images is not supported") - } - return nil -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (d *ociImageDestination) Commit() error { - if err := ioutil.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil { - return err - } - indexJSON, err := json.Marshal(d.index) - if err != nil { - return err - } - return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) -} - -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} 
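What Commit above persists is small: an oci-layout marker plus the marshaled imgspecv1.Index that addManifest has been accumulating. A sketch of the index side (manifest bytes and tag fabricated):

```go
package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	imgspec "github.com/opencontainers/image-spec/specs-go"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	manifestBlob := []byte(`{"schemaVersion": 2}`)

	index := imgspecv1.Index{
		Versioned: imgspec.Versioned{SchemaVersion: 2},
		Manifests: []imgspecv1.Descriptor{{
			MediaType:   imgspecv1.MediaTypeImageManifest,
			Digest:      digest.FromBytes(manifestBlob),
			Size:        int64(len(manifestBlob)),
			Annotations: map[string]string{"org.opencontainers.image.ref.name": "latest"},
		}},
	}

	indexJSON, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(indexJSON))                 // contents of index.json
	fmt.Println(`{"imageLayoutVersion": "1.0.0"}`) // contents of the oci-layout file
}
```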
-// indexExists checks whether the index location specified in the OCI reference exists. -// The implementation is opinionated, since in case of unexpected errors true is returned -func indexExists(ref ociReference) bool { - _, err := os.Stat(ref.indexPath()) - if err == nil { - return true - } - if os.IsNotExist(err) { - return false - } - return true -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go deleted file mode 100644 index 1109f65c6a..0000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ /dev/null @@ -1,157 +0,0 @@ -package layout - -import ( - "context" - "io" - "io/ioutil" - "net/http" - "os" - "strconv" - - "github.com/containers/image/pkg/tlsclientconfig" - "github.com/containers/image/types" - "github.com/docker/go-connections/tlsconfig" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor - client *http.Client - sharedBlobDir string -} - -// newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSource, error) { - tr := tlsclientconfig.NewTransport() - tr.TLSClientConfig = tlsconfig.ServerDefault() - - if ctx != nil && ctx.OCICertPath != "" { - if err := tlsclientconfig.SetupCertificates(ctx.OCICertPath, tr.TLSClientConfig); err != nil { - return nil, err - } - tr.TLSClientConfig.InsecureSkipVerify = ctx.OCIInsecureSkipTLSVerify - } - - client := &http.Client{} - client.Transport = tr - descriptor, err := ref.getManifestDescriptor() - if err != nil { - return nil, err - } - d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} - if ctx != nil { - // TODO(jonboulle): check dir existence? - d.sharedBlobDir = ctx.OCISharedBlobDirPath - } - return d, nil -} - -// Reference returns the reference used to set up this source. -func (s *ociImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *ociImageSource) Close() error { - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *ociImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - var dig digest.Digest - var mimeType string - if instanceDigest == nil { - dig = digest.Digest(s.descriptor.Digest) - mimeType = s.descriptor.MediaType - } else { - dig = *instanceDigest - // XXX: instanceDigest means that we don't immediately have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that it's - // MediaTypeImageManifest. - // FIXME: We should actually be able to look up the manifest in the index, - // and see the MIME type there. 
- mimeType = imgspecv1.MediaTypeImageManifest - } - - manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) - if err != nil { - return nil, "", err - } - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - return m, mimeType, nil -} - -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - return s.getExternalBlob(info.URLs) - } - - path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) - if err != nil { - return nil, 0, err - } - - r, err := os.Open(path) - if err != nil { - return nil, 0, err - } - fi, err := r.Stat() - if err != nil { - return nil, 0, err - } - return r, fi.Size(), nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - return [][]byte{}, nil -} - -func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) { - errWrap := errors.New("failed fetching external blob from all urls") - for _, url := range urls { - resp, err := s.client.Get(url) - if err != nil { - errWrap = errors.Wrapf(errWrap, "fetching %s failed %s", url, err.Error()) - continue - } - - if resp.StatusCode != http.StatusOK { - resp.Body.Close() - errWrap = errors.Wrapf(errWrap, "fetching %s failed, response code not 200", url) - continue - } - - return resp.Body, getBlobSize(resp), nil - } - - return nil, 0, errWrap -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ociImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} - -func getBlobSize(resp *http.Response) int64 { - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - size = -1 - } - return size -} diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go deleted file mode 100644 index c181c4c77b..0000000000 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ /dev/null @@ -1,255 +0,0 @@ -package layout - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/oci/internal" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OCI directories. -var Transport = ociTransport{} - -type ociTransport struct{} - -func (t ociTransport) Name() string { - return "oci" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. 
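An aside before the transport methods continue: the blobPath calls in GetManifest and GetBlob above resolve a content digest to a file under the layout's blobs tree, with sharedBlobDir (when set) replacing the per-layout directory. A minimal sketch of that mapping, assuming dir is a layout directory and dgst a validated digest:

	// <dir>/blobs/<algorithm>/<hex>, e.g. <dir>/blobs/sha256/e3b0c4...
	p := filepath.Join(dir, "blobs", dgst.Algorithm().String(), dgst.Hex())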
-func (t ociTransport) ParseReference(reference string) (types.ImageReference, error) {
-	return ParseReference(reference)
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// scope passed to this function will not be "", that value is always allowed.
-func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
-	return internal.ValidateScope(scope)
-}
-
-// ociReference is an ImageReference for OCI directory paths.
-type ociReference struct {
-	// Note that the interpretation of paths below depends on the underlying filesystem state, which may change under us at any time!
-	// Either of the paths may point to a different, or no, inode over time. resolvedDir may contain symbolic links, and so on.
-
-	// Generally we follow the intent of the user, and use the "dir" member for filesystem operations (e.g. the user can use a relative path to avoid
-	// being exposed to symlinks and renames in the parent directories to the working directory).
-	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
-	dir         string // As specified by the user. May be relative, contain symlinks, etc.
-	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
-	image       string // If image=="", it means the only image in the index.json is used
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
-func ParseReference(reference string) (types.ImageReference, error) {
-	dir, image := internal.SplitPathAndImage(reference)
-	return NewReference(dir, image)
-}
-
-// NewReference returns an OCI reference for a directory and an image.
-//
-// We do not expose an API supplying the resolvedDir; we could, but recomputing it
-// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
-func NewReference(dir, image string) (types.ImageReference, error) {
-	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := internal.ValidateOCIPath(dir); err != nil {
-		return nil, err
-	}
-
-	if err = internal.ValidateImageName(image); err != nil {
-		return nil, err
-	}
-
-	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
-}
-
-func (ref ociReference) Transport() types.ImageTransport {
-	return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref ociReference) StringWithinTransport() string {
-	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
-}
-
-// DockerReference returns a Docker reference associated with this reference
-// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
-// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
-func (ref ociReference) DockerReference() reference.Named {
-	return nil
-}
-
-// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
-// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
-// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
-// (i.e. various references with exactly the same semantics should return the same configuration identity)
-// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-// Returns "" if configuration identities for these references are not supported.
-func (ref ociReference) PolicyConfigurationIdentity() string {
-	// NOTE: ref.image is not a part of the image identity, because "$dir:$someimage" and "$dir:" may mean the
-	// same image and the two can’t be statically disambiguated. Using at least the repository directory is
-	// less granular but hopefully still useful.
-	return fmt.Sprintf("%s", ref.resolvedDir)
-}
-
-// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-// in order, terminating on first match, and an implicit "" is always checked at the end.
-// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-// and each following element to be a prefix of the element preceding it.
-func (ref ociReference) PolicyConfigurationNamespaces() []string {
-	res := []string{}
-	path := ref.resolvedDir
-	for {
-		lastSlash := strings.LastIndex(path, "/")
-		// Note that we do not include "/"; it is redundant with the default "" global default,
-		// and rejected by ociTransport.ValidatePolicyConfigurationScope above.
-		if lastSlash == -1 || path == "/" {
-			break
-		}
-		res = append(res, path)
-		path = path[:lastSlash]
-	}
-	return res
-}
-
-// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-// The caller must call .Close() on the returned ImageCloser.
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-func (ref ociReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) {
-	src, err := newImageSource(ctx, ref)
-	if err != nil {
-		return nil, err
-	}
-	return image.FromSource(ctx, src)
-}
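For reference, a minimal index.json of the shape the index helpers below decode; getManifestDescriptor matches the org.opencontainers.image.ref.name annotation against ref.image (illustrative digest and name only):

	const exampleIndex = `{
	  "schemaVersion": 2,
	  "manifests": [{
	    "mediaType": "application/vnd.oci.image.manifest.v1+json",
	    "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000",
	    "size": 7143,
	    "annotations": {"org.opencontainers.image.ref.name": "myimage"}
	  }]
	}`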
-// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs
-// opening the index, nil is returned together with the error.
-func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
-	indexJSON, err := os.Open(ref.indexPath())
-	if err != nil {
-		return nil, err
-	}
-	defer indexJSON.Close()
-
-	index := &imgspecv1.Index{}
-	if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
-		return nil, err
-	}
-	return index, nil
-}
-
-func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
-	index, err := ref.getIndex()
-	if err != nil {
-		return imgspecv1.Descriptor{}, err
-	}
-
-	var d *imgspecv1.Descriptor
-	if ref.image == "" {
-		// return manifest if only one image is in the oci directory
-		if len(index.Manifests) == 1 {
-			d = &index.Manifests[0]
-		} else {
-			// ask user to choose image when more than one image in the oci directory
-			return imgspecv1.Descriptor{}, errors.New("more than one image in oci, choose an image")
-		}
-	} else {
-		// if image specified, look through all manifests for a match
-		for _, md := range index.Manifests {
-			if md.MediaType != imgspecv1.MediaTypeImageManifest {
-				continue
-			}
-			refName, ok := md.Annotations["org.opencontainers.image.ref.name"]
-			if !ok {
-				continue
-			}
-			if refName == ref.image {
-				d = &md
-				break
-			}
-		}
-	}
-	if d == nil {
-		return imgspecv1.Descriptor{}, fmt.Errorf("no descriptor found for reference %q", ref.image)
-	}
-	return *d, nil
-}
-
-// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
-// when pulling an image
-func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor, error) {
-	ociRef, ok := imgRef.(ociReference)
-	if !ok {
-		return imgspecv1.Descriptor{}, errors.Errorf("error typecasting, need type ociRef")
-	}
-	return ociRef.getManifestDescriptor()
-}
-
-// NewImageSource returns a types.ImageSource for this reference.
-// The caller must call .Close() on the returned ImageSource.
-func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) {
-	return newImageSource(ctx, ref)
-}
-
-// NewImageDestination returns a types.ImageDestination for this reference.
-// The caller must call .Close() on the returned ImageDestination.
-func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
-	return newImageDestination(ctx, ref)
-}
-
-// DeleteImage deletes the named image from the registry, if supported.
-func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
-	return errors.Errorf("Deleting images not implemented for oci: images")
-}
-
-// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
-func (ref ociReference) ociLayoutPath() string {
-	return filepath.Join(ref.dir, "oci-layout")
-}
-
-// indexPath returns a path for the index.json within a directory using OCI conventions.
-func (ref ociReference) indexPath() string {
-	return filepath.Join(ref.dir, "index.json")
-}
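Taken together, ociLayoutPath, indexPath, and blobPath (next) assume this on-disk shape (illustrative sketch, not from the original file):

	// <dir>/oci-layout          – {"imageLayoutVersion": "1.0.0"}
	// <dir>/index.json          – image index decoded by getIndex
	// <dir>/blobs/sha256/<hex>  – manifest, config, and layer blobs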
-// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
-func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) {
-	if err := digest.Validate(); err != nil {
-		return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
-	}
-	blobDir := filepath.Join(ref.dir, "blobs")
-	if sharedBlobDir != "" {
-		blobDir = sharedBlobDir
-	}
-	return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
-}
diff --git a/vendor/github.com/containers/image/openshift/openshift-copies.go b/vendor/github.com/containers/image/openshift/openshift-copies.go
deleted file mode 100644
index 01fe71a243..0000000000
--- a/vendor/github.com/containers/image/openshift/openshift-copies.go
+++ /dev/null
@@ -1,1174 +0,0 @@
-package openshift
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"path"
-	"path/filepath"
-	"reflect"
-	"strings"
-	"time"
-
-	"github.com/ghodss/yaml"
-	"github.com/imdario/mergo"
-	"github.com/pkg/errors"
-	"golang.org/x/net/http2"
-	"k8s.io/client-go/util/homedir"
-)
-
-// restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
-// restTLSClientConfig contains settings to enable transport layer security
-type restTLSClientConfig struct {
-	// Server requires TLS client certificate authentication
-	CertFile string
-	// Server requires TLS client certificate authentication
-	KeyFile string
-	// Trusted root certificates for server
-	CAFile string
-
-	// CertData holds PEM-encoded bytes (typically read from a client certificate file).
-	// CertData takes precedence over CertFile
-	CertData []byte
-	// KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
-	// KeyData takes precedence over KeyFile
-	KeyData []byte
-	// CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
-	// CAData takes precedence over CAFile
-	CAData []byte
-}
-
-// restConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.Config.
-// Config holds the common attributes that can be passed to a Kubernetes client on
-// initialization.
-type restConfig struct {
-	// Host must be a host string, a host:port pair, or a URL to the base of the apiserver.
-	// If a URL is given then the (optional) Path of that URL represents a prefix that must
-	// be appended to all request URIs used to access the apiserver. This allows a frontend
-	// proxy to easily relocate all of the apiserver endpoints.
-	Host string
-
-	// Server requires Basic authentication
-	Username string
-	Password string
-
-	// Server requires Bearer authentication. This client will not attempt to use
-	// refresh tokens for an OAuth2 flow.
-	// TODO: demonstrate an OAuth2 compatible client.
-	BearerToken string
-
-	// TLSClientConfig contains settings to enable transport layer security
-	restTLSClientConfig
-
-	// Server should be accessed without verifying the TLS
-	// certificate. For testing only.
-	Insecure bool
-}
-
-// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfig.
-// ClientConfig is used to make it easy to get an api server client
-type clientConfig interface {
-	// ClientConfig returns a complete client config
-	ClientConfig() (*restConfig, error)
-}
-
-// defaultClientConfig is a modified copy of openshift/origin/pkg/cmd/util/clientcmd.DefaultClientConfig.
-func defaultClientConfig() clientConfig { - loadingRules := newOpenShiftClientConfigLoadingRules() - // REMOVED: Allowing command-line overriding of loadingRules - // REMOVED: clientcmd.ConfigOverrides - - clientConfig := newNonInteractiveDeferredLoadingClientConfig(loadingRules) - - return clientConfig -} - -var recommendedHomeFile = path.Join(homedir.HomeDir(), ".kube/config") - -// newOpenShiftClientConfigLoadingRules is a modified copy of openshift/origin/pkg/cmd/cli/config.NewOpenShiftClientConfigLoadingRules. -// NewOpenShiftClientConfigLoadingRules returns file priority loading rules for OpenShift. -// 1. --config value -// 2. if KUBECONFIG env var has a value, use it. Otherwise, ~/.kube/config file -func newOpenShiftClientConfigLoadingRules() *clientConfigLoadingRules { - chain := []string{} - - envVarFile := os.Getenv("KUBECONFIG") - if len(envVarFile) != 0 { - chain = append(chain, filepath.SplitList(envVarFile)...) - } else { - chain = append(chain, recommendedHomeFile) - } - - return &clientConfigLoadingRules{ - Precedence: chain, - // REMOVED: Migration support; run (oc login) to trigger migration - } -} - -// deferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig. -// DeferredLoadingClientConfig is a ClientConfig interface that is backed by a set of loading rules -// It is used in cases where the loading rules may change after you've instantiated them and you want to be sure that -// the most recent rules are used. This is useful in cases where you bind flags to loading rule parameters before -// the parse happens and you want your calling code to be ignorant of how the values are being mutated to avoid -// passing extraneous information down a call stack -type deferredLoadingClientConfig struct { - loadingRules *clientConfigLoadingRules - - clientConfig clientConfig -} - -// NewNonInteractiveDeferredLoadingClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveDeferredLoadingClientConfig. -// NewNonInteractiveDeferredLoadingClientConfig creates a ConfigClientClientConfig using the passed context name -func newNonInteractiveDeferredLoadingClientConfig(loadingRules *clientConfigLoadingRules) clientConfig { - return &deferredLoadingClientConfig{loadingRules: loadingRules} -} - -func (config *deferredLoadingClientConfig) createClientConfig() (clientConfig, error) { - if config.clientConfig == nil { - // REMOVED: Support for concurrent use in multiple threads. - mergedConfig, err := config.loadingRules.Load() - if err != nil { - return nil, err - } - - var mergedClientConfig clientConfig - // REMOVED: Interactive fallback support. - mergedClientConfig = newNonInteractiveClientConfig(*mergedConfig) - - config.clientConfig = mergedClientConfig - } - - return config.clientConfig, nil -} - -// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DeferredLoadingClientConfig.ClientConfig. -// ClientConfig implements ClientConfig -func (config *deferredLoadingClientConfig) ClientConfig() (*restConfig, error) { - mergedClientConfig, err := config.createClientConfig() - if err != nil { - return nil, err - } - mergedConfig, err := mergedClientConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: In-cluster service account configuration use. 
-
-	return mergedConfig, nil
-}
-
-var (
-	// DefaultCluster is the cluster config used when no other config is specified
-	// TODO: eventually apiserver should start on 443 and be secure by default
-	defaultCluster = clientcmdCluster{Server: "http://localhost:8080"}
-
-	// EnvVarCluster allows overriding the DefaultCluster using an envvar for the server name
-	envVarCluster = clientcmdCluster{Server: os.Getenv("KUBERNETES_MASTER")}
-)
-
-// directClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.
-// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
-type directClientConfig struct {
-	config clientcmdConfig
-}
-
-// newNonInteractiveClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.NewNonInteractiveClientConfig.
-// NewNonInteractiveClientConfig creates a DirectClientConfig using the passed context name and does not have a fallback reader for auth information
-func newNonInteractiveClientConfig(config clientcmdConfig) clientConfig {
-	return &directClientConfig{config}
-}
-
-// ClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ClientConfig.
-// ClientConfig implements ClientConfig
-func (config *directClientConfig) ClientConfig() (*restConfig, error) {
-	if err := config.ConfirmUsable(); err != nil {
-		return nil, err
-	}
-
-	configAuthInfo := config.getAuthInfo()
-	configClusterInfo := config.getCluster()
-
-	clientConfig := &restConfig{}
-	clientConfig.Host = configClusterInfo.Server
-	if u, err := url.ParseRequestURI(clientConfig.Host); err == nil && u.Opaque == "" && len(u.Path) > 1 {
-		u.RawQuery = ""
-		u.Fragment = ""
-		clientConfig.Host = u.String()
-	}
-
-	// only try to read the auth information if we are secure
-	if isConfigTransportTLS(*clientConfig) {
-		var err error
-
-		// mergo is first-write-wins for map values and last-write-wins for interface values
-		// NOTE: This behavior changed with https://github.com/imdario/mergo/commit/d304790b2ed594794496464fadd89d2bb266600a.
-		// Our mergo.Merge version is older than this change.
-		// REMOVED: Support for interactive fallback.
-		userAuthPartialConfig, err := getUserIdentificationPartialConfig(configAuthInfo)
-		if err != nil {
-			return nil, err
-		}
-		mergo.Merge(clientConfig, userAuthPartialConfig)
-
-		serverAuthPartialConfig, err := getServerIdentificationPartialConfig(configAuthInfo, configClusterInfo)
-		if err != nil {
-			return nil, err
-		}
-		mergo.Merge(clientConfig, serverAuthPartialConfig)
-	}
-
-	return clientConfig, nil
-}
-
-// getServerIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getServerIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for the server identification:
-// 1. configClusterInfo (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. load the ~/.kubernetes_auth file as a default
-func getServerIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo, configClusterInfo clientcmdCluster) (*restConfig, error) {
-	mergedConfig := &restConfig{}
-
-	// configClusterInfo holds the information identifying the server provided by .kubeconfig
-	configClientConfig := &restConfig{}
-	configClientConfig.CAFile = configClusterInfo.CertificateAuthority
-	configClientConfig.CAData = configClusterInfo.CertificateAuthorityData
-	configClientConfig.Insecure = configClusterInfo.InsecureSkipTLSVerify
-	mergo.Merge(mergedConfig, configClientConfig)
-
-	return mergedConfig, nil
-}
-
-// getUserIdentificationPartialConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.getUserIdentificationPartialConfig.
-// clientauth.Info objects contain both user identification and server identification. We want different precedence orders for
-// both, so we have to split the objects and merge them separately.
-// We want this order of precedence for user identification:
-// 1. configAuthInfo minus auth-path (the final result of command line flags and merged .kubeconfig files)
-// 2. configAuthInfo.auth-path (this file can contain information that conflicts with #1, and we want #1 to win the priority)
-// 3. if there is not enough information to identify the user, try to load the ~/.kubernetes_auth file
-// 4. if there is not enough information to identify the user, prompt if possible
-func getUserIdentificationPartialConfig(configAuthInfo clientcmdAuthInfo) (*restConfig, error) {
-	mergedConfig := &restConfig{}
-
-	// blindly overwrite existing values based on precedence
-	if len(configAuthInfo.Token) > 0 {
-		mergedConfig.BearerToken = configAuthInfo.Token
-	}
-	if len(configAuthInfo.ClientCertificate) > 0 || len(configAuthInfo.ClientCertificateData) > 0 {
-		mergedConfig.CertFile = configAuthInfo.ClientCertificate
-		mergedConfig.CertData = configAuthInfo.ClientCertificateData
-		mergedConfig.KeyFile = configAuthInfo.ClientKey
-		mergedConfig.KeyData = configAuthInfo.ClientKeyData
-	}
-	if len(configAuthInfo.Username) > 0 || len(configAuthInfo.Password) > 0 {
-		mergedConfig.Username = configAuthInfo.Username
-		mergedConfig.Password = configAuthInfo.Password
-	}
-
-	// REMOVED: prompting for missing information.
-	return mergedConfig, nil
-}
-
-// canIdentifyUser is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.canIdentifyUser
-func canIdentifyUser(config restConfig) bool {
-	return len(config.Username) > 0 ||
-		(len(config.CertFile) > 0 || len(config.CertData) > 0) ||
-		len(config.BearerToken) > 0
-
-}
-
-// ConfirmUsable is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.ConfirmUsable.
-// ConfirmUsable looks at a particular context and determines if that particular part of the config is usable. There might still be errors in the config,
-// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
-func (config *directClientConfig) ConfirmUsable() error {
-	var validationErrors []error
-	validationErrors = append(validationErrors, validateAuthInfo(config.getAuthInfoName(), config.getAuthInfo())...)
-	validationErrors = append(validationErrors, validateClusterInfo(config.getClusterName(), config.getCluster())...)
- // when direct client config is specified, and our only error is that no server is defined, we should - // return a standard "no config" error - if len(validationErrors) == 1 && validationErrors[0] == errEmptyCluster { - return newErrConfigurationInvalid([]error{errEmptyConfig}) - } - return newErrConfigurationInvalid(validationErrors) -} - -// getContextName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContextName. -func (config *directClientConfig) getContextName() string { - // REMOVED: overrides support - return config.config.CurrentContext -} - -// getAuthInfoName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfoName. -func (config *directClientConfig) getAuthInfoName() string { - // REMOVED: overrides support - return config.getContext().AuthInfo -} - -// getClusterName is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getClusterName. -func (config *directClientConfig) getClusterName() string { - // REMOVED: overrides support - return config.getContext().Cluster -} - -// getContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getContext. -func (config *directClientConfig) getContext() clientcmdContext { - contexts := config.config.Contexts - contextName := config.getContextName() - - var mergedContext clientcmdContext - if configContext, exists := contexts[contextName]; exists { - mergo.Merge(&mergedContext, configContext) - } - // REMOVED: overrides support - - return mergedContext -} - -var ( - errEmptyConfig = errors.New("no configuration has been provided") - // message is for consistency with old behavior - errEmptyCluster = errors.New("cluster has no server defined") -) - -// validateClusterInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateClusterInfo. -// validateClusterInfo looks for conflicts and errors in the cluster info -func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []error { - var validationErrors []error - - if reflect.DeepEqual(clientcmdCluster{}, clusterInfo) { - return []error{errEmptyCluster} - } - - if len(clusterInfo.Server) == 0 { - if len(clusterName) == 0 { - validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined")) - } else { - validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName)) - } - } - // Make sure CA data and CA file aren't both specified - if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName)) - } - if len(clusterInfo.CertificateAuthority) != 0 { - clientCertCA, err := os.Open(clusterInfo.CertificateAuthority) - defer clientCertCA.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err)) - } - } - - return validationErrors -} - -// validateAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.validateAuthInfo. 
-// validateAuthInfo looks for conflicts and errors in the auth info -func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error { - var validationErrors []error - - usingAuthPath := false - methods := make([]string, 0, 3) - if len(authInfo.Token) != 0 { - methods = append(methods, "token") - } - if len(authInfo.Username) != 0 || len(authInfo.Password) != 0 { - methods = append(methods, "basicAuth") - } - - if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 { - // Make sure cert data and file aren't both specified - if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName)) - } - // Make sure key data and file aren't both specified - if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName)) - } - // Make sure a key is specified - if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 { - validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName)) - } - - if len(authInfo.ClientCertificate) != 0 { - clientCertFile, err := os.Open(authInfo.ClientCertificate) - defer clientCertFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err)) - } - } - if len(authInfo.ClientKey) != 0 { - clientKeyFile, err := os.Open(authInfo.ClientKey) - defer clientKeyFile.Close() - if err != nil { - validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err)) - } - } - } - - // authPath also provides information for the client to identify the server, so allow multiple auth methods in that case - if (len(methods) > 1) && (!usingAuthPath) { - validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods)) - } - - return validationErrors -} - -// getAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getAuthInfo. -func (config *directClientConfig) getAuthInfo() clientcmdAuthInfo { - authInfos := config.config.AuthInfos - authInfoName := config.getAuthInfoName() - - var mergedAuthInfo clientcmdAuthInfo - if configAuthInfo, exists := authInfos[authInfoName]; exists { - mergo.Merge(&mergedAuthInfo, configAuthInfo) - } - // REMOVED: overrides support - - return mergedAuthInfo -} - -// getCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.DirectClientConfig.getCluster. 
-func (config *directClientConfig) getCluster() clientcmdCluster {
-	clusterInfos := config.config.Clusters
-	clusterInfoName := config.getClusterName()
-
-	var mergedClusterInfo clientcmdCluster
-	mergo.Merge(&mergedClusterInfo, defaultCluster)
-	mergo.Merge(&mergedClusterInfo, envVarCluster)
-	if configClusterInfo, exists := clusterInfos[clusterInfoName]; exists {
-		mergo.Merge(&mergedClusterInfo, configClusterInfo)
-	}
-	// REMOVED: overrides support
-
-	return mergedClusterInfo
-}
-
-// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
-// This helper implements the error and Errors interfaces. Keeping it private
-// prevents people from making an aggregate of 0 errors, which is not
-// an error, but does satisfy the error interface.
-type aggregateErr []error
-
-// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
-// NewAggregate converts a slice of errors into an Aggregate interface, which
-// is itself an implementation of the error interface. If the slice is empty,
-// this returns nil.
-// It will check if any element of the input error list is nil, to avoid a
-// nil pointer panic when calling Error().
-func newAggregate(errlist []error) error {
-	if len(errlist) == 0 {
-		return nil
-	}
-	// In case the input error list contains nil entries
-	var errs []error
-	for _, e := range errlist {
-		if e != nil {
-			errs = append(errs, e)
-		}
-	}
-	if len(errs) == 0 {
-		return nil
-	}
-	return aggregateErr(errs)
-}
-
-// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
-// Error is part of the error interface.
-func (agg aggregateErr) Error() string {
-	if len(agg) == 0 {
-		// This should never happen, really.
-		return ""
-	}
-	if len(agg) == 1 {
-		return agg[0].Error()
-	}
-	result := fmt.Sprintf("[%s", agg[0].Error())
-	for i := 1; i < len(agg); i++ {
-		result += fmt.Sprintf(", %s", agg[i].Error())
-	}
-	result += "]"
-	return result
-}
-
-// REMOVED: aggregateErr.Errors
-
-// errConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
-// errConfigurationInvalid is a set of errors indicating the configuration is invalid.
-type errConfigurationInvalid []error
-
-var _ error = errConfigurationInvalid{}
-
-// REMOVED: utilerrors.Aggregate implementation for errConfigurationInvalid.
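The getCluster and getAuthInfo merges above depend on the vendored mergo filling only zero-valued destination fields, so earlier merges win. A sketch of that precedence (hypothetical values, pre-d304790 mergo behavior as noted earlier):

	dst := clientcmdCluster{Server: "https://k8s.example.com"}
	mergo.Merge(&dst, defaultCluster)
	// dst.Server stays "https://k8s.example.com"; the default only fills empty fields.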
-// newErrConfigurationInvalid is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.newErrConfigurationInvalid.
-func newErrConfigurationInvalid(errs []error) error {
-	switch len(errs) {
-	case 0:
-		return nil
-	default:
-		return errConfigurationInvalid(errs)
-	}
-}
-
-// Error implements the error interface
-func (e errConfigurationInvalid) Error() string {
-	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
-}
-
-// clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
-// ClientConfigLoadingRules is an ExplicitPath and string slice of specific locations that are used for merging together a Config
-// Callers can put the chain together however they want, but we'd recommend:
-// EnvVarPathFiles if set (a list of files if set) OR the HomeDirectoryPath
-// ExplicitPath is special, because if a user specifically requests a certain file to be used, an error is reported if this file is not present
-type clientConfigLoadingRules struct {
-	Precedence []string
-}
-
-// Load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.Load
-// Load starts by running the MigrationRules and then
-// takes the loading rules and returns a Config object based on the following rules.
-// If the ExplicitPath is set, return the unmerged explicit file.
-// Otherwise, return a merged config based on the Precedence slice.
-// A missing ExplicitPath file produces an error. Empty filenames or other missing files are ignored.
-// Read errors or files with non-deserializable content produce errors.
-// The first file to set a particular map key wins and the map key's value is never changed.
-// BUT, if you set a struct value that is NOT contained inside of a map, the value WILL be changed.
-// This results in some odd looking logic to merge in one direction, merge in the other, and then merge the two.
-// It also means that if two files specify a "red-user", only values from the first file's red-user are used. Even
-// non-conflicting entries from the second file's "red-user" are discarded.
-// Relative paths inside of the .kubeconfig files are resolved against the .kubeconfig file's parent folder
-// and only absolute file paths are returned.
-func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
-	errlist := []error{}
-
-	kubeConfigFiles := []string{}
-
-	// REMOVED: explicit path support
-	kubeConfigFiles = append(kubeConfigFiles, rules.Precedence...)
-
-	kubeconfigs := []*clientcmdConfig{}
-	// read and cache the config files so that we only look at them once
-	for _, filename := range kubeConfigFiles {
-		if len(filename) == 0 {
-			// no work to do
-			continue
-		}
-
-		config, err := loadFromFile(filename)
-		if os.IsNotExist(err) {
-			// skip missing files
-			continue
-		}
-		if err != nil {
-			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
-			continue
-		}
-
-		kubeconfigs = append(kubeconfigs, config)
-	}
-
-	// first merge all of our maps
-	mapConfig := clientcmdNewConfig()
-	for _, kubeconfig := range kubeconfigs {
-		mergo.Merge(mapConfig, kubeconfig)
-	}
-
-	// merge all of the struct values in the reverse order so that priority is given correctly
-	// errors are not added to the list the second time
-	nonMapConfig := clientcmdNewConfig()
-	for i := len(kubeconfigs) - 1; i >= 0; i-- {
-		kubeconfig := kubeconfigs[i]
-		mergo.Merge(nonMapConfig, kubeconfig)
-	}
-
-	// since values are overwritten, but map values are not, we can merge the non-map config on top of the map config and
-	// get the values we expect.
- config := clientcmdNewConfig() - mergo.Merge(config, mapConfig) - mergo.Merge(config, nonMapConfig) - - // REMOVED: Possibility to skip this. - if err := resolveLocalPaths(config); err != nil { - errlist = append(errlist, err) - } - - return config, newAggregate(errlist) -} - -// loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile -// LoadFromFile takes a filename and deserializes the contents into Config object -func loadFromFile(filename string) (*clientcmdConfig, error) { - kubeconfigBytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - config, err := load(kubeconfigBytes) - if err != nil { - return nil, err - } - - // set LocationOfOrigin on every Cluster, User, and Context - for key, obj := range config.AuthInfos { - obj.LocationOfOrigin = filename - config.AuthInfos[key] = obj - } - for key, obj := range config.Clusters { - obj.LocationOfOrigin = filename - config.Clusters[key] = obj - } - for key, obj := range config.Contexts { - obj.LocationOfOrigin = filename - config.Contexts[key] = obj - } - - if config.AuthInfos == nil { - config.AuthInfos = map[string]*clientcmdAuthInfo{} - } - if config.Clusters == nil { - config.Clusters = map[string]*clientcmdCluster{} - } - if config.Contexts == nil { - config.Contexts = map[string]*clientcmdContext{} - } - - return config, nil -} - -// load is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.Load -// Load takes a byte slice and deserializes the contents into Config object. -// Encapsulates deserialization without assuming the source is a file. -func load(data []byte) (*clientcmdConfig, error) { - config := clientcmdNewConfig() - // if there's no data in a file, return the default object instead of failing (DecodeInto reject empty input) - if len(data) == 0 { - return config, nil - } - // Note: This does absolutely no kind/version checking or conversions. - data, err := yaml.YAMLToJSON(data) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, config); err != nil { - return nil, err - } - return config, nil -} - -// resolveLocalPaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolveLocalPaths. -// ResolveLocalPaths resolves all relative paths in the config object with respect to the stanza's LocationOfOrigin -// this cannot be done directly inside of LoadFromFile because doing so there would make it impossible to load a file without -// modification of its contents. 
-func resolveLocalPaths(config *clientcmdConfig) error {
-	for _, cluster := range config.Clusters {
-		if len(cluster.LocationOfOrigin) == 0 {
-			continue
-		}
-		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
-		if err != nil {
-			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
-		}
-
-		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
-			return err
-		}
-	}
-	for _, authInfo := range config.AuthInfos {
-		if len(authInfo.LocationOfOrigin) == 0 {
-			continue
-		}
-		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
-		if err != nil {
-			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
-		}
-
-		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// getClusterFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetClusterFileReferences.
-func getClusterFileReferences(cluster *clientcmdCluster) []*string {
-	return []*string{&cluster.CertificateAuthority}
-}
-
-// getAuthInfoFileReferences is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.GetAuthInfoFileReferences.
-func getAuthInfoFileReferences(authInfo *clientcmdAuthInfo) []*string {
-	return []*string{&authInfo.ClientCertificate, &authInfo.ClientKey}
-}
-
-// resolvePaths is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules.resolvePaths.
-// ResolvePaths updates the given refs to be absolute paths, relative to the given base directory
-func resolvePaths(refs []*string, base string) error {
-	for _, ref := range refs {
-		// Don't resolve empty paths
-		if len(*ref) > 0 {
-			// Don't resolve absolute paths
-			if !filepath.IsAbs(*ref) {
-				*ref = filepath.Join(base, *ref)
-			}
-		}
-	}
-	return nil
-}
-
-// restClientFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.RESTClientFor.
-// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config
-// object. Note that a RESTClient may require fields that are optional when initializing a Client.
-// A RESTClient created by this method is generic - it expects to operate on an API that follows
-// the Kubernetes conventions, but may not be the Kubernetes API.
-func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
-	// REMOVED: Configurable GroupVersion, Codec
-	// REMOVED: Configurable versionedAPIPath
-	baseURL, err := defaultServerURLFor(config)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	transport, err := transportFor(config)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	var httpClient *http.Client
-	if transport != http.DefaultTransport {
-		httpClient = &http.Client{Transport: transport}
-	}
-
-	// REMOVED: Configurable QPS, Burst, ContentConfig
-	// REMOVED: Actually returning a RESTClient object.
-	return baseURL, httpClient, nil
-}
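A quick illustration of resolvePaths above: relative certificate paths in a kubeconfig are resolved against that file's own directory (hypothetical paths):

	base, _ := filepath.Abs(filepath.Dir("/home/user/.kube/config"))
	ref := "certs/ca.pem"
	if !filepath.IsAbs(ref) {
		ref = filepath.Join(base, ref) // /home/user/.kube/certs/ca.pem
	}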
-// defaultServerURL is a modified copy of k8s.io/kubernetes/pkg/client/restclient.DefaultServerURL.
-// DefaultServerURL converts a host, host:port, or URL string to the default base server API path
-// to use with a Client at a given API version following the standard conventions for a
-// Kubernetes API.
-func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
-	if host == "" {
-		return nil, errors.Errorf("host must be a URL or a host:port pair")
-	}
-	base := host
-	hostURL, err := url.Parse(base)
-	if err != nil {
-		return nil, err
-	}
-	if hostURL.Scheme == "" {
-		scheme := "http://"
-		if defaultTLS {
-			scheme = "https://"
-		}
-		hostURL, err = url.Parse(scheme + base)
-		if err != nil {
-			return nil, err
-		}
-		if hostURL.Path != "" && hostURL.Path != "/" {
-			return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
-		}
-	}
-
-	// REMOVED: versionedAPIPath computation.
-	return hostURL, nil
-}
-
-// defaultServerURLFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.defaultServerURLFor.
-// defaultServerURLFor is shared between IsConfigTransportTLS and RESTClientFor. It
-// requires Host and Version to be set prior to being called.
-func defaultServerURLFor(config *restConfig) (*url.URL, error) {
-	// TODO: move the default to secure when the apiserver supports TLS by default
-	// config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA."
-	hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0
-	hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0
-	defaultTLS := hasCA || hasCert || config.Insecure
-	host := config.Host
-	if host == "" {
-		host = "localhost"
-	}
-
-	// REMOVED: Configurable APIPath, GroupVersion
-	return defaultServerURL(host, defaultTLS)
-}
-
-// transportFor is a modified copy of k8s.io/kubernetes/pkg/client/restclient.transportFor.
-// TransportFor returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config. Will return the
-// default http.DefaultTransport if no special case behavior is needed.
-func transportFor(config *restConfig) (http.RoundTripper, error) {
-	// REMOVED: separation between restclient.Config and transport.Config, Transport, WrapTransport support
-	return transportNew(config)
-}
-
-// isConfigTransportTLS is a modified copy of k8s.io/kubernetes/pkg/client/restclient.IsConfigTransportTLS.
-// IsConfigTransportTLS returns true if and only if the provided
-// config will result in a protected connection to the server when it
-// is passed to restclient.RESTClientFor(). Use to determine when to
-// send credentials over the wire.
-//
-// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are
-// still possible.
-func isConfigTransportTLS(config restConfig) bool {
-	baseURL, err := defaultServerURLFor(&config)
-	if err != nil {
-		return false
-	}
-	return baseURL.Scheme == "https"
-}
-
-// transportNew is a modified copy of k8s.io/kubernetes/pkg/client/transport.New.
-// New returns an http.RoundTripper that will provide the authentication
-// or transport level security defined by the provided Config.
-func transportNew(config *restConfig) (http.RoundTripper, error) {
-	// REMOVED: custom config.Transport support.
-	// Set transport level security
-
-	var (
-		rt  http.RoundTripper
-		err error
-	)
-
-	rt, err = tlsCacheGet(config)
-	if err != nil {
-		return nil, err
-	}
-
-	// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
- if len(config.Username) != 0 && len(config.BearerToken) != 0 { - return nil, errors.Errorf("username/password or bearer token may be set, but not both") - } - - return rt, nil -} - -// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR. -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} - -// tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get. -func tlsCacheGet(config *restConfig) (http.RoundTripper, error) { - // REMOVED: any actual caching - - // Get the TLS options for this client config - tlsConfig, err := tlsConfigFor(config) - if err != nil { - return nil, err - } - // The options didn't require a custom TLS config - if tlsConfig == nil { - return http.DefaultTransport, nil - } - - // REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here. - t := &http.Transport{ - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - Proxy: newProxierWithNoProxyCIDR(http.ProxyFromEnvironment), - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: tlsConfig, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - } - // Allow clients to disable http2 if needed. - if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 { - _ = http2.ConfigureTransport(t) - } - return t, nil -} - -// tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor. -// TLSConfigFor returns a tls.Config that will provide the transport level security defined -// by the provided Config. Will return nil if no transport level security is requested. 
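One note on the proxy handling above before tlsConfigFor: requests to IPs inside a NO_PROXY CIDR bypass the proxy entirely (the proxier returns nil, nil). A sketch, assuming NO_PROXY=10.0.0.0/8 is set in the environment:

	proxier := newProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
	req, _ := http.NewRequest("GET", "http://10.1.2.3:8080/healthz", nil)
	u, _ := proxier(req) // u == nil: 10.1.2.3 falls inside 10.0.0.0/8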
-func tlsConfigFor(c *restConfig) (*tls.Config, error) {
-	if !(c.HasCA() || c.HasCertAuth() || c.Insecure) {
-		return nil, nil
-	}
-	if c.HasCA() && c.Insecure {
-		return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
-	}
-	if err := loadTLSFiles(c); err != nil {
-		return nil, err
-	}
-
-	tlsConfig := &tls.Config{
-		// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
-		MinVersion:         tls.VersionTLS10,
-		InsecureSkipVerify: c.Insecure,
-	}
-
-	if c.HasCA() {
-		tlsConfig.RootCAs = rootCertPool(c.CAData)
-	}
-
-	if c.HasCertAuth() {
-		cert, err := tls.X509KeyPair(c.CertData, c.KeyData)
-		if err != nil {
-			return nil, err
-		}
-		tlsConfig.Certificates = []tls.Certificate{cert}
-	}
-
-	return tlsConfig, nil
-}
-
-// loadTLSFiles is a modified copy of k8s.io/kubernetes/pkg/client/transport.loadTLSFiles.
-// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData,
-// KeyData, and CAData fields, or returns an error. If no error is returned, all three fields are
-// either populated or were empty to start.
-func loadTLSFiles(c *restConfig) error {
-	var err error
-	c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile)
-	if err != nil {
-		return err
-	}
-
-	c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile)
-	if err != nil {
-		return err
-	}
-
-	c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-// dataFromSliceOrFile is a modified copy of k8s.io/kubernetes/pkg/client/transport.dataFromSliceOrFile.
-// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file,
-// or an error if an error occurred reading the file
-func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
-	if len(data) > 0 {
-		return data, nil
-	}
-	if len(file) > 0 {
-		fileData, err := ioutil.ReadFile(file)
-		if err != nil {
-			return []byte{}, err
-		}
-		return fileData, nil
-	}
-	return nil, nil
-}
-
-// rootCertPool is a modified copy of k8s.io/kubernetes/pkg/client/transport.rootCertPool.
-// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
-// When caData is not empty, it will be the ONLY information used in the CertPool.
-func rootCertPool(caData []byte) *x509.CertPool {
-	// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
-	// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
-	// It doesn't allow trusting either/or, but hopefully that won't be an issue
-	if len(caData) == 0 {
-		return nil
-	}
-
-	// if we have caData, use it
-	certPool := x509.NewCertPool()
-	certPool.AppendCertsFromPEM(caData)
-	return certPool
-}
-
-// HasCA is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCA.
-// HasCA returns whether the configuration has a certificate authority or not.
-func (c *restConfig) HasCA() bool {
-	return len(c.CAData) > 0 || len(c.CAFile) > 0
-}
-
-// HasCertAuth is a modified copy of k8s.io/kubernetes/pkg/client/transport.Config.HasCertAuth.
-// HasCertAuth returns whether the configuration has certificate authentication or not.
-func (c *restConfig) HasCertAuth() bool {
-	return len(c.CertData) != 0 || len(c.CertFile) != 0
-}
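dataFromSliceOrFile above is what gives the *Data fields their documented precedence over the *File paths: when in-line bytes are present, the file is never read. A sketch (hypothetical path):

	data, _ := dataFromSliceOrFile([]byte("inline PEM"), "/etc/ssl/ca.pem")
	// data == []byte("inline PEM"); /etc/ssl/ca.pem is not opened.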
-// clientcmdConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Config.
-// Config holds the information needed to build and connect to remote kubernetes clusters as a given user
-// IMPORTANT if you add fields to this struct, please update IsConfigEmpty()
-type clientcmdConfig struct {
-	// Clusters is a map of referenceable names to cluster configs
-	Clusters clustersMap `json:"clusters"`
-	// AuthInfos is a map of referenceable names to user configs
-	AuthInfos authInfosMap `json:"users"`
-	// Contexts is a map of referenceable names to context configs
-	Contexts contextsMap `json:"contexts"`
-	// CurrentContext is the name of the context that you would like to use by default
-	CurrentContext string `json:"current-context"`
-}
-
-type clustersMap map[string]*clientcmdCluster
-
-func (m *clustersMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedCluster
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		cluster := e.Cluster // Allocates a new instance in each iteration
-		(*m)[e.Name] = &cluster
-	}
-	return nil
-}
-
-type authInfosMap map[string]*clientcmdAuthInfo
-
-func (m *authInfosMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedAuthInfo
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		authInfo := e.AuthInfo // Allocates a new instance in each iteration
-		(*m)[e.Name] = &authInfo
-	}
-	return nil
-}
-
-type contextsMap map[string]*clientcmdContext
-
-func (m *contextsMap) UnmarshalJSON(data []byte) error {
-	var a []v1NamedContext
-	if err := json.Unmarshal(data, &a); err != nil {
-		return err
-	}
-	for _, e := range a {
-		context := e.Context // Allocates a new instance in each iteration
-		(*m)[e.Name] = &context
-	}
-	return nil
-}
-
-// clientcmdNewConfig is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.NewConfig.
-// NewConfig is a convenience function that returns a new Config object with non-nil maps
-func clientcmdNewConfig() *clientcmdConfig {
-	return &clientcmdConfig{
-		Clusters:  make(map[string]*clientcmdCluster),
-		AuthInfos: make(map[string]*clientcmdAuthInfo),
-		Contexts:  make(map[string]*clientcmdContext),
-	}
-}
-
-// clientcmdCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Cluster.
-// Cluster contains information about how to communicate with a kubernetes cluster
-type clientcmdCluster struct {
-	// LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized.
-	LocationOfOrigin string
-	// Server is the address of the kubernetes cluster (https://hostname:port).
-	Server string `json:"server"`
-	// InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure.
-	InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
-	// CertificateAuthority is the path to a cert file for the certificate authority.
-	CertificateAuthority string `json:"certificate-authority,omitempty"`
-	// CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority
-	CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
-}
-
-// clientcmdAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.AuthInfo.
-// AuthInfo contains information that describes identity information. This is used to tell the kubernetes cluster who you are.
-type clientcmdAuthInfo struct {
-	// LocationOfOrigin indicates where this object came from.
It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // ClientCertificate is the path to a client cert file for TLS. - ClientCertificate string `json:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - ClientCertificateData []byte `json:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. - ClientKey string `json:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - ClientKeyData []byte `json:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - Token string `json:"token,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - Username string `json:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - Password string `json:"password,omitempty"` -} - -// clientcmdContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.Context. -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type clientcmdContext struct { - // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. - LocationOfOrigin string - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user"` - // Namespace is the default namespace to use on unspecified requests - Namespace string `json:"namespace,omitempty"` -} - -// v1NamedCluster is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedCluster. -// NamedCluster relates nicknames to cluster information -type v1NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name"` - // Cluster holds the cluster information - Cluster clientcmdCluster `json:"cluster"` -} - -// v1NamedContext is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedContext. -// NamedContext relates nicknames to context information -type v1NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name"` - // Context holds the context information - Context clientcmdContext `json:"context"` -} - -// v1NamedAuthInfo is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api.v1.NamedAuthInfo. 
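One subtlety worth calling out here: the three UnmarshalJSON methods above assign into (*m)[e.Name] without ever allocating the map, and assignment into a nil map panics, so decoding only works when the destination maps are non-nil, which is exactly what clientcmdNewConfig above guarantees. A hedged fragment, assuming the surrounding package:

// Fragment: decoding must start from clientcmdNewConfig(); a zero-value
// clientcmdConfig would panic inside UnmarshalJSON on the nil map.
cfg := clientcmdNewConfig()
raw := []byte(`{"clusters":[{"name":"default","cluster":{"server":"https://localhost:8443"}}]}`)
if err := json.Unmarshal(raw, cfg); err != nil {
	// handle error
}
// cfg.Clusters["default"].Server == "https://localhost:8443"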
-// NamedAuthInfo relates nicknames to auth information -type v1NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name"` - // AuthInfo holds the auth information - AuthInfo clientcmdAuthInfo `json:"user"` -} diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go deleted file mode 100644 index 5465591444..0000000000 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ /dev/null @@ -1,544 +0,0 @@ -package openshift - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/containers/image/docker" - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/image/version" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// openshiftClient is configuration for dealing with a single image stream, for reading or writing. -type openshiftClient struct { - ref openshiftReference - baseURL *url.URL - // Values from Kubernetes configuration - httpClient *http.Client - bearerToken string // "" if not used - username string // "" if not used - password string // if username != "" -} - -// newOpenshiftClient creates a new openshiftClient for the specified reference. -func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { - // We have already done this parsing in ParseReference, but thrown away - // httpClient. So, parse again. - // (We could also rework/split restClientFor to "get base URL" to be done - // in ParseReference, and "get httpClient" to be done here. But until/unless - // we support non-default clusters, this is good enough.) - - // Overall, this is modelled on openshift/origin/pkg/cmd/util/clientcmd.New().ClientConfig() and openshift/origin/pkg/client. - cmdConfig := defaultClientConfig() - logrus.Debugf("cmdConfig: %#v", cmdConfig) - restConfig, err := cmdConfig.ClientConfig() - if err != nil { - return nil, err - } - // REMOVED: SetOpenShiftDefaults (values are not overridable in config files, so hard-coded these defaults.) - logrus.Debugf("restConfig: %#v", restConfig) - baseURL, httpClient, err := restClientFor(restConfig) - if err != nil { - return nil, err - } - logrus.Debugf("URL: %#v", *baseURL) - - if httpClient == nil { - httpClient = http.DefaultClient - } - - return &openshiftClient{ - ref: ref, - baseURL: baseURL, - httpClient: httpClient, - bearerToken: restConfig.BearerToken, - username: restConfig.Username, - password: restConfig.Password, - }, nil -} - -// doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. 
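Failures from this endpoint usually arrive as a Kubernetes-style Status envelope rather than a bare HTTP error, and doRequest below probes the body for it before falling back to the status code. A hedged illustration, reusing the status subset struct defined later in this file (payload values invented):

raw := []byte(`{"kind":"Status","apiVersion":"v1","status":"Failure","message":"imagestreams \"foo\" not found","code":404}`)

// Sketch: decode the envelope the way doRequest does, preferring the
// server-provided message over a generic "HTTP error: status code" string.
var st status
if err := json.Unmarshal(raw, &st); err == nil && len(st.Status) > 0 && st.Status != "Success" {
	fmt.Println(st.Message) // imagestreams "foo" not found
}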
-func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { - url := *c.baseURL - url.Path = path - var requestBodyReader io.Reader - if requestBody != nil { - logrus.Debugf("Will send body: %s", requestBody) - requestBodyReader = bytes.NewReader(requestBody) - } - req, err := http.NewRequest(method, url.String(), requestBodyReader) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - - if len(c.bearerToken) != 0 { - req.Header.Set("Authorization", "Bearer "+c.bearerToken) - } else if len(c.username) != 0 { - req.SetBasicAuth(c.username, c.password) - } - req.Header.Set("Accept", "application/json, */*") - req.Header.Set("User-Agent", fmt.Sprintf("skopeo/%s", version.Version)) - if requestBody != nil { - req.Header.Set("Content-Type", "application/json") - } - - logrus.Debugf("%s %s", method, url) - res, err := c.httpClient.Do(req) - if err != nil { - return nil, err - } - defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) - if err != nil { - return nil, err - } - logrus.Debugf("Got body: %s", body) - // FIXME: Just throwing this useful information away only to try to guess later... - logrus.Debugf("Got content-type: %s", res.Header.Get("Content-Type")) - - var status status - statusValid := false - if err := json.Unmarshal(body, &status); err == nil && len(status.Status) > 0 { - statusValid = true - } - - switch { - case res.StatusCode == http.StatusSwitchingProtocols: // FIXME?! No idea why this weird case exists in k8s.io/kubernetes/pkg/client/restclient. - if statusValid && status.Status != "Success" { - return nil, errors.New(status.Message) - } - case res.StatusCode >= http.StatusOK && res.StatusCode <= http.StatusPartialContent: - // OK. - default: - if statusValid { - return nil, errors.New(status.Message) - } - return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body)) - } - - return body, nil -} - -// getImage loads the specified image object. -func (c *openshiftClient) getImage(ctx context.Context, imageStreamImageName string) (*image, error) { - // FIXME: validate components per validation.IsValidPathSegmentName? - path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreamimages/%s@%s", c.ref.namespace, c.ref.stream, imageStreamImageName) - body, err := c.doRequest(ctx, "GET", path, nil) - if err != nil { - return nil, err - } - // Note: This does absolutely no kind/version checking or conversions. - var isi imageStreamImage - if err := json.Unmarshal(body, &isi); err != nil { - return nil, err - } - return &isi.Image, nil -} - -// convertDockerImageReference takes an image API DockerImageReference value and returns a reference we can actually use; -// currently OpenShift stores the cluster-internal service IPs here, which are unusable from the outside. -func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) { - parts := strings.SplitN(ref, "/", 2) - if len(parts) != 2 { - return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref) - } - return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil -} - -type openshiftImageSource struct { - client *openshiftClient - // Values specific to this image - ctx *types.SystemContext - // State - docker types.ImageSource // The Docker Registry endpoint, or nil if not resolved yet - imageStreamImageName string // Resolved image identifier, or "" if not known yet -} - -// newImageSource creates a new ImageSource for the specified reference. 
-// The caller must call .Close() on the returned ImageSource. -func newImageSource(ctx *types.SystemContext, ref openshiftReference) (types.ImageSource, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - return &openshiftImageSource{ - client: client, - ctx: ctx, - }, nil -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (s *openshiftImageSource) Reference() types.ImageReference { - return s.client.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *openshiftImageSource) Close() error { - if s.docker != nil { - err := s.docker.Close() - s.docker = nil - - return err - } - - return nil -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetManifest(instanceDigest) -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, 0, err - } - return s.docker.GetBlob(info) -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - var imageName string - if instanceDigest == nil { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err - } - imageName = s.imageStreamImageName - } else { - imageName = instanceDigest.String() - } - image, err := s.client.getImage(ctx, imageName) - if err != nil { - return nil, err - } - var sigs [][]byte - for _, sig := range image.Signatures { - if sig.Type == imageSignatureTypeAtomic { - sigs = append(sigs, sig.Content) - } - } - return sigs, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *openshiftImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} - -// ensureImageIsResolved sets up s.docker and s.imageStreamImageName -func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { - if s.docker != nil { - return nil - } - - // FIXME: validate components per validation.IsValidPathSegmentName? 
- path := fmt.Sprintf("/oapi/v1/namespaces/%s/imagestreams/%s", s.client.ref.namespace, s.client.ref.stream) - body, err := s.client.doRequest(ctx, "GET", path, nil) - if err != nil { - return err - } - // Note: This does absolutely no kind/version checking or conversions. - var is imageStream - if err := json.Unmarshal(body, &is); err != nil { - return err - } - var te *tagEvent - for _, tag := range is.Status.Tags { - if tag.Tag != s.client.ref.dockerReference.Tag() { - continue - } - if len(tag.Items) > 0 { - te = &tag.Items[0] - break - } - } - if te == nil { - return errors.Errorf("No matching tag found") - } - logrus.Debugf("tag event %#v", te) - dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference) - if err != nil { - return err - } - logrus.Debugf("Resolved reference %#v", dockerRefString) - dockerRef, err := docker.ParseReference("//" + dockerRefString) - if err != nil { - return err - } - d, err := dockerRef.NewImageSource(s.ctx) - if err != nil { - return err - } - s.docker = d - s.imageStreamImageName = te.Image - return nil -} - -type openshiftImageDestination struct { - client *openshiftClient - docker types.ImageDestination // The Docker Registry endpoint - // State - imageStreamImageName string // "" if not yet known -} - -// newImageDestination creates a new ImageDestination for the specified reference. -func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (types.ImageDestination, error) { - client, err := newOpenshiftClient(ref) - if err != nil { - return nil, err - } - - // FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match, - // i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know - // the manifest digest at this point. - dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag()) - dockerRef, err := docker.ParseReference(dockerRefString) - if err != nil { - return nil, err - } - docker, err := dockerRef.NewImageDestination(ctx) - if err != nil { - return nil, err - } - - return &openshiftImageDestination{ - client: client, - docker: docker, - }, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *openshiftImageDestination) Reference() types.ImageReference { - return d.client.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *openshiftImageDestination) Close() error { - return d.docker.Close() -} - -func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { - return d.docker.SupportedManifestMIMETypes() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *openshiftImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. 
-func (d *openshiftImageDestination) ShouldCompressLayers() bool {
-	return true
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *openshiftImageDestination) AcceptsForeignLayerURLs() bool {
-	return true
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
-func (d *openshiftImageDestination) MustMatchRuntimeOS() bool {
-	return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-	return d.docker.PutBlob(stream, inputInfo)
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest cannot be empty.  If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
-	return d.docker.HasBlob(info)
-}
-
-func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
-	return d.docker.ReapplyBlob(info)
-}
-
-// PutManifest writes manifest to the destination.
-// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
-// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-func (d *openshiftImageDestination) PutManifest(m []byte) error {
-	manifestDigest, err := manifest.Digest(m)
-	if err != nil {
-		return err
-	}
-	d.imageStreamImageName = manifestDigest.String()
-
-	return d.docker.PutManifest(m)
-}
-
-func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
-	if d.imageStreamImageName == "" {
-		return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
-	}
-	// Because image signatures are a shared resource in Atomic Registry, the default upload
-	// always adds signatures.  Eventually we should also allow removing signatures.
-
-	if len(signatures) == 0 {
-		return nil // No need to even read the old state.
-	}
-
-	image, err := d.client.getImage(context.TODO(), d.imageStreamImageName)
-	if err != nil {
-		return err
-	}
-	existingSigNames := map[string]struct{}{}
-	for _, sig := range image.Signatures {
-		existingSigNames[sig.objectMeta.Name] = struct{}{}
-	}
-
-sigExists:
-	for _, newSig := range signatures {
-		for _, existingSig := range image.Signatures {
-			if existingSig.Type == imageSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
-				continue sigExists
-			}
-		}
-
-		// The API expects us to invent a new unique name. This is racy, but hopefully good enough.
-		var signatureName string
-		for {
-			randBytes := make([]byte, 16)
-			n, err := rand.Read(randBytes)
-			if err != nil || n != 16 {
-				return errors.Wrapf(err, "Error generating random signature len %d", n)
-			}
-			signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
-			if _, ok := existingSigNames[signatureName]; !ok {
-				break
-			}
-		}
-		// Note: This does absolutely no kind/version checking or conversions.
-		sig := imageSignature{
-			typeMeta: typeMeta{
-				Kind:       "ImageSignature",
-				APIVersion: "v1",
-			},
-			objectMeta: objectMeta{Name: signatureName},
-			Type:       imageSignatureTypeAtomic,
-			Content:    newSig,
-		}
-		body, err := json.Marshal(sig)
-		if err != nil {
-			return err
-		}
-		_, err = d.client.doRequest(context.TODO(), "POST", "/oapi/v1/imagesignatures", body)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-// WARNING: This does not have any transactional semantics:
-// - Uploaded data MAY be visible to others before Commit() is called
-// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-func (d *openshiftImageDestination) Commit() error {
-	return d.docker.Commit()
-}
-
-// These structs are subsets of github.com/openshift/origin/pkg/image/api/v1 and its dependencies.
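The subset structs below only model the handful of fields this file reads. As a hedged, invented example of the shape they are written against, an abbreviated imagestream payload and the lookup ensureImageIsResolved performs on it:

raw := []byte(`{"status":{"dockerImageRepository":"172.30.1.1:5000/ns/stream","tags":[{"tag":"latest","items":[{"dockerImageReference":"172.30.1.1:5000/ns/stream@sha256:0123","image":"sha256:0123"}]}]}}`)
var is imageStream
if err := json.Unmarshal(raw, &is); err == nil && len(is.Status.Tags) > 0 && len(is.Status.Tags[0].Items) > 0 {
	_ = is.Status.Tags[0].Items[0].Image // the pinned identifier ensureImageIsResolved stores
}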
-type imageStream struct { - Status imageStreamStatus `json:"status,omitempty"` -} -type imageStreamStatus struct { - DockerImageRepository string `json:"dockerImageRepository"` - Tags []namedTagEventList `json:"tags,omitempty"` -} -type namedTagEventList struct { - Tag string `json:"tag"` - Items []tagEvent `json:"items"` -} -type tagEvent struct { - DockerImageReference string `json:"dockerImageReference"` - Image string `json:"image"` -} -type imageStreamImage struct { - Image image `json:"image"` -} -type image struct { - objectMeta `json:"metadata,omitempty"` - DockerImageReference string `json:"dockerImageReference,omitempty"` - // DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty"` - DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty"` - DockerImageManifest string `json:"dockerImageManifest,omitempty"` - // DockerImageLayers []ImageLayer `json:"dockerImageLayers"` - Signatures []imageSignature `json:"signatures,omitempty"` -} - -const imageSignatureTypeAtomic string = "atomic" - -type imageSignature struct { - typeMeta `json:",inline"` - objectMeta `json:"metadata,omitempty"` - Type string `json:"type"` - Content []byte `json:"content"` - // Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - // ImageIdentity string `json:"imageIdentity,omitempty"` - // SignedClaims map[string]string `json:"signedClaims,omitempty"` - // Created *unversioned.Time `json:"created,omitempty"` - // IssuedBy SignatureIssuer `json:"issuedBy,omitempty"` - // IssuedTo SignatureSubject `json:"issuedTo,omitempty"` -} -type typeMeta struct { - Kind string `json:"kind,omitempty"` - APIVersion string `json:"apiVersion,omitempty"` -} -type objectMeta struct { - Name string `json:"name,omitempty"` - GenerateName string `json:"generateName,omitempty"` - Namespace string `json:"namespace,omitempty"` - SelfLink string `json:"selfLink,omitempty"` - ResourceVersion string `json:"resourceVersion,omitempty"` - Generation int64 `json:"generation,omitempty"` - DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Annotations map[string]string `json:"annotations,omitempty"` -} - -// A subset of k8s.io/kubernetes/pkg/api/unversioned/Status -type status struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` - // Reason StatusReason `json:"reason,omitempty"` - // Details *StatusDetails `json:"details,omitempty"` - Code int32 `json:"code,omitempty"` -} diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go deleted file mode 100644 index 686d806f76..0000000000 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ /dev/null @@ -1,156 +0,0 @@ -package openshift - -import ( - "fmt" - "regexp" - "strings" - - "github.com/containers/image/docker/policyconfiguration" - "github.com/containers/image/docker/reference" - genericImage "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for OpenShift registry-hosted images. 
-var Transport = openshiftTransport{} - -type openshiftTransport struct{} - -func (t openshiftTransport) Name() string { - return "atomic" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t openshiftTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// Note that imageNameRegexp is namespace/stream:tag, this -// is HOSTNAME/namespace/stream:tag or parent prefixes. -// Keep this in sync with imageNameRegexp! -var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$") - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error { - if scopeRegexp.FindStringIndex(scope) == nil { - return errors.Errorf("Invalid scope name %s", scope) - } - return nil -} - -// openshiftReference is an ImageReference for OpenShift images. -type openshiftReference struct { - dockerReference reference.NamedTagged - namespace string // Computed from dockerReference in advance. - stream string // Computed from dockerReference in advance. -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference. -func ParseReference(ref string) (types.ImageReference, error) { - r, err := reference.ParseNormalizedNamed(ref) - if err != nil { - return nil, errors.Wrapf(err, "failed to parse image reference %q", ref) - } - tagged, ok := r.(reference.NamedTagged) - if !ok { - return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref) - } - return NewReference(tagged) -} - -// NewReference returns an OpenShift reference for a reference.NamedTagged -func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) { - r := strings.SplitN(reference.Path(dockerRef), "/", 3) - if len(r) != 2 { - return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'", - reference.FamiliarString(dockerRef)) - } - return openshiftReference{ - namespace: r[0], - stream: r[1], - dockerReference: dockerRef, - }, nil -} - -func (ref openshiftReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref openshiftReference) StringWithinTransport() string { - return reference.FamiliarString(ref.dockerReference) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. 
after redirect or alias processing), or nil if unknown/not applicable. -func (ref openshiftReference) DockerReference() reference.Named { - return ref.dockerReference -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). -// Returns "" if configuration identities for these references are not supported. -func (ref openshiftReference) PolicyConfigurationIdentity() string { - res, err := policyconfiguration.DockerReferenceIdentity(ref.dockerReference) - if res == "" || err != nil { // Coverage: Should never happen, NewReference constructs a valid tagged reference. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref openshiftReference) PolicyConfigurationNamespaces() []string { - return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src, err := newImageSource(ctx, ref) - if err != nil { - return nil, err - } - return genericImage.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref openshiftReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. 
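To make the path splitting in NewReference above concrete, a worked example with invented names:

// For ParseReference("registry.example.com/myns/mystream:v1"), the name
// normalizes to a NamedTagged reference whose path is "myns/mystream", so:
//
//	namespace               = "myns"
//	stream                  = "mystream"
//	StringWithinTransport() = "registry.example.com/myns/mystream:v1"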
-func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for atomic: images") -} diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go deleted file mode 100644 index d5f0ff80cc..0000000000 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ /dev/null @@ -1,456 +0,0 @@ -// +build !containers_image_ostree_stub - -package ostree - -import ( - "bytes" - "compress/gzip" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - "unsafe" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - selinux "github.com/opencontainers/selinux/go-selinux" - "github.com/ostreedev/ostree-go/pkg/otbuiltin" - "github.com/pkg/errors" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 libselinux -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -type blobToImport struct { - Size int64 - Digest digest.Digest - BlobPath string -} - -type descriptor struct { - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` -} - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type manifestSchema struct { - LayersDescriptors []descriptor `json:"layers"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` -} - -type ostreeImageDestination struct { - ref ostreeReference - manifest string - schema manifestSchema - tmpDirPath string - blobs map[string]*blobToImport - digest digest.Digest - signaturesLen int - repo *C.struct_OstreeRepo -} - -// newImageDestination returns an ImageDestination for writing to an existing ostree. -func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDestination, error) { - tmpDirPath = filepath.Join(tmpDirPath, ref.branchName) - if err := ensureDirectoryExists(tmpDirPath); err != nil { - return nil, err - } - return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *ostreeImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *ostreeImageDestination) Close() error { - if d.repo != nil { - C.g_object_unref(C.gpointer(d.repo)) - } - return os.RemoveAll(d.tmpDirPath) -} - -func (d *ostreeImageDestination) SupportedManifestMIMETypes() []string { - return []string{ - manifest.DockerV2Schema2MediaType, - } -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. -func (d *ostreeImageDestination) SupportsSignatures() error { - return nil -} - -// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. 
-func (d *ostreeImageDestination) ShouldCompressLayers() bool { - return false -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *ostreeImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (d *ostreeImageDestination) MustMatchRuntimeOS() bool { - return true -} - -func (d *ostreeImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob") - if err != nil { - return types.BlobInfo{}, err - } - - blobPath := filepath.Join(tmpDir, "content") - blobFile, err := os.Create(blobPath) - if err != nil { - return types.BlobInfo{}, err - } - defer blobFile.Close() - - digester := digest.Canonical.Digester() - tee := io.TeeReader(stream, digester.Hash()) - - size, err := io.Copy(blobFile, tee) - if err != nil { - return types.BlobInfo{}, err - } - computedDigest := digester.Digest() - if inputInfo.Size != -1 && size != inputInfo.Size { - return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) - } - if err := blobFile.Sync(); err != nil { - return types.BlobInfo{}, err - } - - hash := computedDigest.Hex() - d.blobs[hash] = &blobToImport{Size: size, Digest: computedDigest, BlobPath: blobPath} - return types.BlobInfo{Digest: computedDigest, Size: size}, nil -} - -func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error { - entries, err := ioutil.ReadDir(dir) - if err != nil { - return err - } - - for _, info := range entries { - fullpath := filepath.Join(dir, info.Name()) - if info.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 { - if err := os.Remove(fullpath); err != nil { - return err - } - continue - } - - if selinuxHnd != nil { - relPath, err := filepath.Rel(root, fullpath) - if err != nil { - return err - } - // Handle /exports/hostfs as a special case. Files under this directory are copied to the host, - // thus we benefit from maintaining the same SELinux label they would have on the host as we could - // use hard links instead of copying the files. 
- relPath = fmt.Sprintf("/%s", strings.TrimPrefix(relPath, "exports/hostfs/")) - - relPathC := C.CString(relPath) - defer C.free(unsafe.Pointer(relPathC)) - var context *C.char - - res, err := C.selabel_lookup_raw(selinuxHnd, &context, relPathC, C.int(info.Mode()&os.ModePerm)) - if int(res) < 0 && err != syscall.ENOENT { - return errors.Wrapf(err, "cannot selabel_lookup_raw %s", relPath) - } - if int(res) == 0 { - defer C.freecon(context) - fullpathC := C.CString(fullpath) - defer C.free(unsafe.Pointer(fullpathC)) - res, err = C.lsetfilecon_raw(fullpathC, context) - if int(res) < 0 { - return errors.Wrapf(err, "cannot setfilecon_raw %s", fullpath) - } - } - } - - if info.IsDir() { - if usermode { - if err := os.Chmod(fullpath, info.Mode()|0700); err != nil { - return err - } - } - err = fixFiles(selinuxHnd, root, fullpath, usermode) - if err != nil { - return err - } - } else if usermode && (info.Mode().IsRegular()) { - if err := os.Chmod(fullpath, info.Mode()|0600); err != nil { - return err - } - } - } - - return nil -} - -func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch string, root string, metadata []string) error { - opts := otbuiltin.NewCommitOptions() - opts.AddMetadataString = metadata - opts.Timestamp = time.Now() - // OCI layers have no parent OSTree commit - opts.Parent = "0000000000000000000000000000000000000000000000000000000000000000" - _, err := repo.Commit(root, branch, opts) - return err -} - -func generateTarSplitMetadata(output *bytes.Buffer, file string) error { - mfz := gzip.NewWriter(output) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - stream, err := os.OpenFile(file, os.O_RDONLY, 0) - if err != nil { - return err - } - defer stream.Close() - - gzReader, err := archive.DecompressStream(stream) - if err != nil { - return err - } - defer gzReader.Close() - - its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) - if err != nil { - return err - } - - _, err = io.Copy(ioutil.Discard, its) - if err != nil { - return err - } - return nil -} - -func (d *ostreeImageDestination) importBlob(selinuxHnd *C.struct_selabel_handle, repo *otbuiltin.Repo, blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") - if err := ensureDirectoryExists(destinationPath); err != nil { - return err - } - defer func() { - os.Remove(blob.BlobPath) - os.RemoveAll(destinationPath) - }() - - var tarSplitOutput bytes.Buffer - if err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath); err != nil { - return err - } - - if os.Getuid() == 0 { - if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { - return err - } - if err := fixFiles(selinuxHnd, destinationPath, destinationPath, false); err != nil { - return err - } - } else { - os.MkdirAll(destinationPath, 0755) - if err := exec.Command("tar", "-C", destinationPath, "--no-same-owner", "--no-same-permissions", "--delay-directory-restore", "-xf", blob.BlobPath).Run(); err != nil { - return err - } - - if err := fixFiles(selinuxHnd, destinationPath, destinationPath, true); err != nil { - return err - } - } - return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), - fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) - -} - -func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", 
blob.Digest.Hex()) - destinationPath := filepath.Dir(blob.BlobPath) - - return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) -} - -func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - - if d.repo == nil { - repo, err := openRepo(d.ref.repo) - if err != nil { - return false, 0, err - } - d.repo = repo - } - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) - - found, data, err := readMetadata(d.repo, branch, "docker.size") - if err != nil || !found { - return found, -1, err - } - - size, err := strconv.ParseInt(data, 10, 64) - if err != nil { - return false, -1, err - } - - return true, size, nil -} - -func (d *ostreeImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { - return info, nil -} - -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *ostreeImageDestination) PutManifest(manifestBlob []byte) error { - d.manifest = string(manifestBlob) - - if err := json.Unmarshal(manifestBlob, &d.schema); err != nil { - return err - } - - manifestPath := filepath.Join(d.tmpDirPath, d.ref.manifestPath()) - if err := ensureParentDirectoryExists(manifestPath); err != nil { - return err - } - - digest, err := manifest.Digest(manifestBlob) - if err != nil { - return err - } - d.digest = digest - - return ioutil.WriteFile(manifestPath, manifestBlob, 0644) -} - -func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { - path := filepath.Join(d.tmpDirPath, d.ref.signaturePath(0)) - if err := ensureParentDirectoryExists(path); err != nil { - return err - } - - for i, sig := range signatures { - signaturePath := filepath.Join(d.tmpDirPath, d.ref.signaturePath(i)) - if err := ioutil.WriteFile(signaturePath, sig, 0644); err != nil { - return err - } - } - d.signaturesLen = len(signatures) - return nil -} - -func (d *ostreeImageDestination) Commit() error { - repo, err := otbuiltin.OpenRepo(d.ref.repo) - if err != nil { - return err - } - - _, err = repo.PrepareTransaction() - if err != nil { - return err - } - - var selinuxHnd *C.struct_selabel_handle - - if os.Getuid() == 0 && selinux.GetEnabled() { - selinuxHnd, err = C.selabel_open(C.SELABEL_CTX_FILE, nil, 0) - if selinuxHnd == nil { - return errors.Wrapf(err, "cannot open the SELinux DB") - } - - defer C.selabel_close(selinuxHnd) - } - - checkLayer := func(hash string) error { - blob := d.blobs[hash] - // if the blob is not present in d.blobs then it is already stored in OSTree, - // and we don't need to import it. 
- if blob == nil { - return nil - } - err := d.importBlob(selinuxHnd, repo, blob) - if err != nil { - return err - } - - delete(d.blobs, hash) - return nil - } - for _, layer := range d.schema.LayersDescriptors { - hash := layer.Digest.Hex() - if err = checkLayer(hash); err != nil { - return err - } - } - for _, layer := range d.schema.FSLayers { - hash := layer.BlobSum.Hex() - if err = checkLayer(hash); err != nil { - return err - } - } - - // Import the other blobs that are not layers - for _, blob := range d.blobs { - err := d.importConfig(repo, blob) - if err != nil { - return err - } - } - - manifestPath := filepath.Join(d.tmpDirPath, "manifest") - - metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), - fmt.Sprintf("signatures=%d", d.signaturesLen), - fmt.Sprintf("docker.digest=%s", string(d.digest))} - err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata) - - _, err = repo.CommitTransaction() - return err -} - -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err - } - } - return nil -} - -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) -} diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go deleted file mode 100644 index c65a07b751..0000000000 --- a/vendor/github.com/containers/image/ostree/ostree_src.go +++ /dev/null @@ -1,354 +0,0 @@ -// +build !containers_image_ostree_stub - -package ostree - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" - "unsafe" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/containers/storage/pkg/ioutils" - "github.com/opencontainers/go-digest" - glib "github.com/ostreedev/ostree-go/pkg/glibobject" - "github.com/pkg/errors" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 -// #include -// #include -// #include -// #include -// #include -// #include -import "C" - -type ostreeImageSource struct { - ref ostreeReference - tmpDir string - repo *C.struct_OstreeRepo -} - -// newImageSource returns an ImageSource for reading from an existing directory. -func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) { - return &ostreeImageSource{ref: ref, tmpDir: tmpDir}, nil -} - -// Reference returns the reference used to set up this source. -func (s *ostreeImageSource) Reference() types.ImageReference { - return s.ref -} - -// Close removes resources associated with an initialized ImageSource, if any. -func (s *ostreeImageSource) Close() error { - if s.repo != nil { - C.g_object_unref(C.gpointer(s.repo)) - } - return nil -} - -func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) { - b := fmt.Sprintf("ociimage/%s", blob) - found, data, err := readMetadata(s.repo, b, "docker.size") - if err != nil || !found { - return 0, err - } - return strconv.ParseInt(data, 10, 64) -} - -func (s *ostreeImageSource) getLenSignatures() (int64, error) { - b := fmt.Sprintf("ociimage/%s", s.ref.branchName) - found, data, err := readMetadata(s.repo, b, "signatures") - if err != nil { - return -1, err - } - if !found { - // if 'signatures' is not present, just return 0 signatures. 
- return 0, nil - } - return strconv.ParseInt(data, 10, 64) -} - -func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { - b := fmt.Sprintf("ociimage/%s", blob) - found, out, err := readMetadata(s.repo, b, "tarsplit.output") - if err != nil || !found { - return nil, err - } - return base64.StdEncoding.DecodeString(out) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) - } - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, "", err - } - s.repo = repo - } - - b := fmt.Sprintf("ociimage/%s", s.ref.branchName) - found, out, err := readMetadata(s.repo, b, "docker.manifest") - if err != nil { - return nil, "", err - } - if !found { - return nil, "", errors.New("manifest not found") - } - m := []byte(out) - return m, manifest.GuessMIMEType(m), nil -} - -func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.New("manifest lists are not supported by this transport") -} - -func openRepo(path string) (*C.struct_OstreeRepo, error) { - var cerr *C.GError - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - repo := C.ostree_repo_new(pathc) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) - if !r { - C.g_object_unref(C.gpointer(repo)) - return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - return repo, nil -} - -type ostreePathFileGetter struct { - repo *C.struct_OstreeRepo - parentRoot *C.GFile -} - -type ostreeReader struct { - stream *C.GFileInputStream -} - -func (o ostreeReader) Close() error { - C.g_object_unref(C.gpointer(o.stream)) - return nil -} -func (o ostreeReader) Read(p []byte) (int, error) { - var cerr *C.GError - instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) - stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) - - b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) - if b == nil { - return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - defer C.g_bytes_unref(b) - - count := int(C.g_bytes_get_size(b)) - if count == 0 { - return 0, io.EOF - } - data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] - copy(p, data) - return count, nil -} - -func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { - var cerr *C.GError - var ref *C.char - defer C.free(unsafe.Pointer(ref)) - - cCommit := C.CString(commit) - defer C.free(unsafe.Pointer(cCommit)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { - return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - if ref == nil { - return false, "", nil - } - - var variant *C.GVariant - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { - return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - defer C.g_variant_unref(variant) - if variant != nil { - cKey := C.CString(key) - defer 
C.free(unsafe.Pointer(cKey)) - - metadata := C.g_variant_get_child_value(variant, 0) - defer C.g_variant_unref(metadata) - - data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) - if data != nil { - defer C.g_variant_unref(data) - ptr := (*C.char)(C.g_variant_get_string(data, nil)) - val := C.GoString(ptr) - return true, val, nil - } - } - return false, "", nil -} - -func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { - var cerr *C.GError - var parentRoot *C.GFile - cCommit := C.CString(commit) - defer C.free(unsafe.Pointer(cCommit)) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { - return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - C.g_object_ref(C.gpointer(repo)) - - return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil -} - -func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { - var file *C.GFile - if strings.HasPrefix(filename, "./") { - filename = filename[2:] - } - cfilename := C.CString(filename) - defer C.free(unsafe.Pointer(cfilename)) - - file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) - - var cerr *C.GError - stream := C.g_file_read(file, nil, &cerr) - if stream == nil { - return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) - } - - return &ostreeReader{stream: stream}, nil -} - -func (o ostreePathFileGetter) Close() { - C.g_object_unref(C.gpointer(o.repo)) - C.g_object_unref(C.gpointer(o.parentRoot)) -} - -func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { - getter, err := newOSTreePathFileGetter(s.repo, commit) - if err != nil { - return nil, err - } - defer getter.Close() - - return getter.Get(path) -} - -// GetBlob returns a stream for the specified blob, and the blob's size. -func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { - blob := info.Digest.Hex() - branch := fmt.Sprintf("ociimage/%s", blob) - - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, 0, err - } - s.repo = repo - } - - layerSize, err := s.getLayerSize(blob) - if err != nil { - return nil, 0, err - } - - tarsplit, err := s.getTarSplitData(blob) - if err != nil { - return nil, 0, err - } - - // if tarsplit is nil we are looking at the manifest. 
Return directly the file in /content - if tarsplit == nil { - file, err := s.readSingleFile(branch, "/content") - if err != nil { - return nil, 0, err - } - return file, layerSize, nil - } - - mf := bytes.NewReader(tarsplit) - mfz, err := gzip.NewReader(mf) - if err != nil { - return nil, 0, err - } - defer mfz.Close() - metaUnpacker := storage.NewJSONUnpacker(mfz) - - getter, err := newOSTreePathFileGetter(s.repo, branch) - if err != nil { - return nil, 0, err - } - - ots := asm.NewOutputTarStream(getter, metaUnpacker) - - pipeReader, pipeWriter := io.Pipe() - go func() { - io.Copy(pipeWriter, ots) - pipeWriter.Close() - }() - - rc := ioutils.NewReadCloserWrapper(pipeReader, func() error { - getter.Close() - return ots.Close() - }) - return rc, layerSize, nil -} - -func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, errors.New("manifest lists are not supported by this transport") - } - lenSignatures, err := s.getLenSignatures() - if err != nil { - return nil, err - } - branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) - - if s.repo == nil { - repo, err := openRepo(s.ref.repo) - if err != nil { - return nil, err - } - s.repo = repo - } - - signatures := [][]byte{} - for i := int64(1); i <= lenSignatures; i++ { - sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) - if err != nil { - return nil, err - } - defer sigReader.Close() - - sig, err := ioutil.ReadAll(sigReader) - if err != nil { - return nil, err - } - signatures = append(signatures, sig) - } - return signatures, nil -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. -func (s *ostreeImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go deleted file mode 100644 index cc85a43ff1..0000000000 --- a/vendor/github.com/containers/image/ostree/ostree_transport.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build !containers_image_ostree_stub - -package ostree - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/containers/image/directory/explicitfilepath" - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -const defaultOSTreeRepo = "/ostree/repo" - -// Transport is an ImageTransport for ostree paths. -var Transport = ostreeTransport{} - -type ostreeTransport struct{} - -func (t ostreeTransport) Name() string { - return "ostree" -} - -func init() { - transports.Register(Transport) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. 
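A few concrete scopes and how the check below treats them (invented examples, traced through the code by hand):

//	"/ostree/repo:docker.io/library/busybox:latest" -> accepted
//	"docker.io/library/busybox"                     -> rejected: must include a repo (no ":")
//	"ostree/repo:busybox"                           -> rejected: repository must be an absolute path
//	"/ostree//repo:busybox"                         -> rejected: non-canonical path format (Clean gives /ostree/repo)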
-func (t ostreeTransport) ValidatePolicyConfigurationScope(scope string) error {
-	sep := strings.Index(scope, ":")
-	if sep < 0 {
-		return errors.Errorf("Invalid ostree: scope %s: Must include a repo", scope)
-	}
-	repo := scope[:sep]
-
-	if !strings.HasPrefix(repo, "/") {
-		return errors.Errorf("Invalid ostree: scope %s: repository must be an absolute path", scope)
-	}
-	cleaned := filepath.Clean(repo)
-	if cleaned != repo {
-		return errors.Errorf(`Invalid ostree: scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
-	}
-
-	// FIXME? In the namespaces within a repo,
-	// we could be verifying the various character set and length restrictions
-	// from docker/distribution/reference.regexp.go, but other than that there
-	// are few semantically invalid strings.
-	return nil
-}
-
-// ostreeReference is an ImageReference for ostree paths.
-type ostreeReference struct {
-	image      string
-	branchName string
-	repo       string
-}
-
-type ostreeImageCloser struct {
-	types.ImageCloser
-	size int64
-}
-
-func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) {
-	var repo = ""
-	var image = ""
-	s := strings.SplitN(ref, "@/", 2)
-	if len(s) == 1 {
-		image, repo = s[0], defaultOSTreeRepo
-	} else {
-		image, repo = s[0], "/"+s[1]
-	}
-
-	return NewReference(image, repo)
-}
-
-// NewReference returns an OSTree reference for a specified repo and image.
-func NewReference(image string, repo string) (types.ImageReference, error) {
-	// image is not _really_ in a containers/image/docker/reference format;
-	// as far as the libOSTree ociimage/* namespace is concerned, it is more or
-	// less an arbitrary string with an implied tag.
-	// Parse the image using reference.ParseNormalizedNamed so that we can
-	// check whether the image has a tag specified, so we can add ":latest" if needed
-	ostreeImage, err := reference.ParseNormalizedNamed(image)
-	if err != nil {
-		return nil, err
-	}
-
-	if reference.IsNameOnly(ostreeImage) {
-		image = image + ":latest"
-	}
-
-	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(repo)
-	if err != nil {
-		// With os.IsNotExist(err), the parent directory of repo does not exist either;
-		// that should ordinarily not happen, but it would be a bit weird to reject
-		// references which do not specify a repo just because the implicit defaultOSTreeRepo
-		// does not exist.
-		if os.IsNotExist(err) && repo == defaultOSTreeRepo {
-			resolved = repo
-		} else {
-			return nil, err
-		}
-	}
-	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
-	// from being ambiguous with values of PolicyConfigurationIdentity.
-	if strings.Contains(resolved, ":") {
-		return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved)
-	}
-
-	return ostreeReference{
-		image:      image,
-		branchName: encodeOStreeRef(image),
-		repo:       resolved,
-	}, nil
-}
-
-func (ref ostreeReference) Transport() types.ImageTransport {
-	return Transport
-}
-
-// StringWithinTransport returns a string representation of the reference, which MUST be such that
-// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
-// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
-// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
-// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
-func (ref ostreeReference) StringWithinTransport() string { - return fmt.Sprintf("%s@%s", ref.image, ref.repo) -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref ostreeReference) DockerReference() reference.Named { - return nil -} - -func (ref ostreeReference) PolicyConfigurationIdentity() string { - return fmt.Sprintf("%s:%s", ref.repo, ref.image) -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref ostreeReference) PolicyConfigurationNamespaces() []string { - s := strings.SplitN(ref.image, ":", 2) - if len(s) != 2 { // Coverage: Should never happen, NewReference above ensures ref.image has a :tag. - panic(fmt.Sprintf("Internal inconsistency: ref.image value %q does not have a :tag", ref.image)) - } - name := s[0] - res := []string{} - for { - res = append(res, fmt.Sprintf("%s:%s", ref.repo, name)) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - return res -} - -func (s *ostreeImageCloser) Size() (int64, error) { - return s.size, nil -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - var tmpDir string - if ctx == nil || ctx.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = ctx.OSTreeTmpDirPath - } - src, err := newImageSource(ctx, tmpDir, ref) - if err != nil { - return nil, err - } - return image.FromSource(ctx, src) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - var tmpDir string - if ctx == nil || ctx.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = ctx.OSTreeTmpDirPath - } - return newImageSource(ctx, tmpDir, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref ostreeReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - var tmpDir string - if ctx == nil || ctx.OSTreeTmpDirPath == "" { - tmpDir = os.TempDir() - } else { - tmpDir = ctx.OSTreeTmpDirPath - } - return newImageDestination(ref, tmpDir) -} - -// DeleteImage deletes the named image from the registry, if supported. 
-func (ref ostreeReference) DeleteImage(ctx *types.SystemContext) error { - return errors.Errorf("Deleting images not implemented for ostree: images") -} - -var ostreeRefRegexp = regexp.MustCompile(`^[A-Za-z0-9.-]$`) - -func encodeOStreeRef(in string) string { - var buffer bytes.Buffer - for i := range in { - sub := in[i : i+1] - if ostreeRefRegexp.MatchString(sub) { - buffer.WriteString(sub) - } else { - buffer.WriteString(fmt.Sprintf("_%02X", sub[0])) - } - - } - return buffer.String() -} - -// manifestPath returns a path for the manifest within an ostree using our conventions. -func (ref ostreeReference) manifestPath() string { - return filepath.Join("manifest", "manifest.json") -} - -// signaturePath returns a path for a signature within an ostree using our conventions. -func (ref ostreeReference) signaturePath(index int) string { - return filepath.Join("manifest", fmt.Sprintf("signature-%d", index+1)) -} diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go deleted file mode 100644 index c19d962ee5..0000000000 --- a/vendor/github.com/containers/image/pkg/compression/compression.go +++ /dev/null @@ -1,67 +0,0 @@ -package compression - -import ( - "bytes" - "compress/bzip2" - "compress/gzip" - "io" - - "github.com/pkg/errors" - - "github.com/sirupsen/logrus" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -type DecompressorFunc func(io.Reader) (io.Reader, error) - -// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm. -func GzipDecompressor(r io.Reader) (io.Reader, error) { - return gzip.NewReader(r) -} - -// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm. -func Bzip2Decompressor(r io.Reader) (io.Reader, error) { - return bzip2.NewReader(r), nil -} - -// XzDecompressor is a DecompressorFunc for the xz compression algorithm. -func XzDecompressor(r io.Reader) (io.Reader, error) { - return nil, errors.New("Decompressing xz streams is not supported") -} - -// compressionAlgos is an internal implementation detail of DetectCompression -var compressionAlgos = map[string]struct { - prefix []byte - decompressor DecompressorFunc -}{ - "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952) - "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress) - "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt) -} - -// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise. -// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning. -func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) { - buffer := [8]byte{} - - n, err := io.ReadAtLeast(input, buffer[:], len(buffer)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again. - // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
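The compressionAlgos table above is the whole trick behind DetectCompression, whose body continues below: sniff the first bytes of the stream and match them against known magic prefixes. A stand-alone sketch of the same idea, with hypothetical names:

```go
package main

import (
	"bytes"
	"fmt"
)

// magicPrefixes mirrors the compressionAlgos table above: each supported
// format is identified by the fixed bytes it begins with.
var magicPrefixes = map[string][]byte{
	"gzip":  {0x1F, 0x8B, 0x08},
	"bzip2": {0x42, 0x5A, 0x68},
	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
}

// sniffFormat compares the start of a stream against the known prefixes.
func sniffFormat(header []byte) string {
	for name, prefix := range magicPrefixes {
		if bytes.HasPrefix(header, prefix) {
			return name
		}
	}
	return "uncompressed"
}

func main() {
	fmt.Println(sniffFormat([]byte{0x1F, 0x8B, 0x08, 0x00})) // gzip
}
```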
- return nil, nil, err - } - - var decompressor DecompressorFunc - for name, algo := range compressionAlgos { - if bytes.HasPrefix(buffer[:n], algo.prefix) { - logrus.Debugf("Detected compression format %s", name) - decompressor = algo.decompressor - break - } - } - if decompressor == nil { - logrus.Debugf("No compression detected") - } - - return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil -} diff --git a/vendor/github.com/containers/image/pkg/docker/config/config.go b/vendor/github.com/containers/image/pkg/docker/config/config.go deleted file mode 100644 index fd0ae7d84d..0000000000 --- a/vendor/github.com/containers/image/pkg/docker/config/config.go +++ /dev/null @@ -1,295 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/containers/image/types" - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/pkg/homedir" - "github.com/pkg/errors" -) - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -const ( - defaultPath = "/run/user" - authCfg = "containers" - authCfgFileName = "auth.json" - dockerCfg = ".docker" - dockerCfgFileName = "config.json" - dockerLegacyCfg = ".dockercfg" -) - -var ( - // ErrNotLoggedIn is returned for users not logged into a registry - // that they are trying to log out of - ErrNotLoggedIn = errors.New("not logged in") -) - -// SetAuthentication stores the username and password in the auth.json file -func SetAuthentication(ctx *types.SystemContext, registry, username, password string) error { - return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) { - if ch, exists := auths.CredHelpers[registry]; exists { - return false, setAuthToCredHelper(ch, registry, username, password) - } - - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - auths.AuthConfigs[registry] = newCreds - return true, nil - }) -} - -// GetAuthentication returns the registry credentials stored in -// either the auth.json file or .docker/config.json -// If an entry is not found, empty strings are returned for the username and password -func GetAuthentication(ctx *types.SystemContext, registry string) (string, string, error) { - if ctx != nil && ctx.DockerAuthConfig != nil { - return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil - } - - dockerLegacyPath := filepath.Join(homedir.Get(), dockerLegacyCfg) - paths := [3]string{getPathToAuth(ctx), filepath.Join(homedir.Get(), dockerCfg, dockerCfgFileName), dockerLegacyPath} - - for _, path := range paths { - legacyFormat := path == dockerLegacyPath - username, password, err := findAuthentication(registry, path, legacyFormat) - if err != nil { - return "", "", err - } - if username != "" && password != "" { - return username, password, nil - } - } - return "", "", nil -} - -// GetUserLoggedIn returns the username logged in to the registry from either -// auth.json or XDG_RUNTIME_DIR -// Used to tell the user if someone is logged in to the registry when logging in -func GetUserLoggedIn(ctx *types.SystemContext, registry string) string { - path := getPathToAuth(ctx) - username, _, _ := findAuthentication(registry, path, false) - if username != "" {
return username - } - return "" -} - -// RemoveAuthentication deletes the credentials stored in auth.json -func RemoveAuthentication(ctx *types.SystemContext, registry string) error { - return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) { - // First try cred helpers. - if ch, exists := auths.CredHelpers[registry]; exists { - return false, deleteAuthFromCredHelper(ch, registry) - } - - if _, ok := auths.AuthConfigs[registry]; ok { - delete(auths.AuthConfigs, registry) - } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { - delete(auths.AuthConfigs, normalizeRegistry(registry)) - } else { - return false, ErrNotLoggedIn - } - return true, nil - }) -} - -// RemoveAllAuthentication deletes all the credentials stored in auth.json -func RemoveAllAuthentication(ctx *types.SystemContext) error { - return modifyJSON(ctx, func(auths *dockerConfigFile) (bool, error) { - auths.CredHelpers = make(map[string]string) - auths.AuthConfigs = make(map[string]dockerAuthConfig) - return true, nil - }) -} - -// getPathToAuth gets the path of the auth.json file -// The path can be overridden by the user if the overwrite-path flag is set -// If the flag is not set and XDG_RUNTIME_DIR is set, the auth.json file is saved in XDG_RUNTIME_DIR/containers -// Otherwise, the auth.json file is stored in /run/user/UID/containers -func getPathToAuth(ctx *types.SystemContext) string { - if ctx != nil { - if ctx.AuthFilePath != "" { - return ctx.AuthFilePath - } - if ctx.RootForImplicitAbsolutePaths != "" { - return filepath.Join(ctx.RootForImplicitAbsolutePaths, defaultPath, strconv.Itoa(os.Getuid()), authCfg, authCfgFileName) - } - } - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir == "" { - runtimeDir = filepath.Join(defaultPath, strconv.Itoa(os.Getuid())) - } - return filepath.Join(runtimeDir, authCfg, authCfgFileName) -} - -// readJSONFile unmarshals the authentications stored in the auth.json file and returns it -// or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, readJSONFile returns an error -func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { - var auths dockerConfigFile - - raw, err := ioutil.ReadFile(path) - if os.IsNotExist(err) { - auths.AuthConfigs = map[string]dockerAuthConfig{} - return auths, nil - } - - if legacyFormat { - if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - return auths, nil - } - - if err = json.Unmarshal(raw, &auths); err != nil { - return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) - } - - return auths, nil -} - -// modifyJSON writes to auth.json if the dockerConfigFile has been updated -func modifyJSON(ctx *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { - path := getPathToAuth(ctx) - dir := filepath.Dir(path) - if _, err := os.Stat(dir); os.IsNotExist(err) { - if err = os.Mkdir(dir, 0700); err != nil { - return errors.Wrapf(err, "error creating directory %q", dir) - } - } - - auths, err := readJSONFile(path, false) - if err != nil { - return errors.Wrapf(err, "error reading JSON file %q", path) - } - - updated, err := editor(&auths) - if err != nil { - return errors.Wrapf(err, "error updating %q", path) - } - if updated { - newData, err := json.MarshalIndent(auths, "", "\t") - if err != nil { - return errors.Wrapf(err, "error marshaling JSON %q", path) - } - - if err =
ioutil.WriteFile(path, newData, 0755); err != nil { - return errors.Wrapf(err, "error writing to file %q", path) - } - } - - return nil -} - -func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - return "", "", err - } - return creds.Username, creds.Secret, nil -} - -func setAuthToCredHelper(credHelper, registry, username, password string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds := &credentials.Credentials{ - ServerURL: registry, - Username: username, - Secret: password, - } - return helperclient.Store(p, creds) -} - -func deleteAuthFromCredHelper(credHelper, registry string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.Erase(p, registry) -} - -// findAuthentication looks for auth of registry in path -func findAuthentication(registry, path string, legacyFormat bool) (string, string, error) { - auths, err := readJSONFile(path, legacyFormat) - if err != nil { - return "", "", errors.Wrapf(err, "error reading JSON file %q", path) - } - - // First try cred helpers. They should always be normalized. - if ch, exists := auths.CredHelpers[registry]; exists { - return getAuthFromCredHelper(ch, registry) - } - - // I'm feeling lucky - if val, exists := auths.AuthConfigs[registry]; exists { - return decodeDockerAuth(val.Auth) - } - - // bad luck; let's normalize the entries first - registry = normalizeRegistry(registry) - normalizedAuths := map[string]dockerAuthConfig{} - for k, v := range auths.AuthConfigs { - normalizedAuths[normalizeRegistry(k)] = v - } - if val, exists := normalizedAuths[registry]; exists { - return decodeDockerAuth(val.Auth) - } - return "", "", nil -} - -func decodeDockerAuth(s string) (string, string, error) { - decoded, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return "", "", err - } - parts := strings.SplitN(string(decoded), ":", 2) - if len(parts) != 2 { - // if it's invalid just skip, as docker does - return "", "", nil - } - user := parts[0] - password := strings.Trim(parts[1], "\x00") - return user, password, nil -} - -// convertToHostname converts a registry url which has http|https prepended -// to just a hostname. -// Copied from github.com/docker/docker/registry/auth.go -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.TrimPrefix(url, "http://") - } else if strings.HasPrefix(url, "https://") { - stripped = strings.TrimPrefix(url, "https://") - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} - -func normalizeRegistry(registry string) string { - normalized := convertToHostname(registry) - switch normalized { - case "registry-1.docker.io", "docker.io": - return "index.docker.io" - } - return normalized -} diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go deleted file mode 100644 index bad493fb89..0000000000 --- a/vendor/github.com/containers/image/pkg/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options.
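Before the type definition below, a quick sketch of the pattern it implements: one field that unmarshals from either `"sh"` or `["sh","-c"]` (flexSlice is a hypothetical stand-in for StrSlice, not the removed code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// flexSlice mirrors the idea behind StrSlice: a field that accepts either
// a single JSON string or a JSON array of strings.
type flexSlice []string

func (f *flexSlice) UnmarshalJSON(b []byte) error {
	// Try the array form first; fall back to a single string.
	var many []string
	if err := json.Unmarshal(b, &many); err == nil {
		*f = many
		return nil
	}
	var one string
	if err := json.Unmarshal(b, &one); err != nil {
		return err
	}
	*f = flexSlice{one}
	return nil
}

func main() {
	var a, b flexSlice
	_ = json.Unmarshal([]byte(`"sh"`), &a)        // string form
	_ = json.Unmarshal([]byte(`["sh","-c"]`), &b) // array form
	fmt.Println(a, b) // [sh] [sh -c]
}
```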
-type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go deleted file mode 100644 index 0a32861ced..0000000000 --- a/vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go +++ /dev/null @@ -1,102 +0,0 @@ -package tlsclientconfig - -import ( - "crypto/tls" - "io/ioutil" - "net" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc -func SetupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - -// NewTransport Creates a default transport -func NewTransport() *http.Transport { - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - tr.Dial = proxyDialer.Dial - } - return tr -} diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go deleted file mode 100644 index 16eb3f7993..0000000000 --- a/vendor/github.com/containers/image/signature/docker.go +++ /dev/null @@ -1,65 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/manifest" - "github.com/opencontainers/go-digest" -) - -// SignDockerManifest returns a signature for manifest as the specified dockerReference, -// using mech and keyIdentity. -func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) { - manifestDigest, err := manifest.Digest(m) - if err != nil { - return nil, err - } - sig := newUntrustedSignature(manifestDigest, dockerReference) - return sig.sign(mech, keyIdentity) -} - -// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference, -// using mech. 
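Note that SignDockerManifest above signs the manifest's digest rather than the manifest bytes themselves, which is what lets a short signature cover an arbitrarily large manifest. The digest convention is easy to reproduce before the verification body below (a sketch; the real manifest.Digest also strips v2s1 JWS signatures before hashing):

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// manifestDigest renders the digest of a manifest in the usual
// "sha256:<hex>" form: the sha256 of its raw bytes.
func manifestDigest(m []byte) string {
	return fmt.Sprintf("sha256:%x", sha256.Sum256(m))
}

func main() {
	fmt.Println(manifestDigest([]byte(`{"schemaVersion": 2}`)))
}
```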
-func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte, - expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) { - expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference) - if err != nil { - return nil, err - } - sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{ - validateKeyIdentity: func(keyIdentity string) error { - if keyIdentity != expectedKeyIdentity { - return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)} - } - return nil - }, - validateSignedDockerReference: func(signedDockerReference string) error { - signedRef, err := reference.ParseNormalizedNamed(signedDockerReference) - if err != nil { - return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)} - } - if signedRef.String() != expectedRef.String() { - return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s", - signedDockerReference, expectedDockerReference)} - } - return nil - }, - validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error { - matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) - if err != nil { - return err - } - if !matches { - return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)} - } - return nil - }, - }) - if err != nil { - return nil, err - } - return sig, nil -} diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json deleted file mode 120000 index 3dee14b4a8..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../v2s1-invalid-signatures.manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 deleted file mode 120000 index f010fd4c41..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-manifest-digest-error/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json deleted file mode 120000 index ff7d2ffadf..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 deleted file mode 120000 index b27cdc4585..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../invalid-blob.signature \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 b/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 deleted file mode 120000 index 
f010fd4c41..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-mixed/signature-2 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 deleted file mode 120000 index f010fd4c41..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-modified-manifest/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 deleted file mode 120000 index f010fd4c41..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-no-manifest/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json deleted file mode 120000 index ff7d2ffadf..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-unsigned/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json deleted file mode 120000 index ff7d2ffadf..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 deleted file mode 120000 index f010fd4c41..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid-2/signature-1 +++ /dev/null @@ -1 +0,0 @@ -../dir-img-valid/signature-1 \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json b/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json deleted file mode 120000 index c5bd25431f..0000000000 --- a/vendor/github.com/containers/image/signature/fixtures/dir-img-valid/manifest.json +++ /dev/null @@ -1 +0,0 @@ -../image.manifest.json \ No newline at end of file diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go deleted file mode 100644 index 9e592863da..0000000000 --- a/vendor/github.com/containers/image/signature/json.go +++ /dev/null @@ -1,88 +0,0 @@ -package signature - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// jsonFormatError is returned when JSON does not match expected format. -type jsonFormatError string - -func (err jsonFormatError) Error() string { - return string(err) -} - -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). 
Uses fieldResolver to -// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected. -// -// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy, -// we could use reflection to automate this. Later? -func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error { - seenKeys := map[string]struct{}{} - - dec := json.NewDecoder(bytes.NewReader(data)) - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t != json.Delim('{') { - return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t)) - } - for { - t, err := dec.Token() - if err != nil { - return jsonFormatError(err.Error()) - } - if t == json.Delim('}') { - break - } - - key, ok := t.(string) - if !ok { - // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state. - return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t)) - } - if _, ok := seenKeys[key]; ok { - return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key)) - } - seenKeys[key] = struct{}{} - - valuePtr := fieldResolver(key) - if valuePtr == nil { - return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key)) - } - // This works like json.Unmarshal, in particular it allows us to implement UnmarshalJSON to implement strict parsing of the field value. - if err := dec.Decode(valuePtr); err != nil { - return jsonFormatError(err.Error()) - } - } - if _, err := dec.Token(); err != io.EOF { - return jsonFormatError("Unexpected data after JSON object") - } - return nil -} - -// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect -// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields -// must be present exactly once, and none other fields are accepted. -func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error { - seenKeys := map[string]struct{}{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - if valuePtr, ok := exactFields[key]; ok { - seenKeys[key] = struct{}{} - return valuePtr - } - return nil - }); err != nil { - return err - } - for key := range exactFields { - if _, ok := seenKeys[key]; !ok { - return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key)) - } - } - return nil -} diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go deleted file mode 100644 index bdf26c531f..0000000000 --- a/vendor/github.com/containers/image/signature/mechanism.go +++ /dev/null @@ -1,85 +0,0 @@ -// Note: Consider the API unstable until the code supports at least three different image formats or transports. - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "strings" - - "golang.org/x/crypto/openpgp" -) - -// SigningMechanism abstracts a way to sign binary blobs and verify their signatures. -// Each mechanism should eventually be closed by calling Close(). -// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to -// eliminate ambiguities, support CA signatures and perhaps other key properties) -type SigningMechanism interface { - // Close removes resources associated with the mechanism, if any. 
- Close() error - // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. - SupportsSigning() error - // Sign creates a (non-detached) signature of input using keyIdentity. - // Fails with a SigningNotSupportedError if the mechanism does not support signing. - Sign(input []byte, keyIdentity string) ([]byte, error) - // Verify parses unverifiedSignature and returns the content and the signer's identity - Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) - // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, - // along with a short identifier of the key used for signing. - // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) - // is NOT the same as a "key identity" used in other calls to this interface, and - // the values may have no recognizable relationship if the public key is not available. - UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) -} - -// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that. -type SigningNotSupportedError string - -func (err SigningNotSupportedError) Error() string { - return string(err) -} - -// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default -// GPG configuration ($GNUPGHOME / ~/.gnupg) -// The caller must call .Close() on the returned SigningMechanism. -func NewGPGSigningMechanism() (SigningMechanism, error) { - return newGPGSigningMechanismInDirectory("") -} - -// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - return newEphemeralGPGSigningMechanism(blob) -} - -// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls to this interface, and -// the values may have no recognizable relationship if the public key is not available. -func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography. - md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("The input is not a signature") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: An error during reading the body can happen only if - // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key - // to decrypt the contents anyway), or - // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t. - return nil, "", err - } - - // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints - // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
- return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil -} diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go deleted file mode 100644 index 4825ab27c6..0000000000 --- a/vendor/github.com/containers/image/signature/mechanism_gpgme.go +++ /dev/null @@ -1,175 +0,0 @@ -// +build !containers_image_openpgp - -package signature - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - - "github.com/mtrmac/gpgme" -) - -// A GPG/OpenPGP signing mechanism, implemented using gpgme. -type gpgmeSigningMechanism struct { - ctx *gpgme.Context - ephemeralDir string // If not "", a directory to be removed on Close() -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - ctx, err := newGPGMEContext(optionalDir) - if err != nil { - return nil, err - } - return &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: "", - }, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-") - if err != nil { - return nil, nil, err - } - removeDir := true - defer func() { - if removeDir { - os.RemoveAll(dir) - } - }() - ctx, err := newGPGMEContext(dir) - if err != nil { - return nil, nil, err - } - mech := &gpgmeSigningMechanism{ - ctx: ctx, - ephemeralDir: dir, - } - keyIdentities, err := mech.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - - removeDir = false - return mech, keyIdentities, nil -} - -// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty. -func newGPGMEContext(optionalDir string) (*gpgme.Context, error) { - ctx, err := gpgme.New() - if err != nil { - return nil, err - } - if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil { - return nil, err - } - if optionalDir != "" { - err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir) - if err != nil { - return nil, err - } - } - ctx.SetArmor(false) - ctx.SetTextMode(false) - return ctx, nil -} - -func (m *gpgmeSigningMechanism) Close() error { - if m.ephemeralDir != "" { - os.RemoveAll(m.ephemeralDir) // Ignore an error, if any - } - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism); -// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism. 
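Before importKeysFromBytes below, one detail of newEphemeralGPGSigningMechanism above is worth calling out: a deferred cleanup that is armed by default and disarmed only after every fallible step has succeeded, so the temporary GPG home never leaks on an error path. A distilled sketch of that pattern (buildWithTempDir is hypothetical):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// buildWithTempDir creates a temp dir and removes it again if any later
// setup step fails; on success, ownership of the dir passes to the caller.
func buildWithTempDir() (dir string, err error) {
	dir, err = ioutil.TempDir("", "ephemeral-")
	if err != nil {
		return "", err
	}
	removeDir := true
	defer func() {
		if removeDir {
			os.RemoveAll(dir)
		}
	}()

	// ... fallible initialization steps would go here ...

	removeDir = false // success: disarm the deferred cleanup
	return dir, nil
}

func main() {
	dir, err := buildWithTempDir()
	if err == nil {
		defer os.RemoveAll(dir)
		fmt.Println("created", dir)
	}
}
```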
-func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - inputData, err := gpgme.NewDataBytes(blob) - if err != nil { - return nil, err - } - res, err := m.ctx.Import(inputData) - if err != nil { - return nil, err - } - keyIdentities := []string{} - for _, i := range res.Imports { - if i.Result == nil { - keyIdentities = append(keyIdentities, i.Fingerprint) - } - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. -func (m *gpgmeSigningMechanism) SupportsSigning() error { - return nil -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - key, err := m.ctx.GetKey(keyIdentity, true) - if err != nil { - return nil, err - } - inputData, err := gpgme.NewDataBytes(input) - if err != nil { - return nil, err - } - var sigBuffer bytes.Buffer - sigData, err := gpgme.NewDataWriter(&sigBuffer) - if err != nil { - return nil, err - } - if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil { - return nil, err - } - return sigBuffer.Bytes(), nil -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - signedBuffer := bytes.Buffer{} - signedData, err := gpgme.NewDataWriter(&signedBuffer) - if err != nil { - return nil, "", err - } - unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature) - if err != nil { - return nil, "", err - } - _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData) - if err != nil { - return nil, "", err - } - if len(sigs) != 1 { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))} - } - sig := sigs[0] - // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves - if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage { - // FIXME: Better error reporting eventually - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)} - } - return signedBuffer.Bytes(), sig.Fingerprint, nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls to this interface, and -// the values may have no recognizable relationship if the public key is not available.
-func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go deleted file mode 100644 index eccd610c9d..0000000000 --- a/vendor/github.com/containers/image/signature/mechanism_openpgp.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build containers_image_openpgp - -package signature - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "time" - - "github.com/containers/storage/pkg/homedir" - "golang.org/x/crypto/openpgp" -) - -// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp. -type openpgpSigningMechanism struct { - keyring openpgp.EntityList -} - -// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty. -// The caller must call .Close() on the returned SigningMechanism. -func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - - gpgHome := optionalDir - if gpgHome == "" { - gpgHome = os.Getenv("GNUPGHOME") - if gpgHome == "" { - gpgHome = path.Join(homedir.Get(), ".gnupg") - } - } - - pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg")) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - } else { - _, err := m.importKeysFromBytes(pubring) - if err != nil { - return nil, err - } - } - return m, nil -} - -// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which -// recognizes _only_ public keys from the supplied blob, and returns the identities -// of these keys. -// The caller must call .Close() on the returned SigningMechanism. -func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) { - m := &openpgpSigningMechanism{ - keyring: openpgp.EntityList{}, - } - keyIdentities, err := m.importKeysFromBytes(blob) - if err != nil { - return nil, nil, err - } - return m, keyIdentities, nil -} - -func (m *openpgpSigningMechanism) Close() error { - return nil -} - -// importKeysFromBytes imports public keys from the supplied blob and returns their identities. -// The blob is assumed to have an appropriate format (the caller is expected to know which one). -func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) { - keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob)) - if err != nil { - k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob)) - if e2 != nil { - return nil, err // The original error -- FIXME: is this better? - } - keyring = k - } - - keyIdentities := []string{} - for _, entity := range keyring { - if entity.PrimaryKey == nil { - // Coverage: This should never happen, openpgp.ReadEntity fails with a - // openpgp.errors.StructuralError instead of returning an entity with this - // field set to nil. - continue - } - // Uppercase the fingerprint to be compatible with gpgme - keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint))) - m.keyring = append(m.keyring, entity) - } - return keyIdentities, nil -} - -// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError. 
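One detail from importKeysFromBytes above is worth noting before the remaining methods: it accepts both binary and ASCII-armored keyrings by trying one reader and falling back to the other, reporting the original error if both fail. A self-contained sketch of that fallback, using the same golang.org/x/crypto/openpgp package the file imports:

```go
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/openpgp"
)

// readKeyRing tries the binary keyring format first, then retries the
// same blob as ASCII-armored input; if both fail, it reports the
// original (binary-format) error.
func readKeyRing(blob []byte) (openpgp.EntityList, error) {
	keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
	if err != nil {
		armored, err2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
		if err2 != nil {
			return nil, err // the original error
		}
		keyring = armored
	}
	return keyring, nil
}

func main() {
	_, err := readKeyRing([]byte("not a keyring"))
	fmt.Println(err != nil) // true
}
```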
-func (m *openpgpSigningMechanism) SupportsSigning() error { - return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Sign creates a (non-detached) signature of input using keyIdentity. -// Fails with a SigningNotSupportedError if the mechanism does not support signing. -func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) { - return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag") -} - -// Verify parses unverifiedSignature and returns the content and the signer's identity -func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) { - md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil) - if err != nil { - return nil, "", err - } - if !md.IsSigned { - return nil, "", errors.New("not signed") - } - content, err := ioutil.ReadAll(md.UnverifiedBody) - if err != nil { - // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted - // (and possibly also signed, but it _must_ be encrypted) and the signing - // “modification detection code” detects a mismatch. But in that case, - // we would expect the signature verification to fail as well, and that is checked - // first. Besides, we are not supplying any decryption keys, so we really - // can never reach this “encrypted data MDC mismatch” path. - return nil, "", err - } - if md.SignatureError != nil { - return nil, "", fmt.Errorf("signature error: %v", md.SignatureError) - } - if md.SignedBy == nil { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)} - } - if md.Signature != nil { - if md.Signature.SigLifetimeSecs != nil { - expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second) - if time.Now().After(expiry) { - return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)} - } - } - } else if md.SignatureV3 == nil { - // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3, - // or sets md.SignatureError. - return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"} - } - - // Uppercase the fingerprint to be compatible with gpgme - return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil -} - -// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION, -// along with a short identifier of the key used for signing. -// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys) -// is NOT the same as a "key identity" used in other calls to this interface, and -// the values may have no recognizable relationship if the public key is not available.
-func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) { - return gpgUntrustedSignatureContents(untrustedSignature) -} diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go deleted file mode 100644 index 42cc12ab1b..0000000000 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ /dev/null @@ -1,688 +0,0 @@ -// policy_config.go handles creation of policy objects, either by parsing JSON -// or by programs building them programmatically. - -// The New* constructors are intended to be a stable API. FIXME: after an independent review. - -// Do not invoke the internals of the JSON marshaling/unmarshaling directly. - -// We can't just blindly call json.Unmarshal because that would silently ignore -// typos, and that would just not do for security policy. - -// FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context. -// But at least it is not worse than blind json.Unmarshal()… - -package signature - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// systemDefaultPolicyPath is the policy path used for DefaultPolicy(). -// You can override this at build time with -// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path' -var systemDefaultPolicyPath = builtinDefaultPolicyPath - -// builtinDefaultPolicyPath is the policy path used for DefaultPolicy(). -// DO NOT change this, instead see systemDefaultPolicyPath above. -const builtinDefaultPolicyPath = "/etc/containers/policy.json" - -// InvalidPolicyFormatError is returned when parsing an invalid policy configuration. -type InvalidPolicyFormatError string - -func (err InvalidPolicyFormatError) Error() string { - return string(err) -} - -// DefaultPolicy returns the default policy of the system. -// Most applications should be using this method to get the policy configured -// by the system administrator. -// ctx should usually be nil, can be set to override the default. -// NOTE: When this function returns an error, report it to the user and abort. -// DO NOT hard-code fallback policies in your application. -func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) { - return NewPolicyFromFile(defaultPolicyPath(ctx)) -} - -// defaultPolicyPath returns a path to the default policy of the system. -func defaultPolicyPath(ctx *types.SystemContext) string { - if ctx != nil { - if ctx.SignaturePolicyPath != "" { - return ctx.SignaturePolicyPath - } - if ctx.RootForImplicitAbsolutePaths != "" { - return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } - } - return systemDefaultPolicyPath -} - -// NewPolicyFromFile returns a policy configured in the specified file. -func NewPolicyFromFile(fileName string) (*Policy, error) { - contents, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, err - } - policy, err := NewPolicyFromBytes(contents) - if err != nil { - return nil, errors.Wrapf(err, "invalid policy in %q", fileName) - } - return policy, nil -} - -// NewPolicyFromBytes returns a policy parsed from the specified blob. -// Use this function instead of calling json.Unmarshal directly.
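Before NewPolicyFromBytes below: the file comment above explains why plain json.Unmarshal is not good enough for a security policy, since it silently ignores typos and duplicate keys. A condensed sketch of the token-walking approach paranoidUnmarshalJSONObject takes (strictObjectKeys is hypothetical and only validates keys, not values):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// strictObjectKeys walks a JSON object token by token and rejects
// duplicate keys and trailing garbage, both of which plain
// json.Unmarshal accepts silently.
func strictObjectKeys(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	if t, err := dec.Token(); err != nil || t != json.Delim('{') {
		return fmt.Errorf("JSON object expected")
	}
	seen := map[string]bool{}
	for {
		t, err := dec.Token()
		if err != nil {
			return err
		}
		if t == json.Delim('}') {
			break
		}
		key, ok := t.(string)
		if !ok {
			return fmt.Errorf("object key expected, got %v", t)
		}
		if seen[key] {
			return fmt.Errorf("duplicate key %q", key)
		}
		seen[key] = true
		// Skip over the value without interpreting it.
		var v json.RawMessage
		if err := dec.Decode(&v); err != nil {
			return err
		}
	}
	if _, err := dec.Token(); err != io.EOF {
		return fmt.Errorf("unexpected data after JSON object")
	}
	return nil
}

func main() {
	fmt.Println(strictObjectKeys([]byte(`{"a":1,"a":2}`))) // duplicate key "a"
}
```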
-func NewPolicyFromBytes(data []byte) (*Policy, error) { - p := Policy{} - if err := json.Unmarshal(data, &p); err != nil { - return nil, InvalidPolicyFormatError(err.Error()) - } - return &p, nil -} - -// Compile-time check that Policy implements json.Unmarshaler. -var _ json.Unmarshaler = (*Policy)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (p *Policy) UnmarshalJSON(data []byte) error { - *p = Policy{} - transports := policyTransportsMap{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - switch key { - case "default": - return &p.Default - case "transports": - return &transports - default: - return nil - } - }); err != nil { - return err - } - - if p.Default == nil { - return InvalidPolicyFormatError("Default policy is missing") - } - p.Transports = map[string]PolicyTransportScopes(transports) - return nil -} - -// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member. -type policyTransportsMap map[string]PolicyTransportScopes - -// Compile-time check that policyTransportsMap implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportsMap)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportsMap) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-values and convert. - tmpMap := map[string]*PolicyTransportScopes{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // transport can be nil - transport := transports.Get(key) - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - ptsWithTransport := policyTransportScopesWithTransport{ - transport: transport, - dest: &PolicyTransportScopes{}, // This allocates a new instance on each call. - } - tmpMap[key] = ptsWithTransport.dest - return &ptsWithTransport - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler. -// We want to only use policyTransportScopesWithTransport -var _ json.Unmarshaler = (*PolicyTransportScopes)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error { - return errors.New("Do not try to unmarshal PolicyTransportScopes directly") -} - -// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes -// while validating using a specific ImageTransport if not nil. -type policyTransportScopesWithTransport struct { - transport types.ImageTransport - dest *PolicyTransportScopes -} - -// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler. -var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error { - // We can't unmarshal directly into map values because it is not possible to take an address of a map value. - // So, use a temporary map of pointers-to-slices and convert.
- tmpMap := map[string]*PolicyRequirements{} - if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} { - // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe. - if _, ok := tmpMap[key]; ok { - return nil - } - if key != "" && m.transport != nil { - if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil { - return nil - } - } - ptr := &PolicyRequirements{} // This allocates a new instance on each call. - tmpMap[key] = ptr - return ptr - }); err != nil { - return err - } - for key, ptr := range tmpMap { - (*m.dest)[key] = *ptr - } - return nil -} - -// Compile-time check that PolicyRequirements implements json.Unmarshaler. -var _ json.Unmarshaler = (*PolicyRequirements)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (m *PolicyRequirements) UnmarshalJSON(data []byte) error { - reqJSONs := []json.RawMessage{} - if err := json.Unmarshal(data, &reqJSONs); err != nil { - return err - } - if len(reqJSONs) == 0 { - return InvalidPolicyFormatError("List of verification policy requirements must not be empty") - } - res := make([]PolicyRequirement, len(reqJSONs)) - for i, reqJSON := range reqJSONs { - req, err := newPolicyRequirementFromJSON(reqJSON) - if err != nil { - return err - } - res[i] = req - } - *m = res - return nil -} - -// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation. -func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) { - var typeField prCommon - if err := json.Unmarshal(data, &typeField); err != nil { - return nil, err - } - var res PolicyRequirement - switch typeField.Type { - case prTypeInsecureAcceptAnything: - res = &prInsecureAcceptAnything{} - case prTypeReject: - res = &prReject{} - case prTypeSignedBy: - res = &prSignedBy{} - case prTypeSignedBaseLayer: - res = &prSignedBaseLayer{} - default: - return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type)) - } - if err := json.Unmarshal(data, &res); err != nil { - return nil, err - } - return res, nil -} - -// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type. -func newPRInsecureAcceptAnything() *prInsecureAcceptAnything { - return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}} -} - -// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement. -func NewPRInsecureAcceptAnything() PolicyRequirement { - return newPRInsecureAcceptAnything() -} - -// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler. -var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error { - *pr = prInsecureAcceptAnything{} - var tmp prInsecureAcceptAnything - if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{ - "type": &tmp.Type, - }); err != nil { - return err - } - - if tmp.Type != prTypeInsecureAcceptAnything { - return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) - } - *pr = *newPRInsecureAcceptAnything() - return nil -} - -// newPRReject is NewPRReject, except it returns the private type. -func newPRReject() *prReject { - return &prReject{prCommon{Type: prTypeReject}} -} - -// NewPRReject returns a new "reject" PolicyRequirement. 
-func NewPRReject() PolicyRequirement {
-	return newPRReject()
-}
-
-// Compile-time check that prReject implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prReject)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (pr *prReject) UnmarshalJSON(data []byte) error {
-	*pr = prReject{}
-	var tmp prReject
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type": &tmp.Type,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prTypeReject {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	*pr = *newPRReject()
-	return nil
-}
-
-// newPRSignedBy returns a new prSignedBy if parameters are valid.
-func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	if !keyType.IsValid() {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
-	}
-	if len(keyPath) > 0 && len(keyData) > 0 {
-		return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
-	}
-	if signedIdentity == nil {
-		return nil, InvalidPolicyFormatError("signedIdentity not specified")
-	}
-	return &prSignedBy{
-		prCommon:       prCommon{Type: prTypeSignedBy},
-		KeyType:        keyType,
-		KeyPath:        keyPath,
-		KeyData:        keyData,
-		SignedIdentity: signedIdentity,
-	}, nil
-}
-
-// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
-func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
-}
-
-// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath.
-func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
-}
-
-// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
-func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
-	return newPRSignedBy(keyType, "", keyData, signedIdentity)
-}
-
-// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData.
-func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedByKeyData(keyType, keyData, signedIdentity)
-}
-
-// Compile-time check that prSignedBy implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prSignedBy)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
-	*pr = prSignedBy{}
-	var tmp prSignedBy
-	var gotKeyPath, gotKeyData = false, false
-	var signedIdentity json.RawMessage
-	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
-		switch key {
-		case "type":
-			return &tmp.Type
-		case "keyType":
-			return &tmp.KeyType
-		case "keyPath":
-			gotKeyPath = true
-			return &tmp.KeyPath
-		case "keyData":
-			gotKeyData = true
-			return &tmp.KeyData
-		case "signedIdentity":
-			return &signedIdentity
-		default:
-			return nil
-		}
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prTypeSignedBy {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	if signedIdentity == nil {
-		tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
-	} else {
-		si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
-		if err != nil {
-			return err
-		}
-		tmp.SignedIdentity = si
-	}
-
-	var res *prSignedBy
-	var err error
-	switch {
-	case gotKeyPath && gotKeyData:
-		return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
-	case gotKeyPath && !gotKeyData:
-		res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
-	case !gotKeyPath && gotKeyData:
-		res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
-	case !gotKeyPath && !gotKeyData:
-		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
-	default: // Coverage: This should never happen
-		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
-	}
-	if err != nil {
-		return err
-	}
-	*pr = *res
-
-	return nil
-}
-
-// IsValid returns true iff kt is a recognized value
-func (kt sbKeyType) IsValid() bool {
-	switch kt {
-	case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
-		SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
-		return true
-	default:
-		return false
-	}
-}
-
-// Compile-time check that sbKeyType implements json.Unmarshaler.
-var _ json.Unmarshaler = (*sbKeyType)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
-	*kt = sbKeyType("")
-	var s string
-	if err := json.Unmarshal(data, &s); err != nil {
-		return err
-	}
-	if !sbKeyType(s).IsValid() {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
-	}
-	*kt = sbKeyType(s)
-	return nil
-}
-
-// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
-func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
-	if baseLayerIdentity == nil {
-		return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
-	}
-	return &prSignedBaseLayer{
-		prCommon:          prCommon{Type: prTypeSignedBaseLayer},
-		BaseLayerIdentity: baseLayerIdentity,
-	}, nil
-}
-
-// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
-func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
-	return newPRSignedBaseLayer(baseLayerIdentity)
-}
-
-// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
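Aside, for readers of this removal: the unmarshalers above correspond to on-disk policy files like the one below. A minimal sketch (the key path is hypothetical); note that leaving out `signedIdentity` falls back to `NewPRMMatchRepoDigestOrExact()`, as the code above shows.

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// Reject everything by default; allow one repository when signed
	// by the key at a (hypothetical) local path.
	policyJSON := []byte(`{
	    "default": [{"type": "reject"}],
	    "transports": {
	        "docker": {
	            "docker.io/library/busybox": [{
	                "type": "signedBy",
	                "keyType": "GPGKeys",
	                "keyPath": "/etc/pki/containers/pubkey.gpg"
	            }]
	        }
	    }
	}`)
	policy, err := signature.NewPolicyFromBytes(policyJSON)
	if err != nil {
		panic(err)
	}
	fmt.Printf("default has %d requirement(s)\n", len(policy.Default))
}
```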
-func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
-	*pr = prSignedBaseLayer{}
-	var tmp prSignedBaseLayer
-	var baseLayerIdentity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type":              &tmp.Type,
-		"baseLayerIdentity": &baseLayerIdentity,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prTypeSignedBaseLayer {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
-	if err != nil {
-		return err
-	}
-	res, err := newPRSignedBaseLayer(bli)
-	if err != nil {
-		// Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
-		return err
-	}
-	*pr = *res
-	return nil
-}
-
-// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
-func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
-	var typeField prmCommon
-	if err := json.Unmarshal(data, &typeField); err != nil {
-		return nil, err
-	}
-	var res PolicyReferenceMatch
-	switch typeField.Type {
-	case prmTypeMatchExact:
-		res = &prmMatchExact{}
-	case prmTypeMatchRepoDigestOrExact:
-		res = &prmMatchRepoDigestOrExact{}
-	case prmTypeMatchRepository:
-		res = &prmMatchRepository{}
-	case prmTypeExactReference:
-		res = &prmExactReference{}
-	case prmTypeExactRepository:
-		res = &prmExactRepository{}
-	default:
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
-	}
-	if err := json.Unmarshal(data, &res); err != nil {
-		return nil, err
-	}
-	return res, nil
-}
-
-// newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
-func newPRMMatchExact() *prmMatchExact {
-	return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
-}
-
-// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
-func NewPRMMatchExact() PolicyReferenceMatch {
-	return newPRMMatchExact()
-}
-
-// Compile-time check that prmMatchExact implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmMatchExact)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
-	*prm = prmMatchExact{}
-	var tmp prmMatchExact
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type": &tmp.Type,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeMatchExact {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	*prm = *newPRMMatchExact()
-	return nil
-}
-
-// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
-func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
-	return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
-}
-
-// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
-func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
-	return newPRMMatchRepoDigestOrExact()
-}
-
-// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
-	*prm = prmMatchRepoDigestOrExact{}
-	var tmp prmMatchRepoDigestOrExact
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type": &tmp.Type,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeMatchRepoDigestOrExact {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	*prm = *newPRMMatchRepoDigestOrExact()
-	return nil
-}
-
-// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
-func newPRMMatchRepository() *prmMatchRepository {
-	return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
-}
-
-// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
-func NewPRMMatchRepository() PolicyReferenceMatch {
-	return newPRMMatchRepository()
-}
-
-// Compile-time check that prmMatchRepository implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmMatchRepository)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
-	*prm = prmMatchRepository{}
-	var tmp prmMatchRepository
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type": &tmp.Type,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeMatchRepository {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-	*prm = *newPRMMatchRepository()
-	return nil
-}
-
-// newPRMExactReference is NewPRMExactReference, except it returns the private type.
-func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
-	ref, err := reference.ParseNormalizedNamed(dockerReference)
-	if err != nil {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
-	}
-	if reference.IsNameOnly(ref) {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
-	}
-	return &prmExactReference{
-		prmCommon:       prmCommon{Type: prmTypeExactReference},
-		DockerReference: dockerReference,
-	}, nil
-}
-
-// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
-func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
-	return newPRMExactReference(dockerReference)
-}
-
-// Compile-time check that prmExactReference implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmExactReference)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
-	*prm = prmExactReference{}
-	var tmp prmExactReference
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type":            &tmp.Type,
-		"dockerReference": &tmp.DockerReference,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeExactReference {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-
-	res, err := newPRMExactReference(tmp.DockerReference)
-	if err != nil {
-		return err
-	}
-	*prm = *res
-	return nil
-}
-
-// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
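The same policy can also be assembled in code through the public constructors being deleted here; a sketch under the same assumptions as the JSON example above:

```go
package main

import (
	"github.com/containers/image/signature"
)

// buildPolicy assembles, in code, the same policy as the JSON example above.
func buildPolicy() (*signature.Policy, error) {
	req, err := signature.NewPRSignedByKeyPath(
		signature.SBKeyTypeGPGKeys,
		"/etc/pki/containers/pubkey.gpg",  // hypothetical key path
		signature.NewPRMMatchRepository(), // accept any tag in the signed repository
	)
	if err != nil {
		return nil, err
	}
	return &signature.Policy{
		Default: signature.PolicyRequirements{signature.NewPRReject()},
		Transports: map[string]signature.PolicyTransportScopes{
			"docker": {
				"docker.io/library/busybox": {req},
			},
		},
	}, nil
}
```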
-func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
-	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
-		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
-	}
-	return &prmExactRepository{
-		prmCommon:        prmCommon{Type: prmTypeExactRepository},
-		DockerRepository: dockerRepository,
-	}, nil
-}
-
-// NewPRMExactRepository returns a new "exactRepository" PolicyReferenceMatch.
-func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
-	return newPRMExactRepository(dockerRepository)
-}
-
-// Compile-time check that prmExactRepository implements json.Unmarshaler.
-var _ json.Unmarshaler = (*prmExactRepository)(nil)
-
-// UnmarshalJSON implements the json.Unmarshaler interface.
-func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
-	*prm = prmExactRepository{}
-	var tmp prmExactRepository
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"type":             &tmp.Type,
-		"dockerRepository": &tmp.DockerRepository,
-	}); err != nil {
-		return err
-	}
-
-	if tmp.Type != prmTypeExactRepository {
-		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
-	}
-
-	res, err := newPRMExactRepository(tmp.DockerRepository)
-	if err != nil {
-		return err
-	}
-	*prm = *res
-	return nil
-}
diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go
deleted file mode 100644
index f818eb095a..0000000000
--- a/vendor/github.com/containers/image/signature/policy_eval.go
+++ /dev/null
@@ -1,289 +0,0 @@
-// This defines the top-level policy evaluation API.
-// To the extent possible, the interface of the functions provided
-// here is intended to be completely unambiguous, and stable for users
-// to rely on.
-
-package signature
-
-import (
-	"context"
-
-	"github.com/containers/image/types"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
-type PolicyRequirementError string
-
-func (err PolicyRequirementError) Error() string {
-	return string(err)
-}
-
-// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
-type signatureAcceptanceResult string
-
-const (
-	sarAccepted signatureAcceptanceResult = "sarAccepted"
-	sarRejected signatureAcceptanceResult = "sarRejected"
-	sarUnknown  signatureAcceptanceResult = "sarUnknown"
-)
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-type PolicyRequirement interface {
-	// FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
-	// costly initialization like creating temporary GPG home directories and reading files.
-	// Setup() (someState, error)
-	// Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
-
-	// isSignatureAuthorAccepted, given an image and a signature blob, returns:
-	// - sarAccepted if the signature has been verified against the appropriate public key
-	//   (where "appropriate public key" may depend on the contents of the signature);
-	//   in that case a parsed Signature should be returned.
-	// - sarRejected if the signature has not been verified;
-	//   in that case error must be non-nil, and should be a PolicyRequirementError if evaluation
-	//   succeeded but the result was rejection.
-	// - sarUnknown if this PolicyRequirement does not deal with signatures.
-	//   NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
-	//   Returning sarUnknown and a non-nil error value is invalid.
-	// WARNING: This makes the signature contents acceptable for further processing,
-	// but it does not necessarily mean that the contents of the signature are
-	// consistent with local policy.
-	// For example:
-	// - Do not use a true value to determine whether to run
-	//   a container based on this image; use IsRunningImageAllowed instead.
-	// - Just because a signature is accepted does not automatically mean the contents of the
-	//   signature are authorized to run code as root, or to affect system or cluster configuration.
-	isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
-
-	// isRunningImageAllowed returns true if the requirement allows running an image.
-	// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
-	// succeeded but the result was rejection.
-	// WARNING: This validates signatures and the manifest, but does not download or validate the
-	// layers. Users must validate that the layers match their expected digests.
-	isRunningImageAllowed(image types.UnparsedImage) (bool, error)
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-type PolicyReferenceMatch interface {
-	// matchesDockerReference decides whether a specific image identity is accepted for an image
-	// (or, usually, for the image's Reference().DockerReference()).  Note that
-	// image.Reference().DockerReference() may be nil.
-	matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
-}
-
-// PolicyContext encapsulates a policy and possible cached state
-// for speeding up its evaluation.
-type PolicyContext struct {
-	Policy *Policy
-	state  policyContextState // Internal consistency checking
-}
-
-// policyContextState is used internally to verify the users are not misusing a PolicyContext.
-type policyContextState string
-
-const (
-	pcInvalid      policyContextState = ""
-	pcInitializing policyContextState = "Initializing"
-	pcReady        policyContextState = "Ready"
-	pcInUse        policyContextState = "InUse"
-	pcDestroying   policyContextState = "Destroying"
-	pcDestroyed    policyContextState = "Destroyed"
-)
-
-// changeState changes pc.state, or fails if the state is unexpected
-func (pc *PolicyContext) changeState(expected, new policyContextState) error {
-	if pc.state != expected {
-		return errors.Errorf(`Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
-	}
-	pc.state = new
-	return nil
-}
-
-// NewPolicyContext sets up and initializes a context for the specified policy.
-// The policy must not be modified while the context exists. FIXME: make a deep copy?
-// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
-func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
-	pc := &PolicyContext{Policy: policy, state: pcInitializing}
-	// FIXME: initialize
-	if err := pc.changeState(pcInitializing, pcReady); err != nil {
-		// Huh?!
This should never fail, we didn't give the pointer to anybody.
-		// Just give up and leave unclean state around.
-		return nil, err
-	}
-	return pc, nil
-}
-
-// Destroy should be called when the user of the context is done with it.
-func (pc *PolicyContext) Destroy() error {
-	if err := pc.changeState(pcReady, pcDestroying); err != nil {
-		return err
-	}
-	// FIXME: destroy
-	return pc.changeState(pcDestroying, pcDestroyed)
-}
-
-// policyIdentityLogName returns a string description of the image identity for policy purposes.
-// ONLY use this for log messages, not for any decisions!
-func policyIdentityLogName(ref types.ImageReference) string {
-	return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
-}
-
-// requirementsForImageRef selects the appropriate requirements for ref.
-func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
-	// Do we have a PolicyTransportScopes for this transport?
-	transportName := ref.Transport().Name()
-	if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
-		// Look for a full match.
-		identity := ref.PolicyConfigurationIdentity()
-		if req, ok := transportScopes[identity]; ok {
-			logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
-			return req
-		}
-
-		// Look for a match of the possible parent namespaces.
-		for _, name := range ref.PolicyConfigurationNamespaces() {
-			if req, ok := transportScopes[name]; ok {
-				logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
-				return req
-			}
-		}
-
-		// Look for a default match for the transport.
-		if req, ok := transportScopes[""]; ok {
-			logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
-			return req
-		}
-	}
-
-	logrus.Debugf(" Using default policy section")
-	return pc.Policy.Default
-}
-
-// GetSignaturesWithAcceptedAuthor returns those signatures from an image
-// for which the policy accepts the author (and which have been successfully
-// verified).
-// NOTE: This may legitimately return an empty list and no error, if the image
-// has no signatures or only invalid signatures.
-// WARNING: This makes the signature contents acceptable for further processing,
-// but it does not necessarily mean that the contents of the signature are
-// consistent with local policy.
-// For example:
-// - Do not use the existence of an accepted signature to determine whether to run
-//   a container based on this image; use IsRunningImageAllowed instead.
-// - Just because a signature is accepted does not automatically mean the contents of the
-//   signature are authorized to run code as root, or to affect system or cluster configuration.
-func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return nil, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - sigs = nil - finalErr = err - } - }() - - logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - // FIXME: rename Signatures to UnverifiedSignatures - // FIXME: pass context.Context - unverifiedSignatures, err := image.Signatures(context.TODO()) - if err != nil { - return nil, err - } - - res := make([]*Signature, 0, len(unverifiedSignatures)) - for sigNumber, sig := range unverifiedSignatures { - var acceptedSig *Signature // non-nil if accepted - rejected := false - // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?! - logrus.Debugf("Evaluating signature %d:", sigNumber) - interpretingReqs: - for reqNumber, req := range reqs { - // FIXME: Log the requirement itself? For now, we use just the number. - // FIXME: supply state - switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res { - case sarAccepted: - if as == nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature accepted", reqNumber) - if acceptedSig == nil { - acceptedSig = as - } else if *as != *acceptedSig { // Coverage: this should never happen - // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents? - logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber) - rejected = true - acceptedSig = nil - break interpretingReqs - } - case sarRejected: - logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - case sarUnknown: - if err != nil { // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error()) - rejected = true - break interpretingReqs - } - logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber) - default: // Coverage: this should never happen - logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res)) - rejected = true - break interpretingReqs - } - } - // This also handles the (invalid) case of empty reqs, by rejecting the signature. - if acceptedSig != nil && !rejected { - logrus.Debugf(" Overall: OK, signature accepted") - res = append(res, acceptedSig) - } else { - logrus.Debugf(" Overall: Signature not accepted") - } - } - return res, nil -} - -// IsRunningImageAllowed returns true iff the policy allows running the image. -// If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation -// succeeded but the result was rejection. -// WARNING: This validates signatures and the manifest, but does not download or validate the -// layers. Users must validate that the layers match their expected digests. 
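A usage sketch of the evaluation API documented above; `img` stands for any `types.UnparsedImage` obtained from a transport (an assumption for brevity):

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
	"github.com/containers/image/types"
)

// checkImage sketches the lifecycle of the removed PolicyContext API:
// construct once, evaluate per image, Destroy when done.
func checkImage(policy *signature.Policy, img types.UnparsedImage) error {
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer pc.Destroy() // error intentionally ignored in this sketch

	allowed, err := pc.IsRunningImageAllowed(img)
	if err != nil {
		return err // rejection: err is typically a PolicyRequirementError
	}
	fmt.Println("allowed:", allowed)
	return nil
}
```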
-func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) { - if err := pc.changeState(pcReady, pcInUse); err != nil { - return false, err - } - defer func() { - if err := pc.changeState(pcInUse, pcReady); err != nil { - res = false - finalErr = err - } - }() - - logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference())) - reqs := pc.requirementsForImageRef(image.Reference()) - - if len(reqs) == 0 { - return false, PolicyRequirementError("List of verification policy requirements must not be empty") - } - - for reqNumber, req := range reqs { - // FIXME: supply state - allowed, err := req.isRunningImageAllowed(image) - if !allowed { - logrus.Debugf("Requirement %d: denied, done", reqNumber) - return false, err - } - logrus.Debugf(" Requirement %d: allowed", reqNumber) - } - // We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image. - logrus.Debugf("Overall: allowed") - return true, nil -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go deleted file mode 100644 index 898958012e..0000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go +++ /dev/null @@ -1,18 +0,0 @@ -// Policy evaluation for prSignedBaseLayer. - -package signature - -import ( - "github.com/containers/image/types" - "github.com/sirupsen/logrus" -) - -func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarUnknown, nil, nil -} - -func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - // FIXME? Reject this at policy parsing time already? - logrus.Errorf("signedBaseLayer not implemented yet!") - return false, PolicyRequirementError("signedBaseLayer not implemented yet!") -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go deleted file mode 100644 index 56665124c0..0000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_signedby.go +++ /dev/null @@ -1,131 +0,0 @@ -// Policy evaluation for prSignedBy. - -package signature - -import ( - "context" - "fmt" - "io/ioutil" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/manifest" - "github.com/containers/image/types" - "github.com/opencontainers/go-digest" -) - -func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - switch pr.KeyType { - case SBKeyTypeGPGKeys: - case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs: - // FIXME? Reject this at policy parsing time already? 
-		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
-	default:
-		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
-		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
-	}
-
-	if pr.KeyPath != "" && pr.KeyData != nil {
-		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
-	}
-	// FIXME: move this to per-context initialization
-	var data []byte
-	if pr.KeyData != nil {
-		data = pr.KeyData
-	} else {
-		d, err := ioutil.ReadFile(pr.KeyPath)
-		if err != nil {
-			return sarRejected, nil, err
-		}
-		data = d
-	}
-
-	// FIXME: move this to per-context initialization
-	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
-	if err != nil {
-		return sarRejected, nil, err
-	}
-	defer mech.Close()
-	if len(trustedIdentities) == 0 {
-		return sarRejected, nil, PolicyRequirementError("No public keys imported")
-	}
-
-	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
-		validateKeyIdentity: func(keyIdentity string) error {
-			for _, trustedIdentity := range trustedIdentities {
-				if keyIdentity == trustedIdentity {
-					return nil
-				}
-			}
-			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
-			// not be reachable.
-			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
-		},
-		validateSignedDockerReference: func(ref string) error {
-			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
-				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
-			}
-			return nil
-		},
-		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
-			m, _, err := image.Manifest()
-			if err != nil {
-				return err
-			}
-			digestMatches, err := manifest.MatchesDigest(m, digest)
-			if err != nil {
-				return err
-			}
-			if !digestMatches {
-				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
-			}
-			return nil
-		},
-	})
-	if err != nil {
-		return sarRejected, nil, err
-	}
-
-	return sarAccepted, signature, nil
-}
-
-func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
-	// FIXME: pass context.Context
-	sigs, err := image.Signatures(context.TODO())
-	if err != nil {
-		return false, err
-	}
-	var rejections []error
-	for _, s := range sigs {
-		var reason error
-		switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
-		case sarAccepted:
-			// One accepted signature is enough.
-			return true, nil
-		case sarRejected:
-			reason = err
-		case sarUnknown:
-			// Huh?! This should not happen at all; treat it as any other invalid value.
- fallthrough - default: - reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res)) - } - rejections = append(rejections, reason) - } - var summary error - switch len(rejections) { - case 0: - summary = PolicyRequirementError("A signature was required, but no signature exists") - case 1: - summary = rejections[0] - default: - var msgs []string - for _, e := range rejections { - msgs = append(msgs, e.Error()) - } - summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s", - strings.Join(msgs, "; "))) - } - return false, summary -} diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go deleted file mode 100644 index 19a71e6d99..0000000000 --- a/vendor/github.com/containers/image/signature/policy_eval_simple.go +++ /dev/null @@ -1,28 +0,0 @@ -// Policy evaluation for the various simple PolicyRequirement types. - -package signature - -import ( - "fmt" - - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - // prInsecureAcceptAnything semantics: Every image is allowed to run, - // but this does not consider the signature as verified. - return sarUnknown, nil, nil -} - -func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - return true, nil -} - -func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { - return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference()))) -} - -func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) { - return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference()))) -} diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go deleted file mode 100644 index a8dad67701..0000000000 --- a/vendor/github.com/containers/image/signature/policy_reference_match.go +++ /dev/null @@ -1,101 +0,0 @@ -// PolicyReferenceMatch implementations. - -package signature - -import ( - "fmt" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images. 
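Before the matcher implementations below, a short illustration of why both sides are run through `reference.ParseNormalizedNamed`: short names expand to their canonical form before any comparison (a sketch; the outputs are noted in comments):

```go
package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// Errors ignored for brevity; both inputs are well-formed.
	intended, _ := reference.ParseNormalizedNamed("busybox:1.29")
	signed, _ := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")

	fmt.Println(intended.Name()) // docker.io/library/busybox
	// Same repository, so a repository-level match (prmMatchRepository) accepts:
	fmt.Println(signed.Name() == intended.Name()) // true
	// Different tags, so an exact match (prmMatchExact) rejects:
	fmt.Println(signed.String() == intended.String()) // false
}
```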
-func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
-	r1 := image.Reference().DockerReference()
-	if r1 == nil {
-		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
-			transports.ImageName(image.Reference())))
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-
-	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(signature) {
-		return false
-	}
-	switch intended.(type) {
-	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
-		return signature.String() == intended.String()
-	case reference.Canonical:
-		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy and in UnparsedImage.Manifest.
-		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
-		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
-		return signature.Name() == intended.Name()
-	default: // !reference.IsNameOnly(intended)
-		return false
-	}
-}
-
-func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
-
-// parseDockerReferences converts two reference strings into parsed entities, failing on any error
-func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
-	r1, err := reference.ParseNormalizedNamed(s1)
-	if err != nil {
-		return nil, nil, err
-	}
-	r2, err := reference.ParseNormalizedNamed(s2)
-	if err != nil {
-		return nil, nil, err
-	}
-	return r1, r2, nil
-}
-
-func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
-	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
-		return false
-	}
-	return signature.String() == intended.String()
-}
-
-func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
-	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
-	if err != nil {
-		return false
-	}
-	return signature.Name() == intended.Name()
-}
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
deleted file mode 100644
index 4cd770f11c..0000000000
--- a/vendor/github.com/containers/image/signature/policy_types.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// This defines types used to represent a signature verification policy in memory.
-// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
-// built using the constructor functions provided in policy_config.go.
-
-package signature
-
-// NOTE: Keep this in sync with docs/policy.json.md!
-
-// Policy defines requirements for considering a signature, or an image, valid.
-type Policy struct {
-	// Default applies to any image which does not have a matching policy in Transports.
-	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
-	// if the image matches none of the scopes.
-	Default    PolicyRequirements               `json:"default"`
-	Transports map[string]PolicyTransportScopes `json:"transports"`
-}
-
-// PolicyTransportScopes defines policies for images for a specific transport,
-// for various scopes, the map keys.
-// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
-// there is one scope precisely matching a single image, and namespace scopes as prefixes
-// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
-// The empty scope, if it exists, is considered a parent namespace of all other scopes.
-// The most specific scope wins; duplication is prohibited (hard failure).
-type PolicyTransportScopes map[string]PolicyRequirements
-
-// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
-// Must not be empty, frequently will only contain a single element.
-type PolicyRequirements []PolicyRequirement
-
-// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
-// The type is public, but its definition is private.
-
-// prCommon is the common type field in a JSON encoding of PolicyRequirement.
-type prCommon struct {
-	Type prTypeIdentifier `json:"type"`
-}
-
-// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
-type prTypeIdentifier string
-
-const (
-	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
-	prTypeReject                 prTypeIdentifier = "reject"
-	prTypeSignedBy               prTypeIdentifier = "signedBy"
-	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
-)
-
-// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
-// every image is allowed to run.
-// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
-// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
-// FIXME? Better name?
-type prInsecureAcceptAnything struct {
-	prCommon
-}
-
-// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
-type prReject struct {
-	prCommon
-}
-
-// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
-type prSignedBy struct {
-	prCommon
-
-	// KeyType specifies what kind of key reference KeyPath/KeyData is.
-	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
-	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
-	KeyType sbKeyType `json:"keyType"`
-
-	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
-	KeyPath string `json:"keyPath,omitempty"`
-	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
-	KeyData []byte `json:"keyData,omitempty"`
-
-	// SignedIdentity specifies what image identity the signature must be claiming about the image.
-	// Defaults to "matchRepoDigestOrExact" if not specified.
-	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
-}
-
-// sbKeyType are the allowed values for prSignedBy.KeyType
-type sbKeyType string
-
-const (
-	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
-	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
-	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
-	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
-	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
-	// FIXME: PEM, DER?
-	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
-	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
-	// FIXME: PEM, DER?
-	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
-)
-
-// prSignedBaseLayer is a PolicyRequirement with type = prSignedBaseLayer: the image has a specified, correctly signed, base image.
-type prSignedBaseLayer struct {
-	prCommon
-	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
-	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
-}
-
-// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
-// The type is public, but its implementation is private.
-
-// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
-type prmCommon struct {
-	Type prmTypeIdentifier `json:"type"`
-}
-
-// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
-type prmTypeIdentifier string
-
-const (
-	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
-	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
-	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
-	prmTypeExactReference         prmTypeIdentifier = "exactReference"
-	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
-)
-
-// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
-type prmMatchExact struct {
-	prmCommon
-}
-
-// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
-// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
-type prmMatchRepoDigestOrExact struct {
-	prmCommon
-}
-
-// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
-type prmMatchRepository struct {
-	prmCommon
-}
-
-// prmExactReference is a PolicyReferenceMatch with type = prmExactReference: matches a specified reference exactly.
-type prmExactReference struct {
-	prmCommon
-	DockerReference string `json:"dockerReference"`
-}
-
-// prmExactRepository is a PolicyReferenceMatch with type = prmExactRepository: matches a specified repository, with any tag.
-type prmExactRepository struct {
-	prmCommon
-	DockerRepository string `json:"dockerRepository"`
-}
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
deleted file mode 100644
index 41f13f72fd..0000000000
--- a/vendor/github.com/containers/image/signature/signature.go
+++ /dev/null
@@ -1,280 +0,0 @@
-// Note: Consider the API unstable until the code supports at least three different image formats or transports.
-
-// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
-
-package signature
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-
-	"github.com/pkg/errors"
-
-	"github.com/containers/image/version"
-	"github.com/opencontainers/go-digest"
-)
-
-const (
-	signatureType = "atomic container signature"
-)
-
-// InvalidSignatureError is returned when parsing an invalid signature.
-type InvalidSignatureError struct {
-	msg string
-}
-
-func (err InvalidSignatureError) Error() string {
-	return err.msg
-}
-
-// Signature is a parsed content of a signature.
-// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
-type Signature struct {
-	DockerManifestDigest digest.Digest
-	DockerReference      string // FIXME: more precise type?
-}
-
-// untrustedSignature is a parsed content of a signature.
-type untrustedSignature struct {
-	UntrustedDockerManifestDigest digest.Digest
-	UntrustedDockerReference      string // FIXME: more precise type?
-	UntrustedCreatorID            *string
-	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
-	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
-	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
-	// we would add another field, UntrustedTimestampNS int64.
-	UntrustedTimestamp *int64
-}
-
-// UntrustedSignatureInformation is information available in an untrusted signature.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading -// (including things like “✅ Verified by $authority”) -type UntrustedSignatureInformation struct { - UntrustedDockerManifestDigest digest.Digest - UntrustedDockerReference string // FIXME: more precise type? - UntrustedCreatorID *string - UntrustedTimestamp *time.Time - UntrustedShortKeyIdentifier string -} - -// newUntrustedSignature returns an untrustedSignature object with -// the specified primary contents and appropriate metadata. -func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature { - // Use intermediate variables for these values so that we can take their addresses. - // Golang guarantees that they will have a new address on every execution. - creatorID := "atomic " + version.Version - timestamp := time.Now().Unix() - return untrustedSignature{ - UntrustedDockerManifestDigest: dockerManifestDigest, - UntrustedDockerReference: dockerReference, - UntrustedCreatorID: &creatorID, - UntrustedTimestamp: ×tamp, - } -} - -// Compile-time check that untrustedSignature implements json.Marshaler -var _ json.Marshaler = (*untrustedSignature)(nil) - -// MarshalJSON implements the json.Marshaler interface. -func (s untrustedSignature) MarshalJSON() ([]byte, error) { - if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" { - return nil, errors.New("Unexpected empty signature content") - } - critical := map[string]interface{}{ - "type": signatureType, - "image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()}, - "identity": map[string]string{"docker-reference": s.UntrustedDockerReference}, - } - optional := map[string]interface{}{} - if s.UntrustedCreatorID != nil { - optional["creator"] = *s.UntrustedCreatorID - } - if s.UntrustedTimestamp != nil { - optional["timestamp"] = *s.UntrustedTimestamp - } - signature := map[string]interface{}{ - "critical": critical, - "optional": optional, - } - return json.Marshal(signature) -} - -// Compile-time check that untrustedSignature implements json.Unmarshaler -var _ json.Unmarshaler = (*untrustedSignature)(nil) - -// UnmarshalJSON implements the json.Unmarshaler interface -func (s *untrustedSignature) UnmarshalJSON(data []byte) error { - err := s.strictUnmarshalJSON(data) - if err != nil { - if _, ok := err.(jsonFormatError); ok { - err = InvalidSignatureError{msg: err.Error()} - } - } - return err -} - -// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type. -// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller. 
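For reference, a sketch of the payload shape that `MarshalJSON` above produces and `strictUnmarshalJSON` below re-parses; all values are illustrative placeholders:

```go
package signature

// exampleSignaturePayload illustrates the wire format produced by
// untrustedSignature.MarshalJSON and accepted by strictUnmarshalJSON.
// The digest, reference, creator, and timestamp are all placeholders.
const exampleSignaturePayload = `{
  "critical": {
    "type": "atomic container signature",
    "image": {"docker-manifest-digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"},
    "identity": {"docker-reference": "docker.io/library/busybox:latest"}
  },
  "optional": {
    "creator": "atomic 0.1.0",
    "timestamp": 1525000000
  }
}`
```

Note that `critical` is parsed with the exact-fields helper (no extra keys allowed), while unknown keys under `optional` are deliberately ignored, matching the code below.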
-func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
-	var critical, optional json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
-		"critical": &critical,
-		"optional": &optional,
-	}); err != nil {
-		return err
-	}
-
-	var creatorID string
-	var timestamp float64
-	var gotCreatorID, gotTimestamp = false, false
-	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
-		switch key {
-		case "creator":
-			gotCreatorID = true
-			return &creatorID
-		case "timestamp":
-			gotTimestamp = true
-			return &timestamp
-		default:
-			var ignore interface{}
-			return &ignore
-		}
-	}); err != nil {
-		return err
-	}
-	if gotCreatorID {
-		s.UntrustedCreatorID = &creatorID
-	}
-	if gotTimestamp {
-		intTimestamp := int64(timestamp)
-		if float64(intTimestamp) != timestamp {
-			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
-		}
-		s.UntrustedTimestamp = &intTimestamp
-	}
-
-	var t string
-	var image, identity json.RawMessage
-	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
-		"type":     &t,
-		"image":    &image,
-		"identity": &identity,
-	}); err != nil {
-		return err
-	}
-	if t != signatureType {
-		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
-	}
-
-	var digestString string
-	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
-		"docker-manifest-digest": &digestString,
-	}); err != nil {
-		return err
-	}
-	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
-
-	return paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
-		"docker-reference": &s.UntrustedDockerReference,
-	})
-}
-
-// Sign formats the signature and returns a blob signed using mech and keyIdentity
-// (If it seems surprising that this is a method on untrustedSignature, note that there
-// isn’t a good reason to think that a key used by the user is trusted by any component
-// of the system just because it is a private key — actually the presence of a private key
-// on the system increases the likelihood of a successful attack on that private key
-// on that particular system.)
-func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
-	json, err := json.Marshal(s)
-	if err != nil {
-		return nil, err
-	}
-
-	return mech.Sign(json, keyIdentity)
-}
-
-// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
-// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
-// the policy.  We use an object instead of supplying func parameters to verifyAndExtractSignature
-// because the functions have the same or similar types, so there is a risk of exchanging the functions;
-// named members of this struct are more explicit.
-type signatureAcceptanceRules struct {
-	validateKeyIdentity                func(string) error
-	validateSignedDockerReference      func(string) error
-	validateSignedDockerManifestDigest func(digest.Digest) error
-}
-
-// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
-// match expected values, both as specified by rules, and returns it
-func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
-	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
-	if err != nil {
-		return nil, err
-	}
-	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
-		return nil, err
-	}
-
-	var unmatchedSignature untrustedSignature
-	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
-		return nil, err
-	}
-	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
-		return nil, err
-	}
-	// signatureAcceptanceRules have accepted this value.
-	return &Signature{
-		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
-		DockerReference:      unmatchedSignature.UntrustedDockerReference,
-	}, nil
-}
-
-// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
-// WITHOUT doing any cryptographic verification.
-// This may be useful when debugging signature verification failures,
-// or when managing a set of signatures on a single image.
-//
-// WARNING: Do not use the contents of this for ANY security decisions,
-// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
-// There is NO REASON to expect the values to be correct, or not intentionally misleading
-// (including things like “✅ Verified by $authority”)
-func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
-	// NOTE: This should eventually do format autodetection.
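A usage sketch for this debugging helper, whose body continues below; `sigBlob` is assumed to be a raw signature blob fetched from signature storage:

```go
package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

// inspectSignature dumps what can be read from a signature blob WITHOUT
// verifying it — debugging only, never for security decisions.
func inspectSignature(sigBlob []byte) error {
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(sigBlob)
	if err != nil {
		return err
	}
	fmt.Println("claimed reference:", info.UntrustedDockerReference)
	fmt.Println("claimed digest:   ", info.UntrustedDockerManifestDigest)
	if info.UntrustedTimestamp != nil {
		fmt.Println("claimed timestamp:", info.UntrustedTimestamp.Format("2006-01-02"))
	}
	return nil
}
```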
-	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
-	if err != nil {
-		return nil, err
-	}
-	defer mech.Close()
-
-	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
-	if err != nil {
-		return nil, err
-	}
-	var untrustedDecodedContents untrustedSignature
-	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
-		return nil, InvalidSignatureError{msg: err.Error()}
-	}
-
-	var timestamp *time.Time // = nil
-	if untrustedDecodedContents.UntrustedTimestamp != nil {
-		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
-		timestamp = &ts
-	}
-	return &UntrustedSignatureInformation{
-		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
-		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
-		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
-		UntrustedTimestamp:            timestamp,
-		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
-	}, nil
-}
diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go
deleted file mode 100644
index 038195c16c..0000000000
--- a/vendor/github.com/containers/image/storage/storage_image.go
+++ /dev/null
@@ -1,808 +0,0 @@
-// +build !containers_image_storage_stub
-
-package storage
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync/atomic"
-
-	"github.com/containers/image/image"
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/types"
-	"github.com/containers/storage"
-	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/ioutils"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
-
-var (
-	// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
-	// with a digest-based name that doesn't match its contents.
-	ErrBlobDigestMismatch = errors.New("blob digest mismatch")
-	// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
-	// with an expected size that doesn't match the reader.
-	ErrBlobSizeMismatch = errors.New("blob size mismatch")
-	// ErrNoManifestLists is returned when GetManifest() is called
-	// with a non-nil instanceDigest.
-	ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
-	// ErrNoSuchImage is returned when we attempt to access an image which
-	// doesn't exist in the storage area.
- ErrNoSuchImage = storage.ErrNotAnImage -) - -type storageImageSource struct { - imageRef storageReference - ID string - layerPosition map[digest.Digest]int // Where we are in reading a blob's layers - cachedManifest []byte // A cached copy of the manifest, if already known, or nil - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice -} - -type storageImageDestination struct { - image types.ImageCloser - systemContext *types.SystemContext - imageRef storageReference // The reference we'll use to name the image - publicRef storageReference // The reference we return when asked about the name we'll give to the image - directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs - manifest []byte // Manifest contents, temporary - signatures []byte // Signature contents, temporary - blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs - fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes - filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them - SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice -} - -type storageImageCloser struct { - types.ImageCloser - size int64 -} - -// newImageSource sets up an image for reading. -func newImageSource(imageRef storageReference) (*storageImageSource, error) { - // First, locate the image. - img, err := imageRef.resolveImage() - if err != nil { - return nil, err - } - - // Build the reader object. - image := &storageImageSource{ - imageRef: imageRef, - ID: img.ID, - layerPosition: make(map[digest.Digest]int), - SignatureSizes: []int{}, - } - if img.Metadata != "" { - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "error decoding metadata for source image") - } - } - return image, nil -} - -// Reference returns the image reference that we used to find this image. -func (s storageImageSource) Reference() types.ImageReference { - return s.imageRef -} - -// Close cleans up any resources we tied up while reading the image. -func (s storageImageSource) Close() error { - return nil -} - -// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -// getBlobAndLayerID reads the data blob or filesystem layer which matches the digest and size, if given. -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - var layer storage.Layer - var diffOptions *storage.DiffOptions - // We need a valid digest value. - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - // Check if the blob corresponds to a diff that was used to initialize any layers. Our - // callers should try to retrieve layers using their uncompressed digests, so no need to - // check if they're using one of the compressed digests, which we can't reproduce anyway. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) - // If it's not a layer, then it must be a data item.
- if len(layers) == 0 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // Step through the list of matching layers. Tests may want to verify that if we have multiple layers - // which claim to have the same contents, that we actually do have multiple layers, otherwise we could - // just go ahead and use the first one every time. - i := s.layerPosition[info.Digest] - s.layerPosition[info.Digest] = i + 1 - if len(layers) > 0 { - layer = layers[i%len(layers)] - } - // Force the storage layer to not try to match any compression that was used when the layer was first - // handed to it. - noCompression := archive.Uncompressed - diffOptions = &storage.DiffOptions{ - Compression: &noCompression, - } - if layer.UncompressedSize < 0 { - n = -1 - } else { - n = layer.UncompressedSize - } - logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) - rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) - if err != nil { - return nil, -1, "", err - } - return rc, n, layer.ID, err -} - -// GetManifest() reads the image's manifest. -func (s *storageImageSource) GetManifest(instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { - if instanceDigest != nil { - return nil, "", ErrNoManifestLists - } - if len(s.cachedManifest) == 0 { - // We stored the manifest as an item named after storage.ImageDigestBigDataKey. - cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, storage.ImageDigestBigDataKey) - if err != nil { - return nil, "", err - } - s.cachedManifest = cachedBlob - } - return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err -} - -// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of -// the image, after they've been decompressed. -func (s *storageImageSource) LayerInfosForCopy() []types.BlobInfo { - simg, err := s.imageRef.transport.store.Image(s.ID) - if err != nil { - logrus.Errorf("error reading image %q: %v", s.ID, err) - return nil - } - updatedBlobInfos := []types.BlobInfo{} - layerID := simg.TopLayer - _, manifestType, err := s.GetManifest(nil) - if err != nil { - logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) - return nil - } - uncompressedLayerType := "" - switch manifestType { - case imgspecv1.MediaTypeImageManifest: - uncompressedLayerType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: - // This is actually a compressed type, but there's no uncompressed type defined - uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType - } - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) - return nil - } - if layer.UncompressedDigest == "" { - logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) - return nil - } - if layer.UncompressedSize < 0 { - logrus.Errorf("uncompressed size for layer %q is unknown", layerID) - return nil - } - blobInfo := types.BlobInfo{ - Digest: layer.UncompressedDigest, - Size: layer.UncompressedSize, - MediaType: uncompressedLayerType, - } - updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) 
- layerID = layer.Parent - } - return updatedBlobInfos -} - -// GetSignatures() parses the image's signatures blob into a slice of byte slices. -func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) { - if instanceDigest != nil { - return nil, ErrNoManifestLists - } - var offset int - sigslice := [][]byte{} - signature := []byte{} - if len(s.SignatureSizes) > 0 { - signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) - } - signature = signatureBlob - } - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset) - } - return sigslice, nil -} - -// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until -// it's time to Commit() the image -func newImageDestination(ctx *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage") - if err != nil { - return nil, errors.Wrapf(err, "error creating a temporary directory") - } - // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite - // schema1 image manifests to remove embedded references, since that changes the manifest's - // digest, and that makes the image unusable if we subsequently try to access it using a - // reference that mentions the no-longer-correct digest. - publicRef := imageRef - publicRef.name = nil - image := &storageImageDestination{ - systemContext: ctx, - imageRef: imageRef, - publicRef: publicRef, - directory: directory, - blobDiffIDs: make(map[digest.Digest]digest.Digest), - fileSizes: make(map[digest.Digest]int64), - filenames: make(map[digest.Digest]string), - SignatureSizes: []int{}, - } - return image, nil -} - -// Reference returns a mostly-usable image reference that can't return a DockerReference, to -// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to -// remove image names that they contain which don't match the value we're using. -func (s storageImageDestination) Reference() types.ImageReference { - return s.publicRef -} - -// Close cleans up the temporary directory. -func (s *storageImageDestination) Close() error { - return os.RemoveAll(s.directory) -} - -// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed -// data when handing it to us. -func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't - // bother compressing them before handing them to us, if they're not already compressed. - return false -} - -// PutBlob stores a layer or data blob in our temporary directory, checking that any information -// in the blobinfo matches the incoming data. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - errorBlobInfo := types.BlobInfo{ - Digest: "", - Size: -1, - } - // Set up to digest the blob and count its size while saving it to a file.
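// [Editor's note] A minimal standalone sketch (not part of this diff) of the
// "signatures" big-data layout that GetSignatures above decodes: all
// signatures are stored as one concatenated blob, and SignatureSizes records
// the length of each entry in order.
package main

import "fmt"

func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	out := make([][]byte, 0, len(sizes))
	offset := 0
	for _, n := range sizes {
		if offset+n > len(blob) {
			return nil, fmt.Errorf("signatures data truncated at offset %d", offset)
		}
		out = append(out, blob[offset:offset+n])
		offset += n
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return out, nil
}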
- hasher := digest.Canonical.Digester() - if blobinfo.Digest.Validate() == nil { - if a := blobinfo.Digest.Algorithm(); a.Available() { - hasher = a.Digester() - } - } - diffID := digest.Canonical.Digester() - filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) - file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) - } - defer file.Close() - counter := ioutils.NewWriteCounter(hasher.Hash()) - reader := io.TeeReader(io.TeeReader(stream, counter), file) - decompressed, err := archive.DecompressStream(reader) - if err != nil { - return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") - } - // Copy the data to the file. - _, err = io.Copy(diffID.Hash(), decompressed) - decompressed.Close() - if err != nil { - return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) - } - // Ensure that any information that we were given about the blob is correct. - if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { - return errorBlobInfo, ErrBlobDigestMismatch - } - if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - return errorBlobInfo, ErrBlobSizeMismatch - } - // Record information about the blob. - s.blobDiffIDs[hasher.Digest()] = diffID.Digest() - s.fileSizes[hasher.Digest()] = counter.Count - s.filenames[hasher.Digest()] = filename - blobDigest := blobinfo.Digest - if blobDigest.Validate() != nil { - blobDigest = hasher.Digest() - } - blobSize := blobinfo.Size - if blobSize < 0 { - blobSize = counter.Count - } - return types.BlobInfo{ - Digest: blobDigest, - Size: blobSize, - MediaType: blobinfo.MediaType, - }, nil -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be -// reapplied using ReapplyBlob. -// -// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. -// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. -func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) { - if blobinfo.Digest == "" { - return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) - } - if err := blobinfo.Digest.Validate(); err != nil { - return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) - } - // Check if we've already cached it in a file. - if size, ok := s.fileSizes[blobinfo.Digest]; ok { - return true, size, nil - } - // Check if we have a wasn't-compressed layer in storage that's based on that blob. - layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Save this for completeness. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].UncompressedSize, nil - } - // Check if we have a was-compressed layer in storage that's based on that blob. 
- layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) - if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { - return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) - } - if len(layers) > 0 { - // Record the uncompressed value so that we can use it to calculate layer IDs. - s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest - return true, layers[0].CompressedSize, nil - } - // Nope, we don't have it. - return false, -1, nil -} - -// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the -// same one when it walks the list in the manifest. -func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - present, size, err := s.HasBlob(blobinfo) - if !present { - return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) - } - if err != nil { - return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) - } - blobinfo.Size = size - return blobinfo, nil -} - -// computeID computes a recommended image ID based on information we have so far. If -// the manifest is not of a type that we recognize, we return an empty value, indicating -// that since we don't have a recommendation, a random ID should be used if one needs -// to be allocated. -func (s *storageImageDestination) computeID(m manifest.Manifest) string { - // Build the diffID list. We need the decompressed sums that we've been calculating to - // fill in the DiffIDs. It's expected (but not enforced by us) that the number of - // diffIDs corresponds to the number of non-EmptyLayer entries in the history. - var diffIDs []digest.Digest - switch m.(type) { - case *manifest.Schema1: - // Build a list of the diffIDs we've generated for the non-throwaway FS layers, - // in reverse of the order in which they were originally listed. - s1, ok := m.(*manifest.Schema1) - if !ok { - // Shouldn't happen - logrus.Debugf("internal error reading schema 1 manifest") - return "" - } - for i, history := range s1.History { - compat := manifest.Schema1V1Compatibility{} - if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil { - logrus.Debugf("internal error reading schema 1 history: %v", err) - return "" - } - if compat.ThrowAway { - continue - } - blobSum := s1.FSLayers[i].BlobSum - diffID, ok := s.blobDiffIDs[blobSum] - if !ok { - logrus.Infof("error looking up diffID for layer %q", blobSum.String()) - return "" - } - diffIDs = append([]digest.Digest{diffID}, diffIDs...) - } - case *manifest.Schema2, *manifest.OCI1: - // We know the ID calculation for these formats doesn't actually use the diffIDs, - // so we don't need to populate the diffID list. - } - id, err := m.ImageID(diffIDs) - if err != nil { - return "" - } - return id -} - -// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig -// information out of it for Inspect(). -func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { - if info.Digest == "" { - return nil, errors.Errorf(`no digest supplied when reading blob`) - } - if err := info.Digest.Validate(); err != nil { - return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) - } - // Assume it's a file, since we're only calling this from a place that expects to read files. 
- if filename, ok := s.filenames[info.Digest]; ok { - contents, err2 := ioutil.ReadFile(filename) - if err2 != nil { - return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) - } - return contents, nil - } - // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. - return nil, errors.New("blob not found") -} - -func (s *storageImageDestination) Commit() error { - // Find the list of layer blobs. - if len(s.manifest) == 0 { - return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") - } - man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) - if err != nil { - return errors.Wrapf(err, "error parsing manifest") - } - layerBlobs := man.LayerInfos() - // Extract or find the layers. - lastLayer := "" - addedLayers := []string{} - for _, blob := range layerBlobs { - var diff io.ReadCloser - // Check if there's already a layer with the ID that we'd give to the result of applying - // this layer blob to its parent, if it has one, or the blob's hex value otherwise. - diffID, haveDiffID := s.blobDiffIDs[blob.Digest] - if !haveDiffID { - // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), - // or to even check if we had it. - logrus.Debugf("looking for diffID for blob %+v", blob.Digest) - has, _, err := s.HasBlob(blob) - if err != nil { - return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) - } - if !has { - return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) - } - diffID, haveDiffID = s.blobDiffIDs[blob.Digest] - if !haveDiffID { - return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) - } - } - id := diffID.Hex() - if lastLayer != "" { - id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() - } - if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { - // There's already a layer that should have the right contents, just reuse it. - lastLayer = layer.ID - continue - } - // Check if we cached a file with that blobsum. If we didn't already have a layer with - // the blob's contents, we should have gotten a copy. - if filename, ok := s.filenames[blob.Digest]; ok { - // Use the file's contents to initialize the layer. - file, err2 := os.Open(filename) - if err2 != nil { - return errors.Wrapf(err2, "error opening file %q", filename) - } - defer file.Close() - diff = file - } - if diff == nil { - // Try to find a layer with contents matching that blobsum. - layer := "" - layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } else { - layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) - if err2 == nil && len(layers) > 0 { - layer = layers[0].ID - } - } - if layer == "" { - return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) - } - // Use the layer's contents to initialize the new layer. - noCompression := archive.Uncompressed - diffOptions := &storage.DiffOptions{ - Compression: &noCompression, - } - diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) - if err2 != nil { - return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) - } - defer diff.Close() - } - if diff == nil { - // This shouldn't have happened. 
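// [Editor's note] A standalone sketch (not part of this diff) of the layer-ID
// derivation Commit() uses above: a base layer's ID is the hex of its own
// diffID, and every other layer's ID is the canonical digest of
// "parentID+diffID", so identical layer stacks always get identical IDs.
package main

import digest "github.com/opencontainers/go-digest"

func chainedLayerID(parentID string, diffID digest.Digest) string {
	if parentID == "" {
		return diffID.Hex()
	}
	return digest.Canonical.FromBytes([]byte(parentID + "+" + diffID.Hex())).Hex()
}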
- return errors.Errorf("error applying blob %q: content not found", blob.Digest) - } - // Build the new layer using the diff, regardless of where it came from. - layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) - if err != nil { - return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) - } - lastLayer = layer.ID - addedLayers = append([]string{lastLayer}, addedLayers...) - } - // If one of those blobs was a configuration blob, then we can try to dig out the date when the image - // was originally created, in case we're just copying it. If not, no harm done. - options := &storage.ImageOptions{} - if inspect, err := man.Inspect(s.getConfigBlob); err == nil { - logrus.Debugf("setting image creation date to %s", inspect.Created) - options.CreationDate = inspect.Created - } - if manifestDigest, err := manifest.Digest(s.manifest); err == nil { - options.Digest = manifestDigest - } - // Create the image record, pointing to the most-recently added layer. - intendedID := s.imageRef.id - if intendedID == "" { - intendedID = s.computeID(man) - } - oldNames := []string{} - img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) - if err != nil { - if errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", intendedID) - } - img, err = s.imageRef.transport.store.Image(intendedID) - if err != nil { - return errors.Wrapf(err, "error reading image %q", intendedID) - } - if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) - } - logrus.Debugf("reusing image ID %q", img.ID) - oldNames = append(oldNames, img.Names...) - } else { - logrus.Debugf("created new image ID %q", img.ID) - } - // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := make(map[digest.Digest]struct{}) - for blob := range s.filenames { - dataBlobs[blob] = struct{}{} - } - for _, layerBlob := range layerBlobs { - delete(dataBlobs, layerBlob.Digest) - } - for blob := range dataBlobs { - v, err := ioutil.ReadFile(s.filenames[blob]) - if err != nil { - return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) - } - } - // Set the reference's name on the image. - if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { - names := []string{} - if name != nil { - names = append(names, verboseName(name)) - } - if len(oldNames) > 0 { - names = append(names, oldNames...) 
- } - if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) - return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) - } - logrus.Debugf("set names of image %q to %v", img.ID, names) - } - // Save the manifest. Use storage.ImageDigestBigDataKey as the item's - // name, so that its digest can be used to locate the image in the Store. - if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return err - } - // Save the signatures, if we have any. - if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err - } - } - // Save our metadata. - metadata, err := json.Marshal(s) - if err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err) - return err - } - if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) - return err - } - logrus.Debugf("saved image metadata %q", string(metadata)) - } - return nil -} - -var manifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - manifest.DockerV2Schema1SignedMediaType, - manifest.DockerV2Schema1MediaType, -} - -func (s *storageImageDestination) SupportedManifestMIMETypes() []string { - return manifestMIMETypes -} - -// PutManifest writes the manifest to the destination. -func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.manifest = make([]byte, len(manifest)) - copy(s.manifest, manifest) - return nil -} - -// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was -// previously supplied to PutSignatures(). -func (s *storageImageDestination) SupportsSignatures() error { - return nil -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be -// uploaded to the image destination, true otherwise. -func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { - return false -} - -// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. -func (s *storageImageDestination) MustMatchRuntimeOS() bool { - return true -} - -// PutSignatures records the image's signatures for committing as a single data blob. 
-func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { - sizes := []int{} - sigblob := []byte{} - for _, sig := range signatures { - sizes = append(sizes, len(sig)) - newblob := make([]byte, len(sigblob)+len(sig)) - copy(newblob, sigblob) - copy(newblob[len(sigblob):], sig) - sigblob = newblob - } - s.signatures = sigblob - s.SignatureSizes = sizes - return nil -} - -// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) getSize() (int64, error) { - var sum int64 - // Size up the data blobs. - dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) - if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.ID) - } - for _, dataName := range dataNames { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) - if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) - } - sum += bigSize - } - // Add the signature sizes. - for _, sigSize := range s.SignatureSizes { - sum += int64(sigSize) - } - // Prepare to walk the layer list. - img, err := s.imageRef.transport.store.Image(s.ID) - if err != nil { - return -1, errors.Wrapf(err, "error reading image info %q", s.ID) - } - // Walk the layer list. - layerID := img.TopLayer - for layerID != "" { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layer.UncompressedSize - if layer.Parent == "" { - break - } - layerID = layer.Parent - } - return sum, nil -} - -// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the -// signatures, and the uncompressed sizes of all of the image's layers. -func (s *storageImageSource) Size() (int64, error) { - return s.getSize() -} - -// Size() returns the previously-computed size of the image, with no error. -func (s *storageImageCloser) Size() (int64, error) { - return s.size, nil -} - -// newImage creates an image that also knows its size -func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) { - src, err := newImageSource(s) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, src) - if err != nil { - return nil, err - } - size, err := src.getSize() - if err != nil { - return nil, err - } - return &storageImageCloser{ImageCloser: img, size: size}, nil -} diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go deleted file mode 100644 index bcb00f60eb..0000000000 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ /dev/null @@ -1,209 +0,0 @@ -// +build !containers_image_storage_stub - -package storage - -import ( - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/types" - "github.com/containers/storage" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte -// value hex-encoded into a 64-character string, and a reference to a Store -// where an image is, or would be, kept. 
-type storageReference struct { - transport storageTransport - reference string - id string - name reference.Named - tag string - digest digest.Digest -} - -func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { - // We take a copy of the transport, which contains a pointer to the - // store that it used for resolving this reference, so that the - // transport that we'll return from Transport() won't be affected by - // further calls to the original transport's SetStore() method. - return &storageReference{ - transport: transport, - reference: reference, - id: id, - name: name, - tag: tag, - digest: digest, - } -} - -// Resolve the reference's name to an image ID in the store, if there's already -// one present with the same name or ID, and return the image. -func (s *storageReference) resolveImage() (*storage.Image, error) { - if s.id == "" { - // Look for an image that has the expanded reference name as an explicit Name value. - image, err := s.transport.store.Image(s.reference) - if image != nil && err == nil { - s.id = image.ID - } - } - if s.id == "" && s.name != nil && s.digest != "" { - // Look for an image with the specified digest that has the same name, - // though possibly with a different tag or digest, as a Name value, so - // that the canonical reference can be implicitly resolved to the image. - images, err := s.transport.store.ImagesByDigest(s.digest) - if images != nil && err == nil { - repo := reference.FamiliarName(reference.TrimNamed(s.name)) - search: - for _, image := range images { - for _, name := range image.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if reference.FamiliarName(reference.TrimNamed(named)) == repo { - s.id = image.ID - break search - } - } - } - } - } - } - if s.id == "" { - logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport()) - return nil, errors.Wrapf(ErrNoSuchImage, "reference %q does not resolve to an image ID", s.StringWithinTransport()) - } - img, err := s.transport.store.Image(s.id) - if err != nil { - return nil, errors.Wrapf(err, "error reading image %q", s.id) - } - if s.name != nil { - repo := reference.FamiliarName(reference.TrimNamed(s.name)) - nameMatch := false - for _, name := range img.Names { - if named, err := reference.ParseNormalizedNamed(name); err == nil { - if reference.FamiliarName(reference.TrimNamed(named)) == repo { - nameMatch = true - break - } - } - } - if !nameMatch { - logrus.Errorf("no image matching reference %q found", s.StringWithinTransport()) - return nil, ErrNoSuchImage - } - } - return img, nil -} - -// Return a Transport object that defaults to using the same store that we used -// to build this reference object. -func (s storageReference) Transport() types.ImageTransport { - return &storageTransport{ - store: s.transport.store, - defaultUIDMap: s.transport.defaultUIDMap, - defaultGIDMap: s.transport.defaultGIDMap, - } -} - -// Return a name with a tag or digest, if we have either, else return it bare. 
-func (s storageReference) DockerReference() reference.Named { - if s.name == nil { - return nil - } - if s.tag != "" { - if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { - return namedTagged - } - } - if s.digest != "" { - if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { - return canonical - } - } - return s.name -} - -// Return a name with a tag, prefixed with the graph root and driver name, to -// disambiguate between images which may be present in multiple stores and -// share only their names. -func (s storageReference) StringWithinTransport() string { - optionsList := "" - options := s.transport.store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.reference == "" { - return storeSpec + "@" + s.id - } - if s.id == "" { - return storeSpec + s.reference - } - return storeSpec + s.reference + "@" + s.id -} - -func (s storageReference) PolicyConfigurationIdentity() string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - if s.name == nil { - return storeSpec + "@" + s.id - } - if s.id == "" { - return storeSpec + s.reference - } - return storeSpec + s.reference + "@" + s.id -} - -// Also accept policy that's tied to the combination of the graph root and -// driver name, to apply to all images stored in the Store, and to just the -// graph root, in case we're using multiple drivers in the same directory for -// some reason. -func (s storageReference) PolicyConfigurationNamespaces() []string { - storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "]" - driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" - namespaces := []string{} - if s.name != nil { - name := reference.TrimNamed(s.name) - components := strings.Split(name.String(), "/") - for len(components) > 0 { - namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) - components = components[:len(components)-1] - } - } - namespaces = append(namespaces, storeSpec) - namespaces = append(namespaces, driverlessStoreSpec) - return namespaces -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
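// [Editor's note] A standalone sketch (not part of this diff) of the
// name-peeling loop in PolicyConfigurationNamespaces above: each iteration
// drops one trailing path component, yielding scopes from most to least
// specific. The storeSpec and repo values in the usage note are hypothetical.
package main

import "strings"

func nameNamespaces(storeSpec, repo string) []string {
	namespaces := []string{}
	components := strings.Split(repo, "/")
	for len(components) > 0 {
		namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
		components = components[:len(components)-1]
	}
	return namespaces
}

// nameNamespaces("[overlay@/var/lib/containers/storage]", "docker.io/library/busybox")
// yields the storeSpec-prefixed scopes for "docker.io/library/busybox",
// "docker.io/library", and "docker.io", in that order.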
-func (s storageReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, s) -} - -func (s storageReference) DeleteImage(ctx *types.SystemContext) error { - img, err := s.resolveImage() - if err != nil { - return err - } - layers, err := s.transport.store.DeleteImage(img.ID, true) - if err == nil { - logrus.Debugf("deleted image %q", img.ID) - for _, layer := range layers { - logrus.Debugf("deleted layer %q", layer) - } - } - return err -} - -func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return newImageSource(s) -} - -func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ctx, s) -} diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go deleted file mode 100644 index f6ebcdc4a3..0000000000 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ /dev/null @@ -1,450 +0,0 @@ -// +build !containers_image_storage_stub - -package storage - -import ( - "path/filepath" - "strings" - - "github.com/pkg/errors" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -const ( - minimumTruncatedIDLength = 3 -) - -func init() { - transports.Register(Transport) -} - -var ( - // Transport is an ImageTransport that uses either a default - // storage.Store or one that it's explicitly told to use. - Transport StoreTransport = &storageTransport{} - // ErrInvalidReference is returned when ParseReference() is passed an - // empty reference. - ErrInvalidReference = errors.New("invalid reference") - // ErrPathNotAbsolute is returned when a graph root is not an absolute - // path name. - ErrPathNotAbsolute = errors.New("path name is not absolute") -) - -// StoreTransport is an ImageTransport that uses a storage.Store to parse -// references, either its own default or one that it's told to use. -type StoreTransport interface { - types.ImageTransport - // SetStore sets the default store for this transport. - SetStore(storage.Store) - // GetImage retrieves the image from the transport's store that's named - // by the reference. - GetImage(types.ImageReference) (*storage.Image, error) - // GetStoreImage retrieves the image from a specified store that's named - // by the reference. - GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error) - // ParseStoreReference parses a reference, overriding any store - // specification that it may contain. - ParseStoreReference(store storage.Store, reference string) (*storageReference, error) - // SetDefaultUIDMap sets the default UID map to use when opening stores. - SetDefaultUIDMap(idmap []idtools.IDMap) - // SetDefaultGIDMap sets the default GID map to use when opening stores. - SetDefaultGIDMap(idmap []idtools.IDMap) - // DefaultUIDMap returns the default UID map used when opening stores. - DefaultUIDMap() []idtools.IDMap - // DefaultGIDMap returns the default GID map used when opening stores. - DefaultGIDMap() []idtools.IDMap -} - -type storageTransport struct { - store storage.Store - defaultUIDMap []idtools.IDMap - defaultGIDMap []idtools.IDMap -} - -func (s *storageTransport) Name() string { - // Still haven't really settled on a name.
- return "containers-storage" -} - -// SetStore sets the Store object which the Transport will use for parsing -// references when information about a Store is not directly specified as part -// of the reference. If one is not set, the library will attempt to initialize -// one with default settings when a reference needs to be parsed. Calling -// SetStore does not affect previously parsed references. -func (s *storageTransport) SetStore(store storage.Store) { - s.store = store -} - -// SetDefaultUIDMap sets the default UID map to use when opening stores. -func (s *storageTransport) SetDefaultUIDMap(idmap []idtools.IDMap) { - s.defaultUIDMap = idmap -} - -// SetDefaultGIDMap sets the default GID map to use when opening stores. -func (s *storageTransport) SetDefaultGIDMap(idmap []idtools.IDMap) { - s.defaultGIDMap = idmap -} - -// DefaultUIDMap returns the default UID map used when opening stores. -func (s *storageTransport) DefaultUIDMap() []idtools.IDMap { - return s.defaultUIDMap -} - -// DefaultGIDMap returns the default GID map used when opening stores. -func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { - return s.defaultGIDMap -} - -// ParseStoreReference takes a name or an ID, tries to figure out which it is -// relative to the given store, and returns it in a reference object. -func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { - var name reference.Named - if ref == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference") - } - if ref[0] == '[' { - // Ignore the store specifier. - closeIndex := strings.IndexRune(ref, ']') - if closeIndex < 1 { - return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) - } - ref = ref[closeIndex+1:] - } - - // The last segment, if there's more than one, is either a digest from a reference, or an image ID. - split := strings.LastIndex(ref, "@") - idOrDigest := "" - if split != -1 { - // Peel off that last bit so that we can work on the rest. - idOrDigest = ref[split+1:] - if idOrDigest == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) - } - ref = ref[:split] - } - - // The middle segment (now the last segment), if there is one, is a digest. - split = strings.LastIndex(ref, "@") - sum := digest.Digest("") - if split != -1 { - sum = digest.Digest(ref[split+1:]) - if sum == "" { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - ref = ref[:split] - } - - // If we have something that unambiguously should be a digest, validate it, and then the third part, - // if we have one, as an ID. - id := "" - if sum != "" { - if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) - } - if err := sum.Validate(); err != nil { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) - } - id = idOrDigest - if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { - // The ID is a truncated version of the ID of an image that's present in local storage, - // so we might as well use the expanded value. - id = img.ID - } - } else if idOrDigest != "" { - // There was no middle portion, so the final portion could be either a digest or an ID. 
- if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { - // It's an ID. - id = idOrDigest - } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { - // It's a digest. - sum = idSum - } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { - // It's a truncated version of the ID of an image that's present in local storage, - // and we may need the expanded value. - id = img.ID - } else { - return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) - } - } - - // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's - // at least of what we guess is a reasonable minimum length, because we don't want a really short value - // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. - if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { - if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { - // It's a truncated version of the ID of an image that's present in local storage; - // we need to expand it. - id = img.ID - ref = "" - } - } - - // The initial portion is probably a name, possibly with a tag. - if ref != "" { - var err error - if name, err = reference.ParseNormalizedNamed(ref); err != nil { - return nil, errors.Wrapf(err, "error parsing named reference %q", ref) - } - } - if name == nil && sum == "" && id == "" { - return nil, errors.Errorf("error parsing reference") - } - - // Construct a copy of the store spec. - optionsList := "" - options := store.GraphOptions() - if len(options) > 0 { - optionsList = ":" + strings.Join(options, ",") - } - storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - - // Convert the name back into a reference string, if we got a name. - refname := "" - tag := "" - if name != nil { - if sum.Validate() == nil { - canonical, err := reference.WithDigest(name, sum) - if err != nil { - return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) - } - refname = verboseName(canonical) - } else { - name = reference.TagNameOnly(name) - tagged, ok := name.(reference.Tagged) - if !ok { - return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) - } - refname = verboseName(name) - tag = tagged.Tag() - } - } - if refname == "" { - logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id) - } else if id == "" { - logrus.Debugf("parsed reference to refname into %q", storeSpec+refname) - } else { - logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id) - } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil -} - -func (s *storageTransport) GetStore() (storage.Store, error) { - // Return the transport's previously-set store. If we don't have one - // of those, initialize one now. 
- if s.store == nil { - options := storage.DefaultStoreOptions - options.UIDMap = s.defaultUIDMap - options.GIDMap = s.defaultGIDMap - store, err := storage.GetStore(options) - if err != nil { - return nil, err - } - s.store = store - } - return s.store, nil -} - -// ParseReference takes a name and a tag or digest and/or ID -// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), -// possibly prefixed with a store specifier in the form "[_graphroot_]" or -// "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or -// "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", -// tries to figure out which it is, and returns it in a reference object. -// If _id_ is the ID of an image that's present in local storage, it can be truncated, and -// even be specified as if it were a _name_ value. -func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { - var store storage.Store - // Check if there's a store location prefix. If there is, then it - // needs to match a store that was previously initialized using - // storage.GetStore(), or be enough to let the storage library fill out - // the rest using knowledge that it has from elsewhere. - if reference[0] == '[' { - closeIndex := strings.IndexRune(reference, ']') - if closeIndex < 1 { - return nil, ErrInvalidReference - } - storeSpec := reference[1:closeIndex] - reference = reference[closeIndex+1:] - // Peel off a "driver@" from the start. - driverInfo := "" - driverSplit := strings.SplitN(storeSpec, "@", 2) - if len(driverSplit) != 2 { - if storeSpec == "" { - return nil, ErrInvalidReference - } - } else { - driverInfo = driverSplit[0] - if driverInfo == "" { - return nil, ErrInvalidReference - } - storeSpec = driverSplit[1] - if storeSpec == "" { - return nil, ErrInvalidReference - } - } - // Peel off a ":options" from the end. - var options []string - optionsSplit := strings.SplitN(storeSpec, ":", 2) - if len(optionsSplit) == 2 { - options = strings.Split(optionsSplit[1], ",") - storeSpec = optionsSplit[0] - } - // Peel off a "+runroot" from the new end. - runRootInfo := "" - runRootSplit := strings.SplitN(storeSpec, "+", 2) - if len(runRootSplit) == 2 { - runRootInfo = runRootSplit[1] - storeSpec = runRootSplit[0] - } - // The rest is our graph root. - rootInfo := storeSpec - // Check that any paths are absolute paths. - if rootInfo != "" && !filepath.IsAbs(rootInfo) { - return nil, ErrPathNotAbsolute - } - if runRootInfo != "" && !filepath.IsAbs(runRootInfo) { - return nil, ErrPathNotAbsolute - } - store2, err := storage.GetStore(storage.StoreOptions{ - GraphDriverName: driverInfo, - GraphRoot: rootInfo, - RunRoot: runRootInfo, - GraphDriverOptions: options, - UIDMap: s.defaultUIDMap, - GIDMap: s.defaultGIDMap, - }) - if err != nil { - return nil, err - } - store = store2 - } else { - // We didn't have a store spec, so use the default.
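// [Editor's note, illustrative only] A maximal reference of the shape
// ParseReference above accepts, with hypothetical paths and a hypothetical
// 64-hex image ID:
//
//	"[overlay@/var/lib/containers/storage+/run/containers/storage:o1=v1]busybox:latest@0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"
//
// peels apart as driver "overlay", graph root "/var/lib/containers/storage",
// run root "/run/containers/storage", and graph options ["o1=v1"]; the
// remainder "busybox:latest@0123...cdef" is handed to ParseStoreReference.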
- store2, err := s.GetStore() - if err != nil { - return nil, err - } - store = store2 - } - return s.ParseStoreReference(store, reference) -} - -func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { - dref := ref.DockerReference() - if dref != nil { - if img, err := store.Image(verboseName(dref)); err == nil { - return img, nil - } - } - if sref, ok := ref.(*storageReference); ok { - if sref.id != "" { - if img, err := store.Image(sref.id); err == nil { - return img, nil - } - } - tmpRef := *sref - if img, err := tmpRef.resolveImage(); err == nil { - return img, nil - } - } - return nil, storage.ErrImageUnknown -} - -func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { - store, err := s.GetStore() - if err != nil { - return nil, err - } - return s.GetStoreImage(store, ref) -} - -func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { - // Check that there's a store location prefix. Values we're passed are - // expected to come from PolicyConfigurationIdentity or - // PolicyConfigurationNamespaces, so if there's no store location, - // something's wrong. - if scope[0] != '[' { - return ErrInvalidReference - } - // Parse the store location prefix. - closeIndex := strings.IndexRune(scope, ']') - if closeIndex < 1 { - return ErrInvalidReference - } - storeSpec := scope[1:closeIndex] - scope = scope[closeIndex+1:] - storeInfo := strings.SplitN(storeSpec, "@", 2) - if len(storeInfo) == 1 && storeInfo[0] != "" { - // One component: the graph root. - if !filepath.IsAbs(storeInfo[0]) { - return ErrPathNotAbsolute - } - } else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" { - // Two components: the driver type and the graph root. - if !filepath.IsAbs(storeInfo[1]) { - return ErrPathNotAbsolute - } - } else { - // Anything else: scope specified in a form we don't - // recognize. - return ErrInvalidReference - } - // That might be all of it, and that's okay. - if scope == "" { - return nil - } - // But if there is anything left, it has to be a name, with or without - // a tag, with or without an ID, since we don't return namespace values - // that are just bare IDs. 
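// [Editor's note, illustrative only] Scope strings the validator above
// accepts, using hypothetical paths:
//
//	"[/var/lib/containers/storage]"                                   graph root only
//	"[overlay@/var/lib/containers/storage]"                           driver plus graph root
//	"[overlay@/var/lib/containers/storage]docker.io/library/busybox"  plus an image name
//	"[overlay@/var/lib/containers/storage]docker.io/library/busybox@<64-hex image ID>"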
- scopeInfo := strings.SplitN(scope, "@", 2) - if len(scopeInfo) == 1 && scopeInfo[0] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { - return err - } - } else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" { - _, err := reference.ParseNormalizedNamed(scopeInfo[0]) - if err != nil { - return err - } - _, err = digest.Parse("sha256:" + scopeInfo[1]) - if err != nil { - return err - } - } else { - return ErrInvalidReference - } - return nil -} - -func verboseName(r reference.Reference) string { - if r == nil { - return "" - } - named, isNamed := r.(reference.Named) - digested, isDigested := r.(reference.Digested) - tagged, isTagged := r.(reference.Tagged) - name := "" - tag := "" - sum := "" - if isNamed { - name = (reference.TrimNamed(named)).String() - } - if isTagged { - if tagged.Tag() != "" { - tag = ":" + tagged.Tag() - } - } - if isDigested { - if digested.Digest().Validate() == nil { - sum = "@" + digested.Digest().String() - } - } - return name + tag + sum -} diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go deleted file mode 100644 index a6ced5a0ee..0000000000 --- a/vendor/github.com/containers/image/tarball/doc.go +++ /dev/null @@ -1,48 +0,0 @@ -// Package tarball provides a way to generate images using one or more layer -// tarballs and an optional template configuration. -// -// An example: -// package main -// -// import ( -// cp "github.com/containers/image/copy" -// "github.com/containers/image/tarball" -// "github.com/containers/image/transports/alltransports" -// -// imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -// ) -// -// func imageFromTarball() { -// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") -// // - or - -// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") -// if err != nil { -// panic(err) -// } -// updater, ok := src.(tarball.ConfigUpdater) -// if !ok { -// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") -// } -// config := imgspecv1.Image{ -// Config: imgspecv1.ImageConfig{ -// Cmd: []string{"/bin/bash"}, -// }, -// } -// annotations := make(map[string]string) -// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" -// err = updater.ConfigUpdate(config, annotations) -// if err != nil { -// panic(err) -// } -// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") -// if err != nil { -// panic(err) -// } -// err = cp.Image(nil, dest, src, nil) -// if err != nil { -// panic(err) -// } -// } -package tarball diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go deleted file mode 100644 index a0819ac580..0000000000 --- a/vendor/github.com/containers/image/tarball/tarball_reference.go +++ /dev/null @@ -1,93 +0,0 @@ -package tarball - -import ( - "fmt" - "os" - "strings" - - "github.com/containers/image/docker/reference" - "github.com/containers/image/image" - "github.com/containers/image/types" - - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ConfigUpdater is an interface that ImageReferences for "tarball" images also -// implement.
It can be used to set values for a configuration, and to set -// image annotations which will be present in the images returned by the -// reference's NewImage() or NewImageSource() methods. -type ConfigUpdater interface { - ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error -} - -type tarballReference struct { - transport types.ImageTransport - config imgspecv1.Image - annotations map[string]string - filenames []string - stdin []byte -} - -// ConfigUpdate updates the image's default configuration and adds annotations -// which will be visible in source images created using this reference. -func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { - r.config = config - if r.annotations == nil { - r.annotations = make(map[string]string) - } - for k, v := range annotations { - r.annotations[k] = v - } - return nil -} - -func (r *tarballReference) Transport() types.ImageTransport { - return r.transport -} - -func (r *tarballReference) StringWithinTransport() string { - return strings.Join(r.filenames, ":") -} - -func (r *tarballReference) DockerReference() reference.Named { - return nil -} - -func (r *tarballReference) PolicyConfigurationIdentity() string { - return "" -} - -func (r *tarballReference) PolicyConfigurationNamespaces() []string { - return nil -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
-func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { - src, err := r.NewImageSource(ctx) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, src) - if err != nil { - src.Close() - return nil, err - } - return img, nil -} - -func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error { - for _, filename := range r.filenames { - if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { - return fmt.Errorf("error removing %q: %v", filename, err) - } - } - return nil -} - -func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return nil, fmt.Errorf(`"tarball:" locations can only be read from, not written to`) -} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go deleted file mode 100644 index 8b5b496dae..0000000000 --- a/vendor/github.com/containers/image/tarball/tarball_src.go +++ /dev/null @@ -1,260 +0,0 @@ -package tarball - -import ( - "bytes" - "compress/gzip" - "context" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "strings" - "time" - - "github.com/containers/image/types" - - digest "github.com/opencontainers/go-digest" - imgspecs "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type tarballImageSource struct { - reference tarballReference - filenames []string - diffIDs []digest.Digest - diffSizes []int64 - blobIDs []digest.Digest - blobSizes []int64 - blobTypes []string - config []byte - configID digest.Digest - configSize int64 - manifest []byte -} - -func (r *tarballReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - // Gather up the digests, sizes, and date information for all of the files. - filenames := []string{} - diffIDs := []digest.Digest{} - diffSizes := []int64{} - blobIDs := []digest.Digest{} - blobSizes := []int64{} - blobTimes := []time.Time{} - blobTypes := []string{} - for _, filename := range r.filenames { - var file *os.File - var err error - var blobSize int64 - var blobTime time.Time - var reader io.Reader - if filename == "-" { - blobSize = int64(len(r.stdin)) - blobTime = time.Now() - reader = bytes.NewReader(r.stdin) - } else { - file, err = os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) - } - defer file.Close() - reader = file - fileinfo, err := file.Stat() - if err != nil { - return nil, fmt.Errorf("error reading size of %q: %v", filename, err) - } - blobSize = fileinfo.Size() - blobTime = fileinfo.ModTime() - } - - // Default to assuming the layer is compressed. - layerType := imgspecv1.MediaTypeImageLayerGzip - - // Set up to digest the file as it is. - blobIDdigester := digest.Canonical.Digester() - reader = io.TeeReader(reader, blobIDdigester.Hash()) - - // Set up to digest the file after we maybe decompress it. 
- diffIDdigester := digest.Canonical.Digester() - uncompressed, err := gzip.NewReader(reader) - if err == nil { - // It is compressed, so the diffID is the digest of the uncompressed version - reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) - } else { - // It is not compressed, so the diffID and the blobID are going to be the same - diffIDdigester = blobIDdigester - layerType = imgspecv1.MediaTypeImageLayer - uncompressed = nil - } - n, err := io.Copy(ioutil.Discard, reader) - if err != nil { - return nil, fmt.Errorf("error reading %q: %v", filename, err) - } - if uncompressed != nil { - uncompressed.Close() - } - - // Grab our uncompressed and possibly-compressed digests and sizes. - filenames = append(filenames, filename) - diffIDs = append(diffIDs, diffIDdigester.Digest()) - diffSizes = append(diffSizes, n) - blobIDs = append(blobIDs, blobIDdigester.Digest()) - blobSizes = append(blobSizes, blobSize) - blobTimes = append(blobTimes, blobTime) - blobTypes = append(blobTypes, layerType) - } - - // Build the rootfs and history for the configuration blob. - rootfs := imgspecv1.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - created := time.Time{} - history := []imgspecv1.History{} - // Pick up the layer comment from the configuration's history list, if one is set. - comment := "imported from tarball" - if len(r.config.History) > 0 && r.config.History[0].Comment != "" { - comment = r.config.History[0].Comment - } - for i := range diffIDs { - createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) - history = append(history, imgspecv1.History{ - Created: &blobTimes[i], - CreatedBy: createdBy, - Comment: comment, - }) - // Use the mtime of the most recently modified file as the image's creation time. - if created.Before(blobTimes[i]) { - created = blobTimes[i] - } - } - - // Pick up other defaults from the config in the reference. - config := r.config - if config.Created == nil { - config.Created = &created - } - if config.Architecture == "" { - config.Architecture = runtime.GOARCH - } - if config.OS == "" { - config.OS = runtime.GOOS - } - config.RootFS = rootfs - config.History = history - - // Encode and digest the image configuration blob. - configBytes, err := json.Marshal(&config) - if err != nil { - return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) - } - configID := digest.Canonical.FromBytes(configBytes) - configSize := int64(len(configBytes)) - - // Populate a manifest with the configuration blob and the file as the single layer. - layerDescriptors := []imgspecv1.Descriptor{} - for i := range blobIDs { - layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ - Digest: blobIDs[i], - Size: blobSizes[i], - MediaType: blobTypes[i], - }) - } - annotations := make(map[string]string) - for k, v := range r.annotations { - annotations[k] = v - } - manifest := imgspecv1.Manifest{ - Versioned: imgspecs.Versioned{ - SchemaVersion: 2, - }, - Config: imgspecv1.Descriptor{ - Digest: configID, - Size: configSize, - MediaType: imgspecv1.MediaTypeImageConfig, - }, - Layers: layerDescriptors, - Annotations: annotations, - } - - // Encode the manifest. - manifestBytes, err := json.Marshal(&manifest) - if err != nil { - return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) - } - - // Return the image. 
- src := &tarballImageSource{ - reference: *r, - filenames: filenames, - diffIDs: diffIDs, - diffSizes: diffSizes, - blobIDs: blobIDs, - blobSizes: blobSizes, - blobTypes: blobTypes, - config: configBytes, - configID: configID, - configSize: configSize, - manifest: manifestBytes, - } - - return src, nil -} - -func (is *tarballImageSource) Close() error { - return nil -} - -func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, int64, error) { - // We should only be asked about things in the manifest. Maybe the configuration blob. - if blobinfo.Digest == is.configID { - return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil - } - // Maybe one of the layer blobs. - for i := range is.blobIDs { - if blobinfo.Digest == is.blobIDs[i] { - // We want to read that layer: open the file or memory block and hand it back. - if is.filenames[i] == "-" { - return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil - } - reader, err := os.Open(is.filenames[i]) - if err != nil { - return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) - } - return reader, is.blobSizes[i], nil - } - } - return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) -} - -// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). -// It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); -// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). -func (is *tarballImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return is.manifest, imgspecv1.MediaTypeImageManifest, nil -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - if instanceDigest != nil { - return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) - } - return nil, nil -} - -func (is *tarballImageSource) Reference() types.ImageReference { - return &is.reference -} - -// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
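The digest bookkeeping in NewImageSource above is worth isolating: each layer file gets a blob digest over the bytes as stored (possibly gzip-compressed) and a diffID over the uncompressed stream, computed in one pass by chaining TeeReaders, with gzip detection deciding whether the two differ. A standalone sketch of that technique, not the vendored code; the helper name and path are illustrative:

```go
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	digest "github.com/opencontainers/go-digest"
)

// digests returns the blob digest (bytes as stored) and the diffID
// (uncompressed bytes) of a layer file in one pass, mirroring the
// dual-TeeReader scheme of the deleted NewImageSource.
func digests(path string) (blobID, diffID digest.Digest, err error) {
	f, err := os.Open(path)
	if err != nil {
		return "", "", err
	}
	defer f.Close()

	blobDigester := digest.Canonical.Digester()
	var r io.Reader = io.TeeReader(f, blobDigester.Hash())

	diffDigester := digest.Canonical.Digester()
	if gz, gzErr := gzip.NewReader(r); gzErr == nil {
		// Compressed: the diffID is the digest of the decompressed stream.
		defer gz.Close()
		r = io.TeeReader(gz, diffDigester.Hash())
	} else {
		// Not compressed: blob digest and diffID end up identical.
		diffDigester = blobDigester
	}
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		return "", "", err
	}
	return blobDigester.Digest(), diffDigester.Digest(), nil
}

func main() {
	blobID, diffID, err := digests("/tmp/layer.tar.gz") // placeholder path
	if err != nil {
		panic(err)
	}
	fmt.Println("blob:", blobID, "diffID:", diffID)
}
```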
-func (*tarballImageSource) LayerInfosForCopy() []types.BlobInfo { - return nil -} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go deleted file mode 100644 index 72558b5e83..0000000000 --- a/vendor/github.com/containers/image/tarball/tarball_transport.go +++ /dev/null @@ -1,66 +0,0 @@ -package tarball - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/containers/image/transports" - "github.com/containers/image/types" -) - -const ( - transportName = "tarball" - separator = ":" -) - -var ( - // Transport implements the types.ImageTransport interface for "tarball:" images, - // which are makeshift images constructed using one or more possibly-compressed tar - // archives. - Transport = &tarballTransport{} -) - -type tarballTransport struct { -} - -func (t *tarballTransport) Name() string { - return transportName -} - -func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { - var stdin []byte - var err error - filenames := strings.Split(reference, separator) - for _, filename := range filenames { - if filename == "-" { - stdin, err = ioutil.ReadAll(os.Stdin) - if err != nil { - return nil, fmt.Errorf("error buffering stdin: %v", err) - } - continue - } - f, err := os.Open(filename) - if err != nil { - return nil, fmt.Errorf("error opening %q: %v", filename, err) - } - f.Close() - } - ref := &tarballReference{ - transport: t, - filenames: filenames, - stdin: stdin, - } - return ref, nil -} - -func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { - // See the explanation in daemonReference.PolicyConfigurationIdentity. - return errors.New(`tarball: does not support any scopes except the default "" one`) -} - -func init() { - transports.Register(Transport) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go deleted file mode 100644 index b4552df669..0000000000 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ /dev/null @@ -1,35 +0,0 @@ -package alltransports - -import ( - "strings" - - // register all known transports - // NOTE: Make sure docs/policy.json.md is updated when adding or updating - // a transport. - _ "github.com/containers/image/directory" - _ "github.com/containers/image/docker" - _ "github.com/containers/image/docker/archive" - _ "github.com/containers/image/docker/daemon" - _ "github.com/containers/image/oci/archive" - _ "github.com/containers/image/oci/layout" - _ "github.com/containers/image/openshift" - _ "github.com/containers/image/tarball" - // The ostree transport is registered by ostree*.go - // The storage transport is registered by storage*.go - "github.com/containers/image/transports" - "github.com/containers/image/types" - "github.com/pkg/errors" -) - -// ParseImageName converts a URL-like image name to a types.ImageReference. 
-func ParseImageName(imgName string) (types.ImageReference, error) { - parts := strings.SplitN(imgName, ":", 2) - if len(parts) != 2 { - return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName) - } - transport := transports.Get(parts[0]) - if transport == nil { - return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0]) - } - return transport.ParseReference(parts[1]) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree.go b/vendor/github.com/containers/image/transports/alltransports/ostree.go deleted file mode 100644 index 0fc5d7ef79..0000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_ostree_stub - -package alltransports - -import ( - // Register the ostree transport - _ "github.com/containers/image/ostree" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go b/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go deleted file mode 100644 index 8b01afe7cc..0000000000 --- a/vendor/github.com/containers/image/transports/alltransports/ostree_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_ostree_stub - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("ostree")) -} diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go deleted file mode 100644 index a867c66446..0000000000 --- a/vendor/github.com/containers/image/transports/alltransports/storage.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !containers_image_storage_stub - -package alltransports - -import ( - // Register the storage transport - _ "github.com/containers/image/storage" -) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go deleted file mode 100644 index 4ac684e58f..0000000000 --- a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build containers_image_storage_stub - -package alltransports - -import "github.com/containers/image/transports" - -func init() { - transports.Register(transports.NewStubTransport("containers-storage")) -} diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go deleted file mode 100644 index 087f69b6ea..0000000000 --- a/vendor/github.com/containers/image/transports/stub.go +++ /dev/null @@ -1,36 +0,0 @@ -package transports - -import ( - "fmt" - - "github.com/containers/image/types" -) - -// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -type stubTransport string - -// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”. -func NewStubTransport(name string) types.ImageTransport { - return stubTransport(name) -} - -// Name returns the name of the transport, which must be unique among other transports. 
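ParseImageName above and transports.ImageName (defined in the transports.go hunk further down) are designed as inverses: the former splits at the first colon, the latter rejoins Name() and StringWithinTransport(). A usage sketch, with "docker://busybox:latest" as an example input:

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Splits at the first colon: transport name "docker", then the
	// transport-specific reference "//busybox:latest".
	ref, err := alltransports.ParseImageName("docker://busybox:latest")
	if err != nil {
		panic(err)
	}
	// ImageName re-serializes the reference; it parses back to an
	// equivalent reference, though possibly in a normalized form
	// (e.g. with defaulted attributes filled in).
	fmt.Println(transports.ImageName(ref))
}
```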
-func (s stubTransport) Name() string { - return string(s) -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) { - return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s)) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error { - // Allowing any reference in here allows tools with some transports stubbed-out to still - // use signature verification policies which refer to these stubbed-out transports. - // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON. - return nil -} diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go deleted file mode 100644 index 687d0a44e3..0000000000 --- a/vendor/github.com/containers/image/transports/transports.go +++ /dev/null @@ -1,90 +0,0 @@ -package transports - -import ( - "fmt" - "sort" - "sync" - - "github.com/containers/image/types" -) - -// knownTransports is a registry of known ImageTransport instances. -type knownTransports struct { - transports map[string]types.ImageTransport - mu sync.Mutex -} - -func (kt *knownTransports) Get(k string) types.ImageTransport { - kt.mu.Lock() - t := kt.transports[k] - kt.mu.Unlock() - return t -} - -func (kt *knownTransports) Remove(k string) { - kt.mu.Lock() - delete(kt.transports, k) - kt.mu.Unlock() -} - -func (kt *knownTransports) Add(t types.ImageTransport) { - kt.mu.Lock() - defer kt.mu.Unlock() - name := t.Name() - if t := kt.transports[name]; t != nil { - panic(fmt.Sprintf("Duplicate image transport name %s", name)) - } - kt.transports[name] = t -} - -var kt *knownTransports - -func init() { - kt = &knownTransports{ - transports: make(map[string]types.ImageTransport), - } -} - -// Get returns the transport specified by name or nil when unavailable. -func Get(name string) types.ImageTransport { - return kt.Get(name) -} - -// Delete deletes a transport from the registered transports. -func Delete(name string) { - kt.Remove(name) -} - -// Register registers a transport. -func Register(t types.ImageTransport) { - kt.Add(t) -} - -// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that -// ParseImageName(ImageName(reference)) returns an equivalent reference. -// -// This is the generally recommended way to refer to images in the UI. -// -// NOTE: The returned string is not promised to be equal to the original input to ParseImageName; -// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. -func ImageName(ref types.ImageReference) string { - return ref.Transport().Name() + ":" + ref.StringWithinTransport() -} - -// ListNames returns a list of non-deprecated transport names. -// Deprecated transports can be used, but are not presented to users.
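To make the stub behavior concrete: a stub registers under a real transport name but rejects every reference, so a build with a transport compiled out (via build tags such as containers_image_ostree_stub above) fails with a clear message instead of an unknown-transport error. A sketch using only NewStubTransport as shown in this hunk:

```go
package main

import (
	"fmt"

	"github.com/containers/image/transports"
)

func main() {
	t := transports.NewStubTransport("ostree")
	// Any reference string is rejected; "repo@/ostree/repo" is just an example.
	_, err := t.ParseReference("repo@/ostree/repo")
	fmt.Println(err) // The transport "ostree:" is not supported in this build
}
```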
-func ListNames() []string { - kt.mu.Lock() - defer kt.mu.Unlock() - deprecated := map[string]bool{ - "atomic": true, - } - var names []string - for _, transport := range kt.transports { - if !deprecated[transport.Name()] { - names = append(names, transport.Name()) - } - } - sort.Strings(names) - return names -} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go deleted file mode 100644 index 2e9c7105d3..0000000000 --- a/vendor/github.com/containers/image/types/types.go +++ /dev/null @@ -1,388 +0,0 @@ -package types - -import ( - "context" - "io" - "time" - - "github.com/containers/image/docker/reference" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ImageTransport is a top-level namespace for ways to store/load an image. -// It should generally correspond to ImageSource/ImageDestination implementations. -// -// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport. -// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS -// (or, even, IPv4 or IPv6). -// -// OTOH all images using the same transport should (apart from versions of the image format) be interoperable. -// For example, several different ImageTransport implementations may be based on local filesystem paths, -// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) -// -// See also transports.KnownTransports. -type ImageTransport interface { - // Name returns the name of the transport, which must be unique among other transports. - Name() string - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. - ParseReference(reference string) (ImageReference, error) - // ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys - // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). - // It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion. - // scope passed to this function will not be "", that value is always allowed. - ValidatePolicyConfigurationScope(scope string) error -} - -// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. -// -// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening -// within an ImageTransport.ParseReference() or equivalent API creating the reference object. -// That's also why the various identification/formatting methods of this type do not support returning errors. -// -// WARNING: While this design freezes the content of the reference within this process, it cannot freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g.
default attribute values omitted by the user may be filled in in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. various references with exactly the same semantics should return the same configuration identity) - // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but - // not required/guaranteed that it will be a valid input to Transport().ParseReference(). - // Returns "" if configuration identities for these references are not supported. - PolicyConfigurationIdentity() string - - // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search - // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed - // in order, terminating on first match, and an implicit "" is always checked at the end. - // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), - // and each following element to be a prefix of the element preceding it. - PolicyConfigurationNamespaces() []string - - // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned ImageCloser. - // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, - // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. - NewImage(ctx *SystemContext) (ImageCloser, error) - // NewImageSource returns a types.ImageSource for this reference. - // The caller must call .Close() on the returned ImageSource. - NewImageSource(ctx *SystemContext) (ImageSource, error) - // NewImageDestination returns a types.ImageDestination for this reference. - // The caller must call .Close() on the returned ImageDestination. - NewImageDestination(ctx *SystemContext) (ImageDestination, error) - - // DeleteImage deletes the named image from the registry, if supported. - DeleteImage(ctx *SystemContext) error -} - -// BlobInfo collects known information about a blob (layer/config). -// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. -type BlobInfo struct { - Digest digest.Digest // "" if unknown. - Size int64 // -1 if unknown - URLs []string - Annotations map[string]string - MediaType string -} - -// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). 
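(The ImageSource documentation continues below.) As a reading aid for the interface that follows, this is roughly how a consumer drives it: fetch the manifest, then stream blobs. drainBlob is a hypothetical helper and error handling is minimal:

```go
package sketch

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/containers/image/types"
)

// drainBlob fetches the primary manifest, then streams a single blob to
// completion, exercising the GetManifest/GetBlob contract below.
func drainBlob(src types.ImageSource, info types.BlobInfo) error {
	_, mimeType, err := src.GetManifest(nil) // nil selects the primary manifest
	if err != nil {
		return err
	}
	fmt.Println("manifest MIME type:", mimeType)

	rc, size, err := src.GetBlob(info)
	if err != nil {
		return err
	}
	defer rc.Close()

	n, err := io.Copy(ioutil.Discard, rc)
	if err != nil {
		return err
	}
	if size != -1 && n != size {
		return fmt.Errorf("expected %d bytes, read %d", size, n)
	}
	return nil
}
```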
-// This is primarily useful for copying images around; for examining their properties, Image (below) -// is usually more useful. -// Each ImageSource should eventually be closed by calling Close(). -// -// WARNING: Various methods which return an object identified by digest generally do not -// validate that the returned data actually matches that digest; this is the caller’s responsibility. -type ImageSource interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Close removes resources associated with an initialized ImageSource, if any. - Close() error - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). - // It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); - // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). - GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - GetBlob(BlobInfo) (io.ReadCloser, int64, error) - // GetSignatures returns the image's signatures. It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for - // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list - // (e.g. if the source never returns manifest lists). - GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) - // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. - // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfosForCopy() []BlobInfo -} - -// ImageDestination is a service, possibly remote (= slow), to store components of a single image. -// -// There is a specific required order for some of the calls: -// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) -// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest -// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) -// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. -// -// Each ImageDestination should eventually be closed by calling Close(). -type ImageDestination interface { - // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, - // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. - Reference() ImageReference - // Close removes resources associated with an initialized ImageDestination, if any. 
- Close() error - - // SupportedManifestMIMETypes tells which manifest mime types the destination supports. - // If an empty slice or nil is returned, then any mime type can be tried for upload. - SupportedManifestMIMETypes() []string - // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. - // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. - SupportsSignatures() error - // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. - ShouldCompressLayers() bool - // AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should be actually - // uploaded to the image destination, true otherwise. - AcceptsForeignLayerURLs() bool - // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. - MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result. - // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. - // inputInfo.Size is the expected length of stream, if known. - // inputInfo.MediaType describes the blob format, if known. - // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available - // to any other readers for download using the supplied digest. - // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. - PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error) - // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. - // Unlike PutBlob, the digest cannot be empty. If HasBlob returns true, the size of the blob must also be returned. - // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); - // it returns a non-nil error only on an unexpected failure. - HasBlob(info BlobInfo) (bool, int64, error) - // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest cannot be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree. - ReapplyBlob(info BlobInfo) (BlobInfo, error) - // PutManifest writes manifest to the destination. - // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. - // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), - // but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError. - PutManifest(manifest []byte) error - PutSignatures(signatures [][]byte) error - // Commit marks the process of storing the image as successful and asks for the image to be persisted. - // WARNING: This does not have any transactional semantics: - // - Uploaded data MAY be visible to others before Commit() is called - // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e.
rollback is allowed but not guaranteed) - Commit() error -} - -// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available, -// refuses specifically this manifest type, but may accept a different manifest type. -type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. - Err error -} - -func (e ManifestTypeRejectedError) Error() string { - return e.Err.Error() -} - -// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs. -// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, -// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. -// This also makes the UnparsedImage→Image conversion an explicitly visible step. -// -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. -type UnparsedImage interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. - Manifest() ([]byte, string, error) - // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. - Signatures(ctx context.Context) ([][]byte, error) - // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. - // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfosForCopy() []BlobInfo -} - -// Image is the primary API for inspecting properties of images. -// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -// -// The Image must not be used after the underlying ImageSource is Close()d. -type Image interface { - // Note that Reference may return nil in the return value of UpdatedImage! - UnparsedImage - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. - ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. - // The result is cached; it is OK to call this however often you need. - ConfigBlob() ([]byte, error) - // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about - // layers in the resulting configuration isn't guaranteed to be returned due to how - // old image manifests work (docker v2s1 especially). - OCIConfig() (*v1.Image, error) - // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
- // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []BlobInfo - // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. - // It returns false if the manifest does not embed a Docker reference. - // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) - EmbeddedDockerReferenceConflicts(ref reference.Named) bool - // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. - Inspect() (*ImageInspectInfo, error) - // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. - // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive - // (most importantly it forces us to download the full layers even if they are already present at the destination). - UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool - // UpdatedImage returns a types.Image modified according to options. - // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. - // This does not change the state of the original Image object. - UpdatedImage(options ManifestUpdateOptions) (Image, error) - // Size returns an approximation of the amount of disk space which is consumed by the image in its current - // location. If the size is not known, -1 will be returned. - Size() (int64, error) -} - -// ImageCloser is an Image with a Close() method which must be called by the user. -// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, -// to ensure that the ImageSource is closed. -type ImageCloser interface { - Image - // Close removes resources associated with an initialized ImageCloser. - Close() error -} - -// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage. -type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. - EmbeddedDockerReference reference.Named - ManifestMIMEType string - // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. - InformationOnly ManifestUpdateInformation -} - -// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here -// only to make writing struct literals possible. -type ManifestUpdateInformation struct { - Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) - LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) - LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. -} - -// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. -// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported -// for other manifest types.
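A sketch of how a consumer might use Inspect() from the Image interface above; ImageInspectInfo is the struct defined in the next hunk, and describe is a hypothetical helper:

```go
package sketch

import (
	"fmt"

	"github.com/containers/image/types"
)

// describe prints a few of the fields Inspect surfaces; ImageInspectInfo
// is the struct defined just below.
func describe(img types.Image) error {
	info, err := img.Inspect()
	if err != nil {
		return err
	}
	fmt.Printf("created %s, os/arch %s/%s, %d layer(s), %d label(s)\n",
		info.Created, info.Os, info.Architecture, len(info.Layers), len(info.Labels))
	return nil
}
```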
-type ImageInspectInfo struct { - Tag string - Created time.Time - DockerVersion string - Labels map[string]string - Architecture string - Os string - Layers []string -} - -// DockerAuthConfig contains authorization information for connecting to a registry. -type DockerAuthConfig struct { - Username string - Password string -} - -// SystemContext allows parameterizing access to implicitly-accessed resources, -// like configuration files in /etc and users' login state in their home directory. -// Various components can share the same field only if their semantics is exactly -// the same; if in doubt, add a new field. -// It is always OK to pass nil instead of a SystemContext. -type SystemContext struct { - // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). - // Not used for any of the more specific path overrides available in this struct. - // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it). - // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths, - // and there is no need to worry about the environment.) - // NOTE: This does NOT affect paths starting with $HOME. - RootForImplicitAbsolutePaths string - - // === Global configuration overrides === - // If not "", overrides the system's default path for signature.Policy configuration. - SignaturePolicyPath string - // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) - RegistriesDirPath string - // Path to the system-wide registries configuration file - SystemRegistriesConfPath string - // If not "", overrides the default path for the authentication file - AuthFilePath string - // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. - ArchitectureChoice string - // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. - OSChoice string - - // === OCI.Transport overrides === - // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when downloading OCI image layers. - OCICertPath string - // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. - OCIInsecureSkipTLSVerify bool - // If not "", use a shared directory for storing blobs rather than within OCI layouts - OCISharedBlobDirPath string - - // === docker.Transport overrides === - // If not "", a directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when talking to a Docker Registry. - DockerCertPath string - // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. - // Ignored if DockerCertPath is non-empty. - DockerPerHostCertDirPath string - // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
- DockerInsecureSkipTLSVerify bool - // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials - DockerAuthConfig *DockerAuthConfig - // if not "", a User-Agent header is added to each request when contacting a registry. - DockerRegistryUserAgent string - // if true, a V1 ping attempt isn't done, to give users a better error. Default is false. - // Note that this field is used mainly to integrate containers/image into projectatomic/docker - // in order not to break any existing Docker integration tests. - DockerDisableV1Ping bool - // Directory to use for OSTree temporary files - OSTreeTmpDirPath string - - // === docker/daemon.Transport overrides === - // A directory containing a CA certificate (ending with ".crt"), - // a client certificate (ending with ".cert") and a client certificate key - // (ending with ".key") used when talking to a Docker daemon. - DockerDaemonCertPath string - // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. - DockerDaemonHost string - // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well. - DockerDaemonInsecureSkipTLSVerify bool - - // === dir.Transport overrides === - // DirForceCompress compresses the image layers if set to true - DirForceCompress bool -} - -// ProgressProperties is used to pass information from the copy code to a monitor which -// can use the real-time information to produce output or react to changes. -type ProgressProperties struct { - Artifact BlobInfo - Offset uint64 -} diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go deleted file mode 100644 index 6644bcff3b..0000000000 --- a/vendor/github.com/containers/image/version/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package version - -import "fmt" - -const ( - // VersionMajor is for API-incompatible changes - VersionMajor = 0 - // VersionMinor is for adding functionality in a backwards-compatible manner - VersionMinor = 1 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates a development branch; releases will use an empty string. - VersionDev = "-dev" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS deleted file mode 100644 index 11cd83d14e..0000000000 --- a/vendor/github.com/containers/storage/AUTHORS +++ /dev/null @@ -1,1522 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`.
- -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditi Rajagopal -Aditya -Adria Casas -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Ali Dehghani -Allen Madsen -Allen Sun -almoehi -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Carl Henrik Lunde -Carl X. Su -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. 
Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Dave Barboza -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Gageot -David Gebler -David Lawrence -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -dcylabs -decadent -deed02392 -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dima Stopel -Dimitri John Ledkov -Dimitry Andric -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric Yang -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -Francesc Campoy -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -fy2462 -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Hyzhou <1187766782@qq.com> -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jhon Honce -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Justin Terry -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -kamjar gerami -Kanstantsin Shautsou -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Ke Xu -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickael Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin P. Kucharczyk -Kevin Shi -Kevin Wallace -Kevin Yap -kevinmeredith -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -mansinahar -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matt Moore -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -muge -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Wallis -Roberto G. 
Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean OMeara -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -Shekhar Gulati -Sheng Yang -Shengbo Song -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -srinsriv -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas LEVEIL -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Wang Xing -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -yorkie -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -搏通 diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE deleted file mode 100644 index 8f3fee627a..0000000000 --- a/vendor/github.com/containers/storage/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE deleted file mode 100644 index 8a37c1c7bc..0000000000 --- a/vendor/github.com/containers/storage/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2016 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/containers.go b/vendor/github.com/containers/storage/containers.go deleted file mode 100644 index 5631e31c3e..0000000000 --- a/vendor/github.com/containers/storage/containers.go +++ /dev/null @@ -1,531 +0,0 @@ -package storage - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/stringid" - "github.com/containers/storage/pkg/truncindex" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// A Container is a reference to a read-write layer with metadata. 
-type Container struct { - // ID is either one which was specified at create-time, or a random - // value which was generated by the library. - ID string `json:"id"` - - // Names is an optional set of user-defined convenience values. The - // container can be referred to by its ID or any of its names. Names - // are unique among containers. - Names []string `json:"names,omitempty"` - - // ImageID is the ID of the image which was used to create the container. - ImageID string `json:"image"` - - // LayerID is the ID of the read-write layer for the container itself. - // It is assumed that the image's top layer is the parent of the container's - // read-write layer. - LayerID string `json:"layer"` - - // Metadata is data we keep for the convenience of the caller. It is not - // expected to be large, since it is kept in memory. - Metadata string `json:"metadata,omitempty"` - - // BigDataNames is a list of names of data items that we keep for the - // convenience of the caller. They can be large, and are only in - // memory when being read from or written to disk. - BigDataNames []string `json:"big-data-names,omitempty"` - - // BigDataSizes maps the names in BigDataNames to the sizes of the data - // that has been stored, if they're known. - BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` - - // BigDataDigests maps the names in BigDataNames to the digests of the - // data that has been stored, if they're known. - BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` - - // Created is the datestamp for when this container was created. Older - // versions of the library did not track this information, so callers - // will likely want to use the IsZero() method to verify that a value - // is set before using it. - Created time.Time `json:"created,omitempty"` - - Flags map[string]interface{} `json:"flags,omitempty"` -} - -// ContainerStore provides bookkeeping for information about Containers. -type ContainerStore interface { - FileBasedStore - MetadataStore - BigDataStore - FlaggableStore - - // Create creates a container that has a specified ID (or generates a - // random one if an empty value is supplied) and optional names, - // based on the specified image, using the specified layer as its - // read-write layer. - Create(id string, names []string, image, layer, metadata string) (*Container, error) - - // SetNames updates the list of names associated with the container - // with the specified ID. - SetNames(id string, names []string) error - - // Get retrieves information about a container given an ID or name. - Get(id string) (*Container, error) - - // Exists checks if there is a container with the given ID or name. - Exists(id string) bool - - // Delete removes the record of the container. - Delete(id string) error - - // Wipe removes records of all containers. - Wipe() error - - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - - // Containers returns a slice enumerating the known containers. 
- Containers() ([]Container, error) -} - -type containerStore struct { - lockfile Locker - dir string - containers []*Container - idindex *truncindex.TruncIndex - byid map[string]*Container - bylayer map[string]*Container - byname map[string]*Container -} - -func (r *containerStore) Containers() ([]Container, error) { - containers := make([]Container, len(r.containers)) - for i := range r.containers { - containers[i] = *(r.containers[i]) - } - return containers, nil -} - -func (r *containerStore) containerspath() string { - return filepath.Join(r.dir, "containers.json") -} - -func (r *containerStore) datadir(id string) string { - return filepath.Join(r.dir, id) -} - -func (r *containerStore) datapath(id, key string) string { - return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) -} - -func (r *containerStore) Load() error { - needSave := false - rpath := r.containerspath() - data, err := ioutil.ReadFile(rpath) - if err != nil && !os.IsNotExist(err) { - return err - } - containers := []*Container{} - layers := make(map[string]*Container) - idlist := []string{} - ids := make(map[string]*Container) - names := make(map[string]*Container) - if err = json.Unmarshal(data, &containers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(containers)) - for n, container := range containers { - idlist = append(idlist, container.ID) - ids[container.ID] = containers[n] - layers[container.LayerID] = containers[n] - for _, name := range container.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - needSave = true - } - names[name] = containers[n] - } - } - } - r.containers = containers - r.idindex = truncindex.NewTruncIndex(idlist) - r.byid = ids - r.bylayer = layers - r.byname = names - if needSave { - return r.Save() - } - return nil -} - -func (r *containerStore) Save() error { - rpath := r.containerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err - } - jdata, err := json.Marshal(&r.containers) - if err != nil { - return err - } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) -} - -func newContainerStore(dir string) (ContainerStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { - return nil, err - } - lockfile, err := GetLockfile(filepath.Join(dir, "containers.lock")) - if err != nil { - return nil, err - } - lockfile.Lock() - defer lockfile.Unlock() - cstore := containerStore{ - lockfile: lockfile, - dir: dir, - containers: []*Container{}, - byid: make(map[string]*Container), - bylayer: make(map[string]*Container), - byname: make(map[string]*Container), - } - if err := cstore.Load(); err != nil { - return nil, err - } - return &cstore, nil -} - -func (r *containerStore) lookup(id string) (*Container, bool) { - if container, ok := r.byid[id]; ok { - return container, ok - } else if container, ok := r.byname[id]; ok { - return container, ok - } else if container, ok := r.bylayer[id]; ok { - return container, ok - } else if longid, err := r.idindex.Get(id); err == nil { - if container, ok := r.byid[longid]; ok { - return container, ok - } - } - return nil, false -} - -func (r *containerStore) ClearFlag(id string, flag string) error { - container, ok := r.lookup(id) - if !ok { - return ErrContainerUnknown - } - delete(container.Flags, flag) - return r.Save() -} - -func (r *containerStore) SetFlag(id string, flag string, value interface{}) error { - container, ok := r.lookup(id) - if !ok { - return ErrContainerUnknown - } - if container.Flags == nil { - container.Flags = 
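
For context, the ContainerStore interface deleted above was the container-bookkeeping API kaniko previously pulled in from containers/storage. A minimal usage sketch, using only the signatures visible in the removed containers.go; how a concrete store is obtained is out of scope here, and names like "builder" are made up:

```go
// Illustrative only; this compiles against the vendored
// containers/storage tree that this diff removes.
package sketch

import (
	"fmt"

	"github.com/containers/storage"
)

func createAndTag(store storage.ContainerStore, imageID, layerID string) error {
	// An empty ID asks Create to generate a random, unused one.
	c, err := store.Create("", []string{"builder"}, imageID, layerID, "")
	if err != nil {
		return err
	}
	// Names are unique store-wide; SetNames replaces the whole set and
	// steals names from any other container that already holds them.
	if err := store.SetNames(c.ID, []string{"builder", "scratch-work"}); err != nil {
		return err
	}
	// Get resolves a full ID, a name, a layer ID, or an unambiguous ID prefix.
	got, err := store.Get("builder")
	if err != nil {
		return err
	}
	fmt.Printf("container %s on layer %s\n", got.ID, got.LayerID)
	return store.Delete(got.ID)
}
```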
make(map[string]interface{}) - } - container.Flags[flag] = value - return r.Save() -} - -func (r *containerStore) Create(id string, names []string, image, layer, metadata string) (container *Container, err error) { - if id == "" { - id = stringid.GenerateRandomID() - _, idInUse := r.byid[id] - for idInUse { - id = stringid.GenerateRandomID() - _, idInUse = r.byid[id] - } - } - if _, idInUse := r.byid[id]; idInUse { - return nil, ErrDuplicateID - } - names = dedupeNames(names) - for _, name := range names { - if _, nameInUse := r.byname[name]; nameInUse { - return nil, ErrDuplicateName - } - } - if err == nil { - container = &Container{ - ID: id, - Names: names, - ImageID: image, - LayerID: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - } - r.containers = append(r.containers, container) - r.byid[id] = container - r.idindex.Add(id) - r.bylayer[layer] = container - for _, name := range names { - r.byname[name] = container - } - err = r.Save() - } - return container, err -} - -func (r *containerStore) Metadata(id string) (string, error) { - if container, ok := r.lookup(id); ok { - return container.Metadata, nil - } - return "", ErrContainerUnknown -} - -func (r *containerStore) SetMetadata(id, metadata string) error { - if container, ok := r.lookup(id); ok { - container.Metadata = metadata - return r.Save() - } - return ErrContainerUnknown -} - -func (r *containerStore) removeName(container *Container, name string) { - container.Names = stringSliceWithoutValue(container.Names, name) -} - -func (r *containerStore) SetNames(id string, names []string) error { - names = dedupeNames(names) - if container, ok := r.lookup(id); ok { - for _, name := range container.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherContainer, ok := r.byname[name]; ok { - r.removeName(otherContainer, name) - } - r.byname[name] = container - } - container.Names = names - return r.Save() - } - return ErrContainerUnknown -} - -func (r *containerStore) Delete(id string) error { - container, ok := r.lookup(id) - if !ok { - return ErrContainerUnknown - } - id = container.ID - toDeleteIndex := -1 - for i, candidate := range r.containers { - if candidate.ID == id { - toDeleteIndex = i - break - } - } - delete(r.byid, id) - r.idindex.Delete(id) - delete(r.bylayer, container.LayerID) - for _, name := range container.Names { - delete(r.byname, name) - } - if toDeleteIndex != -1 { - // delete the container at toDeleteIndex - if toDeleteIndex == len(r.containers)-1 { - r.containers = r.containers[:len(r.containers)-1] - } else { - r.containers = append(r.containers[:toDeleteIndex], r.containers[toDeleteIndex+1:]...) 
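
The Create method above uses a common generate-and-retry pattern: keep producing random IDs until one is not already in use. A self-contained re-creation of that loop, substituting crypto/rand for the deleted stringid helper (which likewise produced 64 hex characters):

```go
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

// newID returns 64 hex characters, matching the shape of the IDs that
// the deleted stringid.GenerateRandomID produced.
func newID() string {
	b := make([]byte, 32)
	if _, err := rand.Read(b); err != nil {
		panic(err) // crypto/rand failing is unrecoverable here
	}
	return hex.EncodeToString(b)
}

// uniqueID retries until the ID is absent from inUse, just as Create
// retries against its byid index.
func uniqueID(inUse map[string]bool) string {
	id := newID()
	for inUse[id] {
		id = newID()
	}
	return id
}

func main() {
	used := map[string]bool{}
	id := uniqueID(used)
	used[id] = true
	fmt.Println(id[:12])
}
```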
- } - } - if err := r.Save(); err != nil { - return err - } - if err := os.RemoveAll(r.datadir(id)); err != nil { - return err - } - return nil -} - -func (r *containerStore) Get(id string) (*Container, error) { - if container, ok := r.lookup(id); ok { - return container, nil - } - return nil, ErrContainerUnknown -} - -func (r *containerStore) Lookup(name string) (id string, err error) { - if container, ok := r.lookup(name); ok { - return container.ID, nil - } - return "", ErrContainerUnknown -} - -func (r *containerStore) Exists(id string) bool { - _, ok := r.lookup(id) - return ok -} - -func (r *containerStore) BigData(id, key string) ([]byte, error) { - if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve container big data value for empty name") - } - c, ok := r.lookup(id) - if !ok { - return nil, ErrContainerUnknown - } - return ioutil.ReadFile(r.datapath(c.ID, key)) -} - -func (r *containerStore) BigDataSize(id, key string) (int64, error) { - if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of container big data with empty name") - } - c, ok := r.lookup(id) - if !ok { - return -1, ErrContainerUnknown - } - if c.BigDataSizes == nil { - c.BigDataSizes = make(map[string]int64) - } - if size, ok := c.BigDataSizes[key]; ok { - return size, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - c, ok := r.lookup(id) - if !ok { - return -1, ErrContainerUnknown - } - if size, ok := c.BigDataSizes[key]; ok { - return size, nil - } - } - } - return -1, ErrSizeUnknown -} - -func (r *containerStore) BigDataDigest(id, key string) (digest.Digest, error) { - if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of container big data value with empty name") - } - c, ok := r.lookup(id) - if !ok { - return "", ErrContainerUnknown - } - if c.BigDataDigests == nil { - c.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := c.BigDataDigests[key]; ok { - return d, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - c, ok := r.lookup(id) - if !ok { - return "", ErrContainerUnknown - } - if d, ok := c.BigDataDigests[key]; ok { - return d, nil - } - } - } - return "", ErrDigestUnknown -} - -func (r *containerStore) BigDataNames(id string) ([]string, error) { - c, ok := r.lookup(id) - if !ok { - return nil, ErrContainerUnknown - } - return c.BigDataNames, nil -} - -func (r *containerStore) SetBigData(id, key string, data []byte) error { - if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for container big data item") - } - c, ok := r.lookup(id) - if !ok { - return ErrContainerUnknown - } - if err := os.MkdirAll(r.datadir(c.ID), 0700); err != nil { - return err - } - err := ioutils.AtomicWriteFile(r.datapath(c.ID, key), data, 0600) - if err == nil { - save := false - if c.BigDataSizes == nil { - c.BigDataSizes = make(map[string]int64) - } - oldSize, sizeOk := c.BigDataSizes[key] - c.BigDataSizes[key] = int64(len(data)) - if c.BigDataDigests == nil { - c.BigDataDigests = make(map[string]digest.Digest) - } - oldDigest, digestOk := c.BigDataDigests[key] - newDigest := digest.Canonical.FromBytes(data) - c.BigDataDigests[key] = newDigest - if !sizeOk || oldSize != c.BigDataSizes[key] || !digestOk || oldDigest != newDigest { - save = true - } - addName := true - for _, name := range c.BigDataNames { - if name == key { - addName = false - break - } - } 
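
The SetBigData logic above caches, alongside each blob it writes, the blob's byte size and a canonical digest, and flags the store for re-save only when either actually changed. The same bookkeeping in isolation (a sketch; only the digest.Canonical.FromBytes call is taken from the code being deleted):

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// bigDataItem mirrors the per-key bookkeeping SetBigData keeps in
// c.BigDataSizes and c.BigDataDigests.
type bigDataItem struct {
	size   int64
	digest digest.Digest
}

// record stores size and digest for key and reports whether anything
// changed, which is the signal SetBigData uses to call Save().
func record(items map[string]bigDataItem, key string, data []byte) bool {
	old, ok := items[key]
	item := bigDataItem{
		size:   int64(len(data)),
		digest: digest.Canonical.FromBytes(data), // sha256 by default
	}
	items[key] = item
	return !ok || old != item
}

func main() {
	items := map[string]bigDataItem{}
	fmt.Println(record(items, "manifest", []byte(`{"schemaVersion":2}`))) // true
	fmt.Println(record(items, "manifest", []byte(`{"schemaVersion":2}`))) // false: unchanged
}
```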
- if addName { - c.BigDataNames = append(c.BigDataNames, key) - save = true - } - if save { - err = r.Save() - } - } - return err -} - -func (r *containerStore) Wipe() error { - ids := make([]string, 0, len(r.byid)) - for id := range r.byid { - ids = append(ids, id) - } - for _, id := range ids { - if err := r.Delete(id); err != nil { - return err - } - } - return nil -} - -func (r *containerStore) Lock() { - r.lockfile.Lock() -} - -func (r *containerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *containerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *containerStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *containerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *containerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} diff --git a/vendor/github.com/containers/storage/containers_ffjson.go b/vendor/github.com/containers/storage/containers_ffjson.go deleted file mode 100644 index 9526198061..0000000000 --- a/vendor/github.com/containers/storage/containers_ffjson.go +++ /dev/null @@ -1,1194 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. -// source: containers.go - -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *Container) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Container) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - buf.WriteString(`"image":`) - fflib.WriteJsonString(buf, string(j.ImageID)) - buf.WriteString(`,"layer":`) - fflib.WriteJsonString(buf, string(j.LayerID)) - buf.WriteByte(',') - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.BigDataNames) != 0 { - buf.WriteString(`"big-data-names":`) - if j.BigDataNames != nil { - buf.WriteString(`[`) - for i, v := range j.BigDataNames { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.BigDataSizes) != 0 { - if j.BigDataSizes == nil { - buf.WriteString(`"big-data-sizes":null`) - } else { - buf.WriteString(`"big-data-sizes":{ `) - for key, value := range j.BigDataSizes { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.FormatBits2(buf, uint64(value), 10, value < 0) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if len(j.BigDataDigests) != 0 { - if j.BigDataDigests == nil { - buf.WriteString(`"big-data-digests":null`) - } else { - buf.WriteString(`"big-data-digests":{ `) - for key, value := range j.BigDataDigests { - 
fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.WriteJsonString(buf, string(value)) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtContainerbase = iota - ffjtContainernosuchkey - - ffjtContainerID - - ffjtContainerNames - - ffjtContainerImageID - - ffjtContainerLayerID - - ffjtContainerMetadata - - ffjtContainerBigDataNames - - ffjtContainerBigDataSizes - - ffjtContainerBigDataDigests - - ffjtContainerCreated - - ffjtContainerFlags -) - -var ffjKeyContainerID = []byte("id") - -var ffjKeyContainerNames = []byte("names") - -var ffjKeyContainerImageID = []byte("image") - -var ffjKeyContainerLayerID = []byte("layer") - -var ffjKeyContainerMetadata = []byte("metadata") - -var ffjKeyContainerBigDataNames = []byte("big-data-names") - -var ffjKeyContainerBigDataSizes = []byte("big-data-sizes") - -var ffjKeyContainerBigDataDigests = []byte("big-data-digests") - -var ffjKeyContainerCreated = []byte("created") - -var ffjKeyContainerFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Container) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Container) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtContainerbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtContainernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'b': - - if bytes.Equal(ffjKeyContainerBigDataNames, kn) { - currentKey = ffjtContainerBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerBigDataSizes, kn) { - currentKey = ffjtContainerBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerBigDataDigests, kn) { - currentKey = ffjtContainerBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'c': - - if bytes.Equal(ffjKeyContainerCreated, kn) { - currentKey = ffjtContainerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyContainerFlags, kn) { - currentKey = ffjtContainerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyContainerID, kn) { - currentKey = ffjtContainerID - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyContainerImageID, kn) { - currentKey = ffjtContainerImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'l': - - if bytes.Equal(ffjKeyContainerLayerID, kn) { - currentKey = ffjtContainerLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyContainerMetadata, kn) { - currentKey = ffjtContainerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyContainerNames, kn) { - currentKey = ffjtContainerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyContainerFlags, kn) { - currentKey = ffjtContainerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerCreated, kn) { - currentKey = ffjtContainerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataDigests, kn) { - currentKey = ffjtContainerBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataSizes, kn) { - currentKey = ffjtContainerBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerBigDataNames, kn) { - currentKey = ffjtContainerBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerMetadata, kn) { - currentKey = ffjtContainerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerLayerID, kn) { - currentKey = ffjtContainerLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerImageID, kn) { - currentKey = ffjtContainerImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyContainerNames, kn) { - currentKey = ffjtContainerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyContainerID, kn) { - currentKey = ffjtContainerID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtContainernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || 
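
The generated key matcher above dispatches on the first byte of the key, tries exact matches, and only then falls back to case-insensitive comparison, so the common exact-match case never pays for folding. The same idea in plain standard-library code (bytes.EqualFold stands in for ffjson's fflib fold helpers):

```go
package main

import (
	"bytes"
	"fmt"
)

// keyToken classifies a JSON object key the way the generated parser
// does: first-byte switch, exact match, then a folded fallback.
func keyToken(kn []byte) string {
	if len(kn) == 0 {
		return "nosuchkey"
	}
	switch kn[0] {
	case 'i':
		if bytes.Equal(kn, []byte("id")) {
			return "id"
		}
		if bytes.Equal(kn, []byte("image")) {
			return "image"
		}
	case 'l':
		if bytes.Equal(kn, []byte("layer")) {
			return "layer"
		}
	}
	// Fallback: accept any casing, as the EqualFoldRight calls do.
	for _, k := range []string{"id", "image", "layer"} {
		if bytes.EqualFold(kn, []byte(k)) {
			return k
		}
	}
	return "nosuchkey"
}

func main() {
	fmt.Println(keyToken([]byte("Image")), keyToken([]byte("bogus")))
}
```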
tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtContainerID: - goto handle_ID - - case ffjtContainerNames: - goto handle_Names - - case ffjtContainerImageID: - goto handle_ImageID - - case ffjtContainerLayerID: - goto handle_LayerID - - case ffjtContainerMetadata: - goto handle_Metadata - - case ffjtContainerBigDataNames: - goto handle_BigDataNames - - case ffjtContainerBigDataSizes: - goto handle_BigDataSizes - - case ffjtContainerBigDataDigests: - goto handle_BigDataDigests - - case ffjtContainerCreated: - goto handle_Created - - case ffjtContainerFlags: - goto handle_Flags - - case ffjtContainernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_ImageID: - - /* handler: j.ImageID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ImageID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_LayerID: - - /* handler: j.LayerID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.LayerID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataNames: - - /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataNames = nil - } else { - - j.BigDataNames = []string{} - - wantVal := true - - for { - - var tmpJBigDataNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataNames = string(string(outBuf)) - - } - } - - j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataSizes: - - /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataSizes = nil - } else { - - j.BigDataSizes = make(map[string]int64, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataSizes int64 - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - tmpJBigDataSizes = int64(tval) - - } - } - - j.BigDataSizes[k] = tmpJBigDataSizes - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataDigests: - - /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataDigests = nil - } else { - - j.BigDataDigests = make(map[string]digest.Digest, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataDigests digest.Digest - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataDigests = digest.Digest(string(outBuf)) - - } - } - - j.BigDataDigests[k] = tmpJBigDataDigests - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *containerStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *containerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtcontainerStorebase = iota - ffjtcontainerStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *containerStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *containerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtcontainerStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
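
Everything in containers_ffjson.go is machine-generated solely to marshal and unmarshal the Container struct without reflection; once it is deleted, any remaining callers fall back to the reflection-based encoding/json, which produces the same wire format from the same struct tags. A trimmed demonstration (field set reduced for brevity):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// container carries the same JSON tags as the Container struct in the
// deleted containers.go, minus the fields trimmed for this sketch.
type container struct {
	ID      string   `json:"id"`
	Names   []string `json:"names,omitempty"`
	ImageID string   `json:"image"`
	LayerID string   `json:"layer"`
}

func main() {
	data, err := json.Marshal(container{ID: "abc", ImageID: "img", LayerID: "rw"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"id":"abc","image":"img","layer":"rw"}

	var c container
	if err := json.Unmarshal(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.LayerID)
}
```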
- currentKey = ffjtcontainerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtcontainerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtcontainerStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/aufs.go b/vendor/github.com/containers/storage/drivers/aufs/aufs.go deleted file mode 100644 index c1cfabee99..0000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/aufs.go +++ /dev/null @@ -1,691 +0,0 @@ -// +build linux - -/* - -aufs driver directory structure - - . - ├── layers // Metadata of layers - │ ├── 1 - │ ├── 2 - │ └── 3 - ├── diff // Content of the layer - │ ├── 1 // Contains layers that need to be mounted for the id - │ ├── 2 - │ └── 3 - └── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/chrootarchive" - "github.com/containers/storage/pkg/directory" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" - mountpk "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - "golang.org/x/sys/unix" -) - -var ( - // ErrAufsNotSupported is returned if aufs is not supported by the host. - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") - // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") - backingFs = "" - - enableDirpermLock sync.Once - enableDirperm bool -) - -func init() { - graphdriver.Register("aufs", Init) -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - sync.Mutex - root string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - pathCacheLock sync.Mutex - pathCache map[string]string - naiveDiff graphdriver.DiffDriver - locker *locker.Locker -} - -// Init returns a new AUFS driver. 
-// An error is returned if AUFS is not supported. -func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support aufs") - - } - - fsMagic, err := graphdriver.GetFSMagic(root) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: - logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - uidMaps: uidMaps, - gidMaps: gidMaps, - pathCache: make(map[string]string), - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), - locker: locker.New(), - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - if err := mountpk.MakePrivate(root); err != nil { - return nil, err - } - - // Populate the dir structure - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { - return nil, err - } - } - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - }) - - for _, path := range []string{"mnt", "diff"} { - p := filepath.Join(root, path) - entries, err := ioutil.ReadDir(p) - if err != nil { - logger.WithError(err).WithField("dir", p).Error("error reading dir entries") - continue - } - for _, entry := range entries { - if !entry.IsDir() { - continue - } - if strings.HasSuffix(entry.Name(), "-removing") { - logger.WithField("dir", entry.Name()).Debug("Cleaning up stale layer dir") - if err := system.EnsureRemoveAll(filepath.Join(p, entry.Name())); err != nil { - logger.WithField("dir", entry.Name()).WithError(err).Error("Error removing stale layer dir") - } - } - } - } - - a.naiveDiff = graphdriver.NewNaiveDiffDriver(a, uidMaps, gidMaps) - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - if rsystem.RunningInUserNS() { - return ErrAufsNested - } - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a *Driver) rootPath() string { - return a.root -} - -func (*Driver) String() string { - return "aufs" -} - -// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. 
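For orientation, the aufs support probe above reduces to scanning /proc/filesystems after a best-effort "modprobe aufs" and a user-namespace check. A minimal standalone version of that scan (Linux only; supportsFS is a hypothetical name, not part of the vendored code):

    package main

    import (
        "bufio"
        "fmt"
        "os"
        "strings"
    )

    // supportsFS reports whether the kernel lists the named filesystem
    // in /proc/filesystems.
    func supportsFS(name string) (bool, error) {
        f, err := os.Open("/proc/filesystems")
        if err != nil {
            return false, err
        }
        defer f.Close()

        s := bufio.NewScanner(f)
        for s.Scan() {
            if strings.Contains(s.Text(), name) {
                return true, nil
            }
        }
        return false, s.Err()
    }

    func main() {
        ok, err := supportsFS("aufs")
        fmt.Println(ok, err)
    }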
-func (a *Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, - } -} - -// Metadata not implemented -func (a *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil -} - -// Exists returns true if the given id is registered with -// this driver -func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (a *Driver) AdditionalImageStores() []string { - return nil -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return a.Create(id, parent, opts) -} - -// Create three folders for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - - if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for aufs") - } - - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIDs(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - - return nil -} - -// createDirsFor creates two directories for the given id. -// mnt and diff -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) - if err != nil { - return err - } - // Directory permission is 0755. - // The path of directories are /mnt/ - // and /diff/ - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { - return err - } - } - return nil -} - -// Remove will unmount and remove the given id. -func (a *Driver) Remove(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) - a.pathCacheLock.Lock() - mountpoint, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - if !exists { - mountpoint = a.getMountpoint(id) - } - - logger := logrus.WithFields(logrus.Fields{ - "module": "graphdriver", - "driver": "aufs", - "layer": id, - }) - - var retries int - for { - mounted, err := a.mounted(mountpoint) - if err != nil { - if os.IsNotExist(err) { - break - } - return err - } - if !mounted { - break - } - - err = a.unmount(mountpoint) - if err == nil { - break - } - - if err != unix.EBUSY { - return errors.Wrapf(err, "aufs: unmount error: %s", mountpoint) - } - if retries >= 5 { - return errors.Wrapf(err, "aufs: unmount error after retries: %s", mountpoint) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. 
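		// The retry policy here: only EBUSY is retried, at most five
		// times with a 100ms sleep between attempts; any other unmount
		// error aborts the removal immediately.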
- retries++ - logger.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) - } - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return errors.Wrapf(err, "error removing layers dir for %s", id) - } - - if err := atomicRemove(a.getDiffPath(id)); err != nil { - return errors.Wrapf(err, "could not remove diff path for id %s", id) - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that container runtime doesn't find it anymore) before doing removal of - // the whole tree. - if err := atomicRemove(mountpoint); err != nil { - if errors.Cause(err) == unix.EBUSY { - logger.WithField("dir", mountpoint).WithError(err).Warn("error performing atomic remove due to EBUSY") - } - return errors.Wrapf(err, "could not remove mountpoint for id %s", id) - } - - a.pathCacheLock.Lock() - delete(a.pathCache, id) - a.pathCacheLock.Unlock() - return nil -} - -func atomicRemove(source string) error { - target := source + "-removing" - - err := os.Rename(source, target) - switch { - case err == nil, os.IsNotExist(err): - case os.IsExist(err): - // Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove - if _, e := os.Stat(source); !os.IsNotExist(e) { - return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up") - } - default: - return errors.Wrapf(err, "error preparing atomic delete") - } - - return system.EnsureRemoveAll(target) -} - -// Get returns the rootfs path for the id. -// This will mount the dir at its given path -func (a *Driver) Get(id, mountLabel string) (string, error) { - a.locker.Lock(id) - defer a.locker.Unlock(id) - parents, err := a.getParentLayerPaths(id) - if err != nil && !os.IsNotExist(err) { - return "", err - } - - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - - if !exists { - m = a.getDiffPath(id) - if len(parents) > 0 { - m = a.getMountpoint(id) - } - } - if count := a.ctr.Increment(m); count > 1 { - return m, nil - } - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - if len(parents) > 0 { - if err := a.mount(id, m, mountLabel, parents); err != nil { - return "", err - } - } - - a.pathCacheLock.Lock() - a.pathCache[id] = m - a.pathCacheLock.Unlock() - return m, nil -} - -// Put unmounts and updates list of active mounts. -func (a *Driver) Put(id string) error { - a.locker.Lock(id) - defer a.locker.Unlock(id) - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - if !exists { - m = a.getMountpoint(id) - a.pathCache[id] = m - } - a.pathCacheLock.Unlock() - if count := a.ctr.Decrement(m); count > 0 { - return nil - } - - err := a.unmount(m) - if err != nil { - logrus.Debugf("Failed to unmount %s aufs: %v", id, err) - } - return err -} - -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (a *Driver) isParent(id, parent string) bool { - parents, _ := getParentIDs(a.rootPath(), id) - if parent == "" && len(parents) > 0 { - return false - } - return !(len(parents) > 0 && parent != parents[0]) -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". 
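An aside on the Remove path above before Diff: atomicRemove renames the directory to a "-removing" suffix before deleting it, so a concurrent reader never observes a half-deleted tree. A stripped-down, runnable sketch of the same pattern (simplified error handling; atomicRemoveDir is a hypothetical name):

    package main

    import (
        "fmt"
        "os"
    )

    // atomicRemoveDir moves a directory out of the way first, then
    // deletes the renamed tree at leisure.
    func atomicRemoveDir(source string) error {
        target := source + "-removing"
        if err := os.Rename(source, target); err != nil && !os.IsNotExist(err) {
            return fmt.Errorf("preparing atomic delete: %v", err)
        }
        return os.RemoveAll(target)
    }

    func main() {
        if err := os.MkdirAll("/tmp/demo-layer/sub", 0755); err != nil {
            panic(err)
        }
        fmt.Println(atomicRemoveDir("/tmp/demo-layer"))
    }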
-func (a *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Diff(id, parent, mountLabel) - } - - // AUFS doesn't need the parent layer to produce a diff. - return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -type fileGetNilCloser struct { - storage.FileGetter -} - -func (f fileGetNilCloser) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. -func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p := path.Join(a.rootPath(), "diff", id) - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil -} - -func (a *Driver) applyDiff(id string, diff io.Reader) error { - return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (a *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.DiffSize(id, parent, mountLabel) - } - // AUFS doesn't need the parent layer to calculate the diff size. - return directory.Size(path.Join(a.rootPath(), "diff", id)) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (a *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { - if !a.isParent(id, parent) { - return a.naiveDiff.ApplyDiff(id, parent, mountLabel, diff) - } - - // AUFS doesn't need the parent id to apply the diff if it is the direct parent. - if err = a.applyDiff(id, diff); err != nil { - return - } - - return a.DiffSize(id, parent, mountLabel) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (a *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { - if !a.isParent(id, parent) { - return a.naiveDiff.Changes(id, parent, mountLabel) - } - - // AUFS doesn't have snapshots, so we need to get changes from all parent - // layers. 
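	// As with Diff, DiffSize, and ApplyDiff above, the fast path applies
	// only when parent is this layer's direct parent; for any other pair
	// the driver falls back to the generic NaiveDiffDriver comparison.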
- layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIDs(a.rootPath(), id) - if err != nil { - return nil, err - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { - a.Lock() - defer a.Unlock() - - // If the id is mounted or we get an error return - if mounted, err := a.mounted(target); err != nil || mounted { - return err - } - - rw := a.getDiffPath(id) - - if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) - } - return nil -} - -func (a *Driver) unmount(mountPath string) error { - a.Lock() - defer a.Unlock() - - if mounted, err := a.mounted(mountPath); err != nil || !mounted { - return err - } - if err := Unmount(mountPath); err != nil { - return err - } - return nil -} - -func (a *Driver) mounted(mountpoint string) (bool, error) { - return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) -} - -// Cleanup aufs and unmount all mountpoints -func (a *Driver) Cleanup() error { - var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - dirs = append(dirs, path) - return nil - }); err != nil { - return err - } - - for _, m := range dirs { - if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", m, err) - } - } - return mountpk.Unmount(a.root) -} - -func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - // Mount options are clipped to page size(4096 bytes). If there are more - // layers then these are remounted individually using append. - - offset := 54 - if useDirperm() { - offset += len(",dirperm1") - } - b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel - bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - - index := 0 - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } - - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - - for ; index < len(ro); index++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[index]) - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil { - return - } - } - - return -} - -// useDirperm checks dirperm1 mount option can be used with the current -// version of aufs. 
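The clipping logic in aufsMount above is worth calling out: mount(2) data is limited to one page, so only as many branches as fit are passed to the first mount and the rest are appended one by one via MS_REMOUNT. A simplified sketch of just the packing step (buildBranchData is a hypothetical helper, and the fixed headroom stands in for the exact offset arithmetic above):

    package main

    import (
        "fmt"
        "os"
    )

    // buildBranchData packs as many read-only branches as fit into one
    // page of mount data and returns the data string plus the index of
    // the first branch that did not fit; callers append the remainder
    // with remounts, as aufsMount does above.
    func buildBranchData(rw string, ro []string) (string, int) {
        limit := os.Getpagesize() - 64 // rough headroom for opts and label
        b := fmt.Sprintf("br:%s=rw", rw)
        i := 0
        for ; i < len(ro); i++ {
            layer := fmt.Sprintf(":%s=ro+wh", ro[i])
            if len(b)+len(layer) > limit {
                break
            }
            b += layer
        }
        return b, i
    }

    func main() {
        data, next := buildBranchData("/aufs/diff/top", []string{"/a", "/b"})
        fmt.Println(data, "first remount index:", next)
    }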
-func useDirperm() bool { - enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "storage-aufs-base") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(base) - - union, err := ioutil.TempDir("", "storage-aufs-union") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(union) - - opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) - if err := mount("none", union, "aufs", 0, opts); err != nil { - return - } - enableDirperm = true - if err := Unmount(union); err != nil { - logrus.Errorf("error checking dirperm1: failed to unmount %v", err) - } - }) - return enableDirperm -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/dirs.go b/vendor/github.com/containers/storage/drivers/aufs/dirs.go deleted file mode 100644 index d2325fc46c..0000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/dirs.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build linux - -package aufs - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIDs(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} - -func (a *Driver) getMountpoint(id string) string { - return path.Join(a.mntPath(), id) -} - -func (a *Driver) mntPath() string { - return path.Join(a.rootPath(), "mnt") -} - -func (a *Driver) getDiffPath(id string) string { - return path.Join(a.diffPath(), id) -} - -func (a *Driver) diffPath() string { - return path.Join(a.rootPath(), "diff") -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount.go b/vendor/github.com/containers/storage/drivers/aufs/mount.go deleted file mode 100644 index 100e7537a9..0000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/mount.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux - -package aufs - -import ( - "os/exec" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Unmount the target specified. 
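Before the mount helpers: the layers metadata read by getParentIDs above is just a text file per layer listing its ancestors, nearest parent first, with an empty file for a base layer (Create writes the direct parent followed by the parent's own chain). A runnable round-trip sketch of that layout, using the era-appropriate ioutil helpers:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
        "strings"
    )

    func main() {
        root, err := ioutil.TempDir("", "aufs-layers")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(root)
        layers := filepath.Join(root, "layers")
        if err := os.MkdirAll(layers, 0755); err != nil {
            panic(err)
        }

        // base layer: empty file, no parents
        ioutil.WriteFile(filepath.Join(layers, "base"), nil, 0644)
        // each child lists its direct parent, then the parent's chain
        ioutil.WriteFile(filepath.Join(layers, "child"), []byte("base\n"), 0644)
        ioutil.WriteFile(filepath.Join(layers, "grand"), []byte("child\nbase\n"), 0644)

        b, _ := ioutil.ReadFile(filepath.Join(layers, "grand"))
        fmt.Println(strings.Fields(string(b))) // [child base]
    }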
-func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) - } - if err := unix.Unmount(target, 0); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go b/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go deleted file mode 100644 index 937104ba3f..0000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package aufs - -import "golang.org/x/sys/unix" - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return unix.Mount(source, target, fstype, flags, data) -} diff --git a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go b/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go deleted file mode 100644 index d030b06637..0000000000 --- a/vendor/github.com/containers/storage/drivers/aufs/mount_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package aufs - -import "errors" - -// MsRemount declared to specify a non-linux system mount. -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on this platform") -} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go deleted file mode 100644 index abc856c836..0000000000 --- a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ /dev/null @@ -1,677 +0,0 @@ -// +build linux - -package btrfs - -/* -#include -#include -#include -#include - -static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { - snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); -} -*/ -import "C" - -import ( - "fmt" - "io/ioutil" - "math" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "unsafe" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/system" - "github.com/docker/go-units" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func init() { - graphdriver.Register("btrfs", Init) -} - -type btrfsOptions struct { - minSpace uint64 - size uint64 -} - -// Init returns a new BTRFS driver. -// An error is returned if BTRFS is not supported. 
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return nil, err - } - - if fsMagic != graphdriver.FsMagicBtrfs { - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "%q is not on a btrfs filesystem", home) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - opt, userDiskQuota, err := parseOptions(options) - if err != nil { - return nil, err - } - - driver := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - options: opt, - } - - if userDiskQuota { - if err := driver.subvolEnableQuota(); err != nil { - return nil, err - } - } - - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (btrfsOptions, bool, error) { - var options btrfsOptions - userDiskQuota := false - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, userDiskQuota, err - } - key = strings.ToLower(key) - switch key { - case "btrfs.min_space": - minSpace, err := units.RAMInBytes(val) - if err != nil { - return options, userDiskQuota, err - } - userDiskQuota = true - options.minSpace = uint64(minSpace) - default: - return options, userDiskQuota, fmt.Errorf("Unknown option %s", key) - } - } - return options, userDiskQuota, nil -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions - quotaEnabled bool - once sync.Once -} - -// String prints the name of the driver (btrfs). -func (d *Driver) String() string { - return "btrfs" -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Build Version" and "Library Version" of the btrfs libraries used. -// Version information can be used to check compatibility with your kernel. -func (d *Driver) Status() [][2]string { - status := [][2]string{} - if bv := btrfsBuildVersion(); bv != "-" { - status = append(status, [2]string{"Build Version", bv}) - } - if lv := btrfsLibVersion(); lv != -1 { - status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) - } - return status -} - -// Metadata returns empty metadata for this driver. -func (d *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup unmounts the home directory. 
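The option parsing above accepts human-readable sizes through go-units; RAMInBytes treats suffixes as binary multiples, so "1G" is 1 GiB. A minimal sketch of the same conversion (parseMinSpace is a hypothetical stand-in for parseOptions and its parsers.ParseKeyValueOpt plumbing):

    package main

    import (
        "fmt"
        "strings"

        units "github.com/docker/go-units"
    )

    // parseMinSpace converts a "btrfs.min_space=<size>" option into a
    // byte count, the same conversion parseOptions performs above.
    func parseMinSpace(opt string) (uint64, error) {
        kv := strings.SplitN(opt, "=", 2)
        if len(kv) != 2 || strings.ToLower(kv[0]) != "btrfs.min_space" {
            return 0, fmt.Errorf("unknown option %s", opt)
        }
        n, err := units.RAMInBytes(kv[1])
        if err != nil {
            return 0, err
        }
        return uint64(n), nil
    }

    func main() {
        fmt.Println(parseMinSpace("btrfs.min_space=1G")) // 1073741824 <nil>
    }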
-func (d *Driver) Cleanup() error { - if err := d.subvolDisableQuota(); err != nil { - return err - } - - return mount.Unmount(d.home) -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) - if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - - var cs = C.CString(name) - C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) - C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func isSubvolume(p string) (bool, error) { - var bufStat unix.Stat_t - if err := unix.Lstat(p, &bufStat); err != nil { - return false, err - } - - // return true if it is a btrfs subvolume - return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil -} - -func subvolDelete(dirpath, name string, quotaEnabled bool) error { - dir, err := openDir(dirpath) - if err != nil { - return err - } - defer closeDir(dir) - fullPath := path.Join(dirpath, name) - - var args C.struct_btrfs_ioctl_vol_args - - // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) && p != fullPath { - // missing most likely because the path was a subvolume that got removed in the previous iteration - // since it's gone anyway, we don't care - return nil - } - return fmt.Errorf("error walking subvolumes: %v", err) - } - // we want to check children only so skip itself - // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { - sv, err := isSubvolume(p) - if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) - } - if sv { - if err := subvolDelete(path.Dir(p), f.Name(), quotaEnabled); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) - } - } - } - return nil - } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) - } - - if quotaEnabled { - if qgroupid, err := subvolLookupQgroup(fullPath); err == nil { - var args C.struct_btrfs_ioctl_qgroup_create_args - args.qgroupid = C.__u64(qgroupid) - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - 
logrus.Errorf("Failed to delete btrfs qgroup %v for %s: %v", qgroupid, fullPath, errno.Error()) - } - } else { - logrus.Errorf("Failed to lookup btrfs qgroup for %s: %v", fullPath, err.Error()) - } - } - - // all subvolumes have been removed - // now remove the one originally passed in - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) - } - return nil -} - -func (d *Driver) updateQuotaStatus() { - d.once.Do(func() { - if !d.quotaEnabled { - // In case quotaEnabled is not set, check qgroup and update quotaEnabled as needed - if err := subvolQgroupStatus(d.home); err != nil { - // quota is still not enabled - return - } - d.quotaEnabled = true - } - }) -} - -func (d *Driver) subvolEnableQuota() error { - d.updateQuotaStatus() - - if d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_ENABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) - } - - d.quotaEnabled = true - - return nil -} - -func (d *Driver) subvolDisableQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_DISABLE - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) - } - - d.quotaEnabled = false - - return nil -} - -func (d *Driver) subvolRescanQuota() error { - d.updateQuotaStatus() - - if !d.quotaEnabled { - return nil - } - - dir, err := openDir(d.home) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_rescan_args - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) - } - - return nil -} - -func subvolLimitQgroup(path string, size uint64) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_qgroup_limit_args - args.lim.max_referenced = C.__u64(size) - args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) - } - - return nil -} - -// subvolQgroupStatus performs a BTRFS_IOC_TREE_SEARCH on the root path -// with search key of BTRFS_QGROUP_STATUS_KEY. -// In case qgroup is enabled, the retuned key type will match BTRFS_QGROUP_STATUS_KEY. 
-// For more details please see https://github.com/kdave/btrfs-progs/blob/v4.9/qgroup.c#L1035 -func subvolQgroupStatus(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_search_args - args.key.tree_id = C.BTRFS_QUOTA_TREE_OBJECTID - args.key.min_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_type = C.BTRFS_QGROUP_STATUS_KEY - args.key.max_objectid = C.__u64(math.MaxUint64) - args.key.max_offset = C.__u64(math.MaxUint64) - args.key.max_transid = C.__u64(math.MaxUint64) - args.key.nr_items = 4096 - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_TREE_SEARCH, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to search qgroup for %s: %v", path, errno.Error()) - } - sh := (*C.struct_btrfs_ioctl_search_header)(unsafe.Pointer(&args.buf)) - if sh._type != C.BTRFS_QGROUP_STATUS_KEY { - return fmt.Errorf("Invalid qgroup search header type for %s: %v", path, sh._type) - } - return nil -} - -func subvolLookupQgroup(path string) (uint64, error) { - dir, err := openDir(path) - if err != nil { - return 0, err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_ino_lookup_args - args.objectid = C.BTRFS_FIRST_FREE_OBJECTID - - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_INO_LOOKUP, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return 0, fmt.Errorf("Failed to lookup qgroup for %s: %v", dir, errno.Error()) - } - if args.treeid == 0 { - return 0, fmt.Errorf("Invalid qgroup id for %s: 0", dir) - } - - return uint64(args.treeid), nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirID(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -func (d *Driver) quotasDir() string { - return path.Join(d.home, "quotas") -} - -func (d *Driver) quotasDirID(id string) string { - return path.Join(d.quotasDir(), id) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create the filesystem with given id. 
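Create below persists any per-layer size limit as a plain decimal byte count in quotas/<id>, and Get later re-reads it with strconv.ParseUint before re-applying the qgroup limit. A sketch of that round trip with the ioctls left out:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "path/filepath"
        "strconv"
    )

    func main() {
        dir, err := ioutil.TempDir("", "btrfs-quotas")
        if err != nil {
            panic(err)
        }
        defer os.RemoveAll(dir)

        var size uint64 = 10 * 1024 * 1024 * 1024
        p := filepath.Join(dir, "layer1")
        // Create writes the limit as a decimal string, as above.
        if err := ioutil.WriteFile(p, []byte(fmt.Sprint(size)), 0644); err != nil {
            panic(err)
        }

        // Get reads it back before re-applying the qgroup limit.
        b, _ := ioutil.ReadFile(p)
        got, err := strconv.ParseUint(string(b), 10, 64)
        fmt.Println(got, err) // 10737418240 <nil>
    }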
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - quotas := path.Join(d.home, "quotas") - subvolumes := path.Join(d.home, "subvolumes") - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir := d.subvolumesDirID(parent) - st, err := os.Stat(parentDir) - if err != nil { - return err - } - if !st.IsDir() { - return fmt.Errorf("%s: not a directory", parentDir) - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if _, ok := storageOpt["size"]; ok { - driver := &Driver{} - if err := d.parseStorageOpt(storageOpt, driver); err != nil { - return err - } - - if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { - return err - } - if err := idtools.MkdirAllAs(quotas, 0700, rootUID, rootGID); err != nil { - return err - } - if err := ioutil.WriteFile(path.Join(quotas, id), []byte(fmt.Sprint(driver.options.size)), 0644); err != nil { - return err - } - } - - // if we have a remapped root (user namespaces enabled), change the created snapshot - // dir ownership to match - if rootUID != 0 || rootGID != 0 { - if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { - return err - } - } - - mountLabel := "" - if opts != nil { - mountLabel = opts.MountLabel - } - - return label.Relabel(path.Join(subvolumes, id), mountLabel, false) -} - -// Parse btrfs storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to change the subvolume disk quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.size = uint64(size) - default: - return fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - -// Set btrfs storage size -func (d *Driver) setStorageSize(dir string, driver *Driver) error { - if driver.options.size <= 0 { - return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) - } - if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { - return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) - } - - if err := d.subvolEnableQuota(); err != nil { - return err - } - - if err := subvolLimitQgroup(dir, driver.options.size); err != nil { - return err - } - - return nil -} - -// Remove the filesystem with given id. -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { - return err - } - quotasDir := d.quotasDirID(id) - if _, err := os.Stat(quotasDir); err == nil { - if err := os.Remove(quotasDir); err != nil { - return err - } - } else if !os.IsNotExist(err) { - return err - } - - // Call updateQuotaStatus() to invoke status update - d.updateQuotaStatus() - - if err := subvolDelete(d.subvolumesDir(), id, d.quotaEnabled); err != nil { - return err - } - if err := system.EnsureRemoveAll(dir); err != nil { - return err - } - if err := d.subvolRescanQuota(); err != nil { - return err - } - return nil -} - -// Get the requested filesystem id. 
-func (d *Driver) Get(id, mountLabel string) (string, error) { - dir := d.subvolumesDirID(id) - st, err := os.Stat(dir) - if err != nil { - return "", err - } - - if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - - if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil { - if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace { - if err := d.subvolEnableQuota(); err != nil { - return "", err - } - if err := subvolLimitQgroup(dir, size); err != nil { - return "", err - } - } - } - - return dir, nil -} - -// Put is not implemented for BTRFS as there is no cleanup required for the id. -func (d *Driver) Put(id string) error { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. - return nil -} - -// Exists checks if the id exists in the filesystem. -func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) - return err == nil -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go b/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go deleted file mode 100644 index f07088887a..0000000000 --- a/vendor/github.com/containers/storage/drivers/btrfs/dummy_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !cgo - -package btrfs diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version.go b/vendor/github.com/containers/storage/drivers/btrfs/version.go deleted file mode 100644 index 73d90cdd71..0000000000 --- a/vendor/github.com/containers/storage/drivers/btrfs/version.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux,!btrfs_noversion - -package btrfs - -/* -#include - -// around version 3.16, they did not define lib version yet -#ifndef BTRFS_LIB_VERSION -#define BTRFS_LIB_VERSION -1 -#endif - -// upstream had removed it, but now it will be coming back -#ifndef BTRFS_BUILD_VERSION -#define BTRFS_BUILD_VERSION "-" -#endif -*/ -import "C" - -func btrfsBuildVersion() string { - return string(C.BTRFS_BUILD_VERSION) -} - -func btrfsLibVersion() int { - return int(C.BTRFS_LIB_VERSION) -} diff --git a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go b/vendor/github.com/containers/storage/drivers/btrfs/version_none.go deleted file mode 100644 index f802fbc629..0000000000 --- a/vendor/github.com/containers/storage/drivers/btrfs/version_none.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build linux,btrfs_noversion - -package btrfs - -// TODO(vbatts) remove this work-around once supported linux distros are on -// btrfs utilities of >= 3.16.1 - -func btrfsBuildVersion() string { - return "-" -} - -func btrfsLibVersion() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go b/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go deleted file mode 100644 index 1430c8859c..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/device_setup.go +++ /dev/null @@ -1,236 +0,0 @@ -package devmapper - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -type directLVMConfig struct { - Device string - ThinpPercent uint64 - ThinpMetaPercent uint64 - AutoExtendPercent uint64 - AutoExtendThreshold 
uint64 -} - -var ( - errThinpPercentMissing = errors.New("must set both `dm.thinp_percent` and `dm.thinp_metapercent` if either is specified") - errThinpPercentTooBig = errors.New("combined `dm.thinp_percent` and `dm.thinp_metapercent` must not be greater than 100") - errMissingSetupDevice = errors.New("must provide device path in `dm.setup_device` in order to configure direct-lvm") -) - -func validateLVMConfig(cfg directLVMConfig) error { - if reflect.DeepEqual(cfg, directLVMConfig{}) { - return nil - } - if cfg.Device == "" { - return errMissingSetupDevice - } - if (cfg.ThinpPercent > 0 && cfg.ThinpMetaPercent == 0) || cfg.ThinpMetaPercent > 0 && cfg.ThinpPercent == 0 { - return errThinpPercentMissing - } - - if cfg.ThinpPercent+cfg.ThinpMetaPercent > 100 { - return errThinpPercentTooBig - } - return nil -} - -func checkDevAvailable(dev string) error { - lvmScan, err := exec.LookPath("lvmdiskscan") - if err != nil { - logrus.Debug("could not find lvmdiskscan") - return nil - } - - out, err := exec.Command(lvmScan).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - if !bytes.Contains(out, []byte(dev)) { - return errors.Errorf("%s is not available for use with devicemapper", dev) - } - return nil -} - -func checkDevInVG(dev string) error { - pvDisplay, err := exec.LookPath("pvdisplay") - if err != nil { - logrus.Debug("could not find pvdisplay") - return nil - } - - out, err := exec.Command(pvDisplay, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - scanner := bufio.NewScanner(bytes.NewReader(bytes.TrimSpace(out))) - for scanner.Scan() { - fields := strings.SplitAfter(strings.TrimSpace(scanner.Text()), "VG Name") - if len(fields) > 1 { - // got "VG Name" line" - vg := strings.TrimSpace(fields[1]) - if len(vg) > 0 { - return errors.Errorf("%s is already part of a volume group %q: must remove this device from any volume group or provide a different device", dev, vg) - } - logrus.Error(fields) - break - } - } - return nil -} - -func checkDevHasFS(dev string) error { - blkid, err := exec.LookPath("blkid") - if err != nil { - logrus.Debug("could not find blkid") - return nil - } - - out, err := exec.Command(blkid, dev).CombinedOutput() - if err != nil { - logrus.WithError(err).Error(string(out)) - return nil - } - - fields := bytes.Fields(out) - for _, f := range fields { - kv := bytes.Split(f, []byte{'='}) - if bytes.Equal(kv[0], []byte("TYPE")) { - v := bytes.Trim(kv[1], "\"") - if len(v) > 0 { - return errors.Errorf("%s has a filesystem already, use dm.directlvm_device_force=true if you want to wipe the device", dev) - } - return nil - } - } - return nil -} - -func verifyBlockDevice(dev string, force bool) error { - if err := checkDevAvailable(dev); err != nil { - return err - } - if err := checkDevInVG(dev); err != nil { - return err - } - - if force { - return nil - } - - if err := checkDevHasFS(dev); err != nil { - return err - } - return nil -} - -func readLVMConfig(root string) (directLVMConfig, error) { - var cfg directLVMConfig - - p := filepath.Join(root, "setup-config.json") - b, err := ioutil.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - return cfg, nil - } - return cfg, errors.Wrap(err, "error reading existing setup config") - } - - // check if this is just an empty file, no need to produce a json error later if so - if len(b) == 0 { - return cfg, nil - } - - err = json.Unmarshal(b, &cfg) - return cfg, errors.Wrap(err, "error unmarshaling previous device setup 
config") -} - -func writeLVMConfig(root string, cfg directLVMConfig) error { - p := filepath.Join(root, "setup-config.json") - b, err := json.Marshal(cfg) - if err != nil { - return errors.Wrap(err, "error marshalling direct lvm config") - } - err = ioutil.WriteFile(p, b, 0600) - return errors.Wrap(err, "error writing direct lvm config to file") -} - -func setupDirectLVM(cfg directLVMConfig) error { - lvmProfileDir := "/etc/lvm/profile" - binaries := []string{"pvcreate", "vgcreate", "lvcreate", "lvconvert", "lvchange", "thin_check"} - - for _, bin := range binaries { - if _, err := exec.LookPath(bin); err != nil { - return errors.Wrap(err, "error looking up command `"+bin+"` while setting up direct lvm") - } - } - - err := os.MkdirAll(lvmProfileDir, 0755) - if err != nil { - return errors.Wrap(err, "error creating lvm profile directory") - } - - if cfg.AutoExtendPercent == 0 { - cfg.AutoExtendPercent = 20 - } - - if cfg.AutoExtendThreshold == 0 { - cfg.AutoExtendThreshold = 80 - } - - if cfg.ThinpPercent == 0 { - cfg.ThinpPercent = 95 - } - if cfg.ThinpMetaPercent == 0 { - cfg.ThinpMetaPercent = 1 - } - - out, err := exec.Command("pvcreate", "-f", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("vgcreate", "storage", cfg.Device).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpool", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - out, err = exec.Command("lvcreate", "--wipesignatures", "y", "-n", "thinpoolmeta", "storage", "--extents", fmt.Sprintf("%d%%VG", cfg.ThinpMetaPercent)).CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - out, err = exec.Command("lvconvert", "-y", "--zero", "n", "-c", "512K", "--thinpool", "storage/thinpool", "--poolmetadata", "storage/thinpoolmeta").CombinedOutput() - if err != nil { - return errors.Wrap(err, string(out)) - } - - profile := fmt.Sprintf("activation{\nthin_pool_autoextend_threshold=%d\nthin_pool_autoextend_percent=%d\n}", cfg.AutoExtendThreshold, cfg.AutoExtendPercent) - err = ioutil.WriteFile(lvmProfileDir+"/storage-thinpool.profile", []byte(profile), 0600) - if err != nil { - return errors.Wrap(err, "error writing storage thinp autoextend profile") - } - - out, err = exec.Command("lvchange", "--metadataprofile", "storage-thinpool", "storage/thinpool").CombinedOutput() - return errors.Wrap(err, string(out)) -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go b/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go deleted file mode 100644 index 6db7b2b2c9..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/deviceset.go +++ /dev/null @@ -1,2825 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bufio" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/dmesg" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/loopback" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" - units "github.com/docker/go-units" - 
"github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false - defaultMinFreeSpacePercent uint32 = 10 - lvmSetupConfigForce bool -) - -const deviceSetMetaFile string = "deviceset-metadata" -const transactionMetaFile string = "transaction-metadata" - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. - lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - minFreeSpacePercent uint32 //min free space percentage in thinpool - xfsNospaceRetries string // max retries when xfs receives ENOSPC - lvmSetupConfig directLVMConfig -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. - Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. 
-type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. - BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint - MinFreeSpace uint64 -} - -// Structure used to export image/container metadata in inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. - Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. - TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. - SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. 
- HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = "base" - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := os.Stat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path. -func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) - } - } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := 
os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - mask = ^(1 << uint(i)) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask -} - -func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool { - var mask byte - i := deviceID % 8 - mask = (1 << uint(i)) - return (devices.deviceIDMap[deviceID/8] & mask) == 0 -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) { - info := devices.Devices[hash] - if info == nil { - info = devices.loadMetadata(hash) - if info == nil { - return nil, fmt.Errorf("devmapper: Unknown device %s", hash) - } - - devices.Devices[hash] = info - } - return info, nil -} - -func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - info, err := devices.lookupDevice(hash) - return info, err -} - -// This function relies on that device hash map has been loaded in advance. -// Should be called with devices.Lock() held. -func (devices *DeviceSet) constructDeviceIDMap() { - logrus.Debug("devmapper: constructDeviceIDMap()") - defer logrus.Debug("devmapper: constructDeviceIDMap() END") - - for _, info := range devices.Devices { - devices.markDeviceIDUsed(info.DeviceID) - logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID) - } -} - -func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error { - - // Skip some of the meta files which are not device files. 
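// Aside: a minimal, runnable sketch of the device-ID bitmap behind
// markDeviceIDUsed, markDeviceIDFree and isDeviceIDFree above. Each of the
// 2^24 possible thin-device IDs maps to a single bit: byte index id/8, bit
// index id%8. The idBitmap type and its method names are illustrative, not
// part of this file.
package main

import "fmt"

const maxDeviceID = (1 << 24) - 1 // thin-device IDs are 24-bit

type idBitmap []byte

func newIDBitmap() idBitmap { return make(idBitmap, maxDeviceID/8+1) }

// mark sets the bit for id, recording the ID as in use.
func (b idBitmap) mark(id int) { b[id/8] |= 1 << uint(id%8) }

// free clears the bit for id, returning the ID to the free pool.
func (b idBitmap) free(id int) { b[id/8] &^= 1 << uint(id%8) }

// isFree reports whether the bit for id is unset.
func (b idBitmap) isFree(id int) bool { return b[id/8]&(1<<uint(id%8)) == 0 }

func main() {
	b := newIDBitmap()
	b.mark(42)
	fmt.Println(b.isFree(42), b.isFree(43)) // false true
	b.free(42)
	fmt.Println(b.isFree(42)) // true
}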
- if strings.HasSuffix(finfo.Name(), ".migrated") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if strings.HasPrefix(finfo.Name(), ".") { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if finfo.Name() == deviceSetMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - if finfo.Name() == transactionMetaFile { - logrus.Debugf("devmapper: Skipping file %s", path) - return nil - } - - logrus.Debugf("devmapper: Loading data for file %s", path) - - hash := finfo.Name() - if hash == "base" { - hash = "" - } - - // Include deleted devices also as cleanup delete device logic - // will go through it and see if there are any deleted devices. - if _, err := devices.lookupDevice(hash); err != nil { - return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err) - } - - return nil -} - -func (devices *DeviceSet) loadDeviceFilesOnStart() error { - logrus.Debug("devmapper: loadDeviceFilesOnStart()") - defer logrus.Debug("devmapper: loadDeviceFilesOnStart() END") - - var scan = func(path string, info os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("devmapper: Can't walk the file %s", path) - return nil - } - - // Skip any directories - if info.IsDir() { - return nil - } - - return devices.deviceFileWalkFunction(path, info) - } - - return filepath.Walk(devices.metadataDir(), scan) -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) unregisterDevice(hash string) error { - logrus.Debugf("devmapper: unregisterDevice(%v)", hash) - info := &devInfo{ - Hash: hash, - } - - delete(devices.Devices, hash) - - if err := devices.removeMetadata(info); err != nil { - logrus.Debugf("devmapper: Error removing metadata: %s", err) - return err - } - - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemovalIfNeeded(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error -func xfsSupported() error { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return err // error text is descriptive enough - } - - // Check if kernel supports xfs filesystem or not. 
- exec.Command("modprobe", "xfs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - return errors.Wrapf(err, "error checking for xfs support") - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return nil - } - } - - if err := s.Err(); err != nil { - return errors.Wrapf(err, "error checking for xfs support") - } - - return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`) -} - -func determineDefaultFS() string { - err := xfsSupported() - if err == nil { - return "xfs" - } - - logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err) - return "ext4" -} - -// mkfsOptions tries to figure out whether some additional mkfs options are required -func mkfsOptions(fs string) []string { - if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) { - // For kernels earlier than 3.16 (and newer xfsutils), - // some xfs features need to be explicitly disabled. - return []string{"-m", "crc=0,finobt=0"} - } - - return []string{} -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - args := mkfsOptions(devices.filesystem) - args = append(args, devices.mkfsArgs...) - args = append(args, devname) - - logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args) - defer func() { - if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case "xfs": - err = exec.Command("mkfs.xfs", args...).Run() - case "ext4": - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. 
- if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. - if !devices.deferredDelete { - return - } - - logrus.Debug("devmapper: Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) poolHasFreeSpace() error { - if devices.minFreeSpacePercent == 0 { - return nil - } - - _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err != nil { - return err - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeData < 1 { - minFreeData = 1 - } - dataFree := dataTotal - dataUsed - if dataFree < minFreeData { - return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) - } - - minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeMetadata < 1 { - minFreeMetadata = 1 - } - - metadataFree := metadataTotal - metadataUsed - if metadataFree < minFreeMetadata { - return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. 
Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) - } - - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) takeSnapshot(hash string, baseInfo *devInfo, size uint64) error { - var ( - devinfo *devicemapper.Info - err error - ) - - if err = devices.poolHasFreeSpace(); err != nil { - return err - } - - if devices.deferredRemove { - devinfo, err = devicemapper.GetInfoWithDeferred(baseInfo.Name()) - if err != nil { - return err - } - if devinfo != nil && devinfo.DeferredRemove != 0 { - err = devices.cancelDeferredRemoval(baseInfo) - if err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. 
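// Aside: the arithmetic behind poolHasFreeSpace above, reduced to a
// standalone sketch. With minFreeSpacePercent = 10 and a pool of 1000 data
// blocks of which 950 are used, the reserve is 1000*10/100 = 100 blocks,
// only 50 are free, and the allocation is refused. All names here are
// illustrative.
package main

import "fmt"

// minFree returns the reserve implied by percent, never less than one block.
func minFree(total, percent uint64) uint64 {
	m := total * percent / 100
	if m < 1 {
		m = 1
	}
	return m
}

// hasFreeSpace reports whether the pool still has at least the reserve free.
func hasFreeSpace(used, total, percent uint64) bool {
	return total-used >= minFree(total, percent)
}

func main() {
	fmt.Println(hasFreeSpace(950, 1000, 10)) // false: 50 free < 100 reserved
	fmt.Println(hasFreeSpace(850, 1000, 10)) // true: 150 free >= 100 reserved
}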
- if errors.Cause(err) != devicemapper.ErrEnxio { - return err - } - devinfo = nil - } else { - defer devices.deactivateDevice(baseInfo) - } - } - } else { - devinfo, err = devicemapper.GetInfo(baseInfo.Name()) - if err != nil { - return err - } - } - - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err = devicemapper.SuspendDevice(baseInfo.Name()); err != nil { - return err - } - defer devicemapper.ResumeDevice(baseInfo.Name()) - } - - if err = devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { - return err - } - - return nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDeviceRaw(devices.getPoolDevName(), deviceID, baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. - logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) - if err != nil { - logrus.Debugf("devmapper: Failed to read %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - logrus.Debugf("devmapper: Failed to unmarshal devInfo from %s with err: %v", devices.metadataFile(info), err) - return nil - } - - if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) - return uuid, nil -} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return 
devices.BaseDeviceFilesystem -} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If user specified a filesystem using dm.fs option and current - // file system of base image is not same, warn user that dm.fs - // will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem) - devices.filesystem = devices.BaseDeviceFilesystem - } - return nil -} - -func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error { - devices.BaseDeviceFilesystem = fs - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - devices.BaseDeviceUUID = uuid - return devices.saveDeviceSetMetaData() -} - -func (devices *DeviceSet) createBaseImage() error { - logrus.Debug("devmapper: Initializing base device-mapper thin volume") - - // Create initial device - info, err := devices.createRegisterDevice("") - if err != nil { - return err - } - - logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume") - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return err - } - - if err := devices.createFilesystem(info); err != nil { - return err - } - - info.Initialized = true - if err := devices.saveMetadata(info); err != nil { - info.Initialized = false - return err - } - - if err := devices.saveBaseDeviceUUID(info); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - - return nil -} - -// Returns if thin pool device exists or not. If device exists, also makes -// sure it is a thin pool device and not some other type of device. -func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) { - logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice) - - info, err := devicemapper.GetInfo(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err) - } - - // Device does not exist. 
- if info.Exists == 0 { - return false, nil - } - - _, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice) - if err != nil { - return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err) - } - - if deviceType != "thin-pool" { - return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice) - } - - return true, nil -} - -func (devices *DeviceSet) checkThinPool() error { - _, transactionID, dataUsed, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - if dataUsed != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks", - devices.thinPoolDevice) - } - if transactionID != 0 { - return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID", - devices.thinPoolDevice) - } - return nil -} - -// Base image is initialized properly. Either save UUID for first time (for -// upgrade case or verify UUID. -func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error { - // If BaseDeviceUUID is nil (upgrade case), save it and return success. - if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/containers/storage/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256))) - } - - defer unix.Unmount(fsMountPoint, unix.MNT_DETACH) - - switch devices.BaseDeviceFilesystem { - case "ext4": - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - case "xfs": - if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - 
return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil - } - - logrus.Debug("devmapper: Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. - if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil -} - -func setCloseOnExec(name string) { - fileInfos, _ := ioutil.ReadDir("/proc/self/fd") - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - unix.CloseOnExec(fd) - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. 
-func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: %s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. - if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - return os.RemoveAll(devices.transactionMetaFile()) -} - -func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. 
- if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was open transaction but pool transaction ID is same - // as open transaction ID, nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If open transaction ID is less than pool transaction ID, something - // is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // Pool transaction ID is not same as open transaction. There is - // a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. 
- if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debug("devmapper: Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - // Kernel driver version >= 4.27.0 support deferred removal - - logrus.Debugf("devicemapper: kernel dm driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver major version %q as a number", versionSplit[0]) - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return errors.Wrapf(graphdriver.ErrNotSupported, "unable to parse driver minor version %q as a number", versionSplit[1]) - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it can not be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - var stat unix.Stat_t - err := unix.Stat(file.Name(), &stat) - if err != nil { - return 0, 0, err - } - - dev := stat.Rdev - majorNum := major(dev) - minorNum := minor(dev) - - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is backing file of a loop back device, find the -// loopback device name and its major/minor number. 
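// Aside: a minimal sketch of the journaling protocol implemented by
// openTransaction, closeTransaction, rollbackTransaction and
// processPendingTransaction above. The pool stores TransactionID; a journal
// file records the ID we intend to move to plus the device being created,
// so a crash mid-create is detectable on restart. The pool and journal
// types here are illustrative stand-ins for device-mapper state and the
// transaction metadata file.
package main

import "fmt"

type journal struct {
	OpenTransactionID uint64
	Hash              string
	DeviceID          int
}

type pool struct{ TransactionID uint64 }

// create journals the intent, creates the device, then commits the new ID.
func create(p *pool, j *journal, hash string, id int) {
	*j = journal{OpenTransactionID: p.TransactionID + 1, Hash: hash, DeviceID: id}
	// ... create the thin device; a crash here leaves the journal ahead ...
	p.TransactionID = j.OpenTransactionID // commit (closeTransaction)
}

// recoverPending rolls back a half-finished create after a restart.
func recoverPending(p *pool, j *journal) {
	if j.OpenTransactionID == p.TransactionID {
		return // nothing was in flight
	}
	// The pool never reached the journaled ID: delete device j.DeviceID and
	// its metadata, then fall back to the pool's last committed ID.
	fmt.Printf("rolling back device %d (hash %q)\n", j.DeviceID, j.Hash)
	j.OpenTransactionID = p.TransactionID
}

func main() {
	p, j := &pool{TransactionID: 7}, &journal{OpenTransactionID: 7}
	create(p, j, "abc", 12)
	fmt.Println(p.TransactionID) // 8: committed

	// Simulate a crash after journaling but before the commit:
	*j = journal{OpenTransactionID: p.TransactionID + 1, Hash: "def", DeviceID: 13}
	recoverPending(p, j) // rolling back device 13 (hash "def")
}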
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // data device has not been passed in. So there should be a data file - // which is being mounted as loop device. - if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // metadata device has not been passed in. So there should be a - // metadata file which is being mounted as loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - - // If user asked for deferred removal then check both libdm library - // and kernel driver support deferred removal otherwise error out. 
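// Aside: a sketch of the params parsing done by getThinPoolDataMetaMajMin
// above. For a dm thin-pool target, the table params begin with the
// metadata device and then the data device, each as a "major:minor" pair;
// the sample string below is illustrative only.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMajMin splits a "major:minor" pair into its two numbers.
func parseMajMin(s string) (uint64, uint64, error) {
	parts := strings.Split(s, ":")
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("malformed major:minor pair %q", s)
	}
	maj, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return 0, 0, err
	}
	min, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return 0, 0, err
	}
	return maj, min, nil
}

func main() {
	params := "7:1 7:0 128 32768 1 skip_block_zeroing"
	var meta, data string
	if _, err := fmt.Sscanf(params, "%s %s", &meta, &data); err != nil {
		panic(err)
	}
	mMaj, mMin, _ := parseMajMin(meta)
	dMaj, dMin, _ := parseMajMin(data)
	fmt.Printf("metadata=%d:%d data=%d:%d\n", mMaj, mMin, dMaj, dMin)
}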
- if enableDeferredRemoval { - if !driverDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it") - } - if !devicemapper.LibraryDeferredRemovalSupport { - return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it") - } - logrus.Debug("devmapper: Deferred removal support enabled.") - devices.deferredRemove = true - } - - if enableDeferredDeletion { - if !devices.deferredRemove { - return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter") - } - logrus.Debug("devmapper: Deferred deletion support enabled.") - devices.deferredDelete = true - } - return nil -} - -func (devices *DeviceSet) initDevmapper(doInit bool) (retErr error) { - if err := devices.enableDeferredRemovalDeletion(); err != nil { - return err - } - - // https://github.com/docker/docker/issues/4036 - if supported := devicemapper.UdevSetSyncSupport(true); !supported { - logrus.Error("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/dockerd/#storage-driver-options") - - if !devices.overrideUdevSyncCheck { - return graphdriver.ErrNotSupported - } - } - - //create the root dir of the devmapper driver ownership to match this - //daemon's remapped root uid/gid so containers can start properly - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) { - return err - } - if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) { - return err - } - - prevSetupConfig, err := readLVMConfig(devices.root) - if err != nil { - return err - } - - if !reflect.DeepEqual(devices.lvmSetupConfig, directLVMConfig{}) { - if devices.thinPoolDevice != "" { - return errors.New("cannot setup direct-lvm when `dm.thinpooldev` is also specified") - } - - if !reflect.DeepEqual(prevSetupConfig, devices.lvmSetupConfig) { - if !reflect.DeepEqual(prevSetupConfig, directLVMConfig{}) { - return errors.New("changing direct-lvm config is not supported") - } - logrus.WithField("storage-driver", "devicemapper").WithField("direct-lvm-config", devices.lvmSetupConfig).Debugf("Setting up direct lvm mode") - if err := verifyBlockDevice(devices.lvmSetupConfig.Device, lvmSetupConfigForce); err != nil { - return err - } - if err := setupDirectLVM(devices.lvmSetupConfig); err != nil { - return err - } - if err := writeLVMConfig(devices.root, devices.lvmSetupConfig); err != nil { - return err - } - } - devices.thinPoolDevice = "storage-thinpool" - logrus.WithField("storage-driver", "devicemapper").Debugf("Setting dm.thinpooldev to %q", devices.thinPoolDevice) - } - - // Set the device prefix from the device id and inode of the storage root dir - var st unix.Stat_t - if err := unix.Stat(devices.root, &st); err != nil { - return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err) - } - // "reg-" stands for "regular file". - // In the future we might use "dev-" for "device file", etc. 
- // container-maj,min[-inode] stands for: - // - Managed by container storage - // - The target of this device is at major and minor - // - If is defined, use that file inside the device as a loopback image. Otherwise use the device itself. - devices.devicePrefix = fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logrus.Debug("devmapper: Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - fsMagic, err := graphdriver.GetFSMagic(devices.loopbackDir()) - if err != nil { - return err - } - switch fsMagic { - case graphdriver.FsMagicAufs: - return errors.Errorf("devmapper: Loopback devices can not be created on AUFS filesystems") - } - - if devices.dataDevice == "" { - // Make sure the sparse images exist in /devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in /devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - defer func() { - if retErr != nil { - err = devices.deactivatePool() - if err != nil { - logrus.Warnf("devmapper: Failed to deactivatePool: %v", err) - } - } - }() - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of - // pool, like is it using loop devices. 
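// Aside: a runnable sketch of how the devicePrefix assigned above is
// derived. Stat'ing the storage root and combining the filesystem's device
// major:minor with the directory's inode gives two different storage roots
// on the same host distinct device-mapper name prefixes. "/tmp" and the
// linux/amd64 Stat_t layout are assumptions for illustration.
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func major(device uint64) uint64 { return (device >> 8) & 0xfff }
func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

func main() {
	var st unix.Stat_t
	if err := unix.Stat("/tmp", &st); err != nil {
		panic(err)
	}
	prefix := fmt.Sprintf("container-%d:%d-%d", major(st.Dev), minor(st.Dev), st.Ino)
	fmt.Println(prefix) // e.g. container-253:1-131073
}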
- if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. Please use `--storage-opt dm.thinpooldev`.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice START(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice END(hash=%s basehash=%s)", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case hash of new device is - // same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - size, err := devices.parseStorageOpt(storageOpt) - if err != nil { - return err - } - - if size == 0 { - size = baseInfo.Size - } - - if size < baseInfo.Size { - return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) - } - - if err := devices.takeSnapshot(hash, baseInfo, size); err != nil { - return err - } - - // Grow the container rootfs. - if size > baseInfo.Size { - info, err := devices.lookupDevice(hash) - if err != nil { - return err - } - - if err := devices.growFS(info); err != nil { - return err - } - } - - return nil -} - -func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return 0, err - } - return uint64(size), nil - default: - return 0, fmt.Errorf("Unknown option %s", key) - } - } - - return 0, nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. - if info.Deleted { - return nil - } - - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. 
- if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) - return err - } - - defer devices.closeTransaction() - - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return error. If deferred - // deletion is not enabled, we return an error. If error is - // something other then EBUSY, return an error. - if syncDelete || !devices.deferredDelete || errors.Cause(err) != devicemapper.ErrBusy { - logrus.Debugf("devmapper: Error deleting device: %s", err) - return err - } - } - - if err == nil { - if err := devices.unregisterDevice(info.Hash); err != nil { - return err - } - // If device was already in deferred delete state that means - // deletion was being tried again later. Reduce the deleted - // device count. - if info.Deleted { - devices.nrDeletedDevices-- - } - devices.markDeviceIDFree(info.DeviceID) - } else { - if err := devices.markForDeferredDeletion(info); err != nil { - return err - } - } - - return nil -} - -// Issue discard only if device open count is zero. -func (devices *DeviceSet) issueDiscard(info *devInfo) error { - logrus.Debugf("devmapper: issueDiscard START(device: %s).", info.Hash) - defer logrus.Debugf("devmapper: issueDiscard END(device: %s).", info.Hash) - // This is a workaround for the kernel not discarding block so - // on the thin pool when we remove a thinp device, so we do it - // manually. - // Even if device is deferred deleted, activate it and issue - // discards. - if err := devices.activateDeviceIfNeeded(info, true); err != nil { - return err - } - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.OpenCount != 0 { - logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount) - return nil - } - - if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil { - logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err) - } - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error { - if devices.doBlkDiscard { - devices.issueDiscard(info) - } - - // Try to deactivate device in case it is active. - // If deferred removal is enabled and deferred deletion is disabled - // then make sure device is removed synchronously. There have been - // some cases of device being busy for short duration and we would - // rather busy wait for device removal to take care of these cases. - deferredRemove := devices.deferredRemove - if !devices.deferredDelete { - deferredRemove = false - } - - if err := devices.deactivateDeviceMode(info, deferredRemove); err != nil { - logrus.Debugf("devmapper: Error deactivating device: %s", err) - return err - } - - if err := devices.deleteTransaction(info, syncDelete); err != nil { - return err - } - - return nil -} - -// DeleteDevice will return success if device has been marked for deferred -// removal. If one wants to override that and want DeleteDevice() to fail if -// device was busy and could not be deleted, set syncDelete=true. 
-func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error { - logrus.Debugf("devmapper: DeleteDevice START(hash=%v syncDelete=%v)", hash, syncDelete) - defer logrus.Debugf("devmapper: DeleteDevice END(hash=%v syncDelete=%v)", hash, syncDelete) - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - return devices.deleteDevice(info, syncDelete) -} - -func (devices *DeviceSet) deactivatePool() error { - logrus.Debug("devmapper: deactivatePool() START") - defer logrus.Debug("devmapper: deactivatePool() END") - devname := devices.getPoolDevName() - - devinfo, err := devicemapper.GetInfo(devname) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - if err := devicemapper.RemoveDevice(devname); err != nil { - return err - } - - if d, err := devicemapper.GetDeps(devname); err == nil { - logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count) - } - - return nil -} - -func (devices *DeviceSet) deactivateDevice(info *devInfo) error { - return devices.deactivateDeviceMode(info, devices.deferredRemove) -} - -func (devices *DeviceSet) deactivateDeviceMode(info *devInfo, deferredRemove bool) error { - var err error - logrus.Debugf("devmapper: deactivateDevice START(%s)", info.Hash) - defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash) - - devinfo, err := devicemapper.GetInfo(info.Name()) - if err != nil { - return err - } - - if devinfo.Exists == 0 { - return nil - } - - if deferredRemove { - err = devicemapper.RemoveDeviceDeferred(info.Name()) - } else { - err = devices.removeDevice(info.Name()) - } - - // This function's semantics is such that it does not return an - // error if device does not exist. So if device went away by - // the time we actually tried to remove it, do not return error. - if errors.Cause(err) != devicemapper.ErrEnxio { - return err - } - return nil -} - -// Issues the underlying dm remove operation. -func (devices *DeviceSet) removeDevice(devname string) error { - var err error - - logrus.Debugf("devmapper: removeDevice START(%s)", devname) - defer logrus.Debugf("devmapper: removeDevice END(%s)", devname) - - for i := 0; i < 200; i++ { - err = devicemapper.RemoveDevice(devname) - if err == nil { - break - } - if errors.Cause(err) != devicemapper.ErrBusy { - return err - } - - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - } - - return err -} - -func (devices *DeviceSet) cancelDeferredRemovalIfNeeded(info *devInfo) error { - if !devices.deferredRemove { - return nil - } - - logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemovalIfNeeded END(%s)", info.Name()) - - devinfo, err := devicemapper.GetInfoWithDeferred(info.Name()) - if err != nil { - return err - } - - if devinfo != nil && devinfo.DeferredRemove == 0 { - return nil - } - - // Cancel deferred remove - if err := devices.cancelDeferredRemoval(info); err != nil { - // If Error is ErrEnxio. Device is probably already gone. Continue. 
- if errors.Cause(err) != devicemapper.ErrBusy { - return err - } - } - return nil -} - -func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error { - logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name()) - defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name()) - - var err error - - // Cancel deferred remove - for i := 0; i < 100; i++ { - err = devicemapper.CancelDeferredRemove(info.Name()) - if err != nil { - if errors.Cause(err) != devicemapper.ErrBusy { - // If we see EBUSY it may be a transient error, - // sleep a bit a retry a few times. - devices.Unlock() - time.Sleep(100 * time.Millisecond) - devices.Lock() - continue - } - } - break - } - return err -} - -// Shutdown shuts down the device by unmounting the root. -func (devices *DeviceSet) Shutdown(home string) error { - logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix) - logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root) - defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix) - - // Stop deletion worker. This should start delivering new events to - // ticker channel. That means no new instance of cleanupDeletedDevice() - // will run after this call. If one instance is already running at - // the time of the call, it must be holding devices.Lock() and - // we will block on this lock till cleanup function exits. - devices.deletionWorkerTicker.Stop() - - devices.Lock() - // Save DeviceSet Metadata first. Docker kills all threads if they - // don't finish in certain time. It is possible that Shutdown() - // routine does not finish in time as we loop trying to deactivate - // some devices while these are busy. In that case shutdown() routine - // will be killed and we will not get a chance to save deviceset - // metadata. Hence save this early before trying to deactivate devices. - devices.saveDeviceSetMetaData() - - // ignore the error since it's just a best effort to not try to unmount something that's mounted - mounts, _ := mount.GetMounts() - mounted := make(map[string]bool, len(mounts)) - for _, mnt := range mounts { - mounted[mnt.Mountpoint] = true - } - - if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - - if mounted[p] { - // We use MNT_DETACH here in case it is still busy in some running - // container. This means it'll go away from the global scope directly, - // and the device will be released when that container dies. 
- if err := unix.Unmount(p, unix.MNT_DETACH); err != nil {
- logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
- }
- }
-
- if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
- logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
- } else {
- if err := devices.deactivateDevice(devInfo); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate %s, error: %s", devInfo.Hash, err)
- }
- }
-
- return nil
- }); err != nil && !os.IsNotExist(err) {
- devices.Unlock()
- return err
- }
-
- devices.Unlock()
-
- info, _ := devices.lookupDeviceWithLock("")
- if info != nil {
- info.lock.Lock()
- devices.Lock()
- if err := devices.deactivateDevice(info); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate base, error: %s", err)
- }
- devices.Unlock()
- info.lock.Unlock()
- }
-
- devices.Lock()
- if devices.thinPoolDevice == "" {
- if err := devices.deactivatePool(); err != nil {
- logrus.Debugf("devmapper: Shutdown deactivate pool, error: %s", err)
- }
- }
- devices.Unlock()
-
- return nil
-}
-
-// Recent XFS changes allow changing the behavior of the filesystem in case of errors.
-// When the thin pool gets full and XFS gets an ENOSPC error, it currently retries
-// the IO infinitely and this can sometimes block the container process,
-// which then can't be killed. With a 0 value, XFS will not retry upon
-// error and will instead shut down the filesystem.
-
-func (devices *DeviceSet) xfsSetNospaceRetries(info *devInfo) error {
- dmDevicePath, err := os.Readlink(info.DevName())
- if err != nil {
- return fmt.Errorf("devmapper: readlink failed for device %v:%v", info.DevName(), err)
- }
-
- dmDeviceName := path.Base(dmDevicePath)
- filePath := "/sys/fs/xfs/" + dmDeviceName + "/error/metadata/ENOSPC/max_retries"
- maxRetriesFile, err := os.OpenFile(filePath, os.O_WRONLY, 0)
- if err != nil {
- return fmt.Errorf("devmapper: user specified daemon option dm.xfs_nospace_max_retries but it does not seem to be supported on this system :%v", err)
- }
- defer maxRetriesFile.Close()
-
- // Set max retries to the user-specified value
- _, err = maxRetriesFile.WriteString(devices.xfsNospaceRetries)
- if err != nil {
- return fmt.Errorf("devmapper: Failed to write string %v to file %v:%v", devices.xfsNospaceRetries, filePath, err)
- }
- return nil
-}
-
-// MountDevice mounts the device if not already mounted.
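Viewed on its own, the xfsSetNospaceRetries helper above is just a readlink plus a single sysfs write. A minimal standalone sketch of the same idea, assuming the same /sys/fs/xfs error-configuration layout and using a hypothetical device path:

package main

import (
    "fmt"
    "os"
    "path"
)

// setXFSMaxRetries writes an ENOSPC retry limit for the XFS filesystem
// backed by the given device-mapper device. /dev/mapper entries are
// symlinks to /dev/dm-N, and the sysfs tree is keyed by that dm-N name.
func setXFSMaxRetries(devPath, retries string) error {
    target, err := os.Readlink(devPath)
    if err != nil {
        return fmt.Errorf("readlink %s: %v", devPath, err)
    }
    sysPath := "/sys/fs/xfs/" + path.Base(target) + "/error/metadata/ENOSPC/max_retries"

    f, err := os.OpenFile(sysPath, os.O_WRONLY, 0)
    if err != nil {
        return fmt.Errorf("open %s: %v", sysPath, err)
    }
    defer f.Close()

    // "0" means fail fast and shut the filesystem down instead of
    // retrying the IO forever when the thin pool is full.
    _, err = f.WriteString(retries)
    return err
}

func main() {
    // The device name here is illustrative only.
    if err := setXFSMaxRetries("/dev/mapper/example-thin-volume", "0"); err != nil {
        fmt.Fprintln(os.Stderr, err)
    }
}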
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - - options = joinMountOptions(options, devices.mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256))) - } - - if fstype == "xfs" && devices.xfsNospaceRetries != "" { - if err := devices.xfsSetNospaceRetries(info); err != nil { - unix.Unmount(path, unix.MNT_DETACH) - devices.deactivateDevice(info) - return err - } - } - - return nil -} - -// UnmountDevice unmounts the device and removes it from hash. -func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice START(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice END(hash=%s)", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := unix.Unmount(mountPath, unix.MNT_DETACH); err != nil { - return err - } - logrus.Debug("devmapper: Unmount done") - - return devices.deactivateDevice(info) -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. 
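Both UnmountDevice and the Shutdown walk above lean on lazy unmounting: MNT_DETACH detaches the mount from the namespace immediately and lets the kernel release it once its last user exits, instead of failing with EBUSY. A tiny sketch of that call in isolation, with a hypothetical mount point:

package main

import (
    "log"

    "golang.org/x/sys/unix"
)

func lazyUnmount(mountPath string) error {
    // A plain unmount fails with EBUSY while a container still holds the
    // mount; MNT_DETACH removes it from the mount tree right away and
    // defers the real teardown until the last reference goes away.
    return unix.Unmount(mountPath, unix.MNT_DETACH)
}

func main() {
    if err := lazyUnmount("/var/lib/example/devicemapper/mnt/abc123"); err != nil {
        log.Fatalf("unmount: %v", err)
    }
}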
-func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { - return - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice -} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(unix.Statfs_t) - if err := unix.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - status.DeferredDeleteEnabled = devices.deferredDelete - 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices
- status.BaseDeviceSize = devices.getBaseDeviceSize()
- status.BaseDeviceFS = devices.getBaseDeviceFS()
-
- totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus()
- if err == nil {
- // Convert from blocks to bytes
- blockSizeInSectors := totalSizeInSectors / dataTotal
-
- status.Data.Used = dataUsed * blockSizeInSectors * 512
- status.Data.Total = dataTotal * blockSizeInSectors * 512
- status.Data.Available = status.Data.Total - status.Data.Used
-
- // metadata blocks are always 4k
- status.Metadata.Used = metadataUsed * 4096
- status.Metadata.Total = metadataTotal * 4096
- status.Metadata.Available = status.Metadata.Total - status.Metadata.Used
-
- status.SectorSize = blockSizeInSectors * 512
-
- if check, _ := devices.isRealFile(devices.dataLoopFile); check {
- actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile)
- if err == nil && actualSpace < status.Data.Available {
- status.Data.Available = actualSpace
- }
- }
-
- if check, _ := devices.isRealFile(devices.metadataLoopFile); check {
- actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile)
- if err == nil && actualSpace < status.Metadata.Available {
- status.Metadata.Available = actualSpace
- }
- }
-
- minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100
- status.MinFreeSpace = minFreeData * blockSizeInSectors * 512
- }
-
- return status
-}
-
-// exportDeviceMetadata returns the stored metadata for the device with the given hash.
-func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) {
- info, err := devices.lookupDeviceWithLock(hash)
- if err != nil {
- return nil, err
- }
-
- info.lock.Lock()
- defer info.lock.Unlock()
-
- metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()}
- return metadata, nil
-}
-
-// NewDeviceSet creates the device set based on the options provided.
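The arithmetic in Status() above follows directly from the thin-pool status line that device-mapper reports: a transaction id plus used/total pairs for metadata and data, with sizes expressed in 512-byte sectors and data blocks. A self-contained sketch of that parsing and conversion, with made-up sample values:

package main

import "fmt"

func main() {
    // A 10 GiB pool expressed in 512-byte sectors, and a sample dmsetup
    // params string: "<transaction> <metaUsed>/<metaTotal> <dataUsed>/<dataTotal>".
    totalSizeInSectors := uint64(20971520)
    params := "1 137/4096 1024/81920"

    var transactionID, metaUsed, metaTotal, dataUsed, dataTotal uint64
    if _, err := fmt.Sscanf(params, "%d %d/%d %d/%d",
        &transactionID, &metaUsed, &metaTotal, &dataUsed, &dataTotal); err != nil {
        panic(err)
    }

    // Derive the data block size from the pool size, as Status() does.
    blockSizeInSectors := totalSizeInSectors / dataTotal

    fmt.Printf("data used:  %d bytes\n", dataUsed*blockSizeInSectors*512)
    fmt.Printf("data total: %d bytes\n", dataTotal*blockSizeInSectors*512)
    fmt.Printf("meta used:  %d bytes\n", metaUsed*4096) // metadata blocks are always 4k
}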
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - minFreeSpacePercent: defaultMinFreeSpacePercent, - } - - version, err := devicemapper.GetDriverVersion() - if err != nil { - // Can't even get driver version, assume not supported - return nil, graphdriver.ErrNotSupported - } - - if err := determineDriverCapabilities(version); err != nil { - return nil, graphdriver.ErrNotSupported - } - - if driverDeferredRemovalSupport && devicemapper.LibraryDeferredRemovalSupport { - // enable deferred stuff by default - enableDeferredDeletion = true - enableDeferredRemoval = true - } - - foundBlkDiscard := false - var lvmSetupConfig directLVMConfig - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != "ext4" && val != "xfs" { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.min_free_space": - if !strings.HasSuffix(val, "%") { - return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") - } - - valstring := strings.TrimSuffix(val, "%") - minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) - if err != nil { - return nil, err - } - - if minFreeSpacePercent >= 100 { - return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) - } - - devices.minFreeSpacePercent = 
uint32(minFreeSpacePercent) - case "dm.xfs_nospace_max_retries": - _, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return nil, err - } - devices.xfsNospaceRetries = val - case "dm.directlvm_device": - lvmSetupConfig.Device = val - case "dm.directlvm_device_force": - lvmSetupConfigForce, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.thinp_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_percent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpPercent = per - case "dm.thinp_metapercent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_metapercent=%s`", val) - } - if per >= 100 { - return nil, errors.New("dm.thinp_metapercent must be greater than 0 and less than 100") - } - lvmSetupConfig.ThinpMetaPercent = per - case "dm.thinp_autoextend_percent": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_percent=%s`", val) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_percent must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendPercent = per - case "dm.thinp_autoextend_threshold": - per, err := strconv.ParseUint(strings.TrimSuffix(val, "%"), 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.thinp_autoextend_threshold=%s`", val) - } - if per > 100 { - return nil, errors.New("dm.thinp_autoextend_threshold must be greater than 0 and less than 100") - } - lvmSetupConfig.AutoExtendThreshold = per - case "dm.libdm_log_level": - level, err := strconv.ParseInt(val, 10, 32) - if err != nil { - return nil, errors.Wrapf(err, "could not parse `dm.libdm_log_level=%s`", val) - } - if level < devicemapper.LogLevelFatal || level > devicemapper.LogLevelDebug { - return nil, errors.Errorf("dm.libdm_log_level must be in range [%d,%d]", devicemapper.LogLevelFatal, devicemapper.LogLevelDebug) - } - // Register a new logging callback with the specified level. - devicemapper.LogInit(devicemapper.DefaultLogger{ - Level: int(level), - }) - default: - return nil, fmt.Errorf("devmapper: Unknown option %s", key) - } - } - - if err := validateLVMConfig(lvmSetupConfig); err != nil { - return nil, err - } - - devices.lvmSetupConfig = lvmSetupConfig - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go b/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go deleted file mode 100644 index 9ab3e4f864..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/devmapper_doc.go +++ /dev/null @@ -1,106 +0,0 @@ -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. 
-// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. -// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/vendor/github.com/containers/storage/drivers/devmapper/driver.go b/vendor/github.com/containers/storage/drivers/devmapper/driver.go deleted file mode 100644 index d68fb66cc8..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/driver.go +++ /dev/null @@ -1,240 +0,0 @@ -// +build linux - -package devmapper - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/sirupsen/logrus" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/devicemapper" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/locker" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/system" - units "github.com/docker/go-units" -) - -func init() { - graphdriver.Register("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - locker *locker.Locker -} - -// Init creates a driver with the given home and the set 
of options. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) - if err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - locker: locker.New(), - } - - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. -func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", units.HumanSize(float64(s.SectorSize))}, - {"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", units.HumanSize(float64(s.Data.Used))}, - {"Data Space Total", units.HumanSize(float64(s.Data.Total))}, - {"Data Space Available", units.HumanSize(float64(s.Data.Available))}, - {"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))}, - {"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))}, - {"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))}, - {"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// Metadata returns a map of information about the device. -func (d *Driver) Metadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown(d.home) - - if err2 := mount.Unmount(d.home); err == nil { - err = err2 - } - - return err -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create adds a device with a given id and the parent. 
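The driver's Status() table above is just rows of [][2]string rendered with go-units, which is where the human-readable sizes come from. A small illustrative snippet (the byte counts are invented):

package main

import (
    "fmt"

    units "github.com/docker/go-units"
)

func main() {
    rows := [][2]string{
        {"Data Space Used", units.HumanSize(float64(107374182))},
        {"Data Space Total", units.HumanSize(float64(107374182400))},
    }
    for _, row := range rows {
        fmt.Printf("%s: %s\n", row[0], row[1])
    }
}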
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { - return err - } - - return nil -} - -// Remove removes a device with a given id, unmounts the filesystem. -func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return fmt.Errorf("failed to remove device %s: %v", id, err) - } - return system.EnsureRemoveAll(path.Join(d.home, "mnt", id)) -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (string, error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - rootFs := path.Join(mp, "rootfs") - if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil - } - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mp) - return "", err - } - - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { - d.ctr.Decrement(mp) - return "", err - } - - if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - - idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - } - - return rootFs, nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Decrement(mp); count > 0 { - return nil - } - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) - } - return err -} - -// Exists checks to see if the device exists. 
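Get and Put above are a per-path reference count around the real mount and unmount: only the first Get mounts, and only the last Put unmounts. A stripped-down sketch of that bookkeeping (the per-id locker is elided for brevity):

package main

import (
    "fmt"
    "sync"
)

type refCounter struct {
    mu     sync.Mutex
    counts map[string]int
}

func (rc *refCounter) increment(p string) int {
    rc.mu.Lock()
    defer rc.mu.Unlock()
    rc.counts[p]++
    return rc.counts[p]
}

func (rc *refCounter) decrement(p string) int {
    rc.mu.Lock()
    defer rc.mu.Unlock()
    rc.counts[p]--
    return rc.counts[p]
}

func main() {
    rc := &refCounter{counts: map[string]int{}}
    if rc.increment("/mnt/id1") == 1 {
        fmt.Println("first Get: mount the device")
    }
    if rc.increment("/mnt/id1") > 1 {
        fmt.Println("second Get: reuse the existing mount")
    }
    if rc.decrement("/mnt/id1") > 0 {
        fmt.Println("first Put: still referenced, keep it mounted")
    }
    if rc.decrement("/mnt/id1") == 0 {
        fmt.Println("last Put: unmount the device")
    }
}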
-func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/devmapper/mount.go b/vendor/github.com/containers/storage/drivers/devmapper/mount.go deleted file mode 100644 index 1dc3262d27..0000000000 --- a/vendor/github.com/containers/storage/drivers/devmapper/mount.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - - "golang.org/x/sys/unix" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - var mntpointSt unix.Stat_t - if err := unix.Stat(mountpoint, &mntpointSt); err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - var parentSt unix.Stat_t - if err := unix.Stat(filepath.Join(mountpoint, ".."), &parentSt); err != nil { - return false, err - } - return mntpointSt.Dev != parentSt.Dev, nil -} - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. -func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go deleted file mode 100644 index 615d93be5f..0000000000 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ /dev/null @@ -1,285 +0,0 @@ -package graphdriver - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/idtools" -) - -// FsMagic unsigned id of the filesystem in use. -type FsMagic uint32 - -const ( - // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. - FsMagicUnsupported = FsMagic(0x00000000) -) - -var ( - // All registered drivers - drivers map[string]InitFunc - - // ErrNotSupported returned when driver is not supported. - ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites returned when driver does not meet prerequisites. - ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") - // ErrIncompatibleFS returned when file system is not supported. 
- ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") -) - -//CreateOpts contains optional arguments for Create() and CreateReadWrite() -// methods. -type CreateOpts struct { - MountLabel string - StorageOpt map[string]string -} - -// InitFunc initializes the storage driver. -type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) - -// ProtoDriver defines the basic capabilities of a driver. -// This interface exists solely to be a minimum set of methods -// for client code which choose not to implement the entire Driver -// interface and use the NaiveDiffDriver wrapper constructor. -// -// Use of ProtoDriver directly by client code is not recommended. -type ProtoDriver interface { - // String returns a string representation of this driver. - String() string - // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. Additional options can - // be passed in opts. parent may be "" and opts may be nil. - CreateReadWrite(id, parent string, opts *CreateOpts) error - // Create creates a new, empty, filesystem layer with the - // specified id and parent and options passed in opts. Parent - // may be "" and opts may be nil. - Create(id, parent string, opts *CreateOpts) error - // Remove attempts to remove the filesystem layer with this id. - Remove(id string) error - // Get returns the mountpoint for the layered filesystem referred - // to by this id. You can optionally specify a mountLabel or "". - // Returns the absolute path to the mounted layered filesystem. - Get(id, mountLabel string) (dir string, err error) - // Put releases the system resources for the specified id, - // e.g, unmounting layered filesystem. - Put(id string) error - // Exists returns whether a filesystem layer with the specified - // ID exists on this driver. - Exists(id string) bool - // Status returns a set of key-value pairs which give low - // level diagnostic status about this driver. - Status() [][2]string - // Returns a set of key-value pairs which give low level information - // about the image/container driver is managing. - Metadata(id string) (map[string]string, error) - // Cleanup performs necessary tasks to release resources - // held by the driver, e.g., unmounting all layered filesystems - // known to this driver. - Cleanup() error - // AdditionalImageStores returns additional image stores supported by the driver - AdditionalImageStores() []string -} - -// DiffDriver is the interface to use to implement graph diffs -type DiffDriver interface { - // Diff produces an archive of the changes between the specified - // layer and its parent layer which may be "". - Diff(id, parent, mountLabel string) (io.ReadCloser, error) - // Changes produces a list of changes between the specified layer - // and its parent layer. If parent is "", then all changes will be ADD changes. - Changes(id, parent, mountLabel string) ([]archive.Change, error) - // ApplyDiff extracts the changeset from the given diff into the - // layer with the specified id and parent, returning the size of the - // new layer in bytes. - // The io.Reader must be an uncompressed stream. - ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) - // DiffSize calculates the changes between the specified id - // and its parent and returns the size in bytes of the changes - // relative to its base filesystem directory. 
- DiffSize(id, parent, mountLabel string) (size int64, err error) -} - -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver - DiffDriver -} - -// Capabilities defines a list of capabilities a driver may implement. -// These capabilities are not required; however, they do determine how a -// graphdriver can be used. -type Capabilities struct { - // Flags that this driver is capable of reproducing exactly equivalent - // diffs for read-only layers. If set, clients can rely on the driver - // for consistent tar streams, and avoid extra processing to account - // for potential differences (eg: the layer store's use of tar-split). - ReproducesExactDiffs bool -} - -// CapabilityDriver is the interface for layered file system drivers that -// can report on their Capabilities. -type CapabilityDriver interface { - Capabilities() Capabilities -} - -// DiffGetterDriver is the interface for layered file system drivers that -// provide a specialized function for getting file contents for tar-split. -type DiffGetterDriver interface { - Driver - // DiffGetter returns an interface to efficiently retrieve the contents - // of files in a layer. - DiffGetter(id string) (FileGetCloser, error) -} - -// FileGetCloser extends the storage.FileGetter interface with a Close method -// for cleaning up. -type FileGetCloser interface { - storage.FileGetter - // Close cleans up any resources associated with the FileGetCloser. - Close() error -} - -// Checker makes checks on specified filesystems. -type Checker interface { - // IsMounted returns true if the provided path is mounted for the specific checker - IsMounted(path string) bool -} - -func init() { - drivers = make(map[string]InitFunc) -} - -// Register registers an InitFunc for the driver. -func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -// GetDriver initializes and returns the registered driver -func GetDriver(name string, config Options) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) - } - - logrus.Errorf("Failed to GetDriver graph %s %s", name, config.Root) - return nil, errors.Wrapf(ErrNotSupported, "failed to GetDriver graph %s %s", name, config.Root) -} - -// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins -func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, errors.Wrapf(ErrNotSupported, "failed to built-in GetDriver graph %s %s", name, home) -} - -// Options is used to initialize a graphdriver -type Options struct { - Root string - DriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ExperimentalEnabled bool -} - -// New creates the driver and initializes it at the specified root. 
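Register and GetDriver above amount to a name-to-constructor map that each driver package fills in from its init() function. A toy version of just that registry mechanic, with an invented one-method interface and driver name:

package main

import "fmt"

type driver interface{ String() string }

type initFunc func(root string) (driver, error)

var registry = map[string]initFunc{}

func register(name string, fn initFunc) error {
    if _, exists := registry[name]; exists {
        return fmt.Errorf("name already registered %s", name)
    }
    registry[name] = fn
    return nil
}

type vfsDriver struct{ root string }

func (d *vfsDriver) String() string { return "vfs:" + d.root }

func main() {
    // In the real package each driver registers itself in init().
    register("vfs", func(root string) (driver, error) {
        return &vfsDriver{root: root}, nil
    })

    d, err := registry["vfs"]("/var/lib/example/vfs")
    if err != nil {
        panic(err)
    }
    fmt.Println(d)
}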
-func New(name string, config Options) (Driver, error) {
- if name != "" {
- logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver
- return GetDriver(name, config)
- }
-
- // Guess for prior driver
- driversMap := scanPriorDrivers(config.Root)
- for _, name := range priority {
- if name == "vfs" {
- // don't use vfs even if there is state present.
- continue
- }
- if _, prior := driversMap[name]; prior {
- // of the state found from prior drivers, check in order of our priority
- // which we would prefer
- driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
- if err != nil {
- // unlike below, we will return error here, because there is prior
- // state, and now it is no longer supported/prereq/compatible, so
- // something changed and needs attention. Otherwise the daemon's
- // images would just "disappear".
- logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err)
- return nil, err
- }
-
- // abort starting when there are other prior configured drivers
- // to ensure the user explicitly selects the driver to load
- if len(driversMap)-1 > 0 {
- var driversSlice []string
- for name := range driversMap {
- driversSlice = append(driversSlice, name)
- }
-
- return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s <DRIVER>)", config.Root, strings.Join(driversSlice, ", "))
- }
-
- logrus.Infof("[graphdriver] using prior storage driver: %s", name)
- return driver, nil
- }
- }
-
- // Check for priority drivers first
- for _, name := range priority {
- driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps)
- if err != nil {
- if isDriverNotSupported(err) {
- continue
- }
- return nil, err
- }
- return driver, nil
- }
-
- // Check all registered drivers if no priority driver is found
- for name, initFunc := range drivers {
- driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps)
- if err != nil {
- if isDriverNotSupported(err) {
- continue
- }
- return nil, err
- }
- return driver, nil
- }
- return nil, fmt.Errorf("No supported storage backend found")
-}
-
-// isDriverNotSupported returns true if the error initializing
-// the graph driver is a non-supported error.
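The probing loops in New() only skip to the next candidate when a driver fails with one of the "not supported here" sentinels, which is what isDriverNotSupported checks via errors.Cause. A minimal sketch of that pattern with github.com/pkg/errors and a fake init function:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

var errNotSupported = errors.New("driver not supported")

// initDriver stands in for getBuiltinDriver: it wraps the sentinel the
// same way GetDriver wraps ErrNotSupported.
func initDriver(name string) error {
    return errors.Wrapf(errNotSupported, "failed to init %s", name)
}

func main() {
    for _, name := range []string{"overlay", "devicemapper"} {
        err := initDriver(name)
        if errors.Cause(err) == errNotSupported {
            fmt.Printf("%s not supported here, trying the next driver: %v\n", name, err)
            continue
        }
        // Any other error is a real failure and aborts the probe.
        break
    }
}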
-func isDriverNotSupported(err error) bool {
- cause := errors.Cause(err)
- return cause == ErrNotSupported || cause == ErrPrerequisites || cause == ErrIncompatibleFS
-}
-
-// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers
-func scanPriorDrivers(root string) map[string]bool {
- driversMap := make(map[string]bool)
-
- for driver := range drivers {
- p := filepath.Join(root, driver)
- if _, err := os.Stat(p); err == nil && driver != "vfs" {
- driversMap[driver] = true
- }
- }
- return driversMap
-}
diff --git a/vendor/github.com/containers/storage/drivers/driver_freebsd.go b/vendor/github.com/containers/storage/drivers/driver_freebsd.go
deleted file mode 100644
index 53394b738d..0000000000
--- a/vendor/github.com/containers/storage/drivers/driver_freebsd.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package graphdriver
-
-import (
- "golang.org/x/sys/unix"
-)
-
-var (
- // Slice of drivers that should be used in an order
- priority = []string{
- "zfs",
- }
-)
-
-// Mounted checks if the given path is mounted as the fs type
-func Mounted(fsType FsMagic, mountPath string) (bool, error) {
- var buf unix.Statfs_t
- if err := unix.Statfs(mountPath, &buf); err != nil {
- return false, err
- }
- return FsMagic(buf.Type) == fsType, nil
-}
diff --git a/vendor/github.com/containers/storage/drivers/driver_solaris.go b/vendor/github.com/containers/storage/drivers/driver_solaris.go
deleted file mode 100644
index 174fa9670b..0000000000
--- a/vendor/github.com/containers/storage/drivers/driver_solaris.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// +build solaris,cgo
-
-package graphdriver
-
-/*
-#include <sys/statvfs.h>
-#include <stdlib.h>
-
-static inline struct statvfs *getstatfs(char *s) {
- struct statvfs *buf;
- int err;
- buf = (struct statvfs *)malloc(sizeof(struct statvfs));
- err = statvfs(s, buf);
- return buf;
-}
-*/
-import "C"
-import (
- "path/filepath"
- "unsafe"
-
- "github.com/containers/storage/pkg/mount"
- "github.com/sirupsen/logrus"
-)
-
-const (
- // FsMagicZfs filesystem id for Zfs
- FsMagicZfs = FsMagic(0x2fc12fc1)
-)
-
-var (
- // Slice of drivers that should be used in an order
- priority = []string{
- "zfs",
- }
-
- // FsNames maps filesystem id to name of the filesystem.
- FsNames = map[FsMagic]string{
- FsMagicZfs: "zfs",
- }
-)
-
-// GetFSMagic returns the filesystem id given the path.
-func GetFSMagic(rootpath string) (FsMagic, error) {
- return 0, nil
-}
-
-type fsChecker struct {
- t FsMagic
-}
-
-func (c *fsChecker) IsMounted(path string) bool {
- m, _ := Mounted(c.t, path)
- return m
-}
-
-// NewFsChecker returns a checker configured for the provided FsMagic
-func NewFsChecker(t FsMagic) Checker {
- return &fsChecker{
- t: t,
- }
-}
-
-// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
-// if the specified path is mounted.
-// No-op on Solaris.
-func NewDefaultChecker() Checker {
- return &defaultChecker{}
-}
-
-type defaultChecker struct {
-}
-
-func (c *defaultChecker) IsMounted(path string) bool {
- m, _ := mount.Mounted(path)
- return m
-}
-
-// Mounted checks if the given path is mounted as the fs type
-// Solaris supports only ZFS for now
-func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-
- cs := C.CString(filepath.Dir(mountPath))
- defer C.free(unsafe.Pointer(cs))
- buf := C.getstatfs(cs)
- defer C.free(unsafe.Pointer(buf))
-
- // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ...
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) - return false, ErrPrerequisites - } - - return true, nil -} diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go deleted file mode 100644 index 4a875608b0..0000000000 --- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris - -package graphdriver - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/containers/storage/drivers/driver_windows.go b/vendor/github.com/containers/storage/drivers/driver_windows.go deleted file mode 100644 index ffd30c2950..0000000000 --- a/vendor/github.com/containers/storage/drivers/driver_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package graphdriver - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - // Note it is OK to return FsMagicUnsupported on Windows. - return FsMagicUnsupported, nil -} diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go deleted file mode 100644 index f74239cb93..0000000000 --- a/vendor/github.com/containers/storage/drivers/fsdiff.go +++ /dev/null @@ -1,169 +0,0 @@ -package graphdriver - -import ( - "io" - "time" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/chrootarchive" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/sirupsen/logrus" -) - -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) - -// NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods which it may or may not -// support on its own. See the comment on the exported -// NewNaiveDiffDriver function below. -// Notably, the AUFS driver doesn't need to be wrapped like this. -type NaiveDiffDriver struct { - ProtoDriver - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// NewNaiveDiffDriver returns a fully functional driver that wraps the -// given ProtoDriver and adds the capability of the following methods which -// it may or may not support on its own: -// Diff(id, parent, mountLabel string) (io.ReadCloser, error) -// Changes(id, parent, mountLabel string) ([]archive.Change, error) -// ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) -// DiffSize(id, parent, mountLabel string) (size int64, err error) -func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". 
-func (gdw *NaiveDiffDriver) Diff(id, parent, mountLabel string) (arch io.ReadCloser, err error) { - startTime := time.Now() - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, mountLabel) - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - driver.Put(id) - } - }() - - if parent == "" { - archive, err := archive.Tar(layerFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil - } - - parentFs, err := driver.Get(parent, mountLabel) - if err != nil { - return nil, err - } - defer driver.Put(parent) - - changes, err := archive.ChangesDirs(layerFs, parentFs) - if err != nil { - return nil, err - } - - archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - - // NaiveDiffDriver compares file metadata with parent layers. Parent layers - // are extracted from tar's with full second precision on modified time. - // We need this hack here to make sure calls within same second receive - // correct result. - time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now())) - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (gdw *NaiveDiffDriver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, mountLabel) - if err != nil { - return nil, err - } - defer driver.Put(id) - - parentFs := "" - - if parent != "" { - parentFs, err = driver.Get(parent, mountLabel) - if err != nil { - return nil, err - } - defer driver.Put(parent) - } - - return archive.ChangesDirs(layerFs, parentFs) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { - driver := gdw.ProtoDriver - - // Mount the root filesystem so we can apply the diff/layer. - layerFs, err := driver.Get(id, mountLabel) - if err != nil { - return - } - defer driver.Put(id) - - options := &archive.TarOptions{UIDMaps: gdw.uidMaps, - GIDMaps: gdw.gidMaps} - start := time.Now().UTC() - logrus.Debug("Start untar layer") - if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { - return - } - logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
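Diff above returns a stream whose Close must also release the mounted layer, which is what the ioutils.NewReadCloserWrapper calls are for. A self-contained sketch of that wrapper pattern:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

type readCloserWrapper struct {
    io.Reader
    closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
    return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
    rc := newReadCloserWrapper(strings.NewReader("layer tar bytes"), func() error {
        // In the driver this is where Put(id) releases the layer mount.
        fmt.Println("stream closed, cleanup ran")
        return nil
    })
    io.Copy(ioutil.Discard, rc)
    rc.Close()
}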
-func (gdw *NaiveDiffDriver) DiffSize(id, parent, mountLabel string) (size int64, err error) { - driver := gdw.ProtoDriver - - changes, err := gdw.Changes(id, parent, mountLabel) - if err != nil { - return - } - - layerFs, err := driver.Get(id, mountLabel) - if err != nil { - return - } - defer driver.Put(id) - - return archive.ChangesSize(layerFs, changes), nil -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/check.go b/vendor/github.com/containers/storage/drivers/overlay/check.go deleted file mode 100644 index 2a096edf6f..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/check.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build linux - -package overlay - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/system" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// doesSupportNativeDiff checks whether the filesystem has a bug -// which copies up the opaque flag when copying up an opaque -// directory or the kernel enable CONFIG_OVERLAY_FS_REDIRECT_DIR. -// When these exist naive diff should be used. -func doesSupportNativeDiff(d string) error { - td, err := ioutil.TempDir(d, "opaque-bug-check") - if err != nil { - return err - } - defer func() { - if err := os.RemoveAll(td); err != nil { - logrus.Warnf("Failed to remove check directory %v: %v", td, err) - } - }() - - // Make directories l1/d, l1/d1, l2/d, l3, work, merged - if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil { - return err - } - if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "l3"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil { - return err - } - if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil { - return err - } - - // Mark l2/d as opaque - if err := system.Lsetxattr(filepath.Join(td, "l2", "d"), "trusted.overlay.opaque", []byte("y"), 0); err != nil { - return errors.Wrap(err, "failed to set opaque flag on middle layer") - } - - opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work")) - if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil { - return errors.Wrap(err, "failed to mount overlay") - } - defer func() { - if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil { - logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err) - } - }() - - // Touch file in d to force copy up of opaque directory "d" from "l2" to "l3" - if err := ioutil.WriteFile(filepath.Join(td, "merged", "d", "f"), []byte{}, 0644); err != nil { - return errors.Wrap(err, "failed to write to merged directory") - } - - // Check l3/d does not have opaque flag - xattrOpaque, err := system.Lgetxattr(filepath.Join(td, "l3", "d"), "trusted.overlay.opaque") - if err != nil { - return errors.Wrap(err, "failed to read opaque flag on upper layer") - } - if string(xattrOpaque) == "y" { - return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix") - } - - // rename "d1" to "d2" - if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil { - // if rename failed with syscall.EXDEV, the kernel doesn't have 
CONFIG_OVERLAY_FS_REDIRECT_DIR enabled - if err.(*os.LinkError).Err == syscall.EXDEV { - return nil - } - return errors.Wrap(err, "failed to rename dir in merged directory") - } - // get the xattr of "d2" - xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect") - if err != nil { - return errors.Wrap(err, "failed to read redirect flag on upper layer") - } - - if string(xattrRedirect) == "d1" { - return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled") - } - - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/mount.go b/vendor/github.com/containers/storage/drivers/overlay/mount.go deleted file mode 100644 index feb0395924..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package overlay - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "os" - "runtime" - - "github.com/containers/storage/pkg/reexec" - "golang.org/x/sys/unix" -) - -func init() { - reexec.Register("storage-mountfrom", mountFromMain) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -type mountOptions struct { - Device string - Target string - Type string - Label string - Flag uint32 -} - -func mountFrom(dir, device, target, mType string, flags uintptr, label string) error { - options := &mountOptions{ - Device: device, - Target: target, - Type: mType, - Flag: uint32(flags), - Label: label, - } - - cmd := reexec.Command("storage-mountfrom", dir) - w, err := cmd.StdinPipe() - if err != nil { - return fmt.Errorf("mountfrom error on pipe creation: %v", err) - } - - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - if err := cmd.Start(); err != nil { - w.Close() - return fmt.Errorf("mountfrom error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - w.Close() - return fmt.Errorf("mountfrom json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output) - } - return nil -} - -// mountfromMain is the entry-point for storage-mountfrom on re-exec. 
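mountFrom above re-executes the current binary and ships the mount arguments to the child as JSON on stdin, so the chdir and mount happen in a fresh process. A rough self-contained sketch of the same pipe protocol, re-execing via /proc/self/exe instead of the reexec package, with the child only printing what it would mount:

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "os/exec"
)

type options struct {
    Device string
    Target string
}

func main() {
    if len(os.Args) > 1 && os.Args[1] == "child" {
        var opts options
        if err := json.NewDecoder(os.Stdin).Decode(&opts); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        fmt.Printf("child would mount %s on %s\n", opts.Device, opts.Target)
        return
    }

    cmd := exec.Command("/proc/self/exe", "child")
    w, err := cmd.StdinPipe()
    if err != nil {
        panic(err)
    }
    cmd.Stdout = os.Stdout
    cmd.Stderr = os.Stderr
    if err := cmd.Start(); err != nil {
        panic(err)
    }
    // Write the options to the pipe for the child to read, then close it.
    if err := json.NewEncoder(w).Encode(options{Device: "/dev/example", Target: "/mnt/example"}); err != nil {
        panic(err)
    }
    w.Close()
    if err := cmd.Wait(); err != nil {
        panic(err)
    }
}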
-func mountFromMain() {
- runtime.LockOSThread()
- flag.Parse()
-
- var options *mountOptions
-
- if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
- fatal(err)
- }
-
- if err := os.Chdir(flag.Arg(0)); err != nil {
- fatal(err)
- }
-
- if err := unix.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
- fatal(err)
- }
-
- os.Exit(0)
-}
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go
deleted file mode 100644
index 4458b679ab..0000000000
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go
+++ /dev/null
@@ -1,773 +0,0 @@
-// +build linux
-
-package overlay
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "sync"
-
- "github.com/containers/storage/drivers"
- "github.com/containers/storage/drivers/overlayutils"
- "github.com/containers/storage/drivers/quota"
- "github.com/containers/storage/pkg/archive"
- "github.com/containers/storage/pkg/chrootarchive"
- "github.com/containers/storage/pkg/directory"
- "github.com/containers/storage/pkg/fsutils"
- "github.com/containers/storage/pkg/idtools"
- "github.com/containers/storage/pkg/locker"
- "github.com/containers/storage/pkg/mount"
- "github.com/containers/storage/pkg/parsers"
- "github.com/containers/storage/pkg/system"
- units "github.com/docker/go-units"
- "github.com/opencontainers/selinux/go-selinux/label"
- "github.com/pkg/errors"
- "github.com/sirupsen/logrus"
- "golang.org/x/sys/unix"
-)
-
-var (
- // untar defines the untar method
- untar = chrootarchive.UntarUncompressed
-)
-
-// This backend uses the overlay union filesystem for containers
-// with diff directories for each layer.
-
-// This version of the overlay driver requires at least kernel
-// 4.0.0 in order to support mounting multiple diff directories.
-
-// Each container/image has at least a "diff" directory and "link" file.
-// There is also a "lower" file when there are diff layers below, as well
-// as "merged" and "work" directories. The "diff" directory
-// has the upper layer of the overlay and is used to capture any
-// changes to the layer. The "lower" file contains all the lower layer
-// mounts separated by ":" and ordered from uppermost to lowermost
-// layers. The overlay itself is mounted in the "merged" directory,
-// and the "work" dir is needed for overlay to work.
-
-// The "link" file for each layer contains a unique string for the layer.
-// Under the "l" directory at the root there will be a symbolic link
-// with that unique string pointing to the "diff" directory for the layer.
-// The symbolic links are used to reference lower layers in the "lower"
-// file and on mount. The links are used to shorten the total length
-// of a layer reference without requiring changes to the layer identifier
-// or root directory. Mounts are always done relative to root and
-// reference the symbolic links in order to ensure the number of
-// lower directories can fit in a single page for making the mount
-// syscall. A hard upper limit of 128 lower layers is enforced to ensure
-// that mounts do not fail due to length.
-
-const (
- linkDir = "l"
- lowerFile = "lower"
- maxDepth = 128
-
- // idLength represents the number of random characters
- // which can be used to create the unique link identifier
- // for every layer. If this value is too long then the
- // page size limit for the mount command may be exceeded.
- // The idLength should be selected such that following equation - // is true (512 is a buffer for label metadata). - // ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512) - idLength = 26 -) - -type overlayOptions struct { - overrideKernelCheck bool - imageStores []string - quota quota.Quota -} - -// Driver contains information about the home directory and the list of active mounts that are created using this driver. -type Driver struct { - name string - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - quotaCtl *quota.Control - options overlayOptions - naiveDiff graphdriver.DiffDriver - supportsDType bool - locker *locker.Locker -} - -var ( - backingFs = "" - projectQuotaSupported = false - - useNaiveDiffLock sync.Once - useNaiveDiffOnly bool -) - -func init() { - graphdriver.Register("overlay", Init) - graphdriver.Register("overlay2", Init) -} - -// Init returns the a native diff driver for overlay filesystem. -// If overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned as error. -// If an overlay filesystem is not supported over an existing filesystem then error graphdriver.ErrIncompatibleFS is returned. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - opts, err := parseOptions(options) - if err != nil { - return nil, err - } - - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - // check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: - logrus.Errorf("'overlay' is not supported over %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - - // Create the driver home dir - if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { - return nil, err - } - - supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) - if err != nil { - os.Remove(filepath.Join(home, linkDir)) - os.Remove(home) - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - name: "overlay", - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)), - supportsDType: supportsDType, - locker: locker.New(), - options: *opts, - } - - d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps) - - if backingFs == "xfs" { - // Try to enable project quota support over xfs. - if d.quotaCtl, err = quota.NewControl(home); err == nil { - projectQuotaSupported = true - } else if opts.quota.Size > 0 { - return nil, fmt.Errorf("Storage option overlay.size not supported. Filesystem does not support Project Quota: %v", err) - } - } else if opts.quota.Size > 0 { - // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. 
Found %v", backingFs) - } - - logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v", backingFs, projectQuotaSupported, !useNaiveDiff(home)) - - return d, nil -} - -func parseOptions(options []string) (*overlayOptions, error) { - o := &overlayOptions{} - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": - logrus.Debugf("overlay: override_kernelcheck=%s", val) - o.overrideKernelCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case ".size", "overlay.size", "overlay2.size": - logrus.Debugf("overlay: size=%s", val) - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - o.quota.Size = uint64(size) - case ".imagestore", "overlay.imagestore", "overlay2.imagestore": - logrus.Debugf("overlay: imagestore=%s", val) - // Additional read only image stores to use for lower paths - for _, store := range strings.Split(val, ",") { - store = filepath.Clean(store) - if !filepath.IsAbs(store) { - return nil, fmt.Errorf("overlay: image path %q is not absolute. Can not be relative", store) - } - st, err := os.Stat(store) - if err != nil { - return nil, fmt.Errorf("overlay: can't stat imageStore dir %s: %v", store, err) - } - if !st.IsDir() { - return nil, fmt.Errorf("overlay: image path %q must be a directory", store) - } - o.imageStores = append(o.imageStores, store) - } - default: - return nil, fmt.Errorf("overlay: Unknown option %s", key) - } - } - return o, nil -} - -func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { - // We can try to modprobe overlay first - exec.Command("modprobe", "overlay").Run() - - layerDir, err := ioutil.TempDir(home, "compat") - if err == nil { - // Check if reading the directory's contents populates the d_type field, which is required - // for proper operation of the overlay filesystem. - supportsDType, err = fsutils.SupportsDType(layerDir) - if err != nil { - return false, err - } - if !supportsDType { - return false, overlayutils.ErrDTypeNotSupported("overlay", backingFs) - } - - // Try a test mount in the specific location we're looking at using. 
- mergedDir := filepath.Join(layerDir, "merged") - lower1Dir := filepath.Join(layerDir, "lower1") - lower2Dir := filepath.Join(layerDir, "lower2") - defer func() { - // Permitted to fail, since the various subdirectories - // can be empty or not even there, and the home might - // legitimately be not empty - _ = unix.Unmount(mergedDir, unix.MNT_DETACH) - _ = os.RemoveAll(layerDir) - _ = os.Remove(home) - }() - _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) - _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) - flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir) - if len(flags) < unix.Getpagesize() { - if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { - logrus.Debugf("overlay test mount with multiple lowers succeeded") - return supportsDType, nil - } - } - flags = fmt.Sprintf("lowerdir=%s", lower1Dir) - if len(flags) < unix.Getpagesize() { - if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { - logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") - } - } - logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) - return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) - } - - logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") -} - -func useNaiveDiff(home string) bool { - useNaiveDiffLock.Do(func() { - if err := doesSupportNativeDiff(home); err != nil { - logrus.Warnf("Not using native diff for overlay, this may cause degraded performance for building images: %v", err) - useNaiveDiffOnly = true - } - }) - return useNaiveDiffOnly -} - -func (d *Driver) String() string { - return d.name -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Backing Filesystem" used in this implementation. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Backing Filesystem", backingFs}, - {"Supports d_type", strconv.FormatBool(d.supportsDType)}, - {"Native Overlay Diff", strconv.FormatBool(!useNaiveDiff(d.home))}, - } -} - -// Metadata returns meta data about the overlay driver such as -// LowerDir, UpperDir, WorkDir and MergeDir used to store data. -func (d *Driver) Metadata(id string) (map[string]string, error) { - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return nil, err - } - - metadata := map[string]string{ - "WorkDir": path.Join(dir, "work"), - "MergedDir": path.Join(dir, "merged"), - "UpperDir": path.Join(dir, "diff"), - } - - lowerDirs, err := d.getLowerDirs(id) - if err != nil { - return nil, err - } - if len(lowerDirs) > 0 { - metadata["LowerDir"] = strings.Join(lowerDirs, ":") - } - - return metadata, nil -} - -// Cleanup any state created by overlay which should be cleaned when daemon -// is being shutdown. For now, we just have to unmount the bind mounted -// we had created. 
-func (d *Driver) Cleanup() error { - return mount.Unmount(d.home) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 && !projectQuotaSupported { - return fmt.Errorf("--storage-opt is supported only for overlay over xfs with 'pquota' mount option") - } - - if opts == nil { - opts = &graphdriver.CreateOpts{ - StorageOpt: map[string]string{}, - } - } - - if _, ok := opts.StorageOpt["size"]; !ok { - if opts.StorageOpt == nil { - opts.StorageOpt = map[string]string{} - } - opts.StorageOpt["size"] = strconv.FormatUint(d.options.quota.Size, 10) - } - - return d.create(id, parent, opts) -} - -// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id. -// The parent filesystem is used to configure these directories for the overlay. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - if opts != nil && len(opts.StorageOpt) != 0 { - if _, ok := opts.StorageOpt["size"]; ok { - return fmt.Errorf("--storage-opt size is only supported for ReadWrite Layers") - } - } - return d.create(id, parent, opts) -} - -func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) { - dir := d.dir(id) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil { - return err - } - - defer func() { - // Clean up on failure - if retErr != nil { - os.RemoveAll(dir) - } - }() - - if opts != nil && len(opts.StorageOpt) > 0 { - driver := &Driver{} - if err := d.parseStorageOpt(opts.StorageOpt, driver); err != nil { - return err - } - - if driver.options.quota.Size > 0 { - // Set container disk quota limit - if err := d.quotaCtl.SetQuota(dir, driver.options.quota); err != nil { - return err - } - } - } - - if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil { - return err - } - - lid := generateID(idLength) - if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil { - return err - } - - // Write link id to link file - if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil { - return err - } - - if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil { - return err - } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil { - return err - } - - // if no parent directory, create a dummy lower directory and skip writing a "lowers" file - if parent == "" { - return idtools.MkdirAs(path.Join(dir, "empty"), 0700, rootUID, rootGID) - } - - lower, err := d.getLower(parent) - if err != nil { - return err - } - if lower != "" { - if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil { - return err - } - } - - return nil -} - -// Parse overlay storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to set the disk project quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.quota.Size = uint64(size) - default: - return 
fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - -func (d *Driver) getLower(parent string) (string, error) { - parentDir := d.dir(parent) - - // Ensure parent exists - if _, err := os.Lstat(parentDir); err != nil { - return "", err - } - - // Read Parent link fileA - parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link")) - if err != nil { - return "", err - } - lowers := []string{path.Join(linkDir, string(parentLink))} - - parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile)) - if err == nil { - parentLowers := strings.Split(string(parentLower), ":") - lowers = append(lowers, parentLowers...) - } - if len(lowers) > maxDepth { - return "", errors.New("max depth exceeded") - } - return strings.Join(lowers, ":"), nil -} - -func (d *Driver) dir(id string) string { - newpath := path.Join(d.home, id) - if _, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { - l := path.Join(p, d.name, id) - _, err = os.Stat(l) - if err == nil { - return l - } - } - } - return newpath -} - -func (d *Driver) getLowerDirs(id string) ([]string, error) { - var lowersArray []string - lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile)) - if err == nil { - for _, s := range strings.Split(string(lowers), ":") { - lower := d.dir(s) - lp, err := os.Readlink(lower) - if err != nil { - return nil, err - } - lowersArray = append(lowersArray, path.Clean(d.dir(path.Join("link", lp)))) - } - } else if !os.IsNotExist(err) { - return nil, err - } - return lowersArray, nil -} - -// Remove cleans the directories that are created for this id. -func (d *Driver) Remove(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - lid, err := ioutil.ReadFile(path.Join(dir, "link")) - if err == nil { - if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil { - logrus.Debugf("Failed to remove link: %v", err) - } - } - - if err := system.EnsureRemoveAll(dir); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// Get creates and mounts the required file system for the given id and returns the mount path. 
-func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return "", err - } - - diffDir := path.Join(dir, "diff") - lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile)) - if err != nil && !os.IsNotExist(err) { - return "", err - } - - newlowers := "" - for _, l := range strings.Split(string(lowers), ":") { - lower := "" - newpath := path.Join(d.home, l) - if _, err := os.Stat(newpath); err != nil { - for _, p := range d.AdditionalImageStores() { - lower = path.Join(p, d.name, l) - if _, err2 := os.Stat(lower); err2 == nil { - break - } - lower = "" - } - if lower == "" { - return "", fmt.Errorf("Can't stat lower layer %q: %v", newpath, err) - } - } else { - lower = newpath - } - if newlowers == "" { - newlowers = lower - } else { - newlowers = newlowers + ":" + lower - } - } - if len(lowers) == 0 { - newlowers = path.Join(dir, "empty") - lowers = []byte(newlowers) - } - - mergedDir := path.Join(dir, "merged") - if count := d.ctr.Increment(mergedDir); count > 1 { - return mergedDir, nil - } - defer func() { - if retErr != nil { - if c := d.ctr.Decrement(mergedDir); c <= 0 { - if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil { - logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr) - } - } - } - }() - - workDir := path.Join(dir, "work") - opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", newlowers, diffDir, workDir) - mountData := label.FormatMountLabel(opts, mountLabel) - mount := unix.Mount - mountTarget := mergedDir - - pageSize := unix.Getpagesize() - - // Use relative paths and mountFrom when the mount data has exceeded - // the page size. The mount syscall fails if the mount data cannot - // fit within a page and relative links make the mount data much - // smaller at the expense of requiring a fork exec to chroot. - if len(mountData) > pageSize { - //FIXME: We need to figure out to get this to work with additional stores - opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work")) - mountData = label.FormatMountLabel(opts, mountLabel) - if len(mountData) > pageSize { - return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData)) - } - - mount = func(source string, target string, mType string, flags uintptr, label string) error { - return mountFrom(d.home, source, target, mType, flags, label) - } - mountTarget = path.Join(id, "merged") - } - if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil { - return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err) - } - - // chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a - // user namespace requires this to move a directory from lower to upper. - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return "", err - } - - if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil { - return "", err - } - - return mergedDir, nil -} - -// Put unmounts the mount path created for the give id. 
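Get and Put bracket a reference count: the merged directory is mounted on the first Get, reused by later ones, and only unmounted when the count returns to zero. A toy version of that discipline follows; refCounter here is a stand-in, not the graphdriver.RefCounter the driver actually uses.

```go
package main

import (
	"fmt"
	"sync"
)

// refCounter tracks how many callers currently hold each mount point.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (r *refCounter) get(path string, mount func() error) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[path]++
	if r.counts[path] > 1 {
		return nil // already mounted for an earlier caller
	}
	return mount()
}

func (r *refCounter) put(path string, unmount func() error) error {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[path]--
	if r.counts[path] > 0 {
		return nil // other callers still need the mount
	}
	delete(r.counts, path)
	return unmount()
}

func main() {
	rc := &refCounter{counts: map[string]int{}}
	mount := func() error { fmt.Println("mount"); return nil }
	unmount := func() error { fmt.Println("unmount"); return nil }
	_ = rc.get("merged", mount) // mounts
	_ = rc.get("merged", mount) // reuses the existing mount
	_ = rc.put("merged", unmount)
	_ = rc.put("merged", unmount) // last put actually unmounts
}
```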
-func (d *Driver) Put(id string) error { - d.locker.Lock(id) - defer d.locker.Unlock(id) - dir := d.dir(id) - if _, err := os.Stat(dir); err != nil { - return err - } - mountpoint := path.Join(d.dir(id), "merged") - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil && !os.IsNotExist(err) { - return err - } - if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { - logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) - } - return nil -} - -// Exists checks to see if the id is already mounted. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} - -// isParent returns if the passed in parent is the direct parent of the passed in layer -func (d *Driver) isParent(id, parent string) bool { - lowers, err := d.getLowerDirs(id) - if err != nil { - return false - } - if parent == "" && len(lowers) > 0 { - return false - } - - parentDir := d.dir(parent) - var ld string - if len(lowers) > 0 { - ld = filepath.Dir(lowers[0]) - } - if ld == "" && parent == "" { - return true - } - return ld == parentDir -} - -// ApplyDiff applies the new layer into a root -func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (size int64, err error) { - if !d.isParent(id, parent) { - return d.naiveDiff.ApplyDiff(id, parent, mountLabel, diff) - } - - applyDir := d.getDiffPath(id) - - logrus.Debugf("Applying tar in %s", applyDir) - // Overlay doesn't need the parent id to apply the diff - if err := untar(diff, applyDir, &archive.TarOptions{ - UIDMaps: d.uidMaps, - GIDMaps: d.gidMaps, - WhiteoutFormat: archive.OverlayWhiteoutFormat, - }); err != nil { - return 0, err - } - - return directory.Size(applyDir) -} - -func (d *Driver) getDiffPath(id string) string { - dir := d.dir(id) - - return path.Join(dir, "diff") -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.DiffSize(id, parent, mountLabel) - } - return directory.Size(d.getDiffPath(id)) -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (d *Driver) Diff(id, parent, mountLabel string) (io.ReadCloser, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Diff(id, parent, mountLabel) - } - - diffPath := d.getDiffPath(id) - logrus.Debugf("Tar with options on %s", diffPath) - return archive.TarWithOptions(diffPath, &archive.TarOptions{ - Compression: archive.Uncompressed, - UIDMaps: d.uidMaps, - GIDMaps: d.gidMaps, - WhiteoutFormat: archive.OverlayWhiteoutFormat, - }) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { - if useNaiveDiff(d.home) || !d.isParent(id, parent) { - return d.naiveDiff.Changes(id, parent, mountLabel) - } - // Overlay doesn't have snapshots, so we need to get changes from all parent - // layers. 
- diffPath := d.getDiffPath(id) - layers, err := d.getLowerDirs(id) - if err != nil { - return nil, err - } - - return archive.OverlayChanges(layers, diffPath) -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return d.options.imageStores -} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go b/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go deleted file mode 100644 index 3dbb4de44e..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package overlay diff --git a/vendor/github.com/containers/storage/drivers/overlay/randomid.go b/vendor/github.com/containers/storage/drivers/overlay/randomid.go deleted file mode 100644 index fc565ef0ba..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlay/randomid.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build linux - -package overlay - -import ( - "crypto/rand" - "encoding/base32" - "fmt" - "io" - "os" - "syscall" - "time" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// generateID creates a new random string identifier with the given length -func generateID(l int) string { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - size = (l*5 + 7) / 8 - u = make([]byte, size) - ) - // TODO: Include time component, counter component, random component - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - logrus.Errorf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - s := base32.StdEncoding.EncodeToString(u) - - return s[:l] -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == unix.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go b/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go deleted file mode 100644 index 467733647c..0000000000 --- a/vendor/github.com/containers/storage/drivers/overlayutils/overlayutils.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux - -package overlayutils - -import ( - "errors" - "fmt" -) - -// ErrDTypeNotSupported denotes that the backing filesystem doesn't support d_type. 
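randomid.go, deleted just above, sizes its buffer as (l*5+7)/8 because base32 yields 5 bits per character, and the overlay constants are tuned so link names fill a mount page exactly: with idLength = 26, linkDir "l" and a ":" separator, (26 + 1 + 1) * 128 = 3584 = 4096 - 512 on 4 KiB pages, matching the 512-byte label budget named in its comment. A stripped-down generateID without the entropy-retry machinery:

```go
package main

import (
	"crypto/rand"
	"encoding/base32"
	"fmt"
	"io"
)

// generateID returns l base32 characters of randomness; base32 encodes
// 5 bits per character, so (l*5+7)/8 bytes of input are enough.
func generateID(l int) (string, error) {
	buf := make([]byte, (l*5+7)/8)
	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
		return "", err
	}
	return base32.StdEncoding.EncodeToString(buf)[:l], nil
}

func main() {
	id, err := generateID(26) // 26 matches the overlay driver's idLength
	if err != nil {
		panic(err)
	}
	fmt.Println(id)
}
```

The vendored version additionally retries short reads of the random source with a capped 10 ms-step backoff, so its cumulative sleep stays within the roughly 450 ms budget worked out in its comment.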
-func ErrDTypeNotSupported(driver, backingFs string) error { - msg := fmt.Sprintf("%s: the backing %s filesystem is formatted without d_type support, which leads to incorrect behavior.", driver, backingFs) - if backingFs == "xfs" { - msg += " Reformat the filesystem with ftype=1 to enable d_type support." - } - msg += " Running without d_type is not supported." - return errors.New(msg) -} diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go deleted file mode 100644 index 93e7443713..0000000000 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build linux - -// -// projectquota.go - implements XFS project quota controls -// for setting quota limits on a newly created directory. -// It currently supports the legacy XFS specific ioctls. -// -// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR -// for both xfs/ext4 for kernel version >= v4.5 -// - -package quota - -/* -#include -#include -#include -#include -#include - -#ifndef FS_XFLAG_PROJINHERIT -struct fsxattr { - __u32 fsx_xflags; - __u32 fsx_extsize; - __u32 fsx_nextents; - __u32 fsx_projid; - unsigned char fsx_pad[12]; -}; -#define FS_XFLAG_PROJINHERIT 0x00000200 -#endif -#ifndef FS_IOC_FSGETXATTR -#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr) -#endif -#ifndef FS_IOC_FSSETXATTR -#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr) -#endif - -#ifndef PRJQUOTA -#define PRJQUOTA 2 -#endif -#ifndef XFS_PROJ_QUOTA -#define XFS_PROJ_QUOTA 2 -#endif -#ifndef Q_XSETPQLIM -#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA) -#endif -#ifndef Q_XGETPQUOTA -#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA) -#endif -*/ -import "C" -import ( - "fmt" - "io/ioutil" - "path" - "path/filepath" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Quota limit params - currently we only control blocks hard limit -type Quota struct { - Size uint64 -} - -// Control - Context to be used by storage driver (e.g. overlay) -// who wants to apply project quotas to container dirs -type Control struct { - backingFsBlockDev string - nextProjectID uint32 - quotas map[string]uint32 -} - -// NewControl - initialize project quota support. -// Test to make sure that quota can be set on a test dir and find -// the first project id to be used for the next container create. -// -// Returns nil (and error) if project quota is not supported. -// -// First get the project id of the home directory. -// This test will fail if the backing fs is not xfs. -// -// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.: -// echo 999:/var/lib/containers/storage/overlay >> /etc/projects -// echo storage:999 >> /etc/projid -// xfs_quota -x -c 'project -s storage' / -// -// In that case, the home directory project id will be used as a "start offset" -// and all containers will be assigned larger project ids (e.g. >= 1000). -// This is a way to prevent xfs_quota management from conflicting with containers/storage. -// -// Then try to create a test directory with the next project id and set a quota -// on it. If that works, continue to scan existing containers to map allocated -// project ids. 
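The quota API whose deletion begins here is small: NewControl probes the backing filesystem once, and SetQuota then pins each container directory to a fresh XFS project ID with a block hard limit. A usage sketch against the signatures visible in the removed file; the paths are illustrative and the package builds only on Linux with cgo.

```go
package main

import (
	"log"

	"github.com/containers/storage/drivers/quota"
)

func main() {
	home := "/var/lib/containers/storage/overlay" // illustrative driver home
	ctl, err := quota.NewControl(home)
	if err != nil {
		// Not XFS, or project quotas unavailable: the overlay driver
		// treats any overlay.size storage option as an error here.
		log.Fatal(err)
	}
	// Give one container's writable layer a 1 GiB block hard limit.
	if err := ctl.SetQuota(home+"/example-layer", quota.Quota{Size: 1 << 30}); err != nil {
		log.Fatal(err)
	}
}
```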
-// -func NewControl(basePath string) (*Control, error) { - // - // Get project id of parent dir as minimal id to be used by driver - // - minProjectID, err := getProjectID(basePath) - if err != nil { - return nil, err - } - minProjectID++ - - // - // create backing filesystem device node - // - backingFsBlockDev, err := makeBackingFsDev(basePath) - if err != nil { - return nil, err - } - - // - // Test if filesystem supports project quotas by trying to set - // a quota on the first available project id - // - quota := Quota{ - Size: 0, - } - if err := setProjectQuota(backingFsBlockDev, minProjectID, quota); err != nil { - return nil, err - } - - q := Control{ - backingFsBlockDev: backingFsBlockDev, - nextProjectID: minProjectID + 1, - quotas: make(map[string]uint32), - } - - // - // get first project id to be used for next container - // - err = q.findNextProjectID(basePath) - if err != nil { - return nil, err - } - - logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID) - return &q, nil -} - -// SetQuota - assign a unique project id to directory and set the quota limits -// for that project id -func (q *Control) SetQuota(targetPath string, quota Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - projectID = q.nextProjectID - - // - // assign project id to new container directory - // - err := setProjectID(targetPath, projectID) - if err != nil { - return err - } - - q.quotas[targetPath] = projectID - q.nextProjectID++ - } - - // - // set the quota limit for the container's project id - // - logrus.Debugf("SetQuota(%s, %d): projectID=%d", targetPath, quota.Size, projectID) - return setProjectQuota(q.backingFsBlockDev, projectID, quota) -} - -// setProjectQuota - set the quota for project id on xfs block device -func setProjectQuota(backingFsBlockDev string, projectID uint32, quota Quota) error { - var d C.fs_disk_quota_t - d.d_version = C.FS_DQUOT_VERSION - d.d_id = C.__u32(projectID) - d.d_flags = C.XFS_PROJ_QUOTA - - d.d_fieldmask = C.FS_DQ_BHARD | C.FS_DQ_BSOFT - d.d_blk_hardlimit = C.__u64(quota.Size / 512) - d.d_blk_softlimit = d.d_blk_hardlimit - - var cs = C.CString(backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM, - uintptr(unsafe.Pointer(cs)), uintptr(d.d_id), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to set quota limit for projid %d on %s: %v", - projectID, backingFsBlockDev, errno.Error()) - } - - return nil -} - -// GetQuota - get the quota limits of a directory that was configured with SetQuota -func (q *Control) GetQuota(targetPath string, quota *Quota) error { - - projectID, ok := q.quotas[targetPath] - if !ok { - return fmt.Errorf("quota not found for path : %s", targetPath) - } - - // - // get the quota limit for the container's project id - // - var d C.fs_disk_quota_t - - var cs = C.CString(q.backingFsBlockDev) - defer C.free(unsafe.Pointer(cs)) - - _, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA, - uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)), - uintptr(unsafe.Pointer(&d)), 0, 0) - if errno != 0 { - return fmt.Errorf("Failed to get quota limit for projid %d on %s: %v", - projectID, q.backingFsBlockDev, errno.Error()) - } - quota.Size = uint64(d.d_blk_hardlimit) * 512 - - return nil -} - -// getProjectID - get the project id of path on xfs -func getProjectID(targetPath string) (uint32, error) { - dir, err := openDir(targetPath) - if err != nil { - return 0, err - } - defer closeDir(dir) - - 
var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return 0, fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - - return uint32(fsx.fsx_projid), nil -} - -// setProjectID - set the project id of path on xfs -func setProjectID(targetPath string, projectID uint32) error { - dir, err := openDir(targetPath) - if err != nil { - return err - } - defer closeDir(dir) - - var fsx C.struct_fsxattr - _, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to get projid for %s: %v", targetPath, errno.Error()) - } - fsx.fsx_projid = C.__u32(projectID) - fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT - _, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR, - uintptr(unsafe.Pointer(&fsx))) - if errno != 0 { - return fmt.Errorf("Failed to set projid for %s: %v", targetPath, errno.Error()) - } - - return nil -} - -// findNextProjectID - find the next project id to be used for containers -// by scanning driver home directory to find used project ids -func (q *Control) findNextProjectID(home string) error { - files, err := ioutil.ReadDir(home) - if err != nil { - return fmt.Errorf("read directory failed : %s", home) - } - for _, file := range files { - if !file.IsDir() { - continue - } - path := filepath.Join(home, file.Name()) - projid, err := getProjectID(path) - if err != nil { - return err - } - if projid > 0 { - q.quotas[path] = projid - } - if q.nextProjectID <= projid { - q.nextProjectID = projid + 1 - } - } - - return nil -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -// Get the backing block device of the driver home directory -// and create a block device node under the home directory -// to be used by quotactl commands -func makeBackingFsDev(home string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(home, &stat); err != nil { - return "", err - } - - backingFsBlockDev := path.Join(home, "backingFsBlockDev") - // Re-create just in case someone copied the home directory over to a new device - unix.Unlink(backingFsBlockDev) - if err := unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev)); err != nil { - return "", fmt.Errorf("Failed to mknod %s: %v", backingFsBlockDev, err) - } - - return backingFsBlockDev, nil -} diff --git a/vendor/github.com/containers/storage/drivers/register/register_aufs.go b/vendor/github.com/containers/storage/drivers/register/register_aufs.go deleted file mode 100644 index 7743dcedbd..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_aufs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_aufs,linux - -package register - -import ( - // register the aufs graphdriver - _ "github.com/containers/storage/drivers/aufs" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go b/vendor/github.com/containers/storage/drivers/register/register_btrfs.go deleted file mode 100644 index 40ff1cdd0d..0000000000 --- 
a/vendor/github.com/containers/storage/drivers/register/register_btrfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_btrfs,linux - -package register - -import ( - // register the btrfs graphdriver - _ "github.com/containers/storage/drivers/btrfs" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go b/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go deleted file mode 100644 index 08ac984b05..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_devicemapper.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_devicemapper,linux - -package register - -import ( - // register the devmapper graphdriver - _ "github.com/containers/storage/drivers/devmapper" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_overlay.go b/vendor/github.com/containers/storage/drivers/register/register_overlay.go deleted file mode 100644 index 2d61219bbb..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_overlay.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_overlay,linux - -package register - -import ( - // register the overlay graphdriver - _ "github.com/containers/storage/drivers/overlay" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_vfs.go b/vendor/github.com/containers/storage/drivers/register/register_vfs.go deleted file mode 100644 index 691ce85929..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_vfs.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register vfs - _ "github.com/containers/storage/drivers/vfs" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_windows.go b/vendor/github.com/containers/storage/drivers/register/register_windows.go deleted file mode 100644 index 048b27097d..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register the windows graph driver - _ "github.com/containers/storage/drivers/windows" -) diff --git a/vendor/github.com/containers/storage/drivers/register/register_zfs.go b/vendor/github.com/containers/storage/drivers/register/register_zfs.go deleted file mode 100644 index c748468e5c..0000000000 --- a/vendor/github.com/containers/storage/drivers/register/register_zfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris - -package register - -import ( - // register the zfs driver - _ "github.com/containers/storage/drivers/zfs" -) diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go deleted file mode 100644 index ae62207d17..0000000000 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ /dev/null @@ -1,160 +0,0 @@ -package vfs - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/chrootarchive" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" - "github.com/opencontainers/selinux/go-selinux/label" -) - -var ( - // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar -) - -func init() { - graphdriver.Register("vfs", Init) -} - -// Init returns a new VFS driver. -// This sets the home directory for the driver and returns NaiveDiffDriver. 
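The register_*.go files deleted above exist purely for side effects: each blank import pulls in a driver package whose init() calls graphdriver.Register, and build tags such as !exclude_graphdriver_btrfs,linux let a packager compile individual drivers out. The same pattern in miniature, with made-up names:

```go
package main

import "fmt"

// registry maps driver names to constructors; Register is the function
// each driver package calls from its init().
var registry = map[string]func() string{}

func Register(name string, f func() string) { registry[name] = f }

// In the real layout this init lives in the driver's own package and is
// pulled in by a blank import (`_ "example/drivers/foo"`) from a
// per-driver file guarded by build tags.
func init() {
	Register("foo", func() string { return "foo driver" })
}

func main() {
	for name, f := range registry {
		fmt.Println(name, "->", f())
	}
}
```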
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - d := &Driver{ - homes: []string{home}, - idMappings: idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), - } - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(home, 0700, rootIDs); err != nil { - return nil, err - } - for _, option := range options { - if strings.HasPrefix(option, "vfs.imagestore=") { - d.homes = append(d.homes, strings.Split(option[15:], ",")...) - continue - } - if strings.HasPrefix(option, ".imagestore=") { - d.homes = append(d.homes, strings.Split(option[12:], ",")...) - continue - } - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -// Driver holds information about the driver, home directory of the driver. -// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. -// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. -// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver -type Driver struct { - homes []string - idMappings *idtools.IDMappings -} - -func (d *Driver) String() string { - return "vfs" -} - -// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. -func (d *Driver) Status() [][2]string { - return nil -} - -// Metadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. -func (d *Driver) Metadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. -func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - if opts != nil && len(opts.StorageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for vfs") - } - - dir := d.dir(id) - rootIDs := d.idMappings.RootPair() - if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0700, rootIDs); err != nil { - return err - } - if err := idtools.MkdirAndChown(dir, 0755, rootIDs); err != nil { - return err - } - labelOpts := []string{"level:s0"} - if _, mountLabel, err := label.InitLabels(labelOpts); err == nil { - label.SetFileLabel(dir, mountLabel) - } - if parent == "" { - return nil - } - parentDir, err := d.Get(parent, "") - if err != nil { - return fmt.Errorf("%s: %s", parent, err) - } - return CopyWithTar(parentDir, dir) -} - -func (d *Driver) dir(id string) string { - for i, home := range d.homes { - if i > 0 { - home = filepath.Join(home, d.String()) - } - candidate := filepath.Join(home, "dir", filepath.Base(id)) - fi, err := os.Stat(candidate) - if err == nil && fi.IsDir() { - return candidate - } - } - return filepath.Join(d.homes[0], "dir", filepath.Base(id)) -} - -// Remove deletes the content from the directory for a given id. -func (d *Driver) Remove(id string) error { - return system.EnsureRemoveAll(d.dir(id)) -} - -// Get returns the directory for the given id. 
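vfs has no copy-on-write: as the Create implementation above shows, a child layer starts as a full CopyWithTar of its parent, which works on any filesystem at the cost of disk space and create time. A rough layering-by-copy sketch using only the standard library; the vendored archiver additionally preserves ownership, device nodes, symlinks and xattrs, which this does not.

```go
package main

import (
	"io"
	"os"
	"path/filepath"
)

// copyTree naively duplicates a parent layer into a new layer directory.
// The vendored driver copies through a tar pipeline instead, carrying
// metadata across that plain io.Copy loses.
func copyTree(src, dst string) error {
	return filepath.Walk(src, func(p string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(src, p)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if info.IsDir() {
			return os.MkdirAll(target, info.Mode())
		}
		in, err := os.Open(p)
		if err != nil {
			return err
		}
		defer in.Close()
		out, err := os.OpenFile(target, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, in)
		return err
	})
}

func main() {
	// Layer "b" starts as a full copy of layer "a": no COW involved.
	_ = copyTree("layers/a/dir", "layers/b/dir")
}
```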
-func (d *Driver) Get(id, mountLabel string) (string, error) { - dir := d.dir(id) - if st, err := os.Stat(dir); err != nil { - return "", err - } else if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - return dir, nil -} - -// Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up. -func (d *Driver) Put(id string) error { - // The vfs driver has no runtime resources (e.g. mounts) - // to clean up, so we don't need anything here - return nil -} - -// Exists checks to see if the directory exists for the given id. -func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - if len(d.homes) > 1 { - return d.homes[1:] - } - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/windows/windows.go b/vendor/github.com/containers/storage/drivers/windows/windows.go deleted file mode 100644 index e9e9f5c65a..0000000000 --- a/vendor/github.com/containers/storage/drivers/windows/windows.go +++ /dev/null @@ -1,965 +0,0 @@ -//+build windows - -package windows - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/archive/tar" - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/longpath" - "github.com/containers/storage/pkg/reexec" - "github.com/containers/storage/pkg/system" - units "github.com/docker/go-units" - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -// filterDriver is an HCSShim driver type for the Windows Filter driver. -const filterDriver = 1 - -var ( - // mutatedFiles is a list of files that are mutated by the import process - // and must be backed up and restored. - mutatedFiles = map[string]string{ - "UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak", - "UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak", - } - noreexec = false -) - -// init registers the windows graph drivers to the register. -func init() { - graphdriver.Register("windowsfilter", InitFilter) - // DOCKER_WINDOWSFILTER_NOREEXEC allows for inline processing which makes - // debugging issues in the re-exec codepath significantly easier. - if os.Getenv("DOCKER_WINDOWSFILTER_NOREEXEC") != "" { - logrus.Warnf("WindowsGraphDriver is set to not re-exec. This is intended for debugging purposes only.") - noreexec = true - } else { - reexec.Register("docker-windows-write-layer", writeLayerReexec) - } -} - -type checker struct { -} - -func (c *checker) IsMounted(path string) bool { - return false -} - -// Driver represents a windows graph driver. -type Driver struct { - // info stores the shim driver information - info hcsshim.DriverInfo - ctr *graphdriver.RefCounter - // it is safe for windows to use a cache here because it does not support - // restoring containers when the daemon dies. 
- cacheMu sync.Mutex - cache map[string]string -} - -// InitFilter returns a new Windows storage filter driver. -func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) - - fsType, err := getFileSystemType(string(home[0])) - if err != nil { - return nil, err - } - if strings.ToLower(fsType) == "refs" { - return nil, fmt.Errorf("%s is on an ReFS volume - ReFS volumes are not supported", home) - } - - if err := idtools.MkdirAllAs(home, 0700, 0, 0); err != nil { - return nil, fmt.Errorf("windowsfilter failed to create '%s': %v", home, err) - } - - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: filterDriver, - }, - cache: make(map[string]string), - ctr: graphdriver.NewRefCounter(&checker{}), - } - return d, nil -} - -// win32FromHresult is a helper function to get the win32 error code from an HRESULT -func win32FromHresult(hr uintptr) uintptr { - if hr&0x1fff0000 == 0x00070000 { - return hr & 0xffff - } - return hr -} - -// getFileSystemType obtains the type of a file system through GetVolumeInformation -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364993(v=vs.85).aspx -func getFileSystemType(drive string) (fsType string, hr error) { - var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - procGetVolumeInformation = modkernel32.NewProc("GetVolumeInformationW") - buf = make([]uint16, 255) - size = windows.MAX_PATH + 1 - ) - if len(drive) != 1 { - hr = errors.New("getFileSystemType must be called with a drive letter") - return - } - drive += `:\` - n := uintptr(unsafe.Pointer(nil)) - r0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - fsType = windows.UTF16ToString(buf) - return -} - -// String returns the string representation of a driver. This should match -// the name the graph driver has been registered with. -func (d *Driver) String() string { - return "windowsfilter" -} - -// Status returns the status of the driver. -func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Windows", ""}, - } -} - -// panicIfUsedByLcow does exactly what it says. -// TODO @jhowardmsft - this is a temporary measure for the bring-up of -// Linux containers on Windows. It is a failsafe to ensure that the right -// graphdriver is used. -func panicIfUsedByLcow() { - if system.LCOWSupported() { - panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode") - } -} - -// Exists returns true if the given id is registered with this driver. -func (d *Driver) Exists(id string) bool { - panicIfUsedByLcow() - rID, err := d.resolveID(id) - if err != nil { - return false - } - result, err := hcsshim.LayerExists(d.info, rID) - if err != nil { - return false - } - return result -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - panicIfUsedByLcow() - if opts != nil { - return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt) - } - return d.create(id, parent, "", false, nil) -} - -// Create creates a new read-only layer with the given id. 
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - panicIfUsedByLcow() - if opts != nil { - return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt) - } - return d.create(id, parent, "", true, nil) -} - -func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { - rPId, err := d.resolveID(parent) - if err != nil { - return err - } - - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return err - } - - var layerChain []string - - if rPId != "" { - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return err - } - if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { - // This is a legitimate parent layer (not the empty "-init" layer), - // so include it in the layer chain. - layerChain = []string{parentPath} - } - } - - layerChain = append(layerChain, parentChain...) - - if readOnly { - if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { - return err - } - } else { - var parentPath string - if len(layerChain) != 0 { - parentPath = layerChain[0] - } - - if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { - return err - } - - storageOptions, err := parseStorageOpt(storageOpt) - if err != nil { - return fmt.Errorf("Failed to parse storage options - %s", err) - } - - if storageOptions.size != 0 { - if err := hcsshim.ExpandSandboxSize(d.info, id, storageOptions.size); err != nil { - return err - } - } - } - - if _, err := os.Lstat(d.dir(parent)); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) - } - - if err := d.setLayerChain(id, layerChain); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return err - } - - return nil -} - -// dir returns the absolute path to the layer. -func (d *Driver) dir(id string) string { - return filepath.Join(d.info.HomeDir, filepath.Base(id)) -} - -// Remove unmounts and removes the dir information. -func (d *Driver) Remove(id string) error { - panicIfUsedByLcow() - rID, err := d.resolveID(id) - if err != nil { - return err - } - - // This retry loop is due to a bug in Windows (Internal bug #9432268) - // if GetContainers fails with ErrVmcomputeOperationInvalidState - // it is a transient error. Retry until it succeeds. - var computeSystems []hcsshim.ContainerProperties - retryCount := 0 - osv := system.GetOSVersion() - for { - // Get and terminate any template VMs that are currently using the layer. - // Note: It is unfortunate that we end up in the graphdrivers Remove() call - // for both containers and images, but the logic for template VMs is only - // needed for images - specifically we are looking to see if a base layer - // is in use by a template VM as a result of having started a Hyper-V - // container at some point. - // - // We have a retry loop for ErrVmcomputeOperationInvalidState and - // ErrVmcomputeOperationAccessIsDenied as there is a race condition - // in RS1 and RS2 building during enumeration when a silo is going away - // for example under it, in HCS. AccessIsDenied added to fix 30278. - // - // TODO @jhowardmsft - For RS3, we can remove the retries. Also consider - // using platform APIs (if available) to get this more succinctly. 
Also - // consider enhancing the Remove() interface to have context of why - // the remove is being called - that could improve efficiency by not - // enumerating compute systems during a remove of a container as it's - // not required. - computeSystems, err = hcsshim.GetContainers(hcsshim.ComputeSystemQuery{}) - if err != nil { - if (osv.Build < 15139) && - ((err == hcsshim.ErrVmcomputeOperationInvalidState) || (err == hcsshim.ErrVmcomputeOperationAccessIsDenied)) { - if retryCount >= 500 { - break - } - retryCount++ - time.Sleep(10 * time.Millisecond) - continue - } - return err - } - break - } - - for _, computeSystem := range computeSystems { - if strings.Contains(computeSystem.RuntimeImagePath, id) && computeSystem.IsRuntimeTemplate { - container, err := hcsshim.OpenContainer(computeSystem.ID) - if err != nil { - return err - } - defer container.Close() - err = container.Terminate() - if hcsshim.IsPending(err) { - err = container.Wait() - } else if hcsshim.IsAlreadyStopped(err) { - err = nil - } - - if err != nil { - return err - } - } - } - - layerPath := filepath.Join(d.info.HomeDir, rID) - tmpID := fmt.Sprintf("%s-removing", rID) - tmpLayerPath := filepath.Join(d.info.HomeDir, tmpID) - if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) { - return err - } - if err := hcsshim.DestroyLayer(d.info, tmpID); err != nil { - logrus.Errorf("Failed to DestroyLayer %s: %s", id, err) - } - - return nil -} - -// Get returns the rootfs path for the id. This will mount the dir at its given path. -func (d *Driver) Get(id, mountLabel string) (string, error) { - panicIfUsedByLcow() - logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) - var dir string - - rID, err := d.resolveID(id) - if err != nil { - return "", err - } - if count := d.ctr.Increment(rID); count > 1 { - return d.cache[rID], nil - } - - // Getting the layer paths must be done outside of the lock. - layerChain, err := d.getLayerChain(rID) - if err != nil { - d.ctr.Decrement(rID) - return "", err - } - - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - d.ctr.Decrement(rID) - return "", err - } - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - d.ctr.Decrement(rID) - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return "", err - } - - mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) - if err != nil { - d.ctr.Decrement(rID) - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - logrus.Warnf("Failed to Unprepare %s: %s", id, err) - } - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return "", err - } - d.cacheMu.Lock() - d.cache[rID] = mountPath - d.cacheMu.Unlock() - - // If the layer has a mount path, use that. Otherwise, use the - // folder path. - if mountPath != "" { - dir = mountPath - } else { - dir = d.dir(id) - } - - return dir, nil -} - -// Put adds a new layer to the driver. 
-func (d *Driver) Put(id string) error { - panicIfUsedByLcow() - logrus.Debugf("WindowsGraphDriver Put() id %s", id) - - rID, err := d.resolveID(id) - if err != nil { - return err - } - if count := d.ctr.Decrement(rID); count > 0 { - return nil - } - d.cacheMu.Lock() - _, exists := d.cache[rID] - delete(d.cache, rID) - d.cacheMu.Unlock() - - // If the cache was not populated, then the layer was left unprepared and deactivated - if !exists { - return nil - } - - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return err - } - return hcsshim.DeactivateLayer(d.info, rID) -} - -// Cleanup ensures the information the driver stores is properly removed. -// We use this opportunity to cleanup any -removing folders which may be -// still left if the daemon was killed while it was removing a layer. -func (d *Driver) Cleanup() error { - items, err := ioutil.ReadDir(d.info.HomeDir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - // Note we don't return an error below - it's possible the files - // are locked. However, next time around after the daemon exits, - // we likely will be able to to cleanup successfully. Instead we log - // warnings if there are errors. - for _, item := range items { - if item.IsDir() && strings.HasSuffix(item.Name(), "-removing") { - if err := hcsshim.DestroyLayer(d.info, item.Name()); err != nil { - logrus.Warnf("Failed to cleanup %s: %s", item.Name(), err) - } else { - logrus.Infof("Cleaned up %s", item.Name()) - } - } - } - - return nil -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -// The layer should be mounted when calling this function -func (d *Driver) Diff(id, parent, mountLabel string) (_ io.ReadCloser, err error) { - panicIfUsedByLcow() - rID, err := d.resolveID(id) - if err != nil { - return - } - - layerChain, err := d.getLayerChain(rID) - if err != nil { - return - } - - // this is assuming that the layer is unmounted - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return nil, err - } - prepare := func() { - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) - } - } - - arch, err := d.exportLayer(rID, layerChain) - if err != nil { - prepare() - return - } - return ioutils.NewReadCloserWrapper(arch, func() error { - err := arch.Close() - prepare() - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -// The layer should not be mounted when calling this function. 
-func (d *Driver) Changes(id, parent, mountLabel string) ([]archive.Change, error) { - panicIfUsedByLcow() - rID, err := d.resolveID(id) - if err != nil { - return nil, err - } - parentChain, err := d.getLayerChain(rID) - if err != nil { - return nil, err - } - - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - return nil, err - } - defer func() { - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Errorf("changes() failed to DeactivateLayer %s %s: %s", id, rID, err2) - } - }() - - var changes []archive.Change - err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - r, err := hcsshim.NewLayerReader(d.info, id, parentChain) - if err != nil { - return err - } - defer r.Close() - - for { - name, _, fileInfo, err := r.Next() - if err == io.EOF { - return nil - } - if err != nil { - return err - } - name = filepath.ToSlash(name) - if fileInfo == nil { - changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete}) - } else { - // Currently there is no way to tell between an add and a modify. - changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify}) - } - } - }) - if err != nil { - return nil, err - } - - return changes, nil -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -// The layer should not be mounted when calling this function -func (d *Driver) ApplyDiff(id, parent, mountLabel string, diff io.Reader) (int64, error) { - panicIfUsedByLcow() - var layerChain []string - if parent != "" { - rPId, err := d.resolveID(parent) - if err != nil { - return 0, err - } - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return 0, err - } - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return 0, err - } - layerChain = append(layerChain, parentPath) - layerChain = append(layerChain, parentChain...) - } - - size, err := d.importLayer(id, diff, layerChain) - if err != nil { - return 0, err - } - - if err = d.setLayerChain(id, layerChain); err != nil { - return 0, err - } - - return size, nil -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (d *Driver) DiffSize(id, parent, mountLabel string) (size int64, err error) { - panicIfUsedByLcow() - rPId, err := d.resolveID(parent) - if err != nil { - return - } - - changes, err := d.Changes(id, rPId, mountLabel) - if err != nil { - return - } - - layerFs, err := d.Get(id, "") - if err != nil { - return - } - defer d.Put(id) - - return archive.ChangesSize(layerFs, changes), nil -} - -// Metadata returns custom driver information. -func (d *Driver) Metadata(id string) (map[string]string, error) { - panicIfUsedByLcow() - m := make(map[string]string) - m["dir"] = d.dir(id) - return m, nil -} - -func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { - t := tar.NewWriter(w) - for { - name, size, fileInfo, err := r.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if fileInfo == nil { - // Write a whiteout file. 
- hdr := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), - } - err := t.WriteHeader(hdr) - if err != nil { - return err - } - } else { - err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) - if err != nil { - return err - } - } - } - return t.Close() -} - -// exportLayer generates an archive from a layer based on the given ID. -func (d *Driver) exportLayer(id string, parentLayerPaths []string) (io.ReadCloser, error) { - archive, w := io.Pipe() - go func() { - err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) - if err != nil { - return err - } - - err = writeTarFromLayer(r, w) - cerr := r.Close() - if err == nil { - err = cerr - } - return err - }) - w.CloseWithError(err) - }() - - return archive, nil -} - -// writeBackupStreamFromTarAndSaveMutatedFiles reads data from a tar stream and -// writes it to a backup stream, and also saves any files that will be mutated -// by the import layer process to a backup location. -func writeBackupStreamFromTarAndSaveMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) { - var bcdBackup *os.File - var bcdBackupWriter *winio.BackupFileWriter - if backupPath, ok := mutatedFiles[hdr.Name]; ok { - bcdBackup, err = os.Create(filepath.Join(root, backupPath)) - if err != nil { - return nil, err - } - defer func() { - cerr := bcdBackup.Close() - if err == nil { - err = cerr - } - }() - - bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false) - defer func() { - cerr := bcdBackupWriter.Close() - if err == nil { - err = cerr - } - }() - - buf.Reset(io.MultiWriter(w, bcdBackupWriter)) - } else { - buf.Reset(w) - } - - defer func() { - ferr := buf.Flush() - if err == nil { - err = ferr - } - }() - - return backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) -} - -func writeLayerFromTar(r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { - t := tar.NewReader(r) - hdr, err := t.Next() - totalSize := int64(0) - buf := bufio.NewWriter(nil) - for err == nil { - base := path.Base(hdr.Name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) - err = w.Remove(filepath.FromSlash(name)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else if hdr.Typeflag == tar.TypeLink { - err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else { - var ( - name string - size int64 - fileInfo *winio.FileBasicInfo - ) - name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) - if err != nil { - return 0, err - } - err = w.Add(filepath.FromSlash(name), fileInfo) - if err != nil { - return 0, err - } - hdr, err = writeBackupStreamFromTarAndSaveMutatedFiles(buf, w, t, hdr, root) - totalSize += size - } - } - if err != io.EOF { - return 0, err - } - return totalSize, nil -} - -// importLayer adds a new layer to the tag and graph store based on the given data. -func (d *Driver) importLayer(id string, layerData io.Reader, parentLayerPaths []string) (size int64, err error) { - if !noreexec { - cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) 
-		output := bytes.NewBuffer(nil)
-		cmd.Stdin = layerData
-		cmd.Stdout = output
-		cmd.Stderr = output
-
-		if err = cmd.Start(); err != nil {
-			return
-		}
-
-		if err = cmd.Wait(); err != nil {
-			return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output)
-		}
-
-		return strconv.ParseInt(output.String(), 10, 64)
-	}
-	return writeLayer(layerData, d.info.HomeDir, id, parentLayerPaths...)
-}
-
-// writeLayerReexec is the re-exec entry point for writing a layer from a tar file.
-func writeLayerReexec() {
-	size, err := writeLayer(os.Stdin, os.Args[1], os.Args[2], os.Args[3:]...)
-	if err != nil {
-		fmt.Fprint(os.Stderr, err)
-		os.Exit(1)
-	}
-	fmt.Fprint(os.Stdout, size)
-}
-
-// writeLayer writes a layer from a tar file.
-func writeLayer(layerData io.Reader, home string, id string, parentLayerPaths ...string) (int64, error) {
-	err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege})
-	if err != nil {
-		return 0, err
-	}
-	if noreexec {
-		defer func() {
-			if err := winio.DisableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}); err != nil {
-				// This should never happen, but panic just in case while in debugging mode.
-				// See https://github.com/docker/docker/pull/28002#discussion_r86259241 for rationale.
-				panic("Failed to disable process privileges while in non re-exec mode")
-			}
-		}()
-	}
-
-	info := hcsshim.DriverInfo{
-		Flavour: filterDriver,
-		HomeDir: home,
-	}
-
-	w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths)
-	if err != nil {
-		return 0, err
-	}
-
-	size, err := writeLayerFromTar(layerData, w, filepath.Join(home, id))
-	if err != nil {
-		return 0, err
-	}
-
-	err = w.Close()
-	if err != nil {
-		return 0, err
-	}
-
-	return size, nil
-}
-
-// resolveID computes the layerID information based on the given id.
-func (d *Driver) resolveID(id string) (string, error) {
-	content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID"))
-	if os.IsNotExist(err) {
-		return id, nil
-	} else if err != nil {
-		return "", err
-	}
-	return string(content), nil
-}
-
-// setID stores the layerId on disk.
-func (d *Driver) setID(id, altID string) error {
-	return ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600)
-}
-
-// getLayerChain returns the layer chain information.
-func (d *Driver) getLayerChain(id string) ([]string, error) {
-	jPath := filepath.Join(d.dir(id), "layerchain.json")
-	content, err := ioutil.ReadFile(jPath)
-	if os.IsNotExist(err) {
-		return nil, nil
-	} else if err != nil {
-		return nil, fmt.Errorf("Unable to read layerchain file - %s", err)
-	}
-
-	var layerChain []string
-	err = json.Unmarshal(content, &layerChain)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to unmarshal layerchain json - %s", err)
-	}
-
-	return layerChain, nil
-}
-
-// setLayerChain stores the layer chain information on disk.
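getLayerChain and setLayerChain above persist a layer's parent chain as a JSON array in a layerchain.json file inside the layer's directory. A self-contained sketch of that round-trip; the helper names and directory layout are assumed for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// saveLayerChain mirrors setLayerChain: the ordered list of parent layer
// paths is marshalled to JSON and written next to the layer's data.
func saveLayerChain(dir string, chain []string) error {
	content, err := json.Marshal(chain)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(dir, "layerchain.json"), content, 0600)
}

// loadLayerChain mirrors getLayerChain: a missing file simply means the
// layer has no recorded parents, which is not an error.
func loadLayerChain(dir string) ([]string, error) {
	content, err := ioutil.ReadFile(filepath.Join(dir, "layerchain.json"))
	if os.IsNotExist(err) {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	var chain []string
	err = json.Unmarshal(content, &chain)
	return chain, err
}

func main() {
	dir, _ := ioutil.TempDir("", "layer")
	defer os.RemoveAll(dir)
	_ = saveLayerChain(dir, []string{`C:\layers\parent1`, `C:\layers\parent2`})
	chain, _ := loadLayerChain(dir)
	fmt.Println(chain)
}
```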
-func (d *Driver) setLayerChain(id string, chain []string) error { - content, err := json.Marshal(&chain) - if err != nil { - return fmt.Errorf("Failed to marshall layerchain json - %s", err) - } - - jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) - if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) - } - - return nil -} - -type fileGetCloserWithBackupPrivileges struct { - path string -} - -func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { - if backupPath, ok := mutatedFiles[filename]; ok { - return os.Open(filepath.Join(fg.path, backupPath)) - } - - var f *os.File - // Open the file while holding the Windows backup privilege. This ensures that the - // file can be opened even if the caller does not actually have access to it according - // to the security descriptor. Also use sequential file access to avoid depleting the - // standby list - Microsoft VSO Bug Tracker #9900466 - err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - path := longpath.AddPrefix(filepath.Join(fg.path, filename)) - p, err := windows.UTF16FromString(path) - if err != nil { - return err - } - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, err := windows.CreateFile(&p[0], windows.GENERIC_READ, windows.FILE_SHARE_READ, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS|fileFlagSequentialScan, 0) - if err != nil { - return &os.PathError{Op: "open", Path: path, Err: err} - } - f = os.NewFile(uintptr(h), path) - return nil - }) - return f, err -} - -func (fg *fileGetCloserWithBackupPrivileges) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. -func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - panicIfUsedByLcow() - id, err := d.resolveID(id) - if err != nil { - return nil, err - } - - return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} - -type storageOptions struct { - size uint64 -} - -func parseStorageOpt(storageOpt map[string]string) (*storageOptions, error) { - options := storageOptions{} - - // Read size to change the block device size per container. 
- for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - options.size = uint64(size) - default: - return nil, fmt.Errorf("Unknown storage option: %s", key) - } - } - return &options, nil -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs.go b/vendor/github.com/containers/storage/drivers/zfs/zfs.go deleted file mode 100644 index 8c8e7d6718..0000000000 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs.go +++ /dev/null @@ -1,426 +0,0 @@ -// +build linux freebsd solaris - -package zfs - -import ( - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "sync" - "time" - - "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/mount" - "github.com/containers/storage/pkg/parsers" - zfs "github.com/mistifyio/go-zfs" - "github.com/opencontainers/selinux/go-selinux/label" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -type zfsOptions struct { - fsName string - mountPath string -} - -func init() { - graphdriver.Register("zfs", Init) -} - -// Logger returns a zfs logger implementation. -type Logger struct{} - -// Log wraps log message from ZFS driver with a prefix '[zfs]'. -func (*Logger) Log(cmd []string) { - logrus.Debugf("[zfs] %s", strings.Join(cmd, " ")) -} - -// Init returns a new ZFS driver. -// It takes base mount path and an array of options which are represented as key value pairs. -// Each option is in the for key=value. 'zfs.fsname' is expected to be a valid key in the options. -func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - var err error - - if _, err := exec.LookPath("zfs"); err != nil { - logrus.Debugf("[zfs] zfs command is not available: %v", err) - return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available") - } - - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) - if err != nil { - logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) - return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err) - } - defer file.Close() - - options, err := parseOptions(opt) - if err != nil { - return nil, err - } - options.mountPath = base - - rootdir := path.Dir(base) - - if options.fsName == "" { - err = checkRootdirFs(rootdir) - if err != nil { - return nil, err - } - } - - if options.fsName == "" { - options.fsName, err = lookupZfsDataset(rootdir) - if err != nil { - return nil, err - } - } - - zfs.SetLogger(new(Logger)) - - filesystems, err := zfs.Filesystems(options.fsName) - if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) - } - - filesystemsCache := make(map[string]bool, len(filesystems)) - var rootDataset *zfs.Dataset - for _, fs := range filesystems { - if fs.Name == options.fsName { - rootDataset = fs - } - filesystemsCache[fs.Name] = true - } - - if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) - } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) - } - - if err := mount.MakePrivate(base); err != nil { - return nil, err - } - 
d := &Driver{ - dataset: rootDataset, - options: options, - filesystemsCache: filesystemsCache, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (zfsOptions, error) { - var options zfsOptions - options.fsName = "" - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, err - } - key = strings.ToLower(key) - switch key { - case "zfs.fsname": - options.fsName = val - default: - return options, fmt.Errorf("Unknown option %s", key) - } - } - return options, nil -} - -func lookupZfsDataset(rootdir string) (string, error) { - var stat unix.Stat_t - if err := unix.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - wantedDev := stat.Dev - - mounts, err := mount.GetMounts() - if err != nil { - return "", err - } - for _, m := range mounts { - if err := unix.Stat(m.Mountpoint, &stat); err != nil { - logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) - continue // may fail on fuse file systems - } - - if stat.Dev == wantedDev && m.Fstype == "zfs" { - return m.Source, nil - } - } - - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) -} - -// Driver holds information about the driver, such as zfs dataset, options and cache. -type Driver struct { - dataset *zfs.Dataset - options zfsOptions - sync.Mutex // protects filesystem cache against concurrent access - filesystemsCache map[string]bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter -} - -func (d *Driver) String() string { - return "zfs" -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// Status returns information about the ZFS filesystem. It returns a two dimensional array of information -// such as pool name, dataset name, disk usage, parent quota and compression used. -// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', -// 'Space Available', 'Parent Quota' and 'Compression'. 
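parseOptions above splits each driver option with the vendored parsers.ParseKeyValueOpt helper. A rough stand-in for that helper, assuming only the same "key=value" contract the caller relies on (the function name here is hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// parseKeyValueOpt splits a single "key=value" driver option, trimming
// surrounding whitespace from both halves, as parseOptions expects.
func parseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	key, val, err := parseKeyValueOpt("zfs.fsname=rpool/docker")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s\n", key, val) // zfs.fsname -> rpool/docker
}
```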
-func (d *Driver) Status() [][2]string { - parts := strings.Split(d.dataset.Name, "/") - pool, err := zfs.GetZpool(parts[0]) - - var poolName, poolHealth string - if err == nil { - poolName = pool.Name - poolHealth = pool.Health - } else { - poolName = fmt.Sprintf("error while getting pool information %v", err) - poolHealth = "not available" - } - - quota := "no" - if d.dataset.Quota != 0 { - quota = strconv.FormatUint(d.dataset.Quota, 10) - } - - return [][2]string{ - {"Zpool", poolName}, - {"Zpool Health", poolHealth}, - {"Parent Dataset", d.dataset.Name}, - {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, - {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, - {"Parent Quota", quota}, - {"Compression", d.dataset.Compression}, - } -} - -// Metadata returns image/container metadata related to graph driver -func (d *Driver) Metadata(id string) (map[string]string, error) { - return map[string]string{ - "Mountpoint": d.mountPath(id), - "Dataset": d.zfsPath(id), - }, nil -} - -func (d *Driver) cloneFilesystem(name, parentName string) error { - snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) - parentDataset := zfs.Dataset{Name: parentName} - snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) - if err != nil { - return err - } - - _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) - if err == nil { - d.Lock() - d.filesystemsCache[name] = true - d.Unlock() - } - - if err != nil { - snapshot.Destroy(zfs.DestroyDeferDeletion) - return err - } - return snapshot.Destroy(zfs.DestroyDeferDeletion) -} - -func (d *Driver) zfsPath(id string) string { - return d.options.fsName + "/" + id -} - -func (d *Driver) mountPath(id string) string { - return path.Join(d.options.mountPath, "graph", getMountpoint(id)) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error { - return d.Create(id, parent, opts) -} - -// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
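cloneFilesystem above drives a snapshot-then-clone sequence through the go-zfs bindings. Sketched against the zfs CLI instead (the dataset names are made up, and running this requires a real pool plus root privileges), the same three steps look roughly like this:

```go
package main

import (
	"fmt"
	"os/exec"
	"time"
)

// cloneDataset snapshots the parent, clones the snapshot with
// mountpoint=legacy (so the driver controls mounting), then defer-destroys
// the snapshot: "zfs destroy -d" postpones deletion until the clone no
// longer depends on it, matching DestroyDeferDeletion above.
func cloneDataset(parent, name string) error {
	snap := fmt.Sprintf("%s@%d", parent, time.Now().Nanosecond())
	if out, err := exec.Command("zfs", "snapshot", snap).CombinedOutput(); err != nil {
		return fmt.Errorf("snapshot: %v: %s", err, out)
	}
	if out, err := exec.Command("zfs", "clone", "-o", "mountpoint=legacy", snap, name).CombinedOutput(); err != nil {
		return fmt.Errorf("clone: %v: %s", err, out)
	}
	if out, err := exec.Command("zfs", "destroy", "-d", snap).CombinedOutput(); err != nil {
		return fmt.Errorf("destroy: %v: %s", err, out)
	}
	return nil
}

func main() {
	if err := cloneDataset("rpool/docker/parent", "rpool/docker/child"); err != nil {
		fmt.Println(err)
	}
}
```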
-func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error { - var storageOpt map[string]string - if opts != nil { - storageOpt = opts.StorageOpt - } - - err := d.create(id, parent, storageOpt) - if err == nil { - return nil - } - if zfsError, ok := err.(*zfs.Error); ok { - if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { - return err - } - // aborted build -> cleanup - } else { - return err - } - - dataset := zfs.Dataset{Name: d.zfsPath(id)} - if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { - return err - } - - // retry - return d.create(id, parent, storageOpt) -} - -func (d *Driver) create(id, parent string, storageOpt map[string]string) error { - name := d.zfsPath(id) - quota, err := parseStorageOpt(storageOpt) - if err != nil { - return err - } - if parent == "" { - mountoptions := map[string]string{"mountpoint": "legacy"} - fs, err := zfs.CreateFilesystem(name, mountoptions) - if err == nil { - err = setQuota(name, quota) - if err == nil { - d.Lock() - d.filesystemsCache[fs.Name] = true - d.Unlock() - } - } - return err - } - err = d.cloneFilesystem(name, d.zfsPath(parent)) - if err == nil { - err = setQuota(name, quota) - } - return err -} - -func parseStorageOpt(storageOpt map[string]string) (string, error) { - // Read size to change the disk quota per container - for k, v := range storageOpt { - key := strings.ToLower(k) - switch key { - case "size": - return v, nil - default: - return "0", fmt.Errorf("Unknown option %s", key) - } - } - return "0", nil -} - -func setQuota(name string, quota string) error { - if quota == "0" { - return nil - } - fs, err := zfs.GetDataset(name) - if err != nil { - return err - } - return fs.SetProperty("quota", quota) -} - -// Remove deletes the dataset, filesystem and the cache for the given id. -func (d *Driver) Remove(id string) error { - name := d.zfsPath(id) - dataset := zfs.Dataset{Name: name} - err := dataset.Destroy(zfs.DestroyRecursive) - if err == nil { - d.Lock() - delete(d.filesystemsCache, name) - d.Unlock() - } - return err -} - -// Get returns the mountpoint for the given id after creating the target directories if necessary. 
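parseStorageOpt and setQuota above accept a human-readable "size" option and apply it as the dataset quota via the vendored units.RAMInBytes. A cut-down stand-in for that conversion, assuming binary (1024-based) multiples as RAMInBytes uses; only the suffixes the driver cares about are handled:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// ramInBytes converts sizes like "20G" or "512m" to bytes using binary
// multiples. An optional trailing "B" (as in "20GB") is tolerated.
func ramInBytes(size string) (int64, error) {
	suffixes := map[string]int64{"": 1, "k": 1 << 10, "m": 1 << 20, "g": 1 << 30, "t": 1 << 40}
	s := strings.ToLower(strings.TrimSpace(size))
	num := strings.TrimRight(s, "kmgtb")
	suffix := strings.TrimSuffix(strings.TrimPrefix(s, num), "b")
	mult, ok := suffixes[suffix]
	if !ok {
		return 0, fmt.Errorf("invalid size: %q", size)
	}
	n, err := strconv.ParseFloat(num, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid size: %q", size)
	}
	return int64(n * float64(mult)), nil
}

func main() {
	n, _ := ramInBytes("20G")
	fmt.Println(n) // 21474836480
}
```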
-func (d *Driver) Get(id, mountLabel string) (string, error) { - mountpoint := d.mountPath(id) - if count := d.ctr.Increment(mountpoint); count > 1 { - return mountpoint, nil - } - - filesystem := d.zfsPath(id) - options := label.FormatMountLabel("", mountLabel) - logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mountpoint) - return "", err - } - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { - d.ctr.Decrement(mountpoint) - return "", err - } - - if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) - } - - // this could be our first mount after creation of the filesystem, and the root dir may still have root - // permissions instead of the remapped root uid:gid (if user namespaces are enabled): - if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { - mount.Unmount(mountpoint) - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) - } - - return mountpoint, nil -} - -// Put removes the existing mountpoint for the given id if it exists. -func (d *Driver) Put(id string) error { - mountpoint := d.mountPath(id) - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) - if err != nil || !mounted { - return err - } - - logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) - - if err := mount.Unmount(mountpoint); err != nil { - return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) - } - return nil -} - -// Exists checks to see if the cache entry exists for the given id. -func (d *Driver) Exists(id string) bool { - d.Lock() - defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] -} - -// AdditionalImageStores returns additional image stores supported by the driver -func (d *Driver) AdditionalImageStores() []string { - return nil -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go deleted file mode 100644 index 69c0448d34..0000000000 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go +++ /dev/null @@ -1,39 +0,0 @@ -package zfs - -import ( - "fmt" - "strings" - - "github.com/containers/storage/drivers" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func checkRootdirFs(rootdir string) error { - var buf unix.Statfs_t - if err := unix.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... 
] - if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) - } - - return nil -} - -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } - - return id[:maxlen] -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go deleted file mode 100644 index da298047d5..0000000000 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -package zfs - -import ( - "fmt" - - "github.com/containers/storage/drivers" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -func checkRootdirFs(rootdir string) error { - var buf unix.Statfs_t - if err := unix.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) - } - - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go deleted file mode 100644 index 2383bf3bf3..0000000000 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build solaris,cgo - -package zfs - -/* -#include -#include - -static inline struct statvfs *getstatfs(char *s) { - struct statvfs *buf; - int err; - buf = (struct statvfs *)malloc(sizeof(struct statvfs)); - err = statvfs(s, buf); - return buf; -} -*/ -import "C" -import ( - "path/filepath" - "strings" - "unsafe" - - "github.com/containers/storage/drivers" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -func checkRootdirFs(rootdir string) error { - - cs := C.CString(filepath.Dir(rootdir)) - defer C.free(unsafe.Pointer(cs)) - buf := C.getstatfs(cs) - defer C.free(unsafe.Pointer(buf)) - - // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir) - } - - return nil -} - -/* rootfs is introduced to comply with the OCI spec -which states that root filesystem must be mounted at /rootfs/ instead of / -*/ -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") - } - - return filepath.Join(id[:maxlen], "rootfs", "root") -} diff --git a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go b/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go deleted file mode 100644 index ce8daadaf6..0000000000 --- a/vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris - -package zfs - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/vendor/github.com/containers/storage/errors.go b/vendor/github.com/containers/storage/errors.go deleted file mode 100644 index bed6f8cdc7..0000000000 --- a/vendor/github.com/containers/storage/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -package storage - -import ( - "errors" -) - -var ( - // ErrContainerUnknown indicates that there was no container with the specified name or ID. - ErrContainerUnknown = errors.New("container not known") - // ErrImageUnknown indicates that there was no image with the specified name or ID. - ErrImageUnknown = errors.New("image not known") - // ErrParentUnknown indicates that we didn't record the ID of the parent of the specified layer. - ErrParentUnknown = errors.New("parent of layer not known") - // ErrLayerUnknown indicates that there was no layer with the specified name or ID. - ErrLayerUnknown = errors.New("layer not known") - // ErrLoadError indicates that there was an initialization error. - ErrLoadError = errors.New("error loading storage metadata") - // ErrDuplicateID indicates that an ID which is to be assigned to a new item is already being used. - ErrDuplicateID = errors.New("that ID is already in use") - // ErrDuplicateName indicates that a name which is to be assigned to a new item is already being used. - ErrDuplicateName = errors.New("that name is already in use") - // ErrParentIsContainer is returned when a caller attempts to create a layer as a child of a container's layer. - ErrParentIsContainer = errors.New("would-be parent layer is a container") - // ErrNotAContainer is returned when the caller attempts to delete a container that isn't a container. - ErrNotAContainer = errors.New("identifier is not a container") - // ErrNotAnImage is returned when the caller attempts to delete an image that isn't an image. - ErrNotAnImage = errors.New("identifier is not an image") - // ErrNotALayer is returned when the caller attempts to delete a layer that isn't a layer. - ErrNotALayer = errors.New("identifier is not a layer") - // ErrNotAnID is returned when the caller attempts to read or write metadata from an item that doesn't exist. - ErrNotAnID = errors.New("identifier is not a layer, image, or container") - // ErrLayerHasChildren is returned when the caller attempts to delete a layer that has children. 
- ErrLayerHasChildren = errors.New("layer has children") - // ErrLayerUsedByImage is returned when the caller attempts to delete a layer that is an image's top layer. - ErrLayerUsedByImage = errors.New("layer is in use by an image") - // ErrLayerUsedByContainer is returned when the caller attempts to delete a layer that is a container's layer. - ErrLayerUsedByContainer = errors.New("layer is in use by a container") - // ErrImageUsedByContainer is returned when the caller attempts to delete an image that is a container's image. - ErrImageUsedByContainer = errors.New("image is in use by a container") - // ErrIncompleteOptions is returned when the caller attempts to initialize a Store without providing required information. - ErrIncompleteOptions = errors.New("missing necessary StoreOptions") - // ErrSizeUnknown is returned when the caller asks for the size of a big data item, but the Store couldn't determine the answer. - ErrSizeUnknown = errors.New("size is not known") - // ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents. - ErrStoreIsReadOnly = errors.New("called a write method on a read-only store") - // ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write. - ErrLockReadOnly = errors.New("lock is not a read-write lock") - // ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images. - ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images") - // ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers. - ErrDuplicateLayerNames = errors.New("read-only layer store assigns the same name to multiple layers") - // ErrInvalidBigDataName indicates that the name for a big data item is not acceptable; it may be empty. - ErrInvalidBigDataName = errors.New("not a valid name for a big data item") - // ErrDigestUnknown indicates that we were unable to compute the digest of a specified item. - ErrDigestUnknown = errors.New("could not compute digest of item") -) diff --git a/vendor/github.com/containers/storage/hack/generate-authors.sh b/vendor/github.com/containers/storage/hack/generate-authors.sh deleted file mode 100755 index e78a97f962..0000000000 --- a/vendor/github.com/containers/storage/hack/generate-authors.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." - -# see also ".mailmap" for how email addresses and names are deduplicated - -{ - cat <<-'EOH' - # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. 
- EOH - echo - git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go deleted file mode 100644 index 962e1bb760..0000000000 --- a/vendor/github.com/containers/storage/images.go +++ /dev/null @@ -1,673 +0,0 @@ -package storage - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/stringid" - "github.com/containers/storage/pkg/truncindex" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -const ( - // ImageDigestBigDataKey is the name of the big data item whose - // contents we consider useful for computing a "digest" of the - // image, by which we can locate the image later. - ImageDigestBigDataKey = "manifest" -) - -// An Image is a reference to a layer and an associated metadata string. -type Image struct { - // ID is either one which was specified at create-time, or a random - // value which was generated by the library. - ID string `json:"id"` - - // Digest is a digest value that we can use to locate the image. - Digest digest.Digest `json:"digest,omitempty"` - - // Names is an optional set of user-defined convenience values. The - // image can be referred to by its ID or any of its names. Names are - // unique among images. - Names []string `json:"names,omitempty"` - - // TopLayer is the ID of the topmost layer of the image itself, if the - // image contains one or more layers. Multiple images can refer to the - // same top layer. - TopLayer string `json:"layer,omitempty"` - - // Metadata is data we keep for the convenience of the caller. It is not - // expected to be large, since it is kept in memory. - Metadata string `json:"metadata,omitempty"` - - // BigDataNames is a list of names of data items that we keep for the - // convenience of the caller. They can be large, and are only in - // memory when being read from or written to disk. - BigDataNames []string `json:"big-data-names,omitempty"` - - // BigDataSizes maps the names in BigDataNames to the sizes of the data - // that has been stored, if they're known. - BigDataSizes map[string]int64 `json:"big-data-sizes,omitempty"` - - // BigDataDigests maps the names in BigDataNames to the digests of the - // data that has been stored, if they're known. - BigDataDigests map[string]digest.Digest `json:"big-data-digests,omitempty"` - - // Created is the datestamp for when this image was created. Older - // versions of the library did not track this information, so callers - // will likely want to use the IsZero() method to verify that a value - // is set before using it. - Created time.Time `json:"created,omitempty"` - - Flags map[string]interface{} `json:"flags,omitempty"` -} - -// ROImageStore provides bookkeeping for information about Images. -type ROImageStore interface { - ROFileBasedStore - ROMetadataStore - ROBigDataStore - - // Exists checks if there is an image with the given ID or name. - Exists(id string) bool - - // Get retrieves information about an image given an ID or name. - Get(id string) (*Image, error) - - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - - // Images returns a slice enumerating the known images. - Images() ([]Image, error) - - // Images returns a slice enumerating the images which have a big data - // item with the name ImageDigestBigDataKey and the specified digest. 
- ByDigest(d digest.Digest) ([]*Image, error) -} - -// ImageStore provides bookkeeping for information about Images. -type ImageStore interface { - ROImageStore - RWFileBasedStore - RWMetadataStore - RWBigDataStore - FlaggableStore - - // Create creates an image that has a specified ID (or a random one) and - // optional names, using the specified layer as its topmost (hopefully - // read-only) layer. That layer can be referenced by multiple images. - Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) - - // SetNames replaces the list of names associated with an image with the - // supplied values. - SetNames(id string, names []string) error - - // Delete removes the record of the image. - Delete(id string) error - - // Wipe removes records of all images. - Wipe() error -} - -type imageStore struct { - lockfile Locker - dir string - images []*Image - idindex *truncindex.TruncIndex - byid map[string]*Image - byname map[string]*Image - bydigest map[digest.Digest][]*Image -} - -func (r *imageStore) Images() ([]Image, error) { - images := make([]Image, len(r.images)) - for i := range r.images { - images[i] = *(r.images[i]) - } - return images, nil -} - -func (r *imageStore) imagespath() string { - return filepath.Join(r.dir, "images.json") -} - -func (r *imageStore) datadir(id string) string { - return filepath.Join(r.dir, id) -} - -func (r *imageStore) datapath(id, key string) string { - return filepath.Join(r.datadir(id), makeBigDataBaseName(key)) -} - -func (r *imageStore) Load() error { - shouldSave := false - rpath := r.imagespath() - data, err := ioutil.ReadFile(rpath) - if err != nil && !os.IsNotExist(err) { - return err - } - images := []*Image{} - idlist := []string{} - ids := make(map[string]*Image) - names := make(map[string]*Image) - digests := make(map[digest.Digest][]*Image) - if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(images)) - for n, image := range images { - ids[image.ID] = images[n] - idlist = append(idlist, image.ID) - for _, name := range image.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - shouldSave = true - } - names[name] = images[n] - } - // Implicit digest - if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { - digests[digest] = append(digests[digest], images[n]) - } - // Explicit digest - if image.Digest == "" { - image.Digest = image.BigDataDigests[ImageDigestBigDataKey] - } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] { - digests[image.Digest] = append(digests[image.Digest], images[n]) - } - } - } - if shouldSave && !r.IsReadWrite() { - return ErrDuplicateImageNames - } - r.images = images - r.idindex = truncindex.NewTruncIndex(idlist) - r.byid = ids - r.byname = names - r.bydigest = digests - if shouldSave { - return r.Save() - } - return nil -} - -func (r *imageStore) Save() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath()) - } - rpath := r.imagespath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err - } - jdata, err := json.Marshal(&r.images) - if err != nil { - return err - } - defer r.Touch() - return ioutils.AtomicWriteFile(rpath, jdata, 0600) -} - -func newImageStore(dir string) (ImageStore, error) { - if err := os.MkdirAll(dir, 0700); err != nil { - return nil, err - } - lockfile, err := GetLockfile(filepath.Join(dir, "images.lock")) - if 
err != nil { - return nil, err - } - lockfile.Lock() - defer lockfile.Unlock() - istore := imageStore{ - lockfile: lockfile, - dir: dir, - images: []*Image{}, - byid: make(map[string]*Image), - byname: make(map[string]*Image), - bydigest: make(map[digest.Digest][]*Image), - } - if err := istore.Load(); err != nil { - return nil, err - } - return &istore, nil -} - -func newROImageStore(dir string) (ROImageStore, error) { - lockfile, err := GetROLockfile(filepath.Join(dir, "images.lock")) - if err != nil { - return nil, err - } - lockfile.Lock() - defer lockfile.Unlock() - istore := imageStore{ - lockfile: lockfile, - dir: dir, - images: []*Image{}, - byid: make(map[string]*Image), - byname: make(map[string]*Image), - bydigest: make(map[digest.Digest][]*Image), - } - if err := istore.Load(); err != nil { - return nil, err - } - return &istore, nil -} - -func (r *imageStore) lookup(id string) (*Image, bool) { - if image, ok := r.byid[id]; ok { - return image, ok - } else if image, ok := r.byname[id]; ok { - return image, ok - } else if longid, err := r.idindex.Get(id); err == nil { - image, ok := r.byid[longid] - return image, ok - } - return nil, false -} - -func (r *imageStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on images at %q", r.imagespath()) - } - image, ok := r.lookup(id) - if !ok { - return ErrImageUnknown - } - delete(image.Flags, flag) - return r.Save() -} - -func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on images at %q", r.imagespath()) - } - image, ok := r.lookup(id) - if !ok { - return ErrImageUnknown - } - if image.Flags == nil { - image.Flags = make(map[string]interface{}) - } - image.Flags[flag] = value - return r.Save() -} - -func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { - if !r.IsReadWrite() { - return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) - } - if id == "" { - id = stringid.GenerateRandomID() - _, idInUse := r.byid[id] - for idInUse { - id = stringid.GenerateRandomID() - _, idInUse = r.byid[id] - } - } - if _, idInUse := r.byid[id]; idInUse { - return nil, ErrDuplicateID - } - names = dedupeNames(names) - for _, name := range names { - if _, nameInUse := r.byname[name]; nameInUse { - return nil, ErrDuplicateName - } - } - if created.IsZero() { - created = time.Now().UTC() - } - if err == nil { - image = &Image{ - ID: id, - Digest: searchableDigest, - Names: names, - TopLayer: layer, - Metadata: metadata, - BigDataNames: []string{}, - BigDataSizes: make(map[string]int64), - BigDataDigests: make(map[string]digest.Digest), - Created: created, - Flags: make(map[string]interface{}), - } - r.images = append(r.images, image) - r.idindex.Add(id) - r.byid[id] = image - if searchableDigest != "" { - list := r.bydigest[searchableDigest] - r.bydigest[searchableDigest] = append(list, image) - } - for _, name := range names { - r.byname[name] = image - } - err = r.Save() - } - return image, err -} - -func (r *imageStore) Metadata(id string) (string, error) { - if image, ok := r.lookup(id); ok { - return image.Metadata, nil - } - return "", ErrImageUnknown -} - -func (r *imageStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not 
allowed to modify image metadata at %q", r.imagespath()) - } - if image, ok := r.lookup(id); ok { - image.Metadata = metadata - return r.Save() - } - return ErrImageUnknown -} - -func (r *imageStore) removeName(image *Image, name string) { - image.Names = stringSliceWithoutValue(image.Names, name) -} - -func (r *imageStore) SetNames(id string, names []string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change image name assignments at %q", r.imagespath()) - } - names = dedupeNames(names) - if image, ok := r.lookup(id); ok { - for _, name := range image.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherImage, ok := r.byname[name]; ok { - r.removeName(otherImage, name) - } - r.byname[name] = image - } - image.Names = names - return r.Save() - } - return ErrImageUnknown -} - -func (r *imageStore) Delete(id string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) - } - image, ok := r.lookup(id) - if !ok { - return ErrImageUnknown - } - id = image.ID - toDeleteIndex := -1 - for i, candidate := range r.images { - if candidate.ID == id { - toDeleteIndex = i - } - } - delete(r.byid, id) - r.idindex.Delete(id) - for _, name := range image.Names { - delete(r.byname, name) - } - if toDeleteIndex != -1 { - // delete the image at toDeleteIndex - if toDeleteIndex == len(r.images)-1 { - r.images = r.images[:len(r.images)-1] - } else { - r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) - } - } - if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { - // remove the image from the digest-based index - if list, ok := r.bydigest[digest]; ok { - prunedList := imageSliceWithoutValue(list, image) - if len(prunedList) == 0 { - delete(r.bydigest, digest) - } else { - r.bydigest[digest] = prunedList - } - } - } - if image.Digest != "" { - // remove the image's hard-coded digest from the digest-based index - if list, ok := r.bydigest[image.Digest]; ok { - prunedList := imageSliceWithoutValue(list, image) - if len(prunedList) == 0 { - delete(r.bydigest, image.Digest) - } else { - r.bydigest[image.Digest] = prunedList - } - } - } - if err := r.Save(); err != nil { - return err - } - if err := os.RemoveAll(r.datadir(id)); err != nil { - return err - } - return nil -} - -func (r *imageStore) Get(id string) (*Image, error) { - if image, ok := r.lookup(id); ok { - return image, nil - } - return nil, ErrImageUnknown -} - -func (r *imageStore) Lookup(name string) (id string, err error) { - if image, ok := r.lookup(name); ok { - return image.ID, nil - } - return "", ErrImageUnknown -} - -func (r *imageStore) Exists(id string) bool { - _, ok := r.lookup(id) - return ok -} - -func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { - if images, ok := r.bydigest[d]; ok { - return images, nil - } - return nil, ErrImageUnknown -} - -func (r *imageStore) BigData(id, key string) ([]byte, error) { - if key == "" { - return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") - } - image, ok := r.lookup(id) - if !ok { - return nil, ErrImageUnknown - } - return ioutil.ReadFile(r.datapath(image.ID, key)) -} - -func (r *imageStore) BigDataSize(id, key string) (int64, error) { - if key == "" { - return -1, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve size of image big data with empty name") - } - image, ok := r.lookup(id) - if !ok { - return -1, ErrImageUnknown - } - if image.BigDataSizes == 
nil { - image.BigDataSizes = make(map[string]int64) - } - if size, ok := image.BigDataSizes[key]; ok { - return size, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return -1, ErrImageUnknown - } - if size, ok := image.BigDataSizes[key]; ok { - return size, nil - } - } - } - return -1, ErrSizeUnknown -} - -func (r *imageStore) BigDataDigest(id, key string) (digest.Digest, error) { - if key == "" { - return "", errors.Wrapf(ErrInvalidBigDataName, "can't retrieve digest of image big data value with empty name") - } - image, ok := r.lookup(id) - if !ok { - return "", ErrImageUnknown - } - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - if d, ok := image.BigDataDigests[key]; ok { - return d, nil - } - if data, err := r.BigData(id, key); err == nil && data != nil { - if r.SetBigData(id, key, data) == nil { - image, ok := r.lookup(id) - if !ok { - return "", ErrImageUnknown - } - if d, ok := image.BigDataDigests[key]; ok { - return d, nil - } - } - } - return "", ErrDigestUnknown -} - -func (r *imageStore) BigDataNames(id string) ([]string, error) { - image, ok := r.lookup(id) - if !ok { - return nil, ErrImageUnknown - } - return image.BigDataNames, nil -} - -func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { - modified := make([]*Image, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified -} - -func (r *imageStore) SetBigData(id, key string, data []byte) error { - if key == "" { - return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") - } - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to save data items associated with images at %q", r.imagespath()) - } - image, ok := r.lookup(id) - if !ok { - return ErrImageUnknown - } - if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil { - return err - } - err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600) - if err == nil { - save := false - if image.BigDataSizes == nil { - image.BigDataSizes = make(map[string]int64) - } - oldSize, sizeOk := image.BigDataSizes[key] - image.BigDataSizes[key] = int64(len(data)) - if image.BigDataDigests == nil { - image.BigDataDigests = make(map[string]digest.Digest) - } - oldDigest, digestOk := image.BigDataDigests[key] - newDigest := digest.Canonical.FromBytes(data) - image.BigDataDigests[key] = newDigest - if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest { - save = true - } - addName := true - for _, name := range image.BigDataNames { - if name == key { - addName = false - break - } - } - if addName { - image.BigDataNames = append(image.BigDataNames, key) - save = true - } - if key == ImageDigestBigDataKey { - if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest { - // remove the image from the list of images in the digest-based - // index which corresponds to the old digest for this item, unless - // it's also the hard-coded digest - if list, ok := r.bydigest[oldDigest]; ok { - prunedList := imageSliceWithoutValue(list, image) - if len(prunedList) == 0 { - delete(r.bydigest, oldDigest) - } else { - r.bydigest[oldDigest] = prunedList - } - } - } - // add the image to the list of images in the digest-based index which - // corresponds to the new digest for this item, unless it's already there - list := 
r.bydigest[newDigest] - if len(list) == len(imageSliceWithoutValue(list, image)) { - // the list isn't shortened by trying to prune this image from it, - // so it's not in there yet - r.bydigest[newDigest] = append(list, image) - } - } - if save { - err = r.Save() - } - } - return err -} - -func (r *imageStore) Wipe() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete images at %q", r.imagespath()) - } - ids := make([]string, 0, len(r.byid)) - for id := range r.byid { - ids = append(ids, id) - } - for _, id := range ids { - if err := r.Delete(id); err != nil { - return err - } - } - return nil -} - -func (r *imageStore) Lock() { - r.lockfile.Lock() -} - -func (r *imageStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *imageStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *imageStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *imageStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *imageStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go deleted file mode 100644 index f6a8b0650e..0000000000 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ /dev/null @@ -1,1202 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. -// source: images.go - -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *Image) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Digest) != 0 { - buf.WriteString(`"digest":`) - fflib.WriteJsonString(buf, string(j.Digest)) - buf.WriteByte(',') - } - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.TopLayer) != 0 { - buf.WriteString(`"layer":`) - fflib.WriteJsonString(buf, string(j.TopLayer)) - buf.WriteByte(',') - } - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.BigDataNames) != 0 { - buf.WriteString(`"big-data-names":`) - if j.BigDataNames != nil { - buf.WriteString(`[`) - for i, v := range j.BigDataNames { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.BigDataSizes) != 0 { - if j.BigDataSizes == nil { - buf.WriteString(`"big-data-sizes":null`) - } else { - buf.WriteString(`"big-data-sizes":{ `) - for key, value := range j.BigDataSizes { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.FormatBits2(buf, uint64(value), 10, 
value < 0) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if len(j.BigDataDigests) != 0 { - if j.BigDataDigests == nil { - buf.WriteString(`"big-data-digests":null`) - } else { - buf.WriteString(`"big-data-digests":{ `) - for key, value := range j.BigDataDigests { - fflib.WriteJsonString(buf, key) - buf.WriteString(`:`) - fflib.WriteJsonString(buf, string(value)) - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - } - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtImagebase = iota - ffjtImagenosuchkey - - ffjtImageID - - ffjtImageDigest - - ffjtImageNames - - ffjtImageTopLayer - - ffjtImageMetadata - - ffjtImageBigDataNames - - ffjtImageBigDataSizes - - ffjtImageBigDataDigests - - ffjtImageCreated - - ffjtImageFlags -) - -var ffjKeyImageID = []byte("id") - -var ffjKeyImageDigest = []byte("digest") - -var ffjKeyImageNames = []byte("names") - -var ffjKeyImageTopLayer = []byte("layer") - -var ffjKeyImageMetadata = []byte("metadata") - -var ffjKeyImageBigDataNames = []byte("big-data-names") - -var ffjKeyImageBigDataSizes = []byte("big-data-sizes") - -var ffjKeyImageBigDataDigests = []byte("big-data-digests") - -var ffjKeyImageCreated = []byte("created") - -var ffjKeyImageFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Image) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Image) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtImagebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtImagenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'b': - - if bytes.Equal(ffjKeyImageBigDataNames, kn) { - currentKey = ffjtImageBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyImageBigDataSizes, kn) { - currentKey = ffjtImageBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyImageBigDataDigests, kn) { - currentKey = ffjtImageBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'c': - - if bytes.Equal(ffjKeyImageCreated, kn) { - currentKey = ffjtImageCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'd': - - if bytes.Equal(ffjKeyImageDigest, kn) { - currentKey = ffjtImageDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyImageFlags, kn) { - currentKey = ffjtImageFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyImageID, kn) { - currentKey = ffjtImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'l': - - if bytes.Equal(ffjKeyImageTopLayer, kn) { - currentKey = ffjtImageTopLayer - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyImageMetadata, kn) { - currentKey = ffjtImageMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyImageNames, kn) { - currentKey = ffjtImageNames - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyImageFlags, kn) { - currentKey = ffjtImageFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageCreated, kn) { - currentKey = ffjtImageCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataDigests, kn) { - currentKey = ffjtImageBigDataDigests - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataSizes, kn) { - currentKey = ffjtImageBigDataSizes - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageBigDataNames, kn) { - currentKey = ffjtImageBigDataNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageMetadata, kn) { - currentKey = ffjtImageMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageTopLayer, kn) { - currentKey = ffjtImageTopLayer - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageNames, kn) { - currentKey = ffjtImageNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyImageDigest, kn) { - currentKey = ffjtImageDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { - currentKey = ffjtImageID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtImagenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == 
fflib.FFTok_null { - switch currentKey { - - case ffjtImageID: - goto handle_ID - - case ffjtImageDigest: - goto handle_Digest - - case ffjtImageNames: - goto handle_Names - - case ffjtImageTopLayer: - goto handle_TopLayer - - case ffjtImageMetadata: - goto handle_Metadata - - case ffjtImageBigDataNames: - goto handle_BigDataNames - - case ffjtImageBigDataSizes: - goto handle_BigDataSizes - - case ffjtImageBigDataDigests: - goto handle_BigDataDigests - - case ffjtImageCreated: - goto handle_Created - - case ffjtImageFlags: - goto handle_Flags - - case ffjtImagenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Digest: - - /* handler: j.Digest type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Digest = digest.Digest(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_TopLayer: - - /* handler: j.TopLayer type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.TopLayer = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataNames: - - /* handler: j.BigDataNames type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataNames = nil - } else { - - j.BigDataNames = []string{} - - wantVal := true - - for { - - var tmpJBigDataNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJBigDataNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataNames = string(string(outBuf)) - - } - } - - j.BigDataNames = append(j.BigDataNames, tmpJBigDataNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataSizes: - - /* handler: j.BigDataSizes type=map[string]int64 kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataSizes = nil - } else { - - j.BigDataSizes = make(map[string]int64, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataSizes int64 - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataSizes type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - tmpJBigDataSizes = int64(tval) - - } - } - - j.BigDataSizes[k] = tmpJBigDataSizes - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_BigDataDigests: - - /* handler: j.BigDataDigests type=map[string]digest.Digest kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.BigDataDigests = nil - } else { - - j.BigDataDigests = make(map[string]digest.Digest, 0) - - wantVal := true - - for { - - var k string - - var tmpJBigDataDigests digest.Digest - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJBigDataDigests type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJBigDataDigests = digest.Digest(string(outBuf)) - - } - } - - j.BigDataDigests[k] = tmpJBigDataDigests - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *imageStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *imageStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtimageStorebase = iota - ffjtimageStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *imageStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *imageStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtimageStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtimageStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtimageStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtimageStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/layers.go b/vendor/github.com/containers/storage/layers.go deleted file mode 100644 index a1be6eee77..0000000000 --- a/vendor/github.com/containers/storage/layers.go +++ /dev/null @@ -1,1041 +0,0 @@ -package storage - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - drivers "github.com/containers/storage/drivers" - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/stringid" - "github.com/containers/storage/pkg/truncindex" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -const ( - tarSplitSuffix = ".tar-split.gz" - incompleteFlag = "incomplete" - compressionFlag = "diff-compression" -) - -// A Layer is a record of a copy-on-write layer that's stored by the lower -// level graph driver. -type Layer struct { - // ID is either one which was specified at create-time, or a random - // value which was generated by the library. - ID string `json:"id"` - - // Names is an optional set of user-defined convenience values. The - // layer can be referred to by its ID or any of its names. Names are - // unique among layers. - Names []string `json:"names,omitempty"` - - // Parent is the ID of a layer from which this layer inherits data. - Parent string `json:"parent,omitempty"` - - // Metadata is data we keep for the convenience of the caller. It is not - // expected to be large, since it is kept in memory. - Metadata string `json:"metadata,omitempty"` - - // MountLabel is an SELinux label which should be used when attempting to mount - // the layer. - MountLabel string `json:"mountlabel,omitempty"` - - // MountPoint is the path where the layer is mounted, or where it was most - // recently mounted. This can change between subsequent Unmount() and - // Mount() calls, so the caller should consult this value after Mount() - // succeeds to find the location of the container's root filesystem. 
- MountPoint string `json:"-"` - - // MountCount is used as a reference count for the container's layer being - // mounted at the mount point. - MountCount int `json:"-"` - - // Created is the datestamp for when this layer was created. Older - // versions of the library did not track this information, so callers - // will likely want to use the IsZero() method to verify that a value - // is set before using it. - Created time.Time `json:"created,omitempty"` - - // CompressedDigest is the digest of the blob that was last passed to - // ApplyDiff() or Put(), as it was presented to us. - CompressedDigest digest.Digest `json:"compressed-diff-digest,omitempty"` - - // CompressedSize is the length of the blob that was last passed to - // ApplyDiff() or Put(), as it was presented to us. If - // CompressedDigest is not set, this should be treated as if it were an - // uninitialized value. - CompressedSize int64 `json:"compressed-size,omitempty"` - - // UncompressedDigest is the digest of the blob that was last passed to - // ApplyDiff() or Put(), after we decompressed it. Often referred to - // as a DiffID. - UncompressedDigest digest.Digest `json:"diff-digest,omitempty"` - - // UncompressedSize is the length of the blob that was last passed to - // ApplyDiff() or Put(), after we decompressed it. If - // UncompressedDigest is not set, this should be treated as if it were - // an uninitialized value. - UncompressedSize int64 `json:"diff-size,omitempty"` - - // CompressionType is the type of compression which we detected on the blob - // that was last passed to ApplyDiff() or Put(). - CompressionType archive.Compression `json:"compression,omitempty"` - - // Flags is arbitrary data about the layer. - Flags map[string]interface{} `json:"flags,omitempty"` -} - -type layerMountPoint struct { - ID string `json:"id"` - MountPoint string `json:"path"` - MountCount int `json:"count"` -} - -// DiffOptions override the default behavior of Diff() methods. -type DiffOptions struct { - // Compression, if set overrides the default compressor when generating a diff. - Compression *archive.Compression -} - -// ROLayerStore wraps a graph driver, adding the ability to refer to layers by -// name, and keeping track of parent-child relationships, along with a list of -// all known layers. -type ROLayerStore interface { - ROFileBasedStore - ROMetadataStore - - // Exists checks if a layer with the specified name or ID is known. - Exists(id string) bool - - // Get retrieves information about a layer given an ID or name. - Get(id string) (*Layer, error) - - // Status returns an slice of key-value pairs, suitable for human consumption, - // relaying whatever status information the underlying driver can share. - Status() ([][2]string, error) - - // Changes returns a slice of Change structures, which contain a pathname - // (Path) and a description of what sort of change (Kind) was made by the - // layer (either ChangeModify, ChangeAdd, or ChangeDelete), relative to a - // specified layer. By default, the layer's parent is used as a reference. - Changes(from, to string) ([]archive.Change, error) - - // Diff produces a tarstream which can be applied to a layer with the contents - // of the first layer to produce a layer with the contents of the second layer. - // By default, the parent of the second layer is used as the first - // layer, so it need not be specified. Options can be used to override - // default behavior, but are also not required. 
- Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) - - // DiffSize produces an estimate of the length of the tarstream which would be - // produced by Diff. - DiffSize(from, to string) (int64, error) - - // Size produces a cached value for the uncompressed size of the layer, - // if one is known, or -1 if it is not known. If the layer can not be - // found, it returns an error. - Size(name string) (int64, error) - - // Lookup attempts to translate a name to an ID. Most methods do this - // implicitly. - Lookup(name string) (string, error) - - // LayersByCompressedDigest returns a slice of the layers with the - // specified compressed digest value recorded for them. - LayersByCompressedDigest(d digest.Digest) ([]Layer, error) - - // LayersByUncompressedDigest returns a slice of the layers with the - // specified uncompressed digest value recorded for them. - LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) - - // Layers returns a slice of the known layers. - Layers() ([]Layer, error) -} - -// LayerStore wraps a graph driver, adding the ability to refer to layers by -// name, and keeping track of parent-child relationships, along with a list of -// all known layers. -type LayerStore interface { - ROLayerStore - RWFileBasedStore - RWMetadataStore - FlaggableStore - - // Create creates a new layer, optionally giving it a specified ID rather than - // a randomly-generated one, either inheriting data from another specified - // layer or the empty base layer. The new layer can optionally be given names - // and have an SELinux label specified for use when mounting it. Some - // underlying drivers can accept a "size" option. At this time, most - // underlying drivers do not themselves distinguish between writeable - // and read-only layers. - Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (*Layer, error) - - // CreateWithFlags combines the functions of Create and SetFlag. - CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) - - // Put combines the functions of CreateWithFlags and ApplyDiff. - Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (*Layer, int64, error) - - // SetNames replaces the list of names associated with a layer with the - // supplied values. - SetNames(id string, names []string) error - - // Delete deletes a layer with the specified name or ID. - Delete(id string) error - - // Wipe deletes all layers. - Wipe() error - - // Mount mounts a layer for use. If the specified layer is the parent of other - // layers, it should not be written to. An SELinux label to be applied to the - // mount can be specified to override the one configured for the layer. - Mount(id, mountLabel string) (string, error) - - // Unmount unmounts a layer when it is no longer in use. - Unmount(id string) error - - // ApplyDiff reads a tarstream which was created by a previous call to Diff and - // applies its changes to a specified layer. 
- ApplyDiff(to string, diff io.Reader) (int64, error) -} - -type layerStore struct { - lockfile Locker - rundir string - driver drivers.Driver - layerdir string - layers []*Layer - idindex *truncindex.TruncIndex - byid map[string]*Layer - byname map[string]*Layer - bymount map[string]*Layer - bycompressedsum map[digest.Digest][]string - byuncompressedsum map[digest.Digest][]string -} - -func (r *layerStore) Layers() ([]Layer, error) { - layers := make([]Layer, len(r.layers)) - for i := range r.layers { - layers[i] = *(r.layers[i]) - } - return layers, nil -} - -func (r *layerStore) mountspath() string { - return filepath.Join(r.rundir, "mountpoints.json") -} - -func (r *layerStore) layerspath() string { - return filepath.Join(r.layerdir, "layers.json") -} - -func (r *layerStore) Load() error { - shouldSave := false - rpath := r.layerspath() - data, err := ioutil.ReadFile(rpath) - if err != nil && !os.IsNotExist(err) { - return err - } - layers := []*Layer{} - idlist := []string{} - ids := make(map[string]*Layer) - names := make(map[string]*Layer) - mounts := make(map[string]*Layer) - compressedsums := make(map[digest.Digest][]string) - uncompressedsums := make(map[digest.Digest][]string) - if err = json.Unmarshal(data, &layers); len(data) == 0 || err == nil { - idlist = make([]string, 0, len(layers)) - for n, layer := range layers { - ids[layer.ID] = layers[n] - idlist = append(idlist, layer.ID) - for _, name := range layer.Names { - if conflict, ok := names[name]; ok { - r.removeName(conflict, name) - shouldSave = true - } - names[name] = layers[n] - } - if layer.CompressedDigest != "" { - compressedsums[layer.CompressedDigest] = append(compressedsums[layer.CompressedDigest], layer.ID) - } - if layer.UncompressedDigest != "" { - uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID) - } - } - } - if shouldSave && !r.IsReadWrite() { - return ErrDuplicateLayerNames - } - mpath := r.mountspath() - data, err = ioutil.ReadFile(mpath) - if err != nil && !os.IsNotExist(err) { - return err - } - layerMounts := []layerMountPoint{} - if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil { - for _, mount := range layerMounts { - if mount.MountPoint != "" { - if layer, ok := ids[mount.ID]; ok { - mounts[mount.MountPoint] = layer - layer.MountPoint = mount.MountPoint - layer.MountCount = mount.MountCount - } - } - } - } - r.layers = layers - r.idindex = truncindex.NewTruncIndex(idlist) - r.byid = ids - r.byname = names - r.bymount = mounts - r.bycompressedsum = compressedsums - r.byuncompressedsum = uncompressedsums - err = nil - // Last step: if we're writable, try to remove anything that a previous - // user of this storage area marked for deletion but didn't manage to - // actually delete. 
- if r.IsReadWrite() { - for _, layer := range r.layers { - if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) - } - if cleanup, ok := layer.Flags[incompleteFlag]; ok { - if b, ok := cleanup.(bool); ok && b { - err = r.Delete(layer.ID) - if err != nil { - break - } - shouldSave = true - } - } - } - if shouldSave { - return r.Save() - } - } - return err -} - -func (r *layerStore) Save() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath()) - } - rpath := r.layerspath() - if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil { - return err - } - jldata, err := json.Marshal(&r.layers) - if err != nil { - return err - } - mpath := r.mountspath() - if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil { - return err - } - mounts := make([]layerMountPoint, 0, len(r.layers)) - for _, layer := range r.layers { - if layer.MountPoint != "" && layer.MountCount > 0 { - mounts = append(mounts, layerMountPoint{ - ID: layer.ID, - MountPoint: layer.MountPoint, - MountCount: layer.MountCount, - }) - } - } - jmdata, err := json.Marshal(&mounts) - if err != nil { - return err - } - if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil { - return err - } - defer r.Touch() - return ioutils.AtomicWriteFile(mpath, jmdata, 0600) -} - -func newLayerStore(rundir string, layerdir string, driver drivers.Driver) (LayerStore, error) { - if err := os.MkdirAll(rundir, 0700); err != nil { - return nil, err - } - if err := os.MkdirAll(layerdir, 0700); err != nil { - return nil, err - } - lockfile, err := GetLockfile(filepath.Join(layerdir, "layers.lock")) - if err != nil { - return nil, err - } - lockfile.Lock() - defer lockfile.Unlock() - rlstore := layerStore{ - lockfile: lockfile, - driver: driver, - rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), - } - if err := rlstore.Load(); err != nil { - return nil, err - } - return &rlstore, nil -} - -func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROLayerStore, error) { - lockfile, err := GetROLockfile(filepath.Join(layerdir, "layers.lock")) - if err != nil { - return nil, err - } - lockfile.Lock() - defer lockfile.Unlock() - rlstore := layerStore{ - lockfile: lockfile, - driver: driver, - rundir: rundir, - layerdir: layerdir, - byid: make(map[string]*Layer), - bymount: make(map[string]*Layer), - byname: make(map[string]*Layer), - } - if err := rlstore.Load(); err != nil { - return nil, err - } - return &rlstore, nil -} - -func (r *layerStore) lookup(id string) (*Layer, bool) { - if layer, ok := r.byid[id]; ok { - return layer, ok - } else if layer, ok := r.byname[id]; ok { - return layer, ok - } else if longid, err := r.idindex.Get(id); err == nil { - layer, ok := r.byid[longid] - return layer, ok - } - return nil, false -} - -func (r *layerStore) Size(name string) (int64, error) { - layer, ok := r.lookup(name) - if !ok { - return -1, ErrLayerUnknown - } - // We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that - // a zero value is not just present because it was never set to anything else (which can happen if the layer was - // created by a version of this library that didn't keep track of digest and size information). 
- if layer.UncompressedDigest != "" { - return layer.UncompressedSize, nil - } - return -1, nil -} - -func (r *layerStore) ClearFlag(id string, flag string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to clear flags on layers at %q", r.layerspath()) - } - layer, ok := r.lookup(id) - if !ok { - return ErrLayerUnknown - } - delete(layer.Flags, flag) - return r.Save() -} - -func (r *layerStore) SetFlag(id string, flag string, value interface{}) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to set flags on layers at %q", r.layerspath()) - } - layer, ok := r.lookup(id) - if !ok { - return ErrLayerUnknown - } - if layer.Flags == nil { - layer.Flags = make(map[string]interface{}) - } - layer.Flags[flag] = value - return r.Save() -} - -func (r *layerStore) Status() ([][2]string, error) { - return r.driver.Status(), nil -} - -func (r *layerStore) Put(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}, diff io.Reader) (layer *Layer, size int64, err error) { - if !r.IsReadWrite() { - return nil, -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new layers at %q", r.layerspath()) - } - size = -1 - if err := os.MkdirAll(r.rundir, 0700); err != nil { - return nil, -1, err - } - if err := os.MkdirAll(r.layerdir, 0700); err != nil { - return nil, -1, err - } - if parent != "" { - if parentLayer, ok := r.lookup(parent); ok { - parent = parentLayer.ID - } - } - if id == "" { - id = stringid.GenerateRandomID() - _, idInUse := r.byid[id] - for idInUse { - id = stringid.GenerateRandomID() - _, idInUse = r.byid[id] - } - } - if _, idInUse := r.byid[id]; idInUse { - return nil, -1, ErrDuplicateID - } - names = dedupeNames(names) - for _, name := range names { - if _, nameInUse := r.byname[name]; nameInUse { - return nil, -1, ErrDuplicateName - } - } - opts := drivers.CreateOpts{ - MountLabel: mountLabel, - StorageOpt: options, - } - if writeable { - err = r.driver.CreateReadWrite(id, parent, &opts) - } else { - err = r.driver.Create(id, parent, &opts) - } - if err == nil { - layer = &Layer{ - ID: id, - Parent: parent, - Names: names, - MountLabel: mountLabel, - Created: time.Now().UTC(), - Flags: make(map[string]interface{}), - } - r.layers = append(r.layers, layer) - r.idindex.Add(id) - r.byid[id] = layer - for _, name := range names { - r.byname[name] = layer - } - for flag, value := range flags { - layer.Flags[flag] = value - } - if diff != nil { - layer.Flags[incompleteFlag] = true - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. - r.driver.Remove(id) - return nil, -1, err - } - size, err = r.ApplyDiff(layer.ID, diff) - if err != nil { - if r.Delete(layer.ID) != nil { - // Either a driver error or an error saving. - // We now have a layer that's been marked for - // deletion but which we failed to remove. - } - return nil, -1, err - } - delete(layer.Flags, incompleteFlag) - } - err = r.Save() - if err != nil { - // We don't have a record of this layer, but at least - // try to clean it up underneath us. 
- r.driver.Remove(id) - return nil, -1, err - } - } - return layer, size, err -} - -func (r *layerStore) CreateWithFlags(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool, flags map[string]interface{}) (layer *Layer, err error) { - layer, _, err = r.Put(id, parent, names, mountLabel, options, writeable, flags, nil) - return layer, err -} - -func (r *layerStore) Create(id, parent string, names []string, mountLabel string, options map[string]string, writeable bool) (layer *Layer, err error) { - return r.CreateWithFlags(id, parent, names, mountLabel, options, writeable, nil) -} - -func (r *layerStore) Mount(id, mountLabel string) (string, error) { - if !r.IsReadWrite() { - return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) - } - layer, ok := r.lookup(id) - if !ok { - return "", ErrLayerUnknown - } - if layer.MountCount > 0 { - layer.MountCount++ - return layer.MountPoint, r.Save() - } - if mountLabel == "" { - mountLabel = layer.MountLabel - } - mountpoint, err := r.driver.Get(id, mountLabel) - if mountpoint != "" && err == nil { - if layer.MountPoint != "" { - delete(r.bymount, layer.MountPoint) - } - layer.MountPoint = filepath.Clean(mountpoint) - layer.MountCount++ - r.bymount[layer.MountPoint] = layer - err = r.Save() - } - return mountpoint, err -} - -func (r *layerStore) Unmount(id string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath()) - } - layer, ok := r.lookup(id) - if !ok { - layerByMount, ok := r.bymount[filepath.Clean(id)] - if !ok { - return ErrLayerUnknown - } - layer = layerByMount - } - if layer.MountCount > 1 { - layer.MountCount-- - return r.Save() - } - err := r.driver.Put(id) - if err == nil || os.IsNotExist(err) { - if layer.MountPoint != "" { - delete(r.bymount, layer.MountPoint) - } - layer.MountCount-- - layer.MountPoint = "" - err = r.Save() - } - return err -} - -func (r *layerStore) removeName(layer *Layer, name string) { - layer.Names = stringSliceWithoutValue(layer.Names, name) -} - -func (r *layerStore) SetNames(id string, names []string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to change layer name assignments at %q", r.layerspath()) - } - names = dedupeNames(names) - if layer, ok := r.lookup(id); ok { - for _, name := range layer.Names { - delete(r.byname, name) - } - for _, name := range names { - if otherLayer, ok := r.byname[name]; ok { - r.removeName(otherLayer, name) - } - r.byname[name] = layer - } - layer.Names = names - return r.Save() - } - return ErrLayerUnknown -} - -func (r *layerStore) Metadata(id string) (string, error) { - if layer, ok := r.lookup(id); ok { - return layer.Metadata, nil - } - return "", ErrLayerUnknown -} - -func (r *layerStore) SetMetadata(id, metadata string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer metadata at %q", r.layerspath()) - } - if layer, ok := r.lookup(id); ok { - layer.Metadata = metadata - return r.Save() - } - return ErrLayerUnknown -} - -func (r *layerStore) tspath(id string) string { - return filepath.Join(r.layerdir, id+tarSplitSuffix) -} - -func (r *layerStore) Delete(id string) error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) - } - layer, ok := r.lookup(id) - if !ok { - return ErrLayerUnknown - } - id = layer.ID - for 
layer.MountCount > 0 { - if err := r.Unmount(id); err != nil { - return err - } - } - err := r.driver.Remove(id) - if err == nil { - os.Remove(r.tspath(id)) - delete(r.byid, id) - r.idindex.Delete(id) - if layer.MountPoint != "" { - delete(r.bymount, layer.MountPoint) - } - toDeleteIndex := -1 - for i, candidate := range r.layers { - if candidate.ID == id { - toDeleteIndex = i - break - } - } - if toDeleteIndex != -1 { - // delete the layer at toDeleteIndex - if toDeleteIndex == len(r.layers)-1 { - r.layers = r.layers[:len(r.layers)-1] - } else { - r.layers = append(r.layers[:toDeleteIndex], r.layers[toDeleteIndex+1:]...) - } - } - if err = r.Save(); err != nil { - return err - } - } - return err -} - -func (r *layerStore) Lookup(name string) (id string, err error) { - if layer, ok := r.lookup(name); ok { - return layer.ID, nil - } - return "", ErrLayerUnknown -} - -func (r *layerStore) Exists(id string) bool { - _, ok := r.lookup(id) - return ok -} - -func (r *layerStore) Get(id string) (*Layer, error) { - if layer, ok := r.lookup(id); ok { - return layer, nil - } - return nil, ErrLayerUnknown -} - -func (r *layerStore) Wipe() error { - if !r.IsReadWrite() { - return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to delete layers at %q", r.layerspath()) - } - ids := make([]string, 0, len(r.byid)) - for id := range r.byid { - ids = append(ids, id) - } - for _, id := range ids { - if err := r.Delete(id); err != nil { - return err - } - } - return nil -} - -func (r *layerStore) findParentAndLayer(from, to string) (fromID string, toID string, toLayer *Layer, err error) { - var ok bool - var fromLayer *Layer - toLayer, ok = r.lookup(to) - if !ok { - return "", "", nil, ErrLayerUnknown - } - to = toLayer.ID - if from == "" { - from = toLayer.Parent - } - if from != "" { - fromLayer, ok = r.lookup(from) - if ok { - from = fromLayer.ID - } else { - fromLayer, ok = r.lookup(toLayer.Parent) - if ok { - from = fromLayer.ID - } - } - } - return from, to, toLayer, nil -} - -func (r *layerStore) Changes(from, to string) ([]archive.Change, error) { - from, to, toLayer, err := r.findParentAndLayer(from, to) - if err != nil { - return nil, ErrLayerUnknown - } - return r.driver.Changes(to, from, toLayer.MountLabel) -} - -type simpleGetCloser struct { - r *layerStore - path string - id string -} - -func (s *simpleGetCloser) Get(path string) (io.ReadCloser, error) { - return os.Open(filepath.Join(s.path, path)) -} - -func (s *simpleGetCloser) Close() error { - return s.r.Unmount(s.id) -} - -func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) { - if getter, ok := r.driver.(drivers.DiffGetterDriver); ok { - return getter.DiffGetter(id) - } - path, err := r.Mount(id, "") - if err != nil { - return nil, err - } - return &simpleGetCloser{ - r: r, - path: path, - id: id, - }, nil -} - -func (r *layerStore) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - var metadata storage.Unpacker - - from, to, toLayer, err := r.findParentAndLayer(from, to) - if err != nil { - return nil, ErrLayerUnknown - } - // Default to applying the type of compression that we noted was used - // for the layerdiff when it was applied. - compression := toLayer.CompressionType - // If a particular compression type (or no compression) was selected, - // use that instead. 
- if options != nil && options.Compression != nil { - compression = *options.Compression - } - maybeCompressReadCloser := func(rc io.ReadCloser) (io.ReadCloser, error) { - // Depending on whether or not compression is desired, return either the - // passed-in ReadCloser, or a new one that provides its readers with a - // compressed version of the data that the original would have provided - // to its readers. - if compression == archive.Uncompressed { - return rc, nil - } - preader, pwriter := io.Pipe() - compressor, err := archive.CompressStream(pwriter, compression) - if err != nil { - rc.Close() - pwriter.Close() - preader.Close() - return nil, err - } - go func() { - defer pwriter.Close() - defer compressor.Close() - defer rc.Close() - io.Copy(compressor, rc) - }() - return preader, nil - } - - if from != toLayer.Parent { - diff, err := r.driver.Diff(to, from, toLayer.MountLabel) - if err != nil { - return nil, err - } - return maybeCompressReadCloser(diff) - } - - tsfile, err := os.Open(r.tspath(to)) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - diff, err := r.driver.Diff(to, from, toLayer.MountLabel) - if err != nil { - return nil, err - } - return maybeCompressReadCloser(diff) - } - defer tsfile.Close() - - decompressor, err := gzip.NewReader(tsfile) - if err != nil { - return nil, err - } - defer decompressor.Close() - - tsbytes, err := ioutil.ReadAll(decompressor) - if err != nil { - return nil, err - } - - metadata = storage.NewJSONUnpacker(bytes.NewBuffer(tsbytes)) - - fgetter, err := r.newFileGetter(to) - if err != nil { - return nil, err - } - - tarstream := asm.NewOutputTarStream(fgetter, metadata) - rc := ioutils.NewReadCloserWrapper(tarstream, func() error { - err1 := tarstream.Close() - err2 := fgetter.Close() - if err2 == nil { - return err1 - } - return err2 - }) - return maybeCompressReadCloser(rc) -} - -func (r *layerStore) DiffSize(from, to string) (size int64, err error) { - var toLayer *Layer - from, to, toLayer, err = r.findParentAndLayer(from, to) - if err != nil { - return -1, ErrLayerUnknown - } - return r.driver.DiffSize(to, from, toLayer.MountLabel) -} - -func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) { - if !r.IsReadWrite() { - return -1, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify layer contents at %q", r.layerspath()) - } - - layer, ok := r.lookup(to) - if !ok { - return -1, ErrLayerUnknown - } - - header := make([]byte, 10240) - n, err := diff.Read(header) - if err != nil && err != io.EOF { - return -1, err - } - - compression := archive.DetectCompression(header[:n]) - compressedDigest := digest.Canonical.Digester() - compressedCounter := ioutils.NewWriteCounter(compressedDigest.Hash()) - defragmented := io.TeeReader(io.MultiReader(bytes.NewBuffer(header[:n]), diff), compressedCounter) - - tsdata := bytes.Buffer{} - compressor, err := gzip.NewWriterLevel(&tsdata, gzip.BestSpeed) - if err != nil { - compressor = gzip.NewWriter(&tsdata) - } - metadata := storage.NewJSONPacker(compressor) - uncompressed, err := archive.DecompressStream(defragmented) - if err != nil { - return -1, err - } - uncompressedDigest := digest.Canonical.Digester() - uncompressedCounter := ioutils.NewWriteCounter(uncompressedDigest.Hash()) - payload, err := asm.NewInputTarStream(io.TeeReader(uncompressed, uncompressedCounter), metadata, storage.NewDiscardFilePutter()) - if err != nil { - return -1, err - } - size, err = r.driver.ApplyDiff(layer.ID, layer.Parent, layer.MountLabel, payload) - if err != nil { - return 
-1, err - } - compressor.Close() - if err == nil { - if err := os.MkdirAll(filepath.Dir(r.tspath(layer.ID)), 0700); err != nil { - return -1, err - } - if err := ioutils.AtomicWriteFile(r.tspath(layer.ID), tsdata.Bytes(), 0600); err != nil { - return -1, err - } - } - - updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) { - var newList []string - if oldvalue != "" { - for _, value := range (*m)[oldvalue] { - if value != id { - newList = append(newList, value) - } - } - if len(newList) > 0 { - (*m)[oldvalue] = newList - } else { - delete(*m, oldvalue) - } - } - if newvalue != "" { - (*m)[newvalue] = append((*m)[newvalue], id) - } - } - updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest.Digest(), layer.ID) - layer.CompressedDigest = compressedDigest.Digest() - layer.CompressedSize = compressedCounter.Count - updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest.Digest(), layer.ID) - layer.UncompressedDigest = uncompressedDigest.Digest() - layer.UncompressedSize = uncompressedCounter.Count - layer.CompressionType = compression - - err = r.Save() - - return size, err -} - -func (r *layerStore) layersByDigestMap(m map[digest.Digest][]string, d digest.Digest) ([]Layer, error) { - var layers []Layer - for _, layerID := range m[d] { - layer, ok := r.lookup(layerID) - if !ok { - return nil, ErrLayerUnknown - } - layers = append(layers, *layer) - } - return layers, nil -} - -func (r *layerStore) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { - return r.layersByDigestMap(r.bycompressedsum, d) -} - -func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { - return r.layersByDigestMap(r.byuncompressedsum, d) -} - -func (r *layerStore) Lock() { - r.lockfile.Lock() -} - -func (r *layerStore) Unlock() { - r.lockfile.Unlock() -} - -func (r *layerStore) Touch() error { - return r.lockfile.Touch() -} - -func (r *layerStore) Modified() (bool, error) { - return r.lockfile.Modified() -} - -func (r *layerStore) IsReadWrite() bool { - return r.lockfile.IsReadWrite() -} - -func (r *layerStore) TouchedSince(when time.Time) bool { - return r.lockfile.TouchedSince(when) -} diff --git a/vendor/github.com/containers/storage/layers_ffjson.go b/vendor/github.com/containers/storage/layers_ffjson.go deleted file mode 100644 index 1d494e9d43..0000000000 --- a/vendor/github.com/containers/storage/layers_ffjson.go +++ /dev/null @@ -1,1713 +0,0 @@ -// Code generated by ffjson . DO NOT EDIT. -// source: layers.go. 
- -package storage - -import ( - "bytes" - "encoding/json" - "fmt" - "github.com/containers/storage/pkg/archive" - "github.com/opencontainers/go-digest" - fflib "github.com/pquerna/ffjson/fflib/v1" -) - -// MarshalJSON marshal bytes to json - template -func (j *DiffOptions) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *DiffOptions) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - if j.Compression != nil { - buf.WriteString(`{"Compression":`) - fflib.FormatBits2(buf, uint64(*j.Compression), 10, *j.Compression < 0) - } else { - buf.WriteString(`{"Compression":null`) - } - buf.WriteByte('}') - return nil -} - -const ( - ffjtDiffOptionsbase = iota - ffjtDiffOptionsnosuchkey - - ffjtDiffOptionsCompression -) - -var ffjKeyDiffOptionsCompression = []byte("Compression") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *DiffOptions) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *DiffOptions) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtDiffOptionsbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm.
- currentKey = ffjtDiffOptionsnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'C': - - if bytes.Equal(ffjKeyDiffOptionsCompression, kn) { - currentKey = ffjtDiffOptionsCompression - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyDiffOptionsCompression, kn) { - currentKey = ffjtDiffOptionsCompression - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtDiffOptionsnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtDiffOptionsCompression: - goto handle_Compression - - case ffjtDiffOptionsnosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_Compression: - - /* handler: j.Compression type=archive.Compression kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - j.Compression = nil - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - ttypval := archive.Compression(tval) - j.Compression = &ttypval - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *Layer) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *Layer) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{ "id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteByte(',') - if len(j.Names) != 0 { - buf.WriteString(`"names":`) - if j.Names != nil { - buf.WriteString(`[`) - for i, v := range j.Names { - if i != 0 { - buf.WriteString(`,`) - } - fflib.WriteJsonString(buf, string(v)) - } - buf.WriteString(`]`) - } else { - buf.WriteString(`null`) - } - buf.WriteByte(',') - } - if len(j.Parent) != 0 { - buf.WriteString(`"parent":`) - fflib.WriteJsonString(buf, string(j.Parent)) - buf.WriteByte(',') - } - if len(j.Metadata) != 0 { - buf.WriteString(`"metadata":`) - fflib.WriteJsonString(buf, string(j.Metadata)) - buf.WriteByte(',') - } - if len(j.MountLabel) != 0 { - 
buf.WriteString(`"mountlabel":`) - fflib.WriteJsonString(buf, string(j.MountLabel)) - buf.WriteByte(',') - } - if true { - buf.WriteString(`"created":`) - - { - - obj, err = j.Created.MarshalJSON() - if err != nil { - return err - } - buf.Write(obj) - - } - buf.WriteByte(',') - } - if len(j.CompressedDigest) != 0 { - buf.WriteString(`"compressed-diff-digest":`) - fflib.WriteJsonString(buf, string(j.CompressedDigest)) - buf.WriteByte(',') - } - if j.CompressedSize != 0 { - buf.WriteString(`"compressed-size":`) - fflib.FormatBits2(buf, uint64(j.CompressedSize), 10, j.CompressedSize < 0) - buf.WriteByte(',') - } - if len(j.UncompressedDigest) != 0 { - buf.WriteString(`"diff-digest":`) - fflib.WriteJsonString(buf, string(j.UncompressedDigest)) - buf.WriteByte(',') - } - if j.UncompressedSize != 0 { - buf.WriteString(`"diff-size":`) - fflib.FormatBits2(buf, uint64(j.UncompressedSize), 10, j.UncompressedSize < 0) - buf.WriteByte(',') - } - if j.CompressionType != 0 { - buf.WriteString(`"compression":`) - fflib.FormatBits2(buf, uint64(j.CompressionType), 10, j.CompressionType < 0) - buf.WriteByte(',') - } - if len(j.Flags) != 0 { - buf.WriteString(`"flags":`) - /* Falling back. type=map[string]interface {} kind=map */ - err = buf.Encode(j.Flags) - if err != nil { - return err - } - buf.WriteByte(',') - } - buf.Rewind(1) - buf.WriteByte('}') - return nil -} - -const ( - ffjtLayerbase = iota - ffjtLayernosuchkey - - ffjtLayerID - - ffjtLayerNames - - ffjtLayerParent - - ffjtLayerMetadata - - ffjtLayerMountLabel - - ffjtLayerCreated - - ffjtLayerCompressedDigest - - ffjtLayerCompressedSize - - ffjtLayerUncompressedDigest - - ffjtLayerUncompressedSize - - ffjtLayerCompressionType - - ffjtLayerFlags -) - -var ffjKeyLayerID = []byte("id") - -var ffjKeyLayerNames = []byte("names") - -var ffjKeyLayerParent = []byte("parent") - -var ffjKeyLayerMetadata = []byte("metadata") - -var ffjKeyLayerMountLabel = []byte("mountlabel") - -var ffjKeyLayerCreated = []byte("created") - -var ffjKeyLayerCompressedDigest = []byte("compressed-diff-digest") - -var ffjKeyLayerCompressedSize = []byte("compressed-size") - -var ffjKeyLayerUncompressedDigest = []byte("diff-digest") - -var ffjKeyLayerUncompressedSize = []byte("diff-size") - -var ffjKeyLayerCompressionType = []byte("compression") - -var ffjKeyLayerFlags = []byte("flags") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *Layer) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *Layer) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtLayerbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. 
- if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. - currentKey = ffjtLayernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'c': - - if bytes.Equal(ffjKeyLayerCreated, kn) { - currentKey = ffjtLayerCreated - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressedDigest, kn) { - currentKey = ffjtLayerCompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressedSize, kn) { - currentKey = ffjtLayerCompressedSize - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerCompressionType, kn) { - currentKey = ffjtLayerCompressionType - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'd': - - if bytes.Equal(ffjKeyLayerUncompressedDigest, kn) { - currentKey = ffjtLayerUncompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerUncompressedSize, kn) { - currentKey = ffjtLayerUncompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'f': - - if bytes.Equal(ffjKeyLayerFlags, kn) { - currentKey = ffjtLayerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeyLayerID, kn) { - currentKey = ffjtLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'm': - - if bytes.Equal(ffjKeyLayerMetadata, kn) { - currentKey = ffjtLayerMetadata - state = fflib.FFParse_want_colon - goto mainparse - - } else if bytes.Equal(ffjKeyLayerMountLabel, kn) { - currentKey = ffjtLayerMountLabel - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'n': - - if bytes.Equal(ffjKeyLayerNames, kn) { - currentKey = ffjtLayerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'p': - - if bytes.Equal(ffjKeyLayerParent, kn) { - currentKey = ffjtLayerParent - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.EqualFoldRight(ffjKeyLayerFlags, kn) { - currentKey = ffjtLayerFlags - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressionType, kn) { - currentKey = ffjtLayerCompressionType - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerUncompressedSize, kn) { - currentKey = ffjtLayerUncompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerUncompressedDigest, kn) { - currentKey = ffjtLayerUncompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressedSize, kn) { - currentKey = ffjtLayerCompressedSize - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerCompressedDigest, kn) { - currentKey = ffjtLayerCompressedDigest - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerCreated, kn) { - currentKey = ffjtLayerCreated - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerMountLabel, kn) { - currentKey = ffjtLayerMountLabel - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerMetadata, kn) { - currentKey = ffjtLayerMetadata - state = fflib.FFParse_want_colon - goto mainparse - } - - if 
fflib.SimpleLetterEqualFold(ffjKeyLayerParent, kn) { - currentKey = ffjtLayerParent - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.EqualFoldRight(ffjKeyLayerNames, kn) { - currentKey = ffjtLayerNames - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeyLayerID, kn) { - currentKey = ffjtLayerID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtLayernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtLayerID: - goto handle_ID - - case ffjtLayerNames: - goto handle_Names - - case ffjtLayerParent: - goto handle_Parent - - case ffjtLayerMetadata: - goto handle_Metadata - - case ffjtLayerMountLabel: - goto handle_MountLabel - - case ffjtLayerCreated: - goto handle_Created - - case ffjtLayerCompressedDigest: - goto handle_CompressedDigest - - case ffjtLayerCompressedSize: - goto handle_CompressedSize - - case ffjtLayerUncompressedDigest: - goto handle_UncompressedDigest - - case ffjtLayerUncompressedSize: - goto handle_UncompressedSize - - case ffjtLayerCompressionType: - goto handle_CompressionType - - case ffjtLayerFlags: - goto handle_Flags - - case ffjtLayernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Names: - - /* handler: j.Names type=[]string kind=slice quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_brace && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Names = nil - } else { - - j.Names = []string{} - - wantVal := true - - for { - - var tmpJNames string - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_brace { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. 
- return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: tmpJNames type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - tmpJNames = string(string(outBuf)) - - } - } - - j.Names = append(j.Names, tmpJNames) - - wantVal = false - } - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Parent: - - /* handler: j.Parent type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Parent = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Metadata: - - /* handler: j.Metadata type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.Metadata = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountLabel: - - /* handler: j.MountLabel type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.MountLabel = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Created: - - /* handler: j.Created type=time.Time kind=struct quoted=false*/ - - { - if tok == fflib.FFTok_null { - - } else { - - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = j.Created.UnmarshalJSON(tbuf) - if err != nil { - return fs.WrapErr(err) - } - } - state = fflib.FFParse_after_value - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressedDigest: - - /* handler: j.CompressedDigest type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.CompressedDigest = digest.Digest(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressedSize: - - /* handler: j.CompressedSize type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.CompressedSize = int64(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_UncompressedDigest: - - /* handler: j.UncompressedDigest type=digest.Digest kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go 
value for Digest", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.UncompressedDigest = digest.Digest(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_UncompressedSize: - - /* handler: j.UncompressedSize type=int64 kind=int64 quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int64", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.UncompressedSize = int64(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_CompressionType: - - /* handler: j.CompressionType type=archive.Compression kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Compression", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.CompressionType = archive.Compression(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_Flags: - - /* handler: j.Flags type=map[string]interface {} kind=map quoted=false*/ - - { - - { - if tok != fflib.FFTok_left_bracket && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for ", tok)) - } - } - - if tok == fflib.FFTok_null { - j.Flags = nil - } else { - - j.Flags = make(map[string]interface{}, 0) - - wantVal := true - - for { - - var k string - - var tmpJFlags interface{} - - tok = fs.Scan() - if tok == fflib.FFTok_error { - goto tokerror - } - if tok == fflib.FFTok_right_bracket { - break - } - - if tok == fflib.FFTok_comma { - if wantVal == true { - // TODO(pquerna): this isn't an ideal error message, this handles - // things like [,,,] as an array value. - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) - } - continue - } else { - wantVal = true - } - - /* handler: k type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - k = string(string(outBuf)) - - } - } - - // Expect ':' after key - tok = fs.Scan() - if tok != fflib.FFTok_colon { - return fs.WrapErr(fmt.Errorf("wanted colon token, but got token: %v", tok)) - } - - tok = fs.Scan() - /* handler: tmpJFlags type=interface {} kind=interface quoted=false*/ - - { - /* Falling back. 
type=interface {} kind=interface */ - tbuf, err := fs.CaptureField(tok) - if err != nil { - return fs.WrapErr(err) - } - - err = json.Unmarshal(tbuf, &tmpJFlags) - if err != nil { - return fs.WrapErr(err) - } - } - - j.Flags[k] = tmpJFlags - - wantVal = false - } - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *layerMountPoint) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *layerMountPoint) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{"id":`) - fflib.WriteJsonString(buf, string(j.ID)) - buf.WriteString(`,"path":`) - fflib.WriteJsonString(buf, string(j.MountPoint)) - buf.WriteString(`,"count":`) - fflib.FormatBits2(buf, uint64(j.MountCount), 10, j.MountCount < 0) - buf.WriteByte('}') - return nil -} - -const ( - ffjtlayerMountPointbase = iota - ffjtlayerMountPointnosuchkey - - ffjtlayerMountPointID - - ffjtlayerMountPointMountPoint - - ffjtlayerMountPointMountCount -) - -var ffjKeylayerMountPointID = []byte("id") - -var ffjKeylayerMountPointMountPoint = []byte("path") - -var ffjKeylayerMountPointMountCount = []byte("count") - -// UnmarshalJSON umarshall json - template of ffjson -func (j *layerMountPoint) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *layerMountPoint) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtlayerMountPointbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtlayerMountPointnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - case 'c': - - if bytes.Equal(ffjKeylayerMountPointMountCount, kn) { - currentKey = ffjtlayerMountPointMountCount - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'i': - - if bytes.Equal(ffjKeylayerMountPointID, kn) { - currentKey = ffjtlayerMountPointID - state = fflib.FFParse_want_colon - goto mainparse - } - - case 'p': - - if bytes.Equal(ffjKeylayerMountPointMountPoint, kn) { - currentKey = ffjtlayerMountPointMountPoint - state = fflib.FFParse_want_colon - goto mainparse - } - - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountCount, kn) { - currentKey = ffjtlayerMountPointMountCount - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointMountPoint, kn) { - currentKey = ffjtlayerMountPointMountPoint - state = fflib.FFParse_want_colon - goto mainparse - } - - if fflib.SimpleLetterEqualFold(ffjKeylayerMountPointID, kn) { - currentKey = ffjtlayerMountPointID - state = fflib.FFParse_want_colon - goto mainparse - } - - currentKey = ffjtlayerMountPointnosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtlayerMountPointID: - goto handle_ID - - case ffjtlayerMountPointMountPoint: - goto handle_MountPoint - - case ffjtlayerMountPointMountCount: - goto handle_MountCount - - case ffjtlayerMountPointnosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -handle_ID: - - /* handler: j.ID type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.ID = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountPoint: - - /* handler: j.MountPoint type=string kind=string quoted=false*/ - - { - - { - if tok != fflib.FFTok_string && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for string", tok)) - } - } - - if tok == fflib.FFTok_null { - - } else { - - outBuf := fs.Output.Bytes() - - j.MountPoint = string(string(outBuf)) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -handle_MountCount: - - /* handler: j.MountCount type=int kind=int quoted=false*/ - - { - if tok != fflib.FFTok_integer && tok != fflib.FFTok_null { - return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for int", tok)) - } - } - - { - - if tok == fflib.FFTok_null { - - } else { - - tval, err := fflib.ParseInt(fs.Output.Bytes(), 10, 64) - - if err != nil { - return fs.WrapErr(err) - } - - j.MountCount = int(tval) - - } - } - - state = fflib.FFParse_after_value - goto mainparse - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return 
fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *layerStore) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *layerStore) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtlayerStorebase = iota - ffjtlayerStorenosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *layerStore) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *layerStore) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtlayerStorebase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtlayerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtlayerStorenosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtlayerStorenosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} - -// MarshalJSON marshal bytes to json - template -func (j *simpleGetCloser) MarshalJSON() ([]byte, error) { - var buf fflib.Buffer - if j == nil { - buf.WriteString("null") - return buf.Bytes(), nil - } - err := j.MarshalJSONBuf(&buf) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshal buff to json - template -func (j *simpleGetCloser) MarshalJSONBuf(buf fflib.EncodingBuffer) error { - if j == nil { - buf.WriteString("null") - return nil - } - var err error - var obj []byte - _ = obj - _ = err - buf.WriteString(`{}`) - return nil -} - -const ( - ffjtsimpleGetCloserbase = iota - ffjtsimpleGetClosernosuchkey -) - -// UnmarshalJSON umarshall json - template of ffjson -func (j *simpleGetCloser) UnmarshalJSON(input []byte) error { - fs := fflib.NewFFLexer(input) - return j.UnmarshalJSONFFLexer(fs, fflib.FFParse_map_start) -} - -// UnmarshalJSONFFLexer fast json unmarshall - template ffjson -func (j *simpleGetCloser) UnmarshalJSONFFLexer(fs *fflib.FFLexer, state fflib.FFParseState) error { - var err error - currentKey := ffjtsimpleGetCloserbase - _ = currentKey - tok := fflib.FFTok_init - wantedTok := fflib.FFTok_init - -mainparse: - for { - tok = fs.Scan() - // println(fmt.Sprintf("debug: tok: %v state: %v", tok, state)) - if tok == fflib.FFTok_error { - goto tokerror - } - - switch state { - - case fflib.FFParse_map_start: - if tok != fflib.FFTok_left_bracket { - wantedTok = fflib.FFTok_left_bracket - goto wrongtokenerror - } - state = fflib.FFParse_want_key - continue - - case fflib.FFParse_after_value: - if tok == fflib.FFTok_comma { - state = fflib.FFParse_want_key - } else if tok == fflib.FFTok_right_bracket { - goto done - } else { - wantedTok = fflib.FFTok_comma - goto wrongtokenerror - } - - case fflib.FFParse_want_key: - // json {} ended. goto exit. woo. - if tok == fflib.FFTok_right_bracket { - goto done - } - if tok != fflib.FFTok_string { - wantedTok = fflib.FFTok_string - goto wrongtokenerror - } - - kn := fs.Output.Bytes() - if len(kn) <= 0 { - // "" case. hrm. 
- currentKey = ffjtsimpleGetClosernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } else { - switch kn[0] { - - } - - currentKey = ffjtsimpleGetClosernosuchkey - state = fflib.FFParse_want_colon - goto mainparse - } - - case fflib.FFParse_want_colon: - if tok != fflib.FFTok_colon { - wantedTok = fflib.FFTok_colon - goto wrongtokenerror - } - state = fflib.FFParse_want_value - continue - case fflib.FFParse_want_value: - - if tok == fflib.FFTok_left_brace || tok == fflib.FFTok_left_bracket || tok == fflib.FFTok_integer || tok == fflib.FFTok_double || tok == fflib.FFTok_string || tok == fflib.FFTok_bool || tok == fflib.FFTok_null { - switch currentKey { - - case ffjtsimpleGetClosernosuchkey: - err = fs.SkipField(tok) - if err != nil { - return fs.WrapErr(err) - } - state = fflib.FFParse_after_value - goto mainparse - } - } else { - goto wantedvalue - } - } - } - -wantedvalue: - return fs.WrapErr(fmt.Errorf("wanted value token, but got token: %v", tok)) -wrongtokenerror: - return fs.WrapErr(fmt.Errorf("ffjson: wanted token: %v, but got token: %v output=%s", wantedTok, tok, fs.Output.String())) -tokerror: - if fs.BigError != nil { - return fs.WrapErr(fs.BigError) - } - err = fs.Error.ToError() - if err != nil { - return fs.WrapErr(err) - } - panic("ffjson-generated: unreachable, please report bug.") -done: - - return nil -} diff --git a/vendor/github.com/containers/storage/lockfile.go b/vendor/github.com/containers/storage/lockfile.go deleted file mode 100644 index c1aa482f85..0000000000 --- a/vendor/github.com/containers/storage/lockfile.go +++ /dev/null @@ -1,82 +0,0 @@ -package storage - -import ( - "fmt" - "path/filepath" - "sync" - "time" - - "github.com/pkg/errors" -) - -// A Locker represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -type Locker interface { - sync.Locker - - // Touch records, for others sharing the lock, that the caller was the - // last writer. It should only be called with the lock held. - Touch() error - - // Modified() checks if the most recent writer was a party other than the - // last recorded writer. It should only be called with the lock held. - Modified() (bool, error) - - // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. - TouchedSince(when time.Time) bool - - // IsReadWrite() checks if the lock file is read-write - IsReadWrite() bool -} - -var ( - lockfiles map[string]Locker - lockfilesLock sync.Mutex -) - -// GetLockfile opens a read-write lock file, creating it if necessary. The -// Locker object it returns will be returned unlocked. -func GetLockfile(path string) (Locker, error) { - lockfilesLock.Lock() - defer lockfilesLock.Unlock() - if lockfiles == nil { - lockfiles = make(map[string]Locker) - } - cleanPath := filepath.Clean(path) - if locker, ok := lockfiles[cleanPath]; ok { - if !locker.IsReadWrite() { - return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath) - } - return locker, nil - } - locker, err := getLockFile(path, false) // platform dependent locker - if err != nil { - return nil, err - } - lockfiles[filepath.Clean(path)] = locker - return locker, nil -} - -// GetROLockfile opens a read-only lock file. The Locker object it returns -// will be returned unlocked. 
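Reviewer note: the hunk above begins removing the vendored containers/storage lock-file API. As a reminder of what callers lose, here is a minimal sketch of driving the removed `Locker` interface, reconstructed from the deleted `GetLockfile` and interface declarations above; the lock path is hypothetical and error handling is abbreviated.

```go
package main

import (
	"fmt"
	"time"

	"github.com/containers/storage"
)

func main() {
	// GetLockfile returns an unlocked read-write Locker, creating the
	// lock file if necessary (per the deleted doc comment above).
	lock, err := storage.GetLockfile("/var/lib/example/storage.lock") // hypothetical path
	if err != nil {
		panic(err)
	}

	lock.Lock()
	defer lock.Unlock()

	// Modified reports whether some other party wrote after we last
	// recorded ourselves as the writer; Touch records us as the writer.
	if changed, err := lock.Modified(); err == nil && changed {
		fmt.Println("state changed since our last Touch; reload before writing")
	}
	if err := lock.Touch(); err != nil {
		panic(err)
	}

	// TouchedSince answers whether anyone touched the lock after a point in time.
	fmt.Println("touched in last hour:", lock.TouchedSince(time.Now().Add(-time.Hour)))
}
```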
-func GetROLockfile(path string) (Locker, error) { - lockfilesLock.Lock() - defer lockfilesLock.Unlock() - if lockfiles == nil { - lockfiles = make(map[string]Locker) - } - cleanPath := filepath.Clean(path) - if locker, ok := lockfiles[cleanPath]; ok { - if locker.IsReadWrite() { - return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath) - } - return locker, nil - } - locker, err := getLockFile(path, true) // platform dependent locker - if err != nil { - return nil, err - } - lockfiles[filepath.Clean(path)] = locker - return locker, nil -} diff --git a/vendor/github.com/containers/storage/lockfile_darwin.go b/vendor/github.com/containers/storage/lockfile_darwin.go deleted file mode 100644 index 041d54c057..0000000000 --- a/vendor/github.com/containers/storage/lockfile_darwin.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build darwin freebsd - -package storage - -import ( - "time" - - "golang.org/x/sys/unix" -) - -func (l *lockfile) TouchedSince(when time.Time) bool { - st := unix.Stat_t{} - err := unix.Fstat(int(l.fd), &st) - if err != nil { - return true - } - touched := time.Unix(st.Mtimespec.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/lockfile_linux.go b/vendor/github.com/containers/storage/lockfile_linux.go deleted file mode 100644 index 903387c661..0000000000 --- a/vendor/github.com/containers/storage/lockfile_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build linux solaris - -package storage - -import ( - "time" - - "golang.org/x/sys/unix" -) - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *lockfile) TouchedSince(when time.Time) bool { - st := unix.Stat_t{} - err := unix.Fstat(int(l.fd), &st) - if err != nil { - return true - } - touched := time.Unix(st.Mtim.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/lockfile_unix.go b/vendor/github.com/containers/storage/lockfile_unix.go deleted file mode 100644 index 6792592344..0000000000 --- a/vendor/github.com/containers/storage/lockfile_unix.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build linux solaris darwin freebsd - -package storage - -import ( - "os" - "sync" - "time" - - "github.com/containers/storage/pkg/stringid" - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func getLockFile(path string, ro bool) (Locker, error) { - var fd int - var err error - if ro { - fd, err = unix.Open(path, os.O_RDONLY, 0) - } else { - fd, err = unix.Open(path, os.O_RDWR|os.O_CREATE, unix.S_IRUSR|unix.S_IWUSR) - } - if err != nil { - return nil, errors.Wrapf(err, "error opening %q", path) - } - unix.CloseOnExec(fd) - if ro { - return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK}, nil - } - return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK}, nil -} - -type lockfile struct { - mu sync.Mutex - file string - fd uintptr - lw string - locktype int16 -} - -// Lock locks the lock file -func (l *lockfile) Lock() { - lk := unix.Flock_t{ - Type: l.locktype, - Whence: int16(os.SEEK_SET), - Start: 0, - Len: 0, - Pid: int32(os.Getpid()), - } - l.mu.Lock() - for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { - time.Sleep(10 * time.Millisecond) - } -} - -// Unlock unlocks the lock file -func (l *lockfile) Unlock() { - lk := unix.Flock_t{ - Type: unix.F_UNLCK, - Whence: int16(os.SEEK_SET), - Start: 0, - Len: 0, - Pid: int32(os.Getpid()), - } - for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { - time.Sleep(10 * 
time.Millisecond) - } - l.mu.Unlock() -} - -// Touch updates the lock file with the UID of the user -func (l *lockfile) Touch() error { - l.lw = stringid.GenerateRandomID() - id := []byte(l.lw) - _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) - if err != nil { - return err - } - n, err := unix.Write(int(l.fd), id) - if err != nil { - return err - } - if n != len(id) { - return unix.ENOSPC - } - err = unix.Fsync(int(l.fd)) - if err != nil { - return err - } - return nil -} - -// Modified indicates if the lock file has been updated since the last time it was loaded -func (l *lockfile) Modified() (bool, error) { - id := []byte(l.lw) - _, err := unix.Seek(int(l.fd), 0, os.SEEK_SET) - if err != nil { - return true, err - } - n, err := unix.Read(int(l.fd), id) - if err != nil { - return true, err - } - if n != len(id) { - return true, unix.ENOSPC - } - lw := l.lw - l.lw = string(id) - return l.lw != lw, nil -} - -// IsRWLock indicates if the lock file is a read-write lock -func (l *lockfile) IsReadWrite() bool { - return (l.locktype == unix.F_WRLCK) -} diff --git a/vendor/github.com/containers/storage/lockfile_windows.go b/vendor/github.com/containers/storage/lockfile_windows.go deleted file mode 100644 index ed6c5c4b2b..0000000000 --- a/vendor/github.com/containers/storage/lockfile_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build windows - -package storage - -import ( - "os" - "sync" - "time" -) - -func getLockFile(path string, ro bool) (Locker, error) { - return &lockfile{}, nil -} - -type lockfile struct { - mu sync.Mutex - file string -} - -func (l *lockfile) Lock() { -} -func (l *lockfile) Unlock() { -} -func (l *lockfile) Modified() (bool, error) { - return false, nil -} -func (l *lockfile) Touch() error { - return nil -} -func (l *lockfile) IsReadWrite() bool { - return false -} - -func (l *lockfile) TouchedSince(when time.Time) bool { - stat, err := os.Stat(l.file) - if err != nil { - return true - } - return when.Before(stat.ModTime()) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go deleted file mode 100644 index abee36f7e4..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive.go +++ /dev/null @@ -1,1251 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/ioutils" - "github.com/containers/storage/pkg/pools" - "github.com/containers/storage/pkg/promise" - "github.com/containers/storage/pkg/system" - "github.com/sirupsen/logrus" -) - -type ( - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *idtools.IDPair - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. 
- NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - InUserNS bool - } -) - -// Archiver allows the reuse of most utility functions of this package -// with a pluggable Untar function. Also, to facilitate the passing of -// specific id mappings for untar, an archiver can be created with maps -// which will then be passed to Untar operations -type Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - IDMappings *idtools.IDMappings -} - -// NewDefaultArchiver returns a new Archiver without any IDMappings -func NewDefaultArchiver() *Archiver { - return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}} -} - -// breakoutError is used to differentiate errors related to breaking out -// When testing archive breakout in the unit tests, this error is expected -// in order for the test to pass. -type breakoutError error - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Equal(m, source[:len(m)]) { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. 
- // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresses the dest with specified compression algorithm. -func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to -// modify the contents or header of an entry in the archive. If the file already -// exists in the archive the TarModifierFunc will be called with the Header and -// a reader which will return the files content. If the file does not exist both -// header and content will be nil. -type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) - -// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the -// tar stream are modified if they match any of the keys in mods. 
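Reviewer note: `DetectCompression` above sniffs magic bytes (bzip2 `42 5A 68`, gzip `1F 8B 08`, xz `FD 37 7A 58 5A 00`) instead of trusting file extensions, and `DecompressStream` wraps a reader accordingly. A hedged sketch of feeding an arbitrary layer file through the removed helper; the input file name is hypothetical.

```go
package main

import (
	"archive/tar"
	"fmt"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// The stream may be gzip, bzip2, xz, or plain tar; DecompressStream
	// sniffs the leading magic bytes and picks the right decompressor.
	f, err := os.Open("layer.tar.gz") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	rc, err := archive.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// List entry names from the now-uncompressed tar stream.
	tr := tar.NewReader(rc)
	for {
		hdr, err := tr.Next()
		if err != nil {
			break // io.EOF ends the archive
		}
		fmt.Println(hdr.Name)
	}
}
```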
-func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - defer inputTarStream.Close() - defer tarWriter.Close() - - modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { - header, data, err := modifier(name, original, tarReader) - switch { - case err != nil: - return err - case header == nil: - return nil - } - - header.Name = name - header.Size = int64(len(data)) - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - if len(data) != 0 { - if _, err := tarWriter.Write(data); err != nil { - return err - } - } - return nil - } - - var err error - var originalHeader *tar.Header - for { - originalHeader, err = tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - modifier, ok := mods[originalHeader.Name] - if !ok { - // No modifiers for this file, copy the header and data - if err := tarWriter.WriteHeader(originalHeader); err != nil { - pipeWriter.CloseWithError(err) - return - } - if _, err := pools.Copy(tarWriter, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - continue - } - delete(mods, originalHeader.Name) - - if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - // Apply the modifiers that haven't matched any files in the archive - for name, modifier := range mods { - if err := modify(name, nil, modifier, nil); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - - pipeWriter.Close() - - }() - return pipeReader -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -// FileInfoHeader creates a populated Header from fi. -// Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
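Reviewer note: the `ReplaceFileTarWrapper` removed above rewrites matching entries while streaming, and appends any modifier that matched nothing at the end, so it effectively upserts files into a tar stream. A minimal sketch under those semantics; the file names are hypothetical.

```go
package main

import (
	"archive/tar"
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	in, err := os.Open("rootfs.tar") // hypothetical input
	if err != nil {
		panic(err)
	}

	mods := map[string]archive.TarModifierFunc{
		// Called with (header, content) if etc/hostname exists in the
		// stream, or (nil, nil) if it does not; returning a header and
		// payload upserts the entry either way. Name and Size are set
		// by the wrapper from the map key and the returned bytes.
		"etc/hostname": func(path string, hdr *tar.Header, _ io.Reader) (*tar.Header, []byte, error) {
			if hdr == nil {
				hdr = &tar.Header{Typeflag: tar.TypeReg, Mode: 0644}
			}
			return hdr, []byte("builder\n"), nil
		},
	}

	out, err := os.Create("rootfs-patched.tar") // hypothetical output
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The wrapper closes the input stream itself when the goroutine ends.
	if _, err := io.Copy(out, archive.ReplaceFileTarWrapper(in, mods)); err != nil {
		panic(err)
	}
}
```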
-func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return nil, err - } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { - return nil, err - } - return hdr, nil -} - -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - -// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem -// to a tar header -func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - return nil -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - IDMappings *idtools.IDMappings - ChownOpts *idtools.IDPair - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { - return &tarAppender{ - SeenFiles: make(map[uint64]string), - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - IDMappings: idMapping, - ChownOpts: chownOpts, - } -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
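Reviewer note: the point of `fillGo18FileTypeBits` above is that Go 1.9's `archive/tar` stopped putting file-type bits into `Header.Mode`, while consumers of this package still compare raw modes. A small sketch showing the removed `FileInfoHeader` restoring the directory bit (`040000`, `modeISDIR`); any directory works for the demo.

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	fi, err := os.Lstat("/tmp") // any existing directory
	if err != nil {
		panic(err)
	}

	// Unlike stock tar.FileInfoHeader since Go 1.9, the removed helper
	// re-adds the file-type bits to hdr.Mode, so raw-mode comparisons
	// (e.g. mode&modeISDIR) keep working downstream.
	hdr, err := archive.FileInfoHeader("tmp", fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Printf("mode=%o dir-bit set: %v\n", hdr.Mode, hdr.Mode&040000 != 0)
}
```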
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - var link string - if fi.Mode()&os.ModeSymlink != 0 { - var err error - link, err = os.Readlink(path) - if err != nil { - return err - } - } - - hdr, err := FileInfoHeader(name, fi, link) - if err != nil { - return err - } - if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hard linked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - inode, err := getInodeFromStat(fi.Sys()) - if err != nil { - return err - } - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() { - fileIDPair, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) - if err != nil { - return err - } - } - - // explicitly override with ChownOpts - if ta.ChownOpts != nil { - hdr.Uid = ta.ChownOpts.UID - hdr.Gid = ta.ChownOpts.GID - } - - if ta.WhiteoutConverter != nil { - wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) - if err != nil { - return err - } - - // If a new whiteout file exists, write original hdr, then - // replace hdr with wo to be written after. Whiteouts should - // always be written after the original. Note the original - // hdr may have been updated to be a whiteout with returning - // a whiteout header - if wo != nil { - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - return fmt.Errorf("tar: cannot use whiteout for non-empty file") - } - hdr = wo - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - // We use system.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - // On Linux, this equates to a regular os.Open. - file, err := system.OpenSequential(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. 
setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file. We use system.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - // On Linux, this equates to a regular os.OpenFile - file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar: - if inUserns { // cannot create devices in a userns - return nil - } - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. 
- aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. - srcPath = fixVolumePathPrefix(srcPath) - - pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := newTarAppender( - idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), - compressWriter, - options.ChownOpts, - ) - ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. 
- if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = pm.Matches(relFilePath) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (e.g. !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. - if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !pm.Exclusions() { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. 
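Reviewer note: the walk logic deleted above has two subtleties worth remembering: an include that exactly names a file beats any matching exclude pattern, and exclusion exceptions (`!...`) can re-admit children of an otherwise skipped directory. A hedged usage sketch of the removed `TarWithOptions`; the source directory and output name are hypothetical.

```go
package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	rc, err := archive.TarWithOptions("/srv/app", &archive.TarOptions{ // hypothetical source dir
		Compression:     archive.Gzip,
		ExcludePatterns: []string{"tmp", "*.log"},
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	out, err := os.Create("app.tar.gz") // hypothetical output
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The archive is produced by a goroutine writing into a pipe, so
	// copying here drives the walk shown in the deleted code above.
	if _, err := io.Copy(out, rc); err != nil {
		panic(err)
	}
}
```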
-func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." 
{ - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := remapIDs(idMappings, hdr); err != nil { - return err - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - options := &TarOptions{ - UIDMaps: archiver.IDMappings.UIDs(), - GIDMaps: archiver.IDMappings.GIDs(), - } - return archiver.Untar(archive, dst, options) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. 
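[Editor's note: Unpack's breakoutError above is the standard defense against "tar slip": after joining the entry name onto dest, the result must still resolve inside dest. The check in isolation, as a sketch; securePath is a hypothetical helper and adds an explicit rel == ".." case on top of the prefix test shown above.]

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// securePath joins name onto dest and rejects entries that would
// escape dest, mirroring the filepath.Rel check in Unpack.
func securePath(dest, name string) (string, error) {
	path := filepath.Join(dest, filepath.Clean(name))
	rel, err := filepath.Rel(dest, path)
	if err != nil {
		return "", err
	}
	if rel == ".." || strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
		return "", fmt.Errorf("%q is outside of %q", name, dest)
	}
	return path, nil
}

func main() {
	fmt.Println(securePath("/tmp/root", "a/b.txt"))           // kept
	fmt.Println(securePath("/tmp/root", "../../etc/passwd"))  // rejected
}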
-func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootIDs := archiver.IDMappings.RootPair() - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - if err := remapIDs(archiver.IDMappings, hdr); err != nil { - return err - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err == nil && er != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { - ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) - hdr.Uid, hdr.Gid = ids.UID, ids.GID - return err -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. -func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(chdone) - }() - - return pipeR, chdone, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. 
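[Editor's note: cmdStream above wires an exec.Cmd's stdout into an io.Pipe and folds anything the child wrote to stderr into the eventual error. The same shape in isolation; the gzip invocation and input filename are assumptions for the example, not taken from the package.]

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

// cmdStdout runs cmd and returns its stdout as a stream. If the
// command fails, the pipe is closed with an error that includes the
// captured stderr — the cmdStream pattern, minus the done channel.
func cmdStdout(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) {
	cmd.Stdin = input
	pr, pw := io.Pipe()
	cmd.Stdout = pw
	var errBuf bytes.Buffer
	cmd.Stderr = &errBuf

	if err := cmd.Start(); err != nil {
		return nil, err
	}
	go func() {
		if err := cmd.Wait(); err != nil {
			pw.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String()))
		} else {
			pw.Close()
		}
	}()
	return pr, nil
}

func main() {
	f, err := os.Open("archive.tar.gz") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	out, err := cmdStdout(exec.Command("gzip", "-d", "-c"), f)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	io.Copy(os.Stdout, out)
}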
-func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} - -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return NewDefaultArchiver().UntarPath(src, dst) -} - -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go b/vendor/github.com/containers/storage/pkg/archive/archive_linux.go deleted file mode 100644 index 5a14eb91a9..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_linux.go +++ /dev/null @@ -1,92 +0,0 @@ -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - - "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return nil, err - } - if len(opaque) == 1 && opaque[0] == 'y' { - if hdr.Xattrs != nil { - delete(hdr.Xattrs, "trusted.overlay.opaque") - } - - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - wo = &tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: 
filepath.Join(hdr.Name, WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) - // don't write the file itself - return false, err - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_other.go b/vendor/github.com/containers/storage/pkg/archive/archive_other.go deleted file mode 100644 index 54acbf2856..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go b/vendor/github.com/containers/storage/pkg/archive/archive_unix.go deleted file mode 100644 index bdc1a3d794..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_unix.go +++ /dev/null @@ -1,122 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
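[Editor's note: ConvertRead above translates AUFS-style whiteout entries into their overlayfs representation while a layer is unpacked. A Linux-only sketch of the two cases, assuming the ".wh." and ".wh..wh..opq" naming conventions spelled out by the package constants; applyWhiteout is a hypothetical helper and the mknod/setxattr calls need root or CAP_MKNOD.]

// +build linux

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"golang.org/x/sys/unix"
)

const whiteoutPrefix = ".wh."

// applyWhiteout handles one archive entry: the opaque-dir marker
// becomes the trusted.overlay.opaque xattr on the parent, and a
// ".wh.<name>" file becomes a 0:0 character device. It reports
// whether the caller should still write the entry itself.
func applyWhiteout(path string, uid, gid int) (writeFile bool, err error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	if base == whiteoutPrefix+whiteoutPrefix+".opq" { // ".wh..wh..opq"
		return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
	}
	if strings.HasPrefix(base, whiteoutPrefix) {
		orig := filepath.Join(dir, base[len(whiteoutPrefix):])
		if err := unix.Mknod(orig, unix.S_IFCHR, 0); err != nil {
			return false, err
		}
		return false, os.Chown(orig, uid, gid)
	}
	return true, nil // a normal entry: let the caller extract it
}

func main() {
	// Hypothetical path; fails without privileges, which is expected.
	if _, err := applyWhiteout("/tmp/layer/.wh.deleted", 0, 0); err != nil {
		fmt.Println(err)
	}
}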
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - // Currently go does not fill in the major/minors - if s.Mode&unix.S_IFBLK != 0 || - s.Mode&unix.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) // nolint: unconvert - hdr.Devminor = int64(minor(uint64(s.Rdev))) // nolint: unconvert - } - } - - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if ok { - inode = s.Ino - } - - return -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") - } - return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - if rsystem.RunningInUserNS() { - // cannot create a device if running in user namespace - return nil - } - - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= unix.S_IFBLK - case tar.TypeChar: - mode |= unix.S_IFCHR - case tar.TypeFifo: - mode |= unix.S_IFIFO - } - - return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go b/vendor/github.com/containers/storage/pkg/archive/archive_windows.go deleted file mode 100644 index 0bcbb925d2..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/archive_windows.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/longpath" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return longpath.AddPrefix(srcPath) -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. -func getWalkRoot(srcPath string, include string) string { - return filepath.Join(srcPath, include) -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. 
Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) - permPart := perm & os.ModePerm - noPermPart := perm &^ os.ModePerm - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - - return noPermPart | permPart -} - -func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { - // do nothing. no notion of Rdev, Nlink in stat on Windows - return -} - -func getInodeFromStat(stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Inode in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { - // no notion of file ownership mapping yet on Windows - return idtools.IDPair{0, 0}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes.go b/vendor/github.com/containers/storage/pkg/archive/changes.go deleted file mode 100644 index 6ba4b8ec6f..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/changes.go +++ /dev/null @@ -1,441 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/pools" - "github.com/containers/storage/pkg/system" - "github.com/sirupsen/logrus" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. 
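[Editor's note: chmodTarEntry above normalizes Windows permissions for tar headers — keep the non-permission mode bits, force execute, clamp to 0755. The bit manipulation in isolation, as a sketch; normalizePerm is a hypothetical name.]

package main

import (
	"fmt"
	"os"
)

// normalizePerm reproduces the Windows chmodTarEntry logic: permission
// bits are forced to include +x and clamped to 0755, while type bits
// (directory, symlink, ...) pass through untouched.
func normalizePerm(perm os.FileMode) os.FileMode {
	permPart := perm & os.ModePerm    // rwxrwxrwx bits only
	noPermPart := perm &^ os.ModePerm // everything else (file type flags)
	permPart |= 0111                  // make everything executable
	permPart &= 0755                  // drop group/other write
	return noPermPart | permPart
}

func main() {
	fmt.Printf("%o\n", normalizePerm(0666)) // 755
	fmt.Printf("%o\n", normalizePerm(0444)) // 555
}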
-type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. 
- // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. - return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. 
The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - !bytes.Equal(oldChild.capability, newChild.capability) { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. -func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. 
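[Editor's note: addChanges above slots the parent directory's ChangeModify entry in front of the child entries that were just appended, using Go's append+copy idiom for in-place slice insertion. The idiom on its own, with hypothetical names:]

package main

import "fmt"

// insertAt inserts v at index i, shifting the tail right — the same
// append+copy trick addChanges uses above.
func insertAt(s []string, i int, v string) []string {
	s = append(s, v)     // grow by one; the value here is overwritten below
	copy(s[i+1:], s[i:]) // shift the tail right by one slot
	s[i] = v
	return s
}

func main() {
	changes := []string{"/a/file1", "/a/file2"}
	changes = insertAt(changes, 0, "/a")
	fmt.Println(changes) // [/a /a/file1 /a/file2]
}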
-func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { - reader, writer := io.Pipe() - go func() { - ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go b/vendor/github.com/containers/storage/pkg/archive/changes_linux.go deleted file mode 100644 index 90c9a627e5..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/changes_linux.go +++ /dev/null @@ -1,313 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. 
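[Editor's note: ExportChanges above records a deletion by writing a zero-size ".wh."-prefixed entry rather than any file content. A sketch of emitting such a whiteout header, assuming the AUFS ".wh." convention; writeWhiteout is a hypothetical helper.]

package main

import (
	"archive/tar"
	"os"
	"path/filepath"
	"time"
)

// writeWhiteout writes the tar entry that marks deletedPath as removed
// in a layer: an empty file named ".wh.<base>" next to the original,
// with the leading "/" stripped as ExportChanges does.
func writeWhiteout(tw *tar.Writer, deletedPath string) error {
	dir, base := filepath.Split(deletedPath)
	now := time.Now()
	return tw.WriteHeader(&tar.Header{
		Typeflag:   tar.TypeReg,
		Name:       filepath.Join(dir, ".wh."+base)[1:],
		Size:       0,
		ModTime:    now,
		AccessTime: now,
		ChangeTime: now,
	})
}

func main() {
	tw := tar.NewWriter(os.Stdout)
	defer tw.Close()
	if err := writeWhiteout(tw, "/etc/removed.conf"); err != nil {
		panic(err)
	}
}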
-func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. 
- var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of unix.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -// OverlayChanges walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func OverlayChanges(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, overlayDeletedFile, nil) -} - -func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { - if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if major(s.Rdev) == 0 && minor(s.Rdev) == 0 { - return path, nil - } - } - if fi.Mode()&os.ModeDir != 0 { - opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") - if err != nil { - return "", err - } - if len(opaque) == 1 && opaque[0] == 'y' { - return path, nil - } - } - - return "", nil - -} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go deleted file mode 100644 index e1d1e7a91e..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/containers/storage/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go b/vendor/github.com/containers/storage/pkg/archive/changes_unix.go deleted file mode 100644 index d669c01b46..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/changes_unix.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "syscall" - - "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return fi.Sys().(*syscall.Stat_t).Ino -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go b/vendor/github.com/containers/storage/pkg/archive/changes_windows.go deleted file mode 100644 index 5ad3d7e38d..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/containers/storage/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mtim() != newStat.Mtim() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode().IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy.go b/vendor/github.com/containers/storage/pkg/archive/copy.go deleted file mode 100644 index ea012b2d99..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/copy.go +++ /dev/null @@ -1,461 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/containers/storage/pkg/system" - "github.com/sirupsen/logrus" -) - -// Errors used or returned by this file. 
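[Editor's note: statDifferent above treats a file as changed when ownership, mode, or device differ, and only consults size and mtime for non-directories, since a directory's size and mtime shift whenever children are touched. A reduced stand-in over os.FileInfo — it omits the UID/GID/Rdev fields and the sub-second mtime tolerance, which portable FileInfo doesn't expose:]

package main

import (
	"fmt"
	"os"
)

// statDiffers is a simplified statDifferent: mode changes always
// count; size and mtime only count for non-directories.
func statDiffers(oldFi, newFi os.FileInfo) bool {
	if oldFi.Mode() != newFi.Mode() {
		return true
	}
	if !oldFi.IsDir() &&
		(oldFi.Size() != newFi.Size() || !oldFi.ModTime().Equal(newFi.ModTime())) {
		return true
	}
	return false
}

func main() {
	a, err := os.Lstat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	b, _ := os.Lstat("/etc/hosts")
	fmt.Println(statDiffers(a, b)) // false: same file, identical stat
}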
-var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". 
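[Editor's note: the trailing "/." convention documented above matters because filepath.Clean erases exactly the suffix that distinguishes "copy the directory" from "copy the directory's contents". A sketch of the "."-preserving branch of PreserveTrailingDotOrSeparator only (the plain trailing-separator branch is omitted); preserveDot is a hypothetical name.]

package main

import (
	"fmt"
	"path/filepath"
)

// preserveDot re-attaches a trailing "/." that filepath.Clean removed,
// using the original (uncleaned) path to decide whether it was there.
func preserveDot(original string) string {
	cleaned := filepath.Clean(original)
	if filepath.Base(original) == "." && filepath.Base(cleaned) != "." {
		if cleaned[len(cleaned)-1] != filepath.Separator {
			cleaned += string(filepath.Separator)
		}
		cleaned += "."
	}
	return cleaned
}

func main() {
	fmt.Println(preserveDot("/data/src/"))  // /data/src
	fmt.Println(preserveDot("/data/src/.")) // /data/src/.
}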
-func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - - return TarWithOptions(sourceDir, &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - }) -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. 
- dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. - dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. 
- return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - if hdr.Typeflag == tar.TypeLink { - hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) - } - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. 
- dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. -func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. 
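RebaseArchiveEntries above renames tar entries on the fly through an io.Pipe rather than buffering the whole archive. Below is a self-contained sketch of that same streaming pattern (hard-link handling omitted, names illustrative), followed by a usage that rebases "old" to "new":

package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"strings"
)

// rebase streams src through a pipe, rewriting the first occurrence of
// oldBase in each entry name, just like RebaseArchiveEntries above.
func rebase(src io.Reader, oldBase, newBase string) io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		tr := tar.NewReader(src)
		tw := tar.NewWriter(w)
		for {
			hdr, err := tr.Next()
			if err == io.EOF {
				tw.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}
			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
			if err := tw.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}
			if _, err := io.Copy(tw, tr); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()
	return r
}

func main() {
	// Build a one-entry tar in memory; write errors elided for brevity.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	tw.WriteHeader(&tar.Header{Name: "old/a.txt", Size: 2})
	tw.Write([]byte("hi"))
	tw.Close()

	tr := tar.NewReader(rebase(&buf, "old", "new"))
	hdr, _ := tr.Next()
	fmt.Println(hdr.Name) // prints "new/a.txt"
}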
- rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go b/vendor/github.com/containers/storage/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4af..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go b/vendor/github.com/containers/storage/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c4..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/diff.go b/vendor/github.com/containers/storage/pkg/archive/diff.go deleted file mode 100644 index f93f4cb175..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/diff.go +++ /dev/null @@ -1,256 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/pools" - "github.com/containers/storage/pkg/system" - "github.com/sirupsen/logrus" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. 
- if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600, "") - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "storageplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { - return 0, err - } - } - - if hdr.Name != WhiteoutOpaqueDir { - continue - } - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, WhiteoutPrefix) { - dir := filepath.Dir(path) - if base == WhiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - } else { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). 
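The whiteout handling above is easy to miss inside the larger loop: an entry named ".wh.<name>" is never extracted; instead it deletes <name> from the lower layer. A standalone sketch of just that rule, with a local constant mirroring WhiteoutPrefix (the helper name and example paths are assumptions):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

const whiteoutPrefix = ".wh."

// applyWhiteout deletes the path a whiteout entry refers to, reporting
// whether hdrName was a whiteout at all.
func applyWhiteout(dest, hdrName string) (bool, error) {
	base := filepath.Base(hdrName)
	if !strings.HasPrefix(base, whiteoutPrefix) {
		return false, nil // ordinary entry, extract as usual
	}
	original := filepath.Join(dest, filepath.Dir(hdrName), strings.TrimPrefix(base, whiteoutPrefix))
	return true, os.RemoveAll(original)
}

func main() {
	// Deletes /tmp/rootfs/usr/bin/vi if present.
	ok, err := applyWhiteout("/tmp/rootfs", "usr/bin/.wh.vi")
	fmt.Println(ok, err)
}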
- if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := remapIDs(idMappings, srcHdr); err != nil { - return 0, err - } - - if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. 
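For context, a caller of the ApplyLayer helper documented above would look roughly like the sketch below. It assumes a direct dependency on containers/storage (this diff removes kaniko's vendored copy), and the layer file name is illustrative; per the doc comment, the stream may be compressed or uncompressed.

package main

import (
	"fmt"
	"os"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // illustrative; gzip or plain tar both work
	if err != nil {
		panic(err)
	}
	defer f.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", f) // apply the diff onto the rootfs
	if err != nil {
		panic(err)
	}
	fmt.Printf("applied %d bytes\n", size)
}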
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/example_changes.go b/vendor/github.com/containers/storage/pkg/archive/example_changes.go deleted file mode 100644 index 70f9c5564a..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/containers/storage/pkg/archive" - "github.com/sirupsen/logrus" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "storage-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "storage-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_linux.go b/vendor/github.com/containers/storage/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1e..0000000000 --- 
a/vendor/github.com/containers/storage/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go b/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac0540..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go b/vendor/github.com/containers/storage/pkg/archive/whiteouts.go deleted file mode 100644 index d20478a10d..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/containers/storage/pkg/archive/wrap.go b/vendor/github.com/containers/storage/pkg/archive/wrap.go deleted file mode 100644 index b39d12c878..0000000000 --- a/vendor/github.com/containers/storage/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (io.Reader, error) { - files := parseStringPairs(input...) 
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return buf, nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go deleted file mode 100644 index 2735f14001..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,70 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/idtools" -) - -// NewArchiver returns a new Archiver which uses chrootarchive.Untar -func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } - return &archive.Archiver{Untar: Untar, IDMappings: idMappings} -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
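Generate, removed above with wrap.go, builds a throwaway tar from path/content pairs, with a trailing odd element becoming an empty file. A usage sketch that reads the entries back (again assuming a direct dependency on containers/storage rather than the vendored copy this diff deletes):

package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/containers/storage/pkg/archive"
)

func main() {
	// Mirrors the doc comment: two files, the last one empty.
	rdr, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(rdr)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(hdr.Name, hdr.Size)
	}
}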
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index e04ed787c0..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/reexec" -) - -// untar is the entry-point for storage-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. -func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). 
We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("storage-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | storage-untar ...` failed on storage-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Error processing tar file(%v): %s", err, output) - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 93fde42206..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go deleted file mode 100644 index f9b5dece8c..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux - -package chrootarchive - -import "golang.org/x/sys/unix" - -func chroot(path string) error { - if err := unix.Chroot(path); err != nil { - return err - } - return unix.Chdir("/") -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go deleted file mode 100644 index 68b8f74f77..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/containers/storage/pkg/archive" -) - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. 
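The invokeUnpack comment above explains why the options travel over a pipe instead of argv: a full exclude list can overrun the kernel's argument/environment size limit. Here is that parent/child handshake reduced to a runnable standalone sketch; the CHILD env flag and the options struct are invented for the example, and the real code re-execs a registered entry point via pkg/reexec instead of re-running main.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
)

type opts struct {
	ExcludePatterns []string `json:"exclude"`
}

func main() {
	if os.Getenv("CHILD") == "1" {
		// Child side: the extra pipe arrives as fd 3.
		var o opts
		if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&o); err != nil {
			panic(err)
		}
		fmt.Println("child got", o.ExcludePatterns)
		return
	}

	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command(os.Args[0]) // re-exec ourselves as the child
	cmd.Env = append(os.Environ(), "CHILD=1")
	cmd.ExtraFiles = []*os.File{r} // becomes fd 3 in the child
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Parent writes the options into the pipe after the child starts.
	if err := json.NewEncoder(w).Encode(opts{ExcludePatterns: []string{".git"}}); err != nil {
		panic(err)
	}
	w.Close()
	cmd.Wait()
}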
-func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index 4369f30c99..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/reexec" - "github.com/containers/storage/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for storage-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir string - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - inUserns := rsystem.RunningInUserNS() - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if inUserns { - options.InUserNS = true - } - - if tmpDir, err = ioutil.TempDir("/", "temp-storage-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("storage-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. 
- response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index 8f8e88bfbe..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/containers/storage/pkg/archive" - "github.com/containers/storage/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) - } - - return s, nil -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 21cd879928..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/containers/storage/pkg/reexec" -) - -func init() { - reexec.Register("storage-applyLayer", applyLayer) - reexec.Register("storage-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf83..0000000000 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go deleted file mode 100644 index 6a0ac24647..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper.go +++ /dev/null @@ -1,821 +0,0 @@ -// +build linux,cgo - -package devicemapper - -import ( - "errors" - "fmt" - "os" - "runtime" - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/unix" -) - -// Same as DM_DEVICE_* enum values from libdevmapper.h -// nolint: deadcode -const ( - deviceCreate TaskType = iota - deviceReload - deviceRemove - 
deviceRemoveAll - deviceSuspend - deviceResume - deviceInfo - deviceDeps - deviceRename - deviceVersion - deviceStatus - deviceTable - deviceWaitevent - deviceList - deviceClear - deviceMknodes - deviceListVersions - deviceTargetMsg - deviceSetGeometry -) - -const ( - addNodeOnResume AddNodeType = iota - addNodeOnCreate -) - -// List of errors returned when using devicemapper. -var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetDeps = errors.New("dm_task_get_deps failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") - ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") - ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalid AddNode type") - ErrBusy = errors.New("Device is Busy") - ErrDeviceIDExists = errors.New("Device Id Exists") - ErrEnxio = errors.New("No such device or address") -) - -var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address -) - -type ( - // Task represents a devicemapper task (like lvcreate, etc.) ; a task is needed for each ioctl - // command to execute. - Task struct { - unmanaged *cdmTask - } - // Deps represents dependents (layer) of a device. - Deps struct { - Count uint32 - Filler uint32 - Device []uint64 - } - // Info represents information about a device. - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - DeferredRemove int - } - // TaskType represents a type of task - TaskType int - // AddNodeType represents a type of node to be added - AddNodeType int -) - -// DeviceIDExists returns whether error conveys the information about device Id already -// exist or not. This will be true if device creation or snap creation -// operation fails if device or snap device already exists in pool. -// Current implementation is little crude as it scans the error string -// for exact pattern match. Replacing it with more robust implementation -// is desirable. 
-func DeviceIDExists(err error) bool { - return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) -} - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -// TaskCreateNamed is a convenience function for TaskCreate when a name -// will be set on the task as well -func TaskCreateNamed(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) - } - if err := task.setName(name); err != nil { - return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) - } - return task, nil -} - -// TaskCreate initializes a devicemapper task of tasktype -func TaskCreate(tasktype TaskType) *Task { - Ctask := DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - runtime.KeepAlive(t) - return nil -} - -func (t *Task) setName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) setMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) setSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) setCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) setAddNode(addNode AddNodeType) error { - if addNode != addNodeOnResume && addNode != addNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) setRo() error { - if res := DmTaskSetRo(t.unmanaged); res != 1 { - return ErrTaskSetRo - } - return nil -} - -func (t *Task) addTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) getDeps() (*Deps, error) { - var deps *Deps - if deps = DmTaskGetDeps(t.unmanaged); deps == nil { - return nil, ErrTaskGetDeps - } - return deps, nil -} - -func (t *Task) getInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getInfoWithDeferred() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getDriverVersion() (string, error) { - res := DmTaskGetDriverVersion(t.unmanaged) - if res == "" { - return "", ErrTaskGetDriverVersion - } - return res, nil -} - -func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string) { - - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
-func UdevWait(cookie *uint) error { - if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d, %d", *cookie, res) - return ErrUdevWait - } - return nil -} - -// SetDevDir sets the dev folder for the device mapper library (usually /dev). -func SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - logrus.Debug("devicemapper: Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -// GetLibraryVersion returns the device mapper library version. -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// UdevSyncSupported returns whether device-mapper is able to sync with udev -// -// This is essential otherwise race conditions can arise where both udev and -// device-mapper attempt to create and destroy devices. -func UdevSyncSupported() bool { - return DmUdevGetSyncSupport() != 0 -} - -// UdevSetSyncSupport allows setting whether the udev sync should be enabled. -// The return bool indicates the state of whether the sync is enabled. -func UdevSetSyncSupport(enable bool) bool { - if enable { - DmUdevSetSyncSupport(1) - } else { - DmUdevSetSyncSupport(0) - } - - return UdevSyncSupported() -} - -// CookieSupported returns whether the version of device-mapper supports the -// use of cookie's in the tasks. -// This is largely a lower level call that other functions use. -func CookieSupported() bool { - return DmCookieSupported() != 0 -} - -// RemoveDevice is a useful helper for cleaning up a device. -func RemoveDevice(name string) error { - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - defer UdevWait(cookie) - - dmSawBusy = false // reset before the task is run - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) - } - - return nil -} - -// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. -func RemoveDeviceDeferred(name string) error { - logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) - defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { - return ErrTaskDeferredRemove - } - - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. - cookie := new(uint) - var flags uint16 - flags = DmUdevDisableLibraryFallback - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev relies on System V semaphore for synchronization, - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`. - // So these two function call must come in pairs, otherwise semaphores will - // be leaked, and the limit of number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to 'task.SetCookie' - // fail. 
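RemoveDevice above turns the libdm "busy" log line into the ErrBusy sentinel precisely so callers can back off and retry, and the init comment later in this file notes that many libdm errors are masked by retrying. A hedged sketch of such a caller; the device name and retry policy are illustrative, and running it needs libdevmapper and root:

package main

import (
	"time"

	"github.com/containers/storage/pkg/devicemapper"
)

// removeWithRetry retries RemoveDevice while the device is reported busy.
func removeWithRetry(name string) error {
	for i := 0; i < 5; i++ {
		err := devicemapper.RemoveDevice(name)
		if err != devicemapper.ErrBusy {
			return err // nil on success, or a hard failure
		}
		time.Sleep(100 * time.Millisecond) // busy: back off and try again
	}
	return devicemapper.ErrBusy
}

func main() {
	if err := removeWithRetry("thin-12345"); err != nil {
		panic(err)
	}
}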
- // this call will not wait for the deferred removal's final executing, since no - // udev event will be generated, and the semaphore's value will not be incremented - // by udev, what UdevWait is just cleaning up the semaphore. - defer UdevWait(cookie) - - dmSawEnxio = false - if err = task.run(); err != nil { - if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) - } - - return nil -} - -// CancelDeferredRemove cancels a deferred remove for a device. -func CancelDeferredRemove(deviceName string) error { - task, err := TaskCreateNamed(deviceTargetMsg, deviceName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("@cancel_deferred_remove")); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnxio = false - if err := task.run(); err != nil { - // A device might be being deleted already - if dmSawBusy { - return ErrBusy - } else if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) - - } - return nil -} - -// GetBlockDeviceSize returns the size of a block device identified by the specified file. -func GetBlockDeviceSize(file *os.File) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -// BlockDeviceDiscard runs discard for the given path. -// This is used as a workaround for the kernel not discarding block so -// on the thin pool when we remove a thinp device, so we do it -// manually -func BlockDeviceDiscard(path string) error { - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this sometimes the remove of the device that happens after - // discard fails with EBUSY. - unix.Sync() - - return nil -} - -// CreatePool is the programmatic example of "dmsetup create". -// It creates a device with the specified poolName, data and metadata file and block size. -func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - cookie := new(uint) - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) - } - - return nil -} - -// ReloadPool is the programmatic example of "dmsetup reload". 
-// It reloads the table with the specified poolName, data and metadata file and block size. -func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running ReloadPool %s", err) - } - - return nil -} - -// GetDeps is the programmatic example of "dmsetup deps". -// It outputs a list of devices referenced by the live table for the specified device. -func GetDeps(name string) (*Deps, error) { - task, err := TaskCreateNamed(deviceDeps, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getDeps() -} - -// GetInfo is the programmatic example of "dmsetup info". -// It outputs some brief information about the device. -func GetInfo(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfo() -} - -// GetInfoWithDeferred is the programmatic example of "dmsetup info", but deferred. -// It outputs some brief information about the device. -func GetInfoWithDeferred(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfoWithDeferred() -} - -// GetDriverVersion is the programmatic example of "dmsetup version". -// It outputs version information of the driver. -func GetDriverVersion() (string, error) { - task := TaskCreate(deviceVersion) - if task == nil { - return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") - } - if err := task.run(); err != nil { - return "", err - } - return task.getDriverVersion() -} - -// GetStatus is the programmatic example of "dmsetup status". -// It outputs status information for the specified device name. -func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceStatus, name) - if task == nil { - logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// GetTable is the programmatic example for "dmsetup table". -// It outputs the current table for the specified device name. 
-func GetTable(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceTable, name) - if task == nil { - logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// SetTransactionID sets a transaction id for the specified device name. -func SetTransactionID(poolName string, oldID uint64, newID uint64) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) - } - return nil -} - -// SuspendDevice is the programmatic example of "dmsetup suspend". -// It suspends the specified device. -func SuspendDevice(name string) error { - task, err := TaskCreateNamed(deviceSuspend, name) - if task == nil { - return err - } - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) - } - return nil -} - -// ResumeDevice is the programmatic example of "dmsetup resume". -// It un-suspends the specified device. -func ResumeDevice(name string) error { - task, err := TaskCreateNamed(deviceResume, name) - if task == nil { - return err - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceResume %s", err) - } - - return nil -} - -// CreateDevice creates a device with the specified poolName with the specified device id. -func CreateDevice(poolName string, deviceID int) error { - logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) - - } - return nil -} - -// DeleteDevice deletes a device with the specified poolName with the specified device id. 
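Taken together, the functions above form a thin-device lifecycle: send create_thin to the pool, load a thin table to activate, remove the mapping, then send delete back to the pool. A hedged end-to-end sketch follows; the pool name, device id, and size are illustrative, and this requires root plus an existing thin pool:

package main

import "github.com/containers/storage/pkg/devicemapper"

func main() {
	const (
		pool     = "docker-thinpool" // illustrative pool device name
		name     = "thin-42"
		deviceID = 42
		size     = uint64(1 << 30) // 1 GiB
	)

	if err := devicemapper.CreateDevice(pool, deviceID); err != nil {
		panic(err)
	}
	if err := devicemapper.ActivateDevice(pool, name, deviceID, size); err != nil {
		panic(err)
	}
	// ... use /dev/mapper/thin-42 here ...
	if err := devicemapper.RemoveDevice(name); err != nil {
		panic(err)
	}
	if err := devicemapper.DeleteDevice(pool, deviceID); err != nil {
		panic(err)
	}
}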
-func DeleteDevice(poolName string, deviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - if err := task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) - } - return nil -} - -// ActivateDevice activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { - return activateDevice(poolName, name, deviceID, size, "") -} - -// ActivateDeviceWithExternal activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { - return activateDevice(poolName, name, deviceID, size, external) -} - -func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { - task, err := TaskCreateNamed(deviceCreate, name) - if task == nil { - return err - } - - var params string - if len(external) > 0 { - params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) - } else { - params = fmt.Sprintf("%s %d", poolName, deviceID) - } - if err := task.addTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - if err := task.setAddNode(addNodeOnCreate); err != nil { - return fmt.Errorf("devicemapper: Can't add node %s", err) - } - - cookie := new(uint) - if err := task.setCookie(cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - - defer UdevWait(cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) - } - - return nil -} - -// CreateSnapDeviceRaw creates a snapshot device. Caller needs to suspend and resume the origin device if it is active. -func CreateSnapDeviceRaw(poolName string, deviceID int, baseDeviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. 
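The comment above ("so that it can try with a different device id") anticipates a probing loop in the caller. A sketch of that loop around CreateDevice; the pool name and probe range are illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/devicemapper"
)

// createWithFreeID probes successive ids until CreateDevice stops
// reporting ErrDeviceIDExists.
func createWithFreeID(pool string, startID int) (int, error) {
	for id := startID; id < startID+100; id++ {
		switch err := devicemapper.CreateDevice(pool, id); err {
		case nil:
			return id, nil
		case devicemapper.ErrDeviceIDExists:
			continue // id taken, try the next one
		default:
			return 0, err
		}
	}
	return 0, fmt.Errorf("no free device id in [%d,%d)", startID, startID+100)
}

func main() {
	id, err := createWithFreeID("docker-thinpool", 100) // names illustrative
	fmt.Println(id, err)
}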
- if dmSawExist { - return ErrDeviceIDExists - } - return fmt.Errorf("devicemapper: Error running deviceCreate (CreateSnapDeviceRaw) %s", err) - } - - return nil -} - -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceId. -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - if err := CreateSnapDeviceRaw(poolName, deviceID, baseDeviceID); err != nil { - if doSuspend { - if err2 := ResumeDevice(baseName); err2 != nil { - return fmt.Errorf("CreateSnapDeviceRaw Error: (%v): ResumeDevice Error: (%v)", err, err2) - } - } - return err - } - - if doSuspend { - if err := ResumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go deleted file mode 100644 index b540281fab..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_log.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build linux,cgo - -package devicemapper - -import "C" - -import ( - "fmt" - "strings" - - "github.com/sirupsen/logrus" -) - -// DevmapperLogger defines methods required to register as a callback for -// logging events received from devicemapper. Note that devicemapper will send -// *all* logs to callbacks regardless (including debug logs) so it's -// recommended to not spam the console with the outputs. -type DevmapperLogger interface { - // DMLog is the logging callback containing all of the information from - // devicemapper. The interface is identical to the C libdm counterpart. - DMLog(level int, file string, line int, dmError int, message string) -} - -// dmLogger is the current logger in use that is being forwarded our messages. -var dmLogger DevmapperLogger - -// LogInit changes the logging callback called after processing libdm logs for -// error message information. The default logger simply forwards all logs to -// logrus. Calling LogInit(nil) disables the calling of callbacks. -func LogInit(logger DevmapperLogger) { - dmLogger = logger -} - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -// StorageDevmapperLogCallback exports the devmapper log callback for cgo. Note that -// because we are using callbacks, this function will be called for *every* log -// in libdm (even debug ones because there's no way of setting the verbosity -// level for an external logging callback). -//export StorageDevmapperLogCallback -func StorageDevmapperLogCallback(level C.int, file *C.char, line, dmErrnoOrClass C.int, message *C.char) { - msg := C.GoString(message) - - // Track what errno libdm saw, because the library only gives us 0 or 1. - if level < LogLevelDebug { - if strings.Contains(msg, "busy") { - dmSawBusy = true - } - - if strings.Contains(msg, "File exists") { - dmSawExist = true - } - - if strings.Contains(msg, "No such device or address") { - dmSawEnxio = true - } - } - - if dmLogger != nil { - dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) - } -}
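Everything registered through LogInit sees the full libdm stream via the single cgo entry point above. A minimal sketch of plugging in a custom logger, assuming the same package import path; verboseLogger is a hypothetical example type, not part of the package:

package main

import (
    "log"

    "github.com/containers/storage/pkg/devicemapper"
)

// verboseLogger forwards every libdm message to the standard logger,
// including the debug chatter that DefaultLogger (below) suppresses.
type verboseLogger struct{}

func (verboseLogger) DMLog(level int, file string, line int, dmError int, message string) {
    log.Printf("libdm[%d] %s:%d (%d): %s", level, file, line, dmError, message)
}

func main() {
    devicemapper.LogInit(verboseLogger{})
    // ... devicemapper calls made after this point are logged verbosely.
}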
-// DefaultLogger is the default logger used by pkg/devicemapper. It forwards -// all logs that are of higher or equal priority to the given level to the -// corresponding logrus level. -type DefaultLogger struct { - // Level corresponds to the highest libdm level that will be forwarded to - // logrus. In order to change this, register a new DefaultLogger. - Level int -} - -// DMLog is the logging callback containing all of the information from -// devicemapper. The interface is identical to the C libdm counterpart. -func (l DefaultLogger) DMLog(level int, file string, line, dmError int, message string) { - if level <= l.Level { - // Forward the log to the correct logrus level, if allowed by dmLogLevel. - logMsg := fmt.Sprintf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - switch level { - case LogLevelFatal, LogLevelErr: - logrus.Error(logMsg) - case LogLevelWarn: - logrus.Warn(logMsg) - case LogLevelNotice, LogLevelInfo: - logrus.Info(logMsg) - case LogLevelDebug: - logrus.Debug(logMsg) - default: - // Don't drop any "unknown" levels. - logrus.Info(logMsg) - } - } -} - -// registerLogCallback registers our own logging callback function for libdm -// (which is StorageDevmapperLogCallback). -// -// Because libdm only gives us {0,1} error codes we need to parse the logs -// produced by libdm (to set dmSawBusy and so on). Note that by registering a -// callback using StorageDevmapperLogCallback, libdm will no longer output logs to -// stderr so we have to log everything ourselves. None of this handling is -// optional because we depend on log callbacks to parse the logs, and if we -// don't forward the log information we'll be in a lot of trouble when -// debugging things. -func registerLogCallback() { - LogWithErrnoInit() -} - -func init() { - // Use the default logger by default. We only allow LogLevelFatal by - // default, because internally we mask a lot of libdm errors by retrying - // and similar tricks. Also, libdm is very chatty and we don't want to - // worry users for no reason. - dmLogger = DefaultLogger{ - Level: LogLevelFatal, - } - - // Register as early as possible so we don't miss anything. - registerLogCallback() -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go deleted file mode 100644 index 190d83d499..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper.go +++ /dev/null @@ -1,252 +0,0 @@ -// +build linux,cgo - -package devicemapper - -/* -#define _GNU_SOURCE -#include <libdevmapper.h> -#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void StorageDevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) -{ - char *buffer = NULL; - va_list ap; - int ret; - - va_start(ap, f); - ret = vasprintf(&buffer, f, ap); - va_end(ap); - if (ret < 0) { - // memory allocation failed -- should never happen? - return; - } - - StorageDevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); - free(buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "reflect" - "unsafe" -) - -type ( - cdmTask C.struct_dm_task -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD -) - -// Devicemapper cookie flags.
-const ( - DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG - DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG - DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG - DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK -) - -// DeviceMapper mapped functions. -var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetDeps = dmTaskGetDepsFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskGetDriverVersion = dmTaskGetDriverVersionFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - DmUdevSetSyncSupport = dmUdevSetSyncSupportFct - DmUdevGetSyncSupport = dmUdevGetSyncSupportFct - DmCookieSupported = dmCookieSupportedFct - LogWithErrnoInit = logWithErrnoInitFct - DmTaskDeferredRemove = dmTaskDeferredRemoveFct - DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *cdmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *cdmTask { - return (*cdmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *cdmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *cdmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *cdmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *cdmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string) int { - - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetDepsFct(task *cdmTask) *Deps { - Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) - if Cdeps == nil { - return nil - } - - // golang issue: https://github.com/golang/go/issues/11925 - hdr := reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), - Len: int(Cdeps.count), - Cap: int(Cdeps.count), - } - devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) - - deps := &Deps{ - Count: uint32(Cdeps.count), - Filler: uint32(Cdeps.filler), - } - for _, device := range devices { - deps.Device 
= append(deps.Device, uint64(device)) - } - return deps -} - -func dmTaskGetInfoFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmTaskGetDriverVersionFct(task *cdmTask) string { - buffer := C.malloc(128) - defer C.free(buffer) - res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) - if res == 0 { - return "" - } - return C.GoString((*C.char)(buffer)) -} - -func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) - return nextp -} - -func dmUdevSetSyncSupportFct(syncWithUdev int) { - (C.dm_udev_set_sync_support(C.int(syncWithUdev))) -} - -func dmUdevGetSyncSupportFct() int { - return int(C.dm_udev_get_sync_support()) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmCookieSupportedFct() int { - return int(C.dm_cookie_supported()) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go deleted file mode 100644 index 7f793c2708..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build linux,cgo,!libdm_no_deferred_remove - -package devicemapper - -// #include <libdevmapper.h> -import "C" - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = true - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} diff
--git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go deleted file mode 100644 index 7d84508982..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_dynamic.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build - -package devicemapper - -// #cgo pkg-config: devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go deleted file mode 100644 index a880fec8c4..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build linux,cgo,libdm_no_deferred_remove - -package devicemapper - -// LibraryDeferredRemovalSupport tells if the feature is enabled in the build -const LibraryDeferredRemovalSupport = false - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - // Error. Nobody should be calling it. - return -1 -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go b/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go deleted file mode 100644 index cf7f26a4c6..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/devmapper_wrapper_static.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,static_build - -package devicemapper - -// #cgo pkg-config: --static devmapper -import "C" diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go b/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go deleted file mode 100644 index 50ea7c4823..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/ioctl.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux,cgo - -package devicemapper - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/devicemapper/log.go b/vendor/github.com/containers/storage/pkg/devicemapper/log.go deleted file mode 100644 index cee5e54549..0000000000 --- a/vendor/github.com/containers/storage/pkg/devicemapper/log.go +++ /dev/null @@ -1,11 +0,0 @@ -package devicemapper - -// definitions from lvm2 lib/log/log.h -const ( - LogLevelFatal = 2 + iota // _LOG_FATAL - LogLevelErr // _LOG_ERR - LogLevelWarn // _LOG_WARN - LogLevelNotice // _LOG_NOTICE - LogLevelInfo // _LOG_INFO - LogLevelDebug // _LOG_DEBUG -) diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go b/vendor/github.com/containers/storage/pkg/directory/directory_unix.go deleted file mode 100644 index 397251bdb8..0000000000 --- a/vendor/github.com/containers/storage/pkg/directory/directory_unix.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux freebsd solaris - -package directory - -import ( - "os" - "path/filepath" - "syscall" -) - -// Size 
walks a directory tree and returns its total size in bytes. -func Size(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. - data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go b/vendor/github.com/containers/storage/pkg/directory/directory_windows.go deleted file mode 100644 index 6fb0917c4c..0000000000 --- a/vendor/github.com/containers/storage/pkg/directory/directory_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package directory - -import ( - "os" - "path/filepath" -) - -// Size walks a directory tree and returns its total size in bytes. -func Size(dir string) (size int64, err error) { - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - size += s - - return nil - }) - return -} diff --git a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go b/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go deleted file mode 100644 index 7df7f3d436..0000000000 --- a/vendor/github.com/containers/storage/pkg/dmesg/dmesg_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build linux - -package dmesg - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Dmesg returns last messages from the kernel log, up to size bytes -func Dmesg(size int) []byte { - t := uintptr(3) // SYSLOG_ACTION_READ_ALL - b := make([]byte, size) - amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))) - if err != 0 { - return []byte{} - } - return b[:amt] -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go deleted file mode 100644 index a129e654ea..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,298 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" -) - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match paths against the patterns -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0,
len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = p[1:] - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since it's really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. - if len(pattern.dirs) <= len(parentPathDirs) { - match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. -type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars.
- var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.Matches(file) -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. 
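Matches above is the exported entry point for the whole pattern pipeline (NewPatternMatcher, per-Pattern regexp compilation, parent-directory checks). An illustrative sketch of its semantics, assuming the upstream github.com/containers/storage/pkg/fileutils import path since the vendored copy is removed here; note that a later "!" exclusion overrides an earlier match:

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/fileutils"
)

func main() {
    // "docs/**" matches at any depth; "!docs/README.md" then carves out
    // one file. Patterns are applied in order, so the last match wins.
    patterns := []string{"docs/**", "!docs/README.md"}

    for _, path := range []string{"docs/api/v1.md", "docs/README.md", "main.go"} {
        matched, err := fileutils.Matches(path, patterns)
        if err != nil {
            panic(err)
        }
        fmt.Printf("%-16s matched=%v\n", path, matched)
    }
    // Expected: docs/api/v1.md matched=true, docs/README.md matched=false,
    // main.go matched=false.
}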
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index ccd648fac3..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab93..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. -// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 9e0e97bd64..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace5..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. 
-func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go b/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go deleted file mode 100644 index e6094b55b7..0000000000 --- a/vendor/github.com/containers/storage/pkg/fsutils/fsutils_linux.go +++ /dev/null @@ -1,88 +0,0 @@ -// +build linux - -package fsutils - -import ( - "fmt" - "io/ioutil" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -func locateDummyIfEmpty(path string) (string, error) { - children, err := ioutil.ReadDir(path) - if err != nil { - return "", err - } - if len(children) != 0 { - return "", nil - } - dummyFile, err := ioutil.TempFile(path, "fsutils-dummy") - if err != nil { - return "", err - } - name := dummyFile.Name() - err = dummyFile.Close() - return name, err -} - -// SupportsDType returns whether the filesystem mounted on path supports d_type -func SupportsDType(path string) (bool, error) { - // locate dummy so that we have at least one dirent - dummy, err := locateDummyIfEmpty(path) - if err != nil { - return false, err - } - if dummy != "" { - defer os.Remove(dummy) - } - - visited := 0 - supportsDType := true - fn := func(ent *unix.Dirent) bool { - visited++ - if ent.Type == unix.DT_UNKNOWN { - supportsDType = false - // stop iteration - return true - } - // continue iteration - return false - } - if err = iterateReadDir(path, fn); err != nil { - return false, err - } - if visited == 0 { - return false, fmt.Errorf("did not hit any dirent during iteration %s", path) - } - return supportsDType, nil -} - -func iterateReadDir(path string, fn func(*unix.Dirent) bool) error { - d, err := os.Open(path) - if err != nil { - return err - } - defer d.Close() - fd := int(d.Fd()) - buf := make([]byte, 4096) - for { - nbytes, err := unix.ReadDirent(fd, buf) - if err != nil { - return err - } - if nbytes == 0 { - break - } - for off := 0; off < nbytes; { - ent := (*unix.Dirent)(unsafe.Pointer(&buf[off])) - if stop := fn(ent); stop { - return nil - } - off += int(ent.Reclen) - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go deleted file mode 100644 index c001fbecbf..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build linux - -package homedir - -import ( - "os" - - "github.com/containers/storage/pkg/idtools" -) - -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go deleted file mode 100644 index 6b96b856f6..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package homedir - -import ( - "errors" -) - -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) 
-func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go deleted file mode 100644 index f2a20ea8f8..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package homedir - -import ( - "os" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go deleted file mode 100644 index fafdb2bbf9..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "os" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - return os.Getenv(Key()) -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go deleted file mode 100644 index 68a072db22..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ /dev/null @@ -1,279 +0,0 @@ -package idtools - -import ( - "bufio" - "fmt" - "os" - "sort" - "strconv" - "strings" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. 
-// Deprecated: Use MkdirAllAndChown -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -// Deprecated: Use MkdirAndChown with a IDPair -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. -// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - uid, err := toHost(0, uidMap) - if err != nil { - return -1, -1, err - } - gid, err := toHost(0, gidMap) - if err != nil { - return -1, -1, err - } - return uid, gid, nil -} - -// toContainer takes an id mapping, and uses it to translate a -// host ID to the remapped ID. If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) -} - -// toHost takes an id mapping and a remapped ID, and translates the -// ID to the mapped host ID. 
If no map is provided, then the translation -// assumes a 1-to-1 mapping and returns the passed in id -func toHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) -} - -// IDPair is a UID and GID pair -type IDPair struct { - UID int - GID int -} - -// IDMappings contains mappings of UIDs and GIDs -type IDMappings struct { - uids []IDMap - gids []IDMap -} - -// NewIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { - subuidRanges, err := parseSubuid(username) - if err != nil { - return nil, err - } - subgidRanges, err := parseSubgid(groupname) - if err != nil { - return nil, err - } - if len(subuidRanges) == 0 { - return nil, fmt.Errorf("No subuid ranges found for user %q", username) - } - if len(subgidRanges) == 0 { - return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) - } - - return &IDMappings{ - uids: createIDMap(subuidRanges), - gids: createIDMap(subgidRanges), - }, nil -} - -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. -func (i *IDMappings) RootPair() IDPair { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} -}
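A worked example of the translation arithmetic: with the single mapping IDMap{ContainerID: 0, HostID: 100000, Size: 65536} (the shape of a typical /etc/subuid entry), container ID 1000 lands on host ID 100000 + (1000 - 0) = 101000, and host ID 100500 maps back to container ID 500. A sketch using the ToHost/ToContainer methods defined just below, via the NewIDMappingsFromMaps shim, assuming the upstream github.com/containers/storage/pkg/idtools import path:

package main

import (
    "fmt"

    "github.com/containers/storage/pkg/idtools"
)

func main() {
    // One 64k range: container IDs 0-65535 <-> host IDs 100000-165535.
    m := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
    mappings := idtools.NewIDMappingsFromMaps(m, m)

    host, err := mappings.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
    if err != nil {
        panic(err)
    }
    fmt.Println(host.UID, host.GID) // 101000 101000

    uid, gid, err := mappings.ToContainer(idtools.IDPair{UID: 100500, GID: 100500})
    if err != nil {
        panic(err)
    }
    fmt.Println(uid, gid) // 500 500
}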
-// ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = toHost(pair.UID, i.uids) - if err != nil { - return target, err - } - } - - if pair.GID != target.GID { - target.GID, err = toHost(pair.GID, i.gids) - } - return target, err -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := toContainer(pair.UID, i.uids) - if err != nil { - return -1, -1, err - } - gid, err := toContainer(pair.GID, i.gids) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 -} - -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { - return i.uids -} - -// GIDs return the GID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { - return i.gids -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -func parseSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func parseSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} - -// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) -// and return all found ranges for a specified username.
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go deleted file mode 100644 index b5870506a0..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,204 +0,0 @@ -// +build !windows - -package idtools - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/containers/storage/pkg/system" - "github.com/opencontainers/runc/libcontainer/user" -) - -var ( - entOnce sync.Once - getentCmd string -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - // short-circuit--we were called with an existing directory and chown was requested - return os.Chown(path, ownerUID, ownerGID) - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0100 == 0100) { - return true - } - if isGroup && (perms&0010 == 0010) { - return true - } - if perms&0001 == 0001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - return users[0], nil -} - -// 
LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go deleted file mode 100644 index dbf6bc4c94..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/containers/storage/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode, ""); err != nil && !os.IsExist(err) { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { - return true -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 9da7975e2c..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,164 +0,0 @@ -package idtools - -import ( - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - 
sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cbd..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go deleted file mode 100644 index 9703ecbd9d..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e19..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go deleted file mode 100644 index 72a04f3491..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,186 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. 
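For reference, the BytesPipe being deleted (Write follows; Read and Close appear further down) behaves like an in-memory pipe whose contents can be read at most once. A minimal usage sketch, assuming the package were still importable at its old path:

package main

import (
	"fmt"
	"io"

	// Removed by this change; used here purely for illustration.
	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Producer: writes are buffered into pooled fixed-size slices; Close
	// lets the consumer drain what is left and then observe io.EOF.
	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world\n"))
		bp.Close()
	}()

	// Consumer: Read blocks until data arrives or the pipe is closed.
	buf := make([]byte, 64)
	for {
		n, err := bp.Read(buf)
		fmt.Print(string(buf[:n]))
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
	}
}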
-func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go deleted file mode 100644 index a56c462651..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,162 +0,0 @@ -package ioutils - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. 
-func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := ioutil.TempDir(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. -func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - err := w.File.Sync() - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. 
-func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go deleted file mode 100644 index 63f3c07f46..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go +++ /dev/null @@ -1,154 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - - "golang.org/x/net/context" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. 
It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21b5..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go deleted file mode 100644 index c719c120b5..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/containers/storage/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go deleted file mode 100644 index 52a4901ade..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. 
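Close itself follows below. The typical consumer of WriteFlusher is a streaming HTTP handler, where flushing after every write lets the client see progress immediately; a hedged sketch, again assuming the old import path:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	// Removed by this change; used here purely for illustration.
	"github.com/containers/storage/pkg/ioutils"
)

// streamHandler emits one line per second. Wrapping the ResponseWriter in a
// WriteFlusher (constructor below) flushes after every Write, so the client
// sees each line right away instead of when the server's buffer fills.
func streamHandler(w http.ResponseWriter, r *http.Request) {
	// The concrete ResponseWriter implements Flush; NewWriteFlusher falls
	// back to a NopFlusher for writers that do not.
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

	for i := 0; i < 3; i++ {
		fmt.Fprintf(wf, "tick %d\n", i)
		time.Sleep(time.Second)
	}
}

func main() {
	http.HandleFunc("/ticks", streamHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}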
-func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go deleted file mode 100644 index ccc7f9c23e..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4c..0000000000 --- a/vendor/github.com/containers/storage/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. 
- path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go b/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go deleted file mode 100644 index 34c80548d2..0000000000 --- a/vendor/github.com/containers/storage/pkg/loopback/attach_loopback.go +++ /dev/null @@ -1,157 +0,0 @@ -// +build linux - -package loopback - -import ( - "errors" - "fmt" - "os" - "syscall" - - "github.com/sirupsen/logrus" -) - -// Loopback related errors -var ( - ErrAttachLoopbackDevice = errors.New("loopback attach failed") - ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") - ErrSetCapacity = errors.New("Unable set loopback capacity") -) - -func stringToLoopName(src string) [LoNameSize]uint8 { - var dst [LoNameSize]uint8 - copy(dst[:], src[:]) - return dst -} - -func getNextFreeLoopbackIndex() (int, error) { - f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) - if err != nil { - return 0, err - } - defer f.Close() - - index, err := ioctlLoopCtlGetFree(f.Fd()) - if index < 0 { - index = 0 - } - return index, err -} - -func openNextAvailableLoopback(index int, sparseName string, sparseFile *os.File) (loopFile *os.File, err error) { - // Read information about the loopback file. - var st syscall.Stat_t - err = syscall.Fstat(int(sparseFile.Fd()), &st) - if err != nil { - logrus.Errorf("Error reading information about loopback file %s: %v", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - - // Start looking for a free /dev/loop - for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := os.Stat(target) - if err != nil { - if os.IsNotExist(err) { - logrus.Error("There are no more loopback devices available.") - } - return nil, ErrAttachLoopbackDevice - } - - if fi.Mode()&os.ModeDevice != os.ModeDevice { - logrus.Errorf("Loopback device %s is not a block device.", target) - continue - } - - // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening loopback device: %s", err) - return nil, ErrAttachLoopbackDevice - } - - // Try to attach to the loop file - if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { - loopFile.Close() - - // If the error is EBUSY, then try the next loopback - if err != syscall.EBUSY { - logrus.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice - } - - // Otherwise, we keep going with the loop - continue - } - - // Check if the loopback driver and underlying filesystem agree on the loopback file's - // device and inode numbers. - dev, ino, err := getLoopbackBackingFile(loopFile) - if err != nil { - logrus.Errorf("Error getting loopback backing file: %s", err) - return nil, ErrGetLoopbackBackingFile - } - if dev != st.Dev || ino != st.Ino { - logrus.Errorf("Loopback device and filesystem disagree on device/inode for %q: %#x(%d):%#x(%d) vs %#x(%d):%#x(%d)", sparseName, dev, dev, ino, ino, st.Dev, st.Dev, st.Ino, st.Ino) - } - - // In case of success, we finished. Break the loop. - break - } - - // This can't happen, but let's be sure - if loopFile == nil { - logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} - -// AttachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *os.File. 
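AttachLoopDevice's body follows; it is roughly `losetup --find --show` in Go. A usage sketch, assuming a Linux host, root privileges, a pre-created sparse file at a hypothetical path, and the old import path:

// +build linux

package main

import (
	"fmt"

	// Removed by this change; used here purely for illustration.
	"github.com/containers/storage/pkg/loopback"
)

func main() {
	// The path is hypothetical and the file must already exist (e.g. a
	// truncate-created sparse image). AttachLoopDevice sets
	// LO_FLAGS_AUTOCLEAR, so the kernel detaches the loop device once the
	// last open fd on it is closed.
	loopFile, err := loopback.AttachLoopDevice("/var/lib/demo/sparse.img")
	if err != nil {
		panic(err)
	}
	defer loopFile.Close()

	fmt.Println("attached:", loopFile.Name()) // e.g. /dev/loop0
}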
-func AttachLoopDevice(sparseName string) (loop *os.File, err error) { - - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard error and start looping for a - // loopback from index 0. - startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - logrus.Debugf("Error retrieving the next available loopback: %s", err) - } - - // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - defer sparseFile.Close() - - loopFile, err := openNextAvailableLoopback(startIndex, sparseName, sparseFile) - if err != nil { - return nil, err - } - - // Set the status of the loopback device - loopInfo := &loopInfo64{ - loFileName: stringToLoopName(loopFile.Name()), - loOffset: 0, - loFlags: LoFlagsAutoClear, - } - - if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - logrus.Errorf("Cannot set up loopback device info: %s", err) - - // If the call failed, then free the loopback device - if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - logrus.Error("Error while cleaning up the loopback device") - } - loopFile.Close() - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} diff --git a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go b/vendor/github.com/containers/storage/pkg/loopback/ioctl.go deleted file mode 100644 index 0714eb5f87..0000000000 --- a/vendor/github.com/containers/storage/pkg/loopback/ioctl.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux - -package loopback - -import ( - "syscall" - "unsafe" -) - -func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) - if err != 0 { - return 0, err - } - return int(index), nil -} - -func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { - return err - } - return nil -} - -func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return err - } - return nil -} - -func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { - return err - } - return nil -} - -func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { - loopInfo := &loopInfo64{} - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return nil, err - } - return loopInfo, nil -} - -func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go b/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go deleted file mode 100644 index e1100ce156..0000000000 --- a/vendor/github.com/containers/storage/pkg/loopback/loop_wrapper.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build linux - -package loopback - -/* -#include // FIXME: present only for defines, maybe we can remove it? 
- -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -*/ -import "C" - -type loopInfo64 struct { - loDevice uint64 /* ioctl r/o */ - loInode uint64 /* ioctl r/o */ - loRdevice uint64 /* ioctl r/o */ - loOffset uint64 - loSizelimit uint64 /* bytes, 0 == max available */ - loNumber uint32 /* ioctl r/o */ - loEncryptType uint32 - loEncryptKeySize uint32 /* ioctl w/o */ - loFlags uint32 /* ioctl r/o */ - loFileName [LoNameSize]uint8 - loCryptName [LoNameSize]uint8 - loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ - loInit [2]uint64 -} - -// IOCTL consts -const ( - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY -) - -// LOOP consts. -const ( - LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR - LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY - LoFlagsPartScan = C.LO_FLAGS_PARTSCAN - LoKeySize = C.LO_KEY_SIZE - LoNameSize = C.LO_NAME_SIZE -) diff --git a/vendor/github.com/containers/storage/pkg/loopback/loopback.go b/vendor/github.com/containers/storage/pkg/loopback/loopback.go deleted file mode 100644 index a8ec3c6163..0000000000 --- a/vendor/github.com/containers/storage/pkg/loopback/loopback.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build linux - -package loopback - -import ( - "fmt" - "os" - "syscall" - - "github.com/sirupsen/logrus" -) - -func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { - loopInfo, err := ioctlLoopGetStatus64(file.Fd()) - if err != nil { - logrus.Errorf("Error get loopback backing file: %s", err) - return 0, 0, ErrGetLoopbackBackingFile - } - return loopInfo.loDevice, loopInfo.loInode, nil -} - -// SetCapacity reloads the size for the loopback device. -func SetCapacity(file *os.File) error { - if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - logrus.Errorf("Error loopbackSetCapacity: %s", err) - return ErrSetCapacity - } - return nil -} - -// FindLoopDeviceFor returns a loopback device file for the specified file which -// is backing file of a loop back device. 
-func FindLoopDeviceFor(file *os.File) *os.File { - stat, err := file.Stat() - if err != nil { - return nil - } - targetInode := stat.Sys().(*syscall.Stat_t).Ino - targetDevice := stat.Sys().(*syscall.Stat_t).Dev - - for i := 0; true; i++ { - path := fmt.Sprintf("/dev/loop%d", i) - - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - // Ignore all errors until the first not-exist - // we want to continue looking for the file - continue - } - - dev, inode, err := getLoopbackBackingFile(file) - if err == nil && dev == targetDevice && inode == targetInode { - return file - } - file.Close() - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go deleted file mode 100644 index 607dbed43a..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) 
- flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", opt) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// Parse fstab type mount options into mount() flags -// and device specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go deleted file mode 100644 index 5f76f331b6..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build freebsd,cgo - -package mount - -/* -#include -*/ -import "C" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = C.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously. - SYNCHRONOUS = C.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME -) - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIVE = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go deleted file mode 100644 index 0425d0dd63..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = unix.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. 
- SYNCHRONOUS = unix.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = unix.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = unix.MS_SLAVE | unix.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = unix.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. - STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH -) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go deleted file mode 100644 index 9ed741e3ff..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo - -package mount - -// These flags are unsupported. 
-const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go deleted file mode 100644 index d3caa16bda..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mount.go +++ /dev/null @@ -1,106 +0,0 @@ -package mount - -import ( - "sort" - "strings" - "time" - - "github.com/containers/storage/pkg/fileutils" -) - -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() -} - -// Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. -func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() - if err != nil { - return false, err - } - - mountpoint, err = fileutils.ReadSymlinkedDirectory(mountpoint) - if err != nil { - return false, err - } - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil -} - -// Mount will mount filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return ForceMount(device, target, mType, options) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* if the target path is not already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := parseOptions(options) - return mount(device, target, mType, uintptr(flag), data) -} - -// Unmount lazily unmounts a filesystem on supported platforms, otherwise -// does a normal unmount. -func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err - } - return ForceUnmount(target) -} - -// RecursiveUnmount unmounts the target and all mounts underneath, starting with -// the deepsest mount first. -func RecursiveUnmount(target string) error { - mounts, err := GetMounts() - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Sort(sort.Reverse(byMountpoint(mounts))) - - for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { - return err - } - // Ignore errors for submounts and continue trying to unmount others - // The final unmount should fail if there ane any submounts remaining - } - } - return nil -} - -// ForceUnmount will force an unmount of the target filesystem, regardless if -// it is mounted or not. 
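ForceUnmount follows below. The mount helpers above accept fstab-style option strings; a usage sketch (Linux, requires root, hypothetical target path, old import path):

// +build linux

package main

import (
	"log"

	// Removed by this change; used here purely for illustration.
	"github.com/containers/storage/pkg/mount"
)

func main() {
	target := "/mnt/scratch" // hypothetical; the directory must already exist

	// Options use fstab syntax: parseOptions turns "noexec" into an MS_*
	// flag and passes unknown entries such as "size=64m" through as fs data.
	if err := mount.Mount("tmpfs", target, "tmpfs", "noexec,size=64m"); err != nil {
		log.Fatal(err)
	}
	defer mount.Unmount(target) // lazy unmount on supported platforms

	mounted, err := mount.Mounted(target)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s mounted: %v", target, mounted)
}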
-func ForceUnmount(target string) (err error) { - // Simple retry logic for unmount - for i := 0; i < 10; i++ { - if err = unmount(target, 0); err == nil { - return nil - } - time.Sleep(100 * time.Millisecond) - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go deleted file mode 100644 index 814896cc9e..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,60 +0,0 @@ -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "unsafe" - - "golang.org/x/sys/unix" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - } - } - - options := []string{"fspath", target} - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) - } - return nil -} - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go deleted file mode 100644 index 39c36d472a..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,57 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // ptypes is the set propagation types. - ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE - - // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | unix.MS_REC | unix.MS_SILENT - - // broflags is the combination of bind and read only - broflags = unix.MS_BIND | unix.MS_RDONLY -) - -// isremount returns true if either device name or flags identify a remount request, false otherwise. -func isremount(device string, flags uintptr) bool { - switch { - // We treat device "" and "none" as a remount request to provide compatibility with - // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. - case flags&unix.MS_REMOUNT != 0, device == "", device == "none": - return true - default: - return false - } -} - -func mount(device, target, mType string, flags uintptr, data string) error { - oflags := flags &^ ptypes - if !isremount(device, flags) || data != "" { - // Initial call applying all non-propagation flags for mount - // or remount with changed data - if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return err - } - } - - if flags&ptypes != 0 { - // Change the propagation type. - if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return err - } - } - - if oflags&broflags == broflags { - // Remount the bind to apply read only. 
- return unix.Mount("", target, "", oflags|unix.MS_REMOUNT, "") - } - - return nil -} - -func unmount(target string, flag int) error { - return unix.Unmount(target, flag) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go deleted file mode 100644 index 48b86771e7..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// #include -// #include -// #include -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go deleted file mode 100644 index a2a3bb457f..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go deleted file mode 100644 index ff4cc1d86b..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go +++ /dev/null @@ -1,54 +0,0 @@ -package mount - -// Info reveals information about a particular mounted filesystem. This -// struct is populated from the content in the /proc//mountinfo file. -type Info struct { - // ID is a unique identifier of the mount (may be reused after umount). - ID int - - // Parent indicates the ID of the mount parent (or of self for the top of the - // mount tree). - Parent int - - // Major indicates one half of the device ID which identifies the device class. - Major int - - // Minor indicates one half of the device ID which identifies a specific - // instance of device. - Minor int - - // Root of the mount within the filesystem. - Root string - - // Mountpoint indicates the mount point relative to the process's root. - Mountpoint string - - // Opts represents mount-specific options. - Opts string - - // Optional represents optional fields. - Optional string - - // Fstype indicates the type of filesystem, such as EXT3. - Fstype string - - // Source indicates filesystem specific information or "none". - Source string - - // VfsOpts represents per super block options. 
- VfsOpts string -} - -type byMountpoint []*Info - -func (by byMountpoint) Len() int { - return len(by) -} - -func (by byMountpoint) Less(i, j int) bool { - return by[i].Mountpoint < by[j].Mountpoint -} - -func (by byMountpoint) Swap(i, j int) { - by[i], by[j] = by[j], by[i] -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go deleted file mode 100644 index 4f32edcd90..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_freebsd.go +++ /dev/null @@ -1,41 +0,0 @@ -package mount - -/* -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts. -func parseMountTable() ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") - } - - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) - - var out []*Info - for _, entry := range entries { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) - mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) - out = append(out, &mountinfo) - } - return out, nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go deleted file mode 100644 index be69fee1d7..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -package mount - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints 
with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - if optionalFields != "-" { - p.Optional = optionalFields - } - - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") - out = append(out, p) - } - return out, nil -} - -// PidMountInfo collects the mounts for a specific process ID. If the process -// ID is unknown, it is better to use `GetMounts` which will inspect -// "/proc/self/mountinfo" instead. -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8b..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go deleted file mode 100644 index 7fbcf19214..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -import ( - "fmt" - "runtime" -) - -func parseMountTable() ([]*Info, error) { - return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go deleted file mode 100644 index dab8a37ed0..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount - -func parseMountTable() ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 8ceec84bc6..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. 
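parseInfoFile above does a two-phase parse of each mountinfo line: a fixed Sscanf over the leading fields, then a split on the " - " separator for fstype, source, and super options. A self-contained sketch of the same two phases on the sample line from the format comment:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A typical /proc/self/mountinfo entry (fields per the comment above).
	line := `36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue`

	var (
		id, parent, major, minor         int
		root, mountpoint, opts, optional string
	)
	// Phase 1: fixed-position fields, exactly as the deleted mountinfoFormat.
	if _, err := fmt.Sscanf(line, "%d %d %d:%d %s %s %s %s",
		&id, &parent, &major, &minor, &root, &mountpoint, &opts, &optional); err != nil {
		panic(err)
	}

	// Phase 2: everything after " - " is fstype, source, and super options.
	post := strings.Fields(line[strings.Index(line, " - ")+3:])
	fmt.Println(mountpoint, post[0], post[1]) // /mnt2 ext3 /dev/root
}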
-func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - if _, err = Mounted(mountPoint); err != nil { - return err - } - - return ForceMount("", mountPoint, "none", options) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03cbc..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. 
-func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go deleted file mode 100644 index e598672776..0000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_windows.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build windows - -package kernel - -import ( - "fmt" - "unsafe" - - "golang.org/x/sys/windows" -) - -// VersionInfo holds information about the kernel. -type VersionInfo struct { - kvi string // Version of the kernel (e.g. 6.1.7601.17592 -> 6) - major int // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1) - minor int // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601) - build int // Build number of the kernel version (e.g. 6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - var ( - h windows.Handle - dwVersion uint32 - err error - ) - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - if err = windows.RegOpenKeyEx(windows.HKEY_LOCAL_MACHINE, - windows.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - windows.KEY_READ, - &h); err != nil { - return KVI, err - } - defer windows.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = windows.RegQueryValueEx(h, - windows.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return KVI, err - } - - KVI.kvi = windows.UTF16ToString(buf[:]) - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. 
- if dwVersion, err = windows.GetVersion(); err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index 49370bd3dd..0000000000 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/vendor/github.com/containers/storage/pkg/pools/pools.go b/vendor/github.com/containers/storage/pkg/pools/pools.go deleted file mode 100644 index a15e3688b9..0000000000 --- a/vendor/github.com/containers/storage/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/containers/storage/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool *sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. 
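The reader half of the pools package deleted here follows a simple sync.Pool recipe that is easy to reproduce. A minimal standalone sketch of the same pattern, with local names rather than the vendored API:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// reader32K recycles 32K-buffered readers, as the deleted BufioReader32KPool does.
var reader32K = sync.Pool{
	New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
}

// pooledCopy mirrors pools.Copy: borrow a pooled reader, copy through its
// buffer, then detach it and return it to the pool.
func pooledCopy(dst io.Writer, src io.Reader) (int64, error) {
	buf := reader32K.Get().(*bufio.Reader)
	buf.Reset(src)
	n, err := io.Copy(dst, buf)
	buf.Reset(nil) // drop the reference to src before pooling the buffer
	reader32K.Put(buf)
	return n, err
}

func main() {
	var out bytes.Buffer
	n, err := pooledCopy(&out, strings.NewReader("hello, pool"))
	fmt.Println(n, err, out.String())
}

Resetting the reader to nil before Put releases the reference to the underlying source, so pooled buffers do not pin readers that have gone out of scope.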
-type BufioWriterPool struct { - pool *sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := &sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/vendor/github.com/containers/storage/pkg/promise/promise.go b/vendor/github.com/containers/storage/pkg/promise/promise.go deleted file mode 100644 index dd52b9082f..0000000000 --- a/vendor/github.com/containers/storage/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go deleted file mode 100644 index 76edd82427..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go deleted file mode 100644 index 056d19954d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. 
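Before the platform-specific create-time step below, the clamping rule above is the part worth illustrating: any timestamp outside what a timespec can represent is forced back to the Unix epoch, since os.Chtimes is undefined there. A small sketch (assuming the 64-bit maxTime that the package's init() would select):

package main

import (
	"fmt"
	"time"
)

// maxTime assumes the 64-bit timespec case detected by the deleted init().
var maxTime = time.Unix(0, 1<<63-1)

// clamp applies the same rule as the deleted Chtimes: out-of-range times
// default to the Unix epoch because os.Chtimes is undefined outside it.
func clamp(t time.Time) time.Time {
	epoch := time.Unix(0, 0)
	if t.Before(epoch) || t.After(maxTime) {
		return epoch
	}
	return t
}

func main() {
	tooEarly := time.Unix(-1, 0)        // a pre-epoch mtime, as seen in odd tarballs
	fmt.Println(clamp(tooEarly).Unix()) // 0
}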
- if err := setCTime(name, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go deleted file mode 100644 index 09d58bcbfd..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -import ( - "time" -) - -//setCTime will set the create time on a file. On Unix, the create -//time is updated as a side effect of setting the modified time, so -//no action is required. -func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go deleted file mode 100644 index 45428c141c..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build windows - -package system - -import ( - "time" - - "golang.org/x/sys/windows" -) - -//setCTime will set the create time on a file. On Windows, this requires -//calling SetFileTime and explicitly including the create time. -func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go deleted file mode 100644 index 288318985e..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - // ErrNotSupportedPlatform means the platform is not supported. - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go deleted file mode 100644 index 60f0514b1d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/exitcode.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. 
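The fallback below sets 127 when no status can be recovered. A self-contained sketch of the same unwrapping (it assumes a `false` binary on PATH, as on most Unix systems):

package main

import (
	"fmt"
	"os/exec"
	"syscall"
)

// exitCode unwraps *exec.ExitError to a syscall.WaitStatus, falling back to
// 127 when no status is available, as the deleted ProcessExitCode does.
func exitCode(err error) int {
	if err == nil {
		return 0
	}
	if exitErr, ok := err.(*exec.ExitError); ok {
		if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
			return ws.ExitStatus()
		}
	}
	return 127 // mirrors the deleted fallback when the status is unavailable
}

func main() {
	err := exec.Command("false").Run() // exits with status 1 on most systems
	fmt.Println(exitCode(err))         // 1
}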
- // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys.go b/vendor/github.com/containers/storage/pkg/system/filesys.go deleted file mode 100644 index 102565f760..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/filesys.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !windows - -package system - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// MkdirAllWithACL is a wrapper for MkdirAll on unix systems. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return MkdirAll(path, perm, sddl) -} - -// MkdirAll creates a directory named path along with any necessary parents, -// with permission specified by attribute perm for all dir created. -func MkdirAll(path string, perm os.FileMode, sddl string) error { - return os.MkdirAll(path, perm) -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. -func IsAbs(path string) bool { - return filepath.IsAbs(path) -} - -// The functions below here are wrappers for the equivalents in the os and ioutils packages. -// They are passthrough on Unix platforms, and only relevant on Windows. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return os.Create(name) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return os.Open(name) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. It opens the named file with specified flag -// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, -// methods on the returned File can be used for I/O. -// If there is an error, it will be of type *PathError. -func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(name, flag, perm) -} - -// TempFileSequential creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. It is the caller's responsibility -// to remove the file when no longer needed. 
-func TempFileSequential(dir, prefix string) (f *os.File, err error) { - return ioutil.TempFile(dir, prefix) -} diff --git a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go b/vendor/github.com/containers/storage/pkg/system/filesys_windows.go deleted file mode 100644 index a61b53d0ba..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/filesys_windows.go +++ /dev/null @@ -1,298 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - winio "github.com/Microsoft/go-winio" - "golang.org/x/sys/windows" -) - -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) - -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// with an appropriate SDDL defined ACL. -func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { - return mkdirall(path, true, sddl) -} - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, _ os.FileMode, sddl string) error { - return mkdirall(path, false, sddl) -} - -// mkdirall is a custom version of os.MkdirAll modified for use on Windows -// so that it is both volume path aware, and can create a directory with -// a DACL. -func mkdirall(path string, applyACL bool, sddl string) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is largely copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. - - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], false, sddl) - if err != nil { - return err - } - } - - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if applyACL { - err = mkdirWithACL(path, sddl) - } else { - err = os.Mkdir(path, 0) - } - - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -// mkdirWithACL creates a new directory. If there is an error, it will be of -// type *PathError. . -// -// This is a modified and combined version of os.Mkdir and windows.Mkdir -// in golang to cater for creating a directory am ACL permitting full -// access, with inheritance, to any subfolder/file for Built-in Administrators -// and Local System. 
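The recursion in the mkdirall just removed is easier to see without the Windows-only branches. A trimmed, platform-neutral sketch (the volume-GUID short-circuit and ACL handling are omitted; 0755 is an illustrative mode):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"syscall"
)

// mkdirAll keeps only the recursion of the deleted mkdirall: trim any trailing
// separator, ensure the parent exists first, create the leaf, and treat
// "already a directory" as success (including creation races).
func mkdirAll(path string) error {
	if dir, err := os.Stat(path); err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // skip trailing separator
		i--
	}
	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // scan back over the leaf element
		j--
	}
	if j > 1 {
		if err := mkdirAll(path[:j-1]); err != nil { // create the parent first
			return err
		}
	}
	if err := os.Mkdir(path, 0755); err != nil {
		if dir, err1 := os.Lstat(path); err1 == nil && dir.IsDir() {
			return nil // someone else created it; that is fine
		}
		return err
	}
	return nil
}

func main() {
	fmt.Println(mkdirAll(filepath.Join(os.TempDir(), "a", "b", "c")))
}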
-func mkdirWithACL(name string, sddl string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0])) - - namep, err := windows.UTF16PtrFromString(name) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} - } - - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} - } - return nil -} - -// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false - } - } - return true -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil -} - -func makeInheritSa() *windows.SecurityAttributes { - var sa windows.SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) - if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING - } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - //https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// Helpers for TempFileSequential -var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} - -// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential -// file access. Below is the original comment from golang: -// TempFile creates a new temporary file in the directory dir -// with a name beginning with prefix, opens the file for reading -// and writing, and returns the resulting *os.File. -// If dir is the empty string, TempFile uses the default directory -// for temporary files (see os.TempDir). -// Multiple programs calling TempFile simultaneously -// will not choose the same file. The caller can use f.Name() -// to find the pathname of the file. 
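The nextSuffix generator above is a classic linear congruential generator (the constants come from Numerical Recipes), seeded from the clock and PID. A standalone sketch (single-goroutine, so the original's mutex is dropped):

package main

import (
	"fmt"
	"os"
	"strconv"
	"time"
)

var lcgState uint32

// nextSuffix reproduces the deleted generator: advance the LCG, then format
// the low nine decimal digits with zero padding.
func nextSuffix() string {
	r := lcgState
	if r == 0 {
		r = uint32(time.Now().UnixNano() + int64(os.Getpid()))
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	lcgState = r
	// 1e9 + r%1e9 is always ten digits; dropping the leading '1' leaves a
	// zero-padded nine-digit suffix.
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

func main() {
	fmt.Println("tmpfile-" + nextSuffix())
}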
It is the caller's responsibility -// to remove the file when no longer needed. -func TempFileSequential(dir, prefix string) (f *os.File, err error) { - if dir == "" { - dir = os.TempDir() - } - - nconflict := 0 - for i := 0; i < 10000; i++ { - name := filepath.Join(dir, prefix+nextSuffix()) - f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if os.IsExist(err) { - if nconflict++; nconflict > 10 { - randmu.Lock() - rand = reseed() - randmu.Unlock() - } - continue - } - break - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go deleted file mode 100644 index 17935088de..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "time" - "unsafe" -) - -// Used by chtimes -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go deleted file mode 100644 index 019c66441c..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/init_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import "os" - -// LCOWSupported determines if Linux Containers on Windows are supported. -// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft -var lcowSupported = false - -func init() { - // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { - lcowSupported = true - } - -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go deleted file mode 100644 index cff33bb408..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go deleted file mode 100644 index e54d01e696..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return lcowSupported -} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go deleted file mode 100644 index bd23c4d50b..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go deleted file mode 100644 index e51df0dafe..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. -func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e67..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go deleted file mode 100644 index 385f1d5e73..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
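The error check below closes the scan loop above; the field handling itself is compact enough to demonstrate standalone (the sample values are illustrative):

package main

import (
	"bufio"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	sample := "MemTotal:       16384256 kB\nMemFree:         1234567 kB\nBogus line\n"
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		// Expected shape, as in the deleted parseMemInfo: ["MemTotal:", "1234", "kB"]
		parts := strings.Fields(scanner.Text())
		if len(parts) < 3 || parts[2] != "kB" { // skip malformed entries
			continue
		}
		kb, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		fmt.Printf("%s %d bytes\n", parts[0], int64(kb)*1024)
	}
}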
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go deleted file mode 100644 index 925776e789..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo CFLAGS: -std=c99 -// #cgo LDFLAGS: -lkstat -// #include <unistd.h> -// #include <stdlib.h> -// #include <stdio.h> -// #include <kstat.h> -// #include <sys/swap.h> -// #include <sys/sysinfo.h> -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 3ce019dffd..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux,!windows,!solaris - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go deleted file mode 100644 index 883944a4c5..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package system - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
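The dwLength that the implementation below passes to GlobalMemoryStatusEx is the byte size of the MEMORYSTATUSEX structure above; a quick check confirms the constant (two uint32 words plus seven uint64 fields, no padding):

package main

import (
	"fmt"
	"unsafe"
)

// memorystatusex mirrors the deleted struct field-for-field.
type memorystatusex struct {
	dwLength                uint32
	dwMemoryLoad            uint32
	ullTotalPhys            uint64
	ullAvailPhys            uint64
	ullTotalPageFile        uint64
	ullAvailPageFile        uint64
	ullTotalVirtual         uint64
	ullAvailVirtual         uint64
	ullAvailExtendedVirtual uint64
}

func main() {
	fmt.Println(unsafe.Sizeof(memorystatusex{})) // 64
}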
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go deleted file mode 100644 index af79a65383..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return unix.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c0215..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go deleted file mode 100644 index f634a6be67..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path.go +++ /dev/null @@ -1,21 +0,0 @@ -package system - -import "runtime" - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -func DefaultPathEnv(platform string) string { - if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. - return "" - } - return defaultUnixPathEnv - -} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go deleted file mode 100644 index f3762e69d3..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. 
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go deleted file mode 100644 index aab891522d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go deleted file mode 100644 index 26c8b42c17..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build linux freebsd solaris darwin - -package system - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - unix.Kill(pid, unix.SIGKILL) -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go deleted file mode 100644 index fc03c3e6b6..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ /dev/null @@ -1,80 +0,0 @@ -package system - -import ( - "os" - "syscall" - "time" - - "github.com/containers/storage/pkg/mount" - "github.com/pkg/errors" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. 
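The retry discipline described here and implemented just below can be reduced to a few lines. A sketch that keeps only the EBUSY retry (the recursive-unmount and per-path bookkeeping of the original are omitted):

package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

// removeAllWithRetry keeps calling os.RemoveAll while the failure is EBUSY,
// up to a bounded number of attempts, sleeping briefly between tries.
func removeAllWithRetry(dir string, maxRetry int) error {
	for attempt := 0; ; attempt++ {
		err := os.RemoveAll(dir)
		if err == nil {
			return nil
		}
		pe, ok := err.(*os.PathError)
		if !ok || pe.Err != syscall.EBUSY || attempt >= maxRetry {
			return err
		}
		time.Sleep(100 * time.Millisecond) // give the busy mount a moment
	}
}

func main() {
	fmt.Println(removeAllWithRetry(os.TempDir()+"/gone", 5))
}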
-// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 5 - - // Attempt to unmount anything beneath this dir first - mount.RecursiveUnmount(dir) - - for { - err := os.RemoveAll(dir) - if err == nil { - return err - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if pe.Err != syscall.EBUSY { - return err - } - - if mounted, _ := mount.Mounted(pe.Path); mounted { - if e := mount.Unmount(pe.Path); e != nil { - if mounted, _ := mount.Mounted(pe.Path); mounted { - return errors.Wrapf(e, "error while removing %s", dir) - } - } - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(100 * time.Millisecond) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go deleted file mode 100644 index 715f05b938..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go deleted file mode 100644 index 715f05b938..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go deleted file mode 100644 index 1939f95181..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
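The per-platform fromStatT variants around this point all follow one pattern: run the raw stat call and lift the fields into the package's value type. A Linux-only sketch of that pattern (field types differ on other platforms, which is exactly why the per-OS files being deleted here exist):

package main

import (
	"fmt"
	"syscall"
)

// statT holds the subset of fields the vendored StatT exposes.
type statT struct {
	mode uint32
	uid  uint32
	gid  uint32
	size int64
}

// lstat runs syscall.Lstat and lifts the raw Stat_t fields; on Linux the
// types line up without casts.
func lstat(path string) (*statT, error) {
	var s syscall.Stat_t
	if err := syscall.Lstat(path, &s); err != nil {
		return nil, err
	}
	return &statT{mode: s.Mode, uid: s.Uid, gid: s.Gid, size: s.Size}, nil
}

func main() {
	st, err := lstat("/tmp")
	fmt.Println(st, err)
}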
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go deleted file mode 100644 index b607dea946..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go deleted file mode 100644 index b607dea946..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go deleted file mode 100644 index 91c7d121cc..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go deleted file mode 100644 index 6c63972682..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ /dev/null @@ -1,49 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. -type StatT struct { - mode os.FileMode - size int64 - mtim time.Time -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) -} - -// Mtim returns file's last modification time. 
-func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime()}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go deleted file mode 100644 index 49dbdd3781..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux freebsd - -package system - -import "golang.org/x/sys/unix" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. -func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go deleted file mode 100644 index 23e9b207c7..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go +++ /dev/null @@ -1,122 +0,0 @@ -package system - -import ( - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. 
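The bit layout that GetOSVersion above decodes is fixed by GetVersion(): major version in the low byte, minor in the next byte, and the build number in the high 16 bits. A standalone illustration with a made-up dword corresponding to Windows 10 build 14393:

package main

import "fmt"

func main() {
	const dwVersion uint32 = 14393<<16 | 0<<8 | 10
	major := uint8(dwVersion & 0xFF)
	minor := uint8(dwVersion >> 8 & 0xFF)
	build := uint16(dwVersion >> 16)
	fmt.Println(major, minor, build) // 10 0 14393
}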
diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go
deleted file mode 100644
index 5a10eda5af..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/umask.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-	"golang.org/x/sys/unix"
-)
-
-// Umask sets current process's file mode creation mask to newmask
-// and returns oldmask.
-func Umask(newmask int) (oldmask int, err error) {
-	return unix.Umask(newmask), nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go
deleted file mode 100644
index 13f1de1769..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package system
-
-// Umask is not supported on the windows platform.
-func Umask(newmask int) (oldmask int, err error) {
-	// should not be called on cli code path
-	return 0, ErrNotSupportedPlatform
-}
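The removed Umask helper was a thin veneer over the raw syscall. A minimal sketch of the usual save-and-restore pattern, written directly against golang.org/x/sys/unix:

package main

import "golang.org/x/sys/unix"

func main() {
	// unix.Umask is what the deleted system.Umask wrapped: it installs
	// newmask and returns the previous mask.
	old := unix.Umask(0022)
	defer unix.Umask(old) // restore the caller's mask on the way out

	// ... create files here with group/other write bits masked off ...
}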
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
deleted file mode 100644
index 6a77524376..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package system
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	var _path *byte
-	_path, err := unix.BytePtrFromString(path)
-	if err != nil {
-		return err
-	}
-
-	if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
deleted file mode 100644
index edc588a63f..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package system
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// LUtimesNano is used to change access and modification time of the specified path.
-// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	atFdCwd := unix.AT_FDCWD
-
-	var _path *byte
-	_path, err := unix.BytePtrFromString(path)
-	if err != nil {
-		return err
-	}
-	if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
deleted file mode 100644
index 139714544d..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !linux,!freebsd
-
-package system
-
-import "syscall"
-
-// LUtimesNano is only supported on linux and freebsd.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-	return ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
deleted file mode 100644
index 98b111be42..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package system
-
-import "golang.org/x/sys/unix"
-
-// Lgetxattr retrieves the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-// It will returns a nil slice and nil error if the xattr is not set.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-	dest := make([]byte, 128)
-	sz, errno := unix.Lgetxattr(path, attr, dest)
-	if errno == unix.ENODATA {
-		return nil, nil
-	}
-	if errno == unix.ERANGE {
-		dest = make([]byte, sz)
-		sz, errno = unix.Lgetxattr(path, attr, dest)
-	}
-	if errno != nil {
-		return nil, errno
-	}
-
-	return dest[:sz], nil
-}
-
-// Lsetxattr sets the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-	return unix.Lsetxattr(path, attr, data, flags)
-}
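The Lgetxattr just removed sizes its buffer by retrying after ERANGE. One equivalent way to satisfy the same contract, sketched against golang.org/x/sys/unix on Linux (the probe-then-read shape here is an illustrative choice, not taken from this diff):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// lgetxattr returns (nil, nil) when the attribute is unset, matching
// the documented behavior of the deleted helper.
func lgetxattr(path, attr string) ([]byte, error) {
	// A zero-length dest makes Lgetxattr report the value's current size.
	sz, err := unix.Lgetxattr(path, attr, nil)
	if err == unix.ENODATA {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	dest := make([]byte, sz)
	sz, err = unix.Lgetxattr(path, attr, dest)
	if err != nil {
		return nil, err
	}
	return dest[:sz], nil
}

func main() {
	v, err := lgetxattr("/tmp", "user.example")
	fmt.Printf("%q %v\n", v, err)
}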
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
deleted file mode 100644
index 0114f2227c..0000000000
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !linux
-
-package system
-
-// Lgetxattr is not supported on platforms other than linux.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-	return nil, ErrNotSupportedPlatform
-}
-
-// Lsetxattr is not supported on platforms other than linux.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-	return ErrNotSupportedPlatform
-}
diff --git a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go b/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
deleted file mode 100644
index 74776e65e6..0000000000
--- a/vendor/github.com/containers/storage/pkg/truncindex/truncindex.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Package truncindex provides a general 'index tree', used by Docker
-// in order to be able to reference containers by only a few unambiguous
-// characters of their id.
-package truncindex
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-	"sync"
-
-	"github.com/tchap/go-patricia/patricia"
-)
-
-var (
-	// ErrEmptyPrefix is an error returned if the prefix was empty.
-	ErrEmptyPrefix = errors.New("Prefix can't be empty")
-
-	// ErrIllegalChar is returned when a space is in the ID
-	ErrIllegalChar = errors.New("illegal character: ' '")
-
-	// ErrNotExist is returned when ID or its prefix not found in index.
-	ErrNotExist = errors.New("ID does not exist")
-)
-
-// ErrAmbiguousPrefix is returned if the prefix was ambiguous
-// (multiple ids for the prefix).
-type ErrAmbiguousPrefix struct {
-	prefix string
-}
-
-func (e ErrAmbiguousPrefix) Error() string {
-	return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix)
-}
-
-// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes.
-// This is used to retrieve image and container IDs by more convenient shorthand prefixes.
-type TruncIndex struct {
-	sync.RWMutex
-	trie *patricia.Trie
-	ids  map[string]struct{}
-}
-
-// NewTruncIndex creates a new TruncIndex and initializes with a list of IDs.
-func NewTruncIndex(ids []string) (idx *TruncIndex) {
-	idx = &TruncIndex{
-		ids: make(map[string]struct{}),
-
-		// Change patricia max prefix per node length,
-		// because our len(ID) always 64
-		trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)),
-	}
-	for _, id := range ids {
-		idx.addID(id)
-	}
-	return
-}
-
-func (idx *TruncIndex) addID(id string) error {
-	if strings.Contains(id, " ") {
-		return ErrIllegalChar
-	}
-	if id == "" {
-		return ErrEmptyPrefix
-	}
-	if _, exists := idx.ids[id]; exists {
-		return fmt.Errorf("id already exists: '%s'", id)
-	}
-	idx.ids[id] = struct{}{}
-	if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted {
-		return fmt.Errorf("failed to insert id: %s", id)
-	}
-	return nil
-}
-
-// Add adds a new ID to the TruncIndex.
-func (idx *TruncIndex) Add(id string) error {
-	idx.Lock()
-	defer idx.Unlock()
-	return idx.addID(id)
-}
-
-// Delete removes an ID from the TruncIndex. If there are multiple IDs
-// with the given prefix, an error is thrown.
-func (idx *TruncIndex) Delete(id string) error {
-	idx.Lock()
-	defer idx.Unlock()
-	if _, exists := idx.ids[id]; !exists || id == "" {
-		return fmt.Errorf("no such id: '%s'", id)
-	}
-	delete(idx.ids, id)
-	if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted {
-		return fmt.Errorf("no such id: '%s'", id)
-	}
-	return nil
-}
-
-// Get retrieves an ID from the TruncIndex. If there are multiple IDs
-// with the given prefix, an error is thrown.
-func (idx *TruncIndex) Get(s string) (string, error) {
-	if s == "" {
-		return "", ErrEmptyPrefix
-	}
-	var (
-		id string
-	)
-	subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error {
-		if id != "" {
-			// we haven't found the ID if there are two or more IDs
-			id = ""
-			return ErrAmbiguousPrefix{prefix: string(prefix)}
-		}
-		id = string(prefix)
-		return nil
-	}
-
-	idx.RLock()
-	defer idx.RUnlock()
-	if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil {
-		return "", err
-	}
-	if id != "" {
-		return id, nil
-	}
-	return "", ErrNotExist
-}
-
-// Iterate iterates over all stored IDs and passes each of them to the given
-// handler. Take care that the handler method does not call any public
-// method on truncindex as the internal locking is not reentrant/recursive
-// and will result in deadlock.
-func (idx *TruncIndex) Iterate(handler func(id string)) {
-	idx.Lock()
-	defer idx.Unlock()
-	idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
-		handler(string(prefix))
-		return nil
-	})
-}
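truncindex is the piece that let callers address layers and containers by short ID prefixes. A usage sketch as the package worked before its removal (IDs shortened for readability; real callers pass 64-character IDs):

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/truncindex"
)

func main() {
	idx := truncindex.NewTruncIndex([]string{
		"d1ca1b19f0c2aef2b4f9a942ddace757",
		"d1ff9a942ddace7571b190fb2cd29b4f",
	})
	id, err := idx.Get("d1c") // unique prefix: resolves to the full ID
	fmt.Println(id, err)
	_, err = idx.Get("d1") // shared prefix: yields ErrAmbiguousPrefix
	fmt.Println(err)
}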
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go
deleted file mode 100644
index a31a08b2a4..0000000000
--- a/vendor/github.com/containers/storage/store.go
+++ /dev/null
@@ -1,2425 +0,0 @@
-package storage
-
-import (
-	"encoding/base64"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-
-	// register all of the built-in drivers
-	_ "github.com/containers/storage/drivers/register"
-
-	"github.com/BurntSushi/toml"
-	drivers "github.com/containers/storage/drivers"
-	"github.com/containers/storage/pkg/archive"
-	"github.com/containers/storage/pkg/idtools"
-	"github.com/containers/storage/pkg/ioutils"
-	"github.com/containers/storage/pkg/stringid"
-	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
-)
-
-var (
-	// DefaultStoreOptions is a reasonable default set of options.
-	DefaultStoreOptions StoreOptions
-	stores              []*store
-	storesLock          sync.Mutex
-)
-
-// ROFileBasedStore wraps up the methods of the various types of file-based
-// data stores that we implement which are needed for both read-only and
-// read-write files.
-type ROFileBasedStore interface {
-	Locker
-
-	// Load reloads the contents of the store from disk. It should be called
-	// with the lock held.
-	Load() error
-}
-
-// RWFileBasedStore wraps up the methods of various types of file-based data
-// stores that we implement using read-write files.
-type RWFileBasedStore interface {
-	// Save saves the contents of the store to disk. It should be called with
-	// the lock held, and Touch() should be called afterward before releasing the
-	// lock.
-	Save() error
-}
-
-// FileBasedStore wraps up the common methods of various types of file-based
-// data stores that we implement.
-type FileBasedStore interface {
-	ROFileBasedStore
-	RWFileBasedStore
-}
-
-// ROMetadataStore wraps a method for reading metadata associated with an ID.
-type ROMetadataStore interface {
-	// Metadata reads metadata associated with an item with the specified ID.
-	Metadata(id string) (string, error)
-}
-
-// RWMetadataStore wraps a method for setting metadata associated with an ID.
-type RWMetadataStore interface {
-	// SetMetadata updates the metadata associated with the item with the specified ID.
-	SetMetadata(id, metadata string) error
-}
-
-// MetadataStore wraps up methods for getting and setting metadata associated with IDs.
-type MetadataStore interface {
-	ROMetadataStore
-	RWMetadataStore
-}
-
-// An ROBigDataStore wraps up the read-only big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type ROBigDataStore interface {
-	// BigData retrieves a (potentially large) piece of data associated with
-	// this ID, if it has previously been set.
-	BigData(id, key string) ([]byte, error)
-
-	// BigDataSize retrieves the size of a (potentially large) piece of
-	// data associated with this ID, if it has previously been set.
-	BigDataSize(id, key string) (int64, error)
-
-	// BigDataDigest retrieves the digest of a (potentially large) piece of
-	// data associated with this ID, if it has previously been set.
-	BigDataDigest(id, key string) (digest.Digest, error)
-
-	// BigDataNames() returns a list of the names of previously-stored pieces of
-	// data.
-	BigDataNames(id string) ([]string, error)
-}
-
-// A RWBigDataStore wraps up the read-write big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type RWBigDataStore interface {
-	// SetBigData stores a (potentially large) piece of data associated with this
-	// ID.
-	SetBigData(id, key string, data []byte) error
-}
-
-// A BigDataStore wraps up the most common big-data related methods of the
-// various types of file-based lookaside stores that we implement.
-type BigDataStore interface {
-	ROBigDataStore
-	RWBigDataStore
-}
-
-// A FlaggableStore can have flags set and cleared on items which it manages.
-type FlaggableStore interface {
-	// ClearFlag removes a named flag from an item in the store.
-	ClearFlag(id string, flag string) error
-
-	// SetFlag sets a named flag and its value on an item in the store.
-	SetFlag(id string, flag string, value interface{}) error
-}
-
-// StoreOptions is used for passing initialization options to GetStore(), for
-// initializing a Store object and the underlying storage that it controls.
-type StoreOptions struct { - // RunRoot is the filesystem path under which we can store run-time - // information, such as the locations of active mount points, that we - // want to lose if the host is rebooted. - RunRoot string `json:"runroot,omitempty"` - // GraphRoot is the filesystem path under which we will store the - // contents of layers, images, and containers. - GraphRoot string `json:"root,omitempty"` - // GraphDriverName is the underlying storage driver that we'll be - // using. It only needs to be specified the first time a Store is - // initialized for a given RunRoot and GraphRoot. - GraphDriverName string `json:"driver,omitempty"` - // GraphDriverOptions are driver-specific options. - GraphDriverOptions []string `json:"driver-options,omitempty"` - // UIDMap and GIDMap are used mainly for deciding on the ownership of - // files in layers as they're stored on disk, which is often necessary - // when user namespaces are being used. - UIDMap []idtools.IDMap `json:"uidmap,omitempty"` - GIDMap []idtools.IDMap `json:"gidmap,omitempty"` -} - -// Store wraps up the various types of file-based stores that we use into a -// singleton object that initializes and manages them all together. -type Store interface { - // RunRoot, GraphRoot, GraphDriverName, and GraphOptions retrieve - // settings that were passed to GetStore() when the object was created. - RunRoot() string - GraphRoot() string - GraphDriverName() string - GraphOptions() []string - - // GraphDriver obtains and returns a handle to the graph Driver object used - // by the Store. - GraphDriver() (drivers.Driver, error) - - // CreateLayer creates a new layer in the underlying storage driver, - // optionally having the specified ID (one will be assigned if none is - // specified), with the specified layer (or no layer) as its parent, - // and with optional names. (The writeable flag is ignored.) - CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) - - // PutLayer combines the functions of CreateLayer and ApplyDiff, - // marking the layer for automatic removal if applying the diff fails - // for any reason. - // - // Note that we do some of this work in a child process. The calling - // process's main() function needs to import our pkg/reexec package and - // should begin with something like this in order to allow us to - // properly start that child process: - // if reexec.Init { - // return - // } - PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) - - // CreateImage creates a new image, optionally with the specified ID - // (one will be assigned if none is specified), with optional names, - // referring to a specified image, and with optional metadata. An - // image is a record which associates the ID of a layer with a - // additional bookkeeping information which the library stores for the - // convenience of its caller. - CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) - - // CreateContainer creates a new container, optionally with the - // specified ID (one will be assigned if none is specified), with - // optional names, using the specified image's top layer as the basis - // for the container's layer, and assigning the specified ID to that - // layer (one will be created if none is specified). A container is a - // layer which is associated with additional bookkeeping information - // which the library stores for the convenience of its caller. 
- CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) - - // Metadata retrieves the metadata which is associated with a layer, - // image, or container (whichever the passed-in ID refers to). - Metadata(id string) (string, error) - - // SetMetadata updates the metadata which is associated with a layer, - // image, or container (whichever the passed-in ID refers to) to match - // the specified value. The metadata value can be retrieved at any - // time using Metadata, or using Layer, Image, or Container and reading - // the object directly. - SetMetadata(id, metadata string) error - - // Exists checks if there is a layer, image, or container which has the - // passed-in ID or name. - Exists(id string) bool - - // Status asks for a status report, in the form of key-value pairs, - // from the underlying storage driver. The contents vary from driver - // to driver. - Status() ([][2]string, error) - - // Delete removes the layer, image, or container which has the - // passed-in ID or name. Note that no safety checks are performed, so - // this can leave images with references to layers which do not exist, - // and layers with references to parents which no longer exist. - Delete(id string) error - - // DeleteLayer attempts to remove the specified layer. If the layer is the - // parent of any other layer, or is referred to by any images, it will return - // an error. - DeleteLayer(id string) error - - // DeleteImage removes the specified image if it is not referred to by - // any containers. If its top layer is then no longer referred to by - // any other images and is not the parent of any other layers, its top - // layer will be removed. If that layer's parent is no longer referred - // to by any other images and is not the parent of any other layers, - // then it, too, will be removed. This procedure will be repeated - // until a layer which should not be removed, or the base layer, is - // reached, at which point the list of removed layers is returned. If - // the commit argument is false, the image and layers are not removed, - // but the list of layers which would be removed is still returned. - DeleteImage(id string, commit bool) (layers []string, err error) - - // DeleteContainer removes the specified container and its layer. If - // there is no matching container, or if the container exists but its - // layer does not, an error will be returned. - DeleteContainer(id string) error - - // Wipe removes all known layers, images, and containers. - Wipe() error - - // Mount attempts to mount a layer, image, or container for access, and - // returns the pathname if it succeeds. - // - // Note that we do some of this work in a child process. The calling - // process's main() function needs to import our pkg/reexec package and - // should begin with something like this in order to allow us to - // properly start that child process: - // if reexec.Init { - // return - // } - Mount(id, mountLabel string) (string, error) - - // Unmount attempts to unmount a layer, image, or container, given an ID, a - // name, or a mount path. - Unmount(id string) error - - // Changes returns a summary of the changes which would need to be made - // to one layer to make its contents the same as a second layer. If - // the first layer is not specified, the second layer's parent is - // assumed. Each Change structure contains a Path relative to the - // layer's root directory, and a Kind which is either ChangeAdd, - // ChangeModify, or ChangeDelete. 
- Changes(from, to string) ([]archive.Change, error) - - // DiffSize returns a count of the size of the tarstream which would - // specify the changes returned by Changes. - DiffSize(from, to string) (int64, error) - - // Diff returns the tarstream which would specify the changes returned - // by Changes. If options are passed in, they can override default - // behaviors. - Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) - - // ApplyDiff applies a tarstream to a layer. Information about the - // tarstream is cached with the layer. Typically, a layer which is - // populated using a tarstream will be expected to not be modified in - // any other way, either before or after the diff is applied. - // - // Note that we do some of this work in a child process. The calling - // process's main() function needs to import our pkg/reexec package and - // should begin with something like this in order to allow us to - // properly start that child process: - // if reexec.Init { - // return - // } - ApplyDiff(to string, diff io.Reader) (int64, error) - - // LayersByCompressedDigest returns a slice of the layers with the - // specified compressed digest value recorded for them. - LayersByCompressedDigest(d digest.Digest) ([]Layer, error) - - // LayersByUncompressedDigest returns a slice of the layers with the - // specified uncompressed digest value recorded for them. - LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) - - // LayerSize returns a cached approximation of the layer's size, or -1 - // if we don't have a value on hand. - LayerSize(id string) (int64, error) - - // Layers returns a list of the currently known layers. - Layers() ([]Layer, error) - - // Images returns a list of the currently known images. - Images() ([]Image, error) - - // Containers returns a list of the currently known containers. - Containers() ([]Container, error) - - // Names returns the list of names for a layer, image, or container. - Names(id string) ([]string, error) - - // SetNames changes the list of names for a layer, image, or container. - // Duplicate names are removed from the list automatically. - SetNames(id string, names []string) error - - // ListImageBigData retrieves a list of the (possibly large) chunks of - // named data associated with an image. - ListImageBigData(id string) ([]string, error) - - // ImageBigData retrieves a (possibly large) chunk of named data - // associated with an image. - ImageBigData(id, key string) ([]byte, error) - - // ImageBigDataSize retrieves the size of a (possibly large) chunk - // of named data associated with an image. - ImageBigDataSize(id, key string) (int64, error) - - // ImageBigDataDigest retrieves the digest of a (possibly large) chunk - // of named data associated with an image. - ImageBigDataDigest(id, key string) (digest.Digest, error) - - // SetImageBigData stores a (possibly large) chunk of named data associated - // with an image. - SetImageBigData(id, key string, data []byte) error - - // ListContainerBigData retrieves a list of the (possibly large) chunks of - // named data associated with a container. - ListContainerBigData(id string) ([]string, error) - - // ContainerBigData retrieves a (possibly large) chunk of named data - // associated with a container. - ContainerBigData(id, key string) ([]byte, error) - - // ContainerBigDataSize retrieves the size of a (possibly large) - // chunk of named data associated with a container. 
- ContainerBigDataSize(id, key string) (int64, error) - - // ContainerBigDataDigest retrieves the digest of a (possibly large) - // chunk of named data associated with a container. - ContainerBigDataDigest(id, key string) (digest.Digest, error) - - // SetContainerBigData stores a (possibly large) chunk of named data - // associated with a container. - SetContainerBigData(id, key string, data []byte) error - - // Layer returns a specific layer. - Layer(id string) (*Layer, error) - - // Image returns a specific image. - Image(id string) (*Image, error) - - // ImagesByTopLayer returns a list of images which reference the specified - // layer as their top layer. They will have different IDs and names - // and may have different metadata, big data items, and flags. - ImagesByTopLayer(id string) ([]*Image, error) - - // ImagesByDigest returns a list of images which contain a big data item - // named ImageDigestBigDataKey whose contents have the specified digest. - ImagesByDigest(d digest.Digest) ([]*Image, error) - - // Container returns a specific container. - Container(id string) (*Container, error) - - // ContainerByLayer returns a specific container based on its layer ID or - // name. - ContainerByLayer(id string) (*Container, error) - - // ContainerDirectory returns a path of a directory which the caller - // can use to store data, specific to the container, which the library - // does not directly manage. The directory will be deleted when the - // container is deleted. - ContainerDirectory(id string) (string, error) - - // SetContainerDirectoryFile is a convenience function which stores - // a piece of data in the specified file relative to the container's - // directory. - SetContainerDirectoryFile(id, file string, data []byte) error - - // FromContainerDirectory is a convenience function which reads - // the contents of the specified file relative to the container's - // directory. - FromContainerDirectory(id, file string) ([]byte, error) - - // ContainerRunDirectory returns a path of a directory which the - // caller can use to store data, specific to the container, which the - // library does not directly manage. The directory will be deleted - // when the host system is restarted. - ContainerRunDirectory(id string) (string, error) - - // SetContainerRunDirectoryFile is a convenience function which stores - // a piece of data in the specified file relative to the container's - // run directory. - SetContainerRunDirectoryFile(id, file string, data []byte) error - - // FromContainerRunDirectory is a convenience function which reads - // the contents of the specified file relative to the container's run - // directory. - FromContainerRunDirectory(id, file string) ([]byte, error) - - // Lookup returns the ID of a layer, image, or container with the specified - // name or ID. - Lookup(name string) (string, error) - - // Shutdown attempts to free any kernel resources which are being used - // by the underlying driver. If "force" is true, any mounted (i.e., in - // use) layers are unmounted beforehand. If "force" is not true, then - // layers being in use is considered to be an error condition. A list - // of still-mounted layers is returned along with possible errors. - Shutdown(force bool) (layers []string, err error) - - // Version returns version information, in the form of key-value pairs, from - // the storage package. - Version() ([][2]string, error) -} - -// ImageOptions is used for passing options to a Store's CreateImage() method. 
-type ImageOptions struct { - // CreationDate, if not zero, will override the default behavior of marking the image as having been - // created when CreateImage() was called, recording CreationDate instead. - CreationDate time.Time - // Digest is a hard-coded digest value that we can use to look up the image. It is optional. - Digest digest.Digest -} - -// ContainerOptions is used for passing options to a Store's CreateContainer() method. -type ContainerOptions struct { -} - -type store struct { - lastLoaded time.Time - runRoot string - graphLock Locker - graphRoot string - graphDriverName string - graphOptions []string - uidMap []idtools.IDMap - gidMap []idtools.IDMap - graphDriver drivers.Driver - layerStore LayerStore - roLayerStores []ROLayerStore - imageStore ImageStore - roImageStores []ROImageStore - containerStore ContainerStore -} - -// GetStore attempts to find an already-created Store object matching the -// specified location and graph driver, and if it can't, it creates and -// initializes a new Store object, and the underlying storage that it controls. -// -// If StoreOptions `options` haven't been fully populated, then DefaultStoreOptions are used. -// -// These defaults observe environment variables: -// * `STORAGE_DRIVER` for the name of the storage driver to attempt to use -// * `STORAGE_OPTS` for the string of options to pass to the driver -func GetStore(options StoreOptions) (Store, error) { - if options.RunRoot == "" && options.GraphRoot == "" && options.GraphDriverName == "" && len(options.GraphDriverOptions) == 0 { - options = DefaultStoreOptions - } - - if options.GraphRoot != "" { - options.GraphRoot = filepath.Clean(options.GraphRoot) - } - if options.RunRoot != "" { - options.RunRoot = filepath.Clean(options.RunRoot) - } - - storesLock.Lock() - defer storesLock.Unlock() - - for _, s := range stores { - if s.graphRoot == options.GraphRoot && (options.GraphDriverName == "" || s.graphDriverName == options.GraphDriverName) { - return s, nil - } - } - - if options.GraphRoot == "" { - return nil, ErrIncompleteOptions - } - if options.RunRoot == "" { - return nil, ErrIncompleteOptions - } - - if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { - return nil, err - } - for _, subdir := range []string{"mounts", "tmp", options.GraphDriverName} { - if err := os.MkdirAll(filepath.Join(options.GraphRoot, subdir), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - } - - graphLock, err := GetLockfile(filepath.Join(options.GraphRoot, "storage.lock")) - if err != nil { - return nil, err - } - s := &store{ - runRoot: options.RunRoot, - graphLock: graphLock, - graphRoot: options.GraphRoot, - graphDriverName: options.GraphDriverName, - graphOptions: options.GraphDriverOptions, - uidMap: copyIDMap(options.UIDMap), - gidMap: copyIDMap(options.GIDMap), - } - if err := s.load(); err != nil { - return nil, err - } - - stores = append(stores, s) - - return s, nil -} - -func copyIDMap(idmap []idtools.IDMap) []idtools.IDMap { - m := []idtools.IDMap{} - if idmap != nil { - m = make([]idtools.IDMap, len(idmap)) - copy(m, idmap) - } - if len(m) > 0 { - return m[:] - } - return nil -} - -func (s *store) RunRoot() string { - return s.runRoot -} - -func (s *store) GraphDriverName() string { - return s.graphDriverName -} - -func (s *store) GraphRoot() string { - return s.graphRoot -} - -func (s *store) GraphOptions() []string { - return s.graphOptions -} - 
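The GetStore documentation above describes the supported entry point into this store. A sketch of how a caller would have initialized it before this removal (paths illustrative; the reexec guard follows the child-process note repeated throughout the Store docs):

package main

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/containers/storage/pkg/reexec"
)

func main() {
	if reexec.Init() {
		return // child-process hook required by Mount/ApplyDiff/PutLayer
	}
	store, err := storage.GetStore(storage.StoreOptions{
		RunRoot:   "/var/run/containers/storage",
		GraphRoot: "/var/lib/containers/storage",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(store.GraphDriverName())
}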
-func (s *store) load() error { - driver, err := s.GraphDriver() - if err != nil { - return err - } - s.graphDriver = driver - s.graphDriverName = driver.String() - driverPrefix := s.graphDriverName + "-" - - rls, err := s.LayerStore() - if err != nil { - return err - } - s.layerStore = rls - if _, err := s.ROLayerStores(); err != nil { - return err - } - - gipath := filepath.Join(s.graphRoot, driverPrefix+"images") - if err := os.MkdirAll(gipath, 0700); err != nil { - return err - } - ris, err := newImageStore(gipath) - if err != nil { - return err - } - s.imageStore = ris - if _, err := s.ROImageStores(); err != nil { - return err - } - - gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers") - if err := os.MkdirAll(gcpath, 0700); err != nil { - return err - } - rcs, err := newContainerStore(gcpath) - if err != nil { - return err - } - rcpath := filepath.Join(s.runRoot, driverPrefix+"containers") - if err := os.MkdirAll(rcpath, 0700); err != nil { - return err - } - s.containerStore = rcs - return nil -} - -func (s *store) getGraphDriver() (drivers.Driver, error) { - if s.graphDriver != nil { - return s.graphDriver, nil - } - config := drivers.Options{ - Root: s.graphRoot, - DriverOptions: s.graphOptions, - UIDMaps: s.uidMap, - GIDMaps: s.gidMap, - } - driver, err := drivers.New(s.graphDriverName, config) - if err != nil { - return nil, err - } - s.graphDriver = driver - s.graphDriverName = driver.String() - return driver, nil -} - -func (s *store) GraphDriver() (drivers.Driver, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() - } - return s.getGraphDriver() -} - -// LayerStore obtains and returns a handle to the writeable layer store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) LayerStore() (LayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.graphLock.TouchedSince(s.lastLoaded) { - s.graphDriver = nil - s.layerStore = nil - s.lastLoaded = time.Now() - } - if s.layerStore != nil { - return s.layerStore, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err - } - driverPrefix := s.graphDriverName + "-" - rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { - return nil, err - } - glpath := filepath.Join(s.graphRoot, driverPrefix+"layers") - if err := os.MkdirAll(glpath, 0700); err != nil { - return nil, err - } - rls, err := newLayerStore(rlpath, glpath, driver) - if err != nil { - return nil, err - } - s.layerStore = rls - return s.layerStore, nil -} - -// ROLayerStores obtains additional read/only layer store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not part of the exported Store interface. 
-func (s *store) ROLayerStores() ([]ROLayerStore, error) { - s.graphLock.Lock() - defer s.graphLock.Unlock() - if s.roLayerStores != nil { - return s.roLayerStores, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err - } - driverPrefix := s.graphDriverName + "-" - rlpath := filepath.Join(s.runRoot, driverPrefix+"layers") - if err := os.MkdirAll(rlpath, 0700); err != nil { - return nil, err - } - for _, store := range driver.AdditionalImageStores() { - glpath := filepath.Join(store, driverPrefix+"layers") - rls, err := newROLayerStore(rlpath, glpath, driver) - if err != nil { - return nil, err - } - s.roLayerStores = append(s.roLayerStores, rls) - } - return s.roLayerStores, nil -} - -// ImageStore obtains and returns a handle to the writable image store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) ImageStore() (ImageStore, error) { - if s.imageStore != nil { - return s.imageStore, nil - } - return nil, ErrLoadError -} - -// ROImageStores obtains additional read/only image store objects used by the -// Store. Accessing these stores directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) ROImageStores() ([]ROImageStore, error) { - if len(s.roImageStores) != 0 { - return s.roImageStores, nil - } - driver, err := s.getGraphDriver() - if err != nil { - return nil, err - } - driverPrefix := s.graphDriverName + "-" - for _, store := range driver.AdditionalImageStores() { - gipath := filepath.Join(store, driverPrefix+"images") - ris, err := newROImageStore(gipath) - if err != nil { - return nil, err - } - s.roImageStores = append(s.roImageStores, ris) - } - return s.roImageStores, nil -} - -// ContainerStore obtains and returns a handle to the container store object -// used by the Store. Accessing this store directly will bypass locking and -// synchronization, so it is not a part of the exported Store interface. -func (s *store) ContainerStore() (ContainerStore, error) { - if s.containerStore != nil { - return s.containerStore, nil - } - return nil, ErrLoadError -} - -func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, diff io.Reader) (*Layer, int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, -1, err - } - rlstores, err := s.ROLayerStores() - if err != nil { - return nil, -1, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, -1, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - if id == "" { - id = stringid.GenerateRandomID() - } - if parent != "" { - var ilayer *Layer - for _, lstore := range append([]ROLayerStore{rlstore}, rlstores...) 
{ - if l, err := lstore.Get(parent); err == nil && l != nil { - ilayer = l - parent = ilayer.ID - break - } - } - if ilayer == nil { - return nil, -1, ErrLayerUnknown - } - containers, err := rcstore.Containers() - if err != nil { - return nil, -1, err - } - for _, container := range containers { - if container.LayerID == parent { - return nil, -1, ErrParentIsContainer - } - } - } - return rlstore.Put(id, parent, names, mountLabel, nil, writeable, nil, diff) -} - -func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool) (*Layer, error) { - layer, _, err := s.PutLayer(id, parent, names, mountLabel, writeable, nil) - return layer, err -} - -func (s *store) CreateImage(id string, names []string, layer, metadata string, options *ImageOptions) (*Image, error) { - if id == "" { - id = stringid.GenerateRandomID() - } - - if layer != "" { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - var ilayer *Layer - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - ilayer, err = store.Get(layer) - if err == nil { - break - } - } - if ilayer == nil { - return nil, ErrLayerUnknown - } - layer = ilayer.ID - } - - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - - creationDate := time.Now().UTC() - if options != nil && !options.CreationDate.IsZero() { - creationDate = options.CreationDate - } - - return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) -} - -func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - if id == "" { - id = stringid.GenerateRandomID() - } - - imageTopLayer := "" - imageID := "" - if image != "" { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - var cimage *Image - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - cimage, err = store.Get(image) - if err == nil { - break - } - } - if cimage == nil { - return nil, ErrImageUnknown - } - imageTopLayer = cimage.TopLayer - imageID = cimage.ID - } - clayer, err := rlstore.Create(layer, imageTopLayer, nil, "", nil, true) - if err != nil { - return nil, err - } - layer = clayer.ID - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - container, err := rcstore.Create(id, names, imageID, layer, metadata) - if err != nil || container == nil { - rlstore.Delete(layer) - } - return container, err -} - -func (s *store) SetMetadata(id, metadata string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - if rlstore.Exists(id) { - return rlstore.SetMetadata(id, metadata) - } - if ristore.Exists(id) { - return ristore.SetMetadata(id, metadata) - } - if rcstore.Exists(id) { - return rcstore.SetMetadata(id, metadata) - } - return ErrNotAnID -} - -func (s *store) Metadata(id string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(id) { - return store.Metadata(id) - } - } - - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(id) { - return store.Metadata(id) - } - } - - cstore, err := s.ContainerStore() - if err != nil { - return "", err - } - cstore.Lock() - defer cstore.Unlock() - if modified, err := cstore.Modified(); modified || err != nil { - cstore.Load() - } - if cstore.Exists(id) { - return cstore.Metadata(id) - } - return "", ErrNotAnID -} - -func (s *store) ListImageBigData(id string) ([]string, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - bigDataNames, err := store.BigDataNames(id) - if err == nil { - return bigDataNames, err - } - } - return nil, ErrImageUnknown -} - -func (s *store) ImageBigDataSize(id, key string) (int64, error) { - istore, err := s.ImageStore() - if err != nil { - return -1, err - } - istores, err := s.ROImageStores() - if err != nil { - return -1, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - size, err := store.BigDataSize(id, key) - if err == nil { - return size, nil - } - } - return -1, ErrSizeUnknown -} - -func (s *store) ImageBigDataDigest(id, key string) (digest.Digest, error) { - ristore, err := s.ImageStore() - if err != nil { - return "", err - } - stores, err := s.ROImageStores() - if err != nil { - return "", err - } - stores = append([]ROImageStore{ristore}, stores...) - for _, ristore := range stores { - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - d, err := ristore.BigDataDigest(id, key) - if err == nil && d.Validate() == nil { - return d, nil - } - } - return "", ErrDigestUnknown -} - -func (s *store) ImageBigData(id, key string) ([]byte, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - data, err := store.BigData(id, key) - if err == nil { - return data, nil - } - } - return nil, ErrImageUnknown -} - -func (s *store) SetImageBigData(id, key string, data []byte) error { - ristore, err := s.ImageStore() - if err != nil { - return err - } - - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - - return ristore.SetBigData(id, key, data) -} - -func (s *store) ListContainerBigData(id string) ([]string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - return rcstore.BigDataNames(id) -} - -func (s *store) ContainerBigDataSize(id, key string) (int64, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return -1, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - return rcstore.BigDataSize(id, key) -} - -func (s *store) ContainerBigDataDigest(id, key string) (digest.Digest, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - return rcstore.BigDataDigest(id, key) -} - -func (s *store) ContainerBigData(id, key string) ([]byte, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - return rcstore.BigData(id, key) -} - -func (s *store) SetContainerBigData(id, key string, data 
[]byte) error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - return rcstore.SetBigData(id, key, data) -} - -func (s *store) Exists(id string) bool { - lstore, err := s.LayerStore() - if err != nil { - return false - } - lstores, err := s.ROLayerStores() - if err != nil { - return false - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(id) { - return true - } - } - - istore, err := s.ImageStore() - if err != nil { - return false - } - istores, err := s.ROImageStores() - if err != nil { - return false - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(id) { - return true - } - } - - rcstore, err := s.ContainerStore() - if err != nil { - return false - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - if rcstore.Exists(id) { - return true - } - - return false -} - -func dedupeNames(names []string) []string { - seen := make(map[string]bool) - deduped := make([]string, 0, len(names)) - for _, name := range names { - if _, wasSeen := seen[name]; !wasSeen { - seen[name] = true - deduped = append(deduped, name) - } - } - return deduped -} - -func (s *store) SetNames(id string, names []string) error { - deduped := dedupeNames(names) - - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - if rlstore.Exists(id) { - return rlstore.SetNames(id, deduped) - } - - ristore, err := s.ImageStore() - if err != nil { - return err - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - if ristore.Exists(id) { - return ristore.SetNames(id, deduped) - } - - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - if rcstore.Exists(id) { - return rcstore.SetNames(id, deduped) - } - return ErrLayerUnknown -} - -func (s *store) Names(id string) ([]string, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if l, err := store.Get(id); l != nil && err == nil { - return l.Names, nil - } - } - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if i, err := store.Get(id); i != nil && err == nil { - return i.Names, nil - } - } - - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - if c, err := rcstore.Get(id); c != nil && err == nil { - return c.Names, nil - } - return nil, ErrLayerUnknown -} - -func (s *store) Lookup(name string) (string, error) { - lstore, err := s.LayerStore() - if err != nil { - return "", err - } - lstores, err := s.ROLayerStores() - if err != nil { - return "", err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if l, err := store.Get(name); l != nil && err == nil { - return l.ID, nil - } - } - - istore, err := s.ImageStore() - if err != nil { - return "", err - } - istores, err := s.ROImageStores() - if err != nil { - return "", err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if i, err := store.Get(name); i != nil && err == nil { - return i.ID, nil - } - } - - cstore, err := s.ContainerStore() - if err != nil { - return "", err - } - cstore.Lock() - defer cstore.Unlock() - if modified, err := cstore.Modified(); modified || err != nil { - cstore.Load() - } - if c, err := cstore.Get(name); c != nil && err == nil { - return c.ID, nil - } - - return "", ErrLayerUnknown -} - -func (s *store) DeleteLayer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - if rlstore.Exists(id) { - if l, err := rlstore.Get(id); err != nil { - id = l.ID - } - layers, err := rlstore.Layers() - if err != nil { - return err - } - for _, layer := range layers { - if layer.Parent == id { - return ErrLayerHasChildren - } - } - images, err := ristore.Images() - if err != nil { - return err - } - for _, image := range images { - if image.TopLayer == id { - return errors.Wrapf(ErrLayerUsedByImage, "Layer %v used by image %v", id, image.ID) - } - } - containers, err := rcstore.Containers() - if err != nil { - return err - } - for _, container := range containers { - if container.LayerID == id { - return errors.Wrapf(ErrLayerUsedByContainer, "Layer %v used by container %v", id, container.ID) - } - } - return rlstore.Delete(id) - } - return ErrNotALayer -} - -func (s *store) DeleteImage(id string, commit bool) (layers []string, err error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - ristore, err := s.ImageStore() - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rlstore.Lock() - defer rlstore.Unlock() - if 
modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - layersToRemove := []string{} - if ristore.Exists(id) { - image, err := ristore.Get(id) - if err != nil { - return nil, err - } - id = image.ID - containers, err := rcstore.Containers() - if err != nil { - return nil, err - } - aContainerByImage := make(map[string]string) - for _, container := range containers { - aContainerByImage[container.ImageID] = container.ID - } - if container, ok := aContainerByImage[id]; ok { - return nil, errors.Wrapf(ErrImageUsedByContainer, "Image used by %v", container) - } - images, err := ristore.Images() - if err != nil { - return nil, err - } - layers, err := rlstore.Layers() - if err != nil { - return nil, err - } - childrenByParent := make(map[string]*[]string) - for _, layer := range layers { - parent := layer.Parent - if list, ok := childrenByParent[parent]; ok { - newList := append(*list, layer.ID) - childrenByParent[parent] = &newList - } else { - childrenByParent[parent] = &([]string{layer.ID}) - } - } - anyImageByTopLayer := make(map[string]string) - for _, img := range images { - if img.ID != id { - anyImageByTopLayer[img.TopLayer] = img.ID - } - } - if commit { - if err = ristore.Delete(id); err != nil { - return nil, err - } - } - layer := image.TopLayer - lastRemoved := "" - for layer != "" { - if rcstore.Exists(layer) { - break - } - if _, ok := anyImageByTopLayer[layer]; ok { - break - } - parent := "" - if l, err := rlstore.Get(layer); err == nil { - parent = l.Parent - } - otherRefs := 0 - if childList, ok := childrenByParent[layer]; ok && childList != nil { - children := *childList - for _, child := range children { - if child != lastRemoved { - otherRefs++ - } - } - } - if otherRefs != 0 { - break - } - lastRemoved = layer - layersToRemove = append(layersToRemove, lastRemoved) - layer = parent - } - } else { - return nil, ErrNotAnImage - } - if commit { - for _, layer := range layersToRemove { - if err = rlstore.Delete(layer); err != nil { - return nil, err - } - } - } - return layersToRemove, nil -} - -func (s *store) DeleteContainer(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - if rlstore.Exists(container.LayerID) { - if err = rlstore.Delete(container.LayerID); err != nil { - return err - } - if err = rcstore.Delete(id); err != nil { - return err - } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID) - if err = os.RemoveAll(gcpath); err != nil { - return err - } - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID) - if err = os.RemoveAll(rcpath); err != nil { - return err - } - 
return nil - } - return ErrNotALayer - } - } - return ErrNotAContainer -} - -func (s *store) Delete(id string) error { - rlstore, err := s.LayerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - if rcstore.Exists(id) { - if container, err := rcstore.Get(id); err == nil { - if rlstore.Exists(container.LayerID) { - if err = rlstore.Delete(container.LayerID); err != nil { - return err - } - if err = rcstore.Delete(id); err != nil { - return err - } - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(gcpath); err != nil { - return err - } - rcpath := filepath.Join(s.RunRoot(), middleDir, container.ID, "userdata") - if err = os.RemoveAll(rcpath); err != nil { - return err - } - return nil - } - return ErrNotALayer - } - } - if ristore.Exists(id) { - return ristore.Delete(id) - } - if rlstore.Exists(id) { - return rlstore.Delete(id) - } - return ErrLayerUnknown -} - -func (s *store) Wipe() error { - rcstore, err := s.ContainerStore() - if err != nil { - return err - } - ristore, err := s.ImageStore() - if err != nil { - return err - } - rlstore, err := s.LayerStore() - if err != nil { - return err - } - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - ristore.Lock() - defer ristore.Unlock() - if modified, err := ristore.Modified(); modified || err != nil { - ristore.Load() - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - if err = rcstore.Wipe(); err != nil { - return err - } - if err = ristore.Wipe(); err != nil { - return err - } - return rlstore.Wipe() -} - -func (s *store) Status() ([][2]string, error) { - rlstore, err := s.LayerStore() - if err != nil { - return nil, err - } - return rlstore.Status() -} - -func (s *store) Version() ([][2]string, error) { - return [][2]string{}, nil -} - -func (s *store) Mount(id, mountLabel string) (string, error) { - if layerID, err := s.ContainerLayerID(id); err == nil { - id = layerID - } - rlstore, err := s.LayerStore() - if err != nil { - return "", err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - if rlstore.Exists(id) { - return rlstore.Mount(id, mountLabel) - } - return "", ErrLayerUnknown -} - -func (s *store) Unmount(id string) error { - if layerID, err := s.ContainerLayerID(id); err == nil { - id = layerID - } - rlstore, err := s.LayerStore() - if err != nil { - return err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - if rlstore.Exists(id) { - return rlstore.Unmount(id) - } - return ErrLayerUnknown -} - -func (s *store) Changes(from, to string) ([]archive.Change, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err 
!= nil { - return nil, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(to) { - return store.Changes(from, to) - } - } - return nil, ErrLayerUnknown -} - -func (s *store) DiffSize(from, to string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(to) { - return store.DiffSize(from, to) - } - } - return -1, ErrLayerUnknown -} - -func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(to) { - return store.Diff(from, to, options) - } - } - return nil, ErrLayerUnknown -} - -func (s *store) ApplyDiff(to string, diff io.Reader) (int64, error) { - rlstore, err := s.LayerStore() - if err != nil { - return -1, err - } - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - if rlstore.Exists(to) { - return rlstore.ApplyDiff(to, diff) - } - return -1, ErrLayerUnknown -} - -func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Layer, error), d digest.Digest) ([]Layer, error) { - var layers []Layer - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - storeLayers, err := m(store, d) - if err != nil { - if errors.Cause(err) != ErrLayerUnknown { - return nil, err - } - continue - } - layers = append(layers, storeLayers...) - } - if len(layers) == 0 { - return nil, ErrLayerUnknown - } - return layers, nil -} - -func (s *store) LayersByCompressedDigest(d digest.Digest) ([]Layer, error) { - if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for compressed layers matching digest %q", d) - } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByCompressedDigest(d) }, d) -} - -func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) { - if err := d.Validate(); err != nil { - return nil, errors.Wrapf(err, "error looking for layers matching digest %q", d) - } - return s.layersByMappedDigest(func(r ROLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d) -} - -func (s *store) LayerSize(id string) (int64, error) { - lstore, err := s.LayerStore() - if err != nil { - return -1, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return -1, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - if store.Exists(id) { - return store.Size(id) - } - } - return -1, ErrLayerUnknown -} - -func (s *store) Layers() ([]Layer, error) { - var layers []Layer - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - storeLayers, err := store.Layers() - if err != nil { - return nil, err - } - layers = append(layers, storeLayers...) - } - return layers, nil -} - -func (s *store) Images() ([]Image, error) { - var images []Image - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - storeImages, err := store.Images() - if err != nil { - return nil, err - } - images = append(images, storeImages...) - } - return images, nil -} - -func (s *store) Containers() ([]Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - return rcstore.Containers() -} - -func (s *store) Layer(id string) (*Layer, error) { - lstore, err := s.LayerStore() - if err != nil { - return nil, err - } - lstores, err := s.ROLayerStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROLayerStore{lstore}, lstores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - layer, err := store.Get(id) - if err == nil { - return layer, nil - } - } - return nil, ErrLayerUnknown -} - -func (s *store) Image(id string) (*Image, error) { - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - image, err := store.Get(id) - if err == nil { - return image, nil - } - } - return nil, ErrImageUnknown -} - -func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { - images := []*Image{} - layer, err := s.Layer(id) - if err != nil { - return nil, err - } - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) 
{ - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - imageList, err := store.Images() - if err != nil { - return nil, err - } - for _, image := range imageList { - if image.TopLayer == layer.ID { - images = append(images, &image) - } - } - } - return images, nil -} - -func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { - images := []*Image{} - - istore, err := s.ImageStore() - if err != nil { - return nil, err - } - - istores, err := s.ROImageStores() - if err != nil { - return nil, err - } - for _, store := range append([]ROImageStore{istore}, istores...) { - store.Lock() - defer store.Unlock() - if modified, err := store.Modified(); modified || err != nil { - store.Load() - } - imageList, err := store.ByDigest(d) - if err != nil && err != ErrImageUnknown { - return nil, err - } - images = append(images, imageList...) - } - return images, nil -} - -func (s *store) Container(id string) (*Container, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - return rcstore.Get(id) -} - -func (s *store) ContainerLayerID(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - container, err := rcstore.Get(id) - if err != nil { - return "", err - } - return container.LayerID, nil -} - -func (s *store) ContainerByLayer(id string) (*Container, error) { - layer, err := s.Layer(id) - if err != nil { - return nil, err - } - rcstore, err := s.ContainerStore() - if err != nil { - return nil, err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - containerList, err := rcstore.Containers() - if err != nil { - return nil, err - } - for _, container := range containerList { - if container.LayerID == layer.ID { - return &container, nil - } - } - - return nil, ErrContainerUnknown -} - -func (s *store) ContainerDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - id, err = rcstore.Lookup(id) - if err != nil { - return "", err - } - - middleDir := s.graphDriverName + "-containers" - gcpath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(gcpath, 0700); err != nil { - return "", err - } - return gcpath, nil -} - -func (s *store) ContainerRunDirectory(id string) (string, error) { - rcstore, err := s.ContainerStore() - if err != nil { - return "", err - } - - rcstore.Lock() - defer rcstore.Unlock() - if modified, err := rcstore.Modified(); modified || err != nil { - rcstore.Load() - } - - id, err = rcstore.Lookup(id) - if err != nil { - return "", err - } - - middleDir := s.graphDriverName + "-containers" - rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata") - if err := os.MkdirAll(rcpath, 0700); err != nil { - return "", err - } - return rcpath, nil -} - -func (s *store) SetContainerDirectoryFile(id, file string, data []byte) error { - dir, err := s.ContainerDirectory(id) - if err != nil { - return err - } - err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 
0700) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) -} - -func (s *store) FromContainerDirectory(id, file string) ([]byte, error) { - dir, err := s.ContainerDirectory(id) - if err != nil { - return nil, err - } - return ioutil.ReadFile(filepath.Join(dir, file)) -} - -func (s *store) SetContainerRunDirectoryFile(id, file string, data []byte) error { - dir, err := s.ContainerRunDirectory(id) - if err != nil { - return err - } - err = os.MkdirAll(filepath.Dir(filepath.Join(dir, file)), 0700) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(dir, file), data, 0600) -} - -func (s *store) FromContainerRunDirectory(id, file string) ([]byte, error) { - dir, err := s.ContainerRunDirectory(id) - if err != nil { - return nil, err - } - return ioutil.ReadFile(filepath.Join(dir, file)) -} - -func (s *store) Shutdown(force bool) ([]string, error) { - mounted := []string{} - modified := false - - rlstore, err := s.LayerStore() - if err != nil { - return mounted, err - } - - s.graphLock.Lock() - defer s.graphLock.Unlock() - - rlstore.Lock() - defer rlstore.Unlock() - if modified, err := rlstore.Modified(); modified || err != nil { - rlstore.Load() - } - - layers, err := rlstore.Layers() - if err != nil { - return mounted, err - } - for _, layer := range layers { - if layer.MountCount == 0 { - continue - } - mounted = append(mounted, layer.ID) - if force { - for layer.MountCount > 0 { - err2 := rlstore.Unmount(layer.ID) - if err2 != nil { - if err == nil { - err = err2 - } - break - } - modified = true - } - } - } - if len(mounted) > 0 && err == nil { - err = errors.Wrap(ErrLayerUsedByContainer, "A layer is mounted") - } - if err == nil { - err = s.graphDriver.Cleanup() - s.graphLock.Touch() - modified = true - } - if modified { - rlstore.Touch() - } - return mounted, err -} - -// Convert a BigData key name into an acceptable file name. -func makeBigDataBaseName(key string) string { - reader := strings.NewReader(key) - for reader.Len() > 0 { - ch, size, err := reader.ReadRune() - if err != nil || size != 1 { - break - } - if ch != '.' && !(ch >= '0' && ch <= '9') && !(ch >= 'a' && ch <= 'z') { - break - } - } - if reader.Len() > 0 { - return "=" + base64.StdEncoding.EncodeToString([]byte(key)) - } - return key -} - -func stringSliceWithoutValue(slice []string, value string) []string { - modified := make([]string, 0, len(slice)) - for _, v := range slice { - if v == value { - continue - } - modified = append(modified, v) - } - return modified -} - -const configFile = "/etc/containers/storage.conf" - -// OptionsConfig represents the "storage.options" TOML config table. -type OptionsConfig struct { - // AdditionalImagesStores is the location of additional read/only - // Image stores. Usually used to access Networked File System - // for shared image content - AdditionalImageStores []string `toml:"additionalimagestores"` - - // Size - Size string `toml:"size"` - - // OverrideKernelCheck - OverrideKernelCheck string `toml:"override_kernel_check"` -} - -// TOML-friendly explicit tables used for conversions. 
-type tomlConfig struct { - Storage struct { - Driver string `toml:"driver"` - RunRoot string `toml:"runroot"` - GraphRoot string `toml:"graphroot"` - Options struct{ OptionsConfig } `toml:"options"` - } `toml:"storage"` -} - -func init() { - DefaultStoreOptions.RunRoot = "/var/run/containers/storage" - DefaultStoreOptions.GraphRoot = "/var/lib/containers/storage" - DefaultStoreOptions.GraphDriverName = "" - - data, err := ioutil.ReadFile(configFile) - if err != nil { - if !os.IsNotExist(err) { - fmt.Printf("Failed to read %s %v\n", configFile, err.Error()) - return - } - } - - config := new(tomlConfig) - - if _, err := toml.Decode(string(data), config); err != nil { - fmt.Printf("Failed to parse %s %v\n", configFile, err.Error()) - return - } - if config.Storage.Driver != "" { - DefaultStoreOptions.GraphDriverName = config.Storage.Driver - } - if config.Storage.RunRoot != "" { - DefaultStoreOptions.RunRoot = config.Storage.RunRoot - } - if config.Storage.GraphRoot != "" { - DefaultStoreOptions.GraphRoot = config.Storage.GraphRoot - } - for _, s := range config.Storage.Options.AdditionalImageStores { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.imagestore=%s", config.Storage.Driver, s)) - } - if config.Storage.Options.Size != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.size=%s", config.Storage.Driver, config.Storage.Options.Size)) - } - if config.Storage.Options.OverrideKernelCheck != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, fmt.Sprintf("%s.override_kernel_check=%s", config.Storage.Driver, config.Storage.Options.OverrideKernelCheck)) - } - if os.Getenv("STORAGE_DRIVER") != "" { - DefaultStoreOptions.GraphDriverName = os.Getenv("STORAGE_DRIVER") - } - if os.Getenv("STORAGE_OPTS") != "" { - DefaultStoreOptions.GraphDriverOptions = append(DefaultStoreOptions.GraphDriverOptions, strings.Split(os.Getenv("STORAGE_OPTS"), ",")...) - } - if len(DefaultStoreOptions.GraphDriverOptions) == 1 && DefaultStoreOptions.GraphDriverOptions[0] == "" { - DefaultStoreOptions.GraphDriverOptions = nil - } -} diff --git a/vendor/github.com/coreos/etcd/Documentation/README.md b/vendor/github.com/coreos/etcd/Documentation/README.md new file mode 120000 index 0000000000..8828313f5b --- /dev/null +++ b/vendor/github.com/coreos/etcd/Documentation/README.md @@ -0,0 +1 @@ +docs.md \ No newline at end of file diff --git a/vendor/github.com/GoogleCloudPlatform/container-diff/LICENSE b/vendor/github.com/coreos/etcd/LICENSE similarity index 100% rename from vendor/github.com/GoogleCloudPlatform/container-diff/LICENSE rename to vendor/github.com/coreos/etcd/LICENSE diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 0000000000..b39ddfa5cb --- /dev/null +++ b/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go new file mode 100644 index 0000000000..b6ba7e150d --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_role.go @@ -0,0 +1,236 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" +) + +type Role struct { + Role string `json:"role"` + Permissions Permissions `json:"permissions"` + Grant *Permissions `json:"grant,omitempty"` + Revoke *Permissions `json:"revoke,omitempty"` +} + +type Permissions struct { + KV rwPermission `json:"kv"` +} + +type rwPermission struct { + Read []string `json:"read"` + Write []string `json:"write"` +} + +type PermissionType int + +const ( + ReadPermission PermissionType = iota + WritePermission + ReadWritePermission +) + +// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to +// interact with etcd's role creation and modification features. +func NewAuthRoleAPI(c Client) AuthRoleAPI { + return &httpAuthRoleAPI{ + client: c, + } +} + +type AuthRoleAPI interface { + // AddRole adds a role. + AddRole(ctx context.Context, role string) error + + // RemoveRole removes a role. + RemoveRole(ctx context.Context, role string) error + + // GetRole retrieves role details. + GetRole(ctx context.Context, role string) (*Role, error) + + // GrantRoleKV grants a role some permission prefixes for the KV store. + GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // RevokeRoleKV revokes some permission prefixes for a role on the KV store. + RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error) + + // ListRoles lists roles. 
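// A minimal usage sketch for the role API declared here (the ListRoles
// signature follows just below): create a role, then grant it read/write
// access to a key prefix. The endpoint, role name "app-role", and prefix
// "/app/*" are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"}, // assumed local etcd
	})
	if err != nil {
		log.Fatal(err)
	}
	roles := client.NewAuthRoleAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Create the role, then grant it read/write on a key prefix.
	if err := roles.AddRole(ctx, "app-role"); err != nil {
		log.Fatal(err)
	}
	r, err := roles.GrantRoleKV(ctx, "app-role", []string{"/app/*"}, client.ReadWritePermission)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.Role, r.Permissions.KV)
}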
+ ListRoles(ctx context.Context) ([]string, error) +} + +type httpAuthRoleAPI struct { + client httpClient +} + +type authRoleAPIAction struct { + verb string + name string + role *Role +} + +type authRoleAPIList struct{} + +func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "roles", l.name) + if l.role == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.role) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { + resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + var roleList struct { + Roles []Role `json:"roles"` + } + if err = json.Unmarshal(body, &roleList); err != nil { + return nil, err + } + ret := make([]string, 0, len(roleList.Roles)) + for _, r := range roleList.Roles { + ret = append(ret, r.Role) + } + return ret, nil +} + +func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { + role := &Role{ + Role: rolename, + } + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { + return r.addRemoveRole(ctx, &authRoleAPIAction{ + verb: "DELETE", + name: rolename, + }) +} + +func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { + resp, body, err := r.client.Do(ctx, req) + if err != nil { + return err + } + if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err := json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { + return r.modRole(ctx, &authRoleAPIAction{ + verb: "GET", + name: rolename, + }) +} + +func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { + var out rwPermission + switch permType { + case ReadPermission: + out.Read = prefixes + case WritePermission: + out.Write = prefixes + case ReadWritePermission: + out.Read = prefixes + out.Write = prefixes + } + return out +} + +func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Grant: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { + rwp := buildRWPermission(prefixes, permType) + role := &Role{ + Role: rolename, + Revoke: &Permissions{ + KV: rwp, + }, + } + return r.modRole(ctx, &authRoleAPIAction{ + verb: "PUT", + name: rolename, + role: role, + }) +} + +func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { + resp, body, err 
:= r.client.Do(ctx, req) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var role Role + if err = json.Unmarshal(body, &role); err != nil { + return nil, err + } + return &role, nil +} diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go new file mode 100644 index 0000000000..8e7e2efe83 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/auth_user.go @@ -0,0 +1,319 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + "net/url" + "path" +) + +var ( + defaultV2AuthPrefix = "/v2/auth" +) + +type User struct { + User string `json:"user"` + Password string `json:"password,omitempty"` + Roles []string `json:"roles"` + Grant []string `json:"grant,omitempty"` + Revoke []string `json:"revoke,omitempty"` +} + +// userListEntry is the user representation given by the server for ListUsers +type userListEntry struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +type UserRoles struct { + User string `json:"user"` + Roles []Role `json:"roles"` +} + +func v2AuthURL(ep url.URL, action string, name string) *url.URL { + if name != "" { + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) + return &ep + } + ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) + return &ep +} + +// NewAuthAPI constructs a new AuthAPI that uses HTTP to +// interact with etcd's general auth features. +func NewAuthAPI(c Client) AuthAPI { + return &httpAuthAPI{ + client: c, + } +} + +type AuthAPI interface { + // Enable auth. + Enable(ctx context.Context) error + + // Disable auth. 
+ Disable(ctx context.Context) error +} + +type httpAuthAPI struct { + client httpClient +} + +func (s *httpAuthAPI) Enable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"PUT"}) +} + +func (s *httpAuthAPI) Disable(ctx context.Context) error { + return s.enableDisable(ctx, &authAPIAction{"DELETE"}) +} + +func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { + resp, body, err := s.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +type authAPIAction struct { + verb string +} + +func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "enable", "") + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req +} + +type authError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e authError) Error() string { + return e.Message +} + +// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to +// interact with etcd's user creation and modification features. +func NewAuthUserAPI(c Client) AuthUserAPI { + return &httpAuthUserAPI{ + client: c, + } +} + +type AuthUserAPI interface { + // AddUser adds a user. + AddUser(ctx context.Context, username string, password string) error + + // RemoveUser removes a user. + RemoveUser(ctx context.Context, username string) error + + // GetUser retrieves user details. + GetUser(ctx context.Context, username string) (*User, error) + + // GrantUser grants a user some permission roles. + GrantUser(ctx context.Context, username string, roles []string) (*User, error) + + // RevokeUser revokes some permission roles from a user. + RevokeUser(ctx context.Context, username string, roles []string) (*User, error) + + // ChangePassword changes the user's password. + ChangePassword(ctx context.Context, username string, password string) (*User, error) + + // ListUsers lists the users. 
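// A minimal sketch of driving the user API declared here (the ListUsers
// signature follows just below): add a user, then attach an existing role.
// The endpoint, user name, password, and role are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"}, // assumed local etcd
	})
	if err != nil {
		log.Fatal(err)
	}
	users := client.NewAuthUserAPI(c)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := users.AddUser(ctx, "app-user", "secret"); err != nil {
		log.Fatal(err)
	}
	// GrantUser returns the updated user record, including its roles.
	u, err := users.GrantUser(ctx, "app-user", []string{"app-role"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u.User, u.Roles)
}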
+ ListUsers(ctx context.Context) ([]string, error) +} + +type httpAuthUserAPI struct { + client httpClient +} + +type authUserAPIAction struct { + verb string + username string + user *User +} + +type authUserAPIList struct{} + +func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", "") + req, _ := http.NewRequest("GET", u.String(), nil) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { + u := v2AuthURL(ep, "users", l.username) + if l.user == nil { + req, _ := http.NewRequest(l.verb, u.String(), nil) + return req + } + b, err := json.Marshal(l.user) + if err != nil { + panic(err) + } + body := bytes.NewReader(b) + req, _ := http.NewRequest(l.verb, u.String(), body) + req.Header.Set("Content-Type", "application/json") + return req +} + +func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { + resp, body, err := u.client.Do(ctx, &authUserAPIList{}) + if err != nil { + return nil, err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + + var userList struct { + Users []userListEntry `json:"users"` + } + + if err = json.Unmarshal(body, &userList); err != nil { + return nil, err + } + + ret := make([]string, 0, len(userList.Users)) + for _, u := range userList.Users { + ret = append(ret, u.User) + } + return ret, nil +} + +func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { + user := &User{ + User: username, + Password: password, + } + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { + return u.addRemoveUser(ctx, &authUserAPIAction{ + verb: "DELETE", + username: username, + }) +} + +func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return err + } + return sec + } + return nil +} + +func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { + return u.modUser(ctx, &authUserAPIAction{ + verb: "GET", + username: username, + }) +} + +func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Grant: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { + user := &User{ + User: username, + Revoke: roles, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { + user := &User{ + User: username, + Password: password, + } + return u.modUser(ctx, &authUserAPIAction{ + verb: "PUT", + username: username, + user: user, + }) +} + +func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { + resp, body, err := u.client.Do(ctx, req) + if err != nil { + return nil, 
err + } + if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + var sec authError + err = json.Unmarshal(body, &sec) + if err != nil { + return nil, err + } + return nil, sec + } + var user User + if err = json.Unmarshal(body, &user); err != nil { + var userR UserRoles + if urerr := json.Unmarshal(body, &userR); urerr != nil { + return nil, err + } + user.User = userR.User + for _, r := range userR.Roles { + user.Roles = append(user.Roles, r.Role) + } + } + return &user, nil +} diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go new file mode 100644 index 0000000000..76d1f04019 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cancelreq.go @@ -0,0 +1,18 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// borrowed from golang/net/context/ctxhttp/cancelreq.go + +package client + +import "net/http" + +func requestCanceler(tr CancelableTransport, req *http.Request) func() { + ch := make(chan struct{}) + req.Cancel = ch + + return func() { + close(ch) + } +} diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go new file mode 100644 index 0000000000..e687450566 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/client.go @@ -0,0 +1,710 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "sync" + "time" + + "github.com/coreos/etcd/version" +) + +var ( + ErrNoEndpoints = errors.New("client: no endpoints available") + ErrTooManyRedirects = errors.New("client: too many redirects") + ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") + ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") + errTooManyRedirectChecks = errors.New("client: too many redirect checks") + + // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so + // that Do() will not retry a request + oneShotCtxValue interface{} +) + +var DefaultRequestTimeout = 5 * time.Second + +var DefaultTransport CancelableTransport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, +} + +type EndpointSelectionMode int + +const ( + // EndpointSelectionRandom is the default value of the 'SelectionMode'. + // As the name implies, the client object will pick a node from the members + // of the cluster in a random fashion. If the cluster has three members, A, B, + // and C, the client picks any node from its three members as its request + // destination. 
+ EndpointSelectionRandom EndpointSelectionMode = iota + + // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', + // requests are sent directly to the cluster leader. This reduces + // forwarding roundtrips compared to making requests to etcd followers + // who then forward them to the cluster leader. In the event of a leader + // failure, however, clients configured this way cannot prioritize among + // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' + // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to + // maintain its knowledge of the current cluster state. + // + // This mode should be used with Client.AutoSync(). + EndpointSelectionPrioritizeLeader +) + +type Config struct { + // Endpoints defines a set of URLs (schemes, hosts and ports only) + // that can be used to communicate with a logical etcd cluster. For + // example, a three-node cluster could be provided like so: + // + // Endpoints: []string{ + // "http://node1.example.com:2379", + // "http://node2.example.com:2379", + // "http://node3.example.com:2379", + // } + // + // If multiple endpoints are provided, the Client will attempt to + // use them all in the event that one or more of them are unusable. + // + // If Client.Sync is ever called, the Client may cache an alternate + // set of endpoints to continue operation. + Endpoints []string + + // Transport is used by the Client to drive HTTP requests. If not + // provided, DefaultTransport will be used. + Transport CancelableTransport + + // CheckRedirect specifies the policy for handling HTTP redirects. + // If CheckRedirect is not nil, the Client calls it before + // following an HTTP redirect. The sole argument is the number of + // requests that have already been made. If CheckRedirect returns + // an error, Client.Do will not make any further requests and return + // the error back to the caller. + // + // If CheckRedirect is nil, the Client uses its default policy, + // which is to stop after 10 consecutive requests. + CheckRedirect CheckRedirectFunc + + // Username specifies the user credential to add as an authorization header. + Username string + + // Password is the password for the specified user, added to the + // authorization header of each request. + Password string + + // HeaderTimeoutPerRequest specifies the time limit to wait for the response + // header in a single request made by the Client. The timeout includes + // connection time, any redirects, and header wait time. + // + // For non-watch GET requests, the server returns the response body immediately. + // For PUT/POST/DELETE requests, the server will attempt to commit the request + // before responding, which is expected to take `100ms + 2 * RTT`. + // For watch requests, the server returns the header immediately to notify the + // Client that the watch has started. But if the server is behind some kind of + // proxy, the response header may be cached at the proxy, and the Client cannot + // rely on this behavior. + // + // In particular, wait requests ignore this timeout. + // + // One API call may send multiple requests to different etcd servers until it + // succeeds. Use the context of the API call to specify the overall timeout. + // + // A HeaderTimeoutPerRequest of zero means no timeout. + HeaderTimeoutPerRequest time.Duration + + // SelectionMode is an EndpointSelectionMode enum that specifies the + // policy for choosing the etcd cluster node to which requests are sent.
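// A sketch of wiring up the Config documented above (the SelectionMode field
// itself follows just below): prioritize-leader selection paired with the
// AutoSync call that the comment requires. The endpoints, timeout, and sync
// interval are illustrative assumptions.
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/client"
)

func main() {
	cfg := client.Config{
		Endpoints:               []string{"http://10.0.0.1:2379", "http://10.0.0.2:2379"}, // assumed
		HeaderTimeoutPerRequest: time.Second,
		SelectionMode:           client.EndpointSelectionPrioritizeLeader,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// PrioritizeLeader only stays accurate if the member list is re-synced;
	// AutoSync blocks, calling Sync every 10s until the context ends or a
	// sync fails.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := c.AutoSync(ctx, 10*time.Second); err != nil && err != context.DeadlineExceeded {
		log.Print(err)
	}
}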
+ SelectionMode EndpointSelectionMode +} + +func (cfg *Config) transport() CancelableTransport { + if cfg.Transport == nil { + return DefaultTransport + } + return cfg.Transport +} + +func (cfg *Config) checkRedirect() CheckRedirectFunc { + if cfg.CheckRedirect == nil { + return DefaultCheckRedirect + } + return cfg.CheckRedirect +} + +// CancelableTransport mimics net/http.Transport, but requires that +// the object also support request cancellation. +type CancelableTransport interface { + http.RoundTripper + CancelRequest(req *http.Request) +} + +type CheckRedirectFunc func(via int) error + +// DefaultCheckRedirect follows up to 10 redirects, but no more. +var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { + if via > 10 { + return ErrTooManyRedirects + } + return nil +} + +type Client interface { + // Sync updates the internal cache of the etcd cluster's membership. + Sync(context.Context) error + + // AutoSync periodically calls Sync() at the given interval. + // The recommended sync interval is 10 seconds to 1 minute; this adds + // little overhead to the server and keeps the client's view of cluster + // membership current. + // + // Example usage: + // + // for { + // err := client.AutoSync(ctx, 10*time.Second) + // if err == context.DeadlineExceeded || err == context.Canceled { + // break + // } + // log.Print(err) + // } + AutoSync(context.Context, time.Duration) error + + // Endpoints returns a copy of the current set of API endpoints used + // by Client to resolve HTTP requests. If Sync has ever been called, + // this may differ from the initial Endpoints provided in the Config. + Endpoints() []string + + // SetEndpoints sets the set of API endpoints used by Client to resolve + // HTTP requests. If the given endpoints are not valid, an error will be + // returned. + SetEndpoints(eps []string) error + + // GetVersion retrieves the current etcd server and cluster version. + GetVersion(ctx context.Context) (*version.Versions, error) + + httpClient +} + +func New(cfg Config) (Client, error) { + c := &httpClusterClient{ + clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), + rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), + selectionMode: cfg.SelectionMode, + } + if cfg.Username != "" { + c.credentials = &credentials{ + username: cfg.Username, + password: cfg.Password, + } + } + if err := c.SetEndpoints(cfg.Endpoints); err != nil { + return nil, err + } + return c, nil +} + +type httpClient interface { + Do(context.Context, httpAction) (*http.Response, []byte, error) +} + +func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { + return func(ep url.URL) httpClient { + return &redirectFollowingHTTPClient{ + checkRedirect: cr, + client: &simpleHTTPClient{ + transport: tr, + endpoint: ep, + headerTimeout: headerTimeout, + }, + } + } +} + +type credentials struct { + username string + password string +} + +type httpClientFactory func(url.URL) httpClient + +type httpAction interface { + HTTPRequest(url.URL) *http.Request +} + +type httpClusterClient struct { + clientFactory httpClientFactory + endpoints []url.URL + pinned int + credentials *credentials + sync.RWMutex + rand *rand.Rand + selectionMode EndpointSelectionMode +} + +func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) { + ceps := make([]url.URL, len(eps)) + copy(ceps, eps) + + // To perform a lookup on the new endpoint list
without using the current + // client, we'll copy it + clientCopy := &httpClusterClient{ + clientFactory: c.clientFactory, + credentials: c.credentials, + rand: c.rand, + + pinned: 0, + endpoints: ceps, + } + + mAPI := NewMembersAPI(clientCopy) + leader, err := mAPI.Leader(ctx) + if err != nil { + return "", err + } + if len(leader.ClientURLs) == 0 { + return "", ErrNoLeaderEndpoint + } + + return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? +} + +func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) { + if len(eps) == 0 { + return []url.URL{}, ErrNoEndpoints + } + + neps := make([]url.URL, len(eps)) + for i, ep := range eps { + u, err := url.Parse(ep) + if err != nil { + return []url.URL{}, err + } + neps[i] = *u + } + return neps, nil +} + +func (c *httpClusterClient) SetEndpoints(eps []string) error { + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + c.endpoints = shuffleEndpoints(c.rand, neps) + // We're not doing anything for PrioritizeLeader here. This is + // due to not having a context meaning we can't call getLeaderEndpoint + // However, if you're using PrioritizeLeader, you've already been told + // to regularly call sync, where we do have a ctx, and can figure the + // leader. PrioritizeLeader is also quite a loose guarantee, so deal + // with it + c.pinned = 0 + + return nil +} + +func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + action := act + c.RLock() + leps := len(c.endpoints) + eps := make([]url.URL, leps) + n := copy(eps, c.endpoints) + pinned := c.pinned + + if c.credentials != nil { + action = &authedAction{ + act: act, + credentials: *c.credentials, + } + } + c.RUnlock() + + if leps == 0 { + return nil, nil, ErrNoEndpoints + } + + if leps != n { + return nil, nil, errors.New("unable to pick endpoint: copy failed") + } + + var resp *http.Response + var body []byte + var err error + cerr := &ClusterError{} + isOneShot := ctx.Value(&oneShotCtxValue) != nil + + for i := pinned; i < leps+pinned; i++ { + k := i % leps + hc := c.clientFactory(eps[k]) + resp, body, err = hc.Do(ctx, action) + if err != nil { + cerr.Errors = append(cerr.Errors, err) + if err == ctx.Err() { + return nil, nil, ctx.Err() + } + if err == context.Canceled || err == context.DeadlineExceeded { + return nil, nil, err + } + } else if resp.StatusCode/100 == 5 { + switch resp.StatusCode { + case http.StatusInternalServerError, http.StatusServiceUnavailable: + // TODO: make sure this is a no leader response + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) + default: + cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) + } + err = cerr.Errors[0] + } + if err != nil { + if !isOneShot { + continue + } + c.Lock() + c.pinned = (k + 1) % leps + c.Unlock() + return nil, nil, err + } + if k != pinned { + c.Lock() + c.pinned = k + c.Unlock() + } + return resp, body, nil + } + + return nil, nil, cerr +} + +func (c *httpClusterClient) Endpoints() []string { + c.RLock() + defer c.RUnlock() + + eps := make([]string, len(c.endpoints)) + for i, ep := range c.endpoints { + eps[i] = ep.String() + } + + return eps +} + +func (c *httpClusterClient) Sync(ctx context.Context) error { + mAPI := NewMembersAPI(c) + ms, err := mAPI.List(ctx) + if err != nil { + return err + } + + var eps []string + for _, m := range ms { + 
eps = append(eps, m.ClientURLs...) + } + + neps, err := c.parseEndpoints(eps) + if err != nil { + return err + } + + npin := 0 + + switch c.selectionMode { + case EndpointSelectionRandom: + c.RLock() + eq := endpointsEqual(c.endpoints, neps) + c.RUnlock() + + if eq { + return nil + } + // When items in the endpoint list changes, we choose a new pin + neps = shuffleEndpoints(c.rand, neps) + case EndpointSelectionPrioritizeLeader: + nle, err := c.getLeaderEndpoint(ctx, neps) + if err != nil { + return ErrNoLeaderEndpoint + } + + for i, n := range neps { + if n.String() == nle { + npin = i + break + } + } + default: + return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode) + } + + c.Lock() + defer c.Unlock() + c.endpoints = neps + c.pinned = npin + + return nil +} + +func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + err := c.Sync(ctx) + if err != nil { + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } + } +} + +func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) { + act := &getAction{Prefix: "/version"} + + resp, body, err := c.Do(ctx, act) + if err != nil { + return nil, err + } + + switch resp.StatusCode { + case http.StatusOK: + if len(body) == 0 { + return nil, ErrEmptyBody + } + var vresp version.Versions + if err := json.Unmarshal(body, &vresp); err != nil { + return nil, ErrInvalidJSON + } + return &vresp, nil + default: + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return nil, ErrInvalidJSON + } + return nil, etcdErr + } +} + +type roundTripResponse struct { + resp *http.Response + err error +} + +type simpleHTTPClient struct { + transport CancelableTransport + endpoint url.URL + headerTimeout time.Duration +} + +func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + req := act.HTTPRequest(c.endpoint) + + if err := printcURL(req); err != nil { + return nil, nil, err + } + + isWait := false + if req != nil && req.URL != nil { + ws := req.URL.Query().Get("wait") + if len(ws) != 0 { + var err error + isWait, err = strconv.ParseBool(ws) + if err != nil { + return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) + } + } + } + + var hctx context.Context + var hcancel context.CancelFunc + if !isWait && c.headerTimeout > 0 { + hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) + } else { + hctx, hcancel = context.WithCancel(ctx) + } + defer hcancel() + + reqcancel := requestCanceler(c.transport, req) + + rtchan := make(chan roundTripResponse, 1) + go func() { + resp, err := c.transport.RoundTrip(req) + rtchan <- roundTripResponse{resp: resp, err: err} + close(rtchan) + }() + + var resp *http.Response + var err error + + select { + case rtresp := <-rtchan: + resp, err = rtresp.resp, rtresp.err + case <-hctx.Done(): + // cancel and wait for request to actually exit before continuing + reqcancel() + rtresp := <-rtchan + resp = rtresp.resp + switch { + case ctx.Err() != nil: + err = ctx.Err() + case hctx.Err() != nil: + err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) + default: + panic("failed to get error from context") + } + } + + // always check for resp nil-ness to deal with possible + // race conditions between channels above + defer func() { + if resp != nil { + resp.Body.Close() + } + }() + + if err != nil { + return nil, nil, err + } + + var 
body []byte + done := make(chan struct{}) + go func() { + body, err = ioutil.ReadAll(resp.Body) + done <- struct{}{} + }() + + select { + case <-ctx.Done(): + resp.Body.Close() + <-done + return nil, nil, ctx.Err() + case <-done: + } + + return resp, body, err +} + +type authedAction struct { + act httpAction + credentials credentials +} + +func (a *authedAction) HTTPRequest(url url.URL) *http.Request { + r := a.act.HTTPRequest(url) + r.SetBasicAuth(a.credentials.username, a.credentials.password) + return r +} + +type redirectFollowingHTTPClient struct { + client httpClient + checkRedirect CheckRedirectFunc +} + +func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { + next := act + for i := 0; i < 100; i++ { + if i > 0 { + if err := r.checkRedirect(i); err != nil { + return nil, nil, err + } + } + resp, body, err := r.client.Do(ctx, next) + if err != nil { + return nil, nil, err + } + if resp.StatusCode/100 == 3 { + hdr := resp.Header.Get("Location") + if hdr == "" { + return nil, nil, fmt.Errorf("Location header not set") + } + loc, err := url.Parse(hdr) + if err != nil { + return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) + } + next = &redirectedHTTPAction{ + action: act, + location: *loc, + } + continue + } + return resp, body, nil + } + + return nil, nil, errTooManyRedirectChecks +} + +type redirectedHTTPAction struct { + action httpAction + location url.URL +} + +func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { + orig := r.action.HTTPRequest(ep) + orig.URL = &r.location + return orig +} + +func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { + // copied from Go 1.9<= rand.Rand.Perm + n := len(eps) + p := make([]int, n) + for i := 0; i < n; i++ { + j := r.Intn(i + 1) + p[i] = p[j] + p[j] = i + } + neps := make([]url.URL, n) + for i, k := range p { + neps[i] = eps[k] + } + return neps +} + +func endpointsEqual(left, right []url.URL) bool { + if len(left) != len(right) { + return false + } + + sLeft := make([]string, len(left)) + sRight := make([]string, len(right)) + for i, l := range left { + sLeft[i] = l.String() + } + for i, r := range right { + sRight[i] = r.String() + } + + sort.Strings(sLeft) + sort.Strings(sRight) + for i := range sLeft { + if sLeft[i] != sRight[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go new file mode 100644 index 0000000000..34618cdbd9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/cluster_error.go @@ -0,0 +1,37 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
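// A sketch of surfacing per-endpoint failures with the ClusterError type
// defined just below: when every configured endpoint fails, Do returns a
// *ClusterError, and Detail() lists one error per endpoint tried. The
// endpoints and key are illustrative assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379", "http://127.0.0.1:4001"}, // assumed
	})
	if err != nil {
		log.Fatal(err)
	}
	kAPI := client.NewKeysAPI(c)
	if _, err := kAPI.Get(context.Background(), "/foo", nil); err != nil {
		if cerr, ok := err.(*client.ClusterError); ok {
			fmt.Print(cerr.Detail()) // one line per endpoint that was tried
		} else {
			log.Print(err)
		}
	}
}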
+ +package client + +import "fmt" + +type ClusterError struct { + Errors []error +} + +func (ce *ClusterError) Error() string { + s := ErrClusterUnavailable.Error() + for i, e := range ce.Errors { + s += fmt.Sprintf("; error #%d: %s\n", i, e) + } + return s +} + +func (ce *ClusterError) Detail() string { + s := "" + for i, e := range ce.Errors { + s += fmt.Sprintf("error #%d: %s\n", i, e) + } + return s +} diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go new file mode 100644 index 0000000000..c8bc9fba20 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/curl.go @@ -0,0 +1,70 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "os" +) + +var ( + cURLDebug = false +) + +func EnablecURLDebug() { + cURLDebug = true +} + +func DisablecURLDebug() { + cURLDebug = false +} + +// printcURL prints the cURL equivalent request to stderr. +// It returns an error if the body of the request cannot +// be read. +// The caller MUST cancel the request if there is an error. +func printcURL(req *http.Request) error { + if !cURLDebug { + return nil + } + var ( + command string + b []byte + err error + ) + + if req.URL != nil { + command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) + } + + if req.Body != nil { + b, err = ioutil.ReadAll(req.Body) + if err != nil { + return err + } + command += fmt.Sprintf(" -d %q", string(b)) + } + + fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) + + // reset body + body := bytes.NewBuffer(b) + req.Body = ioutil.NopCloser(body) + + return nil +} diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go new file mode 100644 index 0000000000..442e35fe54 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/discover.go @@ -0,0 +1,40 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "github.com/coreos/etcd/pkg/srv" +) + +// Discoverer is an interface that wraps the Discover method. +type Discoverer interface { + // Discover looks up the etcd servers for the domain. + Discover(domain string) ([]string, error) +} + +type srvDiscover struct{} + +// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records. 
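// A sketch of seeding Config.Endpoints from DNS SRV records with the
// Discoverer declared here (the NewSRVDiscover function follows just
// below); the domain is an illustrative assumption.
package main

import (
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	eps, err := client.NewSRVDiscover().Discover("example.com") // assumed domain
	if err != nil {
		log.Fatal(err)
	}
	c, err := client.New(client.Config{Endpoints: eps})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(c.Endpoints())
}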
+func NewSRVDiscover() Discoverer { + return &srvDiscover{} +} + +func (d *srvDiscover) Discover(domain string) ([]string, error) { + srvs, err := srv.GetClient("etcd-client", domain) + if err != nil { + return nil, err + } + return srvs.Endpoints, nil +} diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go new file mode 100644 index 0000000000..ad4eca4e16 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/doc.go @@ -0,0 +1,73 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package client provides bindings for the etcd APIs. + +Create a Config and exchange it for a Client: + + import ( + "net/http" + "context" + + "github.com/coreos/etcd/client" + ) + + cfg := client.Config{ + Endpoints: []string{"http://127.0.0.1:2379"}, + Transport: DefaultTransport, + } + + c, err := client.New(cfg) + if err != nil { + // handle error + } + +Clients are safe for concurrent use by multiple goroutines. + +Create a KeysAPI using the Client, then use it to interact with etcd: + + kAPI := client.NewKeysAPI(c) + + // create a new key /foo with the value "bar" + _, err = kAPI.Create(context.Background(), "/foo", "bar") + if err != nil { + // handle error + } + + // delete the newly created key only if the value is still "bar" + _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) + if err != nil { + // handle error + } + +Use a custom context to set timeouts on your operations: + + import "time" + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + // set a new key, ignoring its previous state + _, err := kAPI.Set(ctx, "/ping", "pong", nil) + if err != nil { + if err == context.DeadlineExceeded { + // request took longer than 5s + } else { + // handle error + } + } + +*/ +package client diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go new file mode 100644 index 0000000000..237fdbe8ff --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.generated.go @@ -0,0 +1,5218 @@ +// ************************************************************ +// DO NOT EDIT. +// THIS FILE IS AUTO-GENERATED BY codecgen. 
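+// It defines the codec encode/decode methods for the keys API types below (Error, WatcherOptions, CreateInOrderOptions, SetOptions, GetOptions, DeleteOptions, Response, Node, Nodes).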
+// ************************************************************ + +package client + +import ( + "errors" + "fmt" + "reflect" + "runtime" + time "time" + + codec1978 "github.com/ugorji/go/codec" +) + +const ( + // ----- content types ---- + codecSelferC_UTF87612 = 1 + codecSelferC_RAW7612 = 0 + // ----- value types used ---- + codecSelferValueTypeArray7612 = 10 + codecSelferValueTypeMap7612 = 9 + // ----- containerStateValues ---- + codecSelfer_containerMapKey7612 = 2 + codecSelfer_containerMapValue7612 = 3 + codecSelfer_containerMapEnd7612 = 4 + codecSelfer_containerArrayElem7612 = 6 + codecSelfer_containerArrayEnd7612 = 7 +) + +var ( + codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits()) + codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`) +) + +type codecSelfer7612 struct{} + +func init() { + if codec1978.GenVersion != 8 { + _, file, _, _ := runtime.Caller(0) + err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", + 8, codec1978.GenVersion, file) + panic(err) + } + if false { // reference the types, but skip this branch at build/run time + var v0 time.Duration + _ = v0 + } +} + +func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("errorCode")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeInt(int64(x.Code)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("message")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Message)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("cause")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Cause)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("index")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.Index)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + 
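+ // a registered codec extension has decoded x; nothing more to do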
} else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "errorCode": + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv4 := &x.Code + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + case "message": + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv6 := &x.Message + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "cause": + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv8 := &x.Cause + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "index": + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv10 := &x.Index + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Code = 0 + } else { + yyv13 := &x.Code + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Message = "" + } else { + yyv15 := &x.Message + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Cause = "" + } else { + yyv17 := &x.Cause + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Index = 0 + } else { + yyv19 := &x.Index + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*uint64)(yyv19)) = uint64(r.DecodeUint(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + 
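+ // no elements remain; stop draining unknown trailing array elements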
break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x)) + } +} + +func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + *((*string)(x)) = r.DecodeString() + } +} + +func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(2) + } else { + r.WriteMapStart(2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("AfterIndex")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeUint(uint64(x.AfterIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "AfterIndex": + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv4 := &x.AfterIndex + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*uint64)(yyv4)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv6 := 
&x.Recursive + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.AfterIndex = 0 + } else { + yyv9 := &x.AfterIndex + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*uint64)(yyv9)) = uint64(r.DecodeUint(64)) + } + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(1) + } else { + r.WriteMapStart(1) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + 
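+ // field name decoded; now read the value and dispatch on the key below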
r.ReadMapElemValue() + switch yys3 { + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv4 := &x.TTL + yym5 := z.DecBinary() + _ = yym5 + if false { + } else if z.HasExtensions() && z.DecExt(yyv4) { + } else { + *((*int64)(yyv4)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj6 int + var yyb6 bool + var yyhl6 bool = l >= 0 + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv7 := &x.TTL + yym8 := z.DecBinary() + _ = yym8 + if false { + } else if z.HasExtensions() && z.DecExt(yyv7) { + } else { + *((*int64)(yyv7)) = int64(r.DecodeInt(64)) + } + } + for { + yyj6++ + if yyhl6 { + yyb6 = yyj6 > l + } else { + yyb6 = r.CheckBreak() + } + if yyb6 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj6-1, "") + } + r.ReadArrayEnd() +} + +func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(7) + } else { + r.WriteMapStart(7) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + 
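+ // next array element: the Dir flag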
yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv8 := &x.PrevExist + yyv8.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv9 := &x.TTL + yym10 := z.DecBinary() + _ = yym10 + if false { + } else if z.HasExtensions() && z.DecExt(yyv9) { + } else { + *((*int64)(yyv9)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv11 := &x.Refresh + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv13 := &x.Dir + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv15 := &x.NoValueOnSuccess + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *SetOptions) codecDecodeSelfFromArray(l int, 
d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj17 int + var yyb17 bool + var yyhl17 bool = l >= 0 + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv18 := &x.PrevValue + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*string)(yyv18)) = r.DecodeString() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv20 := &x.PrevIndex + yym21 := z.DecBinary() + _ = yym21 + if false { + } else { + *((*uint64)(yyv20)) = uint64(r.DecodeUint(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv22 := &x.PrevExist + yyv22.CodecDecodeSelf(d) + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv23 := &x.TTL + yym24 := z.DecBinary() + _ = yym24 + if false { + } else if z.HasExtensions() && z.DecExt(yyv23) { + } else { + *((*int64)(yyv23)) = int64(r.DecodeInt(64)) + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv25 := &x.Refresh + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv27 := &x.Dir + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv29 := &x.NoValueOnSuccess + yym30 := z.DecBinary() + _ = yym30 + if false { + } else { + *((*bool)(yyv29)) = r.DecodeBool() + } + } + for { + yyj17++ + if yyhl17 { + yyb17 = yyj17 > l + } else { + yyb17 = r.CheckBreak() + } + if yyb17 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj17-1, "") + } + r.ReadArrayEnd() +} + +func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + 
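+ // default path: encode Recursive as a plain bool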
r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sort")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Sort)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv4 := &x.Recursive + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*bool)(yyv4)) = r.DecodeBool() + } + } + case "Sort": + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv6 := &x.Sort + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv8 := &x.Quorum + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj10 int + var yyb10 bool + var yyhl10 bool = l >= 0 + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv11 := &x.Recursive + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*bool)(yyv11)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sort = false + } else { + yyv13 := &x.Sort + yym14 := z.DecBinary() + _ = yym14 + if 
false { + } else { + *((*bool)(yyv13)) = r.DecodeBool() + } + } + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv15 := &x.Quorum + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*bool)(yyv15)) = r.DecodeBool() + } + } + for { + yyj10++ + if yyhl10 { + yyb10 = yyj10 > l + } else { + yyb10 = r.CheckBreak() + } + if yyb10 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj10-1, "") + } + r.ReadArrayEnd() +} + +func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = 
z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv4 := &x.PrevValue + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv6 := &x.PrevIndex + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*uint64)(yyv6)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv10 := &x.Dir + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv13 := &x.PrevValue + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv15 := &x.PrevIndex + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*uint64)(yyv15)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv17 := &x.Recursive + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(3) + } else { + r.WriteMapStart(3) + } + if yyr2 || 
yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("action")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Action)) + } + } + var yyn6 bool + if x.Node == nil { + yyn6 = true + goto LABEL6 + } + LABEL6: + if yyr2 || yy2arr2 { + if yyn6 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("node")) + r.WriteMapElemValue() + if yyn6 { + r.EncodeNil() + } else { + if x.Node == nil { + r.EncodeNil() + } else { + x.Node.CodecEncodeSelf(e) + } + } + } + var yyn9 bool + if x.PrevNode == nil { + yyn9 = true + goto LABEL9 + } + LABEL9: + if yyr2 || yy2arr2 { + if yyn9 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("prevNode")) + r.WriteMapElemValue() + if yyn9 { + r.EncodeNil() + } else { + if x.PrevNode == nil { + r.EncodeNil() + } else { + x.PrevNode.CodecEncodeSelf(e) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "action": + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv4 := &x.Action + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "node": + if x.Node == nil { + x.Node = new(Node) + } + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + case "prevNode": + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Response) codecDecodeSelfFromArray(l int, d 
*codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj8 int + var yyb8 bool + var yyhl8 bool = l >= 0 + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Action = "" + } else { + yyv9 := &x.Action + yym10 := z.DecBinary() + _ = yym10 + if false { + } else { + *((*string)(yyv9)) = r.DecodeString() + } + } + if x.Node == nil { + x.Node = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Node != nil { + x.Node = nil + } + } else { + if x.Node == nil { + x.Node = new(Node) + } + x.Node.CodecDecodeSelf(d) + } + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.PrevNode != nil { + x.PrevNode = nil + } + } else { + if x.PrevNode == nil { + x.PrevNode = new(Node) + } + x.PrevNode.CodecDecodeSelf(d) + } + for { + yyj8++ + if yyhl8 { + yyb8 = yyj8 > l + } else { + yyb8 = r.CheckBreak() + } + if yyb8 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj8-1, "") + } + r.ReadArrayEnd() +} + +func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + var yyq2 [8]bool + _ = yyq2 + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + yyq2[1] = x.Dir != false + yyq2[6] = x.Expiration != nil + yyq2[7] = x.TTL != 0 + if yyr2 || yy2arr2 { + r.WriteArrayStart(8) + } else { + var yynn2 = 5 + for _, b := range yyq2 { + if b { + yynn2++ + } + } + r.WriteMapStart(yynn2) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("key")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[1] { + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.EncodeBool(false) + } + } else { + if yyq2[1] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if x.Nodes == nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("nodes")) + r.WriteMapElemValue() + if x.Nodes == 
nil { + r.EncodeNil() + } else { + x.Nodes.CodecEncodeSelf(e) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("createdIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.CreatedIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeUint(uint64(x.ModifiedIndex)) + } + } + var yyn21 bool + if x.Expiration == nil { + yyn21 = true + goto LABEL21 + } + LABEL21: + if yyr2 || yy2arr2 { + if yyn21 { + r.WriteArrayElem() + r.EncodeNil() + } else { + r.WriteArrayElem() + if yyq2[6] { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 { + r.EncodeBuiltin(yym23, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym22 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym22 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } else { + r.EncodeNil() + } + } + } else { + if yyq2[6] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("expiration")) + r.WriteMapElemValue() + if yyn21 { + r.EncodeNil() + } else { + if x.Expiration == nil { + r.EncodeNil() + } else { + yym24 := z.EncBinary() + _ = yym24 + if false { + } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 { + r.EncodeBuiltin(yym25, x.Expiration) + } else if z.HasExtensions() && z.EncExt(x.Expiration) { + } else if yym24 { + z.EncBinaryMarshal(x.Expiration) + } else if !yym24 && z.IsJSONHandle() { + z.EncJSONMarshal(x.Expiration) + } else { + z.EncFallback(x.Expiration) + } + } + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + if yyq2[7] { + yym27 := z.EncBinary() + _ = yym27 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.EncodeInt(0) + } + } else { + if yyq2[7] { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("ttl")) + r.WriteMapElemValue() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 
bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv4 := &x.Key + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*bool)(yyv6)) = r.DecodeBool() + } + } + case "value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "nodes": + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv10 := &x.Nodes + yyv10.CodecDecodeSelf(d) + } + case "createdIndex": + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv11 := &x.CreatedIndex + yym12 := z.DecBinary() + _ = yym12 + if false { + } else { + *((*uint64)(yyv11)) = uint64(r.DecodeUint(64)) + } + } + case "modifiedIndex": + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv13 := &x.ModifiedIndex + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*uint64)(yyv13)) = uint64(r.DecodeUint(64)) + } + } + case "expiration": + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 { + r.DecodeBuiltin(yym17, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym16 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym16 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + case "ttl": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv18 := &x.TTL + yym19 := z.DecBinary() + _ = yym19 + if false { + } else { + *((*int64)(yyv18)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj20 int + var yyb20 bool + var yyhl20 bool = l >= 0 + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv21 := &x.Key + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv23 := &x.Dir + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv25 := &x.Value + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + *((*string)(yyv25)) = r.DecodeString() + } + } + yyj20++ + if 
yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Nodes = nil + } else { + yyv27 := &x.Nodes + yyv27.CodecDecodeSelf(d) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.CreatedIndex = 0 + } else { + yyv28 := &x.CreatedIndex + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*uint64)(yyv28)) = uint64(r.DecodeUint(64)) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.ModifiedIndex = 0 + } else { + yyv30 := &x.ModifiedIndex + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*uint64)(yyv30)) = uint64(r.DecodeUint(64)) + } + } + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + if x.Expiration != nil { + x.Expiration = nil + } + } else { + if x.Expiration == nil { + x.Expiration = new(time.Time) + } + yym33 := z.DecBinary() + _ = yym33 + if false { + } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 { + r.DecodeBuiltin(yym34, x.Expiration) + } else if z.HasExtensions() && z.DecExt(x.Expiration) { + } else if yym33 { + z.DecBinaryUnmarshal(x.Expiration) + } else if !yym33 && z.IsJSONHandle() { + z.DecJSONUnmarshal(x.Expiration) + } else { + z.DecFallback(x.Expiration, false) + } + } + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + for { + yyj20++ + if yyhl20 { + yyb20 = yyj20 > l + } else { + yyb20 = r.CheckBreak() + } + if yyb20 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj20-1, "") + } + r.ReadArrayEnd() +} + +func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + h.encNodes((Nodes)(x), e) + } + } +} + +func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + h.decNodes((*Nodes)(x), d) + } +} + +func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := 
codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(0) + } else { + r.WriteMapStart(0) + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x 
*httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj4 int + var yyb4 bool + var yyhl4 bool = l >= 0 + for { + yyj4++ + if yyhl4 { + yyb4 = yyj4 > l + } else { + yyb4 = r.CheckBreak() + } + if yyb4 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj4-1, "") + } + r.ReadArrayEnd() +} + +func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(5) + } else { + r.WriteMapStart(5) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Sorted")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Sorted)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Quorum")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Quorum)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } 
+} + +func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv8 := &x.Recursive + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*bool)(yyv8)) = r.DecodeBool() + } + } + case "Sorted": + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv10 := &x.Sorted + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + case "Quorum": + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv12 := &x.Quorum + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj14 int + var yyb14 bool + var yyhl14 bool = l >= 0 + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv15 := &x.Prefix + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv17 := &x.Key + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Sorted = false + } else { + yyv21 := &x.Sorted + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + } else { + yyb14 = r.CheckBreak() + } + if yyb14 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Quorum = false + } else { + yyv23 := &x.Quorum + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*bool)(yyv23)) = r.DecodeBool() + } + } + for { + yyj14++ + if yyhl14 { + yyb14 = yyj14 > l + 
} else { + yyb14 = r.CheckBreak() + } + if yyb14 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj14-1, "") + } + r.ReadArrayEnd() +} + +func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("WaitIndex")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeUint(uint64(x.WaitIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := 
&x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "WaitIndex": + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv8 := &x.WaitIndex + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*uint64)(yyv8)) = uint64(r.DecodeUint(64)) + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv10 := &x.Recursive + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*bool)(yyv10)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv15 := &x.Key + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.WaitIndex = 0 + } else { + yyv17 := &x.WaitIndex + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*uint64)(yyv17)) = uint64(r.DecodeUint(64)) + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv19 := &x.Recursive + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(10) + } else { + r.WriteMapStart(10) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + 
r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + x.PrevExist.CodecEncodeSelf(e) + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevExist")) + r.WriteMapElemValue() + x.PrevExist.CodecEncodeSelf(e) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym22 := z.EncBinary() + _ = yym22 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym23 := z.EncBinary() + _ = yym23 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym25 := z.EncBinary() + _ = yym25 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Refresh")) + r.WriteMapElemValue() + yym26 := z.EncBinary() + _ = yym26 + if false { + } else { + r.EncodeBool(bool(x.Refresh)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym28 := z.EncBinary() + _ = yym28 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym29 := z.EncBinary() + _ = yym29 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym31 := z.EncBinary() + _ = yym31 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess")) + r.WriteMapElemValue() + yym32 := z.EncBinary() + _ = yym32 + if false { + } else { + r.EncodeBool(bool(x.NoValueOnSuccess)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, 
_ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv10 := &x.PrevValue + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*string)(yyv10)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv12 := &x.PrevIndex + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*uint64)(yyv12)) = uint64(r.DecodeUint(64)) + } + } + case "PrevExist": + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv14 := &x.PrevExist + yyv14.CodecDecodeSelf(d) + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv15 := &x.TTL + yym16 := z.DecBinary() + _ = yym16 + if false { + } else if z.HasExtensions() && z.DecExt(yyv15) { + } else { + *((*int64)(yyv15)) = int64(r.DecodeInt(64)) + } + } + case "Refresh": + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv17 := &x.Refresh + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*bool)(yyv17)) = r.DecodeBool() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv19 := &x.Dir + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*bool)(yyv19)) = r.DecodeBool() + } + } + case "NoValueOnSuccess": + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv21 := &x.NoValueOnSuccess + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*bool)(yyv21)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj23 int + var yyb23 bool + var yyhl23 bool = l >= 0 + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() 
+ if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv24 := &x.Prefix + yym25 := z.DecBinary() + _ = yym25 + if false { + } else { + *((*string)(yyv24)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv26 := &x.Key + yym27 := z.DecBinary() + _ = yym27 + if false { + } else { + *((*string)(yyv26)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv28 := &x.Value + yym29 := z.DecBinary() + _ = yym29 + if false { + } else { + *((*string)(yyv28)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv30 := &x.PrevValue + yym31 := z.DecBinary() + _ = yym31 + if false { + } else { + *((*string)(yyv30)) = r.DecodeString() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv32 := &x.PrevIndex + yym33 := z.DecBinary() + _ = yym33 + if false { + } else { + *((*uint64)(yyv32)) = uint64(r.DecodeUint(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevExist = "" + } else { + yyv34 := &x.PrevExist + yyv34.CodecDecodeSelf(d) + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv35 := &x.TTL + yym36 := z.DecBinary() + _ = yym36 + if false { + } else if z.HasExtensions() && z.DecExt(yyv35) { + } else { + *((*int64)(yyv35)) = int64(r.DecodeInt(64)) + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Refresh = false + } else { + yyv37 := &x.Refresh + yym38 := z.DecBinary() + _ = yym38 + if false { + } else { + *((*bool)(yyv37)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv39 := &x.Dir + yym40 := z.DecBinary() + _ = yym40 + if false { + } else { + *((*bool)(yyv39)) = r.DecodeBool() + } + } + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.NoValueOnSuccess = false + } else { + yyv41 := &x.NoValueOnSuccess + yym42 := z.DecBinary() + _ = yym42 + if false { + } else { + *((*bool)(yyv41)) = r.DecodeBool() + } + } + for { + yyj23++ + if yyhl23 { + yyb23 = yyj23 > l + } else { + yyb23 = r.CheckBreak() + } + if yyb23 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj23-1, "") + } + r.ReadArrayEnd() +} + +func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + 
r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(6) + } else { + r.WriteMapStart(6) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Key")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Key)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevValue")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("PrevIndex")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else { + r.EncodeUint(uint64(x.PrevIndex)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym16 := z.EncBinary() + _ = yym16 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym17 := z.EncBinary() + _ = yym17 + if false { + } else { + r.EncodeBool(bool(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym19 := z.EncBinary() + _ = yym19 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Recursive")) + r.WriteMapElemValue() + yym20 := z.EncBinary() + _ = yym20 + if false { + } else { + r.EncodeBool(bool(x.Recursive)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) 
+ _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Key": + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv6 := &x.Key + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "PrevValue": + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv8 := &x.PrevValue + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "PrevIndex": + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv10 := &x.PrevIndex + yym11 := z.DecBinary() + _ = yym11 + if false { + } else { + *((*uint64)(yyv10)) = uint64(r.DecodeUint(64)) + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv12 := &x.Dir + yym13 := z.DecBinary() + _ = yym13 + if false { + } else { + *((*bool)(yyv12)) = r.DecodeBool() + } + } + case "Recursive": + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv14 := &x.Recursive + yym15 := z.DecBinary() + _ = yym15 + if false { + } else { + *((*bool)(yyv14)) = r.DecodeBool() + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj16 int + var yyb16 bool + var yyhl16 bool = l >= 0 + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv17 := &x.Prefix + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Key = "" + } else { + yyv19 := &x.Key + yym20 := z.DecBinary() + _ = yym20 + if false { + } else { + *((*string)(yyv19)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevValue = "" + } else { + yyv21 := &x.PrevValue + yym22 := z.DecBinary() + _ = yym22 + if false { + } else { + *((*string)(yyv21)) = r.DecodeString() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.PrevIndex = 0 + } else { + yyv23 := &x.PrevIndex + yym24 := z.DecBinary() + _ = yym24 + if false { + } else { + *((*uint64)(yyv23)) = uint64(r.DecodeUint(64)) + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = false + } else { + yyv25 := &x.Dir + yym26 := z.DecBinary() + _ = yym26 + if false { + } else { + 
*((*bool)(yyv25)) = r.DecodeBool() + } + } + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Recursive = false + } else { + yyv27 := &x.Recursive + yym28 := z.DecBinary() + _ = yym28 + if false { + } else { + *((*bool)(yyv27)) = r.DecodeBool() + } + } + for { + yyj16++ + if yyhl16 { + yyb16 = yyj16 > l + } else { + yyb16 = r.CheckBreak() + } + if yyb16 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj16-1, "") + } + r.ReadArrayEnd() +} + +func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + if x == nil { + r.EncodeNil() + } else { + yym1 := z.EncBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.EncExt(x) { + } else { + yysep2 := !z.EncBinary() + yy2arr2 := z.EncBasicHandle().StructToArray + _, _ = yysep2, yy2arr2 + const yyr2 bool = false + if yyr2 || yy2arr2 { + r.WriteArrayStart(4) + } else { + r.WriteMapStart(4) + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym4 := z.EncBinary() + _ = yym4 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Prefix")) + r.WriteMapElemValue() + yym5 := z.EncBinary() + _ = yym5 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Prefix)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym7 := z.EncBinary() + _ = yym7 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Dir")) + r.WriteMapElemValue() + yym8 := z.EncBinary() + _ = yym8 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Dir)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym10 := z.EncBinary() + _ = yym10 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("Value")) + r.WriteMapElemValue() + yym11 := z.EncBinary() + _ = yym11 + if false { + } else { + r.EncodeString(codecSelferC_UTF87612, string(x.Value)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayElem() + yym13 := z.EncBinary() + _ = yym13 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } else { + r.WriteMapElemKey() + r.EncodeString(codecSelferC_UTF87612, string("TTL")) + r.WriteMapElemValue() + yym14 := z.EncBinary() + _ = yym14 + if false { + } else if z.HasExtensions() && z.EncExt(x.TTL) { + } else { + r.EncodeInt(int64(x.TTL)) + } + } + if yyr2 || yy2arr2 { + r.WriteArrayEnd() + } else { + r.WriteMapEnd() + } + } + } +} + +func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + yym1 := z.DecBinary() + _ = yym1 + if false { + } else if z.HasExtensions() && z.DecExt(x) { + } else { + yyct2 := r.ContainerType() + if yyct2 == codecSelferValueTypeMap7612 { + yyl2 := r.ReadMapStart() + if yyl2 == 0 { + r.ReadMapEnd() + } else { + x.codecDecodeSelfFromMap(yyl2, d) + } + } else if yyct2 == codecSelferValueTypeArray7612 { + yyl2 := r.ReadArrayStart() + if yyl2 == 0 { + r.ReadArrayEnd() + } else { + x.codecDecodeSelfFromArray(yyl2, d) + } + } else { + panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612) + } + } +} + +func (x 
*createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yys3Slc = z.DecScratchBuffer() // default slice to decode into + _ = yys3Slc + var yyhl3 bool = l >= 0 + for yyj3 := 0; ; yyj3++ { + if yyhl3 { + if yyj3 >= l { + break + } + } else { + if r.CheckBreak() { + break + } + } + r.ReadMapElemKey() + yys3Slc = r.DecodeStringAsBytes() + yys3 := string(yys3Slc) + r.ReadMapElemValue() + switch yys3 { + case "Prefix": + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv4 := &x.Prefix + yym5 := z.DecBinary() + _ = yym5 + if false { + } else { + *((*string)(yyv4)) = r.DecodeString() + } + } + case "Dir": + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv6 := &x.Dir + yym7 := z.DecBinary() + _ = yym7 + if false { + } else { + *((*string)(yyv6)) = r.DecodeString() + } + } + case "Value": + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv8 := &x.Value + yym9 := z.DecBinary() + _ = yym9 + if false { + } else { + *((*string)(yyv8)) = r.DecodeString() + } + } + case "TTL": + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv10 := &x.TTL + yym11 := z.DecBinary() + _ = yym11 + if false { + } else if z.HasExtensions() && z.DecExt(yyv10) { + } else { + *((*int64)(yyv10)) = int64(r.DecodeInt(64)) + } + } + default: + z.DecStructFieldNotFound(-1, yys3) + } // end switch yys3 + } // end for yyj3 + r.ReadMapEnd() +} + +func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + var yyj12 int + var yyb12 bool + var yyhl12 bool = l >= 0 + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Prefix = "" + } else { + yyv13 := &x.Prefix + yym14 := z.DecBinary() + _ = yym14 + if false { + } else { + *((*string)(yyv13)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Dir = "" + } else { + yyv15 := &x.Dir + yym16 := z.DecBinary() + _ = yym16 + if false { + } else { + *((*string)(yyv15)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.Value = "" + } else { + yyv17 := &x.Value + yym18 := z.DecBinary() + _ = yym18 + if false { + } else { + *((*string)(yyv17)) = r.DecodeString() + } + } + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + r.ReadArrayEnd() + return + } + r.ReadArrayElem() + if r.TryDecodeAsNil() { + x.TTL = 0 + } else { + yyv19 := &x.TTL + yym20 := z.DecBinary() + _ = yym20 + if false { + } else if z.HasExtensions() && z.DecExt(yyv19) { + } else { + *((*int64)(yyv19)) = int64(r.DecodeInt(64)) + } + } + for { + yyj12++ + if yyhl12 { + yyb12 = yyj12 > l + } else { + yyb12 = r.CheckBreak() + } + if yyb12 { + break + } + r.ReadArrayElem() + z.DecStructFieldNotFound(yyj12-1, "") + } + r.ReadArrayEnd() +} + +func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperEncoder(e) + _, _, _ = h, z, r + r.WriteArrayStart(len(v)) + for _, yyv1 := range v { + r.WriteArrayElem() + if yyv1 == nil { + r.EncodeNil() + } else { + 
yyv1.CodecEncodeSelf(e) + } + } + r.WriteArrayEnd() +} + +func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) { + var h codecSelfer7612 + z, r := codec1978.GenHelperDecoder(d) + _, _, _ = h, z, r + + yyv1 := *v + yyh1, yyl1 := z.DecSliceHelperStart() + var yyc1 bool + _ = yyc1 + if yyl1 == 0 { + if yyv1 == nil { + yyv1 = []*Node{} + yyc1 = true + } else if len(yyv1) != 0 { + yyv1 = yyv1[:0] + yyc1 = true + } + } else { + yyhl1 := yyl1 > 0 + var yyrl1 int + _ = yyrl1 + if yyhl1 { + if yyl1 > cap(yyv1) { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + if yyrl1 <= cap(yyv1) { + yyv1 = yyv1[:yyrl1] + } else { + yyv1 = make([]*Node, yyrl1) + } + yyc1 = true + } else if yyl1 != len(yyv1) { + yyv1 = yyv1[:yyl1] + yyc1 = true + } + } + var yyj1 int + // var yydn1 bool + for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ { + if yyj1 == 0 && len(yyv1) == 0 { + if yyhl1 { + yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8) + } else { + yyrl1 = 8 + } + yyv1 = make([]*Node, yyrl1) + yyc1 = true + } + yyh1.ElemContainerState(yyj1) + // yydn1 = r.TryDecodeAsNil() + + // if indefinite, etc, then expand the slice if necessary + var yydb1 bool + if yyj1 >= len(yyv1) { + yyv1 = append(yyv1, nil) + yyc1 = true + + } + if yydb1 { + z.DecSwallow() + } else { + if r.TryDecodeAsNil() { + if yyv1[yyj1] != nil { + *yyv1[yyj1] = Node{} + } + } else { + if yyv1[yyj1] == nil { + yyv1[yyj1] = new(Node) + } + yyw2 := yyv1[yyj1] + yyw2.CodecDecodeSelf(d) + } + + } + + } + if yyj1 < len(yyv1) { + yyv1 = yyv1[:yyj1] + yyc1 = true + } else if yyj1 == 0 && yyv1 == nil { + yyv1 = make([]*Node, 0) + yyc1 = true + } + } + yyh1.End() + if yyc1 { + *v = yyv1 + } + +} diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go new file mode 100644 index 0000000000..8b9fd3f87a --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/keys.go @@ -0,0 +1,681 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
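The keys.generated.go hunk above is codecgen output: it equips Node, Response, and Nodes with hand-rolled Selfer implementations (CodecEncodeSelf / CodecDecodeSelf) that github.com/ugorji/go/codec dispatches to instead of using reflection. As a minimal illustrative sketch — not part of this diff, and assuming the Nodes type and the "github.com/ugorji/go/codec" import that keys.go below declares — a JSON round trip through those generated methods looks like:

func roundTripNodes(in Nodes) (Nodes, error) {
	// JsonHandle selects the JSON format; the Encoder/Decoder detect the
	// generated Selfer methods above and call them instead of reflecting.
	handle := new(codec.JsonHandle)
	var buf []byte
	if err := codec.NewEncoderBytes(&buf, handle).Encode(in); err != nil {
		return nil, err
	}
	var out Nodes
	if err := codec.NewDecoderBytes(buf, handle).Decode(&out); err != nil {
		return nil, err
	}
	return out, nil
}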
+ +package client + +//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/coreos/etcd/pkg/pathutil" + "github.com/ugorji/go/codec" +) + +const ( + ErrorCodeKeyNotFound = 100 + ErrorCodeTestFailed = 101 + ErrorCodeNotFile = 102 + ErrorCodeNotDir = 104 + ErrorCodeNodeExist = 105 + ErrorCodeRootROnly = 107 + ErrorCodeDirNotEmpty = 108 + ErrorCodeUnauthorized = 110 + + ErrorCodePrevValueRequired = 201 + ErrorCodeTTLNaN = 202 + ErrorCodeIndexNaN = 203 + ErrorCodeInvalidField = 209 + ErrorCodeInvalidForm = 210 + + ErrorCodeRaftInternal = 300 + ErrorCodeLeaderElect = 301 + + ErrorCodeWatcherCleared = 400 + ErrorCodeEventIndexCleared = 401 +) + +type Error struct { + Code int `json:"errorCode"` + Message string `json:"message"` + Cause string `json:"cause"` + Index uint64 `json:"index"` +} + +func (e Error) Error() string { + return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) +} + +var ( + ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") + ErrEmptyBody = errors.New("client: response body is empty") +) + +// PrevExistType is used to define an existence condition when setting +// or deleting Nodes. +type PrevExistType string + +const ( + PrevIgnore = PrevExistType("") + PrevExist = PrevExistType("true") + PrevNoExist = PrevExistType("false") +) + +var ( + defaultV2KeysPrefix = "/v2/keys" +) + +// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value +// API over HTTP. +func NewKeysAPI(c Client) KeysAPI { + return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) +} + +// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller +// to provide a custom base URL path. This should only be used in +// very rare cases. +func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { + return &httpKeysAPI{ + client: c, + prefix: p, + } +} + +type KeysAPI interface { + // Get retrieves a set of Nodes from etcd + Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) + + // Set assigns a new value to a Node identified by a given key. The caller + // may define a set of conditions in the SetOptions. If SetOptions.Dir=true + // then value is ignored. + Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) + + // Delete removes a Node identified by the given key, optionally destroying + // all of its children as well. The caller may define a set of required + // conditions in an DeleteOptions object. + Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) + + // Create is an alias for Set w/ PrevExist=false + Create(ctx context.Context, key, value string) (*Response, error) + + // CreateInOrder is used to atomically create in-order keys within the given directory. + CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) + + // Update is an alias for Set w/ PrevExist=true + Update(ctx context.Context, key, value string) (*Response, error) + + // Watcher builds a new Watcher targeted at a specific Node identified + // by the given key. The Watcher may be configured at creation time + // through a WatcherOptions object. The returned Watcher is designed + // to emit events that happen to a Node, and optionally to its children. 
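	//
	// A minimal editorial sketch of the intended call pattern (not
	// upstream doc text; it assumes a context ctx and a KeysAPI value
	// kapi built with NewKeysAPI):
	//
	//	w := kapi.Watcher("/foo", &WatcherOptions{Recursive: true})
	//	for {
	//		resp, err := w.Next(ctx)
	//		if err != nil {
	//			break
	//		}
	//		fmt.Println(resp.Action, resp.Node.Key)
	//	}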
+ Watcher(key string, opts *WatcherOptions) Watcher +} + +type WatcherOptions struct { + // AfterIndex defines the index after-which the Watcher should + // start emitting events. For example, if a value of 5 is + // provided, the first event will have an index >= 6. + // + // Setting AfterIndex to 0 (default) means that the Watcher + // should start watching for events starting at the current + // index, whatever that may be. + AfterIndex uint64 + + // Recursive specifies whether or not the Watcher should emit + // events that occur in children of the given keyspace. If set + // to false (default), events will be limited to those that + // occur for the exact key. + Recursive bool +} + +type CreateInOrderOptions struct { + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration +} + +type SetOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Set operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. + // + // PrevValue is ignored if Dir=true + PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Set operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // PrevExist specifies whether the Node must currently exist + // (PrevExist) or not (PrevNoExist). If the caller does not + // care about existence, set PrevExist to PrevIgnore, or simply + // leave it unset. + PrevExist PrevExistType + + // TTL defines a period of time after-which the Node should + // expire and no longer exist. Values <= 0 are ignored. Given + // that the zero-value is ignored, TTL cannot be used to set + // a TTL of 0. + TTL time.Duration + + // Refresh set to true means a TTL value can be updated + // without firing a watch or changing the node value. A + // value must not be provided when refreshing a key. + Refresh bool + + // Dir specifies whether or not this Node should be created as a directory. + Dir bool + + // NoValueOnSuccess specifies whether the response contains the current value of the Node. + // If set, the response will only contain the current value when the request fails. + NoValueOnSuccess bool +} + +type GetOptions struct { + // Recursive defines whether or not all children of the Node + // should be returned. + Recursive bool + + // Sort instructs the server whether or not to sort the Nodes. + // If true, the Nodes are sorted alphabetically by key in + // ascending order (A to z). If false (default), the Nodes will + // not be sorted and the ordering used should not be considered + // predictable. + Sort bool + + // Quorum specifies whether it gets the latest committed value that + // has been applied in quorum of members, which ensures external + // consistency (or linearizability). + Quorum bool +} + +type DeleteOptions struct { + // PrevValue specifies what the current value of the Node must + // be in order for the Delete operation to succeed. + // + // Leaving this field empty means that the caller wishes to + // ignore the current value of the Node. This cannot be used + // to compare the Node's current value to an empty string. 
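	//
	// For example, a guarded compare-and-delete might look like this
	// minimal sketch (hypothetical key and value):
	//
	//	kapi.Delete(ctx, "/election/leader", &DeleteOptions{PrevValue: "node-1"})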
+ PrevValue string + + // PrevIndex indicates what the current ModifiedIndex of the + // Node must be in order for the Delete operation to succeed. + // + // If PrevIndex is set to 0 (default), no comparison is made. + PrevIndex uint64 + + // Recursive defines whether or not all children of the Node + // should be deleted. If set to true, all children of the Node + // identified by the given key will be deleted. If left unset + // or explicitly set to false, only a single Node will be + // deleted. + Recursive bool + + // Dir specifies whether or not this Node should be removed as a directory. + Dir bool +} + +type Watcher interface { + // Next blocks until an etcd event occurs, then returns a Response + // representing that event. The behavior of Next depends on the + // WatcherOptions used to construct the Watcher. Next is designed to + // be called repeatedly, each time blocking until a subsequent event + // is available. + // + // If the provided context is cancelled, Next will return a non-nil + // error. Any other failures encountered while waiting for the next + // event (connection issues, deserialization failures, etc.) will + // also result in a non-nil error. + Next(context.Context) (*Response, error) +} + +type Response struct { + // Action is the name of the operation that occurred. Possible values + // include get, set, delete, update, create, compareAndSwap, + // compareAndDelete and expire. + Action string `json:"action"` + + // Node represents the state of the relevant etcd Node. + Node *Node `json:"node"` + + // PrevNode represents the previous state of the Node. PrevNode is non-nil + // only if the Node existed before the action occurred and the action + // caused a change to the Node. + PrevNode *Node `json:"prevNode"` + + // Index holds the cluster-level index at the time the Response was generated. + // This index is not tied to the Node(s) contained in this Response. + Index uint64 `json:"-"` + + // ClusterID holds the cluster-level ID reported by the server. This + // should be different for different etcd clusters. + ClusterID string `json:"-"` +} + +type Node struct { + // Key represents the unique location of this Node (e.g. "/foo/bar"). + Key string `json:"key"` + + // Dir reports whether the node describes a directory. + Dir bool `json:"dir,omitempty"` + + // Value is the current data stored on this Node. If this Node + // is a directory, Value will be empty. + Value string `json:"value"` + + // Nodes holds the children of this Node, only if this Node is a directory. + // This slice will be arbitrarily deep (children, grandchildren, great- + // grandchildren, etc.) if a recursive Get or Watch request were made. + Nodes Nodes `json:"nodes"` + + // CreatedIndex is the etcd index at which this Node was created. + CreatedIndex uint64 `json:"createdIndex"` + + // ModifiedIndex is the etcd index at which this Node was last modified. + ModifiedIndex uint64 `json:"modifiedIndex"` + + // Expiration is the server-side expiration time of the key. + Expiration *time.Time `json:"expiration,omitempty"` + + // TTL is the time to live of the key in seconds.
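	//
	// For example, TTL == 30 means the key expires roughly 30 seconds
	// after it was last set or refreshed; TTLDuration below converts
	// this to the equivalent time.Duration (30 * time.Second).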
+ TTL int64 `json:"ttl,omitempty"` +} + +func (n *Node) String() string { + return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) +} + +// TTLDuration returns the Node's TTL as a time.Duration object +func (n *Node) TTLDuration() time.Duration { + return time.Duration(n.TTL) * time.Second +} + +type Nodes []*Node + +// interfaces for sorting + +func (ns Nodes) Len() int { return len(ns) } +func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } +func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } + +type httpKeysAPI struct { + client httpClient + prefix string +} + +func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { + act := &setAction{ + Prefix: k.prefix, + Key: key, + Value: val, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.PrevExist = opts.PrevExist + act.TTL = opts.TTL + act.Refresh = opts.Refresh + act.Dir = opts.Dir + act.NoValueOnSuccess = opts.NoValueOnSuccess + } + + doCtx := ctx + if act.PrevExist == PrevNoExist { + doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue) + } + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) +} + +func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { + act := &createInOrderAction{ + Prefix: k.prefix, + Dir: dir, + Value: val, + } + + if opts != nil { + act.TTL = opts.TTL + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { + return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) +} + +func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { + act := &deleteAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.PrevValue = opts.PrevValue + act.PrevIndex = opts.PrevIndex + act.Dir = opts.Dir + act.Recursive = opts.Recursive + } + + doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue) + resp, body, err := k.client.Do(doCtx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { + act := &getAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + act.Sorted = opts.Sort + act.Quorum = opts.Quorum + } + + resp, body, err := k.client.Do(ctx, act) + if err != nil { + return nil, err + } + + return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) +} + +func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { + act := waitAction{ + Prefix: k.prefix, + Key: key, + } + + if opts != nil { + act.Recursive = opts.Recursive + if opts.AfterIndex > 0 { + act.WaitIndex = opts.AfterIndex + 1 + } + } + + return &httpWatcher{ + client: k.client, + nextWait: act, + } +} + +type httpWatcher struct { + client httpClient + nextWait waitAction +} + +func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { + for { + 
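		// Each iteration issues one long-poll wait request. When the
		// server responds with an empty body, the ErrEmptyBody branch
		// below retries the same wait until a real event arrives.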
httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) + if err != nil { + return nil, err + } + + resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) + if err != nil { + if err == ErrEmptyBody { + continue + } + return nil, err + } + + hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 + return resp, nil + } +} + +// v2KeysURL forms a URL representing the location of a key. +// The endpoint argument represents the base URL of an etcd +// server. The prefix is the path needed to route from the +// provided endpoint's path to the root of the keys API +// (typically "/v2/keys"). +func v2KeysURL(ep url.URL, prefix, key string) *url.URL { + // We concatenate all parts together manually. We cannot use + // path.Join because it does not reserve trailing slash. + // We call CanonicalURLPath to further cleanup the path. + if prefix != "" && prefix[0] != '/' { + prefix = "/" + prefix + } + if key != "" && key[0] != '/' { + key = "/" + key + } + ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) + return &ep +} + +type getAction struct { + Prefix string + Key string + Recursive bool + Sorted bool + Quorum bool +} + +func (g *getAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, g.Prefix, g.Key) + + params := u.Query() + params.Set("recursive", strconv.FormatBool(g.Recursive)) + params.Set("sorted", strconv.FormatBool(g.Sorted)) + params.Set("quorum", strconv.FormatBool(g.Quorum)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type waitAction struct { + Prefix string + Key string + WaitIndex uint64 + Recursive bool +} + +func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, w.Prefix, w.Key) + + params := u.Query() + params.Set("wait", "true") + params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) + params.Set("recursive", strconv.FormatBool(w.Recursive)) + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type setAction struct { + Prefix string + Key string + Value string + PrevValue string + PrevIndex uint64 + PrevExist PrevExistType + TTL time.Duration + Refresh bool + Dir bool + NoValueOnSuccess bool +} + +func (a *setAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + form := url.Values{} + + // we're either creating a directory or setting a key + if a.Dir { + params.Set("dir", strconv.FormatBool(a.Dir)) + } else { + // These options are only valid for setting a key + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + form.Add("value", a.Value) + } + + // Options which apply to both setting a key and creating a dir + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.PrevExist != PrevIgnore { + params.Set("prevExist", string(a.PrevExist)) + } + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + + if a.Refresh { + form.Add("refresh", "true") + } + if a.NoValueOnSuccess { + params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess)) + } + + u.RawQuery = params.Encode() + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("PUT", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type deleteAction struct { + Prefix string + Key string + PrevValue string + PrevIndex uint64 + Dir bool + Recursive bool +} + +func (a *deleteAction) HTTPRequest(ep url.URL) 
*http.Request { + u := v2KeysURL(ep, a.Prefix, a.Key) + + params := u.Query() + if a.PrevValue != "" { + params.Set("prevValue", a.PrevValue) + } + if a.PrevIndex != 0 { + params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) + } + if a.Dir { + params.Set("dir", "true") + } + if a.Recursive { + params.Set("recursive", "true") + } + u.RawQuery = params.Encode() + + req, _ := http.NewRequest("DELETE", u.String(), nil) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req +} + +type createInOrderAction struct { + Prefix string + Dir string + Value string + TTL time.Duration +} + +func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { + u := v2KeysURL(ep, a.Prefix, a.Dir) + + form := url.Values{} + form.Add("value", a.Value) + if a.TTL > 0 { + form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) + } + body := strings.NewReader(form.Encode()) + + req, _ := http.NewRequest("POST", u.String(), body) + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + return req +} + +func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { + switch code { + case http.StatusOK, http.StatusCreated: + if len(body) == 0 { + return nil, ErrEmptyBody + } + res, err = unmarshalSuccessfulKeysResponse(header, body) + default: + err = unmarshalFailedKeysResponse(body) + } + return res, err +} + +func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { + var res Response + err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) + if err != nil { + return nil, ErrInvalidJSON + } + if header.Get("X-Etcd-Index") != "" { + res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) + if err != nil { + return nil, err + } + } + res.ClusterID = header.Get("X-Etcd-Cluster-ID") + return &res, nil +} + +func unmarshalFailedKeysResponse(body []byte) error { + var etcdErr Error + if err := json.Unmarshal(body, &etcdErr); err != nil { + return ErrInvalidJSON + } + return etcdErr +} diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go new file mode 100644 index 0000000000..aafa3d1b87 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/members.go @@ -0,0 +1,303 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + defaultV2MembersPrefix = "/v2/members" + defaultLeaderSuffix = "/leader" +) + +type Member struct { + // ID is the unique identifier of this Member. + ID string `json:"id"` + + // Name is a human-readable, non-unique identifier of this Member. + Name string `json:"name"` + + // PeerURLs represents the HTTP(S) endpoints this Member uses to + // participate in etcd's consensus protocol. 
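+ // (For example, a peer URL typically looks like "http://10.0.0.1:2380"; the host here is illustrative.)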
+ PeerURLs []string `json:"peerURLs"` + + // ClientURLs represents the HTTP(S) endpoints on which this Member + // serves its client-facing APIs. + ClientURLs []string `json:"clientURLs"` +} + +type memberCollection []Member + +func (c *memberCollection) UnmarshalJSON(data []byte) error { + d := struct { + Members []Member + }{} + + if err := json.Unmarshal(data, &d); err != nil { + return err + } + + if d.Members == nil { + *c = make([]Member, 0) + return nil + } + + *c = d.Members + return nil +} + +type memberCreateOrUpdateRequest struct { + PeerURLs types.URLs +} + +func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { + s := struct { + PeerURLs []string `json:"peerURLs"` + }{ + PeerURLs: make([]string, len(m.PeerURLs)), + } + + for i, u := range m.PeerURLs { + s.PeerURLs[i] = u.String() + } + + return json.Marshal(&s) +} + +// NewMembersAPI constructs a new MembersAPI that uses HTTP to +// interact with etcd's membership API. +func NewMembersAPI(c Client) MembersAPI { + return &httpMembersAPI{ + client: c, + } +} + +type MembersAPI interface { + // List enumerates the current cluster membership. + List(ctx context.Context) ([]Member, error) + + // Add instructs etcd to accept a new Member into the cluster. + Add(ctx context.Context, peerURL string) (*Member, error) + + // Remove demotes an existing Member out of the cluster. + Remove(ctx context.Context, mID string) error + + // Update instructs etcd to update an existing Member in the cluster. + Update(ctx context.Context, mID string, peerURLs []string) error + + // Leader gets current leader of the cluster + Leader(ctx context.Context) (*Member, error) +} + +type httpMembersAPI struct { + client httpClient +} + +func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { + req := &membersAPIActionList{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var mCollection memberCollection + if err := json.Unmarshal(body, &mCollection); err != nil { + return nil, err + } + + return []Member(mCollection), nil +} + +func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { + urls, err := types.NewURLs([]string{peerURL}) + if err != nil { + return nil, err + } + + req := &membersAPIActionAdd{peerURLs: urls} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusCreated { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return nil, err + } + return nil, merr + } + + var memb Member + if err := json.Unmarshal(body, &memb); err != nil { + return nil, err + } + + return &memb, nil +} + +func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { + urls, err := types.NewURLs(peerURLs) + if err != nil { + return err + } + + req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent { + var merr membersError + if err := json.Unmarshal(body, &merr); err != nil { + return err + } + return merr + } + + return nil +} + +func (m 
*httpMembersAPI) Remove(ctx context.Context, memberID string) error { + req := &membersAPIActionRemove{memberID: memberID} + resp, _, err := m.client.Do(ctx, req) + if err != nil { + return err + } + + return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) +} + +func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { + req := &membersAPIActionLeader{} + resp, body, err := m.client.Do(ctx, req) + if err != nil { + return nil, err + } + + if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { + return nil, err + } + + var leader Member + if err := json.Unmarshal(body, &leader); err != nil { + return nil, err + } + + return &leader, nil +} + +type membersAPIActionList struct{} + +func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +type membersAPIActionRemove struct { + memberID string +} + +func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, d.memberID) + req, _ := http.NewRequest("DELETE", u.String(), nil) + return req +} + +type membersAPIActionAdd struct { + peerURLs types.URLs +} + +func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +type membersAPIActionUpdate struct { + memberID string + peerURLs types.URLs +} + +func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} + u.Path = path.Join(u.Path, a.memberID) + b, _ := json.Marshal(&m) + req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) + req.Header.Set("Content-Type", "application/json") + return req +} + +func assertStatusCode(got int, want ...int) (err error) { + for _, w := range want { + if w == got { + return nil + } + } + return fmt.Errorf("unexpected status code %d", got) +} + +type membersAPIActionLeader struct{} + +func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { + u := v2MembersURL(ep) + u.Path = path.Join(u.Path, defaultLeaderSuffix) + req, _ := http.NewRequest("GET", u.String(), nil) + return req +} + +// v2MembersURL add the necessary path to the provided endpoint +// to route requests to the default v2 members API. +func v2MembersURL(ep url.URL) *url.URL { + ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) + return &ep +} + +type membersError struct { + Message string `json:"message"` + Code int `json:"-"` +} + +func (e membersError) Error() string { + return e.Message +} diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go new file mode 100644 index 0000000000..15a8babff4 --- /dev/null +++ b/vendor/github.com/coreos/etcd/client/util.go @@ -0,0 +1,53 @@ +// Copyright 2016 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "regexp" +) + +var ( + roleNotFoundRegExp *regexp.Regexp + userNotFoundRegExp *regexp.Regexp +) + +func init() { + roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.") + userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.") +} + +// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. +func IsKeyNotFound(err error) bool { + if cErr, ok := err.(Error); ok { + return cErr.Code == ErrorCodeKeyNotFound + } + return false +} + +// IsRoleNotFound returns true if the error means role not found of v2 API. +func IsRoleNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return roleNotFoundRegExp.MatchString(ae.Message) + } + return false +} + +// IsUserNotFound returns true if the error means user not found of v2 API. +func IsUserNotFound(err error) bool { + if ae, ok := err.(authError); ok { + return userNotFoundRegExp.MatchString(ae.Message) + } + return false +} diff --git a/vendor/github.com/coreos/etcd/cmd/etcd b/vendor/github.com/coreos/etcd/cmd/etcd new file mode 120000 index 0000000000..b870225aa0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcd @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/etcdctl b/vendor/github.com/coreos/etcd/cmd/etcdctl new file mode 120000 index 0000000000..05bb269d60 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/etcdctl @@ -0,0 +1 @@ +../etcdctl \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/functional b/vendor/github.com/coreos/etcd/cmd/functional new file mode 120000 index 0000000000..44faa31aef --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/functional @@ -0,0 +1 @@ +../functional \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/cmd/tools b/vendor/github.com/coreos/etcd/cmd/tools new file mode 120000 index 0000000000..4887d6e0c9 --- /dev/null +++ b/vendor/github.com/coreos/etcd/cmd/tools @@ -0,0 +1 @@ +../tools \ No newline at end of file diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go new file mode 100644 index 0000000000..f26254ba93 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go @@ -0,0 +1,31 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pathutil implements utility functions for handling slash-separated +// paths. +package pathutil + +import "path" + +// CanonicalURLPath returns the canonical url path for p, which follows the rules: +// 1. the path always starts with "/" +// 2. replace multiple slashes with a single slash +// 3. replace each '.' '..' path name element with equivalent one +// 4. keep the trailing slash +// The function is borrowed from stdlib http.cleanPath in server.go. 
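+// For example (illustrative): CanonicalURLPath("") == "/", CanonicalURLPath("a/b") == "/a/b", and CanonicalURLPath("//v2//keys/") == "/v2/keys/".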
+func CanonicalURLPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root, + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + return np +} diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go new file mode 100644 index 0000000000..600061ce8e --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/srv/srv.go @@ -0,0 +1,141 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package srv looks up DNS SRV records. +package srv + +import ( + "fmt" + "net" + "net/url" + "strings" + + "github.com/coreos/etcd/pkg/types" +) + +var ( + // indirection for testing + lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict + resolveTCPAddr = net.ResolveTCPAddr +) + +// GetCluster gets the cluster information via DNS discovery. +// Also sees each entry as a separate instance. +func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) { + tempName := int(0) + tcp2ap := make(map[string]url.URL) + + // First, resolve the apurls + for _, url := range apurls { + tcpAddr, err := resolveTCPAddr("tcp", url.Host) + if err != nil { + return nil, err + } + tcp2ap[tcpAddr.String()] = url + } + + stringParts := []string{} + updateNodeMap := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", dns) + if err != nil { + return err + } + for _, srv := range addrs { + port := fmt.Sprintf("%d", srv.Port) + host := net.JoinHostPort(srv.Target, port) + tcpAddr, terr := resolveTCPAddr("tcp", host) + if terr != nil { + err = terr + continue + } + n := "" + url, ok := tcp2ap[tcpAddr.String()] + if ok { + n = name + } + if n == "" { + n = fmt.Sprintf("%d", tempName) + tempName++ + } + // SRV records have a trailing dot but URL shouldn't. 
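+ // e.g. "etcd-0.example.com." becomes "etcd-0.example.com" before being joined with the port.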
+ shortHost := strings.TrimSuffix(srv.Target, ".") + urlHost := net.JoinHostPort(shortHost, port) + if ok && url.Scheme != scheme { + err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String()) + } else { + stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost)) + } + } + if len(stringParts) == 0 { + return err + } + return nil + } + + failCount := 0 + err := updateNodeMap(service+"-ssl", "https") + srvErr := make([]string, 2) + if err != nil { + srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err) + failCount++ + } + err = updateNodeMap(service, "http") + if err != nil { + srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err) + failCount++ + } + if failCount == 2 { + return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1]) + } + return stringParts, nil +} + +type SRVClients struct { + Endpoints []string + SRVs []*net.SRV +} + +// GetClient looks up the client endpoints for a service and domain. +func GetClient(service, domain string) (*SRVClients, error) { + var urls []*url.URL + var srvs []*net.SRV + + updateURLs := func(service, scheme string) error { + _, addrs, err := lookupSRV(service, "tcp", domain) + if err != nil { + return err + } + for _, srv := range addrs { + urls = append(urls, &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), + }) + } + srvs = append(srvs, addrs...) + return nil + } + + errHTTPS := updateURLs(service+"-ssl", "https") + errHTTP := updateURLs(service, "http") + + if errHTTPS != nil && errHTTP != nil { + return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) + } + + endpoints := make([]string, len(urls)) + for i := range urls { + endpoints[i] = urls[i].String() + } + return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go new file mode 100644 index 0000000000..de8ef0bd71 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types declares various data types and implements type-checking +// functions. +package types diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go new file mode 100644 index 0000000000..1b042d9ce6 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/id.go @@ -0,0 +1,41 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strconv" +) + +// ID represents a generic identifier which is canonically +// stored as a uint64 but is typically represented as a +// base-16 string for input/output +type ID uint64 + +func (i ID) String() string { + return strconv.FormatUint(uint64(i), 16) +} + +// IDFromString attempts to create an ID from a base-16 string. +func IDFromString(s string) (ID, error) { + i, err := strconv.ParseUint(s, 16, 64) + return ID(i), err +} + +// IDSlice implements the sort interface +type IDSlice []ID + +func (p IDSlice) Len() int { return len(p) } +func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } +func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go new file mode 100644 index 0000000000..c111b0c0c0 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/set.go @@ -0,0 +1,178 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "reflect" + "sort" + "sync" +) + +type Set interface { + Add(string) + Remove(string) + Contains(string) bool + Equals(Set) bool + Length() int + Values() []string + Copy() Set + Sub(Set) Set +} + +func NewUnsafeSet(values ...string) *unsafeSet { + set := &unsafeSet{make(map[string]struct{})} + for _, v := range values { + set.Add(v) + } + return set +} + +func NewThreadsafeSet(values ...string) *tsafeSet { + us := NewUnsafeSet(values...) 
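+ // The threadsafe set simply wraps the unsafe set behind an RWMutex; each method below takes the lock before delegating.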
+ return &tsafeSet{us, sync.RWMutex{}} +} + +type unsafeSet struct { + d map[string]struct{} +} + +// Add adds a new value to the set (no-op if the value is already present) +func (us *unsafeSet) Add(value string) { + us.d[value] = struct{}{} +} + +// Remove removes the given value from the set +func (us *unsafeSet) Remove(value string) { + delete(us.d, value) +} + +// Contains returns whether the set contains the given value +func (us *unsafeSet) Contains(value string) (exists bool) { + _, exists = us.d[value] + return exists +} + +// ContainsAll returns whether the set contains all given values +func (us *unsafeSet) ContainsAll(values []string) bool { + for _, s := range values { + if !us.Contains(s) { + return false + } + } + return true +} + +// Equals returns whether the contents of two sets are identical +func (us *unsafeSet) Equals(other Set) bool { + v1 := sort.StringSlice(us.Values()) + v2 := sort.StringSlice(other.Values()) + v1.Sort() + v2.Sort() + return reflect.DeepEqual(v1, v2) +} + +// Length returns the number of elements in the set +func (us *unsafeSet) Length() int { + return len(us.d) +} + +// Values returns the values of the Set in an unspecified order. +func (us *unsafeSet) Values() (values []string) { + values = make([]string, 0) + for val := range us.d { + values = append(values, val) + } + return values +} + +// Copy creates a new Set containing the values of the first +func (us *unsafeSet) Copy() Set { + cp := NewUnsafeSet() + for val := range us.d { + cp.Add(val) + } + + return cp +} + +// Sub removes all elements in other from the set +func (us *unsafeSet) Sub(other Set) Set { + oValues := other.Values() + result := us.Copy().(*unsafeSet) + + for _, val := range oValues { + if _, ok := result.d[val]; !ok { + continue + } + delete(result.d, val) + } + + return result +} + +type tsafeSet struct { + us *unsafeSet + m sync.RWMutex +} + +func (ts *tsafeSet) Add(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Add(value) +} + +func (ts *tsafeSet) Remove(value string) { + ts.m.Lock() + defer ts.m.Unlock() + ts.us.Remove(value) +} + +func (ts *tsafeSet) Contains(value string) (exists bool) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Contains(value) +} + +func (ts *tsafeSet) Equals(other Set) bool { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Equals(other) +} + +func (ts *tsafeSet) Length() int { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Length() +} + +func (ts *tsafeSet) Values() (values []string) { + ts.m.RLock() + defer ts.m.RUnlock() + return ts.us.Values() +} + +func (ts *tsafeSet) Copy() Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Copy().(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} + +func (ts *tsafeSet) Sub(other Set) Set { + ts.m.RLock() + defer ts.m.RUnlock() + usResult := ts.us.Sub(other).(*unsafeSet) + return &tsafeSet{usResult, sync.RWMutex{}} +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go new file mode 100644 index 0000000000..0dd9ca798a --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go @@ -0,0 +1,22 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// Uint64Slice implements sort interface +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go new file mode 100644 index 0000000000..9e5d03ff64 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go @@ -0,0 +1,82 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "errors" + "fmt" + "net" + "net/url" + "sort" + "strings" +) + +type URLs []url.URL + +func NewURLs(strs []string) (URLs, error) { + all := make([]url.URL, len(strs)) + if len(all) == 0 { + return nil, errors.New("no valid URLs given") + } + for i, in := range strs { + in = strings.TrimSpace(in) + u, err := url.Parse(in) + if err != nil { + return nil, err + } + if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" { + return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in) + } + if _, _, err := net.SplitHostPort(u.Host); err != nil { + return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) + } + if u.Path != "" { + return nil, fmt.Errorf("URL must not contain a path: %s", in) + } + all[i] = *u + } + us := URLs(all) + us.Sort() + + return us, nil +} + +func MustNewURLs(strs []string) URLs { + urls, err := NewURLs(strs) + if err != nil { + panic(err) + } + return urls +} + +func (us URLs) String() string { + return strings.Join(us.StringSlice(), ",") +} + +func (us *URLs) Sort() { + sort.Sort(us) +} +func (us URLs) Len() int { return len(us) } +func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } +func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } + +func (us URLs) StringSlice() []string { + out := make([]string, len(us)) + for i := range us { + out[i] = us[i].String() + } + + return out +} diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go new file mode 100644 index 0000000000..47690cc381 --- /dev/null +++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go @@ -0,0 +1,107 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + "sort" + "strings" +) + +// URLsMap is a map from a name to its URLs. +type URLsMap map[string]URLs + +// NewURLsMap returns a URLsMap instantiated from the given string, +// which consists of discovery-formatted names-to-URLs, like: +// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 +func NewURLsMap(s string) (URLsMap, error) { + m := parse(s) + + cl := URLsMap{} + for name, urls := range m { + us, err := NewURLs(urls) + if err != nil { + return nil, err + } + cl[name] = us + } + return cl, nil +} + +// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The +// string values in the map can be multiple values separated by the sep string. +func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) { + var err error + um := URLsMap{} + for k, v := range m { + um[k], err = NewURLs(strings.Split(v, sep)) + if err != nil { + return nil, err + } + } + return um, nil +} + +// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. +func (c URLsMap) String() string { + var pairs []string + for name, urls := range c { + for _, url := range urls { + pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) + } + } + sort.Strings(pairs) + return strings.Join(pairs, ",") +} + +// URLs returns a list of all URLs. +// The returned list is sorted in ascending lexicographical order. +func (c URLsMap) URLs() []string { + var urls []string + for _, us := range c { + for _, u := range us { + urls = append(urls, u.String()) + } + } + sort.Strings(urls) + return urls +} + +// Len returns the size of URLsMap. +func (c URLsMap) Len() int { + return len(c) +} + +// parse parses the given string and returns a map listing the values specified for each key. +func parse(s string) map[string][]string { + m := make(map[string][]string) + for s != "" { + key := s + if i := strings.IndexAny(key, ","); i >= 0 { + key, s = key[:i], key[i+1:] + } else { + s = "" + } + if key == "" { + continue + } + value := "" + if i := strings.Index(key, "="); i >= 0 { + key, value = key[:i], key[i+1:] + } + m[key] = append(m[key], value) + } + return m +} diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 0000000000..fd9ee3729e --- /dev/null +++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,2004 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft.proto + +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: "MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = 
map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 
`protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` + Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` + NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` + Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, 
i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, num := range m.Nodes { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 
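+ // 0x10 is the protobuf key for field 2 (learners) with wire type 0 (varint).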
+ i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + n += 1 + sovRaft(uint64(e)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += 
len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
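+ // Unknown fields are preserved in XXX_unrecognized so they survive a round-trip through Marshal.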
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Nodes = append(m.Nodes, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 815 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45, + 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38, + 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b, + 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20, + 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3, + 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xef, 0xab, 0xae, 0xfa, 0xea, 0xeb, 0x9a, 0x01, 0x50, 0x62, 0xa9, + 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f, + 0x9b, 0x53, 0xc1, 0x4e, 0x7e, 0x83, 0xd6, 0xb7, 0xa9, 0x56, 0x77, 0xf8, 0x19, 0x04, 0x57, 0x77, + 0x19, 0x71, 0x6f, 0xec, 0x4d, 0x07, 0xc7, 0x7b, 0x47, 0xc5, 0xad, 0x23, 0x4b, 0x1a, 0xe2, 0x24, + 0xb8, 0xff, 0xe7, 0x93, 0xc6, 0xdc, 0x26, 0x21, 0x87, 0xe0, 0x8a, 0x54, 0xc2, 0xfd, 0xb1, 0x37, + 0x0d, 0x36, 0x0c, 0xa9, 0x04, 0xf7, 0xa1, 0x75, 0x91, 0x46, 0xf4, 0x8e, 0x37, 0x2b, 0x54, 0x01, + 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd, 0x03, + 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0, 0x42, + 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xa1, 0x28, 0xdc, 0x2a, 0x3a, 0x95, 0xe9, 0xf2, 0xb5, 0x21, + 0x5c, 0xf1, 0xde, 
0xa2, 0x04, 0x4c, 0xf3, 0x95, 0x6d, 0x5e, 0xd5, 0x55, 0x40, 0x46, 0xb2, 0x36, + 0x92, 0xab, 0xba, 0x2c, 0x32, 0xf9, 0x01, 0xba, 0xa5, 0x02, 0x23, 0xd1, 0x28, 0xb0, 0x3d, 0xfb, + 0x73, 0x7b, 0xc6, 0xaf, 0xa1, 0x9b, 0x38, 0x65, 0xb6, 0x70, 0x78, 0xcc, 0x4b, 0x2d, 0x8f, 0x95, + 0xbb, 0xba, 0x9b, 0xfc, 0xc9, 0x5f, 0x4d, 0xe8, 0xcc, 0x28, 0xcf, 0x45, 0x4c, 0xf8, 0x1c, 0x02, + 0xbd, 0x75, 0xf8, 0x59, 0x59, 0xc3, 0xd1, 0x55, 0x8f, 0x4d, 0x1a, 0x0e, 0xc1, 0xd7, 0xb2, 0x36, + 0x89, 0xaf, 0xa5, 0x19, 0x63, 0xa9, 0xe4, 0xa3, 0x31, 0x0c, 0xb2, 0x19, 0x30, 0x78, 0x3c, 0x20, + 0x8e, 0xa0, 0x73, 0x2b, 0x63, 0xfb, 0x60, 0xad, 0x0a, 0x59, 0x82, 0x5b, 0xdb, 0xda, 0x4f, 0x6d, + 0x7b, 0x0e, 0x1d, 0x4a, 0xb5, 0x5a, 0x51, 0xce, 0x3b, 0xe3, 0xe6, 0x34, 0x3c, 0xde, 0xa9, 0x6d, + 0x46, 0x59, 0xca, 0xe5, 0xe0, 0x01, 0xb4, 0x17, 0x32, 0x49, 0x56, 0x9a, 0x77, 0x2b, 0xb5, 0x1c, + 0x86, 0xc7, 0xd0, 0xcd, 0x9d, 0x63, 0xbc, 0x67, 0x9d, 0x64, 0x8f, 0x9d, 0x2c, 0x1d, 0x2c, 0xf3, + 0x4c, 0x45, 0x45, 0x3f, 0xd3, 0x42, 0x73, 0x18, 0x7b, 0xd3, 0x6e, 0x59, 0xb1, 0xc0, 0xf0, 0x53, + 0x80, 0xe2, 0x74, 0xbe, 0x4a, 0x35, 0x0f, 0x2b, 0x3d, 0x2b, 0x38, 0x72, 0xe8, 0x2c, 0x64, 0xaa, + 0xe9, 0x9d, 0xe6, 0x7d, 0xfb, 0xb0, 0x65, 0x38, 0xf9, 0x11, 0x7a, 0xe7, 0x42, 0x45, 0xc5, 0xfa, + 0x94, 0x0e, 0x7a, 0x4f, 0x1c, 0xe4, 0x10, 0xbc, 0x95, 0x9a, 0xea, 0xfb, 0x6e, 0x90, 0xca, 0xc0, + 0xcd, 0xa7, 0x03, 0x4f, 0xbe, 0x81, 0xde, 0x66, 0x5d, 0x71, 0x08, 0xad, 0x54, 0x46, 0x94, 0x73, + 0x6f, 0xdc, 0x9c, 0x06, 0xf3, 0x22, 0xc0, 0x7d, 0xe8, 0xde, 0x92, 0x50, 0x29, 0xa9, 0x9c, 0xfb, + 0x96, 0xd8, 0xc4, 0x93, 0x3f, 0x3c, 0x00, 0x73, 0xff, 0xf4, 0x46, 0xa4, 0xb1, 0xdd, 0x88, 0x8b, + 0xb3, 0x9a, 0x3a, 0xff, 0xe2, 0x0c, 0xbf, 0x70, 0x1f, 0xae, 0x6f, 0xd7, 0xea, 0xe3, 0xea, 0x67, + 0x52, 0xdc, 0x7b, 0xf2, 0xf5, 0x1e, 0x40, 0xfb, 0x52, 0x46, 0x74, 0x71, 0x56, 0xd7, 0x5c, 0x60, + 0xc6, 0xac, 0x53, 0x67, 0x56, 0xf1, 0xa1, 0x96, 0xe1, 0xe1, 0x97, 0xd0, 0xdb, 0xfc, 0x0e, 0x70, + 0x17, 0x42, 0x1b, 0x5c, 0x4a, 0x95, 0x88, 0x5b, 0xd6, 0xc0, 0x67, 0xb0, 0x6b, 0x81, 0x6d, 0x63, + 0xe6, 0x1d, 0xfe, 0xed, 0x43, 0x58, 0x59, 0x70, 0x04, 0x68, 0xcf, 0xf2, 0xf8, 0x7c, 0x9d, 0xb1, + 0x06, 0x86, 0xd0, 0x99, 0xe5, 0xf1, 0x09, 0x09, 0xcd, 0x3c, 0x17, 0xbc, 0x52, 0x32, 0x63, 0xbe, + 0xcb, 0x7a, 0x91, 0x65, 0xac, 0x89, 0x03, 0x80, 0xe2, 0x3c, 0xa7, 0x3c, 0x63, 0x81, 0x4b, 0xfc, + 0x5e, 0x6a, 0x62, 0x2d, 0x23, 0xc2, 0x05, 0x96, 0x6d, 0x3b, 0xd6, 0x2c, 0x13, 0xeb, 0x20, 0x83, + 0xbe, 0x69, 0x46, 0x42, 0xe9, 0x6b, 0xd3, 0xa5, 0x8b, 0x43, 0x60, 0x55, 0xc4, 0x5e, 0xea, 0x21, + 0xc2, 0x60, 0x96, 0xc7, 0x6f, 0x52, 0x45, 0x62, 0x71, 0x23, 0xae, 0x6f, 0x89, 0x01, 0xee, 0xc1, + 0x8e, 0x2b, 0x64, 0x1e, 0x6f, 0x9d, 0xb3, 0xd0, 0xa5, 0x9d, 0xde, 0xd0, 0xe2, 0x97, 0xef, 0xd6, + 0x52, 0xad, 0x13, 0xd6, 0xc7, 0x8f, 0x60, 0x6f, 0x96, 0xc7, 0x57, 0x4a, 0xa4, 0xf9, 0x92, 0xd4, + 0x4b, 0x12, 0x11, 0x29, 0xb6, 0xe3, 0x6e, 0x5f, 0xad, 0x12, 0x92, 0x6b, 0x7d, 0x29, 0x7f, 0x65, + 0x03, 0x27, 0x66, 0x4e, 0x22, 0xb2, 0x3f, 0x43, 0xb6, 0xeb, 0xc4, 0x6c, 0x10, 0x2b, 0x86, 0xb9, + 0x79, 0x5f, 0x29, 0xb2, 0x23, 0xee, 0xb9, 0xae, 0x2e, 0xb6, 0x39, 0x78, 0x78, 0x07, 0x83, 0xfa, + 0xf3, 0x1a, 0x1d, 0x5b, 0xe4, 0x45, 0x14, 0x99, 0xb7, 0x64, 0x0d, 0xe4, 0x30, 0xdc, 0xc2, 0x73, + 0x4a, 0xe4, 0x5b, 0xb2, 0x8c, 0x57, 0x67, 0xde, 0x64, 0x91, 0xd0, 0x05, 0xe3, 0xe3, 0x01, 0xf0, + 0x5a, 0xa9, 0x97, 0xc5, 0x36, 0x5a, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f, 0x8c, + 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf, 0xa8, + 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 
0x86, 0x52, 0x5b, 0xe0, 0x74, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go new file mode 100644 index 0000000000..ea04ae5675 --- /dev/null +++ b/vendor/github.com/coreos/etcd/version/version.go @@ -0,0 +1,56 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package version implements etcd version parsing and contains latest version +// information. +package version + +import ( + "fmt" + "strings" + + "github.com/coreos/go-semver/semver" +) + +var ( + // MinClusterVersion is the min cluster version this etcd binary is compatible with. + MinClusterVersion = "3.0.0" + Version = "3.3.5" + APIVersion = "unknown" + + // Git SHA Value will be set during build + GitSHA = "Not provided (use ./build instead of go build)" +) + +func init() { + ver, err := semver.NewVersion(Version) + if err == nil { + APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor) + } +} + +type Versions struct { + Server string `json:"etcdserver"` + Cluster string `json:"etcdcluster"` + // TODO: raft state machine version +} + +// Cluster only keeps the major.minor. +func Cluster(v string) string { + vs := strings.Split(v, ".") + if len(vs) <= 2 { + return v + } + return fmt.Sprintf("%s.%s", vs[0], vs[1]) +} diff --git a/vendor/github.com/GoogleContainerTools/container-diff/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE similarity index 100% rename from vendor/github.com/GoogleContainerTools/container-diff/LICENSE rename to vendor/github.com/coreos/go-semver/LICENSE diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 0000000000..110fc23e15 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,268 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
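The generated Unmarshal methods in raft.pb.go above all reduce to one core pattern: a protobuf base-128 varint decode loop (seven payload bits per byte; a set high bit means another byte follows), with the same overflow and truncation guards each time. A minimal standalone sketch of that step, assuming nothing beyond the standard library; the helper name decodeVarint is illustrative and not part of the generated file.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from data, returning the decoded
// value and the number of bytes consumed. It mirrors the shift loops in the
// generated code, including the 64-bit overflow and unexpected-EOF checks.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows a 64-bit integer") // the ErrIntOverflowRaft case
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // truncated input
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}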
+ +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. 
+func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. + if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 0000000000..e256b41a5d --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
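The semver.go file above gives Version a flag.Value-style Set parser plus comparison and bump helpers. A rough usage sketch of that API (the version strings and expected outputs here are illustrative only):

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	a := semver.New("3.3.5") // Must-style constructor: panics on malformed input
	b, err := semver.NewVersion("3.4.0-rc.1+build7")
	if err != nil {
		panic(err)
	}

	fmt.Println(a.LessThan(*b)) // true: the major.minor.patch triple is compared first
	fmt.Println(b.PreRelease)   // rc.1   (split off at "-")
	fmt.Println(b.Metadata)     // build7 (split off at "+")

	a.BumpMinor()  // resets Patch, PreRelease, and Metadata
	fmt.Println(a) // 3.4.0
}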
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/deckarep/golang-set/LICENSE b/vendor/github.com/deckarep/golang-set/LICENSE new file mode 100644 index 0000000000..b5768f89cf --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/LICENSE @@ -0,0 +1,22 @@ +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/deckarep/golang-set/iterator.go b/vendor/github.com/deckarep/golang-set/iterator.go new file mode 100644 index 0000000000..9dfecade42 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/iterator.go @@ -0,0 +1,58 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +// Iterator defines an iterator over a Set, its C channel can be used to range over the Set's +// elements. 
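sort.go above adapts a []*Version to sort.Interface through the Versions type. A short sketch of semver.Sort, showing that numeric fields compare numerically and that a pre-release sorts before the corresponding release:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	vs := []*semver.Version{
		semver.New("1.10.0"),
		semver.New("1.2.3"),
		semver.New("1.10.0-rc.1"),
	}
	semver.Sort(vs)
	fmt.Println(vs[0], vs[1], vs[2]) // 1.2.3 1.10.0-rc.1 1.10.0
}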
+type Iterator struct { + C <-chan interface{} + stop chan struct{} +} + +// Stop stops the Iterator, no further elements will be received on C, C will be closed. +func (i *Iterator) Stop() { + // Allows for Stop() to be called multiple times + // (close() panics when called on already closed channel) + defer func() { + recover() + }() + + close(i.stop) + + // Exhaust any remaining elements. + for range i.C { + } +} + +// newIterator returns a new Iterator instance together with its item and stop channels. +func newIterator() (*Iterator, chan<- interface{}, <-chan struct{}) { + itemChan := make(chan interface{}) + stopChan := make(chan struct{}) + return &Iterator{ + C: itemChan, + stop: stopChan, + }, itemChan, stopChan +} diff --git a/vendor/github.com/deckarep/golang-set/set.go b/vendor/github.com/deckarep/golang-set/set.go new file mode 100644 index 0000000000..7411982a96 --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/set.go @@ -0,0 +1,214 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// Package mapset implements a simple and generic set collection. +// Items stored within it are unordered and unique. It supports +// typical set operations: membership testing, intersection, union, +// difference, symmetric difference and cloning. +// +// Package mapset provides two implementations of the Set +// interface. The default implementation is safe for concurrent +// access, but a non-thread-safe implementation is also provided for +// programs that can benefit from the slight speed improvement and +// that can enforce mutual exclusion through other means. +package mapset + +// Set is the primary interface provided by the mapset package. It +// represents an unordered set of data and a large number of +// operations that can be applied to that set. +type Set interface { + // Adds an element to the set. Returns whether + // the item was added. + Add(i interface{}) bool + + // Returns the number of elements in the set. + Cardinality() int + + // Removes all elements from the set, leaving + // the empty set. + Clear() + + // Returns a clone of the set using the same + // implementation, duplicating all keys. + Clone() Set + + // Returns whether the given items + // are all in the set. + Contains(i ...interface{}) bool + + // Returns the difference between this set + // and other. 
The returned set will contain + // all elements of this set that are not also + // elements of other. + // + // Note that the argument to Difference + // must be of the same type as the receiver + // of the method. Otherwise, Difference will + // panic. + Difference(other Set) Set + + // Determines if two sets are equal to each + // other. If they have the same cardinality + // and contain the same elements, they are + // considered equal. The order in which + // the elements were added is irrelevant. + // + // Note that the argument to Equal must be + // of the same type as the receiver of the + // method. Otherwise, Equal will panic. + Equal(other Set) bool + + // Returns a new set containing only the elements + // that exist in both sets. + // + // Note that the argument to Intersect + // must be of the same type as the receiver + // of the method. Otherwise, Intersect will + // panic. + Intersect(other Set) Set + + // Determines if every element in this set is in + // the other set but the two sets are not equal. + // + // Note that the argument to IsProperSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSubset + // will panic. + IsProperSubset(other Set) bool + + // Determines if every element in the other set + // is in this set but the two sets are not + // equal. + // + // Note that the argument to IsProperSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsProperSuperset + // will panic. + IsProperSuperset(other Set) bool + + // Determines if every element in this set is in + // the other set. + // + // Note that the argument to IsSubset + // must be of the same type as the receiver + // of the method. Otherwise, IsSubset will + // panic. + IsSubset(other Set) bool + + // Determines if every element in the other set + // is in this set. + // + // Note that the argument to IsSuperset + // must be of the same type as the receiver + // of the method. Otherwise, IsSuperset will + // panic. + IsSuperset(other Set) bool + + // Iterates over elements and executes the passed func against each element. + // If the passed func returns true, iteration stops at that point. + Each(func(interface{}) bool) + + // Returns a channel of elements that you can + // range over. + Iter() <-chan interface{} + + // Returns an Iterator object that you can + // use to range over the set. + Iterator() *Iterator + + // Removes a single element from the set. + Remove(i interface{}) + + // Provides a convenient string representation + // of the current state of the set. + String() string + + // Returns a new set with all elements which are + // in either this set or the other set but not in both. + // + // Note that the argument to SymmetricDifference + // must be of the same type as the receiver + // of the method. Otherwise, SymmetricDifference + // will panic. + SymmetricDifference(other Set) Set + + // Returns a new set with all elements in both sets. + // + // Note that the argument to Union must be of the + // same type as the receiver of the method. + // Otherwise, Union will panic. + Union(other Set) Set + + // Returns all subsets of a given set (Power Set). + PowerSet() Set + + // Returns the Cartesian Product of two sets. + CartesianProduct(other Set) Set + + // Returns the members of the set as a slice. + ToSlice() []interface{} +} + +// NewSet creates and returns a reference to an empty set. Operations +// on the resulting set are thread-safe.
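As a rough usage sketch of the Set interface above, through the thread-safe implementation returned by NewSet (the element names are illustrative). Note the caveat repeated in the interface comments: binary operations panic if the argument was built by the other implementation (thread-safe vs. thread-unsafe).

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	required := mapset.NewSet("cpu", "memory", "disk")
	reported := mapset.NewSet("cpu", "memory")

	fmt.Println(reported.IsSubset(required))      // true
	fmt.Println(required.Difference(reported))    // Set{disk}
	fmt.Println(required.Contains("cpu", "disk")) // true: Contains is variadic, all must be present
	fmt.Println(required.Cardinality())           // 3
}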
+func NewSet(s ...interface{}) Set { + set := newThreadSafeSet() + for _, item := range s { + set.Add(item) + } + return &set +} + +// NewSetWith creates and returns a new set with the given elements. +// Operations on the resulting set are thread-safe. +func NewSetWith(elts ...interface{}) Set { + return NewSetFromSlice(elts) +} + +// NewSetFromSlice creates and returns a reference to a set from an +// existing slice. Operations on the resulting set are thread-safe. +func NewSetFromSlice(s []interface{}) Set { + a := NewSet(s...) + return a +} + +// NewThreadUnsafeSet creates and returns a reference to an empty set. +// Operations on the resulting set are not thread-safe. +func NewThreadUnsafeSet() Set { + set := newThreadUnsafeSet() + return &set +} + +// NewThreadUnsafeSetFromSlice creates and returns a reference to a +// set from an existing slice. Operations on the resulting set are +// not thread-safe. +func NewThreadUnsafeSetFromSlice(s []interface{}) Set { + a := NewThreadUnsafeSet() + for _, item := range s { + a.Add(item) + } + return a +} diff --git a/vendor/github.com/deckarep/golang-set/threadsafe.go b/vendor/github.com/deckarep/golang-set/threadsafe.go new file mode 100644 index 0000000000..8dae16195b --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadsafe.go @@ -0,0 +1,271 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import "sync" + +type threadSafeSet struct { + s threadUnsafeSet + sync.RWMutex +} + +func newThreadSafeSet() threadSafeSet { + return threadSafeSet{s: newThreadUnsafeSet()} +} + +func (set *threadSafeSet) Add(i interface{}) bool { + set.Lock() + ret := set.s.Add(i) + set.Unlock() + return ret +} + +func (set *threadSafeSet) Contains(i ...interface{}) bool { + set.RLock() + ret := set.s.Contains(i...) 
+ set.RUnlock() + return ret +} + +func (set *threadSafeSet) IsSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.IsSubset(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) IsProperSubset(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + defer set.RUnlock() + o.RLock() + defer o.RUnlock() + + return set.s.IsProperSubset(&o.s) +} + +func (set *threadSafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadSafeSet) IsProperSuperset(other Set) bool { + return other.IsProperSubset(set) +} + +func (set *threadSafeSet) Union(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeUnion} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Intersect(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeIntersection} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Difference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) SymmetricDifference(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeDifference} + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clear() { + set.Lock() + set.s = newThreadUnsafeSet() + set.Unlock() +} + +func (set *threadSafeSet) Remove(i interface{}) { + set.Lock() + delete(set.s, i) + set.Unlock() +} + +func (set *threadSafeSet) Cardinality() int { + set.RLock() + defer set.RUnlock() + return len(set.s) +} + +func (set *threadSafeSet) Each(cb func(interface{}) bool) { + set.RLock() + for elem := range set.s { + if cb(elem) { + break + } + } + set.RUnlock() +} + +func (set *threadSafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + set.RLock() + + for elem := range set.s { + ch <- elem + } + close(ch) + set.RUnlock() + }() + + return ch +} + +func (set *threadSafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + set.RLock() + L: + for elem := range set.s { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + set.RUnlock() + }() + + return iterator +} + +func (set *threadSafeSet) Equal(other Set) bool { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + ret := set.s.Equal(&o.s) + set.RUnlock() + o.RUnlock() + return ret +} + +func (set *threadSafeSet) Clone() Set { + set.RLock() + + unsafeClone := set.s.Clone().(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeClone} + set.RUnlock() + return ret +} + +func (set *threadSafeSet) String() string { + set.RLock() + ret := set.s.String() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) PowerSet() Set { + set.RLock() + ret := set.s.PowerSet() + set.RUnlock() + return ret +} + +func (set *threadSafeSet) CartesianProduct(other Set) Set { + o := other.(*threadSafeSet) + + set.RLock() + o.RLock() + + unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet) + ret := &threadSafeSet{s: *unsafeCartProduct} + set.RUnlock() + o.RUnlock() + return 
ret +} + +func (set *threadSafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + set.RLock() + for elem := range set.s { + keys = append(keys, elem) + } + set.RUnlock() + return keys +} + +func (set *threadSafeSet) MarshalJSON() ([]byte, error) { + set.RLock() + b, err := set.s.MarshalJSON() + set.RUnlock() + + return b, err +} + +func (set *threadSafeSet) UnmarshalJSON(p []byte) error { + set.RLock() + err := set.s.UnmarshalJSON(p) + set.RUnlock() + + return err +} diff --git a/vendor/github.com/deckarep/golang-set/threadunsafe.go b/vendor/github.com/deckarep/golang-set/threadunsafe.go new file mode 100644 index 0000000000..fec2e3781e --- /dev/null +++ b/vendor/github.com/deckarep/golang-set/threadunsafe.go @@ -0,0 +1,325 @@ +/* +Open Source Initiative OSI - The MIT License (MIT):Licensing + +The MIT License (MIT) +Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +package mapset + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strings" +) + +type threadUnsafeSet map[interface{}]struct{} + +// An OrderedPair represents a 2-tuple of values. +type OrderedPair struct { + First interface{} + Second interface{} +} + +func newThreadUnsafeSet() threadUnsafeSet { + return make(threadUnsafeSet) +} + +// Equal says whether two 2-tuples contain the same values in the same order. 
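The Iterator machinery defined in iterator.go and wired up in threadsafe.go above exists so a caller can stop ranging early without leaking the producing goroutine: Stop closes the stop channel and drains C. A small sketch:

package main

import (
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSet(1, 2, 3, 4, 5)

	it := s.Iterator()
	for elem := range it.C {
		fmt.Println(elem) // iteration order over a set is unspecified
		if elem == 3 {
			it.Stop() // closes stop; C is closed and any remaining elements are drained
		}
	}
}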
+func (pair *OrderedPair) Equal(other OrderedPair) bool { + if pair.First == other.First && + pair.Second == other.Second { + return true + } + + return false +} + +func (set *threadUnsafeSet) Add(i interface{}) bool { + _, found := (*set)[i] + (*set)[i] = struct{}{} + return !found //False if it existed already +} + +func (set *threadUnsafeSet) Contains(i ...interface{}) bool { + for _, val := range i { + if _, ok := (*set)[val]; !ok { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsSubset(other Set) bool { + _ = other.(*threadUnsafeSet) + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) IsProperSubset(other Set) bool { + return set.IsSubset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) IsSuperset(other Set) bool { + return other.IsSubset(set) +} + +func (set *threadUnsafeSet) IsProperSuperset(other Set) bool { + return set.IsSuperset(other) && !set.Equal(other) +} + +func (set *threadUnsafeSet) Union(other Set) Set { + o := other.(*threadUnsafeSet) + + unionedSet := newThreadUnsafeSet() + + for elem := range *set { + unionedSet.Add(elem) + } + for elem := range *o { + unionedSet.Add(elem) + } + return &unionedSet +} + +func (set *threadUnsafeSet) Intersect(other Set) Set { + o := other.(*threadUnsafeSet) + + intersection := newThreadUnsafeSet() + // loop over smaller set + if set.Cardinality() < other.Cardinality() { + for elem := range *set { + if other.Contains(elem) { + intersection.Add(elem) + } + } + } else { + for elem := range *o { + if set.Contains(elem) { + intersection.Add(elem) + } + } + } + return &intersection +} + +func (set *threadUnsafeSet) Difference(other Set) Set { + _ = other.(*threadUnsafeSet) + + difference := newThreadUnsafeSet() + for elem := range *set { + if !other.Contains(elem) { + difference.Add(elem) + } + } + return &difference +} + +func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { + _ = other.(*threadUnsafeSet) + + aDiff := set.Difference(other) + bDiff := other.Difference(set) + return aDiff.Union(bDiff) +} + +func (set *threadUnsafeSet) Clear() { + *set = newThreadUnsafeSet() +} + +func (set *threadUnsafeSet) Remove(i interface{}) { + delete(*set, i) +} + +func (set *threadUnsafeSet) Cardinality() int { + return len(*set) +} + +func (set *threadUnsafeSet) Each(cb func(interface{}) bool) { + for elem := range *set { + if cb(elem) { + break + } + } +} + +func (set *threadUnsafeSet) Iter() <-chan interface{} { + ch := make(chan interface{}) + go func() { + for elem := range *set { + ch <- elem + } + close(ch) + }() + + return ch +} + +func (set *threadUnsafeSet) Iterator() *Iterator { + iterator, ch, stopCh := newIterator() + + go func() { + L: + for elem := range *set { + select { + case <-stopCh: + break L + case ch <- elem: + } + } + close(ch) + }() + + return iterator +} + +func (set *threadUnsafeSet) Equal(other Set) bool { + _ = other.(*threadUnsafeSet) + + if set.Cardinality() != other.Cardinality() { + return false + } + for elem := range *set { + if !other.Contains(elem) { + return false + } + } + return true +} + +func (set *threadUnsafeSet) Clone() Set { + clonedSet := newThreadUnsafeSet() + for elem := range *set { + clonedSet.Add(elem) + } + return &clonedSet +} + +func (set *threadUnsafeSet) String() string { + items := make([]string, 0, len(*set)) + + for elem := range *set { + items = append(items, fmt.Sprintf("%v", elem)) + } + return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) +} + +// String outputs a 
2-tuple in the form "(A, B)". +func (pair OrderedPair) String() string { + return fmt.Sprintf("(%v, %v)", pair.First, pair.Second) +} + +func (set *threadUnsafeSet) PowerSet() Set { + powSet := NewThreadUnsafeSet() + nullset := newThreadUnsafeSet() + powSet.Add(&nullset) + + for es := range *set { + u := newThreadUnsafeSet() + j := powSet.Iter() + for er := range j { + p := newThreadUnsafeSet() + if reflect.TypeOf(er).Name() == "" { + k := er.(*threadUnsafeSet) + for ek := range *(k) { + p.Add(ek) + } + } else { + p.Add(er) + } + p.Add(es) + u.Add(&p) + } + + powSet = powSet.Union(&u) + } + + return powSet +} + +func (set *threadUnsafeSet) CartesianProduct(other Set) Set { + o := other.(*threadUnsafeSet) + cartProduct := NewThreadUnsafeSet() + + for i := range *set { + for j := range *o { + elem := OrderedPair{First: i, Second: j} + cartProduct.Add(elem) + } + } + + return cartProduct +} + +func (set *threadUnsafeSet) ToSlice() []interface{} { + keys := make([]interface{}, 0, set.Cardinality()) + for elem := range *set { + keys = append(keys, elem) + } + + return keys +} + +// MarshalJSON creates a JSON array from the set, it marshals all elements +func (set *threadUnsafeSet) MarshalJSON() ([]byte, error) { + items := make([]string, 0, set.Cardinality()) + + for elem := range *set { + b, err := json.Marshal(elem) + if err != nil { + return nil, err + } + + items = append(items, string(b)) + } + + return []byte(fmt.Sprintf("[%s]", strings.Join(items, ","))), nil +} + +// UnmarshalJSON recreates a set from a JSON array, it only decodes +// primitive types. Numbers are decoded as json.Number. +func (set *threadUnsafeSet) UnmarshalJSON(b []byte) error { + var i []interface{} + + d := json.NewDecoder(bytes.NewReader(b)) + d.UseNumber() + err := d.Decode(&i) + if err != nil { + return err + } + + for _, v := range i { + switch t := v.(type) { + case []interface{}, map[string]interface{}: + continue + default: + set.Add(t) + } + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go deleted file mode 100644 index b5a5321448..0000000000 --- a/vendor/github.com/docker/distribution/metrics/prometheus.go +++ /dev/null @@ -1,13 +0,0 @@ -package metrics - -import "github.com/docker/go-metrics" - -const ( - // NamespacePrefix is the namespace of prometheus metrics - NamespacePrefix = "registry" -) - -var ( - // StorageNamespace is the prometheus namespace of blob/cache related operations - StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) -) diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62a..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error allowing -// users of each to just call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. 
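Returning to the mapset files above: threadUnsafeSet encodes to a JSON array, and its UnmarshalJSON decodes primitive elements only (numbers arrive as json.Number). A rough round-trip sketch; one caveat grounded in the code above is that the thread-safe wrapper's UnmarshalJSON takes only a read lock while mutating the set, so decoding should not run concurrently with other writers.

package main

import (
	"encoding/json"
	"fmt"

	mapset "github.com/deckarep/golang-set"
)

func main() {
	s := mapset.NewSet("a", "b", "c")

	buf, err := json.Marshal(s) // e.g. ["a","b","c"]; element order is unspecified
	if err != nil {
		panic(err)
	}

	t := mapset.NewSet()
	if err := json.Unmarshal(buf, &t); err != nil { // dispatches to the set's UnmarshalJSON
		panic(err)
	}
	fmt.Println(s.Equal(t)) // true
}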
-type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returned the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided. -type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique, string key, often captilized with - // underscores, to identify the error code. 
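For reference, the helpers being deleted here composed as below. The registration is hypothetical (a made-up code in a made-up group), but the chaining matches the methods above: WithArgs runs fmt.Sprintf over the registered Message, and WithDetail attaches a structured payload for the JSON envelope.

package main

import (
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

// errCodeLayerMissing is a hypothetical code, registered only to illustrate
// the API; real codes live in groups such as "errcode" and "registry.api.v2".
var errCodeLayerMissing = errcode.Register("example", errcode.ErrorDescriptor{
	Value:          "LAYER_MISSING",
	Message:        "layer %s is missing",
	Description:    "Returned when a referenced layer is not present.",
	HTTPStatusCode: http.StatusBadRequest,
})

func reportMissing(digest string) error {
	// ErrorCode.WithArgs -> Error with the message formatted;
	// Error.WithDetail -> same Error plus a Detail payload.
	return errCodeLayerMissing.WithArgs(digest).
		WithDetail(map[string]string{"digest": digest})
}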
This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable decription of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the errors purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors. -func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts slice of error, ErrorCode or Error into a -// slice of Error - then serializes -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr.(type) { - case ErrorCode: - err = daErr.(ErrorCode).WithDetail(nil) - case Error: - err = daErr.(Error) - default: - err = ErrorCodeUnknown.WithDetail(daErr) - - } - - // If the Error struct was setup and they forgot to set the - // Message field (meaning its "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Error's w/o details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Error's w/ details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index d77e70473e..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,40 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. 
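The Errors round trip above has one subtlety worth pinning down: on decode, an entry with no Detail whose message matches the code's registered message collapses back to a bare ErrorCode, so equality checks against exported codes work directly. A sketch, assuming the UNAUTHORIZED code registered in the register.go hunk below:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	payload := []byte(`{"errors":[{"code":"UNAUTHORIZED","message":"authentication required"}]}`)

	var errs errcode.Errors
	if err := json.Unmarshal(payload, &errs); err != nil {
		panic(err)
	}
	// No Detail, and the message matches the registered one, so the entry
	// was converted back to the plain ErrorCode value.
	fmt.Println(errs[0] == errcode.ErrorCodeUnauthorized) // true
}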
-func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along. - err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - return json.NewEncoder(w).Encode(err) -} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6d..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
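ServeJSON was the bridge from these codes to HTTP responses: wrap the error in an Errors envelope if needed, take the status from the first code's descriptor (500 when unset), and write JSON. A sketch of a handler using it; loadManifest is a hypothetical stand-in for real storage, and v2 refers to the registry/api/v2 package deleted further down in this diff:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func loadManifest(r *http.Request) ([]byte, error) {
	return nil, fmt.Errorf("not found") // hypothetical stand-in for real storage
}

func manifestHandler(w http.ResponseWriter, r *http.Request) {
	manifest, err := loadManifest(r)
	if err != nil {
		// Sends {"errors":[...]} with the 404 from MANIFEST_UNKNOWN's descriptor.
		_ = errcode.ServeJSON(w, v2.ErrorCodeManifestUnknown.WithDetail(err.Error()))
		return
	}
	_, _ = w.Write(manifest)
}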
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index a9616c58ad..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1596 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" - "github.com/opencontainers/go-digest" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
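Since Register also populated the group tables, the deleted accessors could dump every known code, which is handy for generating API error documentation; e.g.:

package main

import (
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	for _, group := range errcode.GetGroupNames() {
		fmt.Println(group)
		for _, d := range errcode.GetErrorCodeGroup(group) {
			fmt.Printf("  %-20s %d  %s\n", d.Value, d.HTTPStatusCode, d.Message)
		}
	}
}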
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", - Format: "", - Required: false, - }, - { - Name: "last", - Type: "string", - Description: "Result set will include values lexically after last.", - Format: "", - Required: false, - }, - } - - unauthorizedResponseDescriptor = ResponseDescriptor{ - Name: "Authentication Required", - StatusCode: http.StatusUnauthorized, - Description: "The client is not authenticated.", - Headers: []ParameterDescriptor{ - authChallengeHeader, - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnauthorized, - }, - } - - repositoryNotFoundResponseDescriptor = ResponseDescriptor{ - Name: "No Such Repository Error", - StatusCode: http.StatusNotFound, - Description: "The repository is not known to the registry.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - }, - } - - deniedResponseDescriptor = ResponseDescriptor{ - Name: "Access Denied", - StatusCode: http.StatusForbidden, - Description: "The client does not have required access to the repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeDenied, - }, - } - - tooManyRequestsDescriptor = ResponseDescriptor{ - Name: "Too Many Requests", - StatusCode: http.StatusTooManyRequests, - Description: "The client made too many requests within a time interval.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeTooManyRequests, - }, - } -) - -const ( - manifestBody = `{ - "name": , - "tag": , - "fsLayers": [ - { - "blobSum": "" - }, - ... - ] - ], - "history": , - "signature": -}` - - errorsBody = `{ - "errors:" [ - { - "code": , - "message": "", - "detail": ... - }, - ... - ] -}` -) - -// APIDescriptor exports descriptions of the layout of the v2 registry API. -var APIDescriptor = struct { - // RouteDescriptors provides a list of the routes available in the API. - RouteDescriptors []RouteDescriptor -}{ - RouteDescriptors: routeDescriptors, -} - -// RouteDescriptor describes a route specified by name. -type RouteDescriptor struct { - // Name is the name of the route, as specified in RouteNameXXX exports. - // These names a should be considered a unique reference for a route. If - // the route is registered with gorilla, this is the name that will be - // used. - Name string - - // Path is a gorilla/mux-compatible regexp that can be used to match the - // route. For any incoming method and path, only one route descriptor - // should match. - Path string - - // Entity should be a short, human-readalbe description of the object - // targeted by the endpoint. 
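The pagination descriptors above (query parameters n and last, plus an RFC 5988 Link header carrying rel="next") imply a simple client loop. A sketch against the tags endpoint; the registry and repo arguments are placeholders, and parseNextLink is a naive extractor for the <url> portion of the Link header:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func parseNextLink(link string) string {
	start := strings.Index(link, "<")
	end := strings.Index(link, ">")
	if start < 0 || end <= start {
		return "" // no rel="next": last page
	}
	return link[start+1 : end]
}

func listAllTags(registry, repo string) error {
	url := fmt.Sprintf("%s/v2/%s/tags/list?n=50", registry, repo)
	for url != "" {
		resp, err := http.Get(url)
		if err != nil {
			return err
		}
		// ... decode {"name": ..., "tags": [...]} from resp.Body here ...
		link := resp.Header.Get("Link") // `<url>; rel="next"` while more pages remain
		resp.Body.Close()
		url = parseNextLink(link)
	}
	return nil
}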
- Entity string - - // Description should provide an accurate overview of the functionality - // provided by the route. - Description string - - // Methods should describe the various HTTP methods that may be used on - // this route, including request and response formats. - Methods []MethodDescriptor -} - -// MethodDescriptor provides a description of the requests that may be -// conducted with the target method. -type MethodDescriptor struct { - - // Method is an HTTP method, such as GET, PUT or POST. - Method string - - // Description should provide an overview of the functionality provided by - // the covered method, suitable for use in documentation. Use of markdown - // here is encouraged. - Description string - - // Requests is a slice of request descriptors enumerating how this - // endpoint may be used. - Requests []RequestDescriptor -} - -// RequestDescriptor covers a particular set of headers and parameters that -// can be carried out with the parent method. Its most helpful to have one -// RequestDescriptor per API use case. -type RequestDescriptor struct { - // Name provides a short identifier for the request, usable as a title or - // to provide quick context for the particular request. - Name string - - // Description should cover the requests purpose, covering any details for - // this particular use case. - Description string - - // Headers describes headers that must be used with the HTTP request. - Headers []ParameterDescriptor - - // PathParameters enumerate the parameterized path components for the - // given request, as defined in the route's regular expression. - PathParameters []ParameterDescriptor - - // QueryParameters provides a list of query parameters for the given - // request. - QueryParameters []ParameterDescriptor - - // Body describes the format of the request body. - Body BodyDescriptor - - // Successes enumerates the possible responses that are considered to be - // the result of a successful request. - Successes []ResponseDescriptor - - // Failures covers the possible failures from this particular request. - Failures []ResponseDescriptor -} - -// ResponseDescriptor describes the components of an API response. -type ResponseDescriptor struct { - // Name provides a short identifier for the response, usable as a title or - // to provide quick context for the particular response. - Name string - - // Description should provide a brief overview of the role of the - // response. - Description string - - // StatusCode specifies the status received by this particular response. - StatusCode int - - // Headers covers any headers that may be returned from the response. - Headers []ParameterDescriptor - - // Fields describes any fields that may be present in the response. - Fields []ParameterDescriptor - - // ErrorCodes enumerates the error codes that may be returned along with - // the response. - ErrorCodes []errcode.ErrorCode - - // Body describes the body of the response, if any. - Body BodyDescriptor -} - -// BodyDescriptor describes a request body and its expected content type. For -// the most part, it should be example json or some placeholder for body -// data in documentation. -type BodyDescriptor struct { - ContentType string - Format string -} - -// ParameterDescriptor describes the format of a request parameter, which may -// be a header, path parameter or query parameter. -type ParameterDescriptor struct { - // Name is the name of the parameter, either of the path component or - // query parameter. 
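Because every route, method, and parameter was described by these structs, the APIDescriptor export could drive documentation tooling directly; a sketch that prints the route table:

package main

import (
	"fmt"

	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	for _, route := range v2.APIDescriptor.RouteDescriptors {
		fmt.Printf("%s  %s\n", route.Name, route.Path)
		for _, m := range route.Methods {
			fmt.Printf("    %s  %s\n", m.Method, m.Description)
		}
	}
}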
- Name string - - // Type specifies the type of the parameter, such as string, integer, etc. - Type string - - // Description provides a human-readable description of the parameter. - Description string - - // Required means the field is required when set. - Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
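In plain HTTP terms, the manifest GET described above looks like the sketch below. The Accept media type is an assumption (the descriptors leave ContentType open); Docker-Content-Digest carries the canonical digest, per digestHeader above:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func fetchManifest(base, name, ref string) ([]byte, string, error) {
	url := fmt.Sprintf("%s/v2/%s/manifests/%s", base, name, ref)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, "", err
	}
	req.Header.Set("Accept", "application/vnd.docker.distribution.manifest.v2+json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, "", fmt.Errorf("manifest fetch: %s", resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	return body, resp.Header.Get("Docker-Content-Digest"), err
}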
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
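Fetch Blob Part above is standard RFC 7233 ranging; a sketch of requesting one chunk (callers should also handle a plain 200 from registries that ignore the Range header and send the whole blob):

package main

import (
	"fmt"
	"net/http"
)

func fetchBlobRange(base, name, digest string, from, to int64) (*http.Response, error) {
	url := fmt.Sprintf("%s/v2/%s/blobs/%s", base, name, digest)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", from, to))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	// 206: the chunk is in the body, with Content-Range "bytes <s>-<e>/<total>".
	if resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {
		resp.Body.Close()
		return nil, fmt.Errorf("range fetch: %s", resp.Status)
	}
	return resp, nil
}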
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
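Per Initiate Monolithic Blob Upload above, a POST with the digest query parameter completes the upload in a single request. A sketch of that contract (registries in practice often prefer the POST-then-PUT flow, so treat this as the descriptor's promise rather than universal behavior):

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func pushBlobMonolithic(base, name, digest string, blob []byte) error {
	url := fmt.Sprintf("%s/v2/%s/blobs/uploads/?digest=%s", base, name, digest)
	req, err := http.NewRequest("POST", url, bytes.NewReader(blob))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// 201 Created: blob stored; Location holds its canonical URL.
	if resp.StatusCode != http.StatusCreated {
		return fmt.Errorf("blob upload: %s", resp.Status)
	}
	return nil
}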
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
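Chunked upload above chains PATCH requests using the non-standard Content-Range convention the descriptor calls out, then finishes with a PUT carrying ?digest=<digest>. A sketch of one chunk; the caller tracks byte offsets and must always follow the latest Location header:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadChunk sends one chunk to the upload's current Location URL and
// returns the next Location to use. http.NewRequest derives Content-Length
// from the reader, so it is not set by hand here.
func uploadChunk(location string, chunk []byte, start int64) (string, error) {
	req, err := http.NewRequest("PATCH", location, bytes.NewReader(chunk))
	if err != nil {
		return "", err
	}
	end := start + int64(len(chunk)) - 1
	req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", start, end))
	req.Header.Set("Content-Type", "application/octet-stream")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent { // 204: chunk accepted
		return "", fmt.Errorf("chunk upload: %s", resp.Status)
	}
	return resp.Header.Get("Location"), nil
}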
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - tooManyRequestsDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde0119594..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
-package v2 diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go deleted file mode 100644 index 97d6923aa0..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/errors.go +++ /dev/null @@ -1,136 +0,0 @@ -package v2 - -import ( - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -const errGroup = "registry.api.v2" - -var ( - // ErrorCodeDigestInvalid is returned when uploading a blob if the - // provided digest does not match the blob contents. - ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "DIGEST_INVALID", - Message: "provided digest did not match uploaded content", - Description: `When a blob is uploaded, the registry will check that - the content matches the digest provided by the client. The error may - include a detail structure with the key "digest", including the - invalid digest string. This error may also be returned when a manifest - includes an invalid layer digest.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeSizeInvalid is returned when uploading a blob if the provided - // length does not match the content length. - ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "SIZE_INVALID", - Message: "provided length did not match content length", - Description: `When a layer is uploaded, the provided size will be - checked against the uploaded content. If they do not match, this error - will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameInvalid is returned when the name in the manifest does not - // match the provided name. - ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_INVALID", - Message: "invalid repository name", - Description: `Invalid repository name encountered either during - manifest validation or any API operation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeTagInvalid is returned when the tag in the manifest does not - // match the provided tag. - ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "TAG_INVALID", - Message: "manifest tag did not match URI", - Description: `During a manifest upload, if the tag in the manifest - does not match the uri tag, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeNameUnknown is returned when the repository name is not known. - ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "NAME_UNKNOWN", - Message: "repository name not known to registry", - Description: `This is returned if the name used during an operation is - unknown to the registry.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestUnknown is returned when the image manifest is unknown. - ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNKNOWN", - Message: "manifest unknown", - Description: `This error is returned when the manifest, identified by - name and tag, is unknown to the repository.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeManifestInvalid is returned when an image manifest is invalid, - // typically during a PUT operation. This error encompasses all errors - // encountered during manifest validation that aren't signature errors.
- ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_INVALID", - Message: "manifest invalid", - Description: `During upload, manifests undergo several checks ensuring - validity. If those checks fail, this error may be returned, unless a - more specific error is included. The detail will contain information - about the failed validation.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestUnverified is returned when the manifest fails - // signature verification. - ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_UNVERIFIED", - Message: "manifest failed signature verification", - Description: `During manifest upload, if the manifest fails signature - verification, this error will be returned.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeManifestBlobUnknown is returned when a manifest blob is - // unknown to the registry. - ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "MANIFEST_BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a manifest blob is - unknown to the registry.`, - HTTPStatusCode: http.StatusBadRequest, - }) - - // ErrorCodeBlobUnknown is returned when a blob is unknown to the - // registry. This can happen when the manifest references a nonexistent - // layer or the result is not found by a blob fetch. - ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UNKNOWN", - Message: "blob unknown to registry", - Description: `This error may be returned when a blob is unknown to the - registry in a specified repository. This can be returned with a - standard get or if a manifest references an unknown layer during - upload.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadUnknown is returned when an upload is unknown. - ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_UNKNOWN", - Message: "blob upload unknown to registry", - Description: `If a blob upload has been cancelled or was never - started, this error code may be returned.`, - HTTPStatusCode: http.StatusNotFound, - }) - - // ErrorCodeBlobUploadInvalid is returned when an upload is invalid. - ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{ - Value: "BLOB_UPLOAD_INVALID", - Message: "blob upload invalid", - Description: `The blob upload encountered an error and can no - longer proceed.`, - HTTPStatusCode: http.StatusNotFound, - }) -) diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go deleted file mode 100644 index 9bc41a3a64..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go +++ /dev/null @@ -1,161 +0,0 @@ -package v2 - -import ( - "fmt" - "regexp" - "strings" - "unicode" -) - -var ( - // according to rfc7230 - reToken = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`) - reQuotedValue = regexp.MustCompile(`^[^\\"]+`) - reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`) -) - -// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains -// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The -// function parses only the first element of the list, which is set by the very first proxy.
It returns a map -// of corresponding key-value pairs and an unparsed slice of the input string. -// -// Examples of Forwarded header values: -// -// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown -// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80" -// -// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into -// {"for": "192.0.2.43:443", "host": "registry.example.org"}. -func parseForwardedHeader(forwarded string) (map[string]string, string, error) { - // Following are states of forwarded header parser. Any state could transition to a failure. - const ( - // terminating state; can transition to Parameter - stateElement = iota - // terminating state; can transition to KeyValueDelimiter - stateParameter - // can transition to Value - stateKeyValueDelimiter - // can transition to one of { QuotedValue, PairEnd } - stateValue - // can transition to one of { EscapedCharacter, PairEnd } - stateQuotedValue - // can transition to one of { QuotedValue } - stateEscapedCharacter - // terminating state; can transition to one of { Parameter, Element } - statePairEnd - ) - - var ( - parameter string - value string - parse = forwarded[:] - res = map[string]string{} - state = stateElement - ) - -Loop: - for { - // skip spaces unless in quoted value - if state != stateQuotedValue && state != stateEscapedCharacter { - parse = strings.TrimLeftFunc(parse, unicode.IsSpace) - } - - if len(parse) == 0 { - if state != stateElement && state != statePairEnd && state != stateParameter { - return nil, parse, fmt.Errorf("unexpected end of input") - } - // terminating - break - } - - switch state { - // terminate at list element delimiter - case stateElement: - if parse[0] == ',' { - parse = parse[1:] - break Loop - } - state = stateParameter - - // parse parameter (the key of key-value pair) - case stateParameter: - match := reToken.FindString(parse) - if len(match) == 0 { - return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse)) - } - parameter = strings.ToLower(match) - parse = parse[len(match):] - state = stateKeyValueDelimiter - - // parse '=' - case stateKeyValueDelimiter: - if parse[0] != '=' { - return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse)) - } - parse = parse[1:] - state = stateValue - - // parse value or quoted value - case stateValue: - if parse[0] == '"' { - parse = parse[1:] - state = stateQuotedValue - } else { - value = reToken.FindString(parse) - if len(value) == 0 { - return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse)) - } - if _, exists := res[parameter]; exists { - return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse)) - } - res[parameter] = value - parse = parse[len(value):] - value = "" - state = statePairEnd - } - - // parse a part of quoted value until the first backslash - case stateQuotedValue: - match := reQuotedValue.FindString(parse) - value += match - parse = parse[len(match):] - switch { - case len(parse) == 0: - return nil, parse, fmt.Errorf("unterminated quoted string") - case parse[0] == '"': - res[parameter] = value - value = "" - parse = parse[1:] - state = statePairEnd - case parse[0] == '\\': - parse = parse[1:] - state = stateEscapedCharacter - } - - // parse escaped character in a quoted string, ignore the backslash - // transition back to QuotedValue state - case 
stateEscapedCharacter: - c := reEscapedCharacter.FindString(parse) - if len(c) == 0 { - return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1) - } - value += c - parse = parse[1:] - state = stateQuotedValue - - // expect either a new key-value pair, new list or end of input - case statePairEnd: - switch parse[0] { - case ';': - parse = parse[1:] - state = stateParameter - case ',': - state = stateElement - default: - return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse)) - } - } - } - - return res, parse, nil -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go deleted file mode 100644 index 5b80d5be76..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/routes.go +++ /dev/null @@ -1,49 +0,0 @@ -package v2 - -import "github.com/gorilla/mux" - -// The following are definitions of the names under which all V2 routes are -// registered. These symbols can be used to look up a route based on the name. -const ( - RouteNameBase = "base" - RouteNameManifest = "manifest" - RouteNameTags = "tags" - RouteNameBlob = "blob" - RouteNameBlobUpload = "blob-upload" - RouteNameBlobUploadChunk = "blob-upload-chunk" - RouteNameCatalog = "catalog" -) - -var allEndpoints = []string{ - RouteNameManifest, - RouteNameCatalog, - RouteNameTags, - RouteNameBlob, - RouteNameBlobUpload, - RouteNameBlobUploadChunk, -} - -// Router builds a gorilla router with named routes for the various API -// methods. This can be used directly by both server implementations and -// clients. -func Router() *mux.Router { - return RouterWithPrefix("") -} - -// RouterWithPrefix builds a gorilla router with a configured prefix -// on all routes. -func RouterWithPrefix(prefix string) *mux.Router { - rootRouter := mux.NewRouter() - router := rootRouter - if prefix != "" { - router = router.PathPrefix(prefix).Subrouter() - } - - router.StrictSlash(true) - - for _, descriptor := range routeDescriptors { - router.Path(descriptor.Path).Name(descriptor.Name) - } - - return rootRouter -} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go deleted file mode 100644 index 1337bdb127..0000000000 --- a/vendor/github.com/docker/distribution/registry/api/v2/urls.go +++ /dev/null @@ -1,266 +0,0 @@ -package v2 - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/docker/distribution/reference" - "github.com/gorilla/mux" -) - -// URLBuilder creates registry API urls from a single base endpoint. It can be -// used to create urls for use in a registry client or server. -- -// All urls will be created from the given base, including the api version. -// For example, if a root of "/foo/" is provided, urls generated will fall -// under "/foo/v2/...". Most applications will only provide a schema, host and -// port, such as "https://localhost:5000/". -type URLBuilder struct { - root *url.URL // url root (ie http://localhost/) - router *mux.Router - relative bool -} - -// NewURLBuilder creates a URLBuilder with provided root url object. -func NewURLBuilder(root *url.URL, relative bool) *URLBuilder { - return &URLBuilder{ - root: root, - router: Router(), - relative: relative, - } -} - -// NewURLBuilderFromString works identically to NewURLBuilder except it takes -// a string argument for the root, returning an error if it is not a valid -// url.
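// Illustrative sketch (not part of the vendored sources): minimal use of the
// URLBuilder API documented above. The registry root is a hypothetical
// example, and error handling is abbreviated for brevity.
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/api/v2"
)

func main() {
	// All generated urls hang off the root, so the /v2/ prefix is implied.
	ub, err := v2.NewURLBuilderFromString("https://registry.example.com/", false)
	if err != nil {
		panic(err)
	}

	named, _ := reference.WithName("library/ubuntu")
	tagged, _ := reference.WithTag(named, "16.04")

	tagsURL, _ := ub.BuildTagsURL(named)          // .../v2/library/ubuntu/tags/list
	manifestURL, _ := ub.BuildManifestURL(tagged) // .../v2/library/ubuntu/manifests/16.04
	fmt.Println(tagsURL, manifestURL)
}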
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) { - u, err := url.Parse(root) - if err != nil { - return nil, err - } - - return NewURLBuilder(u, relative), nil -} - -// NewURLBuilderFromRequest uses information from an *http.Request to -// construct the root url. -func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder { - var ( - scheme = "http" - host = r.Host - ) - - if r.TLS != nil { - scheme = "https" - } else if len(r.URL.Scheme) > 0 { - scheme = r.URL.Scheme - } - - // Handle forwarded headers - // Prefer "Forwarded" header as defined by rfc7239 if given - // see https://tools.ietf.org/html/rfc7239 - if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 { - forwardedHeader, _, err := parseForwardedHeader(forwarded) - if err == nil { - if fproto := forwardedHeader["proto"]; len(fproto) > 0 { - scheme = fproto - } - if fhost := forwardedHeader["host"]; len(fhost) > 0 { - host = fhost - } - } - } else { - if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 { - scheme = forwardedProto - } - if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 { - // According to the Apache mod_proxy docs, X-Forwarded-Host can be a - // comma-separated list of hosts, to which each proxy appends the - // requested host. We want to grab the first from this comma-separated - // list. - hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url to get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - default: - return "", fmt.Errorf("reference must have a tag or digest") - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst.
-func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// cloneRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation. -func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go deleted file mode 100644 index 2c3ebe1653..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go +++ /dev/null @@ -1,27 +0,0 @@ -package challenge - -import ( - "net/url" - "strings" -) - -// FROM: https://golang.org/src/net/http/http.go -// Given a string of the form "host", "host:port", or "[ipv6::address]:port", -// return true if the string includes a port.
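// Illustrative sketch (not part of the vendored sources): the effect of the
// address normalization performed by hasPort and canonicalAddr below, with the
// same logic inlined so the snippet is self-contained. The hosts are examples.
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	for _, raw := range []string{
		"https://registry.example.com",     // no port: the scheme default is appended
		"http://registry.example.com:5000", // explicit port: kept as-is
		"https://[2001:db8::1]",            // IPv6 literal without a port
	} {
		u, _ := url.Parse(raw)
		addr := u.Host
		// A colon after the last ']' means the host already carries a port.
		if !(strings.LastIndex(addr, ":") > strings.LastIndex(addr, "]")) {
			addr += ":" + map[string]string{"http": "80", "https": "443"}[u.Scheme]
		}
		fmt.Println(addr) // registry.example.com:443, registry.example.com:5000, [2001:db8::1]:443
	}
}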
-func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } - -// FROM: http://golang.org/src/net/http/transport.go -var portMap = map[string]string{ - "http": "80", - "https": "443", -} - -// canonicalAddr returns url.Host but always with a ":port" suffix -// FROM: http://golang.org/src/net/http/transport.go -func canonicalAddr(url *url.URL) string { - addr := url.Host - if !hasPort(addr) { - return addr + ":" + portMap[url.Scheme] - } - return addr -} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go deleted file mode 100644 index 6e3f1ccc41..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go +++ /dev/null @@ -1,237 +0,0 @@ -package challenge - -import ( - "fmt" - "net/http" - "net/url" - "strings" - "sync" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// Manager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type Manager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenticate headers and added to the - // URL which produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleManager returns an instance of -// Manager which only maps endpoints to challenges -// based on the responses which have been added to the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleManager() Manager { - return &simpleManager{ - Challenges: make(map[string][]Challenge), - } -} - -type simpleManager struct { - sync.RWMutex - Challenges map[string][]Challenge -} - -func normalizeURL(endpoint *url.URL) { - endpoint.Host = strings.ToLower(endpoint.Host) - endpoint.Host = canonicalAddr(endpoint) -} - -func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - normalizeURL(&endpoint) - - m.RLock() - defer m.RUnlock() - challenges := m.Challenges[endpoint.String()] - return challenges, nil -} - -func (m *simpleManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: resp.Request.URL.Host, - Scheme: resp.Request.URL.Scheme, - } - normalizeURL(&urlCopy) - - m.Lock() - defer m.Unlock() - m.Challenges[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616.
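// Illustrative sketch (not part of the vendored sources): feeding a 401
// response into the simple challenge manager defined above and reading the
// parsed challenges back. The endpoint is a hypothetical registry.
package main

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	cm := challenge.NewSimpleManager()

	// An authenticating registry answers the base endpoint with 401 and a
	// WWW-Authenticate header describing how to obtain credentials.
	resp, err := http.Get("https://registry.example.com/v2/")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if err := cm.AddResponse(resp); err != nil {
		panic(err)
	}

	u, _ := url.Parse("https://registry.example.com/v2/")
	challenges, _ := cm.GetChallenges(*u)
	for _, c := range challenges {
		fmt.Println(c.Scheme, c.Parameters) // e.g. bearer map[realm:... service:...]
	}
}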
-type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = <any 8-bit sequence of data> - // CHAR = <any US-ASCII character (octets 0 - 127)> - // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR = <US-ASCII CR, carriage return (13)> - // LF = <US-ASCII LF, linefeed (10)> - // SP = <US-ASCII SP, space (32)> - // HT = <US-ASCII HT, horizontal-tab (9)> - // <"> = <US-ASCII double-quote mark (34)> - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1*<any CHAR except CTLs or separators> - // qdtext = <any TEXT except <">> - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index 695bf852f1..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "context" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter -
client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func (hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - 
return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index 52d49d5d29..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,139 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth/challenge" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. - var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...)) -} - -// HandleErrorResponse returns the error parsed from the HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError is returned for response codes outside of the expected -// range.
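// Illustrative sketch (not part of the vendored sources): how calling code
// typically pairs SuccessStatus with HandleErrorResponse. The manifest URL is
// a hypothetical example.
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

func main() {
	resp, err := http.Get("https://registry.example.com/v2/library/ubuntu/manifests/latest")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	if !client.SuccessStatus(resp.StatusCode) {
		// A 4xx body is decoded into typed errcode errors; anything outside
		// 400-499 becomes an UnexpectedHTTPStatusError.
		fmt.Println(client.HandleErrorResponse(resp))
		return
	}
	fmt.Println("manifest fetched")
}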
-func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for _, c := range challenge.ResponseChallenges(resp) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index d8e2c795d9..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,869 +0,0 @@ -package client - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" - "github.com/opencontainers/go-digest" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value. 
- hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories. -func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return &registry{ - client: client, - ub: ub, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there -// are no more entries. -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL. -func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - name: r.Named(), - } -} - -// tags implements remote tagging operations.
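// Illustrative sketch (not part of the vendored sources): listing the tags of
// a repository through the high-level entry points defined in this file. The
// registry URL is a hypothetical example and anonymous access is assumed.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	named, err := reference.WithName("library/ubuntu")
	if err != nil {
		panic(err)
	}

	repo, err := client.NewRepository(named, "https://registry.example.com", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// All follows Link headers internally, so paginated tag lists come back whole.
	tags, err := repo.Tags(context.Background()).All(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println(tags)
}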
-type tags struct { - client *http.Client - ub *v2.URLBuilder - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - listURLStr, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - listURL, err := url.Parse(listURLStr) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(listURL.String()) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) - if link := resp.Header.Get("Link"); link != "" { - linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") - linkURL, err := url.Parse(linkURLStr) - if err != nil { - return tags, err - } - - listURL = listURL.ResolveReference(linkURL) - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.Parse(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fallback to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - newRequest := func(method string) (*http.Response, error) { - req, err := http.NewRequest(method, u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - resp, err := t.client.Do(req) - return resp, err - } - - resp, err := newRequest("HEAD") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: - // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers - return descriptorFromResponse(resp) - default: - // if the response is an error - there will be no body to decode. 
- // Issue a GET request: - // - for data from a server that does not handle HEAD - // - to get error details in case of a failure - resp, err = newRequest("GET") - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if resp.StatusCode >= 200 && resp.StatusCode < 400 { - return descriptorFromResponse(resp) - } - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag option is a client-only option") -} - -// ReturnContentDigest allows a client to set the content digest on -// a successful request from the 'Docker-Content-Digest' header. The -// returned digest represents the digest which the registry uses -// to refer to the content and can be used to delete the content.
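// Illustrative sketch (not part of the vendored sources): fetching a manifest
// by tag while capturing the registry's content digest via the option
// described above. The repository handle is assumed to come from
// client.NewRepository as sketched earlier.
package example

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/registry/client"
	"github.com/opencontainers/go-digest"
)

func fetchByTag(ctx context.Context, repo distribution.Repository) error {
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}

	var dgst digest.Digest
	// WithTag selects the reference; ReturnContentDigest fills dgst from the
	// Docker-Content-Digest header on success.
	m, err := ms.Get(ctx, "", distribution.WithTag("latest"), client.ReturnContentDigest(&dgst))
	if err != nil {
		return err
	}
	_, payload, err := m.Payload()
	if err != nil {
		return err
	}
	fmt.Println(dgst, len(payload))
	return nil
}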
-func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - mediaTypes []string - ) - - for _, option := range options { - switch opt := option.(type) { - case distribution.WithTagOption: - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - case contentDigestOption: - contentDgst = opt.digest - case distribution.WithManifestMediaTypesOption: - mediaTypes = opt.MediaTypes - default: - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - if len(mediaTypes) == 0 { - mediaTypes = distribution.ManifestMediaTypes() - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range mediaTypes { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL. 
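// Illustrative sketch (not part of the vendored sources): pushing a manifest
// under a tag with the option handling described above. The manifest value is
// assumed to have been built elsewhere (for example via schema2.FromStruct).
package example

import (
	"context"
	"fmt"

	"github.com/docker/distribution"
)

func pushTagged(ctx context.Context, repo distribution.Repository, m distribution.Manifest) error {
	ms, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}
	// With a tag option Put uploads to .../manifests/latest; without one, the
	// canonical digest of the payload becomes the reference instead.
	dgst, err := ms.Put(ctx, m, distribution.WithTag("latest"))
	if err != nil {
		return err
	}
	fmt.Println("pushed:", dgst)
	return nil
}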
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.Parse(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.Digester() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*distribution.CreateOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts distribution.CreateOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index e5ff09d756..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,251 +0,0 @@ -package transport 
- -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset -// the a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == os.SEEK_SET && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request. 
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case os.SEEK_CUR: - newOffset += offset - case os.SEEK_END: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case os.SEEK_SET: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - req.Header.Add("Accept-Encoding", "identity") - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git 
a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0f..0000000000 --- a/vendor/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a3909197..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index ac4c452117..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,129 +0,0 @@ -package cache - -import ( - "context" - - "github.com/docker/distribution" - prometheus "github.com/docker/distribution/metrics" - "github.com/opencontainers/go-digest" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// Logger can be provided on the MetricsTracker to log errors. -// -// Usually, this is just a proxy to dcontext.GetLogger. -type Logger interface { - Errorf(format string, args ...interface{}) -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics - Logger(context.Context) Logger -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -var ( - // cacheCount is the number of total cache request received/hits/misses - cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") -) - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. 
-func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. -func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - cacheCount.WithValues("Request").Inc(1) - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - cacheCount.WithValues("Hit").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - cacheCount.WithValues("Miss").Inc(1) - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} - -func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { - if tracker == nil { - return - } - - logger := tracker.Logger(ctx) - if logger == nil { - return - } - logger.Errorf(format, args...) -} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 42d94d9bde..0000000000 --- a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,179 +0,0 @@ -package memory - -import ( - "context" - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" - "github.com/opencontainers/go-digest" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNormalizedNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return repo.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - rsimbdcp.parent.mu.Unlock() - - if repo == nil { - return distribution.ErrBlobUnknown - } - - return repo.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - rsimbdcp.parent.mu.Lock() - repo := rsimbdcp.repository - if repo == nil { - // allocate map since we are setting it now. - var ok bool - // have to read back value since we may have allocated elsewhere. - repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - repo = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = repo - } - rsimbdcp.repository = repo - } - rsimbdcp.parent.mu.Unlock() - - if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2af..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go deleted file mode 100644 index d1d0434cb5..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ /dev/null @@ -1,121 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker-credential-helpers/credentials" -) - -// isValidCredsMessage checks if 'msg' contains invalid credentials error message. -// It returns whether the logs are free of invalid credentials errors and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. 
-func isValidCredsMessage(msg string) error { - if credentials.IsCredentialsMissingServerURLMessage(msg) { - return credentials.NewErrCredentialsMissingServerURL() - } - - if credentials.IsCredentialsMissingUsernameMessage(msg) { - return credentials.NewErrCredentialsMissingUsername() - } - - return nil -} - -// Store uses an external program to save credentials. -func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program("store") - - buffer := new(bytes.Buffer) - if err := json.NewEncoder(buffer).Encode(creds); err != nil { - return err - } - cmd.Input(buffer) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// Get executes an external program to get the credentials from a native store. -func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") - cmd.Input(strings.NewReader(serverURL)) - - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if credentials.IsErrCredentialsNotFoundMessage(t) { - return nil, credentials.NewErrCredentialsNotFound() - } - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) - } - - resp := &credentials.Credentials{ - ServerURL: serverURL, - } - - if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { - return nil, err - } - - return resp, nil -} - -// Erase executes a program to remove the server credentials from the native store. -func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") - cmd.Input(strings.NewReader(serverURL)) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) - } - - return nil -} - -// List executes a program to list server credentials in the native store. -func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") - cmd.Input(strings.NewReader("unused")) - out, err := cmd.Output() - if err != nil { - t := strings.TrimSpace(string(out)) - - if isValidErr := isValidCredsMessage(t); isValidErr != nil { - err = isValidErr - } - - return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) - } - - var resp map[string]string - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go deleted file mode 100644 index 8da3343065..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "fmt" - "io" - "os" - "os/exec" -) - -// Program is an interface to execute external programs. -type Program interface { - Output() ([]byte, error) - Input(in io.Reader) -} - -// ProgramFunc is a type of function that initializes programs based on arguments. -type ProgramFunc func(args ...string) Program - -// NewShellProgramFunc creates programs that are executed in a Shell. 
-func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) -} - -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { - return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} - } -} - -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) - programCmd.Env = os.Environ() - if env != nil { - for k, v := range *env { - programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) - } - } - programCmd.Stderr = os.Stderr - return programCmd -} - -// Shell invokes shell commands to talk with a remote credentials helper. -type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index da8b594e7f..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,186 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// isValid checks the integrity of Credentials object such that no credentials lack -// a server URL or a username. -// It returns whether the credentials are valid and the error if it isn't. -// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername -func (c *Credentials) isValid() (bool, error) { - if len(c.ServerURL) == 0 { - return false, NewErrCredentialsMissingServerURL() - } - - if len(c.Username) == 0 { - return false, NewErrCredentialsMissingUsername() - } - - return true, nil -} - -// CredsLabel holds the way Docker credentials should be labeled as such in credentials stores that allow labelling. -// That label allows to filter out non-Docker credentials too at lookup/search in macOS keychain, -// Windows credentials manager and Linux libsecret. Default value is "Docker Credentials" -var CredsLabel = "Docker Credentials" - -// SetCredsLabel is a simple setter for CredsLabel -func SetCredsLabel(label string) { - CredsLabel = label -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action.
-func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - case "list": - return List(helper, out) - case "version": - return PrintVersion(out) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - if ok, err := creds.isValid(); !ok { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - ServerURL: serverURL, - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. -func Erase(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - if len(serverURL) == 0 { - return NewErrCredentialsMissingServerURL() - } - - return helper.Delete(serverURL) -} - -//List returns all the serverURLs of keys in -//the OS store as a list of strings -func List(helper Helper, writer io.Writer) error { - accts, err := helper.List() - if err != nil { - return err - } - return json.NewEncoder(writer).Encode(accts) -} - -//PrintVersion outputs the current version. -func PrintVersion(writer io.Writer) error { - fmt.Fprintln(writer, Version) - return nil -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go deleted file mode 100644 index fe6a5aef45..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ /dev/null @@ -1,102 +0,0 @@ -package credentials - -const ( - // ErrCredentialsNotFound standardizes the not found error, so every helper returns - // the same message and docker can handle it properly. 
- errCredentialsNotFoundMessage = "credentials not found in native keychain" - - // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize - // invalid credentials or credentials management operations - errCredentialsMissingServerURLMessage = "no credentials server URL" - errCredentialsMissingUsernameMessage = "no credentials username" -) - -// errCredentialsNotFound represents an error -// raised when credentials are not in the store. -type errCredentialsNotFound struct{} - -// Error returns the standard error message -// for when the credentials are not in the store. -func (errCredentialsNotFound) Error() string { - return errCredentialsNotFoundMessage -} - -// NewErrCredentialsNotFound creates a new error -// for when the credentials are not in the store. -func NewErrCredentialsNotFound() error { - return errCredentialsNotFound{} -} - -// IsErrCredentialsNotFound returns true if the error -// was caused by not having a set of credentials in a store. -func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok -} - -// IsErrCredentialsNotFoundMessage returns true if the error -// was caused by not having a set of credentials in a store. -// -// This function helps to check messages returned by an -// external program via its standard output. -func IsErrCredentialsNotFoundMessage(err string) bool { - return err == errCredentialsNotFoundMessage -} - -// errCredentialsMissingServerURL represents an error raised -// when the credentials object has no server URL or when no -// server URL is provided to a credentials operation requiring -// one. -type errCredentialsMissingServerURL struct{} - -func (errCredentialsMissingServerURL) Error() string { - return errCredentialsMissingServerURLMessage -} - -// errCredentialsMissingUsername represents an error raised -// when the credentials object has no username or when no -// username is provided to a credentials operation requiring -// one. -type errCredentialsMissingUsername struct{} - -func (errCredentialsMissingUsername) Error() string { - return errCredentialsMissingUsernameMessage -} - -// NewErrCredentialsMissingServerURL creates a new error for -// errCredentialsMissingServerURL. -func NewErrCredentialsMissingServerURL() error { - return errCredentialsMissingServerURL{} -} - -// NewErrCredentialsMissingUsername creates a new error for -// errCredentialsMissingUsername. -func NewErrCredentialsMissingUsername() error { - return errCredentialsMissingUsername{} -} - -// IsCredentialsMissingServerURL returns true if the error -// was an errCredentialsMissingServerURL. -func IsCredentialsMissingServerURL(err error) bool { - _, ok := err.(errCredentialsMissingServerURL) - return ok -} - -// IsCredentialsMissingServerURLMessage checks for an -// errCredentialsMissingServerURL in the error message. -func IsCredentialsMissingServerURLMessage(err string) bool { - return err == errCredentialsMissingServerURLMessage -} - -// IsCredentialsMissingUsername returns true if the error -// was an errCredentialsMissingUsername. -func IsCredentialsMissingUsername(err error) bool { - _, ok := err.(errCredentialsMissingUsername) - return ok -} - -// IsCredentialsMissingUsernameMessage checks for an -// errCredentialsMissingUsername in the error message. 
-func IsCredentialsMissingUsernameMessage(err string) bool { - return err == errCredentialsMissingUsernameMessage -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go deleted file mode 100644 index 135acd254d..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go +++ /dev/null @@ -1,14 +0,0 @@ -package credentials - -// Helper is the interface a credentials store helper must implement. -type Helper interface { - // Add appends credentials to the store. - Add(*Credentials) error - // Delete removes credentials from the store. - Delete(serverURL string) error - // Get retrieves credentials from the store. - // It returns username and secret as strings. - Get(serverURL string) (string, string, error) - // List returns the stored serverURLs and their associated usernames. - List() (map[string]string, error) -} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go deleted file mode 100644 index 033a5fee55..0000000000 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package credentials - -// Version holds a string describing the current version -const Version = "0.6.0" diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS index 84059b7c2e..c5dafd7228 100644 --- a/vendor/github.com/docker/docker/AUTHORS +++ b/vendor/github.com/docker/docker/AUTHORS @@ -374,6 +374,7 @@ Dan Levy Dan McPherson Dan Stine Dan Williams +Dani Louca Daniel Antlinger Daniel Dao Daniel Exner @@ -740,7 +741,7 @@ Isao Jonas Ivan Babrou Ivan Fraixedes Ivan Grcic -Ivan Markin +Ivan Markin J Bruni J. Nunn Jack Danger Canty @@ -886,6 +887,7 @@ John V. Martinez John Warwick John Willis Jon Johnson +Jon Surrell Jon Wedaman Jonas Pfenniger Jonathan A. Sternberg @@ -1161,6 +1163,7 @@ Matt Hoyle Matt McCormick Matt Moore Matt Richardson +Matt Rickard Matt Robenolt Matt Schurenko Matt Williams @@ -1317,6 +1320,7 @@ Nikolay Milovanov Nirmal Mehta Nishant Totla NIWA Hideyuki +Noah Meyerhans Noah Treuhaft noducks Nolan Darilek @@ -1365,6 +1369,7 @@ Paul Nasrat Paul Weaver Paulo Ribeiro Pavel Lobashov +Pavel Pletenev Pavel Pospisil Pavel Sutyrin Pavel Tikhomirov @@ -1642,6 +1647,7 @@ Stefan S. Stefan Scherer Stefan Staudenmeyer Stefan Weil +Stephan Spindler Stephen Crosby Stephen Day Stephen Drake @@ -1823,7 +1829,7 @@ Vojtech Vitek (V-Teq) waitingkuo Walter Leibbrandt Walter Stanish -WANG Chao +Wang Chao Wang Guoliang Wang Jie Wang Long @@ -1834,6 +1840,7 @@ Ward Vandewege WarheadsSE Wayne Chang Wayne Song +Weerasak Chongnguluam Wei Wu Wei-Ting Kuo weiyan diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go index 97a92f8b78..beb251a989 100644 --- a/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.36" + DefaultVersion string = "1.37" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. 
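For context on the docker-credential-helpers packages deleted above: a credentials helper is a standalone binary that receives one action ("store", "get", "erase", "list", or "version") as its single command-line argument and the payload (a server URL, or a JSON-encoded Credentials struct) on stdin, exactly as Serve and HandleCommand dispatch it. A minimal sketch of a helper built against this removed API follows; the in-memory store and the binary name docker-credential-memory are hypothetical, invented for illustration.

package main

import (
	"github.com/docker/docker-credential-helpers/credentials"
)

// memoryHelper satisfies the Helper interface from helper.go above by
// keeping credentials in process memory. A real helper (osxkeychain,
// wincred, secretservice, ...) would persist to an OS credential store.
type memoryHelper struct {
	creds map[string]*credentials.Credentials
}

func (h *memoryHelper) Add(c *credentials.Credentials) error {
	h.creds[c.ServerURL] = c
	return nil
}

func (h *memoryHelper) Delete(serverURL string) error {
	delete(h.creds, serverURL)
	return nil
}

func (h *memoryHelper) Get(serverURL string) (string, string, error) {
	if c, ok := h.creds[serverURL]; ok {
		return c.Username, c.Secret, nil
	}
	// Returning the package's sentinel error produces the standardized
	// "credentials not found in native keychain" message that callers
	// match with IsErrCredentialsNotFoundMessage.
	return "", "", credentials.NewErrCredentialsNotFound()
}

func (h *memoryHelper) List() (map[string]string, error) {
	out := make(map[string]string, len(h.creds))
	for url, c := range h.creds {
		out[url] = c.Username
	}
	return out, nil
}

func main() {
	// Serve reads the action from os.Args[1] and the payload from
	// stdin, then dispatches through HandleCommand as shown above.
	credentials.Serve(&memoryHelper{creds: map[string]*credentials.Credentials{}})
}

On the calling side, the removed client package would drive such a binary with, for example, client.Get(client.NewShellProgramFunc("docker-credential-memory"), "https://index.docker.io/v1/"), writing the server URL to the helper's stdin and decoding the JSON reply from its stdout.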
diff --git a/vendor/github.com/docker/docker/api/types/backend/backend.go b/vendor/github.com/docker/docker/api/types/backend/backend.go new file mode 100644 index 0000000000..ef1e669c39 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/backend.go @@ -0,0 +1,128 @@ +// Package backend includes types to send information to server backends. +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + "time" + + "github.com/docker/docker/api/types/container" +) + +// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. +type ContainerAttachConfig struct { + GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) + UseStdin bool + UseStdout bool + UseStderr bool + Logs bool + Stream bool + DetachKeys string + + // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. + // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... + // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. + // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. + MuxStreams bool +} + +// PartialLogMetaData provides meta data for a partial log message. Messages +// exceeding a predefined size are split into chunks with this metadata. The +// expectation is for the logger endpoints to assemble the chunks using this +// metadata. +type PartialLogMetaData struct { + Last bool //true if this message is last of a partial + ID string // identifies group of messages comprising a single record + Ordinal int // ordering of message in partial group +} + +// LogMessage is datastructure that represents piece of output produced by some +// container. The Line member is a slice of an array whose contents can be +// changed after a log driver's Log() method returns. +// changes to this struct need to be reflect in the reset method in +// daemon/logger/logger.go +type LogMessage struct { + Line []byte + Source string + Timestamp time.Time + Attrs []LogAttr + PLogMetaData *PartialLogMetaData + + // Err is an error associated with a message. Completeness of a message + // with Err is not expected, tho it may be partially complete (fields may + // be missing, gibberish, or nil) + Err error +} + +// LogAttr is used to hold the extra attributes available in the log message. +type LogAttr struct { + Key string + Value string +} + +// LogSelector is a list of services and tasks that should be returned as part +// of a log stream. It is similar to swarmapi.LogSelector, with the difference +// that the names don't have to be resolved to IDs; this is mostly to avoid +// accidents later where a swarmapi LogSelector might have been incorrectly +// used verbatim (and to avoid the handler having to import swarmapi types) +type LogSelector struct { + Services []string + Tasks []string +} + +// ContainerStatsConfig holds information for configuring the runtime +// behavior of a backend.ContainerStats() call. +type ContainerStatsConfig struct { + Stream bool + OutStream io.Writer + Version string +} + +// ExecInspect holds information about a running process started +// with docker exec. 
+type ExecInspect struct { + ID string + Running bool + ExitCode *int + ProcessConfig *ExecProcessConfig + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Pid int +} + +// ExecProcessConfig holds information about the exec process +// running on the host. +type ExecProcessConfig struct { + Tty bool `json:"tty"` + Entrypoint string `json:"entrypoint"` + Arguments []string `json:"arguments"` + Privileged *bool `json:"privileged,omitempty"` + User string `json:"user,omitempty"` +} + +// CreateImageConfig is the configuration for creating an image from a +// container. +type CreateImageConfig struct { + Repo string + Tag string + Pause bool + Author string + Comment string + Config *container.Config + Changes []string +} + +// CommitConfig is the configuration for creating an image as part of a build. +type CommitConfig struct { + Author string + Comment string + Config *container.Config + ContainerConfig *container.Config + ContainerID string + ContainerMountLabel string + ContainerOS string + ParentImageID string +} diff --git a/vendor/github.com/docker/docker/api/types/backend/build.go b/vendor/github.com/docker/docker/api/types/backend/build.go new file mode 100644 index 0000000000..31e00ec6ce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/backend/build.go @@ -0,0 +1,44 @@ +package backend // import "github.com/docker/docker/api/types/backend" + +import ( + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/streamformatter" +) + +// PullOption defines different modes for accessing images +type PullOption int + +const ( + // PullOptionNoPull only returns local images + PullOptionNoPull PullOption = iota + // PullOptionForcePull always tries to pull a ref from the registry first + PullOptionForcePull + // PullOptionPreferLocal uses local image if it exists, otherwise pulls + PullOptionPreferLocal +) + +// ProgressWriter is a data object to transport progress streams to the client +type ProgressWriter struct { + Output io.Writer + StdoutFormatter io.Writer + StderrFormatter io.Writer + AuxFormatter *streamformatter.AuxFormatter + ProgressReaderFunc func(io.ReadCloser) io.ReadCloser +} + +// BuildConfig is the configuration used by a BuildManager to start a build +type BuildConfig struct { + Source io.ReadCloser + ProgressWriter ProgressWriter + Options *types.ImageBuildOptions +} + +// GetImageAndLayerOptions are the options supported by GetImageAndReleasableLayer +type GetImageAndLayerOptions struct { + PullOption PullOption + AuthConfig map[string]types.AuthConfig + Output io.Writer + OS string +} diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go deleted file mode 100644 index 027c6edb72..0000000000 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ /dev/null @@ -1,52 +0,0 @@ -package events // import "github.com/docker/docker/api/types/events" - -const ( - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // DaemonEventType is the event type that daemon generate - DaemonEventType = "daemon" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" - // PluginEventType is the event type that plugins generate - PluginEventType = "plugin" - // VolumeEventType is the event type that volumes generate - VolumeEventType = 
"volume" - // ServiceEventType is the event type that services generate - ServiceEventType = "service" - // NodeEventType is the event type that nodes generate - NodeEventType = "node" - // SecretEventType is the event type that secrets generate - SecretEventType = "secret" - // ConfigEventType is the event type that configs generate - ConfigEventType = "config" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set or attributes. -// The container attributes are its labels, other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events. - Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - Type string - Action string - Actor Actor - // Engine events are local scope. Cluster events are swarm scope. - Scope string `json:"scope,omitempty"` - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index d6b354bcdf..0000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,37 +0,0 @@ -package image - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem individual image layer information in response to ImageHistory operation -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go new file mode 100644 index 0000000000..5d7d8b4c41 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugins/logdriver/entry.pb.go @@ -0,0 +1,449 @@ +// Code generated by protoc-gen-gogo. +// source: entry.proto +// DO NOT EDIT! + +/* + Package logdriver is a generated protocol buffer package. + + It is generated from these files: + entry.proto + + It has these top-level messages: + LogEntry +*/ +package logdriver + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type LogEntry struct { + Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + TimeNano int64 `protobuf:"varint,2,opt,name=time_nano,json=timeNano,proto3" json:"time_nano,omitempty"` + Line []byte `protobuf:"bytes,3,opt,name=line,proto3" json:"line,omitempty"` + Partial bool `protobuf:"varint,4,opt,name=partial,proto3" json:"partial,omitempty"` +} + +func (m *LogEntry) Reset() { *m = LogEntry{} } +func (m *LogEntry) String() string { return proto.CompactTextString(m) } +func (*LogEntry) ProtoMessage() {} +func (*LogEntry) Descriptor() ([]byte, []int) { return fileDescriptorEntry, []int{0} } + +func (m *LogEntry) GetSource() string { + if m != nil { + return m.Source + } + return "" +} + +func (m *LogEntry) GetTimeNano() int64 { + if m != nil { + return m.TimeNano + } + return 0 +} + +func (m *LogEntry) GetLine() []byte { + if m != nil { + return m.Line + } + return nil +} + +func (m *LogEntry) GetPartial() bool { + if m != nil { + return m.Partial + } + return false +} + +func init() { + proto.RegisterType((*LogEntry)(nil), "LogEntry") +} +func (m *LogEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Source) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if m.TimeNano != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintEntry(dAtA, i, uint64(m.TimeNano)) + } + if len(m.Line) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintEntry(dAtA, i, uint64(len(m.Line))) + i += copy(dAtA[i:], m.Line) + } + if m.Partial { + dAtA[i] = 0x20 + i++ + if m.Partial { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func encodeFixed64Entry(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Entry(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintEntry(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *LogEntry) Size() (n int) { + var l int + _ = l + l = len(m.Source) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.TimeNano != 0 { + n += 1 + sovEntry(uint64(m.TimeNano)) + } + l = len(m.Line) + if l > 0 { + n += 1 + l + sovEntry(uint64(l)) + } + if m.Partial { + n += 2 + } + return n +} + +func sovEntry(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEntry(x uint64) (n int) { + return sovEntry(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LogEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeNano", wireType) + } + m.TimeNano = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeNano |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEntry + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Line = append(m.Line[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Line == nil { + m.Line = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partial", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEntry + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partial = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipEntry(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEntry + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEntry(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEntry + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEntry + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEntry(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEntry = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEntry = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("entry.proto", fileDescriptorEntry) } + +var fileDescriptorEntry = []byte{ + // 149 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2b, 0x29, + 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0xca, 0xe5, 0xe2, 0xf0, 0xc9, 0x4f, 0x77, 0x05, + 0x89, 0x08, 0x89, 0x71, 0xb1, 0x15, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x41, 0x79, 0x42, 0xd2, 0x5c, 0x9c, 0x25, 0x99, 0xb9, 0xa9, 0xf1, 0x79, 0x89, 0x79, + 0xf9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x1c, 0x20, 0x01, 0xbf, 0xc4, 0xbc, 0x7c, 0x21, + 0x21, 0x2e, 0x96, 0x9c, 0xcc, 0xbc, 0x54, 0x09, 0x66, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, + 0x48, 0x82, 0x8b, 0xbd, 0x20, 0xb1, 0xa8, 0x24, 0x33, 0x31, 0x47, 0x82, 0x45, 0x81, 0x51, 0x83, + 0x23, 0x08, 0xc6, 0x75, 
0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, + 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0x6e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2d, 0x24, 0x5a, + 0xd4, 0x92, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go new file mode 100644 index 0000000000..e5f10b5e0d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugins/logdriver/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc --gogofast_out=import_path=github.com/docker/docker/api/types/plugins/logdriver:. entry.proto + +package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" diff --git a/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go b/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go new file mode 100644 index 0000000000..9081b3b45f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugins/logdriver/io.go @@ -0,0 +1,87 @@ +package logdriver // import "github.com/docker/docker/api/types/plugins/logdriver" + +import ( + "encoding/binary" + "io" +) + +const binaryEncodeLen = 4 + +// LogEntryEncoder encodes a LogEntry to a protobuf stream +// The stream should look like: +// +// [uint32 binary encoded message size][protobuf message] +// +// To decode an entry, read the first 4 bytes to get the size of the entry, +// then read `size` bytes from the stream. +type LogEntryEncoder interface { + Encode(*LogEntry) error +} + +// NewLogEntryEncoder creates a protobuf stream encoder for log entries. +// This is used to write out log entries to a stream. +func NewLogEntryEncoder(w io.Writer) LogEntryEncoder { + return &logEntryEncoder{ + w: w, + buf: make([]byte, 1024), + } +} + +type logEntryEncoder struct { + buf []byte + w io.Writer +} + +func (e *logEntryEncoder) Encode(l *LogEntry) error { + n := l.Size() + + total := n + binaryEncodeLen + if total > len(e.buf) { + e.buf = make([]byte, total) + } + binary.BigEndian.PutUint32(e.buf, uint32(n)) + + if _, err := l.MarshalTo(e.buf[binaryEncodeLen:]); err != nil { + return err + } + _, err := e.w.Write(e.buf[:total]) + return err +} + +// LogEntryDecoder decodes log entries from a stream +// It is expected that the wire format is as defined by LogEntryEncoder. +type LogEntryDecoder interface { + Decode(*LogEntry) error +} + +// NewLogEntryDecoder creates a new stream decoder for log entries +func NewLogEntryDecoder(r io.Reader) LogEntryDecoder { + return &logEntryDecoder{ + lenBuf: make([]byte, binaryEncodeLen), + buf: make([]byte, 1024), + r: r, + } +} + +type logEntryDecoder struct { + r io.Reader + lenBuf []byte + buf []byte +} + +func (d *logEntryDecoder) Decode(l *LogEntry) error { + _, err := io.ReadFull(d.r, d.lenBuf) + if err != nil { + return err + } + + size := int(binary.BigEndian.Uint32(d.lenBuf)) + if len(d.buf) < size { + d.buf = make([]byte, size) + } + + if _, err := io.ReadFull(d.r, d.buf[:size]); err != nil { + return err + } + return l.Unmarshal(d.buf[:size]) +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go index c1fdf3b3e4..a1555cf43e 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -13,6 +13,10 @@ type Config struct { type ConfigSpec struct { Annotations Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. 
If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` } // ConfigReferenceFileTarget is a file target in a config reference diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index fd9b1a52c2..98ef3284d1 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -62,6 +62,8 @@ const ( PortConfigProtocolTCP PortConfigProtocol = "tcp" // PortConfigProtocolUDP UDP PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" ) // EndpointVirtualIP represents the virtual ip of a port. diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index cfba1141d8..d5213ec981 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -14,6 +14,10 @@ type SecretSpec struct { Annotations Data []byte `json:",omitempty"` Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go deleted file mode 100644 index 84b6f07322..0000000000 --- a/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go deleted file mode 100644 index 8d573accb1..0000000000 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ /dev/null @@ -1,122 +0,0 @@ -package time // import "github.com/docker/docker/api/types/time" - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset subtract that - // colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339 like timestamp but the parser failed with an error - } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the -// format "%d.%09d", time.Unix(), int64(time.Nanosecond())) -// if the incoming nanosecond portion is longer or shorter than 9 digits it is -// converted to nanoseconds. The expectation is that the seconds and -// seconds will be used to create a time variable. 
For example: -// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// returns seconds as def(aultSeconds) if value == "" -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // should already be in nanoseconds but just in case convert n to nanoseconds - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go deleted file mode 100644 index b2dd3a419d..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ /dev/null @@ -1,29 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// VolumesCreateBody volumes create body -// swagger:model VolumesCreateBody -type VolumesCreateBody struct { - - // Name of the volume driver to use. - // Required: true - Driver string `json:"Driver"` - - // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. - // Required: true - DriverOpts map[string]string `json:"DriverOpts"` - - // User-defined key/value metadata. - // Required: true - Labels map[string]string `json:"Labels"` - - // The new volume's name. If not specified, Docker generates a name. - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go deleted file mode 100644 index e071ca08fc..0000000000 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ /dev/null @@ -1,23 +0,0 @@ -package volume // import "github.com/docker/docker/api/types/volume" - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -import "github.com/docker/docker/api/types" - -// VolumesListOKBody volumes list o k body -// swagger:model VolumesListOKBody -type VolumesListOKBody struct { - - // List of volumes - // Required: true - Volumes []*types.Volume `json:"Volumes"` - - // Warnings that occurred when fetching the list of volumes - // Required: true - Warnings []string `json:"Warnings"` -} diff --git a/vendor/github.com/docker/docker/builder/builder.go b/vendor/github.com/docker/docker/builder/builder.go new file mode 100644 index 0000000000..3c5edb0679 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/builder.go @@ -0,0 +1,115 @@ +// Package builder defines interfaces for any Docker builder to implement. +// +// Historically, only server-side Dockerfile interpreters existed. +// This package allows for other implementations of Docker builders. 
+package builder // import "github.com/docker/docker/builder" + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" +) + +const ( + // DefaultDockerfileName is the Default filename with Docker commands, read by docker build + DefaultDockerfileName string = "Dockerfile" +) + +// Source defines a location that can be used as a source for the ADD/COPY +// instructions in the builder. +type Source interface { + // Root returns root path for accessing source + Root() containerfs.ContainerFS + // Close allows to signal that the filesystem tree won't be used anymore. + // For Context implementations using a temporary directory, it is recommended to + // delete the temporary directory in Close(). + Close() error + // Hash returns a checksum for a file + Hash(path string) (string, error) +} + +// Backend abstracts calls to a Docker Daemon. +type Backend interface { + ImageBackend + ExecBackend + + // CommitBuildStep creates a new Docker image from the config generated by + // a build step. + CommitBuildStep(backend.CommitConfig) (image.ID, error) + // ContainerCreateWorkdir creates the workdir + ContainerCreateWorkdir(containerID string) error + + CreateImage(config []byte, parent string) (Image, error) + + ImageCacheBuilder +} + +// ImageBackend are the interface methods required from an image component +type ImageBackend interface { + GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (Image, ROLayer, error) +} + +// ExecBackend contains the interface methods required for executing containers +type ExecBackend interface { + // ContainerAttachRaw attaches to container. + ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error + // ContainerCreate creates a new Docker container and returns potential warnings + ContainerCreate(config types.ContainerCreateConfig) (container.ContainerCreateCreatedBody, error) + // ContainerRm removes a container specified by `id`. + ContainerRm(name string, config *types.ContainerRmConfig) error + // ContainerKill stops the container execution abruptly. + ContainerKill(containerID string, sig uint64) error + // ContainerStart starts a new container + ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + // ContainerWait stops processing until the given container is stopped. + ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) +} + +// Result is the output produced by a Builder +type Result struct { + ImageID string + FromImage Image +} + +// ImageCacheBuilder represents a generator for stateful image cache. +type ImageCacheBuilder interface { + // MakeImageCache creates a stateful image cache. + MakeImageCache(cacheFrom []string) ImageCache +} + +// ImageCache abstracts an image cache. +// (parent image, child runconfig) -> child image +type ImageCache interface { + // GetCache returns a reference to a cached image whose parent equals `parent` + // and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. 
+ GetCache(parentID string, cfg *container.Config) (imageID string, err error) +} + +// Image represents a Docker image used by the builder. +type Image interface { + ImageID() string + RunConfig() *container.Config + MarshalJSON() ([]byte, error) + OperatingSystem() string +} + +// ROLayer is a reference to image rootfs layer +type ROLayer interface { + Release() error + NewRWLayer() (RWLayer, error) + DiffID() layer.DiffID +} + +// RWLayer is active layer that can be read/modified +type RWLayer interface { + Release() error + Root() containerfs.ContainerFS + Commit() (ROLayer, error) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go b/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go new file mode 100644 index 0000000000..232f9d23f6 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/buildargs.go @@ -0,0 +1,172 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "fmt" + "io" + + "github.com/docker/docker/runconfig/opts" +) + +// builtinAllowedBuildArgs is list of built-in allowed build args +// these args are considered transparent and are excluded from the image history. +// Filtering from history is implemented in dispatchers.go +var builtinAllowedBuildArgs = map[string]bool{ + "HTTP_PROXY": true, + "http_proxy": true, + "HTTPS_PROXY": true, + "https_proxy": true, + "FTP_PROXY": true, + "ftp_proxy": true, + "NO_PROXY": true, + "no_proxy": true, +} + +// BuildArgs manages arguments used by the builder +type BuildArgs struct { + // args that are allowed for expansion/substitution and passing to commands in 'run'. + allowedBuildArgs map[string]*string + // args defined before the first `FROM` in a Dockerfile + allowedMetaArgs map[string]*string + // args referenced by the Dockerfile + referencedArgs map[string]struct{} + // args provided by the user on the command line + argsFromOptions map[string]*string +} + +// NewBuildArgs creates a new BuildArgs type +func NewBuildArgs(argsFromOptions map[string]*string) *BuildArgs { + return &BuildArgs{ + allowedBuildArgs: make(map[string]*string), + allowedMetaArgs: make(map[string]*string), + referencedArgs: make(map[string]struct{}), + argsFromOptions: argsFromOptions, + } +} + +// Clone returns a copy of the BuildArgs type +func (b *BuildArgs) Clone() *BuildArgs { + result := NewBuildArgs(b.argsFromOptions) + for k, v := range b.allowedBuildArgs { + result.allowedBuildArgs[k] = v + } + for k, v := range b.allowedMetaArgs { + result.allowedMetaArgs[k] = v + } + for k := range b.referencedArgs { + result.referencedArgs[k] = struct{}{} + } + return result +} + +// MergeReferencedArgs merges referenced args from another BuildArgs +// object into the current one +func (b *BuildArgs) MergeReferencedArgs(other *BuildArgs) { + for k := range other.referencedArgs { + b.referencedArgs[k] = struct{}{} + } +} + +// WarnOnUnusedBuildArgs checks if there are any leftover build-args that were +// passed but not consumed during build. Print a warning, if there are any. 
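+// Built-in proxy args (HTTP_PROXY and friends, see builtinAllowedBuildArgs)
+// are never reported as unused.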
+func (b *BuildArgs) WarnOnUnusedBuildArgs(out io.Writer) { + leftoverArgs := []string{} + for arg := range b.argsFromOptions { + _, isReferenced := b.referencedArgs[arg] + _, isBuiltin := builtinAllowedBuildArgs[arg] + if !isBuiltin && !isReferenced { + leftoverArgs = append(leftoverArgs, arg) + } + } + if len(leftoverArgs) > 0 { + fmt.Fprintf(out, "[Warning] One or more build-args %v were not consumed\n", leftoverArgs) + } +} + +// ResetAllowed clears the list of args that are allowed to be used by a +// directive +func (b *BuildArgs) ResetAllowed() { + b.allowedBuildArgs = make(map[string]*string) +} + +// AddMetaArg adds a new meta arg that can be used by FROM directives +func (b *BuildArgs) AddMetaArg(key string, value *string) { + b.allowedMetaArgs[key] = value +} + +// AddArg adds a new arg that can be used by directives +func (b *BuildArgs) AddArg(key string, value *string) { + b.allowedBuildArgs[key] = value + b.referencedArgs[key] = struct{}{} +} + +// IsReferencedOrNotBuiltin checks if the key is a built-in arg, or if it has been +// referenced by the Dockerfile. Returns true if the arg is not a builtin or +// if the builtin has been referenced in the Dockerfile. +func (b *BuildArgs) IsReferencedOrNotBuiltin(key string) bool { + _, isBuiltin := builtinAllowedBuildArgs[key] + _, isAllowed := b.allowedBuildArgs[key] + return isAllowed || !isBuiltin +} + +// GetAllAllowed returns a mapping with all the allowed args +func (b *BuildArgs) GetAllAllowed() map[string]string { + return b.getAllFromMapping(b.allowedBuildArgs) +} + +// GetAllMeta returns a mapping with all the meta meta args +func (b *BuildArgs) GetAllMeta() map[string]string { + return b.getAllFromMapping(b.allowedMetaArgs) +} + +func (b *BuildArgs) getAllFromMapping(source map[string]*string) map[string]string { + m := make(map[string]string) + + keys := keysFromMaps(source, builtinAllowedBuildArgs) + for _, key := range keys { + v, ok := b.getBuildArg(key, source) + if ok { + m[key] = v + } + } + return m +} + +// FilterAllowed returns all allowed args without the filtered args +func (b *BuildArgs) FilterAllowed(filter []string) []string { + envs := []string{} + configEnv := opts.ConvertKVStringsToMap(filter) + + for key, val := range b.GetAllAllowed() { + if _, ok := configEnv[key]; !ok { + envs = append(envs, fmt.Sprintf("%s=%s", key, val)) + } + } + return envs +} + +func (b *BuildArgs) getBuildArg(key string, mapping map[string]*string) (string, bool) { + defaultValue, exists := mapping[key] + // Return override from options if one is defined + if v, ok := b.argsFromOptions[key]; ok && v != nil { + return *v, ok + } + + if defaultValue == nil { + if v, ok := b.allowedMetaArgs[key]; ok && v != nil { + return *v, ok + } + return "", false + } + return *defaultValue, exists +} + +func keysFromMaps(source map[string]*string, builtin map[string]bool) []string { + keys := []string{} + for key := range source { + keys = append(keys, key) + } + for key := range builtin { + keys = append(keys, key) + } + return keys +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder.go b/vendor/github.com/docker/docker/builder/dockerfile/builder.go new file mode 100644 index 0000000000..21d84cb513 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder.go @@ -0,0 +1,413 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/docker/docker/api/types" + 
"github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerfile/shell" + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/syncmap" +) + +var validCommitCommands = map[string]bool{ + "cmd": true, + "entrypoint": true, + "healthcheck": true, + "env": true, + "expose": true, + "label": true, + "onbuild": true, + "user": true, + "volume": true, + "workdir": true, +} + +const ( + stepFormat = "Step %d/%d : %v" +) + +// SessionGetter is object used to get access to a session by uuid +type SessionGetter interface { + Get(ctx context.Context, uuid string) (session.Caller, error) +} + +// BuildManager is shared across all Builder objects +type BuildManager struct { + idMappings *idtools.IDMappings + backend builder.Backend + pathCache pathCache // TODO: make this persistent + sg SessionGetter + fsCache *fscache.FSCache +} + +// NewBuildManager creates a BuildManager +func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) { + bm := &BuildManager{ + backend: b, + pathCache: &syncmap.Map{}, + sg: sg, + idMappings: idMappings, + fsCache: fsCache, + } + if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil { + return nil, err + } + return bm, nil +} + +// Build starts a new build from a BuildConfig +func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (*builder.Result, error) { + buildsTriggered.Inc() + if config.Options.Dockerfile == "" { + config.Options.Dockerfile = builder.DefaultDockerfileName + } + + source, dockerfile, err := remotecontext.Detect(config) + if err != nil { + return nil, err + } + defer func() { + if source != nil { + if err := source.Close(); err != nil { + logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) + } + } + }() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil { + return nil, err + } else if src != nil { + source = src + } + + os := "" + apiPlatform := system.ParsePlatform(config.Options.Platform) + if apiPlatform.OS != "" { + os = apiPlatform.OS + } + config.Options.Platform = os + + builderOptions := builderOptions{ + Options: config.Options, + ProgressWriter: config.ProgressWriter, + Backend: bm.backend, + PathCache: bm.pathCache, + IDMappings: bm.idMappings, + } + return newBuilder(ctx, builderOptions).build(source, dockerfile) +} + +func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) { + if options.SessionID == "" || bm.sg == nil { + return nil, nil + } + logrus.Debug("client is session enabled") + + connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout) + defer cancelCtx() + + c, err := bm.sg.Get(connectCtx, options.SessionID) + if err != nil { + return nil, err + } + go func() { + 
<-c.Context().Done() + cancel() + }() + if options.RemoteContext == remotecontext.ClientSessionRemote { + st := time.Now() + csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg, options.SessionID) + if err != nil { + return nil, err + } + src, err := bm.fsCache.SyncFrom(ctx, csi) + if err != nil { + return nil, err + } + logrus.Debugf("sync-time: %v", time.Since(st)) + return src, nil + } + return nil, nil +} + +// builderOptions are the dependencies required by the builder +type builderOptions struct { + Options *types.ImageBuildOptions + Backend builder.Backend + ProgressWriter backend.ProgressWriter + PathCache pathCache + IDMappings *idtools.IDMappings +} + +// Builder is a Dockerfile builder +// It implements the builder.Backend interface. +type Builder struct { + options *types.ImageBuildOptions + + Stdout io.Writer + Stderr io.Writer + Aux *streamformatter.AuxFormatter + Output io.Writer + + docker builder.Backend + clientCtx context.Context + + idMappings *idtools.IDMappings + disableCommit bool + imageSources *imageSources + pathCache pathCache + containerManager *containerManager + imageProber ImageProber +} + +// newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options. +func newBuilder(clientCtx context.Context, options builderOptions) *Builder { + config := options.Options + if config == nil { + config = new(types.ImageBuildOptions) + } + + b := &Builder{ + clientCtx: clientCtx, + options: config, + Stdout: options.ProgressWriter.StdoutFormatter, + Stderr: options.ProgressWriter.StderrFormatter, + Aux: options.ProgressWriter.AuxFormatter, + Output: options.ProgressWriter.Output, + docker: options.Backend, + idMappings: options.IDMappings, + imageSources: newImageSources(clientCtx, options), + pathCache: options.PathCache, + imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache), + containerManager: newContainerManager(options.Backend), + } + + return b +} + +// Build runs the Dockerfile builder by parsing the Dockerfile and executing +// the instructions from the file. +func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { + defer b.imageSources.Unmount() + + addNodesForLabelOption(dockerfile.AST, b.options.Labels) + + stages, metaArgs, err := instructions.Parse(dockerfile.AST) + if err != nil { + if instructions.IsUnknownInstruction(err) { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + } + return nil, errdefs.InvalidParameter(err) + } + if b.options.Target != "" { + targetIx, found := instructions.HasStage(stages, b.options.Target) + if !found { + buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc() + return nil, errdefs.InvalidParameter(errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)) + } + stages = stages[:targetIx+1] + } + + dockerfile.PrintWarnings(b.Stderr) + dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source) + if err != nil { + return nil, err + } + if dispatchState.imageID == "" { + buildsFailed.WithValues(metricsDockerfileEmptyError).Inc() + return nil, errors.New("No image was generated. 
Is your Dockerfile empty?") + } + return &builder.Result{ImageID: dispatchState.imageID, FromImage: dispatchState.baseImage}, nil +} + +func emitImageID(aux *streamformatter.AuxFormatter, state *dispatchState) error { + if aux == nil || state.imageID == "" { + return nil + } + return aux.Emit(types.BuildResult{ID: state.imageID}) +} + +func processMetaArg(meta instructions.ArgCommand, shlex *shell.Lex, args *BuildArgs) error { + // shell.Lex currently only support the concatenated string format + envs := convertMapToEnvList(args.GetAllAllowed()) + if err := meta.Expand(func(word string) (string, error) { + return shlex.ProcessWord(word, envs) + }); err != nil { + return err + } + args.AddArg(meta.Key, meta.Value) + args.AddMetaArg(meta.Key, meta.Value) + return nil +} + +func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int { + fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd) + fmt.Fprintln(out) + return currentCommandIndex + 1 +} + +func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) { + dispatchRequest := dispatchRequest{} + buildArgs := NewBuildArgs(b.options.BuildArgs) + totalCommands := len(metaArgs) + len(parseResult) + currentCommandIndex := 1 + for _, stage := range parseResult { + totalCommands += len(stage.Commands) + } + shlex := shell.NewLex(escapeToken) + for _, meta := range metaArgs { + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta) + + err := processMetaArg(meta, shlex, buildArgs) + if err != nil { + return nil, err + } + } + + stagesResults := newStagesBuildResults() + + for _, stage := range parseResult { + if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil { + return nil, err + } + dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults) + + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode) + if err := initializeStage(dispatchRequest, &stage); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + for _, cmd := range stage.Commands { + select { + case <-b.clientCtx.Done(): + logrus.Debug("Builder: build cancelled!") + fmt.Fprint(b.Stdout, "Build cancelled\n") + buildsFailed.WithValues(metricsBuildCanceled).Inc() + return nil, errors.New("Build cancelled") + default: + // Not cancelled yet, keep going... 
+ } + + currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd) + + if err := dispatch(dispatchRequest, cmd); err != nil { + return nil, err + } + dispatchRequest.state.updateRunConfig() + fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID)) + + } + if err := emitImageID(b.Aux, dispatchRequest.state); err != nil { + return nil, err + } + buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs) + if err := commitStage(dispatchRequest.state, stagesResults); err != nil { + return nil, err + } + } + buildArgs.WarnOnUnusedBuildArgs(b.Stdout) + return dispatchRequest.state, nil +} + +func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) { + if len(labels) == 0 { + return + } + + node := parser.NodeFromLabels(labels) + dockerfile.Children = append(dockerfile.Children, node) +} + +// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile +// It will: +// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. +// - Do build by calling builder.dispatch() to call all entries' handling routines +// +// BuildFromConfig is used by the /commit endpoint, with the changes +// coming from the query parameter of the same name. +// +// TODO: Remove? +func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { + if !system.IsOSSupported(os) { + return nil, errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) + } + if len(changes) == 0 { + return config, nil + } + + dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n"))) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + + b := newBuilder(context.Background(), builderOptions{ + Options: &types.ImageBuildOptions{NoCache: true}, + }) + + // ensure that the commands are valid + for _, n := range dockerfile.AST.Children { + if !validCommitCommands[n.Value] { + return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value)) + } + } + + b.Stdout = ioutil.Discard + b.Stderr = ioutil.Discard + b.disableCommit = true + + commands := []instructions.Command{} + for _, n := range dockerfile.AST.Children { + cmd, err := instructions.ParseCommand(n) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + commands = append(commands, cmd) + } + + dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, NewBuildArgs(b.options.BuildArgs), newStagesBuildResults()) + // We make mutations to the configuration, ensure we have a copy + dispatchRequest.state.runConfig = copyRunConfig(config) + dispatchRequest.state.imageID = config.Image + dispatchRequest.state.operatingSystem = os + for _, cmd := range commands { + err := dispatch(dispatchRequest, cmd) + if err != nil { + return nil, errdefs.InvalidParameter(err) + } + dispatchRequest.state.updateRunConfig() + } + + return dispatchRequest.state.runConfig, nil +} + +func convertMapToEnvList(m map[string]string) []string { + result := []string{} + for k, v := range m { + result = append(result, k+"="+v) + } + return result +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go new file mode 100644 index 0000000000..c4453459b3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package dockerfile // import 
"github.com/docker/docker/builder/dockerfile" + +func defaultShellForOS(os string) []string { + return []string{"/bin/sh", "-c"} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go new file mode 100644 index 0000000000..fbafa52aec --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/builder_windows.go @@ -0,0 +1,8 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +func defaultShellForOS(os string) []string { + if os == "linux" { + return []string{"/bin/sh", "-c"} + } + return []string{"cmd", "/S", "/C"} +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go b/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go new file mode 100644 index 0000000000..b48090d7b5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/clientsession.go @@ -0,0 +1,76 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "time" + + "github.com/docker/docker/builder/fscache" + "github.com/docker/docker/builder/remotecontext" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" +) + +const sessionConnectTimeout = 5 * time.Second + +// ClientSessionTransport is a transport for copying files from docker client +// to the daemon. +type ClientSessionTransport struct{} + +// NewClientSessionTransport returns new ClientSessionTransport instance +func NewClientSessionTransport() *ClientSessionTransport { + return &ClientSessionTransport{} +} + +// Copy data from a remote to a destination directory. +func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error { + csi, ok := id.(*ClientSessionSourceIdentifier) + if !ok { + return errors.New("invalid identifier for client session") + } + + return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{ + IncludePatterns: csi.includePatterns, + DestDir: dest, + CacheUpdater: cu, + }) +} + +// ClientSessionSourceIdentifier is an identifier that can be used for requesting +// files from remote client +type ClientSessionSourceIdentifier struct { + includePatterns []string + caller session.Caller + uuid string +} + +// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance +func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string) (*ClientSessionSourceIdentifier, error) { + csi := &ClientSessionSourceIdentifier{ + uuid: uuid, + } + caller, err := sg.Get(ctx, uuid) + if err != nil { + return nil, errors.Wrapf(err, "failed to get session for %s", uuid) + } + + csi.caller = caller + return csi, nil +} + +// Transport returns transport identifier for remote identifier +func (csi *ClientSessionSourceIdentifier) Transport() string { + return remotecontext.ClientSessionRemote +} + +// SharedKey returns shared key for remote identifier. Shared key is used +// for finding the base for a repeated transfer. +func (csi *ClientSessionSourceIdentifier) SharedKey() string { + return csi.caller.SharedKey() +} + +// Key returns unique key for remote identifier. Requests with same key return +// same data. 
+func (csi *ClientSessionSourceIdentifier) Key() string { + return csi.uuid +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go new file mode 100644 index 0000000000..54adfb13f7 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/containerbackend.go @@ -0,0 +1,146 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "fmt" + "io" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + containerpkg "github.com/docker/docker/container" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type containerManager struct { + tmpContainers map[string]struct{} + backend builder.ExecBackend +} + +// newContainerManager creates a new container backend +func newContainerManager(docker builder.ExecBackend) *containerManager { + return &containerManager{ + backend: docker, + tmpContainers: make(map[string]struct{}), + } +} + +// Create a container +func (c *containerManager) Create(runConfig *container.Config, hostConfig *container.HostConfig) (container.ContainerCreateCreatedBody, error) { + container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{ + Config: runConfig, + HostConfig: hostConfig, + }) + if err != nil { + return container, err + } + c.tmpContainers[container.ID] = struct{}{} + return container, nil +} + +var errCancelled = errors.New("build cancelled") + +// Run a container by ID +func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr io.Writer) (err error) { + attached := make(chan struct{}) + errCh := make(chan error) + go func() { + errCh <- c.backend.ContainerAttachRaw(cID, nil, stdout, stderr, true, attached) + }() + select { + case err := <-errCh: + return err + case <-attached: + } + + finished := make(chan struct{}) + cancelErrCh := make(chan error, 1) + go func() { + select { + case <-ctx.Done(): + logrus.Debugln("Build cancelled, killing and removing container:", cID) + c.backend.ContainerKill(cID, 0) + c.removeContainer(cID, stdout) + cancelErrCh <- errCancelled + case <-finished: + cancelErrCh <- nil + } + }() + + if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) + return err + } + + // Block on reading output from container, stop on err or chan closed + if err := <-errCh; err != nil { + close(finished) + logCancellationError(cancelErrCh, "error from errCh: "+err.Error()) + return err + } + + waitC, err := c.backend.ContainerWait(ctx, cID, containerpkg.WaitConditionNotRunning) + if err != nil { + close(finished) + logCancellationError(cancelErrCh, fmt.Sprintf("unable to begin ContainerWait: %s", err)) + return err + } + + if status := <-waitC; status.ExitCode() != 0 { + close(finished) + logCancellationError(cancelErrCh, + fmt.Sprintf("a non-zero code from ContainerWait: %d", status.ExitCode())) + return &statusCodeError{code: status.ExitCode(), err: status.Err()} + } + + close(finished) + return <-cancelErrCh +} + +func logCancellationError(cancelErrCh chan error, msg string) { + if cancelErr := <-cancelErrCh; cancelErr != nil { + logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg) + } +} + +type statusCodeError struct { + code int + err error +} + +func (e *statusCodeError) Error() string { + if e.err == nil { + return "" 
+ } + return e.err.Error() +} + +func (e *statusCodeError) StatusCode() int { + return e.code +} + +func (c *containerManager) removeContainer(containerID string, stdout io.Writer) error { + rmConfig := &types.ContainerRmConfig{ + ForceRemove: true, + RemoveVolume: true, + } + if err := c.backend.ContainerRm(containerID, rmConfig); err != nil { + fmt.Fprintf(stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(containerID), err) + return err + } + return nil +} + +// RemoveAll containers managed by this container manager +func (c *containerManager) RemoveAll(stdout io.Writer) { + for containerID := range c.tmpContainers { + if err := c.removeContainer(containerID, stdout); err != nil { + return + } + delete(c.tmpContainers, containerID) + fmt.Fprintf(stdout, "Removing intermediate container %s\n", stringid.TruncateID(containerID)) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy.go b/vendor/github.com/docker/docker/builder/dockerfile/copy.go new file mode 100644 index 0000000000..cb9f24d205 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy.go @@ -0,0 +1,558 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "archive/tar" + "fmt" + "io" + "mime" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "time" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +const unnamedFilename = "__unnamed__" + +type pathCache interface { + Load(key interface{}) (value interface{}, ok bool) + Store(key, value interface{}) +} + +// copyInfo is a data object which stores the metadata about each source file in +// a copyInstruction +type copyInfo struct { + root containerfs.ContainerFS + path string + hash string + noDecompress bool +} + +func (c copyInfo) fullPath() (string, error) { + return c.root.ResolveScopedPath(c.path, true) +} + +func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo { + return copyInfo{root: source.Root(), path: path, hash: hash} +} + +func newCopyInfos(copyInfos ...copyInfo) []copyInfo { + return copyInfos +} + +// copyInstruction is a fully parsed COPY or ADD command that is passed to +// Builder.performCopy to copy files into the image filesystem +type copyInstruction struct { + cmdName string + infos []copyInfo + dest string + chownStr string + allowLocalDecompression bool +} + +// copier reads a raw COPY or ADD command, fetches remote sources using a downloader, +// and creates a copyInstruction +type copier struct { + imageSource *imageMount + source builder.Source + pathCache pathCache + download sourceDownloader + platform string + // for cleanup. TODO: having copier.cleanup() is error prone and hard to + // follow. Code calling performCopy should manage the lifecycle of its params. + // Copier should take override source as input, not imageMount. 
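+ // activeLayer is the RW layer mounted while copying from another image;
+ // it is released again in Cleanup().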
+ activeLayer builder.RWLayer + tmpPaths []string +} + +func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier { + return copier{ + source: req.source, + pathCache: req.builder.pathCache, + download: download, + imageSource: imageSource, + platform: req.builder.options.Platform, + } +} + +func (o *copier) createCopyInstruction(args []string, cmdName string) (copyInstruction, error) { + inst := copyInstruction{cmdName: cmdName} + last := len(args) - 1 + + // Work in platform-specific filepath semantics + inst.dest = fromSlash(args[last], o.platform) + separator := string(separator(o.platform)) + infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest) + if err != nil { + return inst, errors.Wrapf(err, "%s failed", cmdName) + } + if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) { + return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) + } + inst.infos = infos + return inst, nil +} + +// getCopyInfosForSourcePaths iterates over the source files and calculate the info +// needed to copy (e.g. hash value if cached) +// The dest is used in case source is URL (and ends with "/") +func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) { + var infos []copyInfo + for _, orig := range sources { + subinfos, err := o.getCopyInfoForSourcePath(orig, dest) + if err != nil { + return nil, err + } + infos = append(infos, subinfos...) + } + + if len(infos) == 0 { + return nil, errors.New("no source files were specified") + } + return infos, nil +} + +func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) { + if !urlutil.IsURL(orig) { + return o.calcCopyInfo(orig, true) + } + + remote, path, err := o.download(orig) + if err != nil { + return nil, err + } + // If path == "" then we are unable to determine filename from src + // We have to make sure dest is available + if path == "" { + if strings.HasSuffix(dest, "/") { + return nil, errors.Errorf("cannot determine filename for source %s", orig) + } + path = unnamedFilename + } + o.tmpPaths = append(o.tmpPaths, remote.Root().Path()) + + hash, err := remote.Hash(path) + ci := newCopyInfoFromSource(remote, path, hash) + ci.noDecompress = true // data from http shouldn't be extracted even on ADD + return newCopyInfos(ci), err +} + +// Cleanup removes any temporary directories created as part of downloading +// remote files. +func (o *copier) Cleanup() { + for _, path := range o.tmpPaths { + os.RemoveAll(path) + } + o.tmpPaths = []string{} + if o.activeLayer != nil { + o.activeLayer.Release() + o.activeLayer = nil + } +} + +// TODO: allowWildcards can probably be removed by refactoring this function further. +func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) { + imageSource := o.imageSource + + // TODO: do this when creating copier. Requires validateCopySourcePath + // (and other below) to be aware of the difference sources. Why is it only + // done on image Source? 
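+ // Copying from another image stage (e.g. COPY --from=...) mounts that image
+ // as a temporary RW layer and uses its root as the source context below.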
+ if imageSource != nil { + var err error + rwLayer, err := imageSource.NewRWLayer() + if err != nil { + return nil, err + } + o.activeLayer = rwLayer + + o.source, err = remotecontext.NewLazySource(rwLayer.Root()) + if err != nil { + return nil, errors.Wrapf(err, "failed to create context for copy from %s", rwLayer.Root().Path()) + } + } + + if o.source == nil { + return nil, errors.Errorf("missing build context") + } + + root := o.source.Root() + + if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil { + return nil, err + } + + // Work in source OS specific filepath semantics + // For LCOW, this is NOT the daemon OS. + origPath = root.FromSlash(origPath) + origPath = strings.TrimPrefix(origPath, string(root.Separator())) + origPath = strings.TrimPrefix(origPath, "."+string(root.Separator())) + + // Deal with wildcards + if allowWildcards && containsWildcards(origPath, root.OS()) { + return o.copyWithWildcards(origPath) + } + + if imageSource != nil && imageSource.ImageID() != "" { + // return a cached copy if one exists + if h, ok := o.pathCache.Load(imageSource.ImageID() + origPath); ok { + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, h.(string))), nil + } + } + + // Deal with the single file case + copyInfo, err := copyInfoForFile(o.source, origPath) + switch { + case err != nil: + return nil, err + case copyInfo.hash != "": + o.storeInPathCache(imageSource, origPath, copyInfo.hash) + return newCopyInfos(copyInfo), err + } + + // TODO: remove, handle dirs in Hash() + subfiles, err := walkSource(o.source, origPath) + if err != nil { + return nil, err + } + + hash := hashStringSlice("dir", subfiles) + o.storeInPathCache(imageSource, origPath, hash) + return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil +} + +func containsWildcards(name, platform string) bool { + isWindows := platform == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func (o *copier) storeInPathCache(im *imageMount, path string, hash string) { + if im != nil { + o.pathCache.Store(im.ImageID()+path, hash) + } +} + +func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) { + root := o.source.Root() + var copyInfos []copyInfo + if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(root, path) + if err != nil { + return err + } + + if rel == "." { + return nil + } + if match, _ := root.Match(origPath, rel); !match { + return nil + } + + // Note we set allowWildcards to false in case the name has + // a * in it + subInfos, err := o.calcCopyInfo(rel, false) + if err != nil { + return err + } + copyInfos = append(copyInfos, subInfos...) 
+ return nil + }); err != nil { + return nil, err + } + return copyInfos, nil +} + +func copyInfoForFile(source builder.Source, path string) (copyInfo, error) { + fi, err := remotecontext.StatAt(source, path) + if err != nil { + return copyInfo{}, err + } + + if fi.IsDir() { + return copyInfo{}, nil + } + hash, err := source.Hash(path) + if err != nil { + return copyInfo{}, err + } + return newCopyInfoFromSource(source, path, "file:"+hash), nil +} + +// TODO: dedupe with copyWithWildcards() +func walkSource(source builder.Source, origPath string) ([]string, error) { + fp, err := remotecontext.FullPath(source, origPath) + if err != nil { + return nil, err + } + // Must be a dir + var subfiles []string + err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := remotecontext.Rel(source.Root(), path) + if err != nil { + return err + } + if rel == "." { + return nil + } + hash, err := source.Hash(rel) + if err != nil { + return nil + } + // we already checked handleHash above + subfiles = append(subfiles, hash) + return nil + }) + if err != nil { + return nil, err + } + + sort.Strings(subfiles) + return subfiles, nil +} + +type sourceDownloader func(string) (builder.Source, string, error) + +func newRemoteSourceDownloader(output, stdout io.Writer) sourceDownloader { + return func(url string) (builder.Source, string, error) { + return downloadSource(output, stdout, url) + } +} + +func errOnSourceDownload(_ string) (builder.Source, string, error) { + return nil, "", errors.New("source can't be a URL for COPY") +} + +func getFilenameForDownload(path string, resp *http.Response) string { + // Guess filename based on source + if path != "" && !strings.HasSuffix(path, "/") { + if filename := filepath.Base(filepath.FromSlash(path)); filename != "" { + return filename + } + } + + // Guess filename based on Content-Disposition + if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" { + if _, params, err := mime.ParseMediaType(contentDisposition); err == nil { + if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") { + if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" { + return filename + } + } + } + } + return "" +} + +func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) { + u, err := url.Parse(srcURL) + if err != nil { + return + } + + resp, err := remotecontext.GetWithStatusError(srcURL) + if err != nil { + return + } + + filename := getFilenameForDownload(u.Path, resp) + + // Prepare file in a tmp dir + tmpDir, err := ioutils.TempDir("", "docker-remote") + if err != nil { + return + } + defer func() { + if err != nil { + os.RemoveAll(tmpDir) + } + }() + // If filename is empty, the returned filename will be "" but + // the tmp filename will be created as "__unnamed__" + tmpFileName := filename + if filename == "" { + tmpFileName = unnamedFilename + } + tmpFileName = filepath.Join(tmpDir, tmpFileName) + tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return + } + + progressOutput := streamformatter.NewJSONProgressOutput(output, true) + progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") + // Download and dump result to tmp file + // TODO: add filehash directly + if _, err = io.Copy(tmpFile, progressReader); err != nil { + tmpFile.Close() + return + } + // TODO: how 
important is this random blank line to the output?
+ fmt.Fprintln(stdout)
+
+ // Set the mtime to the Last-Modified header value if present
+ // Otherwise just remove atime and mtime
+ mTime := time.Time{}
+
+ lastMod := resp.Header.Get("Last-Modified")
+ if lastMod != "" {
+ // If we can't parse it then just let it default to 'zero'
+ // otherwise use the parsed time value
+ if parsedMTime, err := http.ParseTime(lastMod); err == nil {
+ mTime = parsedMTime
+ }
+ }
+
+ tmpFile.Close()
+
+ if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil {
+ return
+ }
+
+ lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
+ return lc, filename, err
+}
+
+type copyFileOptions struct {
+ decompress bool
+ chownPair idtools.IDPair
+ archiver Archiver
+}
+
+type copyEndpoint struct {
+ driver containerfs.Driver
+ path string
+}
+
+func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
+ srcPath, err := source.fullPath()
+ if err != nil {
+ return err
+ }
+
+ destPath, err := dest.fullPath()
+ if err != nil {
+ return err
+ }
+
+ archiver := options.archiver
+
+ srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
+ destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}
+
+ src, err := source.root.Stat(srcPath)
+ if err != nil {
+ return errors.Wrapf(err, "source path not found")
+ }
+ if src.IsDir() {
+ return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
+ }
+ if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
+ return archiver.UntarPath(srcPath, destPath)
+ }
+
+ destExistsAsDir, err := isExistingDirectory(destEndpoint)
+ if err != nil {
+ return err
+ }
+ // dest.path must be used because destPath has already been cleaned of any
+ // trailing slash
+ if endsInSlash(dest.root, dest.path) || destExistsAsDir {
+ // source.path must be used to get the correct filename when the source
+ // is a symlink
+ destPath = dest.root.Join(destPath, source.root.Base(source.path))
+ destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
+ }
+ return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
+}
+
+func isArchivePath(driver containerfs.ContainerFS, path string) bool {
+ file, err := driver.Open(path)
+ if err != nil {
+ return false
+ }
+ defer file.Close()
+ rdr, err := archive.DecompressStream(file)
+ if err != nil {
+ return false
+ }
+ r := tar.NewReader(rdr)
+ _, err = r.Next()
+ return err == nil
+}
+
+func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
+ destExists, err := isExistingDirectory(dest)
+ if err != nil {
+ return errors.Wrapf(err, "failed to query destination path")
+ }
+
+ if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
+ return errors.Wrapf(err, "failed to copy directory")
+ }
+ // TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
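+ // Passing !destExists as overrideSkip means the destination root is chowned
+ // only when this copy created it; a pre-existing directory keeps its owner.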
+ return fixPermissions(source.path, dest.path, chownPair, !destExists) +} + +func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error { + if runtime.GOOS == "windows" && dest.driver.OS() == "linux" { + // LCOW + if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil { + return errors.Wrapf(err, "failed to create new directory") + } + } else { + if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil { + // Normal containers + return errors.Wrapf(err, "failed to create new directory") + } + } + + if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil { + return errors.Wrapf(err, "failed to copy file") + } + // TODO: @gupta-ak. Investigate how LCOW permission mappings will work. + return fixPermissions(source.path, dest.path, chownPair, false) +} + +func endsInSlash(driver containerfs.Driver, path string) bool { + return strings.HasSuffix(path, string(driver.Separator())) +} + +// isExistingDirectory returns true if the path exists and is a directory +func isExistingDirectory(point *copyEndpoint) (bool, error) { + destStat, err := point.driver.Stat(point.path) + switch { + case os.IsNotExist(err): + return false, nil + case err != nil: + return false, err + } + return destStat.IsDir(), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go new file mode 100644 index 0000000000..15453452e5 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_unix.go @@ -0,0 +1,48 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "os" + "path/filepath" + + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" +) + +func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { + var ( + skipChownRoot bool + err error + ) + if !overrideSkip { + destEndpoint := ©Endpoint{driver: containerfs.NewLocalDriver(), path: destination} + skipChownRoot, err = isExistingDirectory(destEndpoint) + if err != nil { + return err + } + } + + // We Walk on the source rather than on the destination because we don't + // want to change permissions on things we haven't created or modified. + return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { + // Do not alter the walk root iff. it existed before, as it doesn't fall under + // the domain of "things we should chown". + if skipChownRoot && source == fullpath { + return nil + } + + // Path is prefixed by source: substitute with destination instead. 
+ cleaned, err := filepath.Rel(source, fullpath) + if err != nil { + return err + } + + fullpath = filepath.Join(destination, cleaned) + return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID) + }) +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go new file mode 100644 index 0000000000..907c34407c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/copy_windows.go @@ -0,0 +1,43 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" +) + +var pathBlacklist = map[string]bool{ + "c:\\": true, + "c:\\windows": true, +} + +func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error { + // chown is not supported on Windows + return nil +} + +func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error { + // validate windows paths from other images + LCOW + if imageSource == nil || platform != "windows" { + return nil + } + + origPath = filepath.FromSlash(origPath) + p := strings.ToLower(filepath.Clean(origPath)) + if !filepath.IsAbs(p) { + if filepath.VolumeName(p) != "" { + if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths + p = p[:len(p)-1] + } + p += "\\" + } else { + p = filepath.Join("c:\\", p) + } + } + if _, blacklisted := pathBlacklist[p]; blacklisted { + return errors.New("copy from c:\\ or c:\\windows is not allowed on windows") + } + return nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go new file mode 100644 index 0000000000..9dd7502453 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers.go @@ -0,0 +1,571 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +// This file contains the dispatchers for each command. Note that +// `nullDispatch` is not actually a command, but support for commands we parse +// but do nothing with. +// +// See evaluator.go for a higher level discussion of the whole evaluator +// package. + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerfile/shell" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ENV foo bar +// +// Sets the environment variable foo to bar, also makes interpolation +// in the dockerfile available from the next statement on via ${foo}. 
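+//
+// The replace-or-append merge this implements can be written down in
+// isolation. envMergeSketch is a hypothetical helper, not part of this
+// package; the real loop compares keys with shell.EqualEnvKeys, which may
+// match case-insensitively depending on the platform:
+//
+//	func envMergeSketch(env []string, newVar string) []string {
+//		name := strings.SplitN(newVar, "=", 2)[0]
+//		for i, kv := range env {
+//			if strings.SplitN(kv, "=", 2)[0] == name {
+//				env[i] = newVar // existing key: overwrite in place, order kept
+//				return env
+//			}
+//		}
+//		return append(env, newVar) // new key: append
+//	}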
+// +func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { + runConfig := d.state.runConfig + commitMessage := bytes.NewBufferString("ENV") + for _, e := range c.Env { + name := e.Key + newVar := e.String() + + commitMessage.WriteString(" " + newVar) + gotOne := false + for i, envVar := range runConfig.Env { + envParts := strings.SplitN(envVar, "=", 2) + compareFrom := envParts[0] + if shell.EqualEnvKeys(compareFrom, name) { + runConfig.Env[i] = newVar + gotOne = true + break + } + } + if !gotOne { + runConfig.Env = append(runConfig.Env, newVar) + } + } + return d.builder.commit(d.state, commitMessage.String()) +} + +// MAINTAINER some text +// +// Sets the maintainer metadata. +func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error { + + d.state.maintainer = c.Maintainer + return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer) +} + +// LABEL some json data describing the image +// +// Sets the Label variable foo to bar. +// +func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { + if d.state.runConfig.Labels == nil { + d.state.runConfig.Labels = make(map[string]string) + } + commitStr := "LABEL" + for _, v := range c.Labels { + d.state.runConfig.Labels[v.Key] = v.Value + commitStr += " " + v.String() + } + return d.builder.commit(d.state, commitStr) +} + +// ADD foo /path +// +// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling +// exists here. If you do not wish to have this automatic handling, use COPY. +// +func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { + downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout) + copier := copierFromDispatchRequest(d, downloader, nil) + defer copier.Cleanup() + + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD") + if err != nil { + return err + } + copyInstruction.chownStr = c.Chown + copyInstruction.allowLocalDecompression = true + + return d.builder.performCopy(d.state, copyInstruction) +} + +// COPY foo /path +// +// Same as 'ADD' but without the tar and remote url handling. +// +func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { + var im *imageMount + var err error + if c.From != "" { + im, err = d.getImageMount(c.From) + if err != nil { + return errors.Wrapf(err, "invalid from flag value %s", c.From) + } + } + copier := copierFromDispatchRequest(d, errOnSourceDownload, im) + defer copier.Cleanup() + copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY") + if err != nil { + return err + } + copyInstruction.chownStr = c.Chown + + return d.builder.performCopy(d.state, copyInstruction) +} + +func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) { + if imageRefOrID == "" { + // TODO: this could return the source in the default case as well?
+ return nil, nil + } + + var localOnly bool + stage, err := d.stages.get(imageRefOrID) + if err != nil { + return nil, err + } + if stage != nil { + imageRefOrID = stage.Image + localOnly = true + } + return d.builder.imageSources.Get(imageRefOrID, localOnly, d.state.operatingSystem) +} + +// FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] +// +func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { + d.builder.imageProber.Reset() + if err := system.ValidatePlatform(&cmd.Platform); err != nil { + return err + } + image, err := d.getFromImage(d.shlex, cmd.BaseName, cmd.Platform.OS) + if err != nil { + return err + } + state := d.state + if err := state.beginStage(cmd.Name, image); err != nil { + return err + } + if len(state.runConfig.OnBuild) > 0 { + triggers := state.runConfig.OnBuild + state.runConfig.OnBuild = nil + return dispatchTriggeredOnBuild(d, triggers) + } + return nil +} + +func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { + fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers)) + if len(triggers) > 1 { + fmt.Fprint(d.builder.Stdout, "s") + } + fmt.Fprintln(d.builder.Stdout) + for _, trigger := range triggers { + d.state.updateRunConfig() + ast, err := parser.Parse(strings.NewReader(trigger)) + if err != nil { + return err + } + if len(ast.AST.Children) != 1 { + return errors.New("onbuild trigger should be a single expression") + } + cmd, err := instructions.ParseCommand(ast.AST.Children[0]) + if err != nil { + if instructions.IsUnknownInstruction(err) { + buildsFailed.WithValues(metricsUnknownInstructionError).Inc() + } + return err + } + err = dispatch(d, cmd) + if err != nil { + return err + } + } + return nil +} + +func (d *dispatchRequest) getExpandedImageName(shlex *shell.Lex, name string) (string, error) { + substitutionArgs := []string{} + for key, value := range d.state.buildArgs.GetAllMeta() { + substitutionArgs = append(substitutionArgs, key+"="+value) + } + + name, err := shlex.ProcessWord(name, substitutionArgs) + if err != nil { + return "", err + } + return name, nil +} + +// getOsFromFlagsAndStage calculates the operating system if we need to pull an image. +// stagePlatform contains the value supplied by optional `--platform=` on +// a current FROM statement. b.builder.options.Platform contains the operating +// system part of the optional flag passed in the API call (or CLI flag +// through `docker build --platform=...`). Precedence is for an explicit +// platform indication in the FROM statement. +func (d *dispatchRequest) getOsFromFlagsAndStage(stageOS string) string { + switch { + case stageOS != "": + return stageOS + case d.builder.options.Platform != "": + // Note this is API "platform", but by this point, as the daemon is not + // multi-arch aware yet, it is guaranteed to only hold the OS part here. + return d.builder.options.Platform + default: + return runtime.GOOS + } +} + +func (d *dispatchRequest) getImageOrStage(name string, stageOS string) (builder.Image, error) { + var localOnly bool + if im, ok := d.stages.getByName(name); ok { + name = im.Image + localOnly = true + } + + os := d.getOsFromFlagsAndStage(stageOS) + + // Windows cannot support a container with no base image unless it is LCOW. 
+ if name == api.NoBaseImageSpecifier { + imageImage := &image.Image{} + imageImage.OS = runtime.GOOS + if runtime.GOOS == "windows" { + switch os { + case "windows", "": + return nil, errors.New("Windows does not support FROM scratch") + case "linux": + if !system.LCOWSupported() { + return nil, errors.New("Linux containers are not supported on this system") + } + imageImage.OS = "linux" + default: + return nil, errors.Errorf("operating system %q is not supported", os) + } + } + return builder.Image(imageImage), nil + } + imageMount, err := d.builder.imageSources.Get(name, localOnly, os) + if err != nil { + return nil, err + } + return imageMount.Image(), nil +} +func (d *dispatchRequest) getFromImage(shlex *shell.Lex, name string, stageOS string) (builder.Image, error) { + name, err := d.getExpandedImageName(shlex, name) + if err != nil { + return nil, err + } + return d.getImageOrStage(name, stageOS) +} + +func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { + + d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression) + return d.builder.commit(d.state, "ONBUILD "+c.Expression) +} + +// WORKDIR /tmp +// +// Set the working directory for future RUN/CMD/etc statements. +// +func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { + runConfig := d.state.runConfig + var err error + runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) + if err != nil { + return err + } + + // For performance reasons, we explicitly do a create/mkdir now + // This avoids unnecessary, expensive mount/unmount calls + // (on Windows in particular) during each container create. + // Prior to 1.13, the mkdir was deferred and not executed at this step. + if d.builder.disableCommit { + // Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo". + // We've already updated the runConfig and that's enough. + return nil + } + + comment := "WORKDIR " + runConfig.WorkingDir + runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) + containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) + if err != nil || containerID == "" { + return err + } + if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil { + return err + } + + return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd) +} + +func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, os string) []string { + result := cmd.CmdLine + if cmd.PrependShell && result != nil { + result = append(getShell(runConfig, os), result...) + } + return result +} + +// RUN some command yo +// +// run a command and commit the image. Args are automatically prepended with +// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under +// Windows, in the event there is only one argument. The difference in processing: +// +// RUN echo hi # sh -c echo hi (Linux and LCOW) +// RUN echo hi # cmd /S /C echo hi (Windows) +// RUN [ "echo", "hi" ] # echo hi +// +func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { + if !system.IsOSSupported(d.state.operatingSystem) { + return system.ErrNotSupportedOperatingSystem + } + stateRunConfig := d.state.runConfig + cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, d.state.operatingSystem) + buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env) + + saveCmd := cmdFromArgs + if len(buildArgs) > 0 { + saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs) + } + + runConfigForCacheProbe := copyRunConfig(stateRunConfig, + withCmd(saveCmd), + withEntrypointOverride(saveCmd, nil)) + hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe) + if err != nil || hit { + return err + } + + runConfig := copyRunConfig(stateRunConfig, + withCmd(cmdFromArgs), + withEnv(append(stateRunConfig.Env, buildArgs...)), + withEntrypointOverride(saveCmd, strslice.StrSlice{""})) + + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) + cID, err := d.builder.create(runConfig) + if err != nil { + return err + } + if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil { + if err, ok := err.(*statusCodeError); ok { + // TODO: change error type, because jsonmessage.JSONError assumes HTTP + msg := fmt.Sprintf( + "The command '%s' returned a non-zero code: %d", + strings.Join(runConfig.Cmd, " "), err.StatusCode()) + if err.Error() != "" { + msg = fmt.Sprintf("%s: %s", msg, err.Error()) + } + return &jsonmessage.JSONError{ + Message: msg, + Code: err.StatusCode(), + } + } + return err + } + + return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe) +} + +// Derive the command to use for probeCache() and to commit in this container. +// Note that we only do this if there are any build-time env vars. Also, we +// use the special argument "|#" at the start of the args array. This will +// avoid conflicts with any RUN command since commands cannot +// start with | (vertical bar). The "#" (number of build envs) is there to +// help ensure proper cache matches. We don't want a RUN command +// that starts with "foo=abc" to be considered part of a build-time env var. +// +// remove any unreferenced built-in args from the environment variables. +// These args are transparent so the resulting image should be the same regardless +// of the value. +func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.StrSlice) strslice.StrSlice { + var tmpBuildEnv []string + for _, env := range buildArgVars { + key := strings.SplitN(env, "=", 2)[0] + if buildArgs.IsReferencedOrNotBuiltin(key) { + tmpBuildEnv = append(tmpBuildEnv, env) + } + } + + sort.Strings(tmpBuildEnv) + tmpEnv := append([]string{fmt.Sprintf("|%d", len(tmpBuildEnv))}, tmpBuildEnv...) + return strslice.StrSlice(append(tmpEnv, cmd...)) +} + +// CMD foo +// +// Set the default command to run in the container (which may be empty). +// Argument handling is the same as RUN.
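+//
+// A worked example of the cache-probe command that prependEnvOnCmd above
+// builds (the values are illustrative): with referenced build args FOO=bar
+// and BAZ=qux and the resolved command ["/bin/sh", "-c", "echo hi"], the
+// probe Cmd becomes
+//
+//	["|2", "BAZ=qux", "FOO=bar", "/bin/sh", "-c", "echo hi"]
+//
+// i.e. the sorted env pairs prefixed with their count; this cannot collide
+// with a real RUN because commands cannot start with '|'.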
+// +func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Cmd = cmd + // set config as already being escaped, this prevents double escaping on windows + runConfig.ArgsEscaped = true + + if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { + return err + } + + if len(c.ShellDependantCmdLine.CmdLine) != 0 { + d.state.cmdSet = true + } + + return nil +} + +// HEALTHCHECK foo +// +// Set the default healthcheck command to run in the container (which may be empty). +// Argument handling is the same as RUN. +// +func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error { + runConfig := d.state.runConfig + if runConfig.Healthcheck != nil { + oldCmd := runConfig.Healthcheck.Test + if len(oldCmd) > 0 && oldCmd[0] != "NONE" { + fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) + } + } + runConfig.Healthcheck = c.Health + return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) +} + +// ENTRYPOINT /usr/sbin/nginx +// +// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments +// to /usr/sbin/nginx. Uses the default shell if not in JSON format. +// +// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint +// is initialized at newBuilder time instead of through argument parsing. +// +func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { + runConfig := d.state.runConfig + cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem) + runConfig.Entrypoint = cmd + if !d.state.cmdSet { + runConfig.Cmd = nil + } + + return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) +} + +// EXPOSE 6667/tcp 7000/tcp +// +// Expose ports for links and port mappings. This all ends up in +// req.runConfig.ExposedPorts for runconfig. +// +func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { + // custom multi word expansion + // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion + // so the word processing has been de-generalized + ports := []string{} + for _, p := range c.Ports { + ps, err := d.shlex.ProcessWords(p, envs) + if err != nil { + return err + } + ports = append(ports, ps...) + } + c.Ports = ports + + ps, _, err := nat.ParsePortSpecs(ports) + if err != nil { + return err + } + + if d.state.runConfig.ExposedPorts == nil { + d.state.runConfig.ExposedPorts = make(nat.PortSet) + } + for p := range ps { + d.state.runConfig.ExposedPorts[p] = struct{}{} + } + + return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " ")) +} + +// USER foo +// +// Set the user to 'foo' for future commands and when running the +// ENTRYPOINT/CMD at container run time. +// +func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error { + d.state.runConfig.User = c.User + return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User)) +} + +// VOLUME /foo +// +// Expose the volume /foo for use. Will also accept the JSON array form. 
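+//
+// (A worked example for the EXPOSE expansion above: with FOO="80 443",
+// `EXPOSE $FOO` expands to the port specs ["80", "443"], which
+// nat.ParsePortSpecs then turns into the set {"80/tcp", "443/tcp"};
+// tcp is the assumed default protocol.)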
+// +func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { + if d.state.runConfig.Volumes == nil { + d.state.runConfig.Volumes = map[string]struct{}{} + } + for _, v := range c.Volumes { + if v == "" { + return errors.New("VOLUME specified can not be an empty string") + } + d.state.runConfig.Volumes[v] = struct{}{} + } + return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) +} + +// STOPSIGNAL signal +// +// Set the signal that will be used to kill the container. +func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error { + + _, err := signal.ParseSignal(c.Signal) + if err != nil { + return errdefs.InvalidParameter(err) + } + d.state.runConfig.StopSignal = c.Signal + return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) +} + +// ARG name[=value] +// +// Adds the variable foo to the trusted list of variables that can be passed +// to the builder using the --build-arg flag for expansion/substitution or passing to 'run'. +// The Dockerfile author may optionally set a default value of this variable. +func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { + + commitStr := "ARG " + c.Key + if c.Value != nil { + commitStr += "=" + *c.Value + } + + d.state.buildArgs.AddArg(c.Key, c.Value) + return d.builder.commit(d.state, commitStr) +} + +// SHELL powershell -command +// +// Set the non-default shell to use. +func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error { + d.state.runConfig.Shell = c.Shell + return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go new file mode 100644 index 0000000000..b3ba380323 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_unix.go @@ -0,0 +1,23 @@ +// +build !windows + +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "os" + "path/filepath" +) + +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdir(_ string, current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + current = filepath.FromSlash(current) + requested = filepath.FromSlash(requested) + if !filepath.IsAbs(requested) { + return filepath.Join(string(os.PathSeparator), current, requested), nil + } + return requested, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go new file mode 100644 index 0000000000..7824d1169b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/dispatchers_windows.go @@ -0,0 +1,95 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "errors" + "fmt" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/docker/docker/pkg/system" +) + +var pattern = regexp.MustCompile(`^[a-zA-Z]:\.$`) + +// normalizeWorkdir normalizes a user requested working directory in a +// platform semantically consistent way.
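+//
+// For example (illustrative):
+//
+//	normalizeWorkdir("", `C:\foo`, `bar`)    // `C:\foo\bar` (empty platform defaults to "windows")
+//	normalizeWorkdir("linux", "/foo", "bar") // "/foo/bar" (LCOW keeps Unix path semantics)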
+func normalizeWorkdir(platform string, current string, requested string) (string, error) { + if platform == "" { + platform = "windows" + } + if platform == "windows" { + return normalizeWorkdirWindows(current, requested) + } + return normalizeWorkdirUnix(current, requested) +} + +// normalizeWorkdirUnix normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdirUnix(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + current = strings.Replace(current, string(os.PathSeparator), "/", -1) + requested = strings.Replace(requested, string(os.PathSeparator), "/", -1) + if !path.IsAbs(requested) { + return path.Join(`/`, current, requested), nil + } + return requested, nil +} + +// normalizeWorkdirWindows normalizes a user requested working directory in a +// platform semantically consistent way. +func normalizeWorkdirWindows(current string, requested string) (string, error) { + if requested == "" { + return "", errors.New("cannot normalize nothing") + } + + // `filepath.Clean` will replace "" with "." so skip in that case + if current != "" { + current = filepath.Clean(current) + } + if requested != "" { + requested = filepath.Clean(requested) + } + + // If either current or requested in Windows is: + // C: + // C:. + // then an error will be thrown as the definition for the above + // refers to `current directory on drive C:` + // Since filepath.Clean() will automatically normalize the above + // to `C:.`, we only need to check the last format + if pattern.MatchString(current) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", current) + } + if pattern.MatchString(requested) { + return "", fmt.Errorf("%s is not a directory. If you are specifying a drive letter, please add a trailing '\\'", requested) + } + + // Target semantics is C:\somefolder, specifically in the format: + // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already + // guaranteed that `current`, if set, is consistent. This allows us to + // cope correctly with any of the following in a Dockerfile: + // WORKDIR a --> C:\a + // WORKDIR c:\\foo --> C:\foo + // WORKDIR \\foo --> C:\foo + // WORKDIR /foo --> C:\foo + // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar + // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar + // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar + if len(current) == 0 || system.IsAbs(requested) { + if (requested[0] == os.PathSeparator) || + (len(requested) > 1 && string(requested[1]) != ":") || + (len(requested) == 1) { + requested = filepath.Join(`C:\`, requested) + } + } else { + requested = filepath.Join(current, requested) + } + // Upper-case drive letter + return (strings.ToUpper(string(requested[0])) + requested[1:]), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go new file mode 100644 index 0000000000..75073cec6d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/evaluator.go @@ -0,0 +1,250 @@ +// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. +// +// It incorporates a dispatch table based on the parser.Node values (see the +// parser package for more information) that are yielded from the parser itself. 
+// Calling newBuilder with the BuildOpts struct can be used to customize the +// experience for execution purposes only. Parsing is controlled in the parser +// package, and this division of responsibility should be respected. +// +// Please see the jump table targets for the actual invocations, most of which +// will call out to the functions in internals.go to deal with their tasks. +// +// ONBUILD is a special case, which is covered in the onbuild() func in +// dispatchers.go. +// +// The evaluator uses the concept of "steps", which are usually each processable +// line in the Dockerfile. Each step is numbered and certain actions are taken +// before and after each step, such as creating an image ID and removing temporary +// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which +// includes its own set of steps (usually only one of them). +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "reflect" + "runtime" + "strconv" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/instructions" + "github.com/docker/docker/builder/dockerfile/shell" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/runconfig/opts" + "github.com/pkg/errors" +) + +func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { + if c, ok := cmd.(instructions.PlatformSpecific); ok { + err := c.CheckPlatform(d.state.operatingSystem) + if err != nil { + return errdefs.InvalidParameter(err) + } + } + runConfigEnv := d.state.runConfig.Env + envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...) + + if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok { + err := ex.Expand(func(word string) (string, error) { + return d.shlex.ProcessWord(word, envs) + }) + if err != nil { + return errdefs.InvalidParameter(err) + } + } + + defer func() { + if d.builder.options.ForceRemove { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return + } + if d.builder.options.Remove && err == nil { + d.builder.containerManager.RemoveAll(d.builder.Stdout) + return + } + }() + switch c := cmd.(type) { + case *instructions.EnvCommand: + return dispatchEnv(d, c) + case *instructions.MaintainerCommand: + return dispatchMaintainer(d, c) + case *instructions.LabelCommand: + return dispatchLabel(d, c) + case *instructions.AddCommand: + return dispatchAdd(d, c) + case *instructions.CopyCommand: + return dispatchCopy(d, c) + case *instructions.OnbuildCommand: + return dispatchOnbuild(d, c) + case *instructions.WorkdirCommand: + return dispatchWorkdir(d, c) + case *instructions.RunCommand: + return dispatchRun(d, c) + case *instructions.CmdCommand: + return dispatchCmd(d, c) + case *instructions.HealthCheckCommand: + return dispatchHealthcheck(d, c) + case *instructions.EntrypointCommand: + return dispatchEntrypoint(d, c) + case *instructions.ExposeCommand: + return dispatchExpose(d, c, envs) + case *instructions.UserCommand: + return dispatchUser(d, c) + case *instructions.VolumeCommand: + return dispatchVolume(d, c) + case *instructions.StopSignalCommand: + return dispatchStopSignal(d, c) + case *instructions.ArgCommand: + return dispatchArg(d, c) + case *instructions.ShellCommand: + return dispatchShell(d, c) + } + return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd)) +} + +// dispatchState is a data object which is modified by dispatchers +type dispatchState struct { 
+ runConfig *container.Config + maintainer string + cmdSet bool + imageID string + baseImage builder.Image + stageName string + buildArgs *BuildArgs + operatingSystem string +} + +func newDispatchState(baseArgs *BuildArgs) *dispatchState { + args := baseArgs.Clone() + args.ResetAllowed() + return &dispatchState{runConfig: &container.Config{}, buildArgs: args} +} + +type stagesBuildResults struct { + flat []*container.Config + indexed map[string]*container.Config +} + +func newStagesBuildResults() *stagesBuildResults { + return &stagesBuildResults{ + indexed: make(map[string]*container.Config), + } +} + +func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) { + c, ok := r.indexed[strings.ToLower(name)] + return c, ok +} + +func (r *stagesBuildResults) validateIndex(i int) error { + if i == len(r.flat) { + return errors.New("refers to current build stage") + } + if i < 0 || i > len(r.flat) { + return errors.New("index out of bounds") + } + return nil +} + +func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) { + if c, ok := r.getByName(nameOrIndex); ok { + return c, nil + } + ix, err := strconv.ParseInt(nameOrIndex, 10, 0) + if err != nil { + return nil, nil + } + if err := r.validateIndex(int(ix)); err != nil { + return nil, err + } + return r.flat[ix], nil +} + +func (r *stagesBuildResults) checkStageNameAvailable(name string) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name already used", name) + } + } + return nil +} + +func (r *stagesBuildResults) commitStage(name string, config *container.Config) error { + if name != "" { + if _, ok := r.getByName(name); ok { + return errors.Errorf("%s stage name already used", name) + } + r.indexed[strings.ToLower(name)] = config + } + r.flat = append(r.flat, config) + return nil +} + +func commitStage(state *dispatchState, stages *stagesBuildResults) error { + return stages.commitStage(state.stageName, state.runConfig) +} + +type dispatchRequest struct { + state *dispatchState + shlex *shell.Lex + builder *Builder + source builder.Source + stages *stagesBuildResults +} + +func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *BuildArgs, stages *stagesBuildResults) dispatchRequest { + return dispatchRequest{ + state: newDispatchState(buildArgs), + shlex: shell.NewLex(escapeToken), + builder: builder, + source: source, + stages: stages, + } +} + +func (s *dispatchState) updateRunConfig() { + s.runConfig.Image = s.imageID +} + +// hasFromImage returns true if the builder has processed a `FROM <image>` line +func (s *dispatchState) hasFromImage() bool { + return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "") +} + +func (s *dispatchState) beginStage(stageName string, image builder.Image) error { + s.stageName = stageName + s.imageID = image.ImageID() + s.operatingSystem = image.OperatingSystem() + if s.operatingSystem == "" { // In case it isn't set + s.operatingSystem = runtime.GOOS + } + if !system.IsOSSupported(s.operatingSystem) { + return system.ErrNotSupportedOperatingSystem + } + + if image.RunConfig() != nil { + // copy avoids referencing the same instance when 2 stages have the same base + s.runConfig = copyRunConfig(image.RunConfig()) + } else { + s.runConfig = &container.Config{} + } + s.baseImage = image + s.setDefaultPath() + s.runConfig.OpenStdin = false + s.runConfig.StdinOnce = false + return nil +} + +// Add the default PATH to runConfig.ENV if one exists for the operating system
and there +// is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS +func (s *dispatchState) setDefaultPath() { + defaultPath := system.DefaultPathEnv(s.operatingSystem) + if defaultPath == "" { + return + } + envMap := opts.ConvertKVStringsToMap(s.runConfig.Env) + if _, ok := envMap["PATH"]; !ok { + s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath) + } +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go new file mode 100644 index 0000000000..53a4b9774b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/imagecontext.go @@ -0,0 +1,121 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "context" + "runtime" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + dockerimage "github.com/docker/docker/image" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type getAndMountFunc func(string, bool, string) (builder.Image, builder.ROLayer, error) + +// imageSources mounts images and provides a cache for mounted images. It tracks +// all images so they can be unmounted at the end of the build. +type imageSources struct { + byImageID map[string]*imageMount + mounts []*imageMount + getImage getAndMountFunc +} + +func newImageSources(ctx context.Context, options builderOptions) *imageSources { + getAndMount := func(idOrRef string, localOnly bool, osForPull string) (builder.Image, builder.ROLayer, error) { + pullOption := backend.PullOptionNoPull + if !localOnly { + if options.Options.PullParent { + pullOption = backend.PullOptionForcePull + } else { + pullOption = backend.PullOptionPreferLocal + } + } + return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{ + PullOption: pullOption, + AuthConfig: options.Options.AuthConfigs, + Output: options.ProgressWriter.Output, + OS: osForPull, + }) + } + + return &imageSources{ + byImageID: make(map[string]*imageMount), + getImage: getAndMount, + } +} + +func (m *imageSources) Get(idOrRef string, localOnly bool, osForPull string) (*imageMount, error) { + if im, ok := m.byImageID[idOrRef]; ok { + return im, nil + } + + image, layer, err := m.getImage(idOrRef, localOnly, osForPull) + if err != nil { + return nil, err + } + im := newImageMount(image, layer) + m.Add(im) + return im, nil +} + +func (m *imageSources) Unmount() (retErr error) { + for _, im := range m.mounts { + if err := im.unmount(); err != nil { + logrus.Error(err) + retErr = err + } + } + return +} + +func (m *imageSources) Add(im *imageMount) { + switch im.image { + case nil: + // set the OS for scratch images + os := runtime.GOOS + // Windows does not support scratch except for LCOW + if runtime.GOOS == "windows" { + os = "linux" + } + im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}} + default: + m.byImageID[im.image.ImageID()] = im + } + m.mounts = append(m.mounts, im) +} + +// imageMount is a reference to an image that can be used as a builder.Source +type imageMount struct { + image builder.Image + source builder.Source + layer builder.ROLayer +} + +func newImageMount(image builder.Image, layer builder.ROLayer) *imageMount { + im := &imageMount{image: image, layer: layer} + return im +} + +func (im *imageMount) unmount() error { + if im.layer == nil { + return nil + } + if err := im.layer.Release(); err != nil { + return errors.Wrapf(err, "failed to unmount previous build image %s", 
im.image.ImageID()) + } + im.layer = nil + return nil +} + +func (im *imageMount) Image() builder.Image { + return im.image +} + +func (im *imageMount) NewRWLayer() (builder.RWLayer, error) { + return im.layer.NewRWLayer() +} + +func (im *imageMount) ImageID() string { + return im.image.ImageID() +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go new file mode 100644 index 0000000000..6960bf8897 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/imageprobe.go @@ -0,0 +1,63 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/sirupsen/logrus" +) + +// ImageProber exposes an Image cache to the Builder. It supports resetting a +// cache. +type ImageProber interface { + Reset() + Probe(parentID string, runConfig *container.Config) (string, error) +} + +type imageProber struct { + cache builder.ImageCache + reset func() builder.ImageCache + cacheBusted bool +} + +func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { + if noCache { + return &nopProber{} + } + + reset := func() builder.ImageCache { + return cacheBuilder.MakeImageCache(cacheFrom) + } + return &imageProber{cache: reset(), reset: reset} +} + +func (c *imageProber) Reset() { + c.cache = c.reset() + c.cacheBusted = false +} + +// Probe checks if cache match can be found for current build instruction. +// It returns the cachedID if there is a hit, and the empty string on miss +func (c *imageProber) Probe(parentID string, runConfig *container.Config) (string, error) { + if c.cacheBusted { + return "", nil + } + cacheID, err := c.cache.GetCache(parentID, runConfig) + if err != nil { + return "", err + } + if len(cacheID) == 0 { + logrus.Debugf("[BUILDER] Cache miss: %s", runConfig.Cmd) + c.cacheBusted = true + return "", nil + } + logrus.Debugf("[BUILDER] Use cached version: %s", runConfig.Cmd) + return cacheID, nil +} + +type nopProber struct{} + +func (c *nopProber) Reset() {} + +func (c *nopProber) Probe(_ string, _ *container.Config) (string, error) { + return "", nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go b/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go index ef435b27b3..9d864e5325 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/instructions/commands.go @@ -7,9 +7,10 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/strslice" + specs "github.com/opencontainers/image-spec/specs-go/v1" ) -// KeyValuePair represent an arbitrary named value (useful in slice insted of map[string] string to preserve ordering) +// KeyValuePair represent an arbitrary named value (useful in slice instead of map[string] string to preserve ordering) type KeyValuePair struct { Key string Value string @@ -195,7 +196,7 @@ func (c *WorkdirCommand) Expand(expander SingleWordExpander) error { return nil } -// ShellDependantCmdLine represents a cmdline optionaly prepended with the shell +// ShellDependantCmdLine represents a cmdline optionally prepended with the shell type ShellDependantCmdLine struct { CmdLine strslice.StrSlice PrependShell bool @@ -361,6 +362,7 @@ type Stage struct { Commands []Command BaseName string SourceCode string + 
Platform specs.Platform } // AddCommand to the stage @@ -388,7 +390,8 @@ func CurrentStage(s []Stage) (*Stage, error) { // HasStage looks for the presence of a given stage name func HasStage(s []Stage, name string) (int, bool) { for i, stage := range s { - if stage.Name == name { + // Stage name is case-insensitive by design + if strings.EqualFold(stage.Name, name) { return i, true } } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go index 9226f4d46e..e2d69a4887 100644 --- a/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go +++ b/vendor/github.com/docker/docker/builder/dockerfile/instructions/parse.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/pkg/system" "github.com/pkg/errors" ) @@ -271,16 +272,17 @@ func parseFrom(req parseRequest) (*Stage, error) { return nil, err } + flPlatform := req.flags.AddString("platform", "") if err := req.flags.Parse(); err != nil { return nil, err } code := strings.TrimSpace(req.original) - return &Stage{ BaseName: req.args[0], Name: stageName, SourceCode: code, Commands: []Command{}, + Platform: *system.ParsePlatform(flPlatform.Value), }, nil } diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals.go b/vendor/github.com/docker/docker/builder/dockerfile/internals.go new file mode 100644 index 0000000000..53748f0619 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals.go @@ -0,0 +1,486 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +// internals for handling commands. Covers many areas and a lot of +// non-contiguous functionality. Please read the comments. + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/builder" + "github.com/docker/docker/image" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +// Archiver defines an interface for copying files from one destination to +// another using Tar/Untar. +type Archiver interface { + TarUntar(src, dst string) error + UntarPath(src, dst string) error + CopyWithTar(src, dst string) error + CopyFileWithTar(src, dst string) error + IDMappings() *idtools.IDMappings +} + +// The builder will use the following interfaces if the container fs implements +// these for optimized copies to and from the container. 
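+//
+// The selection is an ordinary Go capability probe: assert the optional
+// interface and fall back to the generic implementation. A sketch of the
+// pattern as used by untarFunc below (names illustrative):
+//
+//	if ea, ok := fs.(extractor); ok {
+//		return ea.ExtractArchive // the driver provides an optimized path
+//	}
+//	return chrootarchive.Untar // generic fallback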
+type extractor interface { + ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error +} + +type archiver interface { + ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) +} + +// helper functions to get tar/untar func +func untarFunc(i interface{}) containerfs.UntarFunc { + if ea, ok := i.(extractor); ok { + return ea.ExtractArchive + } + return chrootarchive.Untar +} + +func tarFunc(i interface{}) containerfs.TarFunc { + if ap, ok := i.(archiver); ok { + return ap.ArchivePath + } + return archive.TarWithOptions +} + +func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { + t, u := tarFunc(src), untarFunc(dst) + return &containerfs.Archiver{ + SrcDriver: src, + DstDriver: dst, + Tar: t, + Untar: u, + IDMappingsVar: b.idMappings, + } +} + +func (b *Builder) commit(dispatchState *dispatchState, comment string) error { + if b.disableCommit { + return nil + } + if !dispatchState.hasFromImage() { + return errors.New("Please provide a source image with `from` prior to commit") + } + + runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) + hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + id, err := b.create(runConfigWithCommentCmd) + if err != nil { + return err + } + + return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) +} + +func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { + if b.disableCommit { + return nil + } + + commitCfg := backend.CommitConfig{ + Author: dispatchState.maintainer, + // TODO: this copy should be done by Commit() + Config: copyRunConfig(dispatchState.runConfig), + ContainerConfig: containerConfig, + ContainerID: id, + } + + imageID, err := b.docker.CommitBuildStep(commitCfg) + dispatchState.imageID = string(imageID) + return err +} + +func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, parent builder.Image, runConfig *container.Config) error { + newLayer, err := layer.Commit() + if err != nil { + return err + } + + // add an image mount without an image so the layer is properly unmounted + // if there is an error before we can add the full mount with image + b.imageSources.Add(newImageMount(nil, newLayer)) + + parentImage, ok := parent.(*image.Image) + if !ok { + return errors.Errorf("unexpected image type") + } + + newImage := image.NewChildImage(parentImage, image.ChildConfig{ + Author: state.maintainer, + ContainerConfig: runConfig, + DiffID: newLayer.DiffID(), + Config: copyRunConfig(state.runConfig), + }, parentImage.OS) + + // TODO: it seems strange to marshal this here instead of just passing in the + // image struct + config, err := newImage.MarshalJSON() + if err != nil { + return errors.Wrap(err, "failed to encode image config") + } + + exportedImage, err := b.docker.CreateImage(config, state.imageID) + if err != nil { + return errors.Wrapf(err, "failed to export image") + } + + state.imageID = exportedImage.ImageID() + b.imageSources.Add(newImageMount(exportedImage, newLayer)) + return nil +} + +func (b *Builder) performCopy(state *dispatchState, inst copyInstruction) error { + srcHash := getSourceHashFromInfos(inst.infos) + + var chownComment string + if inst.chownStr != "" { + chownComment = fmt.Sprintf("--chown=%s", inst.chownStr) + } + commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest) + + // TODO: should this have been using 
origPaths instead of srcHash in the comment? + runConfigWithCommentCmd := copyRunConfig( + state.runConfig, + withCmdCommentString(commentStr, state.operatingSystem)) + hit, err := b.probeCache(state, runConfigWithCommentCmd) + if err != nil || hit { + return err + } + + imageMount, err := b.imageSources.Get(state.imageID, true, state.operatingSystem) + if err != nil { + return errors.Wrapf(err, "failed to get destination image %q", state.imageID) + } + + rwLayer, err := imageMount.NewRWLayer() + if err != nil { + return err + } + defer rwLayer.Release() + + destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, rwLayer, state.operatingSystem) + if err != nil { + return err + } + + chownPair := b.idMappings.RootPair() + // if a chown was requested, perform the steps to get the uid, gid + // translated (if necessary because of user namespaces), and replace + // the root pair with the chown pair for copy operations + if inst.chownStr != "" { + chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings) + if err != nil { + return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") + } + } + + for _, info := range inst.infos { + opts := copyFileOptions{ + decompress: inst.allowLocalDecompression, + archiver: b.getArchiver(info.root, destInfo.root), + chownPair: chownPair, + } + if err := performCopyForInfo(destInfo, info, opts); err != nil { + return errors.Wrapf(err, "failed to copy files") + } + } + return b.exportImage(state, rwLayer, imageMount.Image(), runConfigWithCommentCmd) +} + +func createDestInfo(workingDir string, inst copyInstruction, rwLayer builder.RWLayer, platform string) (copyInfo, error) { + // Twiddle the destination when it's a relative path - meaning, make it + // relative to the WORKINGDIR + dest, err := normalizeDest(workingDir, inst.dest, platform) + if err != nil { + return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName) + } + + return copyInfo{root: rwLayer.Root(), path: dest}, nil +} + +// normalizeDest normalises the destination of a COPY/ADD command in a +// platform semantically consistent way. +func normalizeDest(workingDir, requested string, platform string) (string, error) { + dest := fromSlash(requested, platform) + endsInSlash := strings.HasSuffix(dest, string(separator(platform))) + + if platform != "windows" { + if !path.IsAbs(requested) { + dest = path.Join("/", filepath.ToSlash(workingDir), dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += "/" + } + } + return dest, nil + } + + // We are guaranteed that the working directory is already consistent, + // However, Windows also has, for now, the limitation that ADD/COPY can + // only be done to the system drive, not any drives that might be present + // as a result of a bind mount. + // + // So... if the path requested is Linux-style absolute (/foo or \\foo), + // we assume it is the system drive. If it is a Windows-style absolute + // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we + // strip any configured working directories drive letter so that it + // can be subsequently legitimately converted to a Windows volume-style + // pathname. + + // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as + // we only want to validate where the DriveColon part has been supplied. 
+ if filepath.IsAbs(dest) { + if strings.ToUpper(string(dest[0])) != "C" { + return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)") + } + dest = dest[2:] // Strip the drive letter + } + + // Cannot handle relative where WorkingDir is not the system drive. + if len(workingDir) > 0 { + if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { + return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) + } + if !system.IsAbs(dest) { + if string(workingDir[0]) != "C" { + return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive") + } + dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) + // Make sure we preserve any trailing slash + if endsInSlash { + dest += string(os.PathSeparator) + } + } + } + return dest, nil +} + +// For backwards compat, if there's just one info then use it as the +// cache look-up string, otherwise hash 'em all into one +func getSourceHashFromInfos(infos []copyInfo) string { + if len(infos) == 1 { + return infos[0].hash + } + var hashs []string + for _, info := range infos { + hashs = append(hashs, info.hash) + } + return hashStringSlice("multi", hashs) +} + +func hashStringSlice(prefix string, slice []string) string { + hasher := sha256.New() + hasher.Write([]byte(strings.Join(slice, ","))) + return prefix + ":" + hex.EncodeToString(hasher.Sum(nil)) +} + +type runConfigModifier func(*container.Config) + +func withCmd(cmd []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = cmd + } +} + +// withCmdComment sets Cmd to a nop comment string. See withCmdCommentString for +// why there are two almost identical versions of this. +func withCmdComment(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) ", comment) + } +} + +// withCmdCommentString exists to maintain compatibility with older versions. +// A few instructions (workdir, copy, add) used a nop comment that is a single arg +// whereas all the other instructions used a two arg comment string. This +// function implements the single arg version. +func withCmdCommentString(comment string, platform string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Cmd = append(getShell(runConfig, platform), "#(nop) "+comment) + } +} + +func withEnv(env []string) runConfigModifier { + return func(runConfig *container.Config) { + runConfig.Env = env + } +} + +// withEntrypointOverride sets an entrypoint on runConfig if the command is +// not empty. The entrypoint is left unmodified if command is empty. +// +// The dockerfile RUN instruction expects to run without an entrypoint +// so the runConfig entrypoint needs to be modified accordingly. ContainerCreate +// will change a []string{""} entrypoint to nil, so we probe the cache with the +// nil entrypoint.
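+//
+// Concretely (an illustrative condensation of dispatchRun above, not a
+// complete call):
+//
+//	probe := copyRunConfig(rc, withCmd(saveCmd), withEntrypointOverride(saveCmd, nil))
+//	run := copyRunConfig(rc, withCmd(cmd), withEntrypointOverride(saveCmd, strslice.StrSlice{""}))
+//
+// Both forms mean "run without an entrypoint", so the cache probe and the
+// created container agree.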
+func withEntrypointOverride(cmd []string, entrypoint []string) runConfigModifier { + return func(runConfig *container.Config) { + if len(cmd) > 0 { + runConfig.Entrypoint = entrypoint + } + } +} + +func copyRunConfig(runConfig *container.Config, modifiers ...runConfigModifier) *container.Config { + copy := *runConfig + copy.Cmd = copyStringSlice(runConfig.Cmd) + copy.Env = copyStringSlice(runConfig.Env) + copy.Entrypoint = copyStringSlice(runConfig.Entrypoint) + copy.OnBuild = copyStringSlice(runConfig.OnBuild) + copy.Shell = copyStringSlice(runConfig.Shell) + + if copy.Volumes != nil { + copy.Volumes = make(map[string]struct{}, len(runConfig.Volumes)) + for k, v := range runConfig.Volumes { + copy.Volumes[k] = v + } + } + + if copy.ExposedPorts != nil { + copy.ExposedPorts = make(nat.PortSet, len(runConfig.ExposedPorts)) + for k, v := range runConfig.ExposedPorts { + copy.ExposedPorts[k] = v + } + } + + if copy.Labels != nil { + copy.Labels = make(map[string]string, len(runConfig.Labels)) + for k, v := range runConfig.Labels { + copy.Labels[k] = v + } + } + + for _, modifier := range modifiers { + modifier(©) + } + return © +} + +func copyStringSlice(orig []string) []string { + if orig == nil { + return nil + } + return append([]string{}, orig...) +} + +// getShell is a helper function which gets the right shell for prefixing the +// shell-form of RUN, ENTRYPOINT and CMD instructions +func getShell(c *container.Config, os string) []string { + if 0 == len(c.Shell) { + return append([]string{}, defaultShellForOS(os)[:]...) + } + return append([]string{}, c.Shell[:]...) +} + +func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container.Config) (bool, error) { + cachedID, err := b.imageProber.Probe(dispatchState.imageID, runConfig) + if cachedID == "" || err != nil { + return false, err + } + fmt.Fprint(b.Stdout, " ---> Using cache\n") + + dispatchState.imageID = cachedID + return true, nil +} + +var defaultLogConfig = container.LogConfig{Type: "none"} + +func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { + if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { + return "", err + } + // Set a log config to override any default value set on the daemon + hostConfig := &container.HostConfig{LogConfig: defaultLogConfig} + container, err := b.containerManager.Create(runConfig, hostConfig) + return container.ID, err +} + +func (b *Builder) create(runConfig *container.Config) (string, error) { + hostConfig := hostConfigFromOptions(b.options) + container, err := b.containerManager.Create(runConfig, hostConfig) + if err != nil { + return "", err + } + // TODO: could this be moved into containerManager.Create() ? 
+ for _, warning := range container.Warnings { + fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) + } + fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(container.ID)) + return container.ID, nil +} + +func hostConfigFromOptions(options *types.ImageBuildOptions) *container.HostConfig { + resources := container.Resources{ + CgroupParent: options.CgroupParent, + CPUShares: options.CPUShares, + CPUPeriod: options.CPUPeriod, + CPUQuota: options.CPUQuota, + CpusetCpus: options.CPUSetCPUs, + CpusetMems: options.CPUSetMems, + Memory: options.Memory, + MemorySwap: options.MemorySwap, + Ulimits: options.Ulimits, + } + + hc := &container.HostConfig{ + SecurityOpt: options.SecurityOpt, + Isolation: options.Isolation, + ShmSize: options.ShmSize, + Resources: resources, + NetworkMode: container.NetworkMode(options.NetworkMode), + // Set a log config to override any default value set on the daemon + LogConfig: defaultLogConfig, + ExtraHosts: options.ExtraHosts, + } + + // For WCOW, the default of 20GB hard-coded in the platform + // is too small for builder scenarios where many users are + // using RUN statements to install large amounts of data. + // Use 127GB as that's the default size of a VHD in Hyper-V. + if runtime.GOOS == "windows" && options.Platform == "windows" { + hc.StorageOpt = make(map[string]string) + hc.StorageOpt["size"] = "127GB" + } + + return hc +} + +// fromSlash works like filepath.FromSlash but with a given OS platform field +func fromSlash(path, platform string) string { + if platform == "windows" { + return strings.Replace(path, "/", "\\", -1) + } + return path +} + +// separator returns a OS path separator for the given OS platform +func separator(platform string) byte { + if platform == "windows" { + return '\\' + } + return '/' +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go new file mode 100644 index 0000000000..1014b16a21 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_linux.go @@ -0,0 +1,88 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "path/filepath" + "strconv" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/symlink" + lcUser "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { + var userStr, grpStr string + parts := strings.Split(chown, ":") + if len(parts) > 2 { + return idtools.IDPair{}, errors.New("invalid chown string format: " + chown) + } + if len(parts) == 1 { + // if no group specified, use the user spec as group as well + userStr, grpStr = parts[0], parts[0] + } else { + userStr, grpStr = parts[0], parts[1] + } + + passwdPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "passwd"), ctrRootPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/passwd path in container rootfs") + } + groupPath, err := symlink.FollowSymlinkInScope(filepath.Join(ctrRootPath, "etc", "group"), ctrRootPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't resolve /etc/group path in container rootfs") + } + uid, err := lookupUser(userStr, passwdPath) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "can't find uid for user "+userStr) + } + gid, err := lookupGroup(grpStr, groupPath) + if err != nil { + return 
idtools.IDPair{}, errors.Wrapf(err, "can't find gid for group "+grpStr) + } + + // convert as necessary because of user namespaces + chownPair, err := idMappings.ToHost(idtools.IDPair{UID: uid, GID: gid}) + if err != nil { + return idtools.IDPair{}, errors.Wrapf(err, "unable to convert uid/gid to host mapping") + } + return chownPair, nil +} + +func lookupUser(userStr, filepath string) (int, error) { + // if the string is actually a uid integer, parse to int and return + // as we don't need to translate with the help of files + uid, err := strconv.Atoi(userStr) + if err == nil { + return uid, nil + } + users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool { + return u.Name == userStr + }) + if err != nil { + return 0, err + } + if len(users) == 0 { + return 0, errors.New("no such user: " + userStr) + } + return users[0].Uid, nil +} + +func lookupGroup(groupStr, filepath string) (int, error) { + // if the string is actually a gid integer, parse to int and return + // as we don't need to translate with the help of files + gid, err := strconv.Atoi(groupStr) + if err == nil { + return gid, nil + } + groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool { + return g.Name == groupStr + }) + if err != nil { + return 0, err + } + if len(groups) == 0 { + return 0, errors.New("no such group: " + groupStr) + } + return groups[0].Gid, nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go new file mode 100644 index 0000000000..26978b48cf --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/internals_windows.go @@ -0,0 +1,7 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import "github.com/docker/docker/pkg/idtools" + +func parseChownFlag(chown, ctrRootPath string, idMappings *idtools.IDMappings) (idtools.IDPair, error) { + return idMappings.RootPair(), nil +} diff --git a/vendor/github.com/docker/docker/builder/dockerfile/metrics.go b/vendor/github.com/docker/docker/builder/dockerfile/metrics.go new file mode 100644 index 0000000000..ceafa7ad62 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/dockerfile/metrics.go @@ -0,0 +1,44 @@ +package dockerfile // import "github.com/docker/docker/builder/dockerfile" + +import ( + "github.com/docker/go-metrics" +) + +var ( + buildsTriggered metrics.Counter + buildsFailed metrics.LabeledCounter +) + +// Build metrics prometheus messages, these values must be initialized before +// using them. See the example below in the "builds_failed" metric definition. 
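To show where these counters are meant to be incremented, here is a hedged sketch of a call site; recordBuildResult is a hypothetical helper (the real wiring lives in the builder's dispatch path), but if it were placed in this file it would compile against the counters initialized in init() below.

// Hypothetical call site: count every attempt, and label failures with
// one of the reason constants from the const block that follows.
func recordBuildResult(err error) {
	buildsTriggered.Inc()
	if err != nil {
		// Exported to Prometheus with reason="dockerfile_syntax_error"
		// as the label value.
		buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc()
	}
}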
+const (
+	metricsDockerfileSyntaxError        = "dockerfile_syntax_error"
+	metricsDockerfileEmptyError         = "dockerfile_empty_error"
+	metricsCommandNotSupportedError     = "command_not_supported_error"
+	metricsErrorProcessingCommandsError = "error_processing_commands_error"
+	metricsBuildTargetNotReachableError = "build_target_not_reachable_error"
+	metricsMissingOnbuildArgumentsError = "missing_onbuild_arguments_error"
+	metricsUnknownInstructionError      = "unknown_instruction_error"
+	metricsBuildCanceled                = "build_canceled"
+)
+
+func init() {
+	buildMetrics := metrics.NewNamespace("builder", "", nil)
+
+	buildsTriggered = buildMetrics.NewCounter("builds_triggered", "Number of triggered image builds")
+	buildsFailed = buildMetrics.NewLabeledCounter("builds_failed", "Number of failed image builds", "reason")
+	for _, r := range []string{
+		metricsDockerfileSyntaxError,
+		metricsDockerfileEmptyError,
+		metricsCommandNotSupportedError,
+		metricsErrorProcessingCommandsError,
+		metricsBuildTargetNotReachableError,
+		metricsMissingOnbuildArgumentsError,
+		metricsUnknownInstructionError,
+		metricsBuildCanceled,
+	} {
+		buildsFailed.WithValues(r)
+	}
+
+	metrics.Register(buildMetrics)
+}
diff --git a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
index 277176ee1c..b065b8a4ea 100644
--- a/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
+++ b/vendor/github.com/docker/docker/builder/dockerfile/parser/parser.go
@@ -7,13 +7,11 @@ import (
 	"fmt"
 	"io"
 	"regexp"
-	"runtime"
 	"strconv"
 	"strings"
 	"unicode"
 
 	"github.com/docker/docker/builder/dockerfile/command"
-	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
 )
 
@@ -81,11 +79,10 @@ func (node *Node) AddChild(child *Node, startLine, endLine int) {
 }
 
 var (
-	dispatch             map[string]func(string, *Directive) (*Node, map[string]bool, error)
-	tokenWhitespace      = regexp.MustCompile(`[\t\v\f\r ]+`)
-	tokenEscapeCommand   = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
-	tokenPlatformCommand = regexp.MustCompile(`^#[ \t]*platform[ \t]*=[ \t]*(?P<platform>.*)$`)
-	tokenComment         = regexp.MustCompile(`^#.*$`)
+	dispatch           map[string]func(string, *Directive) (*Node, map[string]bool, error)
+	tokenWhitespace    = regexp.MustCompile(`[\t\v\f\r ]+`)
+	tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)
+	tokenComment       = regexp.MustCompile(`^#.*$`)
 )
 
 // DefaultEscapeToken is the default escape token
@@ -95,11 +92,9 @@ const DefaultEscapeToken = '\\'
 // parsing directives.
 type Directive struct {
 	escapeToken           rune           // Current escape token
-	platformToken         string         // Current platform token
 	lineContinuationRegex *regexp.Regexp // Current line continuation regex
 	processingComplete    bool           // Whether we are done looking for directives
 	escapeSeen            bool           // Whether the escape directive has been seen
-	platformSeen          bool           // Whether the platform directive has been seen
 }
 
 // setEscapeToken sets the default token for escaping characters in a Dockerfile.
@@ -112,25 +107,9 @@ func (d *Directive) setEscapeToken(s string) error {
 	return nil
 }
 
-// setPlatformToken sets the default platform for pulling images in a Dockerfile.
-func (d *Directive) setPlatformToken(s string) error {
-	s = strings.ToLower(s)
-	valid := []string{runtime.GOOS}
-	if system.LCOWSupported() {
-		valid = append(valid, "linux")
-	}
-	for _, item := range valid {
-		if s == item {
-			d.platformToken = s
-			return nil
-		}
-	}
-	return fmt.Errorf("invalid PLATFORM '%s'. Must be one of %v", s, valid)
-}
-
-// possibleParserDirective looks for one or more parser directives '# escapeToken=<char>' and
-// '# platform=<string>'. Parser directives must precede any builder instruction
-// or other comments, and cannot be repeated.
+// possibleParserDirective looks for parser directives, eg '# escapeToken=<char>'.
+// Parser directives must precede any builder instruction or other comments,
+// and cannot be repeated.
 func (d *Directive) possibleParserDirective(line string) error {
 	if d.processingComplete {
 		return nil
@@ -149,22 +128,6 @@ func (d *Directive) possibleParserDirective(line string) error {
 		}
 	}
 
-	// Only recognise a platform token if LCOW is supported
-	if system.LCOWSupported() {
-		tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
-		if len(tpcMatch) != 0 {
-			for i, n := range tokenPlatformCommand.SubexpNames() {
-				if n == "platform" {
-					if d.platformSeen {
-						return errors.New("only one platform parser directive can be used")
-					}
-					d.platformSeen = true
-					return d.setPlatformToken(tpcMatch[i])
-				}
-			}
-		}
-	}
-
 	d.processingComplete = true
 	return nil
 }
@@ -237,10 +200,7 @@ func newNodeFromLine(line string, directive *Directive) (*Node, error) {
 type Result struct {
 	AST         *Node
 	EscapeToken rune
-	// TODO @jhowardmsft - see https://github.com/moby/moby/issues/34617
-	// This next field will be removed in a future update for LCOW support.
-	OS       string
-	Warnings []string
+	Warnings    []string
 }
 
 // PrintWarnings to the writer
@@ -320,7 +280,6 @@ func Parse(rwc io.Reader) (*Result, error) {
 		AST:         root,
 		Warnings:    warnings,
 		EscapeToken: d.escapeToken,
-		OS:          d.platformToken,
 	}, handleScannerError(scanner.Err())
 }
diff --git a/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go
new file mode 100644
index 0000000000..57f224afc8
--- /dev/null
+++ b/vendor/github.com/docker/docker/builder/dockerignore/dockerignore.go
@@ -0,0 +1,64 @@
+package dockerignore // import "github.com/docker/docker/builder/dockerignore"
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"path/filepath"
+	"strings"
+)
+
+// ReadAll reads a .dockerignore file and returns the list of file patterns
+// to ignore. Note this will trim whitespace from each line as well
+// as use GO's "clean" func to get the shortest/cleanest path for each.
+func ReadAll(reader io.Reader) ([]string, error) {
+	if reader == nil {
+		return nil, nil
+	}
+
+	scanner := bufio.NewScanner(reader)
+	var excludes []string
+	currentLine := 0
+
+	utf8bom := []byte{0xEF, 0xBB, 0xBF}
+	for scanner.Scan() {
+		scannedBytes := scanner.Bytes()
+		// We trim UTF8 BOM
+		if currentLine == 0 {
+			scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom)
+		}
+		pattern := string(scannedBytes)
+		currentLine++
+		// Lines starting with # (comments) are ignored before processing
+		if strings.HasPrefix(pattern, "#") {
+			continue
+		}
+		pattern = strings.TrimSpace(pattern)
+		if pattern == "" {
+			continue
+		}
+		// normalize absolute paths to paths relative to the context
+		// (taking care of '!' prefix)
+		invert := pattern[0] == '!'
+		if invert {
+			pattern = strings.TrimSpace(pattern[1:])
+		}
+		if len(pattern) > 0 {
+			pattern = filepath.Clean(pattern)
+			pattern = filepath.ToSlash(pattern)
+			if len(pattern) > 1 && pattern[0] == '/' {
+				pattern = pattern[1:]
+			}
+		}
+		if invert {
+			pattern = "!"
+ pattern + } + + excludes = append(excludes, pattern) + } + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("Error reading .dockerignore: %v", err) + } + return excludes, nil +} diff --git a/vendor/github.com/docker/docker/builder/fscache/fscache.go b/vendor/github.com/docker/docker/builder/fscache/fscache.go new file mode 100644 index 0000000000..e80f3d5bce --- /dev/null +++ b/vendor/github.com/docker/docker/builder/fscache/fscache.go @@ -0,0 +1,652 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "archive/tar" + "context" + "crypto/sha256" + "encoding/json" + "hash" + "os" + "path/filepath" + "sort" + "sync" + "time" + + "github.com/boltdb/bolt" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/directory" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/tarsum" + "github.com/moby/buildkit/session/filesync" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + "golang.org/x/sync/singleflight" +) + +const dbFile = "fscache.db" +const cacheKey = "cache" +const metaKey = "meta" + +// Backend is a backing implementation for FSCache +type Backend interface { + Get(id string) (string, error) + Remove(id string) error +} + +// FSCache allows syncing remote resources to cached snapshots +type FSCache struct { + opt Opt + transports map[string]Transport + mu sync.Mutex + g singleflight.Group + store *fsCacheStore +} + +// Opt defines options for initializing FSCache +type Opt struct { + Backend Backend + Root string // for storing local metadata + GCPolicy GCPolicy +} + +// GCPolicy defines policy for garbage collection +type GCPolicy struct { + MaxSize uint64 + MaxKeepDuration time.Duration +} + +// NewFSCache returns new FSCache object +func NewFSCache(opt Opt) (*FSCache, error) { + store, err := newFSCacheStore(opt) + if err != nil { + return nil, err + } + return &FSCache{ + store: store, + opt: opt, + transports: make(map[string]Transport), + }, nil +} + +// Transport defines a method for syncing remote data to FSCache +type Transport interface { + Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error +} + +// RemoteIdentifier identifies a transfer request +type RemoteIdentifier interface { + Key() string + SharedKey() string + Transport() string +} + +// RegisterTransport registers a new transport method +func (fsc *FSCache) RegisterTransport(id string, transport Transport) error { + fsc.mu.Lock() + defer fsc.mu.Unlock() + if _, ok := fsc.transports[id]; ok { + return errors.Errorf("transport %v already exists", id) + } + fsc.transports[id] = transport + return nil +} + +// SyncFrom returns a source based on a remote identifier +func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt + trasportID := id.Transport() + fsc.mu.Lock() + transport, ok := fsc.transports[id.Transport()] + if !ok { + fsc.mu.Unlock() + return nil, errors.Errorf("invalid transport %s", trasportID) + } + + logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey()) + fsc.mu.Unlock() + sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) { + var sourceRef *cachedSourceRef + sourceRef, err := fsc.store.Get(id.Key()) + if err == nil { + return sourceRef, nil + } + + // check for unused shared cache + sharedKey := id.SharedKey() + if sharedKey != "" { + r, err := fsc.store.Rebase(sharedKey, 
id.Key()) + if err == nil { + sourceRef = r + } + } + + if sourceRef == nil { + var err error + sourceRef, err = fsc.store.New(id.Key(), sharedKey) + if err != nil { + return nil, errors.Wrap(err, "failed to create remote context") + } + } + + if err := syncFrom(ctx, sourceRef, transport, id); err != nil { + sourceRef.Release() + return nil, err + } + if err := sourceRef.resetSize(-1); err != nil { + return nil, err + } + return sourceRef, nil + }) + if err != nil { + return nil, err + } + ref := sourceRef.(*cachedSourceRef) + if ref.src == nil { // failsafe + return nil, errors.Errorf("invalid empty pull") + } + wc := &wrappedContext{Source: ref.src, closer: func() error { + ref.Release() + return nil + }} + return wc, nil +} + +// DiskUsage reports how much data is allocated by the cache +func (fsc *FSCache) DiskUsage(ctx context.Context) (int64, error) { + return fsc.store.DiskUsage(ctx) +} + +// Prune allows manually cleaning up the cache +func (fsc *FSCache) Prune(ctx context.Context) (uint64, error) { + return fsc.store.Prune(ctx) +} + +// Close stops the gc and closes the persistent db +func (fsc *FSCache) Close() error { + return fsc.store.Close() +} + +func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) { + src := cs.src + if src == nil { + src = remotecontext.NewCachableSource(cs.Dir()) + } + + if !cs.cached { + if err := cs.storage.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(id.Key())) + dt := b.Get([]byte(cacheKey)) + if dt != nil { + if err := src.UnmarshalBinary(dt); err != nil { + return err + } + } else { + return errors.Wrap(src.Scan(), "failed to scan cache records") + } + return nil + }); err != nil { + return err + } + } + + dc := &detectChanges{f: src.HandleChange} + + // todo: probably send a bucket to `Copy` and let it return source + // but need to make sure that tx is safe + if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil { + return errors.Wrapf(err, "failed to copy to %s", cs.Dir()) + } + + if !dc.supported { + if err := src.Scan(); err != nil { + return errors.Wrap(err, "failed to scan cache records after transfer") + } + } + cs.cached = true + cs.src = src + return cs.storage.db.Update(func(tx *bolt.Tx) error { + dt, err := src.MarshalBinary() + if err != nil { + return err + } + b := tx.Bucket([]byte(id.Key())) + return b.Put([]byte(cacheKey), dt) + }) +} + +type fsCacheStore struct { + mu sync.Mutex + sources map[string]*cachedSource + db *bolt.DB + fs Backend + gcTimer *time.Timer + gcPolicy GCPolicy +} + +// CachePolicy defines policy for keeping a resource in cache +type CachePolicy struct { + Priority int + LastUsed time.Time +} + +func defaultCachePolicy() CachePolicy { + return CachePolicy{Priority: 10, LastUsed: time.Now()} +} + +func newFSCacheStore(opt Opt) (*fsCacheStore, error) { + if err := os.MkdirAll(opt.Root, 0700); err != nil { + return nil, err + } + p := filepath.Join(opt.Root, dbFile) + db, err := bolt.Open(p, 0600, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to open database file %s") + } + s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy} + db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + dt := b.Get([]byte(metaKey)) + if dt == nil { + return nil + } + var sm sourceMeta + if err := json.Unmarshal(dt, &sm); err != nil { + return err + } + dir, err := s.fs.Get(sm.BackendID) + if err != nil { + return err // TODO: handle gracefully + 
} + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: string(name), + dir: dir, + sourceMeta: sm, + storage: s, + } + s.sources[string(name)] = source + return nil + }) + }) + + s.gcTimer = s.startPeriodicGC(5 * time.Minute) + return s, nil +} + +func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer { + var t *time.Timer + t = time.AfterFunc(interval, func() { + if err := s.GC(); err != nil { + logrus.Errorf("build gc error: %v", err) + } + t.Reset(interval) + }) + return t +} + +func (s *fsCacheStore) Close() error { + s.gcTimer.Stop() + return s.db.Close() +} + +func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + if err := s.db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(id)) + if err != nil { + return err + } + backendID := stringid.GenerateRandomID() + dir, err := s.fs.Get(backendID) + if err != nil { + return err + } + source := &cachedSource{ + refs: make(map[*cachedSourceRef]struct{}), + id: id, + dir: dir, + sourceMeta: sourceMeta{ + BackendID: backendID, + SharedKey: sharedKey, + CachePolicy: defaultCachePolicy(), + }, + storage: s, + } + dt, err := json.Marshal(source.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + s.sources[id] = source + ret = source + return nil + }); err != nil { + return nil, err + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + var ret *cachedSource + for id, snap := range s.sources { + if snap.SharedKey == sharedKey && len(snap.refs) == 0 { + if err := s.db.Update(func(tx *bolt.Tx) error { + if err := tx.DeleteBucket([]byte(id)); err != nil { + return err + } + b, err := tx.CreateBucket([]byte(newid)) + if err != nil { + return err + } + snap.id = newid + snap.CachePolicy = defaultCachePolicy() + dt, err := json.Marshal(snap.sourceMeta) + if err != nil { + return err + } + if err := b.Put([]byte(metaKey), dt); err != nil { + return err + } + delete(s.sources, id) + s.sources[newid] = snap + return nil + }); err != nil { + return nil, err + } + ret = snap + break + } + } + if ret == nil { + return nil, errors.Errorf("no candidate for rebase") + } + return ret.getRef(), nil +} + +func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) { + s.mu.Lock() + defer s.mu.Unlock() + src, ok := s.sources[id] + if !ok { + return nil, errors.Errorf("not found") + } + return src.getRef(), nil +} + +// DiskUsage reports how much data is allocated by the cache +func (s *fsCacheStore) DiskUsage(ctx context.Context) (int64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size int64 + + for _, snap := range s.sources { + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { + return 0, err + } + size += ss + } + } + return size, nil +} + +// Prune allows manually cleaning up the cache +func (s *fsCacheStore) Prune(ctx context.Context) (uint64, error) { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + for id, snap := range s.sources { + select { + case <-ctx.Done(): + logrus.Debugf("Cache prune operation cancelled, pruned size: %d", size) + // when the context is cancelled, only return current size and nil + return size, nil + default: + } + if len(snap.refs) == 0 { + ss, err := snap.getSize(ctx) + if err != nil { + return size, err + } + if err := s.delete(id); err != nil { + return size, errors.Wrapf(err, 
"failed to delete %s", id) + } + size += uint64(ss) + } + } + return size, nil +} + +// GC runs a garbage collector on FSCache +func (s *fsCacheStore) GC() error { + s.mu.Lock() + defer s.mu.Unlock() + var size uint64 + + ctx := context.Background() + cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration) + var blacklist []*cachedSource + + for id, snap := range s.sources { + if len(snap.refs) == 0 { + if cutoff.After(snap.CachePolicy.LastUsed) { + if err := s.delete(id); err != nil { + return errors.Wrapf(err, "failed to delete %s", id) + } + } else { + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + size += uint64(ss) + blacklist = append(blacklist, snap) + } + } + } + + sort.Sort(sortableCacheSources(blacklist)) + for _, snap := range blacklist { + if size <= s.gcPolicy.MaxSize { + break + } + ss, err := snap.getSize(ctx) + if err != nil { + return err + } + if err := s.delete(snap.id); err != nil { + return errors.Wrapf(err, "failed to delete %s", snap.id) + } + size -= uint64(ss) + } + return nil +} + +// keep mu while calling this +func (s *fsCacheStore) delete(id string) error { + src, ok := s.sources[id] + if !ok { + return nil + } + if len(src.refs) > 0 { + return errors.Errorf("can't delete %s because it has active references", id) + } + delete(s.sources, id) + if err := s.db.Update(func(tx *bolt.Tx) error { + return tx.DeleteBucket([]byte(id)) + }); err != nil { + return err + } + return s.fs.Remove(src.BackendID) +} + +type sourceMeta struct { + SharedKey string + BackendID string + CachePolicy CachePolicy + Size int64 +} + +type cachedSource struct { + sourceMeta + refs map[*cachedSourceRef]struct{} + id string + dir string + src *remotecontext.CachableSource + storage *fsCacheStore + cached bool // keep track if cache is up to date +} + +type cachedSourceRef struct { + *cachedSource +} + +func (cs *cachedSource) Dir() string { + return cs.dir +} + +// hold storage lock before calling +func (cs *cachedSource) getRef() *cachedSourceRef { + ref := &cachedSourceRef{cachedSource: cs} + cs.refs[ref] = struct{}{} + return ref +} + +// hold storage lock before calling +func (cs *cachedSource) getSize(ctx context.Context) (int64, error) { + if cs.sourceMeta.Size < 0 { + ss, err := directory.Size(ctx, cs.dir) + if err != nil { + return 0, err + } + if err := cs.resetSize(ss); err != nil { + return 0, err + } + return ss, nil + } + return cs.sourceMeta.Size, nil +} + +func (cs *cachedSource) resetSize(val int64) error { + cs.sourceMeta.Size = val + return cs.saveMeta() +} +func (cs *cachedSource) saveMeta() error { + return cs.storage.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte(cs.id)) + dt, err := json.Marshal(cs.sourceMeta) + if err != nil { + return err + } + return b.Put([]byte(metaKey), dt) + }) +} + +func (csr *cachedSourceRef) Release() error { + csr.cachedSource.storage.mu.Lock() + defer csr.cachedSource.storage.mu.Unlock() + delete(csr.cachedSource.refs, csr) + if len(csr.cachedSource.refs) == 0 { + go csr.cachedSource.storage.GC() + } + return nil +} + +type detectChanges struct { + f fsutil.ChangeFunc + supported bool +} + +func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error { + if dc == nil { + return nil + } + return dc.f(kind, path, fi, err) +} + +func (dc *detectChanges) MarkSupported(v bool) { + if dc == nil { + return + } + dc.supported = v +} + +func (dc *detectChanges) ContentHasher() fsutil.ContentHasher { + return newTarsumHash +} + +type wrappedContext struct { + builder.Source 
+ closer func() error +} + +func (wc *wrappedContext) Close() error { + if err := wc.Source.Close(); err != nil { + return err + } + return wc.closer() +} + +type sortableCacheSources []*cachedSource + +// Len is the number of elements in the collection. +func (s sortableCacheSources) Len() int { + return len(s) +} + +// Less reports whether the element with +// index i should sort before the element with index j. +func (s sortableCacheSources) Less(i, j int) bool { + return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed) +} + +// Swap swaps the elements with indexes i and j. +func (s sortableCacheSources) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func newTarsumHash(stat *fsutil.Stat) (hash.Hash, error) { + fi := &fsutil.StatInfo{stat} + p := stat.Path + if fi.IsDir() { + p += string(os.PathSeparator) + } + h, err := archive.FileInfoHeader(p, fi, stat.Linkname) + if err != nil { + return nil, err + } + h.Name = p + h.Uid = int(stat.Uid) + h.Gid = int(stat.Gid) + h.Linkname = stat.Linkname + if stat.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range stat.Xattrs { + h.Xattrs[k] = string(v) + } + } + + tsh := &tarsumHash{h: h, Hash: sha256.New()} + tsh.Reset() + return tsh, nil +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.h, tsh.Hash) +} + +type tarsumHash struct { + hash.Hash + h *tar.Header +} diff --git a/vendor/github.com/docker/docker/builder/fscache/naivedriver.go b/vendor/github.com/docker/docker/builder/fscache/naivedriver.go new file mode 100644 index 0000000000..053509aecf --- /dev/null +++ b/vendor/github.com/docker/docker/builder/fscache/naivedriver.go @@ -0,0 +1,28 @@ +package fscache // import "github.com/docker/docker/builder/fscache" + +import ( + "os" + "path/filepath" + + "github.com/pkg/errors" +) + +// NewNaiveCacheBackend is a basic backend implementation for fscache +func NewNaiveCacheBackend(root string) Backend { + return &naiveCacheBackend{root: root} +} + +type naiveCacheBackend struct { + root string +} + +func (tcb *naiveCacheBackend) Get(id string) (string, error) { + d := filepath.Join(tcb.root, id) + if err := os.MkdirAll(d, 0700); err != nil { + return "", errors.Wrapf(err, "failed to create tmp dir for %s", d) + } + return d, nil +} +func (tcb *naiveCacheBackend) Remove(id string) error { + return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id))) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/archive.go b/vendor/github.com/docker/docker/builder/remotecontext/archive.go new file mode 100644 index 0000000000..6d247f945d --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/archive.go @@ -0,0 +1,125 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "io" + "os" + "path/filepath" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/tarsum" + "github.com/pkg/errors" +) + +type archiveContext struct { + root containerfs.ContainerFS + sums tarsum.FileInfoSums +} + +func (c *archiveContext) Close() error { + return c.root.RemoveAll(c.root.Path()) +} + +func convertPathError(err error, cleanpath string) error { + if err, ok := err.(*os.PathError); ok { + err.Path = cleanpath + return err + } + return err +} + +type modifiableContext interface { + 
builder.Source + // Remove deletes the entry specified by `path`. + // It is usual for directory entries to delete all its subentries. + Remove(path string) error +} + +// FromArchive returns a build source from a tar stream. +// +// It extracts the tar stream to a temporary folder that is deleted as soon as +// the Context is closed. +// As the extraction happens, a tarsum is calculated for every file, and the set of +// all those sums then becomes the source of truth for all operations on this Context. +// +// Closing tarStream has to be done by the caller. +func FromArchive(tarStream io.Reader) (builder.Source, error) { + root, err := ioutils.TempDir("", "docker-builder") + if err != nil { + return nil, err + } + + // Assume local file system. Since it's coming from a tar file. + tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)} + + // Make sure we clean-up upon error. In the happy case the caller + // is expected to manage the clean-up + defer func() { + if err != nil { + tsc.Close() + } + }() + + decompressedStream, err := archive.DecompressStream(tarStream) + if err != nil { + return nil, err + } + + sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) + if err != nil { + return nil, err + } + + err = chrootarchive.Untar(sum, root, nil) + if err != nil { + return nil, err + } + + tsc.sums = sum.GetSums() + return tsc, nil +} + +func (c *archiveContext) Root() containerfs.ContainerFS { + return c.root +} + +func (c *archiveContext) Remove(path string) error { + _, fullpath, err := normalize(path, c.root) + if err != nil { + return err + } + return c.root.RemoveAll(fullpath) +} + +func (c *archiveContext) Hash(path string) (string, error) { + cleanpath, fullpath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + rel, err := c.root.Rel(c.root.Path(), fullpath) + if err != nil { + return "", convertPathError(err, cleanpath) + } + + // Use the checksum of the followed path(not the possible symlink) because + // this is the file that is actually copied. + if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { + return tsInfo.Sum(), nil + } + // We set sum to path by default for the case where GetFile returns nil. + // The usual case is if relative path is empty. 
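+	// e.g. hashing the context root itself: it has no tarsum entry of its
+	// own, so the raw path doubles as the cache key.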
+ return path, nil // backwards compat TODO: see if really needed +} + +func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) { + cleanPath = root.Clean(string(root.Separator()) + path)[1:] + fullPath, err = root.ResolveScopedPath(path, true) + if err != nil { + return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath) + } + return +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/detect.go b/vendor/github.com/docker/docker/builder/remotecontext/detect.go new file mode 100644 index 0000000000..af4e20f886 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/detect.go @@ -0,0 +1,180 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/containerd/continuity/driver" + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/dockerfile/parser" + "github.com/docker/docker/builder/dockerignore" + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ClientSessionRemote is identifier for client-session context transport +const ClientSessionRemote = "client-session" + +// Detect returns a context and dockerfile from remote location or local +// archive. progressReader is only used if remoteURL is actually a URL +// (not empty, and not a Git endpoint). +func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *parser.Result, err error) { + remoteURL := config.Options.RemoteContext + dockerfilePath := config.Options.Dockerfile + + switch { + case remoteURL == "": + remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath) + case remoteURL == ClientSessionRemote: + res, err := parser.Parse(config.Source) + if err != nil { + return nil, nil, err + } + return nil, res, nil + case urlutil.IsGitURL(remoteURL): + remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath) + case urlutil.IsURL(remoteURL): + remote, dockerfile, err = newURLRemote(remoteURL, dockerfilePath, config.ProgressWriter.ProgressReaderFunc) + default: + err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) + } + return +} + +func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) { + defer rc.Close() + c, err := FromArchive(rc) + if err != nil { + return nil, nil, err + } + + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func withDockerfileFromContext(c modifiableContext, dockerfilePath string) (builder.Source, *parser.Result, error) { + df, err := openAt(c, dockerfilePath) + if err != nil { + if os.IsNotExist(err) { + if dockerfilePath == builder.DefaultDockerfileName { + lowercase := strings.ToLower(dockerfilePath) + if _, err := StatAt(c, lowercase); err == nil { + return withDockerfileFromContext(c, lowercase) + } + } + return nil, nil, errors.Errorf("Cannot locate specified Dockerfile: %s", dockerfilePath) // backwards compatible error + } + c.Close() + return nil, nil, err + } + + res, err := readAndParseDockerfile(dockerfilePath, df) + if err != nil { + return nil, nil, err + } + + df.Close() + + if err := removeDockerfile(c, dockerfilePath); err != nil { + c.Close() + return nil, nil, err + } + + return c, res, nil +} + +func newGitRemote(gitURL string, dockerfilePath string) (builder.Source, 
*parser.Result, error) { + c, err := MakeGitContext(gitURL) // TODO: change this to NewLazySource + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(c.(modifiableContext), dockerfilePath) +} + +func newURLRemote(url string, dockerfilePath string, progressReader func(in io.ReadCloser) io.ReadCloser) (builder.Source, *parser.Result, error) { + contentType, content, err := downloadRemote(url) + if err != nil { + return nil, nil, err + } + defer content.Close() + + switch contentType { + case mimeTypes.TextPlain: + res, err := parser.Parse(progressReader(content)) + return nil, res, err + default: + source, err := FromArchive(progressReader(content)) + if err != nil { + return nil, nil, err + } + return withDockerfileFromContext(source.(modifiableContext), dockerfilePath) + } +} + +func removeDockerfile(c modifiableContext, filesToRemove ...string) error { + f, err := openAt(c, ".dockerignore") + // Note that a missing .dockerignore file isn't treated as an error + switch { + case os.IsNotExist(err): + return nil + case err != nil: + return err + } + excludes, err := dockerignore.ReadAll(f) + if err != nil { + f.Close() + return err + } + f.Close() + filesToRemove = append([]string{".dockerignore"}, filesToRemove...) + for _, fileToRemove := range filesToRemove { + if rm, _ := fileutils.Matches(fileToRemove, excludes); rm { + if err := c.Remove(fileToRemove); err != nil { + logrus.Errorf("failed to remove %s: %v", fileToRemove, err) + } + } + } + return nil +} + +func readAndParseDockerfile(name string, rc io.Reader) (*parser.Result, error) { + br := bufio.NewReader(rc) + if _, err := br.Peek(1); err != nil { + if err == io.EOF { + return nil, errors.Errorf("the Dockerfile (%s) cannot be empty", name) + } + return nil, errors.Wrap(err, "unexpected error reading Dockerfile") + } + return parser.Parse(br) +} + +func openAt(remote builder.Source, path string) (driver.File, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Open(fullPath) +} + +// StatAt is a helper for calling Stat on a path from a source +func StatAt(remote builder.Source, path string) (os.FileInfo, error) { + fullPath, err := FullPath(remote, path) + if err != nil { + return nil, err + } + return remote.Root().Stat(fullPath) +} + +// FullPath is a helper for getting a full path for a path from a source +func FullPath(remote builder.Source, path string) (string, error) { + fullPath, err := remote.Root().ResolveScopedPath(path, true) + if err != nil { + return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error + } + return fullPath, nil +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/filehash.go b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go new file mode 100644 index 0000000000..3565dd8279 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/filehash.go @@ -0,0 +1,45 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "archive/tar" + "crypto/sha256" + "hash" + "os" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/tarsum" +) + +// NewFileHash returns new hash that is used for the builder cache keys +func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) { + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return nil, err + } + } + hdr, err := 
archive.FileInfoHeader(name, fi, link) + if err != nil { + return nil, err + } + if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return nil, err + } + tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()} + tsh.Reset() // initialize header + return tsh, nil +} + +type tarsumHash struct { + hash.Hash + hdr *tar.Header +} + +// Reset resets the Hash to its initial state. +func (tsh *tarsumHash) Reset() { + // comply with hash.Hash and reset to the state hash had before any writes + tsh.Hash.Reset() + tarsum.WriteV1Header(tsh.hdr, tsh.Hash) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/generate.go b/vendor/github.com/docker/docker/builder/remotecontext/generate.go new file mode 100644 index 0000000000..84c1b3b5ea --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/generate.go @@ -0,0 +1,3 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +//go:generate protoc --gogoslick_out=. tarsum.proto diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git.go b/vendor/github.com/docker/docker/builder/remotecontext/git.go new file mode 100644 index 0000000000..1583ca28d0 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/git.go @@ -0,0 +1,35 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + + "github.com/docker/docker/builder" + "github.com/docker/docker/builder/remotecontext/git" + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. +func MakeGitContext(gitURL string) (builder.Source, error) { + root, err := git.Clone(gitURL) + if err != nil { + return nil, err + } + + c, err := archive.Tar(root, archive.Uncompressed) + if err != nil { + return nil, err + } + + defer func() { + err := c.Close() + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context") + } + err = os.RemoveAll(root) + if err != nil { + logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root") + } + }() + return FromArchive(c) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go new file mode 100644 index 0000000000..77a45beff3 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/git/gitutils.go @@ -0,0 +1,204 @@ +package git // import "github.com/docker/docker/builder/remotecontext/git" + +import ( + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/urlutil" + "github.com/pkg/errors" +) + +type gitRepo struct { + remote string + ref string + subdir string +} + +// Clone clones a repository into a newly created directory which +// will be under "docker-build-git" +func Clone(remoteURL string) (string, error) { + repo, err := parseRemoteURL(remoteURL) + + if err != nil { + return "", err + } + + return cloneGitRepo(repo) +} + +func cloneGitRepo(repo gitRepo) (checkoutDir string, err error) { + fetch := fetchArgs(repo.remote, repo.ref) + + root, err := ioutil.TempDir("", "docker-build-git") + if err != nil { + return "", err + } + + defer func() { + if err != nil { + 
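+			// the clone failed part-way; remove the temporary checkout
+			// directory instead of leaking it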
os.RemoveAll(root) + } + }() + + if out, err := gitWithinDir(root, "init"); err != nil { + return "", errors.Wrapf(err, "failed to init repo at %s: %s", root, out) + } + + // Add origin remote for compatibility with previous implementation that + // used "git clone" and also to make sure local refs are created for branches + if out, err := gitWithinDir(root, "remote", "add", "origin", repo.remote); err != nil { + return "", errors.Wrapf(err, "failed add origin repo at %s: %s", repo.remote, out) + } + + if output, err := gitWithinDir(root, fetch...); err != nil { + return "", errors.Wrapf(err, "error fetching: %s", output) + } + + checkoutDir, err = checkoutGit(root, repo.ref, repo.subdir) + if err != nil { + return "", err + } + + cmd := exec.Command("git", "submodule", "update", "--init", "--recursive", "--depth=1") + cmd.Dir = root + output, err := cmd.CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "error initializing submodules: %s", output) + } + + return checkoutDir, nil +} + +func parseRemoteURL(remoteURL string) (gitRepo, error) { + repo := gitRepo{} + + if !isGitTransport(remoteURL) { + remoteURL = "https://" + remoteURL + } + + var fragment string + if strings.HasPrefix(remoteURL, "git@") { + // git@.. is not an URL, so cannot be parsed as URL + parts := strings.SplitN(remoteURL, "#", 2) + + repo.remote = parts[0] + if len(parts) == 2 { + fragment = parts[1] + } + repo.ref, repo.subdir = getRefAndSubdir(fragment) + } else { + u, err := url.Parse(remoteURL) + if err != nil { + return repo, err + } + + repo.ref, repo.subdir = getRefAndSubdir(u.Fragment) + u.Fragment = "" + repo.remote = u.String() + } + return repo, nil +} + +func getRefAndSubdir(fragment string) (ref string, subdir string) { + refAndDir := strings.SplitN(fragment, ":", 2) + ref = "master" + if len(refAndDir[0]) != 0 { + ref = refAndDir[0] + } + if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { + subdir = refAndDir[1] + } + return +} + +func fetchArgs(remoteURL string, ref string) []string { + args := []string{"fetch"} + + if supportsShallowClone(remoteURL) { + args = append(args, "--depth", "1") + } + + return append(args, "origin", ref) +} + +// Check if a given git URL supports a shallow git clone, +// i.e. it is a non-HTTP server or a smart HTTP server. +func supportsShallowClone(remoteURL string) bool { + if urlutil.IsURL(remoteURL) { + // Check if the HTTP server is smart + + // Smart servers must correctly respond to a query for the git-upload-pack service + serviceURL := remoteURL + "/info/refs?service=git-upload-pack" + + // Try a HEAD request and fallback to a Get request on error + res, err := http.Head(serviceURL) + if err != nil || res.StatusCode != http.StatusOK { + res, err = http.Get(serviceURL) + if err == nil { + res.Body.Close() + } + if err != nil || res.StatusCode != http.StatusOK { + // request failed + return false + } + } + + if res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { + // Fallback, not a smart server + return false + } + return true + } + // Non-HTTP protocols always support shallow clones + return true +} + +func checkoutGit(root, ref, subdir string) (string, error) { + // Try checking out by ref name first. 
This will work on branches and sets + // .git/HEAD to the current branch name + if output, err := gitWithinDir(root, "checkout", ref); err != nil { + // If checking out by branch name fails check out the last fetched ref + if _, err2 := gitWithinDir(root, "checkout", "FETCH_HEAD"); err2 != nil { + return "", errors.Wrapf(err, "error checking out %s: %s", ref, output) + } + } + + if subdir != "" { + newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, subdir), root) + if err != nil { + return "", errors.Wrapf(err, "error setting git context, %q not within git root", subdir) + } + + fi, err := os.Stat(newCtx) + if err != nil { + return "", err + } + if !fi.IsDir() { + return "", errors.Errorf("error setting git context, not a directory: %s", newCtx) + } + root = newCtx + } + + return root, nil +} + +func gitWithinDir(dir string, args ...string) ([]byte, error) { + a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} + return git(append(a, args...)...) +} + +func git(args ...string) ([]byte, error) { + return exec.Command("git", args...).CombinedOutput() +} + +// isGitTransport returns true if the provided str is a git transport by inspecting +// the prefix of the string for known protocols used in git. +func isGitTransport(str string) bool { + return urlutil.IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go new file mode 100644 index 0000000000..442cecad85 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/lazycontext.go @@ -0,0 +1,102 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "encoding/hex" + "os" + "strings" + + "github.com/docker/docker/builder" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/pools" + "github.com/pkg/errors" +) + +// NewLazySource creates a new LazyContext. LazyContext defines a hashed build +// context based on a root directory. Individual files are hashed first time +// they are asked. It is not safe to call methods of LazyContext concurrently. +func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) { + return &lazySource{ + root: root, + sums: make(map[string]string), + }, nil +} + +type lazySource struct { + root containerfs.ContainerFS + sums map[string]string +} + +func (c *lazySource) Root() containerfs.ContainerFS { + return c.root +} + +func (c *lazySource) Close() error { + return nil +} + +func (c *lazySource) Hash(path string) (string, error) { + cleanPath, fullPath, err := normalize(path, c.root) + if err != nil { + return "", err + } + + relPath, err := Rel(c.root, fullPath) + if err != nil { + return "", errors.WithStack(convertPathError(err, cleanPath)) + } + + fi, err := os.Lstat(fullPath) + if err != nil { + // Backwards compatibility: a missing file returns a path as hash. + // This is reached in the case of a broken symlink. 
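+	// e.g. a dangling symlink: its resolved target fails Lstat, so the
+	// relative path stands in for a content hash instead of failing the build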
+ return relPath, nil + } + + sum, ok := c.sums[relPath] + if !ok { + sum, err = c.prepareHash(relPath, fi) + if err != nil { + return "", err + } + } + + return sum, nil +} + +func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) { + p := c.root.Join(c.root.Path(), relPath) + h, err := NewFileHash(p, relPath, fi) + if err != nil { + return "", errors.Wrapf(err, "failed to create hash for %s", relPath) + } + if fi.Mode().IsRegular() && fi.Size() > 0 { + f, err := c.root.Open(p) + if err != nil { + return "", errors.Wrapf(err, "failed to open %s", relPath) + } + defer f.Close() + if _, err := pools.Copy(h, f); err != nil { + return "", errors.Wrapf(err, "failed to copy file data for %s", relPath) + } + } + sum := hex.EncodeToString(h.Sum(nil)) + c.sums[relPath] = sum + return sum, nil +} + +// Rel makes a path relative to base path. Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if basepath.OS() == "windows" { + pfx := basepath.Path() + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return basepath.Rel(basepath.Path(), targpath) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go new file mode 100644 index 0000000000..e8a6210e9c --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/mimetype.go @@ -0,0 +1,27 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "mime" + "net/http" +) + +// mimeTypes stores the MIME content type. +var mimeTypes = struct { + TextPlain string + OctetStream string +}{"text/plain", "application/octet-stream"} + +// detectContentType returns a best guess representation of the MIME +// content type for the bytes at c. The value detected by +// http.DetectContentType is guaranteed not be nil, defaulting to +// application/octet-stream when a better guess cannot be made. The +// result of this detection is then run through mime.ParseMediaType() +// which separates the actual MIME string from any parameters. 
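A minimal standalone illustration of that two-step detection, using only the standard library (the sample bytes are invented):

package main

import (
	"fmt"
	"mime"
	"net/http"
)

func main() {
	sample := []byte("FROM alpine\nRUN echo hello\n")
	// Sniff the bytes; plain text comes back as "text/plain; charset=utf-8".
	ct := http.DetectContentType(sample)
	// Split the media type proper from its parameters.
	mediaType, params, err := mime.ParseMediaType(ct)
	if err != nil {
		panic(err)
	}
	fmt.Println(mediaType, params["charset"]) // text/plain utf-8
}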
+func detectContentType(c []byte) (string, map[string]string, error) { + ct := http.DetectContentType(c) + contentType, args, err := mime.ParseMediaType(ct) + if err != nil { + return "", nil, err + } + return contentType, args, nil +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/remote.go b/vendor/github.com/docker/docker/builder/remotecontext/remote.go new file mode 100644 index 0000000000..1fb80549b8 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/remote.go @@ -0,0 +1,127 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "regexp" + + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +// When downloading remote contexts, limit the amount (in bytes) +// to be read from the response body in order to detect its Content-Type +const maxPreambleLength = 100 + +const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` + +var mimeRe = regexp.MustCompile(acceptableRemoteMIME) + +// downloadRemote context from a url and returns it, along with the parsed content type +func downloadRemote(remoteURL string) (string, io.ReadCloser, error) { + response, err := GetWithStatusError(remoteURL) + if err != nil { + return "", nil, errors.Wrapf(err, "error downloading remote context %s", remoteURL) + } + + contentType, contextReader, err := inspectResponse( + response.Header.Get("Content-Type"), + response.Body, + response.ContentLength) + if err != nil { + response.Body.Close() + return "", nil, errors.Wrapf(err, "error detecting content type for remote %s", remoteURL) + } + + return contentType, ioutils.NewReadCloserWrapper(contextReader, response.Body.Close), nil +} + +// GetWithStatusError does an http.Get() and returns an error if the +// status code is 4xx or 5xx. +func GetWithStatusError(address string) (resp *http.Response, err error) { + if resp, err = http.Get(address); err != nil { + if uerr, ok := err.(*url.Error); ok { + if derr, ok := uerr.Err.(*net.DNSError); ok && !derr.IsTimeout { + return nil, errdefs.NotFound(err) + } + } + return nil, errdefs.System(err) + } + if resp.StatusCode < 400 { + return resp, nil + } + msg := fmt.Sprintf("failed to GET %s with status %s", address, resp.Status) + body, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + if err != nil { + return nil, errdefs.System(errors.New(msg + ": error reading body")) + } + + msg += ": " + string(bytes.TrimSpace(body)) + switch resp.StatusCode { + case http.StatusNotFound: + return nil, errdefs.NotFound(errors.New(msg)) + case http.StatusBadRequest: + return nil, errdefs.InvalidParameter(errors.New(msg)) + case http.StatusUnauthorized: + return nil, errdefs.Unauthorized(errors.New(msg)) + case http.StatusForbidden: + return nil, errdefs.Forbidden(errors.New(msg)) + } + return nil, errdefs.Unknown(errors.New(msg)) +} + +// inspectResponse looks into the http response data at r to determine whether its +// content-type is on the list of acceptable content types for remote build contexts. +// This function returns: +// - a string representation of the detected content-type +// - an io.Reader for the response body +// - an error value which will be non-nil either when something goes wrong while +// reading bytes from r or when the detected content-type is not acceptable. 
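The peek-and-reassemble pattern at the heart of inspectResponse can also be shown in isolation. This sketch (the body string is invented) reads at most maxPreambleLength bytes for sniffing, then stitches the consumed prefix back in front of the unread remainder with io.MultiReader:

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	const maxPreambleLength = 100 // same limit as above
	body := strings.NewReader("FROM alpine\nRUN echo hello\n")

	// Read the preamble that will be handed to the content-type sniffer.
	preamble := make([]byte, maxPreambleLength)
	n, _ := body.Read(preamble)

	// Reassemble the full stream: consumed prefix first, remainder second.
	whole := io.MultiReader(bytes.NewReader(preamble[:n]), body)

	all, _ := ioutil.ReadAll(whole)
	fmt.Printf("sniffed %d bytes, stream still carries %d bytes\n", n, len(all))
}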
+func inspectResponse(ct string, r io.Reader, clen int64) (string, io.Reader, error) { + plen := clen + if plen <= 0 || plen > maxPreambleLength { + plen = maxPreambleLength + } + + preamble := make([]byte, plen) + rlen, err := r.Read(preamble) + if rlen == 0 { + return ct, r, errors.New("empty response") + } + if err != nil && err != io.EOF { + return ct, r, err + } + + preambleR := bytes.NewReader(preamble[:rlen]) + bodyReader := io.MultiReader(preambleR, r) + // Some web servers will use application/octet-stream as the default + // content type for files without an extension (e.g. 'Dockerfile') + // so if we receive this value we better check for text content + contentType := ct + if len(ct) == 0 || ct == mimeTypes.OctetStream { + contentType, _, err = detectContentType(preamble) + if err != nil { + return contentType, bodyReader, err + } + } + + contentType = selectAcceptableMIME(contentType) + var cterr error + if len(contentType) == 0 { + cterr = fmt.Errorf("unsupported Content-Type %q", ct) + contentType = ct + } + + return contentType, bodyReader, cterr +} + +func selectAcceptableMIME(ct string) string { + return mimeRe.FindString(ct) +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go new file mode 100644 index 0000000000..9e8c7d6072 --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.go @@ -0,0 +1,157 @@ +package remotecontext // import "github.com/docker/docker/builder/remotecontext" + +import ( + "os" + "sync" + + "github.com/docker/docker/pkg/containerfs" + iradix "github.com/hashicorp/go-immutable-radix" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" +) + +type hashed interface { + Digest() digest.Digest +} + +// CachableSource is a source that contains cache records for its contents +type CachableSource struct { + mu sync.Mutex + root containerfs.ContainerFS + tree *iradix.Tree + txn *iradix.Txn +} + +// NewCachableSource creates new CachableSource +func NewCachableSource(root string) *CachableSource { + ts := &CachableSource{ + tree: iradix.New(), + root: containerfs.NewLocalContainerFS(root), + } + return ts +} + +// MarshalBinary marshals current cache information to a byte array +func (cs *CachableSource) MarshalBinary() ([]byte, error) { + b := TarsumBackup{Hashes: make(map[string]string)} + root := cs.getRoot() + root.Walk(func(k []byte, v interface{}) bool { + b.Hashes[string(k)] = v.(*fileInfo).sum + return false + }) + return b.Marshal() +} + +// UnmarshalBinary decodes cache information for presented byte array +func (cs *CachableSource) UnmarshalBinary(data []byte) error { + var b TarsumBackup + if err := b.Unmarshal(data); err != nil { + return err + } + txn := iradix.New().Txn() + for p, v := range b.Hashes { + txn.Insert([]byte(p), &fileInfo{sum: v}) + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// Scan rescans the cache information from the file system +func (cs *CachableSource) Scan() error { + lc, err := NewLazySource(cs.root) + if err != nil { + return err + } + txn := iradix.New().Txn() + err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error { + if err != nil { + return errors.Wrapf(err, "failed to walk %s", path) + } + rel, err := Rel(cs.root, path) + if err != nil { + return err + } + h, err := lc.Hash(rel) + if err != nil { + return err + } + txn.Insert([]byte(rel), &fileInfo{sum: h}) + return nil + }) 
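+	// a failed walk returns before Commit below, leaving the previously
+	// committed tree untouched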
+ if err != nil { + return err + } + cs.mu.Lock() + defer cs.mu.Unlock() + cs.tree = txn.Commit() + return nil +} + +// HandleChange notifies the source about a modification operation +func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + cs.mu.Lock() + if cs.txn == nil { + cs.txn = cs.tree.Txn() + } + if kind == fsutil.ChangeKindDelete { + cs.txn.Delete([]byte(p)) + cs.mu.Unlock() + return + } + + h, ok := fi.(hashed) + if !ok { + cs.mu.Unlock() + return errors.Errorf("invalid fileinfo: %s", p) + } + + hfi := &fileInfo{ + sum: h.Digest().Hex(), + } + cs.txn.Insert([]byte(p), hfi) + cs.mu.Unlock() + return nil +} + +func (cs *CachableSource) getRoot() *iradix.Node { + cs.mu.Lock() + if cs.txn != nil { + cs.tree = cs.txn.Commit() + cs.txn = nil + } + t := cs.tree + cs.mu.Unlock() + return t.Root() +} + +// Close closes the source +func (cs *CachableSource) Close() error { + return nil +} + +// Hash returns a hash for a single file in the source +func (cs *CachableSource) Hash(path string) (string, error) { + n := cs.getRoot() + // TODO: check this for symlinks + v, ok := n.Get([]byte(path)) + if !ok { + return path, nil + } + return v.(*fileInfo).sum, nil +} + +// Root returns a root directory for the source +func (cs *CachableSource) Root() containerfs.ContainerFS { + return cs.root +} + +type fileInfo struct { + sum string +} + +func (fi *fileInfo) Hash() string { + return fi.sum +} diff --git a/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go new file mode 100644 index 0000000000..1d23bbe65b --- /dev/null +++ b/vendor/github.com/docker/docker/builder/remotecontext/tarsum.pb.go @@ -0,0 +1,525 @@ +// Code generated by protoc-gen-gogo. +// source: tarsum.proto +// DO NOT EDIT! + +/* +Package remotecontext is a generated protocol buffer package. + +It is generated from these files: + tarsum.proto + +It has these top-level messages: + TarsumBackup +*/ +package remotecontext + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
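The generated marshaller that follows hand-rolls the protobuf wire format rather than using reflection: every field tag and length it emits is a base-128 varint, which is exactly what `encodeVarintTarsum` and `sovTarsum` below implement. For reference, a minimal standalone sketch of that encoding (not part of the vendored code):

```go
package main

import "fmt"

// putUvarint appends v to buf using protobuf's base-128 varint scheme:
// seven payload bits per byte, high bit set on every byte except the last.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarint decodes one varint from buf and reports how many bytes it consumed.
func uvarint(buf []byte) (v uint64, n int) {
	var shift uint
	for i, b := range buf {
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, i + 1
		}
		shift += 7
	}
	return 0, 0 // truncated input
}

func main() {
	buf := putUvarint(nil, 300) // 300 -> 0xac 0x02
	fmt.Printf("% x\n", buf)
	v, n := uvarint(buf)
	fmt.Println(v, n) // 300 2
}
```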
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TarsumBackup struct { + Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *TarsumBackup) Reset() { *m = TarsumBackup{} } +func (*TarsumBackup) ProtoMessage() {} +func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} } + +func (m *TarsumBackup) GetHashes() map[string]string { + if m != nil { + return m.Hashes + } + return nil +} + +func init() { + proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup") +} +func (this *TarsumBackup) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*TarsumBackup) + if !ok { + that2, ok := that.(TarsumBackup) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if len(this.Hashes) != len(that1.Hashes) { + return false + } + for i := range this.Hashes { + if this.Hashes[i] != that1.Hashes[i] { + return false + } + } + return true +} +func (this *TarsumBackup) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&remotecontext.TarsumBackup{") + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + if this.Hashes != nil { + s = append(s, "Hashes: "+mapStringForHashes+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTarsum(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *TarsumBackup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hashes) > 0 { + for k := range m.Hashes { + dAtA[i] = 0xa + i++ + v := m.Hashes[k] + mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + i = encodeVarintTarsum(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTarsum(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTarsum(dAtA []byte, 
offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TarsumBackup) Size() (n int) { + var l int + _ = l + if len(m.Hashes) > 0 { + for k, v := range m.Hashes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v))) + n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize)) + } + } + return n +} + +func sovTarsum(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTarsum(x uint64) (n int) { + return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TarsumBackup) String() string { + if this == nil { + return "nil" + } + keysForHashes := make([]string, 0, len(this.Hashes)) + for k := range this.Hashes { + keysForHashes = append(keysForHashes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHashes) + mapStringForHashes := "map[string]string{" + for _, k := range keysForHashes { + mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k]) + } + mapStringForHashes += "}" + s := strings.Join([]string{`&TarsumBackup{`, + `Hashes:` + mapStringForHashes + `,`, + `}`, + }, "") + return s +} +func valueToStringTarsum(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *TarsumBackup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTarsum + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Hashes == nil { + m.Hashes = make(map[string]string) + } 
+ if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTarsum + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTarsum + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Hashes[mapkey] = mapvalue + } else { + var mapvalue string + m.Hashes[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTarsum(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTarsum + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTarsum(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTarsum + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTarsum + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTarsum(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) } + +var fileDescriptorTarsum = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a, + 0x2e, 0xcd, 0xd5, 
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49, + 0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b, + 0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b, + 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51, + 0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24, + 0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89, + 0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, + 0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1, + 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, + 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, + 0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f, + 0xe0, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go deleted file mode 100644 index 3ad07ccb85..0000000000 --- a/vendor/github.com/docker/docker/client/build_prune.go +++ /dev/null @@ -1,30 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) { - if err := cli.NewVersionError("1.31", "build prune"); err != nil { - return nil, err - } - - report := types.BuildCachePruneReport{} - - serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil) - if err != nil { - return nil, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return nil, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return &report, nil -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go deleted file mode 100644 index 6441ed2501..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_create.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// CheckpointCreate creates a checkpoint from the given container with the given name -func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { - resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go deleted file mode 100644 index 5b97c35c6b..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options 
types.CheckpointDeleteOptions) error { - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go deleted file mode 100644 index 8ba1ddff7f..0000000000 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// CheckpointList returns the checkpoints of the given container in the docker host -func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { - var checkpoints []types.Checkpoint - - query := url.Values{} - if options.CheckpointDir != "" { - query.Set("dir", options.CheckpointDir) - } - - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) - if err != nil { - return checkpoints, wrapResponseError(err, resp, "container", container) - } - - err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) - return checkpoints, err -} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go deleted file mode 100644 index 6ce0cdba1f..0000000000 --- a/vendor/github.com/docker/docker/client/client.go +++ /dev/null @@ -1,383 +0,0 @@ -/* -Package client is a Go client for the Docker Engine API. - -For more information about the Engine API, see the documentation: -https://docs.docker.com/engine/reference/api/ - -Usage - -You use the library by creating a client object and calling methods on it. The -client can be created either from environment variables with NewEnvClient, or -configured manually with NewClient. - -For example, to list running containers (the equivalent of "docker ps"): - - package main - - import ( - "context" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - ) - - func main() { - cli, err := client.NewEnvClient() - if err != nil { - panic(err) - } - - containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) - if err != nil { - panic(err) - } - - for _, container := range containers { - fmt.Printf("%s %s\n", container.ID[:10], container.Image) - } - } - -*/ -package client // import "github.com/docker/docker/client" - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" - "golang.org/x/net/context" -) - -// ErrRedirect is the error returned by checkRedirect when the request is non-GET. -var ErrRedirect = errors.New("unexpected redirect in response") - -// Client is the API client that performs all operations -// against a docker server. -type Client struct { - // scheme sets the scheme for the client - scheme string - // host holds the server address to connect to - host string - // proto holds the client protocol i.e. unix. - proto string - // addr holds the client address. 
- addr string - // basePath holds the path to prepend to the requests. - basePath string - // client used to send and receive http requests. - client *http.Client - // version of the server to talk to. - version string - // custom http headers configured by users. - customHTTPHeaders map[string]string - // manualOverride is set to true when the version was set by users. - manualOverride bool -} - -// CheckRedirect specifies the policy for dealing with redirect responses: -// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. -// -// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client. -// The Docker client (and by extension docker API client) can be made to send a request -// like POST /containers//start where what would normally be in the name section of the URL is empty. -// This triggers an HTTP 301 from the daemon. -// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. -// This behavior change manifests in the client in that before the 301 was not followed and -// the client did not generate an error, but now results in a message like Error response from daemon: page not found. -func CheckRedirect(req *http.Request, via []*http.Request) error { - if via[0].Method == http.MethodGet { - return http.ErrUseLastResponse - } - return ErrRedirect -} - -// NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the TLS certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -// deprecated: use NewClientWithOpts(FromEnv) -func NewEnvClient() (*Client, error) { - return NewClientWithOpts(FromEnv) -} - -// FromEnv enhances the default client with values from environment variables -func FromEnv(c *Client) error { - var httpClient *http.Client - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return err - } - - httpClient = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - CheckRedirect: CheckRedirect, - } - WithHTTPClient(httpClient)(c) - } - - host := os.Getenv("DOCKER_HOST") - if host != "" { - // WithHost will create an API client if it doesn't exist - if err := WithHost(host)(c); err != nil { - return err - } - } - version := os.Getenv("DOCKER_API_VERSION") - if version != "" { - c.version = version - c.manualOverride = true - } - return nil -} - -// WithVersion overrides the client version with the specified one -func WithVersion(version string) func(*Client) error { - return func(c *Client) error { - c.version = version - return nil - } -} - -// WithHost overrides the client host with the specified one, creating a new -// http client if one doesn't exist -func WithHost(host string) func(*Client) error { - return func(c *Client) error { - hostURL, err := ParseHostURL(host) - if err != nil { - return err - } - c.host = host - c.proto = hostURL.Scheme - c.addr = hostURL.Host - c.basePath = hostURL.Path - if c.client == nil { - client, err := defaultHTTPClient(host) - if err !=
nil { - return err - } - return WithHTTPClient(client)(c) - } - if transport, ok := c.client.Transport.(*http.Transport); ok { - return sockets.ConfigureTransport(transport, c.proto, c.addr) - } - return fmt.Errorf("cannot apply host to http transport") - } -} - -// WithHTTPClient overrides the client http client with the specified one -func WithHTTPClient(client *http.Client) func(*Client) error { - return func(c *Client) error { - if client != nil { - c.client = client - } - return nil - } -} - -// WithHTTPHeaders overrides the client default http headers -func WithHTTPHeaders(headers map[string]string) func(*Client) error { - return func(c *Client) error { - c.customHTTPHeaders = headers - return nil - } -} - -// NewClientWithOpts initializes a new API client with default values. It takes functors -// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -func NewClientWithOpts(ops ...func(*Client) error) (*Client, error) { - client, err := defaultHTTPClient(DefaultDockerHost) - if err != nil { - return nil, err - } - c := &Client{ - host: DefaultDockerHost, - version: api.DefaultVersion, - scheme: "http", - client: client, - proto: defaultProto, - addr: defaultAddr, - } - - for _, op := range ops { - if err := op(c); err != nil { - return nil, err - } - } - - if _, ok := c.client.Transport.(http.RoundTripper); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) - } - tlsConfig := resolveTLSConfig(c.client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - c.scheme = "https" - } - - return c, nil -} - -func defaultHTTPClient(host string) (*http.Client, error) { - url, err := ParseHostURL(host) - if err != nil { - return nil, err - } - transport := new(http.Transport) - sockets.ConfigureTransport(transport, url.Scheme, url.Host) - return &http.Client{ - Transport: transport, - CheckRedirect: CheckRedirect, - }, nil -} - -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. -// It also initializes the custom http headers to add to each request. -// -// It won't send any version information if the version number is empty. It is -// highly recommended that you set a version or your client may break if the -// server is upgraded. -// deprecated: use NewClientWithOpts -func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) -} - -// Close the transport used by the client -func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { - t.CloseIdleConnections() - } - return nil -} - -// getAPIPath returns the versioned request path to call the api. 
-// It appends the query parameters to the path if they are not empty. -func (cli *Client) getAPIPath(p string, query url.Values) string { - var apiPath string - if cli.version != "" { - v := strings.TrimPrefix(cli.version, "v") - apiPath = path.Join(cli.basePath, "/v"+v, p) - } else { - apiPath = path.Join(cli.basePath, p) - } - return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() -} - -// ClientVersion returns the API version used by this client. -func (cli *Client) ClientVersion() string { - return cli.version -} - -// NegotiateAPIVersion queries the API and updates the version to match the -// API version. Any errors are silently ignored. -func (cli *Client) NegotiateAPIVersion(ctx context.Context) { - ping, _ := cli.Ping(ctx) - cli.NegotiateAPIVersionPing(ping) -} - -// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion -// if the ping version is less than the default version. -func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { - if cli.manualOverride { - return - } - - // try the latest version before versioning headers existed - if p.APIVersion == "" { - p.APIVersion = "1.24" - } - - // if the client is not initialized with a version, start with the latest supported version - if cli.version == "" { - cli.version = api.DefaultVersion - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(p.APIVersion, cli.version) { - cli.version = p.APIVersion - } -} - -// DaemonHost returns the host address used by the client -func (cli *Client) DaemonHost() string { - return cli.host -} - -// ParseHost parses a url string, validates the string is a host url, and returns -// the parsed host as: protocol, address, and base path -// Deprecated: use ParseHostURL -func ParseHost(host string) (string, string, string, error) { - hostURL, err := ParseHostURL(host) - if err != nil { - return "", "", "", err - } - return hostURL.Scheme, hostURL.Host, hostURL.Path, nil -} - -// ParseHostURL parses a url string, validates the string is a host url, and -// returns the parsed URL -func ParseHostURL(host string) (*url.URL, error) { - protoAddrParts := strings.SplitN(host, "://", 2) - if len(protoAddrParts) == 1 { - return nil, fmt.Errorf("unable to parse docker host `%s`", host) - } - - var basePath string - proto, addr := protoAddrParts[0], protoAddrParts[1] - if proto == "tcp" { - parsed, err := url.Parse("tcp://" + addr) - if err != nil { - return nil, err - } - addr = parsed.Host - basePath = parsed.Path - } - return &url.URL{ - Scheme: proto, - Host: addr, - Path: basePath, - }, nil -} - -// CustomHTTPHeaders returns the custom http headers stored by the client. -func (cli *Client) CustomHTTPHeaders() map[string]string { - m := make(map[string]string) - for k, v := range cli.customHTTPHeaders { - m[k] = v - } - return m -} - -// SetCustomHTTPHeaders sets the custom http headers that will be set on every HTTP request made by the client.
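`NegotiateAPIVersionPing` above encodes a one-way rule: the client only ever downgrades to the daemon's reported API version, never upgrades past its own default. A simplified standalone sketch of that rule, with a hypothetical `lessThan` helper standing in for `versions.LessThan`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// lessThan is a hypothetical stand-in for versions.LessThan: it compares
// dotted API versions such as "1.24" and "1.37" numerically, field by field.
func lessThan(a, b string) bool {
	as, bs := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(as) && i < len(bs); i++ {
		ai, _ := strconv.Atoi(as[i])
		bi, _ := strconv.Atoi(bs[i])
		if ai != bi {
			return ai < bi
		}
	}
	return len(as) < len(bs)
}

// negotiate mirrors the rule above: keep the client's default unless the
// server pings back an older API version, in which case downgrade to it.
func negotiate(clientDefault, serverPing string) string {
	if serverPing == "" {
		serverPing = "1.24" // pre-negotiation daemons sent no version header
	}
	if lessThan(serverPing, clientDefault) {
		return serverPing
	}
	return clientDefault
}

func main() {
	fmt.Println(negotiate("1.37", "1.30")) // 1.30 — downgraded to the daemon
	fmt.Println(negotiate("1.37", ""))     // 1.24 — legacy daemon assumed
	fmt.Println(negotiate("1.30", "1.37")) // 1.30 — the client never upgrades
}
```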
-func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { - cli.customHTTPHeaders = headers -} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go deleted file mode 100644 index 3d24470ba3..0000000000 --- a/vendor/github.com/docker/docker/client/client_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd openbsd darwin - -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "unix:///var/run/docker.sock" - -const defaultProto = "unix" -const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go deleted file mode 100644 index c649e54412..0000000000 --- a/vendor/github.com/docker/docker/client/client_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// DefaultDockerHost defines os specific default if DOCKER_HOST is unset -const DefaultDockerHost = "npipe:////./pipe/docker_engine" - -const defaultProto = "npipe" -const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go deleted file mode 100644 index b6d15b70e4..0000000000 --- a/vendor/github.com/docker/docker/client/config_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ConfigCreate creates a new Config. -func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse - if err := cli.NewVersionError("1.30", "config create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/configs/create", nil, config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go deleted file mode 100644 index b49fdf568a..0000000000 --- a/vendor/github.com/docker/docker/client/config_inspect.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ConfigInspectWithRaw returns the config information with raw data -func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { - if err := cli.NewVersionError("1.30", "config inspect"); err != nil { - return swarm.Config{}, nil, err - } - resp, err := cli.get(ctx, "/configs/"+id, nil, nil) - if err != nil { - return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return swarm.Config{}, nil, err - } - - var config swarm.Config - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&config) - - return config, body, err -} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go deleted file mode 100644 
index b9d2632f6a..0000000000 --- a/vendor/github.com/docker/docker/client/config_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ConfigList returns the list of configs. -func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if err := cli.NewVersionError("1.30", "config list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/configs", query, nil) - if err != nil { - return nil, err - } - - var configs []swarm.Config - err = json.NewDecoder(resp.body).Decode(&configs) - ensureReaderClosed(resp) - return configs, err -} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go deleted file mode 100644 index 9c8f293f3e..0000000000 --- a/vendor/github.com/docker/docker/client/config_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// ConfigRemove removes a Config. -func (cli *Client) ConfigRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.30", "config remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "config", id) -} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go deleted file mode 100644 index 6b24024cd1..0000000000 --- a/vendor/github.com/docker/docker/client/config_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ConfigUpdate attempts to update a Config -func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { - if err := cli.NewVersionError("1.30", "config update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go deleted file mode 100644 index 1a2a431975..0000000000 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ /dev/null @@ -1,57 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerAttach attaches a connection to a container in the server. -// It returns a types.HijackedResponse with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close.
-// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. -func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - if options.Logs { - query.Set("logs", "1") - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go deleted file mode 100644 index 95219beb17..0000000000 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ /dev/null @@ -1,55 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "errors" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. 
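The frame format documented for `ContainerAttach` above is worth seeing in code. `stdcopy.StdCopy` is the stock demultiplexer for it; purely as an illustration, a hand-rolled sketch that splits such a stream by reading the 8-byte headers directly:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// demux splits a multiplexed attach/logs stream into stdout and stderr
// following the framing documented above: an 8-byte header whose first
// byte is the stream type (1=stdout, 2=stderr) and whose last four bytes
// are the big-endian payload size, followed by the payload itself.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var hdr [8]byte
	for {
		if _, err := io.ReadFull(r, hdr[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream between frames
			}
			return err
		}
		size := binary.BigEndian.Uint32(hdr[4:8])
		var w io.Writer
		switch hdr[0] {
		case 1:
			w = stdout
		case 2:
			w = stderr
		default:
			return fmt.Errorf("unknown stream type %d", hdr[0])
		}
		if _, err := io.CopyN(w, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// One stdout frame ("hi\n") followed by one stderr frame ("oops\n").
	frame := []byte{1, 0, 0, 0, 0, 0, 0, 3, 'h', 'i', '\n',
		2, 0, 0, 0, 0, 0, 0, 5, 'o', 'o', 'p', 's', '\n'}
	var out, errOut bytes.Buffer
	if err := demux(bytes.NewReader(frame), &out, &errOut); err != nil {
		fmt.Println("demux:", err)
	}
	fmt.Printf("stdout=%q stderr=%q\n", out.String(), errOut.String())
}
```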
-func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { - var repository, tag string - if options.Reference != "" { - ref, err := reference.ParseNormalizedNamed(options.Reference) - if err != nil { - return types.IDResponse{}, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") - } - ref = reference.TagNameOnly(ref) - - if tagged, ok := ref.(reference.Tagged); ok { - tag = tagged.Tag() - } - repository = reference.FamiliarName(ref) - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if !options.Pause { - query.Set("pause", "0") - } - - var response types.IDResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go deleted file mode 100644 index 036298c17c..0000000000 --- a/vendor/github.com/docker/docker/client/container_copy.go +++ /dev/null @@ -1,102 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - - urlStr := "/containers/" + containerID + "/archive" - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -// Note that `content` must be a Reader for a TAR archive -func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. 
- if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - if options.CopyUIDGID { - query.Set("copyUIDGID", "true") - } - - apiPath := "/containers/" + containerID + "/archive" - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := "/containers/" + containerID + "/archive" - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. - stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go deleted file mode 100644 index 0af82a19ec..0000000000 --- a/vendor/github.com/docker/docker/client/container_create.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory.
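`CopyFromContainer` above leans on `getContainerPathStatFromHeader`: the daemon serializes stat information as base64-encoded JSON in the `X-Docker-Container-Path-Stat` response header. A round-trip sketch of that encoding, using a trimmed-down stand-in for `types.ContainerPathStat`:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"
)

// pathStat is a trimmed-down stand-in for types.ContainerPathStat; the
// real struct also carries size, mtime, and a link target.
type pathStat struct {
	Name string `json:"name"`
	Mode uint32 `json:"mode"`
}

func main() {
	// Simulate what the daemon puts in X-Docker-Container-Path-Stat:
	// JSON, then base64.
	raw, _ := json.Marshal(pathStat{Name: "etc", Mode: 0755})
	header := base64.StdEncoding.EncodeToString(raw)

	// Decoding mirrors getContainerPathStatFromHeader above: layer a JSON
	// decoder over a streaming base64 decoder.
	var stat pathStat
	dec := base64.NewDecoder(base64.StdEncoding, strings.NewReader(header))
	if err := json.NewDecoder(dec).Decode(&stat); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Printf("%+v\n", stat)
}
```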
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { - var response container.ContainerCreateCreatedBody - - if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { - return response, err - } - - // When using API 1.24 and under, the client is responsible for removing the container - if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { - hostConfig.AutoRemove = false - } - - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, objectNotFoundError{object: "image", id: config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go deleted file mode 100644 index f03ebf1da3..0000000000 --- a/vendor/github.com/docker/docker/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -// ContainerDiff shows differences in a container filesystem since it was started. -func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { - var changes []container.ContainerChangeResponseItem - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) - return changes, err -} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go deleted file mode 100644 index 59db8a90fb..0000000000 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ /dev/null @@ -1,54 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { - var response types.IDResponse - - if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { - return response, err - } - - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. 
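`ContainerCreate` above only allocates the container and returns its ID; running it takes a separate start call. A hypothetical end-to-end sketch pairing the two, assuming a reachable daemon and a locally available `alpine` image:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	created, err := cli.ContainerCreate(ctx,
		&container.Config{Image: "alpine", Cmd: []string{"true"}},
		nil, // host config: daemon defaults
		nil, // networking config: default network
		"")  // empty name: let the daemon generate one
	if err != nil {
		panic(err)
	}

	// Creating alone does nothing visible; a start call runs the process.
	if err := cli.ContainerStart(ctx, created.ID, types.ContainerStartOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("started", created.ID[:12])
}
```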
-func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedResponse with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go deleted file mode 100644 index f4c2bee368..0000000000 --- a/vendor/github.com/docker/docker/client/container_export.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream. -func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go deleted file mode 100644 index 1ec45e823f..0000000000 --- a/vendor/github.com/docker/docker/client/container_inspect.go +++ /dev/null @@ -1,47 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation.
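The exec endpoints above compose into a create → attach → inspect lifecycle. A usage sketch, assuming a reachable daemon and a hypothetical running container named `web`:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Create an exec instance inside the container.
	created, err := cli.ContainerExecCreate(ctx, "web", types.ExecConfig{
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
	})
	if err != nil {
		panic(err)
	}

	// Attach both starts the process and hijacks the connection; the
	// output is multiplexed exactly as described for ContainerAttach,
	// copied raw here for brevity.
	hijacked, err := cli.ContainerExecAttach(ctx, created.ID, types.ExecStartCheck{})
	if err != nil {
		panic(err)
	}
	defer hijacked.Close()
	if _, err := hijacked.Reader.WriteTo(os.Stdout); err != nil {
		panic(err)
	}

	// Once the stream ends, inspect reports the exit code.
	inspect, err := cli.ContainerExecInspect(ctx, created.ID)
	if err != nil {
		panic(err)
	}
	fmt.Println("exit code:", inspect.ExitCode)
}
```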
-func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go deleted file mode 100644 index c5a9fe75b2..0000000000 --- a/vendor/github.com/docker/docker/client/container_kill.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go deleted file mode 100644 index cd2e13835b..0000000000 --- a/vendor/github.com/docker/docker/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// ContainerList returns the list of containers in the docker host. 
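`ContainerList`, defined next, serializes a `filters.Args` set into the `filters` query parameter on the caller's behalf. A usage sketch, assuming a reachable daemon; the `app=web` label is hypothetical:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	args := filters.NewArgs()
	args.Add("label", "app=web") // only containers carrying this label

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		All:     true, // include stopped containers
		Filters: args,
	})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image, c.State)
	}
}
```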
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go deleted file mode 100644 index 01d88a322f..0000000000 --- a/vendor/github.com/docker/docker/client/container_logs.go +++ /dev/null @@ -1,80 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -// -// The stream format on the response will be in one of two formats: -// -// If the container is using a TTY, there is only a single stream (stdout), and -// data is copied directly from the container output stream, no extra -// multiplexing or headers. -// -// If the container is *not* using a TTY, streams for stdout and stderr are -// multiplexed. -// The format of the multiplexed stream is as follows: -// -// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} -// -// STREAM_TYPE can be 1 for stdout and 2 for stderr -// -// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. -// This is the size of OUTPUT. -// -// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this -// stream. 
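For the logs call defined next, a non-TTY container yields exactly the multiplexed framing documented above, and `stdcopy.StdCopy` is the stock way to split it. A usage sketch, again assuming a reachable daemon and a hypothetical container `web`:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	logs, err := cli.ContainerLogs(context.Background(), "web", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100", // only the most recent lines
	})
	if err != nil {
		panic(err)
	}
	defer logs.Close()

	// Demultiplex the stream onto the process's own stdout/stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, logs); err != nil {
		panic(err)
	}
}
```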
-func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, time.Now()) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, wrapResponseError(err, resp, "container", container) - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go deleted file mode 100644 index fa0a9011a3..0000000000 --- a/vendor/github.com/docker/docker/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go deleted file mode 100644 index 4129841d49..0000000000 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// ContainersPrune requests the daemon to delete unused data -func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { - var report types.ContainersPruneReport - - if err := cli.NewVersionError("1.25", "container prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go deleted file mode 100644 index c782914319..0000000000 --- a/vendor/github.com/docker/docker/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerRemove kills and removes a container from the docker host. 
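`ContainersPrune` above accepts the same `filters.Args` mechanism; an `until` filter, where the daemon supports it, restricts pruning to containers stopped longer than the given duration. A usage sketch under those assumptions:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	// Only prune containers that have been stopped for more than 24 hours.
	until := filters.NewArgs()
	until.Add("until", "24h")

	report, err := cli.ContainersPrune(context.Background(), until)
	if err != nil {
		panic(err)
	}
	fmt.Printf("reclaimed %d bytes from %d containers\n",
		report.SpaceReclaimed, len(report.ContainersDeleted))
}
```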
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "container", containerID) -} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go deleted file mode 100644 index ad43e398d5..0000000000 --- a/vendor/github.com/docker/docker/client/container_rename.go +++ /dev/null @@ -1,16 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go deleted file mode 100644 index 8f1244e228..0000000000 --- a/vendor/github.com/docker/docker/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { - query := url.Values{} - query.Set("h", strconv.Itoa(int(height))) - query.Set("w", strconv.Itoa(int(width))) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go deleted file mode 100644 index 8c8085dcf2..0000000000 --- a/vendor/github.com/docker/docker/client/container_restart.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon to wait for the container to be up again for -// a specific amount of time, given the timeout. 
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go deleted file mode 100644 index d06b637003..0000000000 --- a/vendor/github.com/docker/docker/client/container_start.go +++ /dev/null @@ -1,24 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - if len(options.CheckpointDir) != 0 { - query.Set("checkpoint-dir", options.CheckpointDir) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go deleted file mode 100644 index a7a2b8ec35..0000000000 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return types.ContainerStats{}, err - } - - osType := getDockerOS(resp.header.Get("Server")) - return types.ContainerStats{Body: resp.body, OSType: osType}, err -} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go deleted file mode 100644 index ca316666a9..0000000000 --- a/vendor/github.com/docker/docker/client/container_stop.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "time" - - timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" -) - -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. 
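ContainerRestart above and ContainerStop below both take an optional `*time.Duration` rather than a plain value: nil means "use the daemon's default grace period", while a non-nil pointer is serialized into the `t` query parameter. A small sketch of a graceful stop-then-remove, assuming a hypothetical container ID:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func stopAndRemove(ctx context.Context, cli *client.Client, id string) error {
	// A non-nil timeout becomes "t=10" in the query string;
	// passing nil would leave the daemon's default grace period.
	timeout := 10 * time.Second
	if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
		return err
	}
	// Force is unnecessary here because the container was just stopped.
	return cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{RemoveVolumes: true})
}

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	if err := stopAndRemove(context.Background(), cli, "my-container"); err != nil {
		log.Fatal(err)
	}
}
```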
-func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go deleted file mode 100644 index 55841ce998..0000000000 --- a/vendor/github.com/docker/docker/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -// ContainerTop shows process information from within a container. -func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { - var response container.ContainerTopOKBody - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go deleted file mode 100644 index b75fb0c31e..0000000000 --- a/vendor/github.com/docker/docker/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// ContainerUnpause resumes the process execution within a container -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go deleted file mode 100644 index 218ec90fd9..0000000000 --- a/vendor/github.com/docker/docker/client/container_update.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" -) - -// ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { - var response container.ContainerUpdateOKBody - serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go deleted file mode 100644 index 87b0dc1afd..0000000000 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ /dev/null @@ -1,84 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/versions" 
-) - -// ContainerWait waits until the specified container is in a certain state -// indicated by the given condition, either "not-running" (default), -// "next-exit", or "removed". -// -// If this client's API version is before 1.30, condition is ignored and -// ContainerWait will return immediately with the two channels, as the server -// will wait as if the condition were "not-running". -// -// If this client's API version is at least 1.30, ContainerWait blocks until -// the request has been acknowledged by the server (with a response header), -// then returns two channels on which the caller can wait for the exit status -// of the container or an error if there was a problem either beginning the -// wait request or in getting the response. This allows the caller to -// synchronize ContainerWait with other calls, such as specifying a -// "next-exit" condition before issuing a ContainerStart request. -func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { - if versions.LessThan(cli.ClientVersion(), "1.30") { - return cli.legacyContainerWait(ctx, containerID) - } - - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error, 1) - - query := url.Values{} - query.Set("condition", string(condition)) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) - if err != nil { - defer ensureReaderClosed(resp) - errC <- err - return resultC, errC - } - - go func() { - defer ensureReaderClosed(resp) - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} - -// legacyContainerWait returns immediately and doesn't have an option to wait -// until the container is removed. 
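The two-channel contract documented above is easy to misuse; the usual call-site pattern is a select over both channels, as in this sketch (container ID is a placeholder):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Both channels come back immediately; the goroutine behind them
	// delivers exactly one value once the "not-running" condition holds.
	statusCh, errCh := cli.ContainerWait(context.Background(), "my-container", container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		log.Fatal(err)
	case status := <-statusCh:
		fmt.Printf("container exited with status %d\n", status.StatusCode)
	}
}
```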
-func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { - resultC := make(chan container.ContainerWaitOKBody) - errC := make(chan error) - - go func() { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - errC <- err - return - } - defer ensureReaderClosed(resp) - - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - errC <- err - return - } - - resultC <- res - }() - - return resultC, errC -} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go deleted file mode 100644 index 831fd40e62..0000000000 --- a/vendor/github.com/docker/docker/client/disk_usage.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// DiskUsage requests the current data usage from the daemon -func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { - var du types.DiskUsage - - serverResp, err := cli.get(ctx, "/system/df", nil, nil) - if err != nil { - return du, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { - return du, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return du, nil -} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go deleted file mode 100644 index 5f55945c41..0000000000 --- a/vendor/github.com/docker/docker/client/distribution_inspect.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - registrytypes "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" -) - -// DistributionInspect returns the image digest with full Manifest -func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { - // Contact the registry to retrieve digest and platform information - var distributionInspect registrytypes.DistributionInspect - - if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { - return distributionInspect, err - } - var headers map[string][]string - - if encodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": {encodedRegistryAuth}, - } - } - - resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) - if err != nil { - return distributionInspect, err - } - - err = json.NewDecoder(resp.body).Decode(&distributionInspect) - ensureReaderClosed(resp) - return distributionInspect, err -} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go deleted file mode 100644 index 05c1246276..0000000000 --- a/vendor/github.com/docker/docker/client/errors.go +++ /dev/null @@ -1,133 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - - "net/http" - - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" -) - -// errConnectionFailed implements an error returned when connection failed. 
-type errConnectionFailed struct { - host string -} - -// Error returns a string representation of an errConnectionFailed -func (err errConnectionFailed) Error() string { - if err.host == "" { - return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" - } - return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) -} - -// IsErrConnectionFailed returns true if the error is caused by connection failed. -func IsErrConnectionFailed(err error) bool { - _, ok := errors.Cause(err).(errConnectionFailed) - return ok -} - -// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. -func ErrorConnectionFailed(host string) error { - return errConnectionFailed{host: host} -} - -type notFound interface { - error - NotFound() bool // Is the error a NotFound error -} - -// IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. -func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() -} - -type objectNotFoundError struct { - object string - id string -} - -func (e objectNotFoundError) NotFound() bool { - return true -} - -func (e objectNotFoundError) Error() string { - return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) -} - -func wrapResponseError(err error, resp serverResponse, object, id string) error { - switch { - case err == nil: - return nil - case resp.statusCode == http.StatusNotFound: - return objectNotFoundError{object: object, id: id} - case resp.statusCode == http.StatusNotImplemented: - return notImplementedError{message: err.Error()} - default: - return err - } -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} - -type pluginPermissionDenied struct { - name string -} - -func (e pluginPermissionDenied) Error() string { - return "Permission denied while installing plugin " + e.name -} - -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} - -type notImplementedError struct { - message string -} - -func (e notImplementedError) Error() string { - return e.message -} - -func (e notImplementedError) NotImplemented() bool { - return true -} - -// IsErrNotImplemented returns true if the error is a NotImplemented error. -// This is returned by the API when a requested feature has not been -// implemented. 
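These helpers exist so callers can branch on the error kind instead of string-matching; `wrapResponseError` is what turns a daemon 404 into a value that `IsErrNotFound` recognizes. A sketch of the intended call-site pattern, with a hypothetical container name:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	switch {
	case err == nil:
		fmt.Println("container exists")
	case client.IsErrNotFound(err):
		// A 404 from the daemon, wrapped as an objectNotFoundError above.
		fmt.Println("container does not exist")
	default:
		log.Fatal(err)
	}
}
```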
-func IsErrNotImplemented(err error) bool { - te, ok := err.(notImplementedError) - return ok && te.NotImplemented() -} - -// NewVersionError returns an error if the APIVersion required -// if less than the current supported version -func (cli *Client) NewVersionError(APIrequired, feature string) error { - if cli.version != "" && versions.LessThan(cli.version, APIrequired) { - return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go deleted file mode 100644 index d5d4dd8ac2..0000000000 --- a/vendor/github.com/docker/docker/client/events.go +++ /dev/null @@ -1,102 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - timetypes "github.com/docker/docker/api/types/time" -) - -// Events returns a stream of events in the daemon. It's up to the caller to close the stream -// by cancelling the context. Once the stream has been completely read an io.EOF error will -// be sent over the error channel. If an error is sent all processing will be stopped. It's up -// to the caller to reopen the stream in the event of an error by reinvoking this method. -func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { - - messages := make(chan events.Message) - errs := make(chan error, 1) - - started := make(chan struct{}) - go func() { - defer close(errs) - - query, err := buildEventsQueryParams(cli.version, options) - if err != nil { - close(started) - errs <- err - return - } - - resp, err := cli.get(ctx, "/events", query, nil) - if err != nil { - close(started) - errs <- err - return - } - defer resp.body.Close() - - decoder := json.NewDecoder(resp.body) - - close(started) - for { - select { - case <-ctx.Done(): - errs <- ctx.Err() - return - default: - var event events.Message - if err := decoder.Decode(&event); err != nil { - errs <- err - return - } - - select { - case messages <- event: - case <-ctx.Done(): - errs <- ctx.Err() - return - } - } - } - }() - <-started - - return messages, errs -} - -func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go deleted file mode 100644 index 628adfda65..0000000000 --- a/vendor/github.com/docker/docker/client/hijack.go +++ /dev/null @@ -1,207 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bufio" - "crypto/tls" - "fmt" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/docker/docker/api/types" - 
"github.com/docker/go-connections/sockets" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest("POST", apiPath, bodyEncoded) - if err != nil { - return types.HijackedResponse{}, err - } - req = cli.addHeaders(req, headers) - - conn, err := cli.setupHijackConn(req, "tcp") - if err != nil { - return types.HijackedResponse{}, err - } - - return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := time.Until(dialer.Deadline) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. 
- config = tlsConfigClone(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} - -func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) { - req.Host = cli.addr - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", proto) - - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) - if err != nil { - return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - resp, err := clientconn.Do(req) - if err != httputil.ErrPersistEOF { - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusSwitchingProtocols { - resp.Body.Close() - return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) - } - } - - c, br := clientconn.Hijack() - if br.Buffered() > 0 { - // If there is buffered content, wrap the connection - c = &hijackedConn{c, br} - } else { - br.Reset(nil) - } - - return c, nil -} - -type hijackedConn struct { - net.Conn - r *bufio.Reader -} - -func (c *hijackedConn) Read(b []byte) (int, error) { - return c.r.Read(b) -} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go deleted file mode 100644 index 97c9301c82..0000000000 --- a/vendor/github.com/docker/docker/client/image_build.go +++ /dev/null @@ -1,138 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" -) - -// ImageBuild sends request to the daemon to build images. -// The Body in the response implement an io.ReadCloser and it's up to the caller to -// close it. 
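ImageBuild below expects the build context as a tar stream on the wire, with the options translated into query parameters by `imageBuildOptionsToQuery`. A minimal sketch that builds an in-memory context holding a single Dockerfile; the image tag and Dockerfile content are placeholders:

```go
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// The build context is a tar archive; here it contains one file.
	dockerfile := []byte("FROM alpine\nRUN echo hello\n")
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Size: int64(len(dockerfile)), Mode: 0600}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		log.Fatal(err)
	}
	tw.Close()

	resp, err := cli.ImageBuild(context.Background(), &buf, types.ImageBuildOptions{
		Tags:       []string{"example:latest"},
		Dockerfile: "Dockerfile", // becomes the "dockerfile" query parameter
		Remove:     true,         // rm=1: delete intermediate containers
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The body is a stream of JSON progress messages; dump it raw here.
	io.Copy(os.Stdout, resp.Body)
}
```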
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := cli.imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - - if options.Platform != "" { - if err := cli.NewVersionError("1.32", "platform"); err != nil { - return types.ImageBuildResponse{}, err - } - query.Set("platform", options.Platform) - } - headers.Set("Content-Type", "application/x-tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - "securityopt": options.SecurityOpt, - "extrahosts": options.ExtraHosts, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if options.Squash { - if err := cli.NewVersionError("1.25", "squash"); err != nil { - return query, err - } - query.Set("squash", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("networkmode", options.NetworkMode) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - query.Set("target", options.Target) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err := json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - - cacheFromJSON, err := json.Marshal(options.CacheFrom) - if err != nil { - return query, err - } - query.Set("cachefrom", string(cacheFromJSON)) - if options.SessionID != "" { - query.Set("session", options.SessionID) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go deleted file mode 100644 index fe237508b1..0000000000 --- 
a/vendor/github.com/docker/docker/client/image_create.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageCreate creates a new image based in the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - query.Set("tag", getAPITagFromNamedRef(ref)) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go deleted file mode 100644 index cf74723a03..0000000000 --- a/vendor/github.com/docker/docker/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/image" - "golang.org/x/net/context" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { - var history []image.HistoryResponseItem - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go deleted file mode 100644 index dddbf9c629..0000000000 --- a/vendor/github.com/docker/docker/client/image_import.go +++ /dev/null @@ -1,41 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImageImport creates a new image based in the source options. -// It returns the JSON content in the response body. 
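A sketch of feeding ImageImport a local rootfs tarball; `SourceName` set to `"-"` tells the daemon to read the archive from the request body, and the file name and target ref here are placeholders:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("rootfs.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// SourceName "-" means: import from the stream supplied in Source.
	rc, err := cli.ImageImport(context.Background(),
		types.ImageImportSource{Source: f, SourceName: "-"},
		"example/imported:latest",
		types.ImageImportOptions{Message: "imported from rootfs.tar"})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // JSON progress stream
}
```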
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNormalizedNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go deleted file mode 100644 index 75057bf324..0000000000 --- a/vendor/github.com/docker/docker/client/image_inspect.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - if err != nil { - return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go deleted file mode 100644 index 1e84a962da..0000000000 --- a/vendor/github.com/docker/docker/client/image_list.go +++ /dev/null @@ -1,45 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" -) - -// ImageList returns a list of images in the docker host. 
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { - var images []types.ImageSummary - query := url.Values{} - - optionFilters := options.Filters - referenceFilters := optionFilters.Get("reference") - if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { - query.Set("filter", referenceFilters[0]) - for _, filterValue := range referenceFilters { - optionFilters.Del("reference", filterValue) - } - } - if optionFilters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go deleted file mode 100644 index 411567c0c9..0000000000 --- a/vendor/github.com/docker/docker/client/image_load.go +++ /dev/null @@ -1,30 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go deleted file mode 100644 index 32ea9098c3..0000000000 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// ImagesPrune requests the daemon to delete unused data -func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { - var report types.ImagesPruneReport - - if err := cli.NewVersionError("1.25", "image prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving disk usage: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go deleted file mode 100644 index ee5923a8f6..0000000000 --- a/vendor/github.com/docker/docker/client/image_pull.go +++ /dev/null @@ -1,65 +0,0 
@@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/http" - "net/url" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -// -// FIXME(vdemeester): there is currently used in a few way in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(refStr) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) - if !options.All { - query.Set("tag", getAPITagFromNamedRef(ref)) - } - if options.Platform != "" { - query.Set("platform", strings.ToLower(options.Platform)) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -// getAPITagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api expects -// digests to be sent as tags and makes a distinction between the name -// and tag/digest part of a reference. -func getAPITagFromNamedRef(ref reference.Named) string { - if digested, ok := ref.(reference.Digested); ok { - return digested.Digest().String() - } - ref = reference.TagNameOnly(ref) - if tagged, ok := ref.(reference.Tagged); ok { - return tagged.Tag() - } - return "" -} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go deleted file mode 100644 index 6fcc1626ec..0000000000 --- a/vendor/github.com/docker/docker/client/image_push.go +++ /dev/null @@ -1,56 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "errors" - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
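ImagePull above relies on the same reference normalization as `getAPITagFromNamedRef`: a bare name like "alpine" becomes docker.io/library/alpine:latest before the request goes out, and the pull only completes once the progress stream is drained. A minimal sketch:

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// "alpine" is normalized to docker.io/library/alpine and tagged
	// "latest" via reference.TagNameOnly before the request is built.
	rc, err := cli.ImagePull(context.Background(), "alpine", types.ImagePullOptions{})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// Draining the body is what actually drives the pull to completion.
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}
```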
-func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { - ref, err := reference.ParseNormalizedNamed(image) - if err != nil { - return nil, err - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - tag := "" - name := reference.FamiliarName(ref) - - if nameTaggedRef, isNamedTagged := ref.(reference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go deleted file mode 100644 index 6d222a5077..0000000000 --- a/vendor/github.com/docker/docker/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ImageRemove removes an image from the docker host. -func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - var dels []types.ImageDeleteResponseItem - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return dels, wrapResponseError(err, resp, "image", imageID) - } - - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go deleted file mode 100644 index a919615966..0000000000 --- a/vendor/github.com/docker/docker/client/image_save.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. 
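The `X-Registry-Auth` header that `tryImagePush` sends is a base64url-encoded JSON `AuthConfig`. A sketch of tagging and pushing with credentials; the registry host, image names, and credentials are placeholders:

```go
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Encode credentials the way the daemon expects them in X-Registry-Auth.
	buf, err := json.Marshal(types.AuthConfig{Username: "user", Password: "secret"})
	if err != nil {
		log.Fatal(err)
	}
	auth := base64.URLEncoding.EncodeToString(buf)

	// ImagePush refuses digest references, so push a tag.
	target := "registry.example.com/example:latest"
	if err := cli.ImageTag(ctx, "example:latest", target); err != nil {
		log.Fatal(err)
	}
	rc, err := cli.ImagePush(ctx, target, types.ImagePushOptions{RegistryAuth: auth})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // progress stream; drain to completion
}
```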
-func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go deleted file mode 100644 index ba5072ad59..0000000000 --- a/vendor/github.com/docker/docker/client/image_search.go +++ /dev/null @@ -1,51 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" -) - -// ImageSearch makes the docker host to search by a term in a remote registry. -// The list of results is not sorted in any fashion. -func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go deleted file mode 100644 index 4399a3e9aa..0000000000 --- a/vendor/github.com/docker/docker/client/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/distribution/reference" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, source, target string) error { - if _, err := reference.ParseAnyReference(source); err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) - } - - ref, err := reference.ParseNormalizedNamed(target) - if err != nil { - return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) - } - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - ref = reference.TagNameOnly(ref) - - query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) - if tagged, ok := ref.(reference.Tagged); ok { - query.Set("tag", tagged.Tag()) - } - - resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git 
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go deleted file mode 100644 index 3f6746c942..0000000000 --- a/vendor/github.com/docker/docker/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// Info returns information about the docker server. -func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go deleted file mode 100644 index e928e647a7..0000000000 --- a/vendor/github.com/docker/docker/client/interface.go +++ /dev/null @@ -1,194 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net" - "time" - - "github.com/docker/docker/api/types" - containertypes "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/events" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/image" - networktypes "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/api/types/swarm" - volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
-type CommonAPIClient interface { - ConfigAPIClient - ContainerAPIClient - DistributionAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - PluginAPIClient - ServiceAPIClient - SwarmAPIClient - SecretAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - DaemonHost() string - ServerVersion(ctx context.Context) (types.Version, error) - NegotiateAPIVersion(ctx context.Context) - NegotiateAPIVersionPing(types.Ping) - DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan 
containertypes.ContainerWaitOKBody, <-chan error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error - ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) -} - -// DistributionAPIClient defines API client methods for the registry -type DistributionAPIClient interface { - DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error - ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, network, container string, force bool) error - NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, network string) error - NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - NodeUpdate(ctx context.Context, nodeID string, version 
swarm.Version, node swarm.NodeSpec) error -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error - PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) - PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) - PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) - ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) - SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) - Info(ctx context.Context) (types.Info, error) - RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) - DiskUsage(ctx context.Context) (types.DiskUsage, error) - Ping(ctx context.Context) (types.Ping, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) 
(types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) - VolumeRemove(ctx context.Context, volumeID string, force bool) error - VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) -} - -// SecretAPIClient defines API client methods for secrets -type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) - SecretRemove(ctx context.Context, id string) error - SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) - SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error -} - -// ConfigAPIClient defines API client methods for configs -type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) - ConfigRemove(ctx context.Context, id string) error - ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) - ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error -} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go deleted file mode 100644 index 236c1d6ca6..0000000000 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -type apiClientExperimental interface { - CheckpointAPIClient -} - -// CheckpointAPIClient defines API client methods for the checkpoints -type CheckpointAPIClient interface { - CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error - CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) -} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go deleted file mode 100644 index 5502cd7426..0000000000 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient - apiClientExperimental -} - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go deleted file mode 100644 index fefb101ade..0000000000 --- a/vendor/github.com/docker/docker/client/login.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" -) - -// RegistryLogin authenticates the docker server with a given docker registry. -// It returns unauthorizedError when the authentication fails. 
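For context on the hunks above: the deleted interface.go split the vendored client into narrow per-domain interfaces (ContainerAPIClient, ImageAPIClient, VolumeAPIClient, SecretAPIClient, ConfigAPIClient, and so on), all satisfied by *client.Client. A minimal consumption sketch, compiled against exactly the signatures shown above; the helper name listVolumes is illustrative and the import paths assume the vendored docker packages. The removed RegistryLogin implementation continues directly below.

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// listVolumes accepts the narrow VolumeAPIClient interface rather than the
// concrete *client.Client, so tests can substitute a fake implementation.
func listVolumes(ctx context.Context, c client.VolumeAPIClient) error {
	body, err := c.VolumeList(ctx, filters.NewArgs())
	if err != nil {
		return err
	}
	for _, v := range body.Volumes {
		fmt.Println(v.Name, v.Driver)
	}
	return nil
}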
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { - resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) - - if resp.statusCode == http.StatusUnauthorized { - return registry.AuthenticateOKBody{}, unauthorizedError{err} - } - if err != nil { - return registry.AuthenticateOKBody{}, err - } - - var response registry.AuthenticateOKBody - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go deleted file mode 100644 index af4a61659b..0000000000 --- a/vendor/github.com/docker/docker/client/network_connect.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" -) - -// NetworkConnect connects a container to an existent network in the docker host. -func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go deleted file mode 100644 index fa1d301a4c..0000000000 --- a/vendor/github.com/docker/docker/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go deleted file mode 100644 index 5fc75a9412..0000000000 --- a/vendor/github.com/docker/docker/client/network_disconnect.go +++ /dev/null @@ -1,14 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. 
-func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go deleted file mode 100644 index 3308f18ded..0000000000 --- a/vendor/github.com/docker/docker/client/network_inspect.go +++ /dev/null @@ -1,46 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. -func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { - var ( - networkResource types.NetworkResource - resp serverResponse - err error - ) - query := url.Values{} - if options.Verbose { - query.Set("verbose", "true") - } - if options.Scope != "" { - query.Set("scope", options.Scope) - } - resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) - if err != nil { - return networkResource, nil, wrapResponseError(err, resp, "network", networkID) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go deleted file mode 100644 index 9b424285eb..0000000000 --- a/vendor/github.com/docker/docker/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// NetworkList returns the list of networks configured in the docker host. 
-func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go deleted file mode 100644 index d546f0c304..0000000000 --- a/vendor/github.com/docker/docker/client/network_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// NetworksPrune requests the daemon to delete unused networks -func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { - var report types.NetworksPruneReport - - if err := cli.NewVersionError("1.25", "network prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving network prune report: %v", err) - } - - return report, nil -} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go deleted file mode 100644 index c99de10c83..0000000000 --- a/vendor/github.com/docker/docker/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "network", networkID) -} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go deleted file mode 100644 index 454db9f17b..0000000000 --- a/vendor/github.com/docker/docker/client/node_inspect.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NodeInspectWithRaw returns the node information. 
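The NetworksPrune body above illustrates the version-gating pattern used throughout this client: NewVersionError rejects the call locally when the negotiated API version predates the endpoint (here 1.25), before any request is sent. A small sketch of a prune call; the "until" filter value is illustrative. The removed NodeInspectWithRaw implementation follows.

package main

import (
	"context"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

// pruneOldNetworks removes unused networks older than a day; the report
// returned by the daemon lists the names of deleted networks.
func pruneOldNetworks(ctx context.Context, c client.NetworkAPIClient) ([]string, error) {
	f := filters.NewArgs()
	f.Add("until", "24h")
	report, err := c.NetworksPrune(ctx, f)
	if err != nil {
		return nil, err
	}
	return report.NetworksDeleted, nil
}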
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - if err != nil { - return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go deleted file mode 100644 index c613ad2197..0000000000 --- a/vendor/github.com/docker/docker/client/node_list.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - ensureReaderClosed(resp) - return nodes, err -} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go deleted file mode 100644 index 7c1a757d31..0000000000 --- a/vendor/github.com/docker/docker/client/node_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - - "golang.org/x/net/context" -) - -// NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "node", nodeID) -} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go deleted file mode 100644 index 0b528a4703..0000000000 --- a/vendor/github.com/docker/docker/client/node_update.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// NodeUpdate updates a Node. 
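The node endpoints above expect a read-modify-write cycle: the caller inspects a node, mutates the spec, and sends it back together with the version index it observed, which NodeUpdate (whose removed body follows) encodes as the "version" query parameter so the daemon can reject concurrent updates. A hedged sketch; drainNode is an illustrative name.

package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// drainNode marks a swarm node as drained using optimistic locking via the
// version index read from inspect.
func drainNode(ctx context.Context, c client.NodeAPIClient, nodeID string) error {
	node, _, err := c.NodeInspectWithRaw(ctx, nodeID)
	if err != nil {
		return err
	}
	spec := node.Spec
	spec.Availability = swarm.NodeAvailabilityDrain
	return c.NodeUpdate(ctx, nodeID, node.Version, spec)
}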
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go deleted file mode 100644 index e2366276da..0000000000 --- a/vendor/github.com/docker/docker/client/ping.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "path" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers -func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { - var ping types.Ping - req, err := cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil) - if err != nil { - return ping, err - } - serverResp, err := cli.doRequest(ctx, req) - if err != nil { - return ping, err - } - defer ensureReaderClosed(serverResp) - - if serverResp.header != nil { - ping.APIVersion = serverResp.header.Get("API-Version") - - if serverResp.header.Get("Docker-Experimental") == "true" { - ping.Experimental = true - } - ping.OSType = serverResp.header.Get("OSType") - } - return ping, cli.checkResponseErr(serverResp) -} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go deleted file mode 100644 index 51ef9c9945..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_create.go +++ /dev/null @@ -1,26 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/http" - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// PluginCreate creates a plugin -func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { - headers := http.Header(make(map[string][]string)) - headers.Set("Content-Type", "application/x-tar") - - query := url.Values{} - query.Set("name", createOptions.RepoName) - - resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) - if err != nil { - return err - } - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go deleted file mode 100644 index 8d8c41d226..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go deleted file mode 100644 index 1f0f8f7340..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - 
"strconv" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { - query := url.Values{} - query.Set("timeout", strconv.Itoa(options.Timeout)) - - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go deleted file mode 100644 index 8957a805ec..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ /dev/null @@ -1,28 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// PluginInspectWithRaw inspects an existing plugin -func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { - resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) - if err != nil { - return nil, nil, wrapResponseError(err, resp, "plugin", name) - } - - defer ensureReaderClosed(resp) - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return nil, nil, err - } - var p types.Plugin - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&p) - return &p, body, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go deleted file mode 100644 index ae04387d5d..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ /dev/null @@ -1,113 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "io" - "net/http" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - // set name for plugin pull, if empty should default to remote reference - query.Set("name", name) - - resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) - if err != nil { - return nil, err - } - - name = resp.header.Get("Docker-Plugin-Name") - - pr, pw := io.Pipe() - go func() { // todo: the client should probably be designed more around the actual api - _, err := io.Copy(pw, resp.body) - if err != nil { - pw.CloseWithError(err) - return - } - defer func() { - if err != nil { - delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(delResp) - } - }() - if len(options.Args) > 0 { - if err := cli.PluginSet(ctx, name, options.Args); err != nil { - pw.CloseWithError(err) - return - } - } - - if options.Disabled { - pw.Close() - return - } - - enableErr := cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) - pw.CloseWithError(enableErr) - }() - return pr, nil -} - -func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { - 
headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/plugins/privileges", query, headers) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, privileges, headers) -} - -func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { - resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - // todo: do inspect before to check existing name before checking privileges - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - ensureReaderClosed(resp) - return nil, privilegeErr - } - options.RegistryAuth = newAuthHeader - resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - } - if err != nil { - ensureReaderClosed(resp) - return nil, err - } - - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return nil, err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) - if err != nil { - return nil, err - } - if !accept { - return nil, pluginPermissionDenied{options.RemoteRef} - } - } - return privileges, nil -} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go deleted file mode 100644 index e49876621d..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return plugins, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/plugins", query, nil) - if err != nil { - return plugins, wrapResponseError(err, resp, "plugin", "") - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - ensureReaderClosed(resp) - return plugins, err -} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go deleted file mode 100644 index 33b38d843c..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - - "golang.org/x/net/context" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git 
a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go deleted file mode 100644 index b9e7d8e4a8..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ /dev/null @@ -1,20 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// PluginRemove removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "plugin", name) -} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go deleted file mode 100644 index de72cc0d3a..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ /dev/null @@ -1,12 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "golang.org/x/net/context" -) - -// PluginSet modifies settings for an existing plugin -func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go deleted file mode 100644 index 33b740ca76..0000000000 --- a/vendor/github.com/docker/docker/client/plugin_upgrade.go +++ /dev/null @@ -1,39 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// PluginUpgrade upgrades a plugin -func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { - if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { - return nil, err - } - query := url.Values{} - if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { - return nil, errors.Wrap(err, "invalid remote reference") - } - query.Set("remote", options.RemoteRef) - - privileges, err := cli.checkPluginPermissions(ctx, query, options) - if err != nil { - return nil, err - } - - resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) -} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go deleted file mode 100644 index 986b512dda..0000000000 --- a/vendor/github.com/docker/docker/client/request.go +++ /dev/null @@ -1,262 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "strings" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/versions" - "github.com/pkg/errors" - 
"golang.org/x/net/context" - "golang.org/x/net/context/ctxhttp" -) - -// serverResponse is a wrapper for http API responses. -type serverResponse struct { - body io.ReadCloser - header http.Header - statusCode int - reqURL *url.URL -} - -// head sends an http request to the docker API using the method HEAD. -func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "HEAD", path, query, nil, headers) -} - -// get sends an http request to the docker API using the method GET with a specific Go context. -func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "GET", path, query, nil, headers) -} - -// post sends an http request to the docker API using the method POST with a specific Go context. -func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "POST", path, query, body, headers) -} - -func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, body, headers) -} - -// put sends an http request to the docker API using the method PUT. -func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - body, headers, err := encodeBody(obj, headers) - if err != nil { - return serverResponse{}, err - } - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - -// putRaw sends an http request to the docker API using the method PUT. -func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, body, headers) -} - -// delete sends an http request to the docker API using the method DELETE. -func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -type headers map[string][]string - -func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { - if obj == nil { - return nil, headers, nil - } - - body, err := encodeData(obj) - if err != nil { - return nil, headers, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - return body, headers, nil -} - -func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := http.NewRequest(method, path, body) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, headers) - - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. 
(See #189) - req.Host = "docker" - } - - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - return req, nil -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { - req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) - if err != nil { - return serverResponse{}, err - } - resp, err := cli.doRequest(ctx, req) - if err != nil { - return resp, err - } - if err := cli.checkResponseErr(resp); err != nil { - return resp, err - } - return resp, nil -} - -func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { - serverResp := serverResponse{statusCode: -1, reqURL: req.URL} - - resp, err := ctxhttp.Do(ctx, cli.client, req) - if err != nil { - if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) - } - - // Don't decorate context sentinel errors; users may be comparing to - // them directly. - switch err { - case context.Canceled, context.DeadlineExceeded: - return serverResp, err - } - - if nErr, ok := err.(*url.Error); ok { - if nErr, ok := nErr.Err.(*net.OpError); ok { - if os.IsPermission(nErr.Err) { - return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) - } - } - } - - if err, ok := err.(net.Error); ok { - if err.Timeout() { - return serverResp, ErrorConnectionFailed(cli.host) - } - if !err.Temporary() { - if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrorConnectionFailed(cli.host) - } - } - } - - // Although there's not a strongly typed error for this in go-winio, - // lots of people are using the default configuration for the docker - // daemon on Windows where the daemon is listening on a named pipe - // `//./pipe/docker_engine, and the client must be running elevated. - // Give users a clue rather than the not-overly useful message - // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: - // open //./pipe/docker_engine: The system cannot find the file specified.`. - // Note we can't string compare "The system cannot find the file specified" as - // this is localised - for example in French the error would be - // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` - if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { - err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. 
This error may also indicate that the docker daemon is not running.") - } - - return serverResp, errors.Wrap(err, "error during connect") - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - serverResp.body = resp.Body - serverResp.header = resp.Header - } - return serverResp, nil -} - -func (cli *Client) checkResponseErr(serverResp serverResponse) error { - if serverResp.statusCode >= 200 && serverResp.statusCode < 400 { - return nil - } - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return err - } - if len(body) == 0 { - return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL) - } - - var ct string - if serverResp.header != nil { - ct = serverResp.header.Get("Content-Type") - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return fmt.Errorf("Error reading JSON: %v", err) - } - errorMessage = errorResponse.Message - } else { - errorMessage = string(body) - } - - return fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) -} - -func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { - continue - } - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - return req -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response serverResponse) { - if response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) - response.body.Close() - } -} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go deleted file mode 100644 index fa639caf5b..0000000000 --- a/vendor/github.com/docker/docker/client/secret_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SecretCreate creates a new Secret. 
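The request.go removal above is the heart of the client's HTTP plumbing: buildRequest, sendRequest, doRequest, checkResponseErr, and ensureReaderClosed. One design choice worth spelling out is the body drain in ensureReaderClosed: a tiny standalone restatement of it, under the assumption that this is the only reason for the 512-byte figure stated in its comment. The removed SecretCreate implementation follows.

package main

import (
	"io"
	"io/ioutil"
)

// drainAndClose mirrors the removed ensureReaderClosed helper: draining a
// bounded prefix of the response body lets the HTTP Transport reuse the
// keep-alive connection, while the 512-byte cap avoids reading an
// arbitrarily large body just to throw it away.
func drainAndClose(body io.ReadCloser) {
	if body == nil {
		return
	}
	io.CopyN(ioutil.Discard, body, 512) // best effort; the error is deliberately ignored
	body.Close()
}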
-func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { - var response types.SecretCreateResponse - if err := cli.NewVersionError("1.25", "secret create"); err != nil { - return response, err - } - resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go deleted file mode 100644 index 6ff7549e5c..0000000000 --- a/vendor/github.com/docker/docker/client/secret_inspect.go +++ /dev/null @@ -1,33 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SecretInspectWithRaw returns the secret information with raw data -func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { - if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { - return swarm.Secret{}, nil, err - } - resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) - if err != nil { - return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return swarm.Secret{}, nil, err - } - - var secret swarm.Secret - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&secret) - - return secret, body, err -} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go deleted file mode 100644 index adf4d92be2..0000000000 --- a/vendor/github.com/docker/docker/client/secret_list.go +++ /dev/null @@ -1,38 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SecretList returns the list of secrets. -func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if err := cli.NewVersionError("1.25", "secret list"); err != nil { - return nil, err - } - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/secrets", query, nil) - if err != nil { - return nil, err - } - - var secrets []swarm.Secret - err = json.NewDecoder(resp.body).Decode(&secrets) - ensureReaderClosed(resp) - return secrets, err -} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go deleted file mode 100644 index 89326e0908..0000000000 --- a/vendor/github.com/docker/docker/client/secret_remove.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// SecretRemove removes a Secret. 
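The secret endpoints above are likewise gated on API version 1.25. A minimal creation sketch; the secret name and payload are placeholders, and createSecret is an illustrative name. The removed SecretRemove implementation follows.

package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// createSecret stores opaque bytes in the swarm; the response carries only
// the ID assigned by the daemon.
func createSecret(ctx context.Context, c client.SecretAPIClient) (string, error) {
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "registry-token"},
		Data:        []byte("s3cr3t"),
	}
	resp, err := c.SecretCreate(ctx, spec)
	if err != nil {
		return "", err
	}
	return resp.ID, nil
}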
-func (cli *Client) SecretRemove(ctx context.Context, id string) error { - if err := cli.NewVersionError("1.25", "secret remove"); err != nil { - return err - } - resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "secret", id) -} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go deleted file mode 100644 index 8efd35c1eb..0000000000 --- a/vendor/github.com/docker/docker/client/secret_update.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SecretUpdate attempts to update a Secret -func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { - if err := cli.NewVersionError("1.25", "secret update"); err != nil { - return err - } - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go deleted file mode 100644 index cf85ca4a18..0000000000 --- a/vendor/github.com/docker/docker/client/service_create.go +++ /dev/null @@ -1,166 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "golang.org/x/net/context" -) - -// ServiceCreate creates a new Service. 
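SecretUpdate above uses the same version-index optimistic locking as NodeUpdate. A hedged sketch of the full cycle; relabelSecret and the label key are illustrative, and the note on immutability reflects swarm's behavior that secret data cannot be changed after creation. The removed ServiceCreate implementation follows.

package main

import (
	"context"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

// relabelSecret inspects a secret, mutates its spec, and sends it back with
// the observed version. In practice only labels can change on update.
func relabelSecret(ctx context.Context, c client.SecretAPIClient, id string) error {
	secret, _, err := c.SecretInspectWithRaw(ctx, id)
	if err != nil {
		return err
	}
	spec := secret.Spec
	if spec.Labels == nil {
		spec.Labels = map[string]string{}
	}
	spec.Labels["rotated"] = "true"
	return c.SecretUpdate(ctx, id, secret.Version, spec)
}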
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var distErr error - - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container - if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { - service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} - } - - if err := validateServiceSpec(service); err != nil { - return types.ServiceCreateResponse{}, err - } - - // ensure that the image is tagged - var imgPlatforms []swarm.Platform - if service.TaskTemplate.ContainerSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.ContainerSpec.Image = img - } - } - } - - // ensure that the image is tagged - if service.TaskTemplate.PluginSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.PluginSpec.Remote = img - } - } - } - - if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { - service.TaskTemplate.Placement = &swarm.Placement{} - } - if len(imgPlatforms) > 0 { - service.TaskTemplate.Placement.Platforms = imgPlatforms - } - - var response types.ServiceCreateResponse - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - - if distErr != nil { - response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) - } - - ensureReaderClosed(resp) - return response, err -} - -func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { - distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) - var platforms []swarm.Platform - if err != nil { - return "", nil, err - } - - imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) - - if len(distributionInspect.Platforms) > 0 { - platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) - for _, p := range distributionInspect.Platforms { - // clear architecture field for arm. This is a temporary patch to address - // https://github.com/docker/swarmkit/issues/2294. The issue is that while - // image manifests report "arm" as the architecture, the node reports - // something like "armv7l" (includes the variant), which causes arm images - // to stop working with swarm mode. This patch removes the architecture - // constraint for arm images to ensure tasks get scheduled. 
- arch := p.Architecture - if strings.ToLower(arch) == "arm" { - arch = "" - } - platforms = append(platforms, swarm.Platform{ - Architecture: arch, - OS: p.OS, - }) - } - } - return imageWithDigest, platforms, err -} - -// imageWithDigestString takes an image string and a digest, and updates -// the image string if it didn't originally contain a digest. It returns -// an empty string if there are no updates. -func imageWithDigestString(image string, dgst digest.Digest) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - if _, isCanonical := namedRef.(reference.Canonical); !isCanonical { - // ensure that image gets a default tag if none is provided - img, err := reference.WithDigest(namedRef, dgst) - if err == nil { - return reference.FamiliarString(img) - } - } - } - return "" -} - -// imageWithTagString takes an image string, and returns a tagged image -// string, adding a 'latest' tag if one was not provided. It returns an -// empty string if a canonical reference was provided -func imageWithTagString(image string) string { - namedRef, err := reference.ParseNormalizedNamed(image) - if err == nil { - return reference.FamiliarString(reference.TagNameOnly(namedRef)) - } - return "" -} - -// digestWarning constructs a formatted warning string using the -// image name that could not be pinned by digest. The formatting -// is hardcoded, but could be made smarter in the future -func digestWarning(image string) string { - return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image) -} - -func validateServiceSpec(s swarm.ServiceSpec) error { - if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil { - return errors.New("must not specify both a container spec and a plugin spec in the task template") - } - if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin { - return errors.New("mismatched runtime with plugin spec") - } - if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) { - return errors.New("mismatched runtime with container spec") - } - return nil -} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go deleted file mode 100644 index 67bcc9cfaf..0000000000 --- a/vendor/github.com/docker/docker/client/service_inspect.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceInspectWithRaw returns the service information and the raw data.
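The imageWithTagString helper deleted above performs the tag defaulting that ServiceCreate relies on. A standalone restatement built only on the vendored distribution/reference calls used there; normalizeImage is an illustrative name. normalizeImage("redis") yields "redis:latest", while a reference that already carries a tag or digest comes back in familiar form unchanged. The removed ServiceInspectWithRaw implementation follows.

package main

import (
	"github.com/docker/distribution/reference"
)

// normalizeImage adds a default "latest" tag when none is present,
// mirroring imageWithTagString above (which instead returns "" on error).
func normalizeImage(image string) string {
	named, err := reference.ParseNormalizedNamed(image)
	if err != nil {
		return image // leave unparseable references untouched
	}
	return reference.FamiliarString(reference.TagNameOnly(named))
}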
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { - query := url.Values{} - query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) - serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) - if err != nil { - return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go deleted file mode 100644 index fdb33cc221..0000000000 --- a/vendor/github.com/docker/docker/client/service_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/services", query, nil) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - ensureReaderClosed(resp) - return services, err -} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go deleted file mode 100644 index 4b393bb494..0000000000 --- a/vendor/github.com/docker/docker/client/service_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// ServiceLogs returns the logs generated by a service in an io.ReadCloser. -// It's up to the caller to close the stream. 
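ServiceLogs, whose removed body follows, hands the raw response body to the caller. A hedged usage sketch; note that when the service's tasks run without a TTY the stream is in the multiplexed stdcopy framing, which this sketch does not demultiplex.

package main

import (
	"context"
	"io"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// streamServiceLogs follows a service's logs until ctx is cancelled or the
// stream ends; closing the stream is the caller's responsibility.
func streamServiceLogs(ctx context.Context, c client.ServiceAPIClient, serviceID string, w io.Writer) error {
	rc, err := c.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
		Tail:       "100",
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc)
	return err
}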
-func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go deleted file mode 100644 index 7ef04e8204..0000000000 --- a/vendor/github.com/docker/docker/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import "golang.org/x/net/context" - -// ServiceRemove kills and removes a service. -func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "service", serviceID) -} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go deleted file mode 100644 index 57cb45d8d2..0000000000 --- a/vendor/github.com/docker/docker/client/service_update.go +++ /dev/null @@ -1,92 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceUpdate updates a Service. 
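Like node and secret updates, ServiceUpdate expects the inspect-then-update cycle with a version index; its removed implementation follows. A hedged scaling sketch; scaleService is an illustrative name and the replicated-mode check reflects that only replicated services carry a replica count.

package main

import (
	"context"
	"errors"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// scaleService adjusts the replica count of a replicated service using the
// version index read from inspect to guard against concurrent updates.
func scaleService(ctx context.Context, c client.ServiceAPIClient, id string, replicas uint64) error {
	svc, _, err := c.ServiceInspectWithRaw(ctx, id, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}
	spec := svc.Spec
	if spec.Mode.Replicated == nil {
		return errors.New("service is not in replicated mode")
	}
	spec.Mode.Replicated.Replicas = &replicas
	_, err = c.ServiceUpdate(ctx, id, svc.Version, spec, types.ServiceUpdateOptions{})
	return err
}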
-func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { - var ( - query = url.Values{} - distErr error - ) - - headers := map[string][]string{ - "version": {cli.version}, - } - - if options.EncodedRegistryAuth != "" { - headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} - } - - if options.RegistryAuthFrom != "" { - query.Set("registryAuthFrom", options.RegistryAuthFrom) - } - - if options.Rollback != "" { - query.Set("rollback", options.Rollback) - } - - query.Set("version", strconv.FormatUint(version.Index, 10)) - - if err := validateServiceSpec(service); err != nil { - return types.ServiceUpdateResponse{}, err - } - - var imgPlatforms []swarm.Platform - // ensure that the image is tagged - if service.TaskTemplate.ContainerSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { - service.TaskTemplate.ContainerSpec.Image = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.ContainerSpec.Image = img - } - } - } - - // ensure that the image is tagged - if service.TaskTemplate.PluginSpec != nil { - if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { - service.TaskTemplate.PluginSpec.Remote = taggedImg - } - if options.QueryRegistry { - var img string - img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth) - if img != "" { - service.TaskTemplate.PluginSpec.Remote = img - } - } - } - - if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 { - service.TaskTemplate.Placement = &swarm.Placement{} - } - if len(imgPlatforms) > 0 { - service.TaskTemplate.Placement.Platforms = imgPlatforms - } - - var response types.ServiceUpdateResponse - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - - if distErr != nil { - response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image)) - } - - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/session.go b/vendor/github.com/docker/docker/client/session.go deleted file mode 100644 index b136538c22..0000000000 --- a/vendor/github.com/docker/docker/client/session.go +++ /dev/null @@ -1,19 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net" - "net/http" - - "golang.org/x/net/context" -) - -// DialSession returns a connection that can be used communication with daemon -func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - req, err := http.NewRequest("POST", "/session", nil) - if err != nil { - return nil, err - } - req = cli.addHeaders(req, meta) - - return cli.setupHijackConn(req, proto) -} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go deleted file mode 100644 index c02ae35c6d..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import 
"github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// SwarmGetUnlockKey retrieves the swarm's unlock key. -func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { - serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) - if err != nil { - return types.SwarmUnlockKeyResponse{}, err - } - - var response types.SwarmUnlockKeyResponse - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go deleted file mode 100644 index 2997836317..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmInit initializes the swarm. -func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go deleted file mode 100644 index 02f42aa8a9..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmInspect inspects the swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go deleted file mode 100644 index 13f8d79d6c..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_join.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmJoin joins the swarm. -func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { - resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go deleted file mode 100644 index d0d00cbb4a..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_leave.go +++ /dev/null @@ -1,18 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// SwarmLeave leaves the swarm. 
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { - query := url.Values{} - if force { - query.Set("force", "1") - } - resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go deleted file mode 100644 index 44a26bcc95..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_unlock.go +++ /dev/null @@ -1,13 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmUnlock unlocks locked swarm. -func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { - serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) - ensureReaderClosed(serverResp) - return err -} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go deleted file mode 100644 index df6f1b0dc5..0000000000 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ /dev/null @@ -1,22 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "fmt" - "net/url" - "strconv" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmUpdate updates the swarm. -func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) - query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) - query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) - resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go deleted file mode 100644 index a7b31219a6..0000000000 --- a/vendor/github.com/docker/docker/client/task_inspect.go +++ /dev/null @@ -1,29 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// TaskInspectWithRaw returns the task information and its raw representation.. 
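The *WithRaw methods such as TaskInspectWithRaw below all share one shape: read the body once so the raw bytes can be returned alongside the decoded value, instead of decoding straight off the wire. A distilled sketch of that pattern:

```go
package example

import (
	"bytes"
	"encoding/json"
	"io"
	"io/ioutil"
)

// decodeWithRaw reads r fully, then decodes from an in-memory copy so the
// caller gets both the typed value and the untouched raw JSON.
func decodeWithRaw(r io.Reader, v interface{}) ([]byte, error) {
	body, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	return body, json.NewDecoder(bytes.NewReader(body)).Decode(v)
}
```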
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - if err != nil { - return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Task{}, nil, err - } - - var response swarm.Task - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go deleted file mode 100644 index e56718012c..0000000000 --- a/vendor/github.com/docker/docker/client/task_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" -) - -// TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToJSON(options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - ensureReaderClosed(resp) - return tasks, err -} diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go deleted file mode 100644 index cf0b3f6bba..0000000000 --- a/vendor/github.com/docker/docker/client/task_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types" - timetypes "github.com/docker/docker/api/types/time" -) - -// TaskLogs returns the logs generated by a task in an io.ReadCloser. -// It's up to the caller to close the stream. -func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone.go b/vendor/github.com/docker/docker/client/tlsconfig_clone.go deleted file mode 100644 index 88200e92c3..0000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func tlsConfigClone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go deleted file mode 100644 index e298542367..0000000000 --- a/vendor/github.com/docker/docker/client/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package client // import "github.com/docker/docker/client" - -import "crypto/tls" - -// tlsConfigClone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func tlsConfigClone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go deleted file mode 100644 index 5541344366..0000000000 --- a/vendor/github.com/docker/docker/client/transport.go +++ /dev/null @@ -1,17 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "crypto/tls" - "net/http" -) - -// resolveTLSConfig attempts to resolve the TLS configuration from the -// RoundTripper. -func resolveTLSConfig(transport http.RoundTripper) *tls.Config { - switch tr := transport.(type) { - case *http.Transport: - return tr.TLSClientConfig - default: - return nil - } -} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go deleted file mode 100644 index 7f3ff44eb8..0000000000 --- a/vendor/github.com/docker/docker/client/utils.go +++ /dev/null @@ -1,34 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - "regexp" - - "github.com/docker/docker/api/types/filters" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// getDockerOS returns the operating system based on the server header from the daemon. -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} - -// getFiltersQuery returns a url query with "filters" query term, based on the -// filters provided. 
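The "filters" query term that getFiltersQuery below builds is plain JSON produced by filters.ToJSON, the same call the function makes. A small sketch of the encoding:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	f := filters.NewArgs()
	f.Add("label", "app=web")
	f.Add("dangling", "true")

	js, err := filters.ToJSON(f)
	if err != nil {
		panic(err)
	}
	// Prints a map of maps, e.g. {"dangling":{"true":true},"label":{"app=web":true}}
	fmt.Println(js)
}
```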
-func getFiltersQuery(f filters.Args) (url.Values, error) { - query := url.Values{} - if f.Len() > 0 { - filterJSON, err := filters.ToJSON(f) - if err != nil { - return query, err - } - query.Set("filters", filterJSON) - } - return query, nil -} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go deleted file mode 100644 index 4bb60fab58..0000000000 --- a/vendor/github.com/docker/docker/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) - return server, err -} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go deleted file mode 100644 index 8108b7b92f..0000000000 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - - "github.com/docker/docker/api/types" - volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go deleted file mode 100644 index b72e64d793..0000000000 --- a/vendor/github.com/docker/docker/client/volume_inspect.go +++ /dev/null @@ -1,42 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "path" - - "github.com/docker/docker/api/types" - "golang.org/x/net/context" -) - -// VolumeInspect returns the information about a specific volume in the docker host. 
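The empty-ID guard in VolumeInspectWithRaw below exists because path.Join swallows the trailing separator, silently turning the inspect URL into the list URL:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Join("/volumes", "abc")) // /volumes/abc -> inspect endpoint
	fmt.Println(path.Join("/volumes", ""))    // /volumes     -> volume list endpoint
}
```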
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { - // The empty ID needs to be handled here because with an empty ID the - // request url will not contain a trailing / which calls the volume list API - // instead of volume inspect - if volumeID == "" { - return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} - } - - var volume types.Volume - resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil) - if err != nil { - return volume, nil, wrapResponseError(err, resp, "volume", volumeID) - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return volume, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err -} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go deleted file mode 100644 index aeca1cc1a3..0000000000 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "net/url" - - "github.com/docker/docker/api/types/filters" - volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" -) - -// VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) - return volumes, err -} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go deleted file mode 100644 index 090a6a66df..0000000000 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ /dev/null @@ -1,36 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" -) - -// VolumesPrune requests the daemon to delete unused data -func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { - var report types.VolumesPruneReport - - if err := cli.NewVersionError("1.25", "volume prune"); err != nil { - return report, err - } - - query, err := getFiltersQuery(pruneFilters) - if err != nil { - return report, err - } - - serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) - if err != nil { - return report, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { - return report, fmt.Errorf("Error retrieving volume prune report: %v", err) - } - - return report, nil -} diff --git 
a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go deleted file mode 100644 index 02ee573a73..0000000000 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client // import "github.com/docker/docker/client" - -import ( - "net/url" - - "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" -) - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { - query := url.Values{} - if versions.GreaterThanOrEqualTo(cli.version, "1.25") { - if force { - query.Set("force", "1") - } - } - resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - ensureReaderClosed(resp) - return wrapResponseError(err, resp, "volume", volumeID) -} diff --git a/vendor/github.com/docker/docker/container/archive.go b/vendor/github.com/docker/docker/container/archive.go new file mode 100644 index 0000000000..ed72c4a405 --- /dev/null +++ b/vendor/github.com/docker/docker/container/archive.go @@ -0,0 +1,86 @@ +package container // import "github.com/docker/docker/container" + +import ( + "os" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/system" + "github.com/pkg/errors" +) + +// ResolvePath resolves the given path in the container to a resource on the +// host. Returns a resolved path (absolute path to the resource on the host), +// the absolute path to the resource relative to the container's rootfs, and +// an error if the path points to outside the container's rootfs. +func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) { + if container.BaseFS == nil { + return "", "", errors.New("ResolvePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + // Check if a drive letter supplied, it must be the system drive. No-op except on Windows + path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS) + if err != nil { + return "", "", err + } + + // Consider the given path as an absolute path in the container. + absPath = archive.PreserveTrailingDotOrSeparator( + container.BaseFS.Join(string(container.BaseFS.Separator()), path), + path, + container.BaseFS.Separator()) + + // Split the absPath into its Directory and Base components. We will + // resolve the dir in the scope of the container then append the base. + dirPath, basePath := container.BaseFS.Split(absPath) + + resolvedDirPath, err := container.GetResourcePath(dirPath) + if err != nil { + return "", "", err + } + + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath + return resolvedPath, absPath, nil +} + +// StatPath is the unexported version of StatPath. Locks and mounts should +// be acquired before calling this method and the given path should be fully +// resolved to a path on the host corresponding to the given absolute path +// inside the container. 
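StatPath below hinges on Lstat rather than Stat: Lstat reports on a symlink itself, which is what lets the method surface LinkTarget without following the link on the host. A stripped-down sketch of that check:

```go
package example

import (
	"fmt"
	"os"
)

// describe mirrors the Lstat/ModeSymlink branch: links are reported with
// their target, everything else with its size.
func describe(path string) (string, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return "", err
	}
	if fi.Mode()&os.ModeSymlink != 0 {
		target, err := os.Readlink(path)
		if err != nil {
			return "", err
		}
		return fmt.Sprintf("%s -> %s", path, target), nil
	}
	return fmt.Sprintf("%s (%d bytes)", path, fi.Size()), nil
}
```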
+func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) { + if container.BaseFS == nil { + return nil, errors.New("StatPath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + driver := container.BaseFS + + lstat, err := driver.Lstat(resolvedPath) + if err != nil { + return nil, err + } + + var linkTarget string + if lstat.Mode()&os.ModeSymlink != 0 { + // Fully evaluate the symlink in the scope of the container rootfs. + hostPath, err := container.GetResourcePath(absPath) + if err != nil { + return nil, err + } + + linkTarget, err = driver.Rel(driver.Path(), hostPath) + if err != nil { + return nil, err + } + + // Make it an absolute path. + linkTarget = driver.Join(string(driver.Separator()), linkTarget) + } + + return &types.ContainerPathStat{ + Name: driver.Base(absPath), + Size: lstat.Size(), + Mode: lstat.Mode(), + Mtime: lstat.ModTime(), + LinkTarget: linkTarget, + }, nil +} diff --git a/vendor/github.com/docker/docker/container/container.go b/vendor/github.com/docker/docker/container/container.go new file mode 100644 index 0000000000..5a6a9255be --- /dev/null +++ b/vendor/github.com/docker/docker/container/container.go @@ -0,0 +1,1095 @@ +package container // import "github.com/docker/docker/container" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "net" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/containerd/containerd/cio" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + networktypes "github.com/docker/docker/api/types/network" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog" + "github.com/docker/docker/daemon/network" + "github.com/docker/docker/image" + "github.com/docker/docker/layer" + "github.com/docker/docker/opts" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/signal" + "github.com/docker/docker/pkg/symlink" + "github.com/docker/docker/pkg/system" + "github.com/docker/docker/restartmanager" + "github.com/docker/docker/runconfig" + "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" + "github.com/docker/libnetwork" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + agentexec "github.com/docker/swarmkit/agent/exec" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const configFileName = "config.v2.json" + +var ( + errInvalidEndpoint = errors.New("invalid endpoint while building port map info") + errInvalidNetwork = errors.New("invalid network settings while building port map info") +) + +// ExitStatus provides exit reasons for a container. +type ExitStatus struct { + // The exit code with which the container exited. + ExitCode int + + // Whether the container encountered an OOM. + OOMKilled bool + + // Time at which the container died + ExitedAt time.Time +} + +// Container holds the structure defining a container object. +type Container struct { + StreamConfig *stream.Config + // embed for Container to support states directly. 
+ *State `json:"State"` // Needed for Engine API version <= 1.11 + Root string `json:"-"` // Path to the "home" of the container, including metadata. + BaseFS containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount + RWLayer layer.RWLayer `json:"-"` + ID string + Created time.Time + Managed bool + Path string + Args []string + Config *containertypes.Config + ImageID image.ID `json:"Image"` + NetworkSettings *network.Settings + LogPath string + Name string + Driver string + OS string + // MountLabel contains the options for the 'mount' command + MountLabel string + ProcessLabel string + RestartCount int + HasBeenStartedBefore bool + HasBeenManuallyStopped bool // used for unless-stopped restart policy + MountPoints map[string]*volumemounts.MountPoint + HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable + ExecCommands *exec.Store `json:"-"` + DependencyStore agentexec.DependencyGetter `json:"-"` + SecretReferences []*swarmtypes.SecretReference + ConfigReferences []*swarmtypes.ConfigReference + // logDriver for closing + LogDriver logger.Logger `json:"-"` + LogCopier *logger.Copier `json:"-"` + restartManager restartmanager.RestartManager + attachContext *attachContext + + // Fields here are specific to Unix platforms + AppArmorProfile string + HostnamePath string + HostsPath string + ShmPath string + ResolvConfPath string + SeccompProfile string + NoNewPrivileges bool + + // Fields here are specific to Windows + NetworkSharedContainerID string `json:"-"` + SharedEndpointList []string `json:"-"` +} + +// NewBaseContainer creates a new container with its +// basic configuration. +func NewBaseContainer(id, root string) *Container { + return &Container{ + ID: id, + State: NewState(), + ExecCommands: exec.NewStore(), + Root: root, + MountPoints: make(map[string]*volumemounts.MountPoint), + StreamConfig: stream.NewConfig(), + attachContext: &attachContext{}, + } +} + +// FromDisk loads the container configuration stored in the host. +func (container *Container) FromDisk() error { + pth, err := container.ConfigPath() + if err != nil { + return err + } + + jsonSource, err := os.Open(pth) + if err != nil { + return err + } + defer jsonSource.Close() + + dec := json.NewDecoder(jsonSource) + + // Load container settings + if err := dec.Decode(container); err != nil { + return err + } + + // Ensure the operating system is set if blank. Assume it is the OS of the + // host OS if not, to ensure containers created before multiple-OS + // support are migrated + if container.OS == "" { + container.OS = runtime.GOOS + } + + return container.readHostConfig() +} + +// toDisk saves the container configuration on disk and returns a deep copy. +func (container *Container) toDisk() (*Container, error) { + var ( + buf bytes.Buffer + deepCopy Container + ) + pth, err := container.ConfigPath() + if err != nil { + return nil, err + } + + // Save container settings + f, err := ioutils.NewAtomicFileWriter(pth, 0600) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(container); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + deepCopy.HostConfig, err = container.WriteHostConfig() + if err != nil { + return nil, err + } + + return &deepCopy, nil +} + +// CheckpointTo makes the Container's current state visible to queries, and persists state. 
+// Callers must hold a Container lock. +func (container *Container) CheckpointTo(store ViewDB) error { + deepCopy, err := container.toDisk() + if err != nil { + return err + } + return store.Save(deepCopy) +} + +// readHostConfig reads the host configuration from disk for the container. +func (container *Container) readHostConfig() error { + container.HostConfig = &containertypes.HostConfig{} + // If the hostconfig file does not exist, do not read it. + // (We still have to initialize container.HostConfig, + // but that's OK, since we just did that above.) + pth, err := container.HostConfigPath() + if err != nil { + return err + } + + f, err := os.Open(pth) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + defer f.Close() + + if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { + return err + } + + container.InitDNSHostConfig() + + return nil +} + +// WriteHostConfig saves the host configuration on disk for the container, +// and returns a deep copy of the saved object. Callers must hold a Container lock. +func (container *Container) WriteHostConfig() (*containertypes.HostConfig, error) { + var ( + buf bytes.Buffer + deepCopy containertypes.HostConfig + ) + + pth, err := container.HostConfigPath() + if err != nil { + return nil, err + } + + f, err := ioutils.NewAtomicFileWriter(pth, 0644) + if err != nil { + return nil, err + } + defer f.Close() + + w := io.MultiWriter(&buf, f) + if err := json.NewEncoder(w).Encode(&container.HostConfig); err != nil { + return nil, err + } + + if err := json.NewDecoder(&buf).Decode(&deepCopy); err != nil { + return nil, err + } + return &deepCopy, nil +} + +// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir +func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error { + // TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting. + // We will need to do remote filesystem operations here. + if container.OS != runtime.GOOS { + return nil + } + + if container.Config.WorkingDir == "" { + return nil + } + + container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) + pth, err := container.GetResourcePath(container.Config.WorkingDir) + if err != nil { + return err + } + + if err := idtools.MkdirAllAndChownNew(pth, 0755, rootIDs); err != nil { + pthInfo, err2 := os.Stat(pth) + if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { + return errors.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) + } + + return err + } + + return nil +} + +// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path +// sanitisation. Symlinks are all scoped to the BaseFS of the container, as +// though the container's BaseFS was `/`. +// +// The BaseFS of a container is the host-facing path which is bind-mounted as +// `/` inside the container. This method is essentially used to access a +// particular path inside the container as though you were a process in that +// container. +// +// NOTE: The returned path is *only* safely scoped inside the container's BaseFS +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. 
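A sketch of the scoped resolution that GetRootResourcePath below delegates to: symlink.FollowSymlinkInScope evaluates every link as though root were "/", so the result cannot escape root. The container root path here is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	root := "/var/lib/docker/containers/abc123"
	// Even if hostconfig.json were a symlink pointing at /etc/passwd, the
	// resolved path would stay confined under root.
	p, err := symlink.FollowSymlinkInScope(root+"/hostconfig.json", root)
	if err != nil {
		panic(err)
	}
	fmt.Println(p)
}
```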
+func (container *Container) GetResourcePath(path string) (string, error) { + if container.BaseFS == nil { + return "", errors.New("GetResourcePath: BaseFS of container " + container.ID + " is unexpectedly nil") + } + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + r, e := container.BaseFS.ResolveScopedPath(path, false) + + // Log this here on the daemon side as there's otherwise no indication apart + // from the error being propagated all the way back to the client. This makes + // debugging significantly easier and clearly indicates the error comes from the daemon. + if e != nil { + logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e) + } + return r, e +} + +// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path +// sanitisation. Symlinks are all scoped to the root of the container, as +// though the container's root was `/`. +// +// The root of a container is the host-facing configuration metadata directory. +// Only use this method to safely access the container's `container.json` or +// other metadata files. If in doubt, use container.GetResourcePath. +// +// NOTE: The returned path is *only* safely scoped inside the container's root +// if no component of the returned path changes (such as a component +// symlinking to a different path) between using this method and using the +// path. See symlink.FollowSymlinkInScope for more details. +func (container *Container) GetRootResourcePath(path string) (string, error) { + // IMPORTANT - These are paths on the OS where the daemon is running, hence + // any filepath operations must be done in an OS agnostic way. + cleanPath := filepath.Join(string(os.PathSeparator), path) + return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) +} + +// ExitOnNext signals to the monitor that it should not restart the container +// after we send the kill signal. +func (container *Container) ExitOnNext() { + container.RestartManager().Cancel() +} + +// HostConfigPath returns the path to the container's JSON hostconfig +func (container *Container) HostConfigPath() (string, error) { + return container.GetRootResourcePath("hostconfig.json") +} + +// ConfigPath returns the path to the container's JSON config +func (container *Container) ConfigPath() (string, error) { + return container.GetRootResourcePath(configFileName) +} + +// CheckpointDir returns the directory checkpoints are stored in +func (container *Container) CheckpointDir() string { + return filepath.Join(container.Root, "checkpoints") +} + +// StartLogger starts a new logger driver for the container. 
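StartLogger below parses the non-blocking log mode's max-buffer-size option with units.RAMInBytes, so human-readable sizes become byte counts for the ring buffer:

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	for _, s := range []string{"64k", "1m", "4m"} {
		n, err := units.RAMInBytes(s)
		if err != nil {
			panic(err)
		}
		// e.g. 64k = 65536 bytes, 1m = 1048576 bytes
		fmt.Printf("%s = %d bytes\n", s, n)
	}
}
```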
+func (container *Container) StartLogger() (logger.Logger, error) { + cfg := container.HostConfig.LogConfig + initDriver, err := logger.GetLogDriver(cfg.Type) + if err != nil { + return nil, errors.Wrap(err, "failed to get logging factory") + } + info := logger.Info{ + Config: cfg.Config, + ContainerID: container.ID, + ContainerName: container.Name, + ContainerEntrypoint: container.Path, + ContainerArgs: container.Args, + ContainerImageID: container.ImageID.String(), + ContainerImageName: container.Config.Image, + ContainerCreated: container.Created, + ContainerEnv: container.Config.Env, + ContainerLabels: container.Config.Labels, + DaemonName: "docker", + } + + // Set logging file for "json-logger" + if cfg.Type == jsonfilelog.Name { + info.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) + if err != nil { + return nil, err + } + + container.LogPath = info.LogPath + } + + l, err := initDriver(info) + if err != nil { + return nil, err + } + + if containertypes.LogMode(cfg.Config["mode"]) == containertypes.LogModeNonBlock { + bufferSize := int64(-1) + if s, exists := cfg.Config["max-buffer-size"]; exists { + bufferSize, err = units.RAMInBytes(s) + if err != nil { + return nil, err + } + } + l = logger.NewRingLogger(l, info, bufferSize) + } + return l, nil +} + +// GetProcessLabel returns the process label for the container. +func (container *Container) GetProcessLabel() string { + // even if we have a process label return "" if we are running + // in privileged mode + if container.HostConfig.Privileged { + return "" + } + return container.ProcessLabel +} + +// GetMountLabel returns the mounting label for the container. +// This label is empty if the container is privileged. +func (container *Container) GetMountLabel() string { + return container.MountLabel +} + +// GetExecIDs returns the list of exec commands running on the container. +func (container *Container) GetExecIDs() []string { + return container.ExecCommands.List() +} + +// ShouldRestart decides whether the daemon should restart the container or not. +// This is based on the container's restart policy. +func (container *Container) ShouldRestart() bool { + shouldRestart, _, _ := container.RestartManager().ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) + return shouldRestart +} + +// AddMountPointWithVolume adds a new mount point configured with a volume to the container. 
+func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { + operatingSystem := container.OS + if operatingSystem == "" { + operatingSystem = runtime.GOOS + } + volumeParser := volumemounts.NewParser(operatingSystem) + container.MountPoints[destination] = &volumemounts.MountPoint{ + Type: mounttypes.TypeVolume, + Name: vol.Name(), + Driver: vol.DriverName(), + Destination: destination, + RW: rw, + Volume: vol, + CopyData: volumeParser.DefaultCopyMode(), + } +} + +// UnmountVolumes unmounts all volumes +func (container *Container) UnmountVolumes(volumeEventLog func(name, action string, attributes map[string]string)) error { + var errors []string + for _, volumeMount := range container.MountPoints { + if volumeMount.Volume == nil { + continue + } + + if err := volumeMount.Cleanup(); err != nil { + errors = append(errors, err.Error()) + continue + } + + attributes := map[string]string{ + "driver": volumeMount.Volume.DriverName(), + "container": container.ID, + } + volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) + } + if len(errors) > 0 { + return fmt.Errorf("error while unmounting volumes for container %s: %s", container.ID, strings.Join(errors, "; ")) + } + return nil +} + +// IsDestinationMounted checks whether a path is mounted on the container or not. +func (container *Container) IsDestinationMounted(destination string) bool { + return container.MountPoints[destination] != nil +} + +// StopSignal returns the signal used to stop the container. +func (container *Container) StopSignal() int { + var stopSignal syscall.Signal + if container.Config.StopSignal != "" { + stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) + } + + if int(stopSignal) == 0 { + stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) + } + return int(stopSignal) +} + +// StopTimeout returns the timeout (in seconds) used to stop the container. +func (container *Container) StopTimeout() int { + if container.Config.StopTimeout != nil { + return *container.Config.StopTimeout + } + return DefaultStopTimeout +} + +// InitDNSHostConfig ensures that the dns fields are never nil. +// New containers don't ever have those fields nil, +// but pre created containers can still have those nil values. +// The non-recommended host configuration in the start api can +// make these fields nil again, this corrects that issue until +// we remove that behavior for good. +// See https://github.com/docker/docker/pull/17779 +// for a more detailed explanation on why we don't want that. +func (container *Container) InitDNSHostConfig() { + container.Lock() + defer container.Unlock() + if container.HostConfig.DNS == nil { + container.HostConfig.DNS = make([]string, 0) + } + + if container.HostConfig.DNSSearch == nil { + container.HostConfig.DNSSearch = make([]string, 0) + } + + if container.HostConfig.DNSOptions == nil { + container.HostConfig.DNSOptions = make([]string, 0) + } +} + +// GetEndpointInNetwork returns the container's endpoint to the provided network. 
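The port-map code below fills in nat.Port and nat.PortMap values from the libnetwork endpoint's driver info. A small sketch of those shapes, with hypothetical host addresses:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	p, err := nat.NewPort("tcp", "80")
	if err != nil {
		panic(err)
	}
	// A PortMap relates a container port to zero or more host bindings.
	pm := nat.PortMap{
		p: []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "8080"}},
	}
	fmt.Printf("%s/%s -> %v\n", p.Port(), p.Proto(), pm[p])
}
```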
+func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { + endpointName := strings.TrimPrefix(container.Name, "/") + return n.EndpointByName(endpointName) +} + +func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + if len(networkSettings.Ports) == 0 { + pm, err := getEndpointPortMapInfo(ep) + if err != nil { + return err + } + networkSettings.Ports = pm + } + return nil +} + +func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { + pm := nat.PortMap{} + driverInfo, err := ep.DriverInfo() + if err != nil { + return pm, err + } + + if driverInfo == nil { + // It is not an error for driverInfo to be nil + return pm, nil + } + + if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { + if exposedPorts, ok := expData.([]types.TransportPort); ok { + for _, tp := range exposedPorts { + natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) + if err != nil { + return pm, fmt.Errorf("Error parsing Port value (%v): %v", tp.Port, err) + } + pm[natPort] = nil + } + } + } + + mapData, ok := driverInfo[netlabel.PortMap] + if !ok { + return pm, nil + } + + if portMapping, ok := mapData.([]types.PortBinding); ok { + for _, pp := range portMapping { + natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) + if err != nil { + return pm, err + } + natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} + pm[natPort] = append(pm[natPort], natBndg) + } + } + + return pm, nil +} + +// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox +func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { + pm := nat.PortMap{} + if sb == nil { + return pm + } + + for _, ep := range sb.Endpoints() { + pm, _ = getEndpointPortMapInfo(ep) + if len(pm) > 0 { + break + } + } + return pm +} + +// BuildEndpointInfo sets endpoint-related fields on container.NetworkSettings based on the provided network and endpoint.
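BuildEndpointInfo below derives IPAddress and IPPrefixLen from an interface address via Mask.Size(). The arithmetic in isolation:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	ip, ipnet, err := net.ParseCIDR("172.17.0.2/16")
	if err != nil {
		panic(err)
	}
	ones, _ := ipnet.Mask.Size()
	// Prints: IPAddress=172.17.0.2 IPPrefixLen=16
	fmt.Printf("IPAddress=%s IPPrefixLen=%d\n", ip, ones)
}
```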
+func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { + if ep == nil { + return errInvalidEndpoint + } + + networkSettings := container.NetworkSettings + if networkSettings == nil { + return errInvalidNetwork + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + + if _, ok := networkSettings.Networks[n.Name()]; !ok { + networkSettings.Networks[n.Name()] = &network.EndpointSettings{ + EndpointSettings: &networktypes.EndpointSettings{}, + } + } + networkSettings.Networks[n.Name()].NetworkID = n.ID() + networkSettings.Networks[n.Name()].EndpointID = ep.ID() + + iface := epInfo.Iface() + if iface == nil { + return nil + } + + if iface.MacAddress() != nil { + networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() + } + + if iface.Address() != nil { + ones, _ := iface.Address().Mask.Size() + networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() + networkSettings.Networks[n.Name()].IPPrefixLen = ones + } + + if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { + onesv6, _ := iface.AddressIPv6().Mask.Size() + networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() + networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 + } + + return nil +} + +type named interface { + Name() string +} + +// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. +func (container *Container) UpdateJoinInfo(n named, ep libnetwork.Endpoint) error { + if err := container.buildPortMapInfo(ep); err != nil { + return err + } + + epInfo := ep.Info() + if epInfo == nil { + // It is not an error to get an empty endpoint info + return nil + } + if epInfo.Gateway() != nil { + container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() + } + if epInfo.GatewayIPv6().To16() != nil { + container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() + } + + return nil +} + +// UpdateSandboxNetworkSettings updates the sandbox ID and Key. +func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { + container.NetworkSettings.SandboxID = sb.ID() + container.NetworkSettings.SandboxKey = sb.Key() + return nil +} + +// BuildJoinOptions builds endpoint Join options from a given network. +func (container *Container) BuildJoinOptions(n named) ([]libnetwork.EndpointOption, error) { + var joinOptions []libnetwork.EndpointOption + if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { + for _, str := range epConfig.Links { + name, alias, err := opts.ParseLink(str) + if err != nil { + return nil, err + } + joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) + } + for k, v := range epConfig.DriverOpts { + joinOptions = append(joinOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + return joinOptions, nil +} + +// BuildCreateEndpointOptions builds endpoint options from a given network. 
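Inside BuildCreateEndpointOptions below, host-port bindings are parsed with nat.SplitProtoPort and nat.NewPort, and a binding such as "8000-8005/tcp" expands into a start/end pair via Range(). A sketch of that parsing path:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// SplitProtoPort returns (proto, port), which is exactly what NewPort takes.
	p, err := nat.NewPort(nat.SplitProtoPort("8000-8005/tcp"))
	if err != nil {
		panic(err)
	}
	start, end, err := p.Range()
	if err != nil {
		panic(err)
	}
	fmt.Printf("start=%d end=%d\n", start, end) // start=8000 end=8005
}
```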
+func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox, daemonDNS []string) ([]libnetwork.EndpointOption, error) { + var ( + bindings = make(nat.PortMap) + pbList []types.PortBinding + exposeList []types.TransportPort + createOptions []libnetwork.EndpointOption + ) + + defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() + + if (!container.EnableServiceDiscoveryOnDefaultNetwork() && n.Name() == defaultNetName) || + container.NetworkSettings.IsAnonymousEndpoint { + createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) + } + + if epConfig != nil { + ipam := epConfig.IPAMConfig + + if ipam != nil { + var ( + ipList []net.IP + ip, ip6, linkip net.IP + ) + + for _, ips := range ipam.LinkLocalIPs { + if linkip = net.ParseIP(ips); linkip == nil && ips != "" { + return nil, errors.Errorf("Invalid link-local IP address: %s", ipam.LinkLocalIPs) + } + ipList = append(ipList, linkip) + + } + + if ip = net.ParseIP(ipam.IPv4Address); ip == nil && ipam.IPv4Address != "" { + return nil, errors.Errorf("Invalid IPv4 address: %s)", ipam.IPv4Address) + } + + if ip6 = net.ParseIP(ipam.IPv6Address); ip6 == nil && ipam.IPv6Address != "" { + return nil, errors.Errorf("Invalid IPv6 address: %s)", ipam.IPv6Address) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionIpam(ip, ip6, ipList, nil)) + + } + + for _, alias := range epConfig.Aliases { + createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) + } + for k, v := range epConfig.DriverOpts { + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(options.Generic{k: v})) + } + } + + if container.NetworkSettings.Service != nil { + svcCfg := container.NetworkSettings.Service + + var vip string + if svcCfg.VirtualAddresses[n.ID()] != nil { + vip = svcCfg.VirtualAddresses[n.ID()].IPv4 + } + + var portConfigs []*libnetwork.PortConfig + for _, portConfig := range svcCfg.ExposedPorts { + portConfigs = append(portConfigs, &libnetwork.PortConfig{ + Name: portConfig.Name, + Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + + createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) + } + + if !containertypes.NetworkMode(n.Name()).IsUserDefined() { + createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) + } + + // configs that are applicable only for the endpoint in the network + // to which container was connected to on docker run. 
+ // Ideally all these network-specific endpoint configurations must be moved under + // container.NetworkSettings.Networks[n.Name()] + if n.Name() == container.HostConfig.NetworkMode.NetworkName() || + (n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) { + if container.Config.MacAddress != "" { + mac, err := net.ParseMAC(container.Config.MacAddress) + if err != nil { + return nil, err + } + + genericOption := options.Generic{ + netlabel.MacAddress: mac, + } + + createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption)) + } + + } + + // Port-mapping rules belong to the container & applicable only to non-internal networks + portmaps := GetSandboxPortMapInfo(sb) + if n.Info().Internal() || len(portmaps) > 0 { + return createOptions, nil + } + + if container.HostConfig.PortBindings != nil { + for p, b := range container.HostConfig.PortBindings { + bindings[p] = []nat.PortBinding{} + for _, bb := range b { + bindings[p] = append(bindings[p], nat.PortBinding{ + HostIP: bb.HostIP, + HostPort: bb.HostPort, + }) + } + } + } + + portSpecs := container.Config.ExposedPorts + ports := make([]nat.Port, len(portSpecs)) + var i int + for p := range portSpecs { + ports[i] = p + i++ + } + nat.SortPortMap(ports, bindings) + for _, port := range ports { + expose := types.TransportPort{} + expose.Proto = types.ParseProtocol(port.Proto()) + expose.Port = uint16(port.Int()) + exposeList = append(exposeList, expose) + + pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} + binding := bindings[port] + for i := 0; i < len(binding); i++ { + pbCopy := pb.GetCopy() + newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) + var portStart, portEnd int + if err == nil { + portStart, portEnd, err = newP.Range() + } + if err != nil { + return nil, errors.Wrapf(err, "Error parsing HostPort value (%s)", binding[i].HostPort) + } + pbCopy.HostPort = uint16(portStart) + pbCopy.HostPortEnd = uint16(portEnd) + pbCopy.HostIP = net.ParseIP(binding[i].HostIP) + pbList = append(pbList, pbCopy) + } + + if container.HostConfig.PublishAllPorts && len(binding) == 0 { + pbList = append(pbList, pb) + } + } + + var dns []string + + if len(container.HostConfig.DNS) > 0 { + dns = container.HostConfig.DNS + } else if len(daemonDNS) > 0 { + dns = daemonDNS + } + + if len(dns) > 0 { + createOptions = append(createOptions, + libnetwork.CreateOptionDNS(dns)) + } + + createOptions = append(createOptions, + libnetwork.CreateOptionPortMapping(pbList), + libnetwork.CreateOptionExposedPorts(exposeList)) + + return createOptions, nil +} + +// UpdateMonitor updates monitor configure for running container +func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) { + type policySetter interface { + SetPolicy(containertypes.RestartPolicy) + } + + if rm, ok := container.RestartManager().(policySetter); ok { + rm.SetPolicy(restartPolicy) + } +} + +// FullHostname returns hostname and optional domain appended to it. +func (container *Container) FullHostname() string { + fullHostname := container.Config.Hostname + if container.Config.Domainname != "" { + fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname) + } + return fullHostname +} + +// RestartManager returns the current restartmanager instance connected to container. 
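The attachContext type declared a bit below is a lazily created, mutex-guarded context whose cancellation detaches every attach waiter at once. A generic reduction of the same pattern:

```go
package example

import (
	"context"
	"sync"
)

// attachCtx lazily creates one shared context; cancelAll tears it down so
// every goroutine waiting on ctx.Done() unblocks together.
type attachCtx struct {
	mu     sync.Mutex
	ctx    context.Context
	cancel context.CancelFunc
}

func (a *attachCtx) get() context.Context {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.ctx == nil {
		a.ctx, a.cancel = context.WithCancel(context.Background())
	}
	return a.ctx
}

func (a *attachCtx) cancelAll() {
	a.mu.Lock()
	defer a.mu.Unlock()
	if a.ctx != nil {
		a.cancel()
		a.ctx = nil
	}
}
```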
+func (container *Container) RestartManager() restartmanager.RestartManager { + if container.restartManager == nil { + container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount) + } + return container.restartManager +} + +// ResetRestartManager initializes new restartmanager based on container config +func (container *Container) ResetRestartManager(resetCount bool) { + if container.restartManager != nil { + container.restartManager.Cancel() + } + if resetCount { + container.RestartCount = 0 + } + container.restartManager = nil +} + +type attachContext struct { + ctx context.Context + cancel context.CancelFunc + mu sync.Mutex +} + +// InitAttachContext initializes or returns existing context for attach calls to +// track container liveness. +func (container *Container) InitAttachContext() context.Context { + container.attachContext.mu.Lock() + defer container.attachContext.mu.Unlock() + if container.attachContext.ctx == nil { + container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background()) + } + return container.attachContext.ctx +} + +// CancelAttachContext cancels attach context. All attach calls should detach +// after this call. +func (container *Container) CancelAttachContext() { + container.attachContext.mu.Lock() + if container.attachContext.ctx != nil { + container.attachContext.cancel() + container.attachContext.ctx = nil + } + container.attachContext.mu.Unlock() +} + +func (container *Container) startLogging() error { + if container.HostConfig.LogConfig.Type == "none" { + return nil // do not start logging routines + } + + l, err := container.StartLogger() + if err != nil { + return fmt.Errorf("failed to initialize logging driver: %v", err) + } + + copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) + container.LogCopier = copier + copier.Run() + container.LogDriver = l + + return nil +} + +// StdinPipe gets the stdin stream of the container +func (container *Container) StdinPipe() io.WriteCloser { + return container.StreamConfig.StdinPipe() +} + +// StdoutPipe gets the stdout stream of the container +func (container *Container) StdoutPipe() io.ReadCloser { + return container.StreamConfig.StdoutPipe() +} + +// StderrPipe gets the stderr stream of the container +func (container *Container) StderrPipe() io.ReadCloser { + return container.StreamConfig.StderrPipe() +} + +// CloseStreams closes the container's stdio streams +func (container *Container) CloseStreams() error { + return container.StreamConfig.CloseStreams() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. 
+func (container *Container) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) {
+	if err := container.startLogging(); err != nil {
+		container.Reset(false)
+		return nil, err
+	}
+
+	container.StreamConfig.CopyToPipe(iop)
+
+	if container.StreamConfig.Stdin() == nil && !container.Config.Tty {
+		if iop.Stdin != nil {
+			if err := iop.Stdin.Close(); err != nil {
+				logrus.Warnf("error closing stdin: %+v", err)
+			}
+		}
+	}
+
+	return &rio{IO: iop, sc: container.StreamConfig}, nil
+}
+
+// MountsResourcePath returns the path where mounts are stored for the given mount
+func (container *Container) MountsResourcePath(mount string) (string, error) {
+	return container.GetRootResourcePath(filepath.Join("mounts", mount))
+}
+
+// SecretMountPath returns the path of the secret mount for the container
+func (container *Container) SecretMountPath() (string, error) {
+	return container.MountsResourcePath("secrets")
+}
+
+// SecretFilePath returns the path to the location of a secret on the host.
+func (container *Container) SecretFilePath(secretRef swarmtypes.SecretReference) (string, error) {
+	secrets, err := container.SecretMountPath()
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(secrets, secretRef.SecretID), nil
+}
+
+func getSecretTargetPath(r *swarmtypes.SecretReference) string {
+	if filepath.IsAbs(r.File.Name) {
+		return r.File.Name
+	}
+
+	return filepath.Join(containerSecretMountPath, r.File.Name)
+}
+
+// CreateDaemonEnvironment creates a new environment variable slice for this container.
+func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string {
+	// Setup environment
+	os := container.OS
+	if os == "" {
+		os = runtime.GOOS
+	}
+	env := []string{}
+	if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") {
+		env = []string{
+			"PATH=" + system.DefaultPathEnv(os),
+			"HOSTNAME=" + container.Config.Hostname,
+		}
+		if tty {
+			env = append(env, "TERM=xterm")
+		}
+		env = append(env, linkedEnv...)
+	}
+
+	// because the env on the container can override certain default values
+	// we need to replace the 'env' keys where they match and append anything
+	// else.
+	env = ReplaceOrAppendEnvValues(env, container.Config.Env)
+	return env
+}
+
+type rio struct {
+	cio.IO
+
+	sc *stream.Config
+}
+
+func (i *rio) Close() error {
+	i.IO.Close()
+
+	return i.sc.CloseStreams()
+}
+
+func (i *rio) Wait() {
+	i.sc.Wait()
+
+	i.IO.Wait()
+}
diff --git a/vendor/github.com/docker/docker/container/container_linux.go b/vendor/github.com/docker/docker/container/container_linux.go
new file mode 100644
index 0000000000..ea2ebabaaa
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/container_linux.go
@@ -0,0 +1,9 @@
+package container // import "github.com/docker/docker/container"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func detachMounted(path string) error {
+	return unix.Unmount(path, unix.MNT_DETACH)
+}
diff --git a/vendor/github.com/docker/docker/container/container_notlinux.go b/vendor/github.com/docker/docker/container/container_notlinux.go
new file mode 100644
index 0000000000..dc9751b8c7
--- /dev/null
+++ b/vendor/github.com/docker/docker/container/container_notlinux.go
@@ -0,0 +1,23 @@
+// +build freebsd
+
+package container // import "github.com/docker/docker/container"
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func detachMounted(path string) error {
+	// FreeBSD does not support the lazy unmount or MNT_DETACH feature.
+	// Therefore a separate definition is needed for it.
+ return unix.Unmount(path, 0) +} + +// SecretMounts returns the mounts for the secret path +func (container *Container) SecretMounts() []Mount { + return nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + return nil +} diff --git a/vendor/github.com/docker/docker/container/container_unix.go b/vendor/github.com/docker/docker/container/container_unix.go new file mode 100644 index 0000000000..9397cdf60b --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_unix.go @@ -0,0 +1,462 @@ +// +build linux freebsd + +package container // import "github.com/docker/docker/container" + +import ( + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + volumemounts "github.com/docker/docker/volume/mounts" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +const ( + // DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container. + DefaultStopTimeout = 10 + + containerSecretMountPath = "/run/secrets" +) + +// TrySetNetworkMount attempts to set the network mounts given a provided destination and +// the path to use for it; return true if the given destination was a network mount file +func (container *Container) TrySetNetworkMount(destination string, path string) bool { + if destination == "/etc/resolv.conf" { + container.ResolvConfPath = path + return true + } + if destination == "/etc/hostname" { + container.HostnamePath = path + return true + } + if destination == "/etc/hosts" { + container.HostsPath = path + return true + } + + return false +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + hostnamePath, err := container.GetRootResourcePath("hostname") + if err != nil { + return err + } + container.HostnamePath = hostnamePath + return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) +} + +// NetworkMounts returns the list of network mounts. 
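+// Callers typically splice the returned mounts into the OCI spec; an
+// illustrative loop (addBindMount is a hypothetical helper):
+//
+//	for _, m := range c.NetworkMounts() {
+//		addBindMount(spec, m.Source, m.Destination, m.Writable)
+//	}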
+func (container *Container) NetworkMounts() []Mount { + var mounts []Mount + shared := container.HostConfig.NetworkMode.IsContainer() + parser := volumemounts.NewParser(container.OS) + if container.ResolvConfPath != "" { + if _, err := os.Stat(container.ResolvConfPath); err != nil { + logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { + writable = m.RW + } else { + label.Relabel(container.ResolvConfPath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.ResolvConfPath, + Destination: "/etc/resolv.conf", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + if container.HostnamePath != "" { + if _, err := os.Stat(container.HostnamePath); err != nil { + logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hostname"]; exists { + writable = m.RW + } else { + label.Relabel(container.HostnamePath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.HostnamePath, + Destination: "/etc/hostname", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + if container.HostsPath != "" { + if _, err := os.Stat(container.HostsPath); err != nil { + logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) + } else { + writable := !container.HostConfig.ReadonlyRootfs + if m, exists := container.MountPoints["/etc/hosts"]; exists { + writable = m.RW + } else { + label.Relabel(container.HostsPath, container.MountLabel, shared) + } + mounts = append(mounts, Mount{ + Source: container.HostsPath, + Destination: "/etc/hosts", + Writable: writable, + Propagation: string(parser.DefaultPropagationMode()), + }) + } + } + return mounts +} + +// CopyImagePathContent copies files in destination to the volume. 
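+// Illustrative use when a volume is first mounted over image content
+// (v is any volume.Volume; "/data" is a hypothetical in-container path):
+//
+//	err := c.CopyImagePathContent(v, "/data")
+//
+// The copy is skipped when the image path does not exist and only happens
+// while the volume is still empty (see copyExistingContents).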
+func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { + rootfs, err := container.GetResourcePath(destination) + if err != nil { + return err + } + + if _, err = ioutil.ReadDir(rootfs); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + id := stringid.GenerateNonCryptoID() + path, err := v.Mount(id) + if err != nil { + return err + } + + defer func() { + if err := v.Unmount(id); err != nil { + logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) + } + }() + if err := label.Relabel(path, container.MountLabel, true); err != nil && err != unix.ENOTSUP { + return err + } + return copyExistingContents(rootfs, path) +} + +// ShmResourcePath returns path to shm +func (container *Container) ShmResourcePath() (string, error) { + return container.MountsResourcePath("shm") +} + +// HasMountFor checks if path is a mountpoint +func (container *Container) HasMountFor(path string) bool { + _, exists := container.MountPoints[path] + if exists { + return true + } + + // Also search among the tmpfs mounts + for dest := range container.HostConfig.Tmpfs { + if dest == path { + return true + } + } + + return false +} + +// UnmountIpcMount uses the provided unmount function to unmount shm if it was mounted +func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { + if container.HasMountFor("/dev/shm") { + return nil + } + + // container.ShmPath should not be used here as it may point + // to the host's or other container's /dev/shm + shmPath, err := container.ShmResourcePath() + if err != nil { + return err + } + if shmPath == "" { + return nil + } + if err = unmount(shmPath); err != nil && !os.IsNotExist(err) { + if mounted, mErr := mount.Mounted(shmPath); mounted || mErr != nil { + return errors.Wrapf(err, "umount %s", shmPath) + } + } + return nil +} + +// IpcMounts returns the list of IPC mounts +func (container *Container) IpcMounts() []Mount { + var mounts []Mount + parser := volumemounts.NewParser(container.OS) + + if container.HasMountFor("/dev/shm") { + return mounts + } + if container.ShmPath == "" { + return mounts + } + + label.SetFileLabel(container.ShmPath, container.MountLabel) + mounts = append(mounts, Mount{ + Source: container.ShmPath, + Destination: "/dev/shm", + Writable: true, + Propagation: string(parser.DefaultPropagationMode()), + }) + + return mounts +} + +// SecretMounts returns the mounts for the secret path. 
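+// Each file-based secret reference becomes one read-only mount; e.g. a
+// secret whose File.Name is "db_password" (hypothetical) yields roughly:
+//
+//	Mount{Source: "<root>/mounts/secrets/<SecretID>", Destination: "/run/secrets/db_password", Writable: false}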
+func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + src, err := container.SecretFilePath(*r) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: getSecretTargetPath(r), + Writable: false, + }) + } + for _, r := range container.ConfigReferences { + fPath, err := container.ConfigFilePath(*r) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: fPath, + Destination: r.File.Name, + Writable: false, + }) + } + + return mounts, nil +} + +// UnmountSecrets unmounts the local tmpfs for secrets +func (container *Container) UnmountSecrets() error { + p, err := container.SecretMountPath() + if err != nil { + return err + } + if _, err := os.Stat(p); err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + + return mount.RecursiveUnmount(p) +} + +type conflictingUpdateOptions string + +func (e conflictingUpdateOptions) Error() string { + return string(e) +} + +func (e conflictingUpdateOptions) Conflict() {} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. +func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + // update resources of container + resources := hostConfig.Resources + cResources := &container.HostConfig.Resources + + // validate NanoCPUs, CPUPeriod, and CPUQuota + // Because NanoCPU effectively updates CPUPeriod/CPUQuota, + // once NanoCPU is already set, updating CPUPeriod/CPUQuota will be blocked, and vice versa. + // In the following we make sure the intended update (resources) does not conflict with the existing (cResource). + if resources.NanoCPUs > 0 && cResources.CPUPeriod > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Period has already been set") + } + if resources.NanoCPUs > 0 && cResources.CPUQuota > 0 { + return conflictingUpdateOptions("Conflicting options: Nano CPUs cannot be updated as CPU Quota has already been set") + } + if resources.CPUPeriod > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Period cannot be updated as NanoCPUs has already been set") + } + if resources.CPUQuota > 0 && cResources.NanoCPUs > 0 { + return conflictingUpdateOptions("Conflicting options: CPU Quota cannot be updated as NanoCPUs has already been set") + } + + if resources.BlkioWeight != 0 { + cResources.BlkioWeight = resources.BlkioWeight + } + if resources.CPUShares != 0 { + cResources.CPUShares = resources.CPUShares + } + if resources.NanoCPUs != 0 { + cResources.NanoCPUs = resources.NanoCPUs + } + if resources.CPUPeriod != 0 { + cResources.CPUPeriod = resources.CPUPeriod + } + if resources.CPUQuota != 0 { + cResources.CPUQuota = resources.CPUQuota + } + if resources.CpusetCpus != "" { + cResources.CpusetCpus = resources.CpusetCpus + } + if resources.CpusetMems != "" { + cResources.CpusetMems = resources.CpusetMems + } + if resources.Memory != 0 { + // if memory limit smaller than already set memoryswap limit and doesn't + // update the memoryswap limit, then error out. 
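+		// Worked example (illustrative numbers): with MemorySwap currently
+		// 2 GiB and an update that sets Memory to 4 GiB while leaving
+		// MemorySwap at 0 (unchanged), the new memory limit would exceed
+		// the swap ceiling, so the check below rejects the update.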
+		if resources.Memory > cResources.MemorySwap && resources.MemorySwap == 0 {
+			return conflictingUpdateOptions("Memory limit should be smaller than already set memoryswap limit, update the memoryswap at the same time")
+		}
+		cResources.Memory = resources.Memory
+	}
+	if resources.MemorySwap != 0 {
+		cResources.MemorySwap = resources.MemorySwap
+	}
+	if resources.MemoryReservation != 0 {
+		cResources.MemoryReservation = resources.MemoryReservation
+	}
+	if resources.KernelMemory != 0 {
+		cResources.KernelMemory = resources.KernelMemory
+	}
+	if resources.CPURealtimePeriod != 0 {
+		cResources.CPURealtimePeriod = resources.CPURealtimePeriod
+	}
+	if resources.CPURealtimeRuntime != 0 {
+		cResources.CPURealtimeRuntime = resources.CPURealtimeRuntime
+	}
+
+	// update HostConfig of container
+	if hostConfig.RestartPolicy.Name != "" {
+		if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() {
+			return conflictingUpdateOptions("Restart policy cannot be updated because AutoRemove is enabled for the container")
+		}
+		container.HostConfig.RestartPolicy = hostConfig.RestartPolicy
+	}
+
+	return nil
+}
+
+// DetachAndUnmount uses a detached mount on all mount destinations, then
+// unmounts each volume normally.
+// This is used from daemon/archive for `docker cp`.
+func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error {
+	networkMounts := container.NetworkMounts()
+	mountPaths := make([]string, 0, len(container.MountPoints)+len(networkMounts))
+
+	for _, mntPoint := range container.MountPoints {
+		dest, err := container.GetResourcePath(mntPoint.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, mntPoint.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, m := range networkMounts {
+		dest, err := container.GetResourcePath(m.Destination)
+		if err != nil {
+			logrus.Warnf("Failed to get volume destination path for container '%s' at '%s' while lazily unmounting: %v", container.ID, m.Destination, err)
+			continue
+		}
+		mountPaths = append(mountPaths, dest)
+	}
+
+	for _, mountPath := range mountPaths {
+		if err := detachMounted(mountPath); err != nil {
+			logrus.Warnf("%s unmountVolumes: Failed to do lazy umount for volume '%s': %v", container.ID, mountPath, err)
+		}
+	}
+	return container.UnmountVolumes(volumeEventLog)
+}
+
+// copyExistingContents copies from the source to the destination and
+// ensures the ownership is appropriately set.
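+// Illustrative flow (paths hypothetical): data moves image -> volume only
+// while the volume is empty, so existing user data is never overwritten:
+//
+//	err := copyExistingContents("<rootfs>/data", "<volume>/_data")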
+func copyExistingContents(source, destination string) error { + dstList, err := ioutil.ReadDir(destination) + if err != nil { + return err + } + if len(dstList) != 0 { + // destination is not empty, do not copy + return nil + } + return fs.CopyDir(destination, source) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + parser := volumemounts.NewParser(container.OS) + var mounts []Mount + for dest, data := range container.HostConfig.Tmpfs { + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + for dest, mnt := range container.MountPoints { + if mnt.Type == mounttypes.TypeTmpfs { + data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly) + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: "tmpfs", + Destination: dest, + Data: data, + }) + } + } + return mounts, nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return false +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + Mode: m.Mode, + RW: m.RW, + Propagation: m.Propagation, + }) + } + return mountPoints +} + +// ConfigFilePath returns the path to the on-disk location of a config. +// On unix, configs are always considered secret +func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) (string, error) { + mounts, err := container.SecretMountPath() + if err != nil { + return "", err + } + return filepath.Join(mounts, configRef.ConfigID), nil +} diff --git a/vendor/github.com/docker/docker/container/container_windows.go b/vendor/github.com/docker/docker/container/container_windows.go new file mode 100644 index 0000000000..b5bdb5bc34 --- /dev/null +++ b/vendor/github.com/docker/docker/container/container_windows.go @@ -0,0 +1,213 @@ +package container // import "github.com/docker/docker/container" + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + swarmtypes "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/pkg/system" +) + +const ( + containerSecretMountPath = `C:\ProgramData\Docker\secrets` + containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets` + containerInternalConfigsDirPath = `C:\ProgramData\Docker\internal\configs` + + // DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container + DefaultStopTimeout = 30 +) + +// UnmountIpcMount unmounts Ipc related mounts. +// This is a NOOP on windows. +func (container *Container) UnmountIpcMount(unmount func(pth string) error) error { + return nil +} + +// IpcMounts returns the list of Ipc related mounts. +func (container *Container) IpcMounts() []Mount { + return nil +} + +// CreateSecretSymlinks creates symlinks to files in the secret mount. 
+func (container *Container) CreateSecretSymlinks() error { + for _, r := range container.SecretReferences { + if r.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(getSecretTargetPath(r)) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalSecretMountPath, r.SecretID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// SecretMounts returns the mount for the secret path. +// All secrets are stored in a single mount on Windows. Target symlinks are +// created for each secret, pointing to the files in this mount. +func (container *Container) SecretMounts() ([]Mount, error) { + var mounts []Mount + if len(container.SecretReferences) > 0 { + src, err := container.SecretMountPath() + if err != nil { + return nil, err + } + mounts = append(mounts, Mount{ + Source: src, + Destination: containerInternalSecretMountPath, + Writable: false, + }) + } + + return mounts, nil +} + +// UnmountSecrets unmounts the fs for secrets +func (container *Container) UnmountSecrets() error { + p, err := container.SecretMountPath() + if err != nil { + return err + } + return os.RemoveAll(p) +} + +// CreateConfigSymlinks creates symlinks to files in the config mount. +func (container *Container) CreateConfigSymlinks() error { + for _, configRef := range container.ConfigReferences { + if configRef.File == nil { + continue + } + resolvedPath, _, err := container.ResolvePath(configRef.File.Name) + if err != nil { + return err + } + if err := system.MkdirAll(filepath.Dir(resolvedPath), 0, ""); err != nil { + return err + } + if err := os.Symlink(filepath.Join(containerInternalConfigsDirPath, configRef.ConfigID), resolvedPath); err != nil { + return err + } + } + + return nil +} + +// ConfigMounts returns the mount for configs. +// TODO: Right now Windows doesn't really have a "secure" storage for secrets, +// however some configs may contain secrets. Once secure storage is worked out, +// configs and secret handling should be merged. +func (container *Container) ConfigMounts() []Mount { + var mounts []Mount + if len(container.ConfigReferences) > 0 { + mounts = append(mounts, Mount{ + Source: container.ConfigsDirPath(), + Destination: containerInternalConfigsDirPath, + Writable: false, + }) + } + + return mounts +} + +// DetachAndUnmount unmounts all volumes. +// On Windows it only delegates to `UnmountVolumes` since there is nothing to +// force unmount. +func (container *Container) DetachAndUnmount(volumeEventLog func(name, action string, attributes map[string]string)) error { + return container.UnmountVolumes(volumeEventLog) +} + +// TmpfsMounts returns the list of tmpfs mounts +func (container *Container) TmpfsMounts() ([]Mount, error) { + var mounts []Mount + return mounts, nil +} + +// UpdateContainer updates configuration of a container. Callers must hold a Lock on the Container. 
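+// On Windows every Resources field must be left at its zero value; an
+// illustrative rejected call:
+//
+//	err := c.UpdateContainer(&containertypes.HostConfig{
+//		Resources: containertypes.Resources{CPUShares: 512},
+//	})
+//	// err: "resource updating isn't supported on Windows"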
+func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { + resources := hostConfig.Resources + if resources.CPUShares != 0 || + resources.Memory != 0 || + resources.NanoCPUs != 0 || + resources.CgroupParent != "" || + resources.BlkioWeight != 0 || + len(resources.BlkioWeightDevice) != 0 || + len(resources.BlkioDeviceReadBps) != 0 || + len(resources.BlkioDeviceWriteBps) != 0 || + len(resources.BlkioDeviceReadIOps) != 0 || + len(resources.BlkioDeviceWriteIOps) != 0 || + resources.CPUPeriod != 0 || + resources.CPUQuota != 0 || + resources.CPURealtimePeriod != 0 || + resources.CPURealtimeRuntime != 0 || + resources.CpusetCpus != "" || + resources.CpusetMems != "" || + len(resources.Devices) != 0 || + len(resources.DeviceCgroupRules) != 0 || + resources.DiskQuota != 0 || + resources.KernelMemory != 0 || + resources.MemoryReservation != 0 || + resources.MemorySwap != 0 || + resources.MemorySwappiness != nil || + resources.OomKillDisable != nil || + resources.PidsLimit != 0 || + len(resources.Ulimits) != 0 || + resources.CPUCount != 0 || + resources.CPUPercent != 0 || + resources.IOMaximumIOps != 0 || + resources.IOMaximumBandwidth != 0 { + return fmt.Errorf("resource updating isn't supported on Windows") + } + // update HostConfig of container + if hostConfig.RestartPolicy.Name != "" { + if container.HostConfig.AutoRemove && !hostConfig.RestartPolicy.IsNone() { + return fmt.Errorf("Restart policy cannot be updated because AutoRemove is enabled for the container") + } + container.HostConfig.RestartPolicy = hostConfig.RestartPolicy + } + return nil +} + +// BuildHostnameFile writes the container's hostname file. +func (container *Container) BuildHostnameFile() error { + return nil +} + +// EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network +func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool { + return true +} + +// GetMountPoints gives a platform specific transformation to types.MountPoint. Callers must hold a Container lock. +func (container *Container) GetMountPoints() []types.MountPoint { + mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) + for _, m := range container.MountPoints { + mountPoints = append(mountPoints, types.MountPoint{ + Type: m.Type, + Name: m.Name, + Source: m.Path(), + Destination: m.Destination, + Driver: m.Driver, + RW: m.RW, + }) + } + return mountPoints +} + +func (container *Container) ConfigsDirPath() string { + return filepath.Join(container.Root, "configs") +} + +// ConfigFilePath returns the path to the on-disk location of a config. 
+func (container *Container) ConfigFilePath(configRef swarmtypes.ConfigReference) string { + return filepath.Join(container.ConfigsDirPath(), configRef.ConfigID) +} diff --git a/vendor/github.com/docker/docker/container/env.go b/vendor/github.com/docker/docker/container/env.go new file mode 100644 index 0000000000..d225fd1471 --- /dev/null +++ b/vendor/github.com/docker/docker/container/env.go @@ -0,0 +1,43 @@ +package container // import "github.com/docker/docker/container" + +import ( + "strings" +) + +// ReplaceOrAppendEnvValues returns the defaults with the overrides either +// replaced by env key or appended to the list +func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { + cache := make(map[string]int, len(defaults)) + for i, e := range defaults { + parts := strings.SplitN(e, "=", 2) + cache[parts[0]] = i + } + + for _, value := range overrides { + // Values w/o = means they want this env to be removed/unset. + if !strings.Contains(value, "=") { + if i, exists := cache[value]; exists { + defaults[i] = "" // Used to indicate it should be removed + } + continue + } + + // Just do a normal set/update + parts := strings.SplitN(value, "=", 2) + if i, exists := cache[parts[0]]; exists { + defaults[i] = value + } else { + defaults = append(defaults, value) + } + } + + // Now remove all entries that we want to "unset" + for i := 0; i < len(defaults); i++ { + if defaults[i] == "" { + defaults = append(defaults[:i], defaults[i+1:]...) + i-- + } + } + + return defaults +} diff --git a/vendor/github.com/docker/docker/container/health.go b/vendor/github.com/docker/docker/container/health.go new file mode 100644 index 0000000000..167ee9b476 --- /dev/null +++ b/vendor/github.com/docker/docker/container/health.go @@ -0,0 +1,82 @@ +package container // import "github.com/docker/docker/container" + +import ( + "sync" + + "github.com/docker/docker/api/types" + "github.com/sirupsen/logrus" +) + +// Health holds the current container health-check state +type Health struct { + types.Health + stop chan struct{} // Write struct{} to stop the monitor + mu sync.Mutex +} + +// String returns a human-readable description of the health-check state +func (s *Health) String() string { + status := s.Status() + + switch status { + case types.Starting: + return "health: starting" + default: // Healthy and Unhealthy are clear on their own + return s.Health.Status + } +} + +// Status returns the current health status. +// +// Note that this takes a lock and the value may change after being read. +func (s *Health) Status() string { + s.mu.Lock() + defer s.mu.Unlock() + + // This happens when the monitor has yet to be setup. + if s.Health.Status == "" { + return types.Unhealthy + } + + return s.Health.Status +} + +// SetStatus writes the current status to the underlying health structure, +// obeying the locking semantics. +// +// Status may be set directly if another lock is used. +func (s *Health) SetStatus(new string) { + s.mu.Lock() + defer s.mu.Unlock() + + s.Health.Status = new +} + +// OpenMonitorChannel creates and returns a new monitor channel. If there +// already is one, it returns nil. +func (s *Health) OpenMonitorChannel() chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stop == nil { + logrus.Debug("OpenMonitorChannel") + s.stop = make(chan struct{}) + return s.stop + } + return nil +} + +// CloseMonitorChannel closes any existing monitor channel. 
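+// Lifecycle sketch (probeLoop is a hypothetical monitor goroutine that
+// returns once the channel is closed):
+//
+//	stop := h.OpenMonitorChannel() // nil if a monitor is already running
+//	go probeLoop(stop)
+//	h.CloseMonitorChannel() // probe exits; status becomes unhealthy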
+func (s *Health) CloseMonitorChannel() { + s.mu.Lock() + defer s.mu.Unlock() + + if s.stop != nil { + logrus.Debug("CloseMonitorChannel: waiting for probe to stop") + close(s.stop) + s.stop = nil + // unhealthy when the monitor has stopped for compatibility reasons + s.Health.Status = types.Unhealthy + logrus.Debug("CloseMonitorChannel done") + } +} diff --git a/vendor/github.com/docker/docker/container/history.go b/vendor/github.com/docker/docker/container/history.go new file mode 100644 index 0000000000..7117d9a437 --- /dev/null +++ b/vendor/github.com/docker/docker/container/history.go @@ -0,0 +1,30 @@ +package container // import "github.com/docker/docker/container" + +import "sort" + +// History is a convenience type for storing a list of containers, +// sorted by creation date in descendant order. +type History []*Container + +// Len returns the number of containers in the history. +func (history *History) Len() int { + return len(*history) +} + +// Less compares two containers and returns true if the second one +// was created before the first one. +func (history *History) Less(i, j int) bool { + containers := *history + return containers[j].Created.Before(containers[i].Created) +} + +// Swap switches containers i and j positions in the history. +func (history *History) Swap(i, j int) { + containers := *history + containers[i], containers[j] = containers[j], containers[i] +} + +// sort orders the history by creation date in descendant order. +func (history *History) sort() { + sort.Sort(history) +} diff --git a/vendor/github.com/docker/docker/container/memory_store.go b/vendor/github.com/docker/docker/container/memory_store.go new file mode 100644 index 0000000000..ad4c9e20f6 --- /dev/null +++ b/vendor/github.com/docker/docker/container/memory_store.go @@ -0,0 +1,95 @@ +package container // import "github.com/docker/docker/container" + +import ( + "sync" +) + +// memoryStore implements a Store in memory. +type memoryStore struct { + s map[string]*Container + sync.RWMutex +} + +// NewMemoryStore initializes a new memory store. +func NewMemoryStore() Store { + return &memoryStore{ + s: make(map[string]*Container), + } +} + +// Add appends a new container to the memory store. +// It overrides the id if it existed before. +func (c *memoryStore) Add(id string, cont *Container) { + c.Lock() + c.s[id] = cont + c.Unlock() +} + +// Get returns a container from the store by id. +func (c *memoryStore) Get(id string) *Container { + var res *Container + c.RLock() + res = c.s[id] + c.RUnlock() + return res +} + +// Delete removes a container from the store by id. +func (c *memoryStore) Delete(id string) { + c.Lock() + delete(c.s, id) + c.Unlock() +} + +// List returns a sorted list of containers from the store. +// The containers are ordered by creation date. +func (c *memoryStore) List() []*Container { + containers := History(c.all()) + containers.sort() + return containers +} + +// Size returns the number of containers in the store. +func (c *memoryStore) Size() int { + c.RLock() + defer c.RUnlock() + return len(c.s) +} + +// First returns the first container found in the store by a given filter. +func (c *memoryStore) First(filter StoreFilter) *Container { + for _, cont := range c.all() { + if filter(cont) { + return cont + } + } + return nil +} + +// ApplyAll calls the reducer function with every container in the store. +// This operation is asynchronous in the memory store. +// NOTE: Modifications to the store MUST NOT be done by the StoreReducer. 
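+// The reducer runs in one goroutine per container, so it must be safe for
+// concurrent use; an illustrative count of running containers (store is any
+// container.Store, atomic is sync/atomic):
+//
+//	var running int32
+//	store.ApplyAll(func(c *Container) {
+//		if c.IsRunning() {
+//			atomic.AddInt32(&running, 1)
+//		}
+//	})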
+func (c *memoryStore) ApplyAll(apply StoreReducer) { + wg := new(sync.WaitGroup) + for _, cont := range c.all() { + wg.Add(1) + go func(container *Container) { + apply(container) + wg.Done() + }(cont) + } + + wg.Wait() +} + +func (c *memoryStore) all() []*Container { + c.RLock() + containers := make([]*Container, 0, len(c.s)) + for _, cont := range c.s { + containers = append(containers, cont) + } + c.RUnlock() + return containers +} + +var _ Store = &memoryStore{} diff --git a/vendor/github.com/docker/docker/container/monitor.go b/vendor/github.com/docker/docker/container/monitor.go new file mode 100644 index 0000000000..1735e3487e --- /dev/null +++ b/vendor/github.com/docker/docker/container/monitor.go @@ -0,0 +1,46 @@ +package container // import "github.com/docker/docker/container" + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +const ( + loggerCloseTimeout = 10 * time.Second +) + +// Reset puts a container into a state where it can be restarted again. +func (container *Container) Reset(lock bool) { + if lock { + container.Lock() + defer container.Unlock() + } + + if err := container.CloseStreams(); err != nil { + logrus.Errorf("%s: %s", container.ID, err) + } + + // Re-create a brand new stdin pipe once the container exited + if container.Config.OpenStdin { + container.StreamConfig.NewInputPipes() + } + + if container.LogDriver != nil { + if container.LogCopier != nil { + exit := make(chan struct{}) + go func() { + container.LogCopier.Wait() + close(exit) + }() + select { + case <-time.After(loggerCloseTimeout): + logrus.Warn("Logger didn't exit in time: logs may be truncated") + case <-exit: + } + } + container.LogDriver.Close() + container.LogCopier = nil + container.LogDriver = nil + } +} diff --git a/vendor/github.com/docker/docker/container/mounts_unix.go b/vendor/github.com/docker/docker/container/mounts_unix.go new file mode 100644 index 0000000000..62f4441dce --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package container // import "github.com/docker/docker/container" + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` + Data string `json:"data"` + Propagation string `json:"mountpropagation"` +} diff --git a/vendor/github.com/docker/docker/container/mounts_windows.go b/vendor/github.com/docker/docker/container/mounts_windows.go new file mode 100644 index 0000000000..8f27e88067 --- /dev/null +++ b/vendor/github.com/docker/docker/container/mounts_windows.go @@ -0,0 +1,8 @@ +package container // import "github.com/docker/docker/container" + +// Mount contains information for a mount operation. +type Mount struct { + Source string `json:"source"` + Destination string `json:"destination"` + Writable bool `json:"writable"` +} diff --git a/vendor/github.com/docker/docker/container/state.go b/vendor/github.com/docker/docker/container/state.go new file mode 100644 index 0000000000..7c2a1ec81c --- /dev/null +++ b/vendor/github.com/docker/docker/container/state.go @@ -0,0 +1,409 @@ +package container // import "github.com/docker/docker/container" + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-units" +) + +// State holds the current container state, and has methods to get and +// set the state. 
Container has an embed, which allows all of the
+// functions defined against State to run against Container.
+type State struct {
+	sync.Mutex
+	// Note that `Running` and `Paused` are not mutually exclusive:
+	// When pausing a container (on Linux), the cgroups freezer is used to suspend
+	// all processes in the container. Freezing the process requires the process to
+	// be running. As a result, paused containers are both `Running` _and_ `Paused`.
+	Running           bool
+	Paused            bool
+	Restarting        bool
+	OOMKilled         bool
+	RemovalInProgress bool // No need for this to be persistent on disk.
+	Dead              bool
+	Pid               int
+	ExitCodeValue     int    `json:"ExitCode"`
+	ErrorMsg          string `json:"Error"` // contains last known error during container start, stop, or remove
+	StartedAt         time.Time
+	FinishedAt        time.Time
+	Health            *Health
+
+	waitStop   chan struct{}
+	waitRemove chan struct{}
+}
+
+// StateStatus is used to return container wait results.
+// Implements exec.ExitCode interface.
+// This type is needed as State includes a sync.Mutex field which makes
+// copying it unsafe.
+type StateStatus struct {
+	exitCode int
+	err      error
+}
+
+// ExitCode returns current exitcode for the state.
+func (s StateStatus) ExitCode() int {
+	return s.exitCode
+}
+
+// Err returns current error for the state. Returns nil if the container had
+// exited on its own.
+func (s StateStatus) Err() error {
+	return s.err
+}
+
+// NewState creates a default state object with a fresh channel for state changes.
+func NewState() *State {
+	return &State{
+		waitStop:   make(chan struct{}),
+		waitRemove: make(chan struct{}),
+	}
+}
+
+// String returns a human-readable description of the state
+func (s *State) String() string {
+	if s.Running {
+		if s.Paused {
+			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+		}
+		if s.Restarting {
+			return fmt.Sprintf("Restarting (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+		}
+
+		if h := s.Health; h != nil {
+			return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
+		}
+
+		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
+	}
+
+	if s.RemovalInProgress {
+		return "Removal In Progress"
+	}
+
+	if s.Dead {
+		return "Dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "Created"
+	}
+
+	if s.FinishedAt.IsZero() {
+		return ""
+	}
+
+	return fmt.Sprintf("Exited (%d) %s ago", s.ExitCodeValue, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
+}
+
+// IsValidHealthString checks if the provided string is a valid container health status or not.
+func IsValidHealthString(s string) bool {
+	return s == types.Starting ||
+		s == types.Healthy ||
+		s == types.Unhealthy ||
+		s == types.NoHealthcheck
+}
+
+// StateString returns a single string to describe state
+func (s *State) StateString() string {
+	if s.Running {
+		if s.Paused {
+			return "paused"
+		}
+		if s.Restarting {
+			return "restarting"
+		}
+		return "running"
+	}
+
+	if s.RemovalInProgress {
+		return "removing"
+	}
+
+	if s.Dead {
+		return "dead"
+	}
+
+	if s.StartedAt.IsZero() {
+		return "created"
+	}
+
+	return "exited"
+}
+
+// IsValidStateString checks if the provided string is a valid container state or not.
+func IsValidStateString(s string) bool {
+	if s != "paused" &&
+		s != "restarting" &&
+		s != "removing" &&
+		s != "running" &&
+		s != "dead" &&
+		s != "created" &&
+		s != "exited" {
+		return false
+	}
+	return true
+}
+
+// WaitCondition is an enum type for different states to wait for.
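+// An illustrative bounded wait for a container to stop (s is a *State):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//	defer cancel()
+//	status := <-s.Wait(ctx, WaitConditionNotRunning)
+//	exitCode, waitErr := status.ExitCode(), status.Err()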
+type WaitCondition int + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = iota + WaitConditionNextExit + WaitConditionRemoved +) + +// Wait waits until the container is in a certain state indicated by the given +// condition. A context must be used for cancelling the request, controlling +// timeouts, and avoiding goroutine leaks. Wait must be called without holding +// the state lock. Returns a channel from which the caller will receive the +// result. If the container exited on its own, the result's Err() method will +// be nil and its ExitCode() method will return the container's exit code, +// otherwise, the results Err() method will return an error indicating why the +// wait operation failed. +func (s *State) Wait(ctx context.Context, condition WaitCondition) <-chan StateStatus { + s.Lock() + defer s.Unlock() + + if condition == WaitConditionNotRunning && !s.Running { + // Buffer so we can put it in the channel now. + resultC := make(chan StateStatus, 1) + + // Send the current status. + resultC <- StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), + } + + return resultC + } + + // If we are waiting only for removal, the waitStop channel should + // remain nil and block forever. + var waitStop chan struct{} + if condition < WaitConditionRemoved { + waitStop = s.waitStop + } + + // Always wait for removal, just in case the container gets removed + // while it is still in a "created" state, in which case it is never + // actually stopped. + waitRemove := s.waitRemove + + resultC := make(chan StateStatus) + + go func() { + select { + case <-ctx.Done(): + // Context timeout or cancellation. + resultC <- StateStatus{ + exitCode: -1, + err: ctx.Err(), + } + return + case <-waitStop: + case <-waitRemove: + } + + s.Lock() + result := StateStatus{ + exitCode: s.ExitCode(), + err: s.Err(), + } + s.Unlock() + + resultC <- result + }() + + return resultC +} + +// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running. +func (s *State) IsRunning() bool { + s.Lock() + res := s.Running + s.Unlock() + return res +} + +// GetPID holds the process id of a container. +func (s *State) GetPID() int { + s.Lock() + res := s.Pid + s.Unlock() + return res +} + +// ExitCode returns current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) ExitCode() int { + return s.ExitCodeValue +} + +// SetExitCode sets current exitcode for the state. Take lock before if state +// may be shared. +func (s *State) SetExitCode(ec int) { + s.ExitCodeValue = ec +} + +// SetRunning sets the state of the container to "running". +func (s *State) SetRunning(pid int, initial bool) { + s.ErrorMsg = "" + s.Paused = false + s.Running = true + s.Restarting = false + if initial { + s.Paused = false + } + s.ExitCodeValue = 0 + s.Pid = pid + if initial { + s.StartedAt = time.Now().UTC() + } +} + +// SetStopped sets the container state to "stopped" without locking. 
+func (s *State) SetStopped(exitStatus *ExitStatus) { + s.Running = false + s.Paused = false + s.Restarting = false + s.Pid = 0 + if exitStatus.ExitedAt.IsZero() { + s.FinishedAt = time.Now().UTC() + } else { + s.FinishedAt = exitStatus.ExitedAt + } + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) +} + +// SetRestarting sets the container state to "restarting" without locking. +// It also sets the container PID to 0. +func (s *State) SetRestarting(exitStatus *ExitStatus) { + // we should consider the container running when it is restarting because of + // all the checks in docker around rm/stop/etc + s.Running = true + s.Restarting = true + s.Paused = false + s.Pid = 0 + s.FinishedAt = time.Now().UTC() + s.ExitCodeValue = exitStatus.ExitCode + s.OOMKilled = exitStatus.OOMKilled + close(s.waitStop) // fire waiters for stop + s.waitStop = make(chan struct{}) +} + +// SetError sets the container's error state. This is useful when we want to +// know the error that occurred when container transits to another state +// when inspecting it +func (s *State) SetError(err error) { + s.ErrorMsg = "" + if err != nil { + s.ErrorMsg = err.Error() + } +} + +// IsPaused returns whether the container is paused or not. +func (s *State) IsPaused() bool { + s.Lock() + res := s.Paused + s.Unlock() + return res +} + +// IsRestarting returns whether the container is restarting or not. +func (s *State) IsRestarting() bool { + s.Lock() + res := s.Restarting + s.Unlock() + return res +} + +// SetRemovalInProgress sets the container state as being removed. +// It returns true if the container was already in that state. +func (s *State) SetRemovalInProgress() bool { + s.Lock() + defer s.Unlock() + if s.RemovalInProgress { + return true + } + s.RemovalInProgress = true + return false +} + +// ResetRemovalInProgress makes the RemovalInProgress state to false. +func (s *State) ResetRemovalInProgress() { + s.Lock() + s.RemovalInProgress = false + s.Unlock() +} + +// IsRemovalInProgress returns whether the RemovalInProgress flag is set. +// Used by Container to check whether a container is being removed. +func (s *State) IsRemovalInProgress() bool { + s.Lock() + res := s.RemovalInProgress + s.Unlock() + return res +} + +// SetDead sets the container state to "dead" +func (s *State) SetDead() { + s.Lock() + s.Dead = true + s.Unlock() +} + +// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead. +func (s *State) IsDead() bool { + s.Lock() + res := s.Dead + s.Unlock() + return res +} + +// SetRemoved assumes this container is already in the "dead" state and +// closes the internal waitRemove channel to unblock callers waiting for a +// container to be removed. +func (s *State) SetRemoved() { + s.SetRemovalError(nil) +} + +// SetRemovalError is to be called in case a container remove failed. +// It sets an error and closes the internal waitRemove channel to unblock +// callers waiting for the container to be removed. +func (s *State) SetRemovalError(err error) { + s.SetError(err) + s.Lock() + close(s.waitRemove) // Unblock those waiting on remove. + // Recreate the channel so next ContainerWait will work + s.waitRemove = make(chan struct{}) + s.Unlock() +} + +// Err returns an error if there is one. 
+func (s *State) Err() error { + if s.ErrorMsg != "" { + return errors.New(s.ErrorMsg) + } + return nil +} diff --git a/vendor/github.com/docker/docker/container/store.go b/vendor/github.com/docker/docker/container/store.go new file mode 100644 index 0000000000..3af0389856 --- /dev/null +++ b/vendor/github.com/docker/docker/container/store.go @@ -0,0 +1,28 @@ +package container // import "github.com/docker/docker/container" + +// StoreFilter defines a function to filter +// container in the store. +type StoreFilter func(*Container) bool + +// StoreReducer defines a function to +// manipulate containers in the store +type StoreReducer func(*Container) + +// Store defines an interface that +// any container store must implement. +type Store interface { + // Add appends a new container to the store. + Add(string, *Container) + // Get returns a container from the store by the identifier it was stored with. + Get(string) *Container + // Delete removes a container from the store by the identifier it was stored with. + Delete(string) + // List returns a list of containers from the store. + List() []*Container + // Size returns the number of containers in the store. + Size() int + // First returns the first container found in the store by a given filter. + First(StoreFilter) *Container + // ApplyAll calls the reducer function with every container in the store. + ApplyAll(StoreReducer) +} diff --git a/vendor/github.com/docker/docker/container/stream/attach.go b/vendor/github.com/docker/docker/container/stream/attach.go new file mode 100644 index 0000000000..cee90b88a1 --- /dev/null +++ b/vendor/github.com/docker/docker/container/stream/attach.go @@ -0,0 +1,184 @@ +package stream // import "github.com/docker/docker/container/stream" + +import ( + "context" + "io" + "sync" + + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/term" + "github.com/sirupsen/logrus" +) + +var defaultEscapeSequence = []byte{16, 17} // ctrl-p, ctrl-q + +// AttachConfig is the config struct used to attach a client to a stream's stdio +type AttachConfig struct { + // Tells the attach copier that the stream's stdin is a TTY and to look for + // escape sequences in stdin to detach from the stream. + // When true the escape sequence is not passed to the underlying stream + TTY bool + // Specifies the detach keys the client will be using + // Only useful when `TTY` is true + DetachKeys []byte + + // CloseStdin signals that once done, stdin for the attached stream should be closed + // For example, this would close the attached container's stdin. + CloseStdin bool + + // UseStd* indicate whether the client has requested to be connected to the + // given stream or not. These flags are used instead of checking Std* != nil + // at points before the client streams Std* are wired up. 
+ UseStdin, UseStdout, UseStderr bool + + // CStd* are the streams directly connected to the container + CStdin io.WriteCloser + CStdout, CStderr io.ReadCloser + + // Provide client streams to wire up to + Stdin io.ReadCloser + Stdout, Stderr io.Writer +} + +// AttachStreams attaches the container's streams to the AttachConfig +func (c *Config) AttachStreams(cfg *AttachConfig) { + if cfg.UseStdin { + cfg.CStdin = c.StdinPipe() + } + + if cfg.UseStdout { + cfg.CStdout = c.StdoutPipe() + } + + if cfg.UseStderr { + cfg.CStderr = c.StderrPipe() + } +} + +// CopyStreams starts goroutines to copy data in and out to/from the container +func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error { + var ( + wg sync.WaitGroup + errors = make(chan error, 3) + ) + + if cfg.Stdin != nil { + wg.Add(1) + } + + if cfg.Stdout != nil { + wg.Add(1) + } + + if cfg.Stderr != nil { + wg.Add(1) + } + + // Connect stdin of container to the attach stdin stream. + go func() { + if cfg.Stdin == nil { + return + } + logrus.Debug("attach: stdin: begin") + + var err error + if cfg.TTY { + _, err = copyEscapable(cfg.CStdin, cfg.Stdin, cfg.DetachKeys) + } else { + _, err = pools.Copy(cfg.CStdin, cfg.Stdin) + } + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: stdin: %s", err) + errors <- err + } + if cfg.CloseStdin && !cfg.TTY { + cfg.CStdin.Close() + } else { + // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + } + logrus.Debug("attach: stdin: end") + wg.Done() + }() + + attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { + if stream == nil { + return + } + + logrus.Debugf("attach: %s: begin", name) + _, err := pools.Copy(stream, streamPipe) + if err == io.ErrClosedPipe { + err = nil + } + if err != nil { + logrus.Errorf("attach: %s: %v", name, err) + errors <- err + } + // Make sure stdin gets closed + if cfg.Stdin != nil { + cfg.Stdin.Close() + } + streamPipe.Close() + logrus.Debugf("attach: %s: end", name) + wg.Done() + } + + go attachStream("stdout", cfg.Stdout, cfg.CStdout) + go attachStream("stderr", cfg.Stderr, cfg.CStderr) + + errs := make(chan error, 1) + + go func() { + defer close(errs) + errs <- func() error { + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-ctx.Done(): + // close all pipes + if cfg.CStdin != nil { + cfg.CStdin.Close() + } + if cfg.CStdout != nil { + cfg.CStdout.Close() + } + if cfg.CStderr != nil { + cfg.CStderr.Close() + } + <-done + } + close(errors) + for err := range errors { + if err != nil { + return err + } + } + return nil + }() + }() + + return errs +} + +func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { + if len(keys) == 0 { + keys = defaultEscapeSequence + } + pr := term.NewEscapeProxy(src, keys) + defer src.Close() + + return pools.Copy(dst, pr) +} diff --git a/vendor/github.com/docker/docker/container/stream/streams.go b/vendor/github.com/docker/docker/container/stream/streams.go new file mode 100644 index 0000000000..d81867c1da --- /dev/null +++ b/vendor/github.com/docker/docker/container/stream/streams.go @@ -0,0 +1,146 @@ +package stream // import "github.com/docker/docker/container/stream" + +import ( + "fmt" + "io" + "io/ioutil" + "strings" + "sync" + + "github.com/containerd/containerd/cio" + "github.com/docker/docker/pkg/broadcaster" + 
"github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/sirupsen/logrus" +) + +// Config holds information about I/O streams managed together. +// +// config.StdinPipe returns a WriteCloser which can be used to feed data +// to the standard input of the streamConfig's active process. +// config.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser +// which can be used to retrieve the standard output (and error) generated +// by the container's active process. The output (and error) are actually +// copied and delivered to all StdoutPipe and StderrPipe consumers, using +// a kind of "broadcaster". +type Config struct { + sync.WaitGroup + stdout *broadcaster.Unbuffered + stderr *broadcaster.Unbuffered + stdin io.ReadCloser + stdinPipe io.WriteCloser +} + +// NewConfig creates a stream config and initializes +// the standard err and standard out to new unbuffered broadcasters. +func NewConfig() *Config { + return &Config{ + stderr: new(broadcaster.Unbuffered), + stdout: new(broadcaster.Unbuffered), + } +} + +// Stdout returns the standard output in the configuration. +func (c *Config) Stdout() *broadcaster.Unbuffered { + return c.stdout +} + +// Stderr returns the standard error in the configuration. +func (c *Config) Stderr() *broadcaster.Unbuffered { + return c.stderr +} + +// Stdin returns the standard input in the configuration. +func (c *Config) Stdin() io.ReadCloser { + return c.stdin +} + +// StdinPipe returns an input writer pipe as an io.WriteCloser. +func (c *Config) StdinPipe() io.WriteCloser { + return c.stdinPipe +} + +// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new out pipe to the Stdout broadcaster. +// This will block stdout if unconsumed. +func (c *Config) StdoutPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stdout.Add(bytesPipe) + return bytesPipe +} + +// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. +// It adds this new err pipe to the Stderr broadcaster. +// This will block stderr if unconsumed. +func (c *Config) StderrPipe() io.ReadCloser { + bytesPipe := ioutils.NewBytesPipe() + c.stderr.Add(bytesPipe) + return bytesPipe +} + +// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. +func (c *Config) NewInputPipes() { + c.stdin, c.stdinPipe = io.Pipe() +} + +// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. +func (c *Config) NewNopInputPipe() { + c.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) +} + +// CloseStreams ensures that the configured streams are properly closed. 
+func (c *Config) CloseStreams() error { + var errors []string + + if c.stdin != nil { + if err := c.stdin.Close(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) + } + } + + if err := c.stdout.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) + } + + if err := c.stderr.Clean(); err != nil { + errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) + } + + if len(errors) > 0 { + return fmt.Errorf(strings.Join(errors, "\n")) + } + + return nil +} + +// CopyToPipe connects streamconfig with a libcontainerd.IOPipe +func (c *Config) CopyToPipe(iop *cio.DirectIO) { + copyFunc := func(w io.Writer, r io.ReadCloser) { + c.Add(1) + go func() { + if _, err := pools.Copy(w, r); err != nil { + logrus.Errorf("stream copy error: %v", err) + } + r.Close() + c.Done() + }() + } + + if iop.Stdout != nil { + copyFunc(c.Stdout(), iop.Stdout) + } + if iop.Stderr != nil { + copyFunc(c.Stderr(), iop.Stderr) + } + + if stdin := c.Stdin(); stdin != nil { + if iop.Stdin != nil { + go func() { + pools.Copy(iop.Stdin, stdin) + if err := iop.Stdin.Close(); err != nil { + logrus.Warnf("failed to close stdin: %v", err) + } + }() + } + } +} diff --git a/vendor/github.com/docker/docker/container/view.go b/vendor/github.com/docker/docker/container/view.go new file mode 100644 index 0000000000..baf6fe7195 --- /dev/null +++ b/vendor/github.com/docker/docker/container/view.go @@ -0,0 +1,494 @@ +package container // import "github.com/docker/docker/container" + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + "github.com/hashicorp/go-memdb" + "github.com/sirupsen/logrus" +) + +const ( + memdbContainersTable = "containers" + memdbNamesTable = "names" + memdbIDIndex = "id" + memdbContainerIDIndex = "containerid" +) + +var ( + // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved + ErrNameReserved = errors.New("name is reserved") + // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved + ErrNameNotReserved = errors.New("name is not reserved") +) + +// Snapshot is a read only view for Containers. It holds all information necessary to serve container queries in a +// versioned ACID in-memory store. +type Snapshot struct { + types.Container + + // additional info queries need to filter on + // preserve nanosec resolution for queries + CreatedAt time.Time + StartedAt time.Time + Name string + Pid int + ExitCode int + Running bool + Paused bool + Managed bool + ExposedPorts nat.PortSet + PortBindings nat.PortSet + Health string + HostConfig struct { + Isolation string + } +} + +// nameAssociation associates a container id with a name. +type nameAssociation struct { + // name is the name to associate. Note that name is the primary key + // ("id" in memdb). 
+ name string + containerID string +} + +// ViewDB provides an in-memory transactional (ACID) container store +type ViewDB interface { + Snapshot() View + Save(*Container) error + Delete(*Container) error + + ReserveName(name, containerID string) error + ReleaseName(name string) error +} + +// View can be used by readers to avoid locking +type View interface { + All() ([]Snapshot, error) + Get(id string) (*Snapshot, error) + + GetID(name string) (string, error) + GetAllNames() map[string][]string +} + +var schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{ + memdbContainersTable: { + Name: memdbContainersTable, + Indexes: map[string]*memdb.IndexSchema{ + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &containerByIDIndexer{}, + }, + }, + }, + memdbNamesTable: { + Name: memdbNamesTable, + Indexes: map[string]*memdb.IndexSchema{ + // Used for names, because "id" is the primary key in memdb. + memdbIDIndex: { + Name: memdbIDIndex, + Unique: true, + Indexer: &namesByNameIndexer{}, + }, + memdbContainerIDIndex: { + Name: memdbContainerIDIndex, + Indexer: &namesByContainerIDIndexer{}, + }, + }, + }, + }, +} + +type memDB struct { + store *memdb.MemDB +} + +// NoSuchContainerError indicates that the container wasn't found in the +// database. +type NoSuchContainerError struct { + id string +} + +// Error satisfies the error interface. +func (e NoSuchContainerError) Error() string { + return "no such container " + e.id +} + +// NewViewDB provides the default implementation, with the default schema +func NewViewDB() (ViewDB, error) { + store, err := memdb.NewMemDB(schema) + if err != nil { + return nil, err + } + return &memDB{store: store}, nil +} + +// Snapshot provides a consistent read-only View of the database +func (db *memDB) Snapshot() View { + return &memdbView{ + txn: db.store.Txn(false), + } +} + +func (db *memDB) withTxn(cb func(*memdb.Txn) error) error { + txn := db.store.Txn(true) + err := cb(txn) + if err != nil { + txn.Abort() + return err + } + txn.Commit() + return nil +} + +// Save atomically updates the in-memory store state for a Container. +// Only read-only (deep) copies of containers may be passed in. +func (db *memDB) Save(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Insert(memdbContainersTable, c) + }) +} + +// Delete removes a container from the store, along with any names reserved to it. +func (db *memDB) Delete(c *Container) error { + return db.withTxn(func(txn *memdb.Txn) error { + view := &memdbView{txn: txn} + names := view.getNames(c.ID) + + for _, name := range names { + txn.Delete(memdbNamesTable, nameAssociation{name: name}) + } + + // Ignore error - the container may not actually exist in the + // db, but we still need to clean up associated names.
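+ // NewBaseContainer is used here only as a lookup key; the id indexer + // reads nothing but the container ID when locating the row to delete.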
+ txn.Delete(memdbContainersTable, NewBaseContainer(c.ID, c.Root)) + return nil + }) +} + +// ReserveName registers a container ID to a name. +// ReserveName is idempotent. +// Attempting to reserve a name that is already reserved to a different container ID results in `ErrNameReserved`. +// A name reservation is globally unique. +func (db *memDB) ReserveName(name, containerID string) error { + return db.withTxn(func(txn *memdb.Txn) error { + s, err := txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return err + } + if s != nil { + if s.(nameAssociation).containerID != containerID { + return ErrNameReserved + } + return nil + } + return txn.Insert(memdbNamesTable, nameAssociation{name: name, containerID: containerID}) + }) +} + +// ReleaseName releases the reserved name. +// Once released, a name can be reserved again. +func (db *memDB) ReleaseName(name string) error { + return db.withTxn(func(txn *memdb.Txn) error { + return txn.Delete(memdbNamesTable, nameAssociation{name: name}) + }) +} + +type memdbView struct { + txn *memdb.Txn +} + +// All returns all items in this snapshot. Returned objects must never be modified. +func (v *memdbView) All() ([]Snapshot, error) { + var all []Snapshot + iter, err := v.txn.Get(memdbContainersTable, memdbIDIndex) + if err != nil { + return nil, err + } + for { + item := iter.Next() + if item == nil { + break + } + snapshot := v.transform(item.(*Container)) + all = append(all, *snapshot) + } + return all, nil +} + +// Get returns an item by id. Returned objects must never be modified. +func (v *memdbView) Get(id string) (*Snapshot, error) { + s, err := v.txn.First(memdbContainersTable, memdbIDIndex, id) + if err != nil { + return nil, err + } + if s == nil { + return nil, NoSuchContainerError{id: id} + } + return v.transform(s.(*Container)), nil +} + +// getNames lists all the reserved names for the given container ID. +func (v *memdbView) getNames(containerID string) []string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex, containerID) + if err != nil { + return nil + } + + var names []string + for { + item := iter.Next() + if item == nil { + break + } + names = append(names, item.(nameAssociation).name) + } + + return names +} + +// GetID returns the container ID that the passed in name is reserved to. +func (v *memdbView) GetID(name string) (string, error) { + s, err := v.txn.First(memdbNamesTable, memdbIDIndex, name) + if err != nil { + return "", err + } + if s == nil { + return "", ErrNameNotReserved + } + return s.(nameAssociation).containerID, nil +} + +// GetAllNames returns all registered names. +func (v *memdbView) GetAllNames() map[string][]string { + iter, err := v.txn.Get(memdbNamesTable, memdbContainerIDIndex) + if err != nil { + return nil + } + + out := make(map[string][]string) + for { + item := iter.Next() + if item == nil { + break + } + assoc := item.(nameAssociation) + out[assoc.containerID] = append(out[assoc.containerID], assoc.name) + } + + return out +} + +// transform maps a (deep) copied Container object to what queries need. +// A lock on the Container is not held because these are immutable deep copies.
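+// +// A short consumption sketch for the store as a whole (c stands for some +// deep-copied *Container and is an assumption of this example): +// +// db, _ := NewViewDB() +// _ = db.Save(c) +// snap, err := db.Snapshot().Get(c.ID) +// if err == nil { +// fmt.Println(snap.Name, snap.State) +// }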
+func (v *memdbView) transform(container *Container) *Snapshot { + health := types.NoHealthcheck + if container.Health != nil { + health = container.Health.Status() + } + snapshot := &Snapshot{ + Container: types.Container{ + ID: container.ID, + Names: v.getNames(container.ID), + ImageID: container.ImageID.String(), + Ports: []types.Port{}, + Mounts: container.GetMountPoints(), + State: container.State.StateString(), + Status: container.State.String(), + Created: container.Created.Unix(), + }, + CreatedAt: container.Created, + StartedAt: container.StartedAt, + Name: container.Name, + Pid: container.Pid, + Managed: container.Managed, + ExposedPorts: make(nat.PortSet), + PortBindings: make(nat.PortSet), + Health: health, + Running: container.Running, + Paused: container.Paused, + ExitCode: container.ExitCode(), + } + + if snapshot.Names == nil { + // Dead containers will often have no name, so make sure the response isn't null + snapshot.Names = []string{} + } + + if container.HostConfig != nil { + snapshot.Container.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) + snapshot.HostConfig.Isolation = string(container.HostConfig.Isolation) + for binding := range container.HostConfig.PortBindings { + snapshot.PortBindings[binding] = struct{}{} + } + } + + if container.Config != nil { + snapshot.Image = container.Config.Image + snapshot.Labels = container.Config.Labels + for exposed := range container.Config.ExposedPorts { + snapshot.ExposedPorts[exposed] = struct{}{} + } + } + + if len(container.Args) > 0 { + args := []string{} + for _, arg := range container.Args { + if strings.Contains(arg, " ") { + args = append(args, fmt.Sprintf("'%s'", arg)) + } else { + args = append(args, arg) + } + } + argsAsString := strings.Join(args, " ") + snapshot.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) + } else { + snapshot.Command = container.Path + } + + snapshot.Ports = []types.Port{} + networks := make(map[string]*network.EndpointSettings) + if container.NetworkSettings != nil { + for name, netw := range container.NetworkSettings.Networks { + if netw == nil || netw.EndpointSettings == nil { + continue + } + networks[name] = &network.EndpointSettings{ + EndpointID: netw.EndpointID, + Gateway: netw.Gateway, + IPAddress: netw.IPAddress, + IPPrefixLen: netw.IPPrefixLen, + IPv6Gateway: netw.IPv6Gateway, + GlobalIPv6Address: netw.GlobalIPv6Address, + GlobalIPv6PrefixLen: netw.GlobalIPv6PrefixLen, + MacAddress: netw.MacAddress, + NetworkID: netw.NetworkID, + } + if netw.IPAMConfig != nil { + networks[name].IPAMConfig = &network.EndpointIPAMConfig{ + IPv4Address: netw.IPAMConfig.IPv4Address, + IPv6Address: netw.IPAMConfig.IPv6Address, + } + } + } + for port, bindings := range container.NetworkSettings.Ports { + p, err := nat.ParsePort(port.Port()) + if err != nil { + logrus.Warnf("invalid port map %+v", err) + continue + } + if len(bindings) == 0 { + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + Type: port.Proto(), + }) + continue + } + for _, binding := range bindings { + h, err := nat.ParsePort(binding.HostPort) + if err != nil { + logrus.Warnf("invalid host port map %+v", err) + continue + } + snapshot.Ports = append(snapshot.Ports, types.Port{ + PrivatePort: uint16(p), + PublicPort: uint16(h), + Type: port.Proto(), + IP: binding.HostIP, + }) + } + } + } + snapshot.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} + + return snapshot +} + +// containerByIDIndexer is used to extract the ID field from Container types. 
+// memdb.StringFieldIndex cannot be used since ID is a field from an embedded struct. +type containerByIDIndexer struct{} + +// FromObject implements the memdb.SingleIndexer interface for Container objects +func (e *containerByIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + c, ok := obj.(*Container) + if !ok { + return false, nil, fmt.Errorf("%T is not a Container", obj) + } + // Add the null character as a terminator + v := c.ID + "\x00" + return true, []byte(v), nil +} + +// FromArgs implements the memdb.Indexer interface +func (e *containerByIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByNameIndexer is used to index container name associations by name. +type namesByNameIndexer struct{} + +func (e *namesByNameIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.name + "\x00"), nil +} + +func (e *namesByNameIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +// namesByContainerIDIndexer is used to index container names by container ID. +type namesByContainerIDIndexer struct{} + +func (e *namesByContainerIDIndexer) FromObject(obj interface{}) (bool, []byte, error) { + n, ok := obj.(nameAssociation) + if !ok { + return false, nil, fmt.Errorf(`%T does not have type "nameAssociation"`, obj) + } + + // Add the null character as a terminator + return true, []byte(n.containerID + "\x00"), nil +} + +func (e *namesByContainerIDIndexer) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905c16..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users.
This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. 
You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. 
You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. 
If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. 
Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE deleted file mode 100644 index 5b6e7c66c2..0000000000 --- a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. 
We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. 
-However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. 
Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. diff --git a/vendor/github.com/docker/docker/daemon/cluster/provider/network.go b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go new file mode 100644 index 0000000000..533baa0e17 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/cluster/provider/network.go @@ -0,0 +1,37 @@ +package provider // import "github.com/docker/docker/daemon/cluster/provider" + +import "github.com/docker/docker/api/types" + +// NetworkCreateRequest is a request when creating a network. +type NetworkCreateRequest struct { + ID string + types.NetworkCreateRequest +} + +// NetworkCreateResponse is a response when creating a network. +type NetworkCreateResponse struct { + ID string `json:"Id"` +} + +// VirtualAddress represents a virtual address. +type VirtualAddress struct { + IPv4 string + IPv6 string +} + +// PortConfig represents a port configuration. +type PortConfig struct { + Name string + Protocol int32 + TargetPort uint32 + PublishedPort uint32 +} + +// ServiceConfig represents a service configuration. +type ServiceConfig struct { + ID string + Name string + Aliases map[string][]string + VirtualAddresses map[string]*VirtualAddress + ExposedPorts []*PortConfig +} diff --git a/vendor/github.com/docker/docker/daemon/exec/exec.go b/vendor/github.com/docker/docker/daemon/exec/exec.go new file mode 100644 index 0000000000..ca4e1aeb49 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/exec/exec.go @@ -0,0 +1,144 @@ +package exec // import "github.com/docker/docker/daemon/exec" + +import ( + "runtime" + "sync" + + "github.com/containerd/containerd/cio" + "github.com/docker/docker/container/stream" + "github.com/docker/docker/pkg/stringid" + "github.com/sirupsen/logrus" +) + +// Config holds the configurations for execs. 
The Daemon keeps +// track of both running and finished execs so that they can be +// examined both during and after completion. +type Config struct { + sync.Mutex + StreamConfig *stream.Config + ID string + Running bool + ExitCode *int + OpenStdin bool + OpenStderr bool + OpenStdout bool + CanRemove bool + ContainerID string + DetachKeys []byte + Entrypoint string + Args []string + Tty bool + Privileged bool + User string + WorkingDir string + Env []string + Pid int +} + +// NewConfig initializes a new exec configuration +func NewConfig() *Config { + return &Config{ + ID: stringid.GenerateNonCryptoID(), + StreamConfig: stream.NewConfig(), + } +} + +// rio wraps the libcontainerd IO so that closing and waiting also tear +// down the attached stream config. +type rio struct { + cio.IO + + sc *stream.Config +} + +func (i *rio) Close() error { + i.IO.Close() + + return i.sc.CloseStreams() +} + +func (i *rio) Wait() { + i.sc.Wait() + + i.IO.Wait() +} + +// InitializeStdio is called by libcontainerd to connect the stdio. +func (c *Config) InitializeStdio(iop *cio.DirectIO) (cio.IO, error) { + c.StreamConfig.CopyToPipe(iop) + + // No stdin to copy and no TTY: close the stdin pipe immediately + // (Windows-specific behavior). + if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" { + if iop.Stdin != nil { + if err := iop.Stdin.Close(); err != nil { + logrus.Errorf("error closing exec stdin: %+v", err) + } + } + } + + return &rio{IO: iop, sc: c.StreamConfig}, nil +} + +// CloseStreams closes the stdio streams for the exec +func (c *Config) CloseStreams() error { + return c.StreamConfig.CloseStreams() +} + +// SetExitCode sets the exec config's exit code +func (c *Config) SetExitCode(code int) { + c.ExitCode = &code +} + +// Store keeps track of the exec configurations. +type Store struct { + byID map[string]*Config + sync.RWMutex +} + +// NewStore initializes a new exec store. +func NewStore() *Store { + return &Store{ + byID: make(map[string]*Config), + } +} + +// Commands returns the exec configurations in the store. +func (e *Store) Commands() map[string]*Config { + e.RLock() + byID := make(map[string]*Config, len(e.byID)) + for id, config := range e.byID { + byID[id] = config + } + e.RUnlock() + return byID +} + +// Add adds a new exec configuration to the store. +func (e *Store) Add(id string, config *Config) { + e.Lock() + e.byID[id] = config + e.Unlock() +} + +// Get returns an exec configuration by its id. +func (e *Store) Get(id string) *Config { + e.RLock() + res := e.byID[id] + e.RUnlock() + return res +} + +// Delete removes an exec configuration from the store. +func (e *Store) Delete(id string, pid int) { + e.Lock() + delete(e.byID, id) + e.Unlock() +} + +// List returns the list of exec ids in the store.
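+// +// A short lifecycle sketch for the store (ec is illustrative, not part of +// this package): +// +// s := exec.NewStore() +// ec := exec.NewConfig() +// s.Add(ec.ID, ec) +// if cfg := s.Get(ec.ID); cfg != nil { +// cfg.SetExitCode(0) +// } +// s.Delete(ec.ID, ec.Pid)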
+func (e *Store) List() []string { + var IDs []string + e.RLock() + for id := range e.byID { + IDs = append(IDs, id) + } + e.RUnlock() + return IDs +} diff --git a/vendor/github.com/containers/storage/drivers/counter.go b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go similarity index 91% rename from vendor/github.com/containers/storage/drivers/counter.go rename to vendor/github.com/docker/docker/daemon/graphdriver/counter.go index 72551a38d4..2772bd247d 100644 --- a/vendor/github.com/containers/storage/drivers/counter.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/counter.go @@ -1,4 +1,4 @@ -package graphdriver +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" import "sync" @@ -54,6 +54,9 @@ func (c *RefCounter) incdec(path string, infoOp func(minfo *minfo)) int { } infoOp(m) count := m.count + if count <= 0 { + delete(c.counts, path) + } c.mu.Unlock() return count } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go new file mode 100644 index 0000000000..a9e1957393 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -0,0 +1,307 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/storage" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" +) + +// FsMagic is the unsigned id of the filesystem in use. +type FsMagic uint32 + +const ( + // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. + FsMagicUnsupported = FsMagic(0x00000000) +) + +var ( + // All registered drivers + drivers map[string]InitFunc +) + +// CreateOpts contains optional arguments for Create() and CreateReadWrite() +// methods. +type CreateOpts struct { + MountLabel string + StorageOpt map[string]string +} + +// InitFunc initializes the storage driver. +type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) + +// ProtoDriver defines the basic capabilities of a driver. +// This interface exists solely to be a minimum set of methods +// for client code which chooses not to implement the entire Driver +// interface and uses the NaiveDiffDriver wrapper constructor. +// +// Use of ProtoDriver directly by client code is not recommended. +type ProtoDriver interface { + // String returns a string representation of this driver. + String() string + // CreateReadWrite creates a new, empty filesystem layer that is ready + // to be used as the storage for a container. Additional options can + // be passed in opts. parent may be "" and opts may be nil. + CreateReadWrite(id, parent string, opts *CreateOpts) error + // Create creates a new, empty, filesystem layer with the + // specified id and parent and options passed in opts. Parent + // may be "" and opts may be nil. + Create(id, parent string, opts *CreateOpts) error + // Remove attempts to remove the filesystem layer with this id. + Remove(id string) error + // Get returns the mountpoint for the layered filesystem referred + // to by this id. You can optionally specify a mountLabel or "". + // Returns the absolute path to the mounted layered filesystem.
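+ // Callers must release the returned filesystem with Put(id) once + // they are finished with it.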
+ Get(id, mountLabel string) (fs containerfs.ContainerFS, err error) + // Put releases the system resources for the specified id, + // e.g., unmounting the layered filesystem. + Put(id string) error + // Exists returns whether a filesystem layer with the specified + // ID exists on this driver. + Exists(id string) bool + // Status returns a set of key-value pairs which give low-level + // diagnostic status about this driver. + Status() [][2]string + // GetMetadata returns a set of key-value pairs which give low-level + // information about the image/container the driver is managing. + GetMetadata(id string) (map[string]string, error) + // Cleanup performs necessary tasks to release resources + // held by the driver, e.g., unmounting all layered filesystems + // known to this driver. + Cleanup() error +} + +// DiffDriver is the interface to use to implement graph diffs +type DiffDriver interface { + // Diff produces an archive of the changes between the specified + // layer and its parent layer which may be "". + Diff(id, parent string) (io.ReadCloser, error) + // Changes produces a list of changes between the specified layer + // and its parent layer. If parent is "", then all changes will be ADD changes. + Changes(id, parent string) ([]archive.Change, error) + // ApplyDiff extracts the changeset from the given diff into the + // layer with the specified id and parent, returning the size of the + // new layer in bytes. + // The archive.Reader must be an uncompressed stream. + ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) + // DiffSize calculates the changes between the specified id + // and its parent and returns the size in bytes of the changes + // relative to its base filesystem directory. + DiffSize(id, parent string) (size int64, err error) +} + +// Driver is the interface for layered/snapshot file system drivers. +type Driver interface { + ProtoDriver + DiffDriver +} + +// Capabilities defines a list of capabilities a driver may implement. +// These capabilities are not required; however, they do determine how a +// graphdriver can be used. +type Capabilities struct { + // ReproducesExactDiffs flags that this driver is capable of reproducing + // exactly equivalent diffs for read-only layers. If set, clients can + // rely on the driver for consistent tar streams, and avoid extra + // processing to account for potential differences (e.g. the layer + // store's use of tar-split). + ReproducesExactDiffs bool +} + +// CapabilityDriver is the interface for layered file system drivers that +// can report on their Capabilities. +type CapabilityDriver interface { + Capabilities() Capabilities +} + +// DiffGetterDriver is the interface for layered file system drivers that +// provide a specialized function for getting file contents for tar-split. +type DiffGetterDriver interface { + Driver + // DiffGetter returns an interface to efficiently retrieve the contents + // of files in a layer. + DiffGetter(id string) (FileGetCloser, error) +} + +// FileGetCloser extends the storage.FileGetter interface with a Close method +// for cleaning up. +type FileGetCloser interface { + storage.FileGetter + // Close cleans up any resources associated with the FileGetCloser. + Close() error +} + +// Checker makes checks on specified filesystems. +type Checker interface { + // IsMounted returns true if the provided path is mounted for the specific checker + IsMounted(path string) bool +} + +func init() { + drivers = make(map[string]InitFunc) +} + +// Register registers an InitFunc for the driver.
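+// +// Drivers normally register themselves from an init function in their own +// package; a sketch (the "mydriver" name and the newMyDriver constructor are +// assumptions, not part of this package): +// +// func init() { +// graphdriver.Register("mydriver", func(root string, options []string, +// uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { +// return newMyDriver(root, options) +// }) +// }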
+func Register(name string, initFunc InitFunc) error { + if _, exists := drivers[name]; exists { + return fmt.Errorf("Name already registered %s", name) + } + drivers[name] = initFunc + + return nil +} + +// GetDriver initializes and returns the registered driver +func GetDriver(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + } + + pluginDriver, err := lookupPlugin(name, pg, config) + if err == nil { + return pluginDriver, nil + } + logrus.WithError(err).WithField("driver", name).WithField("home-dir", config.Root).Error("Failed to GetDriver graph") + return nil, ErrNotSupported +} + +// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins +func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { + if initFunc, exists := drivers[name]; exists { + return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) + } + logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) + return nil, ErrNotSupported +} + +// Options is used to initialize a graphdriver +type Options struct { + Root string + DriverOptions []string + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ExperimentalEnabled bool +} + +// New creates the driver and initializes it at the specified root. +func New(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver: %s", name) // so the logs show specified driver + return GetDriver(name, pg, config) + } + + // Guess for prior driver + driversMap := scanPriorDrivers(config.Root) + list := strings.Split(priority, ",") + logrus.Debugf("[graphdriver] priority list: %v", list) + for _, name := range list { + if name == "vfs" { + // don't use vfs even if there is state present. + continue + } + if _, prior := driversMap[name]; prior { + // of the state found from prior drivers, check in order of our priority + // which we would prefer + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + // unlike below, we will return error here, because there is prior + // state, and now it is no longer supported/prereq/compatible, so + // something changed and needs attention. Otherwise the daemon's + // images would just "disappear". 
+ logrus.Errorf("[graphdriver] prior storage driver %s failed: %s", name, err) + return nil, err + } + + // abort starting when there are other prior configured drivers + // to ensure the user explicitly selects the driver to load + if len(driversMap)-1 > 0 { + var driversSlice []string + for name := range driversMap { + driversSlice = append(driversSlice, name) + } + + return nil, fmt.Errorf("%s contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", config.Root, strings.Join(driversSlice, ", ")) + } + + logrus.Infof("[graphdriver] using prior storage driver: %s", name) + return driver, nil + } + } + + // Check for priority drivers first + for _, name := range list { + driver, err := getBuiltinDriver(name, config.Root, config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + + // Check all registered drivers if no priority driver is found + for name, initFunc := range drivers { + driver, err := initFunc(filepath.Join(config.Root, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) + if err != nil { + if IsDriverNotSupported(err) { + continue + } + return nil, err + } + return driver, nil + } + return nil, fmt.Errorf("No supported storage backend found") +} + +// scanPriorDrivers returns an un-ordered scan of directories of prior storage drivers +func scanPriorDrivers(root string) map[string]bool { + driversMap := make(map[string]bool) + + for driver := range drivers { + p := filepath.Join(root, driver) + if _, err := os.Stat(p); err == nil && driver != "vfs" { + if !isEmptyDir(p) { + driversMap[driver] = true + } + } + } + return driversMap +} + +// IsInitialized checks if the driver's home-directory exists and is non-empty. +func IsInitialized(driverHome string) bool { + _, err := os.Stat(driverHome) + if os.IsNotExist(err) { + return false + } + if err != nil { + logrus.Warnf("graphdriver.IsInitialized: stat failed: %v", err) + } + return !isEmptyDir(driverHome) +} + +// isEmptyDir checks if a directory is empty. It is used to check if prior +// storage-driver directories exist. 
+// If an error occurs, it also assumes the
+// directory is not empty (which preserves the behavior _before_ this check
+// was added)
+func isEmptyDir(name string) bool {
+	f, err := os.Open(name)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+
+	if _, err = f.Readdirnames(1); err == io.EOF {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
new file mode 100644
index 0000000000..cd83c4e21a
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_freebsd.go
@@ -0,0 +1,21 @@
+package graphdriver // import "github.com/docker/docker/daemon/graphdriver"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+var (
+	// List of drivers that should be used in order
+	priority = "zfs"
+)
+
+// Mounted checks if the given path is mounted as the fs type
+func Mounted(fsType FsMagic, mountPath string) (bool, error) {
+	var buf unix.Statfs_t
+	if err := syscall.Statfs(mountPath, &buf); err != nil {
+		return false, err
+	}
+	return FsMagic(buf.Type) == fsType, nil
+}
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
similarity index 91%
rename from vendor/github.com/containers/storage/drivers/driver_linux.go
rename to vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
index 94f7270eae..61c6b24a9c 100644
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_linux.go
@@ -1,11 +1,7 @@
-// +build linux
-
-package graphdriver
+package graphdriver // import "github.com/docker/docker/daemon/graphdriver"
 
 import (
-	"path/filepath"
-
-	"github.com/containers/storage/pkg/mount"
+	"github.com/docker/docker/pkg/mount"
 	"golang.org/x/sys/unix"
 )
 
@@ -51,15 +47,8 @@ const (
 )
 
 var (
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"overlay",
-		"devicemapper",
-		"aufs",
-		"btrfs",
-		"zfs",
-		"vfs",
-	}
+	// List of drivers that should be used in order
+	priority = "btrfs,zfs,overlay2,aufs,overlay,devicemapper,vfs"
 
 	// FsNames maps filesystem id to name of the filesystem.
 	FsNames = map[FsMagic]string{
@@ -89,7 +78,7 @@ var (
 // GetFSMagic returns the filesystem id given the path.
 func GetFSMagic(rootpath string) (FsMagic, error) {
 	var buf unix.Statfs_t
-	if err := unix.Statfs(filepath.Dir(rootpath), &buf); err != nil {
+	if err := unix.Statfs(rootpath, &buf); err != nil {
 		return 0, err
 	}
 	return FsMagic(buf.Type), nil
diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
new file mode 100644
index 0000000000..1f2e8f071b
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_unsupported.go
@@ -0,0 +1,13 @@
+// +build !linux,!windows,!freebsd
+
+package graphdriver // import "github.com/docker/docker/daemon/graphdriver"
+
+var (
+	// List of drivers that should be used in order
+	priority = "unsupported"
+)
+
+// GetFSMagic returns the filesystem id given the path.
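The per-OS `priority` strings above feed driver selection, while `GetFSMagic` lets callers probe the backing filesystem. A small sketch of using it, assuming the Linux build of this package (where the `FsNames` map is defined); the path and example names are illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

// backingFS maps a path's filesystem magic number to a readable name.
func backingFS(root string) (string, error) {
	magic, err := graphdriver.GetFSMagic(root)
	if err != nil {
		return "", err
	}
	if name, ok := graphdriver.FsNames[magic]; ok {
		return name, nil // e.g. a btrfs or xfs entry from FsNames
	}
	return fmt.Sprintf("unknown (0x%x)", magic), nil
}

func main() {
	name, err := backingFS("/var/lib/docker")
	if err != nil {
		panic(err)
	}
	fmt.Println("backing filesystem:", name)
}
```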
+func GetFSMagic(rootpath string) (FsMagic, error) { + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go new file mode 100644 index 0000000000..856b575e75 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver_windows.go @@ -0,0 +1,12 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +var ( + // List of drivers that should be used in order + priority = "windowsfilter" +) + +// GetFSMagic returns the filesystem id given the path. +func GetFSMagic(rootpath string) (FsMagic, error) { + // Note it is OK to return FsMagicUnsupported on Windows. + return FsMagicUnsupported, nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/errors.go b/vendor/github.com/docker/docker/daemon/graphdriver/errors.go new file mode 100644 index 0000000000..96d3544552 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/errors.go @@ -0,0 +1,36 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +const ( + // ErrNotSupported returned when driver is not supported. + ErrNotSupported NotSupportedError = "driver not supported" + // ErrPrerequisites returned when driver does not meet prerequisites. + ErrPrerequisites NotSupportedError = "prerequisites for driver not satisfied (wrong filesystem?)" + // ErrIncompatibleFS returned when file system is not supported. + ErrIncompatibleFS NotSupportedError = "backing file system is unsupported for this graph driver" +) + +// ErrUnSupported signals that the graph-driver is not supported on the current configuration +type ErrUnSupported interface { + NotSupported() +} + +// NotSupportedError signals that the graph-driver is not supported on the current configuration +type NotSupportedError string + +func (e NotSupportedError) Error() string { + return string(e) +} + +// NotSupported signals that a graph-driver is not supported. +func (e NotSupportedError) NotSupported() {} + +// IsDriverNotSupported returns true if the error initializing +// the graph driver is a non-supported error. +func IsDriverNotSupported(err error) bool { + switch err.(type) { + case ErrUnSupported: + return true + default: + return false + } +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go new file mode 100644 index 0000000000..e1f368508a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/fsdiff.go @@ -0,0 +1,175 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "io" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/sirupsen/logrus" +) + +var ( + // ApplyUncompressedLayer defines the unpack method used by the graph + // driver. + ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer +) + +// NaiveDiffDriver takes a ProtoDriver and adds the +// capability of the Diffing methods on the local file system, +// which it may or may not support on its own. See the comment +// on the exported NewNaiveDiffDriver function below. +// Notably, the AUFS driver doesn't need to be wrapped like this. 
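The typed errors in `errors.go` above exist so callers can tell "this driver cannot run here" apart from a hard failure without matching on strings: the constants satisfy both `error` and the `ErrUnSupported` marker interface. A self-contained sketch of that classification:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/graphdriver"
)

// classify mirrors how the probe loop in New() treats errors.
func classify(err error) string {
	if graphdriver.IsDriverNotSupported(err) {
		return "not supported here; keep probing other drivers"
	}
	return "hard failure; abort driver selection"
}

func main() {
	fmt.Println(classify(graphdriver.ErrNotSupported))      // keep probing
	fmt.Println(classify(graphdriver.ErrPrerequisites))     // keep probing
	fmt.Println(classify(fmt.Errorf("device unavailable"))) // abort
}
```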
+type NaiveDiffDriver struct { + ProtoDriver + uidMaps []idtools.IDMap + gidMaps []idtools.IDMap +} + +// NewNaiveDiffDriver returns a fully functional driver that wraps the +// given ProtoDriver and adds the capability of the following methods which +// it may or may not support on its own: +// Diff(id, parent string) (archive.Archive, error) +// Changes(id, parent string) ([]archive.Change, error) +// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) +// DiffSize(id, parent string) (size int64, err error) +func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { + return &NaiveDiffDriver{ProtoDriver: driver, + uidMaps: uidMaps, + gidMaps: gidMaps} +} + +// Diff produces an archive of the changes between the specified +// layer and its parent layer which may be "". +func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch io.ReadCloser, err error) { + startTime := time.Now() + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + layerFs := layerRootFs.Path() + + defer func() { + if err != nil { + driver.Put(id) + } + }() + + if parent == "" { + archive, err := archive.Tar(layerFs, archive.Uncompressed) + if err != nil { + return nil, err + } + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + return err + }), nil + } + + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + + parentFs := parentRootFs.Path() + + changes, err := archive.ChangesDirs(layerFs, parentFs) + if err != nil { + return nil, err + } + + archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) + if err != nil { + return nil, err + } + + return ioutils.NewReadCloserWrapper(archive, func() error { + err := archive.Close() + driver.Put(id) + + // NaiveDiffDriver compares file metadata with parent layers. Parent layers + // are extracted from tar's with full second precision on modified time. + // We need this hack here to make sure calls within same second receive + // correct result. + time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second))) + return err + }), nil +} + +// Changes produces a list of changes between the specified layer +// and its parent layer. If parent is "", then all changes will be ADD changes. +func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { + driver := gdw.ProtoDriver + + layerRootFs, err := driver.Get(id, "") + if err != nil { + return nil, err + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + parentFs := "" + + if parent != "" { + parentRootFs, err := driver.Get(parent, "") + if err != nil { + return nil, err + } + defer driver.Put(parent) + parentFs = parentRootFs.Path() + } + + return archive.ChangesDirs(layerFs, parentFs) +} + +// ApplyDiff extracts the changeset from the given diff into the +// layer with the specified id and parent, returning the size of the +// new layer in bytes. +func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff io.Reader) (size int64, err error) { + driver := gdw.ProtoDriver + + // Mount the root filesystem so we can apply the diff/layer. 
+ layerRootFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + layerFs := layerRootFs.Path() + options := &archive.TarOptions{UIDMaps: gdw.uidMaps, + GIDMaps: gdw.gidMaps} + start := time.Now().UTC() + logrus.Debug("Start untar layer") + if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { + return + } + logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) + + return +} + +// DiffSize calculates the changes between the specified layer +// and its parent and returns the size in bytes of the changes +// relative to its base filesystem directory. +func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { + driver := gdw.ProtoDriver + + changes, err := gdw.Changes(id, parent) + if err != nil { + return + } + + layerFs, err := driver.Get(id, "") + if err != nil { + return + } + defer driver.Put(id) + + return archive.ChangesSize(layerFs.Path(), changes), nil +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go new file mode 100644 index 0000000000..d8058d9236 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/plugin.go @@ -0,0 +1,33 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "fmt" + "path/filepath" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/plugin/v2" +) + +func lookupPlugin(name string, pg plugingetter.PluginGetter, config Options) (Driver, error) { + if !config.ExperimentalEnabled { + return nil, fmt.Errorf("graphdriver plugins are only supported with experimental mode") + } + pl, err := pg.Get(name, "GraphDriver", plugingetter.Acquire) + if err != nil { + return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err) + } + return newPluginDriver(name, pl, config) +} + +func newPluginDriver(name string, pl plugingetter.CompatPlugin, config Options) (Driver, error) { + home := config.Root + if !pl.IsV1() { + if p, ok := pl.(*v2.Plugin); ok { + if p.PluginObj.Config.PropagatedMount != "" { + home = p.PluginObj.Config.PropagatedMount + } + } + } + proxy := &graphDriverProxy{name, pl, Capabilities{}} + return proxy, proxy.Init(filepath.Join(home, name), config.DriverOptions, config.UIDMaps, config.GIDMaps) +} diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go new file mode 100644 index 0000000000..10a7a527ae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/graphdriver/proxy.go @@ -0,0 +1,263 @@ +package graphdriver // import "github.com/docker/docker/daemon/graphdriver" + +import ( + "errors" + "fmt" + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" +) + +type graphDriverProxy struct { + name string + p plugingetter.CompatPlugin + caps Capabilities +} + +type graphDriverRequest struct { + ID string `json:",omitempty"` + Parent string `json:",omitempty"` + MountLabel string `json:",omitempty"` + StorageOpt map[string]string `json:",omitempty"` +} + +type graphDriverResponse struct { + Err string `json:",omitempty"` + Dir string `json:",omitempty"` + Exists bool `json:",omitempty"` + Status [][2]string `json:",omitempty"` + Changes []archive.Change `json:",omitempty"` + Size int64 `json:",omitempty"` + Metadata map[string]string 
`json:",omitempty"` + Capabilities Capabilities `json:",omitempty"` +} + +type graphDriverInitRequest struct { + Home string + Opts []string `json:"Opts"` + UIDMaps []idtools.IDMap `json:"UIDMaps"` + GIDMaps []idtools.IDMap `json:"GIDMaps"` +} + +func (d *graphDriverProxy) Init(home string, opts []string, uidMaps, gidMaps []idtools.IDMap) error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always acquire here, it will be cleaned up on daemon shutdown + cp.Acquire() + } + } + args := &graphDriverInitRequest{ + Home: home, + Opts: opts, + UIDMaps: uidMaps, + GIDMaps: gidMaps, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Init", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + caps, err := d.fetchCaps() + if err != nil { + return err + } + d.caps = caps + return nil +} + +func (d *graphDriverProxy) fetchCaps() (Capabilities, error) { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Capabilities", args, &ret); err != nil { + if !plugins.IsNotFound(err) { + return Capabilities{}, err + } + } + return ret.Capabilities, nil +} + +func (d *graphDriverProxy) String() string { + return d.name +} + +func (d *graphDriverProxy) Capabilities() Capabilities { + return d.caps +} + +func (d *graphDriverProxy) CreateReadWrite(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.CreateReadWrite", id, parent, opts) +} + +func (d *graphDriverProxy) Create(id, parent string, opts *CreateOpts) error { + return d.create("GraphDriver.Create", id, parent, opts) +} + +func (d *graphDriverProxy) create(method, id, parent string, opts *CreateOpts) error { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + if opts != nil { + args.MountLabel = opts.MountLabel + args.StorageOpt = opts.StorageOpt + } + var ret graphDriverResponse + if err := d.p.Client().Call(method, args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Remove(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Remove", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) { + args := &graphDriverRequest{ + ID: id, + MountLabel: mountLabel, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil { + return nil, err + } + var err error + if ret.Err != "" { + err = errors.New(ret.Err) + } + return containerfs.NewLocalContainerFS(d.p.ScopedPath(ret.Dir)), err +} + +func (d *graphDriverProxy) Put(id string) error { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Put", args, &ret); err != nil { + return err + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Exists(id string) bool { + args := &graphDriverRequest{ID: id} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Exists", args, &ret); err != nil { + return false + } + return ret.Exists +} + +func (d *graphDriverProxy) Status() [][2]string { + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Status", args, &ret); err != nil { + return nil + } + return 
ret.Status +} + +func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { + args := &graphDriverRequest{ + ID: id, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.GetMetadata", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + return ret.Metadata, nil +} + +func (d *graphDriverProxy) Cleanup() error { + if !d.p.IsV1() { + if cp, ok := d.p.(plugingetter.CountedPlugin); ok { + // always release + defer cp.Release() + } + } + + args := &graphDriverRequest{} + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Cleanup", args, &ret); err != nil { + return nil + } + if ret.Err != "" { + return errors.New(ret.Err) + } + return nil +} + +func (d *graphDriverProxy) Diff(id, parent string) (io.ReadCloser, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + body, err := d.p.Client().Stream("GraphDriver.Diff", args) + if err != nil { + return nil, err + } + return body, nil +} + +func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.Changes", args, &ret); err != nil { + return nil, err + } + if ret.Err != "" { + return nil, errors.New(ret.Err) + } + + return ret.Changes, nil +} + +func (d *graphDriverProxy) ApplyDiff(id, parent string, diff io.Reader) (int64, error) { + var ret graphDriverResponse + if err := d.p.Client().SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} + +func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { + args := &graphDriverRequest{ + ID: id, + Parent: parent, + } + var ret graphDriverResponse + if err := d.p.Client().Call("GraphDriver.DiffSize", args, &ret); err != nil { + return -1, err + } + if ret.Err != "" { + return -1, errors.New(ret.Err) + } + return ret.Size, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/adapter.go b/vendor/github.com/docker/docker/daemon/logger/adapter.go new file mode 100644 index 0000000000..5b9252d324 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/adapter.go @@ -0,0 +1,139 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/docker/docker/api/types/plugins/logdriver" + "github.com/docker/docker/pkg/plugingetter" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// pluginAdapter takes a plugin and implements the Logger interface for logger +// instances +type pluginAdapter struct { + driverName string + id string + plugin logPlugin + fifoPath string + capabilities Capability + logInfo Info + + // synchronize access to the log stream and shared buffer + mu sync.Mutex + enc logdriver.LogEntryEncoder + stream io.WriteCloser + // buf is shared for each `Log()` call to reduce allocations. 
+	// buf must be protected by mutex
+	buf logdriver.LogEntry
+}
+
+func (a *pluginAdapter) Log(msg *Message) error {
+	a.mu.Lock()
+
+	a.buf.Line = msg.Line
+	a.buf.TimeNano = msg.Timestamp.UnixNano()
+	a.buf.Partial = (msg.PLogMetaData != nil)
+	a.buf.Source = msg.Source
+
+	err := a.enc.Encode(&a.buf)
+	a.buf.Reset()
+
+	a.mu.Unlock()
+
+	PutMessage(msg)
+	return err
+}
+
+func (a *pluginAdapter) Name() string {
+	return a.driverName
+}
+
+func (a *pluginAdapter) Close() error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if err := a.plugin.StopLogging(filepath.Join("/", "run", "docker", "logging", a.id)); err != nil {
+		return err
+	}
+
+	if err := a.stream.Close(); err != nil {
+		logrus.WithError(err).Error("error closing plugin fifo")
+	}
+	if err := os.Remove(a.fifoPath); err != nil && !os.IsNotExist(err) {
+		logrus.WithError(err).Error("error cleaning up plugin fifo")
+	}
+
+	// may be nil, especially for unit tests
+	if pluginGetter != nil {
+		pluginGetter.Get(a.Name(), extName, plugingetter.Release)
+	}
+	return nil
+}
+
+type pluginAdapterWithRead struct {
+	*pluginAdapter
+}
+
+func (a *pluginAdapterWithRead) ReadLogs(config ReadConfig) *LogWatcher {
+	watcher := NewLogWatcher()
+
+	go func() {
+		defer close(watcher.Msg)
+		stream, err := a.plugin.ReadLogs(a.logInfo, config)
+		if err != nil {
+			watcher.Err <- errors.Wrap(err, "error getting log reader")
+			return
+		}
+		defer stream.Close()
+
+		dec := logdriver.NewLogEntryDecoder(stream)
+		for {
+			select {
+			case <-watcher.WatchClose():
+				return
+			default:
+			}
+
+			var buf logdriver.LogEntry
+			if err := dec.Decode(&buf); err != nil {
+				if err == io.EOF {
+					return
+				}
+				select {
+				case watcher.Err <- errors.Wrap(err, "error decoding log message"):
+				case <-watcher.WatchClose():
+				}
+				return
+			}
+
+			msg := &Message{
+				Timestamp: time.Unix(0, buf.TimeNano),
+				Line:      buf.Line,
+				Source:    buf.Source,
+			}
+
+			// plugin should handle this, but check just in case
+			if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) {
+				continue
+			}
+			if !config.Until.IsZero() && msg.Timestamp.After(config.Until) {
+				return
+			}
+
+			select {
+			case watcher.Msg <- msg:
+			case <-watcher.WatchClose():
+				// make sure the message we consumed is sent
+				watcher.Msg <- msg
+				return
+			}
+		}
+	}()
+
+	return watcher
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/copier.go b/vendor/github.com/docker/docker/daemon/logger/copier.go
new file mode 100644
index 0000000000..ae86777f33
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/copier.go
@@ -0,0 +1,182 @@
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"time"
+
+	types "github.com/docker/docker/api/types/backend"
+	"github.com/docker/docker/pkg/stringid"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// readSize is the maximum bytes read during a single read
+	// operation.
+	readSize = 2 * 1024
+
+	// defaultBufSize provides a reasonable default for loggers that do
+	// not have an external limit to impose on log line size.
+	defaultBufSize = 16 * 1024
+)
+
+// Copier can copy logs from specified sources to Logger and attach Timestamp.
+// Writes are concurrent, so you need to implement some sync in your logger.
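A sketch of how a `Copier` is typically wired over a container's stdio streams; the wrapper function and its arguments are illustrative, not taken from this diff:

```go
package main

import (
	"io"

	"github.com/docker/docker/daemon/logger"
)

// copyContainerLogs fans two streams into one destination logger.
// The map keys become Message.Source ("stdout"/"stderr") on each line.
func copyContainerLogs(stdout, stderr io.Reader, dst logger.Logger) {
	c := logger.NewCopier(map[string]io.Reader{
		"stdout": stdout,
		"stderr": stderr,
	}, dst)
	c.Run()  // one copy goroutine per source
	c.Wait() // blocks until every source reaches EOF
	c.Close()
}
```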
+type Copier struct { + // srcs is map of name -> reader pairs, for example "stdout", "stderr" + srcs map[string]io.Reader + dst Logger + copyJobs sync.WaitGroup + closeOnce sync.Once + closed chan struct{} +} + +// NewCopier creates a new Copier +func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { + return &Copier{ + srcs: srcs, + dst: dst, + closed: make(chan struct{}), + } +} + +// Run starts logs copying +func (c *Copier) Run() { + for src, w := range c.srcs { + c.copyJobs.Add(1) + go c.copySrc(src, w) + } +} + +func (c *Copier) copySrc(name string, src io.Reader) { + defer c.copyJobs.Done() + + bufSize := defaultBufSize + if sizedLogger, ok := c.dst.(SizedLogger); ok { + bufSize = sizedLogger.BufSize() + } + buf := make([]byte, bufSize) + + n := 0 + eof := false + var partialid string + var partialTS time.Time + var ordinal int + firstPartial := true + hasMorePartial := false + + for { + select { + case <-c.closed: + return + default: + // Work out how much more data we are okay with reading this time. + upto := n + readSize + if upto > cap(buf) { + upto = cap(buf) + } + // Try to read that data. + if upto > n { + read, err := src.Read(buf[n:upto]) + if err != nil { + if err != io.EOF { + logrus.Errorf("Error scanning log stream: %s", err) + return + } + eof = true + } + n += read + } + // If we have no data to log, and there's no more coming, we're done. + if n == 0 && eof { + return + } + // Break up the data that we've buffered up into lines, and log each in turn. + p := 0 + + for q := bytes.IndexByte(buf[p:n], '\n'); q >= 0; q = bytes.IndexByte(buf[p:n], '\n') { + select { + case <-c.closed: + return + default: + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:p+q]...) + + if hasMorePartial { + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: true} + + // reset + partialid = "" + ordinal = 0 + firstPartial = true + hasMorePartial = false + } + if msg.PLogMetaData == nil { + msg.Timestamp = time.Now().UTC() + } else { + msg.Timestamp = partialTS + } + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + } + p += q + 1 + } + // If there's no more coming, or the buffer is full but + // has no newlines, log whatever we haven't logged yet, + // noting that it's a partial log line. + if eof || (p == 0 && n == len(buf)) { + if p < n { + msg := NewMessage() + msg.Source = name + msg.Line = append(msg.Line, buf[p:n]...) + + // Generate unique partialID for first partial. Use it across partials. + // Record timestamp for first partial. Use it across partials. + // Initialize Ordinal for first partial. Increment it across partials. + if firstPartial { + msg.Timestamp = time.Now().UTC() + partialTS = msg.Timestamp + partialid = stringid.GenerateRandomID() + ordinal = 1 + firstPartial = false + } else { + msg.Timestamp = partialTS + } + msg.PLogMetaData = &types.PartialLogMetaData{ID: partialid, Ordinal: ordinal, Last: false} + ordinal++ + hasMorePartial = true + + if logErr := c.dst.Log(msg); logErr != nil { + logrus.Errorf("Failed to log msg %q for logger %s: %s", msg.Line, c.dst.Name(), logErr) + } + p = 0 + n = 0 + } + if eof { + return + } + } + // Move any unlogged data to the front of the buffer in preparation for another read. 
+ if p > 0 { + copy(buf[0:], buf[p:n]) + n -= p + } + } + } +} + +// Wait waits until all copying is done +func (c *Copier) Wait() { + c.copyJobs.Wait() +} + +// Close closes the copier +func (c *Copier) Close() { + c.closeOnce.Do(func() { + close(c.closed) + }) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/factory.go b/vendor/github.com/docker/docker/daemon/logger/factory.go new file mode 100644 index 0000000000..9723f7fc0c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/factory.go @@ -0,0 +1,162 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "fmt" + "sort" + "sync" + + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/plugingetter" + units "github.com/docker/go-units" + "github.com/pkg/errors" +) + +// Creator builds a logging driver instance with given context. +type Creator func(Info) (Logger, error) + +// LogOptValidator checks the options specific to the underlying +// logging implementation. +type LogOptValidator func(cfg map[string]string) error + +type logdriverFactory struct { + registry map[string]Creator + optValidator map[string]LogOptValidator + m sync.Mutex +} + +func (lf *logdriverFactory) list() []string { + ls := make([]string, 0, len(lf.registry)) + lf.m.Lock() + for name := range lf.registry { + ls = append(ls, name) + } + lf.m.Unlock() + sort.Strings(ls) + return ls +} + +// ListDrivers gets the list of registered log driver names +func ListDrivers() []string { + return factory.list() +} + +func (lf *logdriverFactory) register(name string, c Creator) error { + if lf.driverRegistered(name) { + return fmt.Errorf("logger: log driver named '%s' is already registered", name) + } + + lf.m.Lock() + lf.registry[name] = c + lf.m.Unlock() + return nil +} + +func (lf *logdriverFactory) driverRegistered(name string) bool { + lf.m.Lock() + _, ok := lf.registry[name] + lf.m.Unlock() + if !ok { + if pluginGetter != nil { // this can be nil when the init functions are running + if l, _ := getPlugin(name, plugingetter.Lookup); l != nil { + return true + } + } + } + return ok +} + +func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { + lf.m.Lock() + defer lf.m.Unlock() + + if _, ok := lf.optValidator[name]; ok { + return fmt.Errorf("logger: log validator named '%s' is already registered", name) + } + lf.optValidator[name] = l + return nil +} + +func (lf *logdriverFactory) get(name string) (Creator, error) { + lf.m.Lock() + defer lf.m.Unlock() + + c, ok := lf.registry[name] + if ok { + return c, nil + } + + c, err := getPlugin(name, plugingetter.Acquire) + return c, errors.Wrapf(err, "logger: no log driver named '%s' is registered", name) +} + +func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { + lf.m.Lock() + defer lf.m.Unlock() + + c := lf.optValidator[name] + return c +} + +var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance + +// RegisterLogDriver registers the given logging driver builder with given logging +// driver name. +func RegisterLogDriver(name string, c Creator) error { + return factory.register(name, c) +} + +// RegisterLogOptValidator registers the logging option validator with +// the given logging driver name. 
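A sketch of how an in-tree driver would register itself and an option validator against this factory from package `init`; the `noop` driver is hypothetical:

```go
// Hypothetical "noop" driver showing the registration pattern.
package noop

import (
	"fmt"

	"github.com/docker/docker/daemon/logger"
)

type noopLogger struct{}

func (noopLogger) Log(*logger.Message) error { return nil }
func (noopLogger) Name() string              { return "noop" }
func (noopLogger) Close() error              { return nil }

func init() {
	if err := logger.RegisterLogDriver("noop", func(info logger.Info) (logger.Logger, error) {
		return noopLogger{}, nil
	}); err != nil {
		panic(err)
	}
	// This toy validator rejects every driver-specific option.
	if err := logger.RegisterLogOptValidator("noop", func(cfg map[string]string) error {
		for k := range cfg {
			return fmt.Errorf("unknown log opt '%s' for noop log driver", k)
		}
		return nil
	}); err != nil {
		panic(err)
	}
}
```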
+func RegisterLogOptValidator(name string, l LogOptValidator) error { + return factory.registerLogOptValidator(name, l) +} + +// GetLogDriver provides the logging driver builder for a logging driver name. +func GetLogDriver(name string) (Creator, error) { + return factory.get(name) +} + +var builtInLogOpts = map[string]bool{ + "mode": true, + "max-buffer-size": true, +} + +// ValidateLogOpts checks the options for the given log driver. The +// options supported are specific to the LogDriver implementation. +func ValidateLogOpts(name string, cfg map[string]string) error { + if name == "none" { + return nil + } + + switch containertypes.LogMode(cfg["mode"]) { + case containertypes.LogModeBlocking, containertypes.LogModeNonBlock, containertypes.LogModeUnset: + default: + return fmt.Errorf("logger: logging mode not supported: %s", cfg["mode"]) + } + + if s, ok := cfg["max-buffer-size"]; ok { + if containertypes.LogMode(cfg["mode"]) != containertypes.LogModeNonBlock { + return fmt.Errorf("logger: max-buffer-size option is only supported with 'mode=%s'", containertypes.LogModeNonBlock) + } + if _, err := units.RAMInBytes(s); err != nil { + return errors.Wrap(err, "error parsing option max-buffer-size") + } + } + + if !factory.driverRegistered(name) { + return fmt.Errorf("logger: no log driver named '%s' is registered", name) + } + + filteredOpts := make(map[string]string, len(builtInLogOpts)) + for k, v := range cfg { + if !builtInLogOpts[k] { + filteredOpts[k] = v + } + } + + validator := factory.getLogOptValidator(name) + if validator != nil { + return validator(filteredOpts) + } + return nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go new file mode 100644 index 0000000000..7d0533ec84 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonfilelog.go @@ -0,0 +1,185 @@ +// Package jsonfilelog provides the default Logger implementation for +// Docker logging. This logger logs to files on the host server in the +// JSON format. +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "sync" + + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + "github.com/docker/docker/daemon/logger/loggerutils" + units "github.com/docker/go-units" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Name is the name of the file that the jsonlogger logs to. +const Name = "json-file" + +// JSONFileLogger is Logger implementation for default Docker logging. +type JSONFileLogger struct { + mu sync.Mutex + closed bool + writer *loggerutils.LogFile + readers map[*logger.LogWatcher]struct{} // stores the active log followers + tag string // tag values requested by the user to log +} + +func init() { + if err := logger.RegisterLogDriver(Name, New); err != nil { + logrus.Fatal(err) + } + if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { + logrus.Fatal(err) + } +} + +// New creates new JSONFileLogger which writes to filename passed in +// on given context. 
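Before a logger is constructed, the daemon validates its options through this factory. A sketch of that path for a json-file configuration; the option values are illustrative:

```go
package main

import (
	"log"

	"github.com/docker/docker/daemon/logger"
	// blank import runs jsonfilelog's init(), registering the driver
	// and its ValidateLogOpt under the name "json-file"
	_ "github.com/docker/docker/daemon/logger/jsonfilelog"
)

func main() {
	cfg := map[string]string{
		"max-size": "10m",  // parsed with units.FromHumanSize in New
		"max-file": "3",    // rotation depth; must be >= 1
		"compress": "true", // requires max-file > 1 and max-size set
	}
	if err := logger.ValidateLogOpts("json-file", cfg); err != nil {
		log.Fatal(err)
	}
}
```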
+func New(info logger.Info) (logger.Logger, error) {
+	var capval int64 = -1
+	if capacity, ok := info.Config["max-size"]; ok {
+		var err error
+		capval, err = units.FromHumanSize(capacity)
+		if err != nil {
+			return nil, err
+		}
+		if capval <= 0 {
+			return nil, fmt.Errorf("max-size should be a positive number")
+		}
+	}
+	var maxFiles = 1
+	if maxFileString, ok := info.Config["max-file"]; ok {
+		var err error
+		maxFiles, err = strconv.Atoi(maxFileString)
+		if err != nil {
+			return nil, err
+		}
+		if maxFiles < 1 {
+			return nil, fmt.Errorf("max-file cannot be less than 1")
+		}
+	}
+
+	var compress bool
+	if compressString, ok := info.Config["compress"]; ok {
+		var err error
+		compress, err = strconv.ParseBool(compressString)
+		if err != nil {
+			return nil, err
+		}
+		if compress && (maxFiles == 1 || capval == -1) {
+			return nil, fmt.Errorf("compress cannot be true when max-file is less than 2 or max-size is not set")
+		}
+	}
+
+	attrs, err := info.ExtraAttributes(nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// no default template. only use a tag if the user asked for it
+	tag, err := loggerutils.ParseLogTag(info, "")
+	if err != nil {
+		return nil, err
+	}
+	if tag != "" {
+		attrs["tag"] = tag
+	}
+
+	var extra []byte
+	if len(attrs) > 0 {
+		var err error
+		extra, err = json.Marshal(attrs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	buf := bytes.NewBuffer(nil)
+	marshalFunc := func(msg *logger.Message) ([]byte, error) {
+		if err := marshalMessage(msg, extra, buf); err != nil {
+			return nil, err
+		}
+		b := buf.Bytes()
+		buf.Reset()
+		return b, nil
+	}
+
+	writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, compress, marshalFunc, decodeFunc, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	return &JSONFileLogger{
+		writer:  writer,
+		readers: make(map[*logger.LogWatcher]struct{}),
+		tag:     tag,
+	}, nil
+}
+
+// Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
+func (l *JSONFileLogger) Log(msg *logger.Message) error {
+	l.mu.Lock()
+	err := l.writer.WriteLogEntry(msg)
+	l.mu.Unlock()
+	return err
+}
+
+func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
+	logLine := msg.Line
+	if msg.PLogMetaData == nil || (msg.PLogMetaData != nil && msg.PLogMetaData.Last) {
+		logLine = append(msg.Line, '\n')
+	}
+	err := (&jsonlog.JSONLogs{
+		Log:      logLine,
+		Stream:   msg.Source,
+		Created:  msg.Timestamp,
+		RawAttrs: extra,
+	}).MarshalJSONBuf(buf)
+	if err != nil {
+		return errors.Wrap(err, "error writing log message to buffer")
+	}
+	err = buf.WriteByte('\n')
+	return errors.Wrap(err, "error finalizing log buffer")
+}
+
+// ValidateLogOpt looks for json specific log options max-file & max-size.
+func ValidateLogOpt(cfg map[string]string) error {
+	for key := range cfg {
+		switch key {
+		case "max-file":
+		case "max-size":
+		case "compress":
+		case "labels":
+		case "env":
+		case "env-regex":
+		case "tag":
+		default:
+			return fmt.Errorf("unknown log opt '%s' for json-file log driver", key)
+		}
+	}
+	return nil
+}
+
+// Close closes underlying file and signals all readers to stop.
+func (l *JSONFileLogger) Close() error {
+	l.mu.Lock()
+	l.closed = true
+	err := l.writer.Close()
+	for r := range l.readers {
+		r.Close()
+		delete(l.readers, r)
+	}
+	l.mu.Unlock()
+	return err
+}
+
+// Name returns name of this logger.
+func (l *JSONFileLogger) Name() string { + return Name +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go new file mode 100644 index 0000000000..74be8e7da0 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlog.go @@ -0,0 +1,25 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" +) + +// JSONLog is a log message, typically a single entry from a given log stream. +type JSONLog struct { + // Log is the log message + Log string `json:"log,omitempty"` + // Stream is the log source + Stream string `json:"stream,omitempty"` + // Created is the created timestamp of log + Created time.Time `json:"time"` + // Attrs is the list of extra attributes provided by the user + Attrs map[string]string `json:"attrs,omitempty"` +} + +// Reset all fields to their zero value. +func (jl *JSONLog) Reset() { + jl.Log = "" + jl.Stream = "" + jl.Created = time.Time{} + jl.Attrs = make(map[string]string) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go new file mode 100644 index 0000000000..577c718f63 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go @@ -0,0 +1,125 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "bytes" + "encoding/json" + "time" + "unicode/utf8" +) + +// JSONLogs marshals encoded JSONLog objects +type JSONLogs struct { + Log []byte `json:"log,omitempty"` + Stream string `json:"stream,omitempty"` + Created time.Time `json:"time"` + + // json-encoded bytes + RawAttrs json.RawMessage `json:"attrs,omitempty"` +} + +// MarshalJSONBuf is an optimized JSON marshaller that avoids reflection +// and unnecessary allocation. 
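A sketch of what the buffer-based marshaller below emits for a single entry; the timestamp is pinned so the expected output can be shown as a comment:

```go
package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
)

func main() {
	buf := &bytes.Buffer{}
	entry := &jsonlog.JSONLogs{
		Log:     []byte("hello\n"), // newline is escaped in the output
		Stream:  "stdout",
		Created: time.Date(2018, 5, 1, 12, 0, 0, 1, time.UTC),
	}
	if err := entry.MarshalJSONBuf(buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// {"log":"hello\n","stream":"stdout","time":"2018-05-01T12:00:00.000000001Z"}
}
```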
+func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { + var first = true + + buf.WriteString(`{`) + if len(mj.Log) != 0 { + first = false + buf.WriteString(`"log":`) + ffjsonWriteJSONBytesAsString(buf, mj.Log) + } + if len(mj.Stream) != 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"stream":`) + ffjsonWriteJSONBytesAsString(buf, []byte(mj.Stream)) + } + if len(mj.RawAttrs) > 0 { + if first { + first = false + } else { + buf.WriteString(`,`) + } + buf.WriteString(`"attrs":`) + buf.Write(mj.RawAttrs) + } + if !first { + buf.WriteString(`,`) + } + + created, err := fastTimeMarshalJSON(mj.Created) + if err != nil { + return err + } + + buf.WriteString(`"time":`) + buf.WriteString(created) + buf.WriteString(`}`) + return nil +} + +func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { + const hex = "0123456789abcdef" + + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + default: + + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + + if c == '\u2028' || c == '\u2029' { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\u202`) + buf.WriteByte(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go new file mode 100644 index 0000000000..1822ea5dbc --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go @@ -0,0 +1,20 @@ +package jsonlog // import "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" + +import ( + "time" + + "github.com/pkg/errors" +) + +const jsonFormat = `"` + time.RFC3339Nano + `"` + +// fastTimeMarshalJSON avoids one of the extra allocations that +// time.MarshalJSON is making. +func fastTimeMarshalJSON(t time.Time) (string, error) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. 
+ return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") + } + return t.Format(jsonFormat), nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go new file mode 100644 index 0000000000..ab1793bb72 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/jsonfilelog/read.go @@ -0,0 +1,89 @@ +package jsonfilelog // import "github.com/docker/docker/daemon/logger/jsonfilelog" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/backend" + "github.com/docker/docker/daemon/logger" + "github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog" +) + +const maxJSONDecodeRetry = 20000 + +// ReadLogs implements the logger's LogReader interface for the logs +// created by this driver. +func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { + logWatcher := logger.NewLogWatcher() + + go l.readLogs(logWatcher, config) + return logWatcher +} + +func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) { + defer close(watcher.Msg) + + l.mu.Lock() + l.readers[watcher] = struct{}{} + l.mu.Unlock() + + l.writer.ReadLogs(config, watcher) + + l.mu.Lock() + delete(l.readers, watcher) + l.mu.Unlock() +} + +func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { + l.Reset() + if err := dec.Decode(l); err != nil { + return nil, err + } + + var attrs []backend.LogAttr + if len(l.Attrs) != 0 { + attrs = make([]backend.LogAttr, 0, len(l.Attrs)) + for k, v := range l.Attrs { + attrs = append(attrs, backend.LogAttr{Key: k, Value: v}) + } + } + msg := &logger.Message{ + Source: l.Stream, + Timestamp: l.Created, + Line: []byte(l.Log), + Attrs: attrs, + } + return msg, nil +} + +// decodeFunc is used to create a decoder for the log file reader +func decodeFunc(rdr io.Reader) func() (*logger.Message, error) { + l := &jsonlog.JSONLog{} + dec := json.NewDecoder(rdr) + return func() (msg *logger.Message, err error) { + for retries := 0; retries < maxJSONDecodeRetry; retries++ { + msg, err = decodeLogLine(dec, l) + if err == nil { + break + } + + // try again, could be due to a an incomplete json object as we read + if _, ok := err.(*json.SyntaxError); ok { + dec = json.NewDecoder(rdr) + retries++ + continue + } + + // io.ErrUnexpectedEOF is returned from json.Decoder when there is + // remaining data in the parser's buffer while an io.EOF occurs. + // If the json logger writes a partial json log entry to the disk + // while at the same time the decoder tries to decode it, the race condition happens. + if err == io.ErrUnexpectedEOF { + reader := io.MultiReader(dec.Buffered(), rdr) + dec = json.NewDecoder(reader) + retries++ + } + } + return msg, err + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/logger.go b/vendor/github.com/docker/docker/daemon/logger/logger.go new file mode 100644 index 0000000000..912e855c7f --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/logger.go @@ -0,0 +1,145 @@ +// Package logger defines interfaces that logger drivers implement to +// log messages. +// +// The other half of a logger driver is the implementation of the +// factory, which holds the contextual instance information that +// allows multiple loggers of the same type to perform different +// actions, such as logging to different locations. 
+package logger // import "github.com/docker/docker/daemon/logger"
+
+import (
+	"sync"
+	"time"
+
+	"github.com/docker/docker/api/types/backend"
+)
+
+// ErrReadLogsNotSupported is returned when the underlying log driver does not support reading
+type ErrReadLogsNotSupported struct{}
+
+func (ErrReadLogsNotSupported) Error() string {
+	return "configured logging driver does not support reading"
+}
+
+// NotImplemented makes this error implement the `NotImplemented` interface from api/errdefs
+func (ErrReadLogsNotSupported) NotImplemented() {}
+
+const (
+	logWatcherBufferSize = 4096
+)
+
+var messagePool = &sync.Pool{New: func() interface{} { return &Message{Line: make([]byte, 0, 256)} }}
+
+// NewMessage returns a new message from the message sync.Pool
+func NewMessage() *Message {
+	return messagePool.Get().(*Message)
+}
+
+// PutMessage puts the specified message back in the message pool.
+// The message fields are reset before putting into the pool.
+func PutMessage(msg *Message) {
+	msg.reset()
+	messagePool.Put(msg)
+}
+
+// Message is a data structure that represents a piece of output produced by some
+// container. The Line member is a slice of an array whose contents can be
+// changed after a log driver's Log() method returns.
+//
+// Message is subtyped from backend.LogMessage because there is a lot of
+// internal complexity around the Message type that should not be exposed
+// to any package not explicitly importing the logger type.
+//
+// Any changes made to this struct must also be updated in the `reset` function
+type Message backend.LogMessage

+// reset sets the message back to default values
+// This is used when putting a message back into the message pool.
+// Any changes to the `Message` struct should be reflected here.
+func (m *Message) reset() {
+	m.Line = m.Line[:0]
+	m.Source = ""
+	m.Attrs = nil
+	m.PLogMetaData = nil
+
+	m.Err = nil
+}
+
+// AsLogMessage returns a pointer to the message as a pointer to
+// backend.LogMessage, which is an identical type with a different purpose
+func (m *Message) AsLogMessage() *backend.LogMessage {
+	return (*backend.LogMessage)(m)
+}
+
+// Logger is the interface for docker logging drivers.
+type Logger interface {
+	Log(*Message) error
+	Name() string
+	Close() error
+}
+
+// SizedLogger is the interface for logging drivers that can control
+// the size of buffer used for their messages.
+type SizedLogger interface {
+	Logger
+	BufSize() int
+}
+
+// ReadConfig is the configuration passed into ReadLogs.
+type ReadConfig struct {
+	Since  time.Time
+	Until  time.Time
+	Tail   int
+	Follow bool
+}
+
+// LogReader is the interface for reading log messages for loggers that support reading.
+type LogReader interface {
+	// Read logs from underlying logging backend
+	ReadLogs(ReadConfig) *LogWatcher
+}
+
+// LogWatcher is used when consuming logs read from the LogReader interface.
+type LogWatcher struct {
+	// For sending log messages to a reader.
+	Msg chan *Message
+	// For sending error messages that occur while reading logs.
+	Err           chan error
+	closeOnce     sync.Once
+	closeNotifier chan struct{}
+}
+
+// NewLogWatcher returns a new LogWatcher.
+func NewLogWatcher() *LogWatcher {
+	return &LogWatcher{
+		Msg:           make(chan *Message, logWatcherBufferSize),
+		Err:           make(chan error, 1),
+		closeNotifier: make(chan struct{}),
+	}
+}
+
+// Close notifies the underlying log reader to stop.
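A sketch of the consumption pattern for a `LogWatcher` handed out by a `LogReader` implementation such as the json-file driver's `ReadLogs`; the `drain` helper is illustrative:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/daemon/logger"
)

// drain reads messages until the reader closes Msg or reports an error.
func drain(lr logger.LogReader) error {
	w := lr.ReadLogs(logger.ReadConfig{Tail: 100, Follow: false}) // last 100 lines
	defer w.Close()
	for {
		select {
		case msg, ok := <-w.Msg:
			if !ok {
				return nil // Msg closed: the reader is done
			}
			fmt.Printf("%s %s: %s", msg.Timestamp, msg.Source, msg.Line)
		case err := <-w.Err:
			return err
		}
	}
}
```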
+func (w *LogWatcher) Close() {
+	// only close if not already closed
+	w.closeOnce.Do(func() {
+		close(w.closeNotifier)
+	})
+}
+
+// WatchClose returns a channel receiver that receives notification
+// when the watcher has been closed. This should only be called from
+// one goroutine.
+func (w *LogWatcher) WatchClose() <-chan struct{} {
+	return w.closeNotifier
+}
+
+// Capability defines the list of capabilities that a driver can implement.
+// These capabilities are not required of a logging driver; however, they do
+// determine how a logging driver can be used.
+type Capability struct {
+	// Determines if a log driver can read back logs
+	ReadLogs bool
+}
+
+// MarshalFunc is a func that marshals a message into an arbitrary format
+type MarshalFunc func(*Message) ([]byte, error)
diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
new file mode 100644
index 0000000000..719512dbdb
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/log_tag.go
@@ -0,0 +1,31 @@
+package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils"
+
+import (
+	"bytes"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/templates"
+)
+
+// DefaultTemplate defines the default template a logger should use.
+const DefaultTemplate = "{{.ID}}"
+
+// ParseLogTag generates a context aware tag for consistency across different
+// log drivers based on the context of the running container.
+func ParseLogTag(info logger.Info, defaultTemplate string) (string, error) {
+	tagTemplate := info.Config["tag"]
+	if tagTemplate == "" {
+		tagTemplate = defaultTemplate
+	}
+
+	tmpl, err := templates.NewParse("log-tag", tagTemplate)
+	if err != nil {
+		return "", err
+	}
+	buf := new(bytes.Buffer)
+	if err := tmpl.Execute(buf, &info); err != nil {
+		return "", err
+	}
+
+	return buf.String(), nil
+}
diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go
new file mode 100644
index 0000000000..b4148ce645
--- /dev/null
+++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/logfile.go
@@ -0,0 +1,656 @@
+package loggerutils // import "github.com/docker/docker/daemon/logger/loggerutils"
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils/multireader"
+	"github.com/docker/docker/pkg/filenotify"
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/pubsub"
+	"github.com/docker/docker/pkg/tailfile"
+	"github.com/fsnotify/fsnotify"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const tmpLogfileSuffix = ".tmp"
+
+// rotateFileMetadata is metadata stored in the gzip header of a compressed log file
+type rotateFileMetadata struct {
+	LastTime time.Time `json:"lastTime,omitempty"`
+}
+
+// refCounter is a reference counter for logfiles
+type refCounter struct {
+	mu      sync.Mutex
+	counter map[string]int
+}
+
+// GetReference increases the reference counter for the specified logfile
+func (rc *refCounter) GetReference(fileName string, openRefFile func(fileName string, exists bool) (*os.File, error)) (*os.File, error) {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	var (
+		file *os.File
+		err  error
+	)
+	_, ok := rc.counter[fileName]
+	file, err = openRefFile(fileName, ok)
+	if err != nil {
+		return nil, err
+	}
+
+	if ok {
+		rc.counter[fileName]++
+	} else if file != nil {
+		rc.counter[file.Name()] = 1
+	}
+
+	return file, nil
+}
+
+// Dereference decrements the reference counter for the specified logfile
+func (rc *refCounter) Dereference(fileName string) error {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	rc.counter[fileName]--
+	if rc.counter[fileName] <= 0 {
+		delete(rc.counter, fileName)
+		err := os.Remove(fileName)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// LogFile is Logger implementation for default Docker logging.
+type LogFile struct {
+	mu              sync.RWMutex // protects the logfile access
+	f               *os.File     // store for closing
+	closed          bool
+	rotateMu        sync.Mutex // blocks the next rotation until the current rotation is completed
+	capacity        int64      // maximum size of each file
+	currentSize     int64      // current size of the latest file
+	maxFiles        int        // maximum number of files
+	compress        bool       // whether old versions of log files are compressed
+	lastTimestamp   time.Time  // timestamp of the last log
+	filesRefCounter refCounter // reference counts for decompressed files
+	notifyRotate    *pubsub.Publisher
+	marshal         logger.MarshalFunc
+	createDecoder   makeDecoderFunc
+	perms           os.FileMode
+}
+
+type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error)
+
+// NewLogFile creates a new LogFile
+func NewLogFile(logPath string, capacity int64, maxFiles int, compress bool, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc, perms os.FileMode) (*LogFile, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, perms)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+
+	return &LogFile{
+		f:               log,
+		capacity:        capacity,
+		currentSize:     size,
+		maxFiles:        maxFiles,
+		compress:        compress,
+		filesRefCounter: refCounter{counter: make(map[string]int)},
+		notifyRotate:    pubsub.NewPublisher(0, 1),
+		marshal:         marshaller,
+		createDecoder:   decodeFunc,
+		perms:           perms,
+	}, nil
+}
+
+// WriteLogEntry writes the provided log message to the current log file.
+// This may trigger a rotation event if the max file/capacity limits are hit.
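To make the rotation scheme below concrete, here is an illustration (not the vendored code itself) of the rename cascade that `rotate()` performs for `max-file=3` with compression enabled; the file name is made up:

```go
package main

import "os"

// rotateIllustration mimics one rotation step for maxFiles=3, compress=true.
func rotateIllustration() error {
	// Shift older archives up one slot; a missing file is not an error.
	if err := os.Rename("container.log.1.gz", "container.log.2.gz"); err != nil && !os.IsNotExist(err) {
		return err
	}
	// The live file becomes ".1" ...
	if err := os.Rename("container.log", "container.log.1"); err != nil && !os.IsNotExist(err) {
		return err
	}
	// ... then a background goroutine gzips ".1" into ".1.gz" and removes
	// the uncompressed ".1", leaving: container.log (new, empty),
	// container.log.1.gz, container.log.2.gz.
	return nil
}

func main() { _ = rotateIllustration() }
```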
+func (w *LogFile) WriteLogEntry(msg *logger.Message) error { + b, err := w.marshal(msg) + if err != nil { + return errors.Wrap(err, "error marshalling log message") + } + + // capture the timestamp before PutMessage returns msg to the message pool and resets it + ts := msg.Timestamp + logger.PutMessage(msg) + + w.mu.Lock() + if w.closed { + w.mu.Unlock() + return errors.New("cannot write because the output file was closed") + } + + if err := w.checkCapacityAndRotate(); err != nil { + w.mu.Unlock() + return err + } + + n, err := w.f.Write(b) + if err == nil { + w.currentSize += int64(n) + w.lastTimestamp = ts + } + w.mu.Unlock() + return err +} + +func (w *LogFile) checkCapacityAndRotate() error { + if w.capacity == -1 { + return nil + } + + if w.currentSize >= w.capacity { + w.rotateMu.Lock() + fname := w.f.Name() + if err := w.f.Close(); err != nil { + w.rotateMu.Unlock() + return errors.Wrap(err, "error closing file") + } + if err := rotate(fname, w.maxFiles, w.compress); err != nil { + w.rotateMu.Unlock() + return err + } + file, err := os.OpenFile(fname, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, w.perms) + if err != nil { + w.rotateMu.Unlock() + return err + } + w.f = file + w.currentSize = 0 + w.notifyRotate.Publish(struct{}{}) + + if w.maxFiles <= 1 || !w.compress { + w.rotateMu.Unlock() + return nil + } + + go func() { + compressFile(fname+".1", w.lastTimestamp) + w.rotateMu.Unlock() + }() + } + + return nil +} + +func rotate(name string, maxFiles int, compress bool) error { + if maxFiles < 2 { + return nil + } + + var extension string + if compress { + extension = ".gz" + } + for i := maxFiles - 1; i > 1; i-- { + toPath := name + "." + strconv.Itoa(i) + extension + fromPath := name + "." + strconv.Itoa(i-1) + extension + if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { + return err + } + } + + if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { + return err + } + + return nil +} + +func compressFile(fileName string, lastTimestamp time.Time) { + file, err := os.Open(fileName) + if err != nil { + logrus.Errorf("Failed to open log file: %v", err) + return + } + defer func() { + file.Close() + err := os.Remove(fileName) + if err != nil { + logrus.Errorf("Failed to remove source log file: %v", err) + } + }() + + outFile, err := os.OpenFile(fileName+".gz", os.O_CREATE|os.O_RDWR, 0640) + if err != nil { + logrus.Errorf("Failed to open or create gzip log file: %v", err) + return + } + defer func() { + outFile.Close() + if err != nil { + os.Remove(fileName + ".gz") + } + }() + + compressWriter := gzip.NewWriter(outFile) + defer compressWriter.Close() + + // Add the last log entry timestamp to the gzip header + extra := rotateFileMetadata{} + extra.LastTime = lastTimestamp + compressWriter.Header.Extra, err = json.Marshal(&extra) + if err != nil { + // Here log the error only and don't return since this is just an optimization. + logrus.Warningf("Failed to marshal JSON: %v", err) + } + + _, err = pools.Copy(compressWriter, file) + if err != nil { + logrus.WithError(err).WithField("module", "container.logs").WithField("file", fileName).Error("Error compressing log file") + return + } +} + +// MaxFiles returns the maximum number of files +func (w *LogFile) MaxFiles() int { + return w.maxFiles +} + +// Close closes the underlying file and signals all readers to stop.
+func (w *LogFile) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + if w.closed { + return nil + } + if err := w.f.Close(); err != nil { + return err + } + w.closed = true + return nil +} + +// ReadLogs decodes entries from log files and sends them to the passed-in watcher
+func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) { + w.mu.RLock() + currentFile, err := os.Open(w.f.Name()) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + defer currentFile.Close() + + currentChunk, err := newSectionReader(currentFile) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + + if config.Tail != 0 { + files, err := w.openRotatedFiles(config) + if err != nil { + w.mu.RUnlock() + watcher.Err <- err + return + } + w.mu.RUnlock() + seekers := make([]io.ReadSeeker, 0, len(files)+1) + for _, f := range files { + seekers = append(seekers, f) + } + if currentChunk.Size() > 0 { + seekers = append(seekers, currentChunk) + } + if len(seekers) > 0 { + tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config) + } + for _, f := range files { + f.Close() + fileName := f.Name() + if strings.HasSuffix(fileName, tmpLogfileSuffix) { + err := w.filesRefCounter.Dereference(fileName) + if err != nil { + logrus.Errorf("Failed to dereference the log file %q: %v", fileName, err) + } + } + } + + w.mu.RLock() + } + + if !config.Follow || w.closed { + w.mu.RUnlock() + return + } + w.mu.RUnlock() + + notifyRotate := w.notifyRotate.Subscribe() + defer w.notifyRotate.Evict(notifyRotate) + followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until) +} + +func (w *LogFile) openRotatedFiles(config logger.ReadConfig) (files []*os.File, err error) { + w.rotateMu.Lock() + defer w.rotateMu.Unlock() + + defer func() { + if err == nil { + return + } + for _, f := range files { + f.Close() + if strings.HasSuffix(f.Name(), tmpLogfileSuffix) { + err := os.Remove(f.Name()) + if err != nil && !os.IsNotExist(err) { + logrus.Warningf("Failed to remove the logfile %q: %v", f.Name(), err) + } + } + } + }() + + for i := w.maxFiles; i > 1; i-- { + f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1)) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + fileName := fmt.Sprintf("%s.%d.gz", w.f.Name(), i-1) + decompressedFileName := fileName + tmpLogfileSuffix + tmpFile, err := w.filesRefCounter.GetReference(decompressedFileName, func(refFileName string, exists bool) (*os.File, error) { + if exists { + return os.Open(refFileName) + } + return decompressfile(fileName, refFileName, config.Since) + }) + + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + continue + } + if tmpFile == nil { + // Logs before `config.Since` do not need to be read + break + } + + files = append(files, tmpFile) + continue + } + files = append(files, f) + } + + return files, nil +} + +func decompressfile(fileName, destFileName string, since time.Time) (*os.File, error) { + cf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer cf.Close() + + rc, err := gzip.NewReader(cf) + if err != nil { + return nil, err + } + defer rc.Close() + + // Extract the last log entry timestamp from the gzip header + extra := &rotateFileMetadata{} + err = json.Unmarshal(rc.Header.Extra, extra) + if err == nil && extra.LastTime.Before(since) { + return nil, nil + } + + rs, err := os.OpenFile(destFileName, os.O_CREATE|os.O_RDWR, 0640) + if err != nil { + return nil, err + } + + _, err = pools.Copy(rs, rc) + if err != nil
{ + rs.Close() + rErr := os.Remove(rs.Name()) + if rErr != nil && !os.IsNotExist(rErr) { + logrus.Errorf("Failed to remove the logfile %q: %v", rs.Name(), rErr) + } + return nil, err + } + + return rs, nil +} + +func newSectionReader(f *os.File) (*io.SectionReader, error) { + // seek to the end to get the size + // we'll leave this at the end of the file since section reader does not advance the reader + size, err := f.Seek(0, os.SEEK_END) + if err != nil { + return nil, errors.Wrap(err, "error getting current file size") + } + return io.NewSectionReader(f, 0, size), nil +} + +type decodeFunc func() (*logger.Message, error) + +func tailFile(f io.ReadSeeker, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, config logger.ReadConfig) { + var rdr io.Reader = f + if config.Tail > 0 { + ls, err := tailfile.TailFile(f, config.Tail) + if err != nil { + watcher.Err <- err + return + } + rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) + } + + decodeLogLine := createDecoder(rdr) + for { + msg, err := decodeLogLine() + if err != nil { + if err != io.EOF { + watcher.Err <- err + } + return + } + if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) { + continue + } + if !config.Until.IsZero() && msg.Timestamp.After(config.Until) { + return + } + select { + case <-watcher.WatchClose(): + return + case watcher.Msg <- msg: + } + } +} + +func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) { + decodeLogLine := createDecoder(f) + + name := f.Name() + fileWatcher, err := watchFile(name) + if err != nil { + logWatcher.Err <- err + return + } + defer func() { + f.Close() + fileWatcher.Remove(name) + fileWatcher.Close() + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-logWatcher.WatchClose(): + fileWatcher.Remove(name) + cancel() + case <-ctx.Done(): + return + } + }() + + var retries int + handleRotate := func() error { + f.Close() + fileWatcher.Remove(name) + + // retry when the file doesn't exist + for retries := 0; retries <= 5; retries++ { + f, err = os.Open(name) + if err == nil || !os.IsNotExist(err) { + break + } + } + if err != nil { + return err + } + if err := fileWatcher.Add(name); err != nil { + return err + } + decodeLogLine = createDecoder(f) + return nil + } + + errRetry := errors.New("retry") + errDone := errors.New("done") + waitRead := func() error { + select { + case e := <-fileWatcher.Events(): + switch e.Op { + case fsnotify.Write: + decodeLogLine = createDecoder(f) + return nil + case fsnotify.Rename, fsnotify.Remove: + select { + case <-notifyRotate: + case <-ctx.Done(): + return errDone + } + if err := handleRotate(); err != nil { + return err + } + return nil + } + return errRetry + case err := <-fileWatcher.Errors(): + logrus.Debugf("logger got error watching file: %v", err) + // Something happened, let's try and stay alive and create a new watcher + if retries <= 5 { + fileWatcher.Close() + fileWatcher, err = watchFile(name) + if err != nil { + return err + } + retries++ + return errRetry + } + return err + case <-ctx.Done(): + return errDone + } + } + + handleDecodeErr := func(err error) error { + if err != io.EOF { + return err + } + + for { + err := waitRead() + if err == nil { + break + } + if err == errRetry { + continue + } + return err + } + return nil + } + + // main loop + for { + msg, err := decodeLogLine() + if err != nil { + if err := handleDecodeErr(err); err != nil { + if err == errDone {
return + } + // we got an unrecoverable error, so return + logWatcher.Err <- err + return + } + // ready to try again + continue + } + + retries = 0 // reset retries since we've succeeded + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + select { + case logWatcher.Msg <- msg: + case <-ctx.Done(): + logWatcher.Msg <- msg + for { + msg, err := decodeLogLine() + if err != nil { + return + } + if !since.IsZero() && msg.Timestamp.Before(since) { + continue + } + if !until.IsZero() && msg.Timestamp.After(until) { + return + } + logWatcher.Msg <- msg + } + } + } +} + +func watchFile(name string) (filenotify.FileWatcher, error) { + fileWatcher, err := filenotify.New() + if err != nil { + return nil, err + } + + logger := logrus.WithFields(logrus.Fields{ + "module": "logger", + "file": name, + }) + + if err := fileWatcher.Add(name); err != nil { + logger.WithError(err).Warnf("falling back to file poller") + fileWatcher.Close() + fileWatcher = filenotify.NewPollingWatcher() + + if err := fileWatcher.Add(name); err != nil { + fileWatcher.Close() + logger.WithError(err).Debugf("error watching log file for modifications") + return nil, err + } + } + return fileWatcher, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go new file mode 100644 index 0000000000..77980a2a0a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loggerutils/multireader/multireader.go @@ -0,0 +1,212 @@ +package multireader // import "github.com/docker/docker/daemon/logger/loggerutils/multireader" + +import ( + "bytes" + "fmt" + "io" + "os" +) + +type pos struct { + idx int + offset int64 +} + +type multiReadSeeker struct { + readers []io.ReadSeeker + pos *pos + posIdx map[io.ReadSeeker]int +} + +func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { + var tmpOffset int64 + switch whence { + case os.SEEK_SET: + for i, rdr := range r.readers { + // get size of the current reader + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + if offset > tmpOffset+s { + if i == len(r.readers)-1 { + rdrOffset := s + (offset - tmpOffset) + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + r.pos = &pos{i, rdrOffset} + return offset, nil + } + + tmpOffset += s + continue + } + + rdrOffset := offset - tmpOffset + idx := i + + if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { + return -1, err + } + // make sure all following readers are at 0 + for _, rdr := range r.readers[i+1:] { + rdr.Seek(0, os.SEEK_SET) + } + + if rdrOffset == s && i != len(r.readers)-1 { + idx++ + rdrOffset = 0 + } + r.pos = &pos{idx, rdrOffset} + return offset, nil + } + case os.SEEK_END: + for _, rdr := range r.readers { + s, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + tmpOffset += s + } + if _, err := r.Seek(tmpOffset+offset, os.SEEK_SET); err != nil { + return -1, err + } + return tmpOffset + offset, nil + case os.SEEK_CUR: + if r.pos == nil { + return r.Seek(offset, os.SEEK_SET) + } + // Just return the current offset + if offset == 0 { + return r.getCurOffset() + } + + curOffset, err := r.getCurOffset() + if err != nil { + return -1, err + } + rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) + if err != nil { + return -1, err + } + + r.pos = &pos{r.posIdx[rdr], rdrOffset} + return curOffset + offset,
nil + default: + return -1, fmt.Errorf("Invalid whence: %d", whence) + } + + return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) +} + +func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { + + var offsetTo int64 + + for _, rdr := range r.readers { + size, err := getReadSeekerSize(rdr) + if err != nil { + return nil, -1, err + } + if offsetTo+size > offset { + return rdr, offset - offsetTo, nil + } + if rdr == r.readers[len(r.readers)-1] { + return rdr, offsetTo + offset, nil + } + offsetTo += size + } + + return nil, 0, nil +} + +func (r *multiReadSeeker) getCurOffset() (int64, error) { + var totalSize int64 + for _, rdr := range r.readers[:r.pos.idx+1] { + if r.posIdx[rdr] == r.pos.idx { + totalSize += r.pos.offset + break + } + + size, err := getReadSeekerSize(rdr) + if err != nil { + return -1, fmt.Errorf("error getting seeker size: %v", err) + } + totalSize += size + } + return totalSize, nil +} + +func (r *multiReadSeeker) Read(b []byte) (int, error) { + if r.pos == nil { + // make sure all readers are at 0 + r.Seek(0, os.SEEK_SET) + } + + bLen := int64(len(b)) + buf := bytes.NewBuffer(nil) + var rdr io.ReadSeeker + + for _, rdr = range r.readers[r.pos.idx:] { + readBytes, err := io.CopyN(buf, rdr, bLen) + if err != nil && err != io.EOF { + return -1, err + } + bLen -= readBytes + + if bLen == 0 { + break + } + } + + rdrPos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + r.pos = &pos{r.posIdx[rdr], rdrPos} + return buf.Read(b) +} + +func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { + // save the current position + pos, err := rdr.Seek(0, os.SEEK_CUR) + if err != nil { + return -1, err + } + + // get the size + size, err := rdr.Seek(0, os.SEEK_END) + if err != nil { + return -1, err + } + + // reset the position + if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { + return -1, err + } + return size, nil +} + +// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided +// input readseekers. After calling this method the initial position is set to the +// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances +// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. +// Seek can be used over the sum of lengths of all readseekers. +// +// When a MultiReadSeeker is used, no Read and Seek operations should be made on +// its ReadSeeker components. Also, users should make no assumption on the state +// of individual readseekers while the MultiReadSeeker is used. +func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { + if len(readers) == 1 { + return readers[0] + } + idx := make(map[io.ReadSeeker]int) + for i, rdr := range readers { + idx[rdr] = i + } + return &multiReadSeeker{ + readers: readers, + posIdx: idx, + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/loginfo.go b/vendor/github.com/docker/docker/daemon/logger/loginfo.go new file mode 100644 index 0000000000..4c48235f5c --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/loginfo.go @@ -0,0 +1,129 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "fmt" + "os" + "regexp" + "strings" + "time" +) + +// Info provides enough information for a logging driver to do its function. 
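+//
+// A hypothetical Info as a log driver might receive it, written from a
+// caller's perspective (all values are invented for illustration). With the
+// "tag" option below, loggerutils.ParseLogTag would render "web/4cb07b47f9fb":
+//
+//	info := logger.Info{
+//		Config:        map[string]string{"tag": "{{.Name}}/{{.ID}}"},
+//		ContainerID:   "4cb07b47f9fb0884f49d92c27b4eebd4c91f44f4e5d1e2f6e56ccf10f4cd59fa",
+//		ContainerName: "/web",
+//	}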
+type Info struct { + Config map[string]string + ContainerID string + ContainerName string + ContainerEntrypoint string + ContainerArgs []string + ContainerImageID string + ContainerImageName string + ContainerCreated time.Time + ContainerEnv []string + ContainerLabels map[string]string + LogPath string + DaemonName string +} + +// ExtraAttributes returns the user-defined extra attributes (labels, +// environment variables) in key-value format. This can be used by log drivers +// that support metadata to add more context to a log. +func (info *Info) ExtraAttributes(keyMod func(string) string) (map[string]string, error) { + extra := make(map[string]string) + labels, ok := info.Config["labels"] + if ok && len(labels) > 0 { + for _, l := range strings.Split(labels, ",") { + if v, ok := info.ContainerLabels[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envMapping := make(map[string]string) + for _, e := range info.ContainerEnv { + if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { + envMapping[kv[0]] = kv[1] + } + } + + env, ok := info.Config["env"] + if ok && len(env) > 0 { + for _, l := range strings.Split(env, ",") { + if v, ok := envMapping[l]; ok { + if keyMod != nil { + l = keyMod(l) + } + extra[l] = v + } + } + } + + envRegex, ok := info.Config["env-regex"] + if ok && len(envRegex) > 0 { + re, err := regexp.Compile(envRegex) + if err != nil { + return nil, err + } + for k, v := range envMapping { + if re.MatchString(k) { + if keyMod != nil { + k = keyMod(k) + } + extra[k] = v + } + } + } + + return extra, nil +} + +// Hostname returns the hostname from the underlying OS. +func (info *Info) Hostname() (string, error) { + hostname, err := os.Hostname() + if err != nil { + return "", fmt.Errorf("logger: cannot resolve hostname: %v", err) + } + return hostname, nil +} + +// Command returns the command that the container being logged was +// started with. The Entrypoint is prepended to the container +// arguments. +func (info *Info) Command() string { + terms := []string{info.ContainerEntrypoint} + terms = append(terms, info.ContainerArgs...) + command := strings.Join(terms, " ") + return command +} + +// ID returns the container ID shortened to 12 characters. +func (info *Info) ID() string { + return info.ContainerID[:12] +} + +// FullID is an alias of ContainerID. +func (info *Info) FullID() string { + return info.ContainerID +} + +// Name returns the ContainerName without a preceding '/'. +func (info *Info) Name() string { + return strings.TrimPrefix(info.ContainerName, "/") +} + +// ImageID returns the ContainerImageID shortened to 12 characters. +func (info *Info) ImageID() string { + return info.ContainerImageID[:12] +} + +// ImageFullID is an alias of ContainerImageID.
+func (info *Info) ImageFullID() string { + return info.ContainerImageID +} + +// ImageName is an alias of ContainerImageName. +func (info *Info) ImageName() string { + return info.ContainerImageName +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin.go b/vendor/github.com/docker/docker/daemon/logger/plugin.go new file mode 100644 index 0000000000..cd0e60b7cd --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin.go @@ -0,0 +1,90 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "fmt" + "io" + "os" + "path/filepath" + + "github.com/docker/docker/api/types/plugins/logdriver" + getter "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" +) + +var pluginGetter getter.PluginGetter + +const extName = "LogDriver" + +// logPlugin defines the available functions that logging plugins must implement. +type logPlugin interface { + StartLogging(streamPath string, info Info) (err error) + StopLogging(streamPath string) (err error) + Capabilities() (cap Capability, err error) + ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) +} + +// RegisterPluginGetter sets the plugingetter +func RegisterPluginGetter(plugingetter getter.PluginGetter) { + pluginGetter = plugingetter +} + +// getPlugin looks up the named logging plugin and returns a Creator for loggers backed by it. +func getPlugin(name string, mode int) (Creator, error) { + p, err := pluginGetter.Get(name, extName, mode) + if err != nil { + return nil, fmt.Errorf("error looking up logging plugin %s: %v", name, err) + } + + d := &logPluginProxy{p.Client()} + return makePluginCreator(name, d, p.ScopedPath), nil +} + +func makePluginCreator(name string, l *logPluginProxy, scopePath func(s string) string) Creator { + return func(logCtx Info) (logger Logger, err error) { + defer func() { + if err != nil { + pluginGetter.Get(name, extName, getter.Release) + } + }() + + unscopedPath := filepath.Join("/", "run", "docker", "logging") + logRoot := scopePath(unscopedPath) + if err := os.MkdirAll(logRoot, 0700); err != nil { + return nil, err + } + + id := stringid.GenerateNonCryptoID() + a := &pluginAdapter{ + driverName: name, + id: id, + plugin: l, + fifoPath: filepath.Join(logRoot, id), + logInfo: logCtx, + } + + cap, err := a.plugin.Capabilities() + if err == nil { + a.capabilities = cap + } + + stream, err := openPluginStream(a) + if err != nil { + return nil, err + } + + a.stream = stream + a.enc = logdriver.NewLogEntryEncoder(a.stream) + + if err := l.StartLogging(filepath.Join(unscopedPath, id), logCtx); err != nil { + return nil, errors.Wrapf(err, "error creating logger") + } + + if cap.ReadLogs { + return &pluginAdapterWithRead{a}, nil + } + + return a, nil + } +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go new file mode 100644 index 0000000000..6192f52eb8 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unix.go @@ -0,0 +1,20 @@ +// +build linux freebsd + +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "context" + "io" + + "github.com/containerd/fifo" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + f, err := fifo.OpenFifo(context.Background(), a.fifoPath, unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700) + if err != nil { + return nil, errors.Wrapf(err, "error
creating i/o pipe for log plugin: %s", a.Name()) + } + return f, nil +} diff --git a/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go new file mode 100644 index 0000000000..2ad47cc077 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/plugin_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!freebsd + +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +func openPluginStream(a *pluginAdapter) (io.WriteCloser, error) { + return nil, errors.New("log plugin not supported") +} diff --git a/vendor/github.com/docker/docker/daemon/logger/proxy.go b/vendor/github.com/docker/docker/daemon/logger/proxy.go new file mode 100644 index 0000000000..4a1c778108 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/proxy.go @@ -0,0 +1,107 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "io" +) + +type client interface { + Call(string, interface{}, interface{}) error + Stream(string, interface{}) (io.ReadCloser, error) +} + +type logPluginProxy struct { + client +} + +type logPluginProxyStartLoggingRequest struct { + File string + Info Info +} + +type logPluginProxyStartLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StartLogging(file string, info Info) (err error) { + var ( + req logPluginProxyStartLoggingRequest + ret logPluginProxyStartLoggingResponse + ) + + req.File = file + req.Info = info + if err = pp.Call("LogDriver.StartLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyStopLoggingRequest struct { + File string +} + +type logPluginProxyStopLoggingResponse struct { + Err string +} + +func (pp *logPluginProxy) StopLogging(file string) (err error) { + var ( + req logPluginProxyStopLoggingRequest + ret logPluginProxyStopLoggingResponse + ) + + req.File = file + if err = pp.Call("LogDriver.StopLogging", req, &ret); err != nil { + return + } + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyCapabilitiesResponse struct { + Cap Capability + Err string +} + +func (pp *logPluginProxy) Capabilities() (cap Capability, err error) { + var ( + ret logPluginProxyCapabilitiesResponse + ) + + if err = pp.Call("LogDriver.Capabilities", nil, &ret); err != nil { + return + } + + cap = ret.Cap + + if ret.Err != "" { + err = errors.New(ret.Err) + } + + return +} + +type logPluginProxyReadLogsRequest struct { + Info Info + Config ReadConfig +} + +func (pp *logPluginProxy) ReadLogs(info Info, config ReadConfig) (stream io.ReadCloser, err error) { + var ( + req logPluginProxyReadLogsRequest + ) + + req.Info = info + req.Config = config + return pp.Stream("LogDriver.ReadLogs", req) +} diff --git a/vendor/github.com/docker/docker/daemon/logger/ring.go b/vendor/github.com/docker/docker/daemon/logger/ring.go new file mode 100644 index 0000000000..887f2ea18a --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/ring.go @@ -0,0 +1,223 @@ +package logger // import "github.com/docker/docker/daemon/logger" + +import ( + "errors" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" +) + +const ( + defaultRingMaxSize = 1e6 // 1MB +) + +// RingLogger is a ring buffer that implements the Logger interface. +// This is used when lossy logging is OK. 
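+//
+// A minimal sketch of wrapping an existing driver (`driver`, `info` and `msg`
+// are assumed to exist; the 64 kB cap is an arbitrary example value):
+//
+//	ring := logger.NewRingLogger(driver, info, 64*1024)
+//	_ = ring.Log(msg) // non-blocking; new messages are dropped while the buffer is full
+//	_ = ring.Close()  // drains whatever is still queued into the wrapped driver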
+type RingLogger struct { + buffer *messageRing + l Logger + logInfo Info + closeFlag int32 +} + +type ringWithReader struct { + *RingLogger +} + +func (r *ringWithReader) ReadLogs(cfg ReadConfig) *LogWatcher { + reader, ok := r.l.(LogReader) + if !ok { + // something is wrong if we get here + panic("expected log reader") + } + return reader.ReadLogs(cfg) +} + +func newRingLogger(driver Logger, logInfo Info, maxSize int64) *RingLogger { + l := &RingLogger{ + buffer: newRing(maxSize), + l: driver, + logInfo: logInfo, + } + go l.run() + return l +} + +// NewRingLogger creates a new Logger that is implemented as a RingBuffer wrapping +// the passed in logger. +func NewRingLogger(driver Logger, logInfo Info, maxSize int64) Logger { + if maxSize < 0 { + maxSize = defaultRingMaxSize + } + l := newRingLogger(driver, logInfo, maxSize) + if _, ok := driver.(LogReader); ok { + return &ringWithReader{l} + } + return l +} + +// Log queues messages into the ring buffer +func (r *RingLogger) Log(msg *Message) error { + if r.closed() { + return errClosed + } + return r.buffer.Enqueue(msg) +} + +// Name returns the name of the underlying logger +func (r *RingLogger) Name() string { + return r.l.Name() +} + +func (r *RingLogger) closed() bool { + return atomic.LoadInt32(&r.closeFlag) == 1 +} + +func (r *RingLogger) setClosed() { + atomic.StoreInt32(&r.closeFlag, 1) +} + +// Close closes the logger +func (r *RingLogger) Close() error { + r.setClosed() + r.buffer.Close() + // empty out the queue + var logErr bool + for _, msg := range r.buffer.Drain() { + if logErr { + // some error logging a previous message, so re-insert to message pool + // and assume log driver is hosed + PutMessage(msg) + continue + } + + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") + logErr = true + } + } + return r.l.Close() +} + +// run consumes messages from the ring buffer and forwards them to the underlying +// logger. +// This is run in a goroutine when the RingLogger is created +func (r *RingLogger) run() { + for { + if r.closed() { + return + } + msg, err := r.buffer.Dequeue() + if err != nil { + // buffer is closed + return + } + if err := r.l.Log(msg); err != nil { + logrus.WithField("driver", r.l.Name()). + WithField("container", r.logInfo.ContainerID). + WithError(err). + Errorf("Error writing log message") + } + } +} + +type messageRing struct { + mu sync.Mutex + // signals callers of `Dequeue` to wake up either on `Close` or when a new `Message` is added + wait *sync.Cond + + sizeBytes int64 // current buffer size + maxBytes int64 // max buffer size + queue []*Message + closed bool +} + +func newRing(maxBytes int64) *messageRing { + queueSize := 1000 + if maxBytes == 0 || maxBytes == 1 { + // With 0 or 1 max byte size, the maximum size of the queue would only ever be 1 + // message long. + queueSize = 1 + } + + r := &messageRing{queue: make([]*Message, 0, queueSize), maxBytes: maxBytes} + r.wait = sync.NewCond(&r.mu) + return r +} + +// Enqueue adds a message to the buffer queue. +// If the message does not fit in the remaining buffer capacity and the queue is +// not empty, the new message is dropped. +// If the queue is empty, the message is added even if it exceeds the maximum size.
+func (r *messageRing) Enqueue(m *Message) error { + mSize := int64(len(m.Line)) + + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return errClosed + } + if mSize+r.sizeBytes > r.maxBytes && len(r.queue) > 0 { + r.wait.Signal() + r.mu.Unlock() + return nil + } + + r.queue = append(r.queue, m) + r.sizeBytes += mSize + r.wait.Signal() + r.mu.Unlock() + return nil +} + +// Dequeue pulls a message off the queue +// If there are no messages, it waits for one. +// If the buffer is closed, it will return immediately. +func (r *messageRing) Dequeue() (*Message, error) { + r.mu.Lock() + for len(r.queue) == 0 && !r.closed { + r.wait.Wait() + } + + if r.closed { + r.mu.Unlock() + return nil, errClosed + } + + msg := r.queue[0] + r.queue = r.queue[1:] + r.sizeBytes -= int64(len(msg.Line)) + r.mu.Unlock() + return msg, nil +} + +var errClosed = errors.New("closed") + +// Close closes the buffer ensuring no new messages can be added. +// Any callers waiting to dequeue a message will be woken up. +func (r *messageRing) Close() { + r.mu.Lock() + if r.closed { + r.mu.Unlock() + return + } + + r.closed = true + r.wait.Broadcast() + r.mu.Unlock() +} + +// Drain drains all messages from the queue. +// This can be used after `Close()` to get any remaining messages that were in queue. +func (r *messageRing) Drain() []*Message { + r.mu.Lock() + ls := make([]*Message, 0, len(r.queue)) + ls = append(ls, r.queue...) + r.sizeBytes = 0 + r.queue = r.queue[:0] + r.mu.Unlock() + return ls +} diff --git a/vendor/github.com/docker/docker/daemon/logger/templates/templates.go b/vendor/github.com/docker/docker/daemon/logger/templates/templates.go new file mode 100644 index 0000000000..ab76d0f1c2 --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/logger/templates/templates.go @@ -0,0 +1,50 @@ +package templates // import "github.com/docker/docker/daemon/logger/templates" + +import ( + "bytes" + "encoding/json" + "strings" + "text/template" +) + +// basicFunctions are the set of initial +// functions provided to every template. +var basicFunctions = template.FuncMap{ + "json": func(v interface{}) string { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + enc.Encode(v) + // Remove the trailing new line added by the encoder + return strings.TrimSpace(buf.String()) + }, + "split": strings.Split, + "join": strings.Join, + "title": strings.Title, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "pad": padWithSpace, + "truncate": truncateWithLength, +} + +// NewParse creates a new tagged template with the basic functions +// and parses the given format. 
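+//
+// For example (a sketch; error handling elided):
+//
+//	tmpl, _ := NewParse("example", "{{.ID | upper}}")
+//	var buf bytes.Buffer
+//	_ = tmpl.Execute(&buf, struct{ ID string }{"abc123"})
+//	// buf.String() == "ABC123"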
+func NewParse(tag, format string) (*template.Template, error) { + return template.New(tag).Funcs(basicFunctions).Parse(format) +} + +// padWithSpace adds whitespace to the input if the input is non-empty +func padWithSpace(source string, prefix, suffix int) string { + if source == "" { + return source + } + return strings.Repeat(" ", prefix) + source + strings.Repeat(" ", suffix) +} + +// truncateWithLength truncates the source string up to the length provided by the input +func truncateWithLength(source string, length int) string { + if len(source) < length { + return source + } + return source[:length] +} diff --git a/vendor/github.com/docker/docker/daemon/network/settings.go b/vendor/github.com/docker/docker/daemon/network/settings.go new file mode 100644 index 0000000000..b0460ed6ae --- /dev/null +++ b/vendor/github.com/docker/docker/daemon/network/settings.go @@ -0,0 +1,69 @@ +package network // import "github.com/docker/docker/daemon/network" + +import ( + "net" + + networktypes "github.com/docker/docker/api/types/network" + clustertypes "github.com/docker/docker/daemon/cluster/provider" + "github.com/docker/go-connections/nat" + "github.com/pkg/errors" +) + +// Settings stores configuration details about the daemon network config +// TODO Windows. Many of these fields can be factored out. +type Settings struct { + Bridge string + SandboxID string + HairpinMode bool + LinkLocalIPv6Address string + LinkLocalIPv6PrefixLen int + Networks map[string]*EndpointSettings + Service *clustertypes.ServiceConfig + Ports nat.PortMap + SandboxKey string + SecondaryIPAddresses []networktypes.Address + SecondaryIPv6Addresses []networktypes.Address + IsAnonymousEndpoint bool + HasSwarmEndpoint bool +} + +// EndpointSettings is a package local wrapper for +// networktypes.EndpointSettings which stores Endpoint state that +// needs to be persisted to disk but not exposed in the api. +type EndpointSettings struct { + *networktypes.EndpointSettings + IPAMOperational bool +} + +// AttachmentStore stores the load balancer IP address for a network id. +type AttachmentStore struct { + // key: network id + // value: load balancer ip address + networkToNodeLBIP map[string]net.IP +} + +// ResetAttachments clears any existing load balancer IP to network mapping and +// sets the mapping to the given attachments. +func (store *AttachmentStore) ResetAttachments(attachments map[string]string) error { + store.ClearAttachments() + for nid, nodeIP := range attachments { + ip, _, err := net.ParseCIDR(nodeIP) + if err != nil { + store.networkToNodeLBIP = make(map[string]net.IP) + return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP) + } + store.networkToNodeLBIP[nid] = ip + } + return nil +} + +// ClearAttachments clears all the mappings of network to load balancer IP Address. +func (store *AttachmentStore) ClearAttachments() { + store.networkToNodeLBIP = make(map[string]net.IP) +} + +// GetIPForNetwork returns the load balancer IP address for the given network.
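+//
+// For example (a sketch; the network ID and CIDR address are invented):
+//
+//	var store network.AttachmentStore
+//	_ = store.ResetAttachments(map[string]string{"n1": "10.0.0.2/24"})
+//	ip, ok := store.GetIPForNetwork("n1") // ip == 10.0.0.2, ok == true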
+func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) { + ip, exists := store.networkToNodeLBIP[networkID] + return ip, exists +} diff --git a/vendor/github.com/docker/docker/dockerversion/useragent.go b/vendor/github.com/docker/docker/dockerversion/useragent.go new file mode 100644 index 0000000000..2eceb6fa9e --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/useragent.go @@ -0,0 +1,76 @@ +package dockerversion // import "github.com/docker/docker/dockerversion" + +import ( + "context" + "fmt" + "runtime" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/useragent" +) + +// UAStringKey is used as the key for the user-agent string in a net/context struct +const UAStringKey = "upstream-user-agent" + +// DockerUserAgent is the User-Agent the Docker client uses to identify itself. +// In accordance with RFC 7231 (5.5.3) it is of the form: +// [docker client's UA] UpstreamClient([upstream client's UA]) +func DockerUserAgent(ctx context.Context) string { + httpVersion := make([]useragent.VersionInfo, 0, 6) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) + if kernelVersion, err := kernel.GetKernelVersion(); err == nil { + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) + } + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) + httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) + + dockerUA := useragent.AppendVersions("", httpVersion...)
+ upstreamUA := getUserAgentFromContext(ctx) + if len(upstreamUA) > 0 { + ret := insertUpstreamUserAgent(upstreamUA, dockerUA) + return ret + } + return dockerUA +} + +// getUserAgentFromContext returns the previously saved user-agent context stored in ctx, if one exists +func getUserAgentFromContext(ctx context.Context) string { + var upstreamUA string + if ctx != nil { + var ki interface{} = ctx.Value(UAStringKey) + if ki != nil { + upstreamUA = ctx.Value(UAStringKey).(string) + } + } + return upstreamUA +} + +// escapeStr returns s with every rune in charsToEscape escaped by a backslash +func escapeStr(s string, charsToEscape string) string { + var ret string + for _, currRune := range s { + appended := false + for _, escapableRune := range charsToEscape { + if currRune == escapableRune { + ret += `\` + string(currRune) + appended = true + break + } + } + if !appended { + ret += string(currRune) + } + } + return ret +} + +// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent +// string of the form: +// $dockerUA UpstreamClient($upstreamUA) +func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { + charsToEscape := `();\` + upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) + return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) +} diff --git a/vendor/github.com/docker/docker/dockerversion/version_lib.go b/vendor/github.com/docker/docker/dockerversion/version_lib.go new file mode 100644 index 0000000000..1489be0a25 --- /dev/null +++ b/vendor/github.com/docker/docker/dockerversion/version_lib.go @@ -0,0 +1,17 @@ +// +build !autogen + +// Package dockerversion is auto-generated at build-time +package dockerversion // import "github.com/docker/docker/dockerversion" + +// Default build-time variable for library-import. +// This file is overridden on build with build-time information. +const ( + GitCommit string = "library-import" + Version string = "library-import" + BuildTime string = "library-import" + IAmStatic string = "library-import" + ContainerdCommitID string = "library-import" + RuncCommitID string = "library-import" + InitCommitID string = "library-import" + PlatformName string = "" +) diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 0000000000..e6a2275b2d --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,74 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When an ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred.
+// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrAlreadyExists is a special case of ErrConflict which signals that the desired object already exists +type ErrAlreadyExists interface { + AlreadyExists() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 0000000000..c211f174fc --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
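+//
+// A short sketch of the intended flow (hypothetical caller code; the Is helpers
+// follow Cause() chains, so errors wrapped with github.com/pkg/errors are still
+// classified correctly):
+//
+//	err := errdefs.NotFound(fmt.Errorf("no such container: %s", id))
+//	err = errors.Wrap(err, "lookup failed") // optional wrapping
+//	if errdefs.IsNotFound(err) {
+//		// map to a 404 at the API boundary
+//	}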
+package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil { + return nil + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil { + return nil + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil { + return nil + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil { + return nil + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil { + return nil + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil { + return nil + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil { + return nil + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil { + return nil + } + return errNotModified{err} +} + +type errAlreadyExists struct{ error } + +func (errAlreadyExists) AlreadyExists() {} + +func (e errAlreadyExists) Cause() error { + return e.error +} + +// AlreadyExists is a helper to create an error of the class with the same name from any error type +func AlreadyExists(err error) error { + if err == nil { + return nil + } + return errAlreadyExists{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented)
 NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil { + return nil + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil { + return nil + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil { + return nil + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil { + return nil + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil { + return nil + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 0000000000..cc26e4b750 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,114 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrAlreadyExists, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +}
+ +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsAlreadyExists returns if the passed in error is an AlreadyExists error +func IsAlreadyExists(err error) bool { + _, ok := getImplementer(err).(ErrAlreadyExists) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/image/fs.go b/vendor/github.com/docker/docker/image/fs.go new file mode 100644 index 0000000000..7080c8c015 --- /dev/null +++ b/vendor/github.com/docker/docker/image/fs.go @@ -0,0 +1,175 @@ +package image // import "github.com/docker/docker/image" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// DigestWalkFunc is the function called by StoreBackend.Walk +type DigestWalkFunc func(id digest.Digest) error + +// StoreBackend provides the interface for image.Store persistence +type StoreBackend interface { + Walk(f DigestWalkFunc) error + Get(id digest.Digest) ([]byte, error) + Set(data []byte) (digest.Digest, error) + Delete(id digest.Digest) error + SetMetadata(id digest.Digest, key string, data []byte) error + GetMetadata(id digest.Digest, key string) ([]byte, error) + DeleteMetadata(id digest.Digest, key string) error +} + +// fs implements StoreBackend using the filesystem.
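+//
+// A minimal usage sketch (the path is illustrative, and configJSON and parent
+// are assumed to be existing []byte values):
+//
+//	backend, _ := NewFSStoreBackend("/var/lib/docker/image/overlay2/imagedb")
+//	dgst, _ := backend.Set(configJSON)              // content-addressed write
+//	raw, _ := backend.Get(dgst)                     // read back, verifying the digest
+//	_ = backend.SetMetadata(dgst, "parent", parent) // sidecar metadata keyed by digest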
+type fs struct { + sync.RWMutex + root string +} + +const ( + contentDirName = "content" + metadataDirName = "metadata" +) + +// NewFSStoreBackend returns a new filesystem-based backend for image.Store +func NewFSStoreBackend(root string) (StoreBackend, error) { + return newFSStore(root) +} + +func newFSStore(root string) (*fs, error) { + s := &fs{ + root: root, + } + if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { + return nil, errors.Wrap(err, "failed to create storage backend") + } + return s, nil +} + +func (s *fs) contentFile(dgst digest.Digest) string { + return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +func (s *fs) metadataDir(dgst digest.Digest) string { + return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) +} + +// Walk calls the supplied callback for each image ID in the storage backend. +func (s *fs) Walk(f DigestWalkFunc) error { + // Only Canonical digest (sha256) is currently supported + s.RLock() + dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) + s.RUnlock() + if err != nil { + return err + } + for _, v := range dir { + dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("skipping invalid digest %s: %s", dgst, err) + continue + } + if err := f(dgst); err != nil { + return err + } + } + return nil +} + +// Get returns the content stored under a given digest. +func (s *fs) Get(dgst digest.Digest) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + return s.get(dgst) +} + +func (s *fs) get(dgst digest.Digest) ([]byte, error) { + content, err := ioutil.ReadFile(s.contentFile(dgst)) + if err != nil { + return nil, errors.Wrapf(err, "failed to get digest %s", dgst) + } + + // todo: maybe optional + if digest.FromBytes(content) != dgst { + return nil, fmt.Errorf("failed to verify: %v", dgst) + } + + return content, nil +} + +// Set stores content by checksum. +func (s *fs) Set(data []byte) (digest.Digest, error) { + s.Lock() + defer s.Unlock() + + if len(data) == 0 { + return "", fmt.Errorf("invalid empty data") + } + + dgst := digest.FromBytes(data) + if err := ioutils.AtomicWriteFile(s.contentFile(dgst), data, 0600); err != nil { + return "", errors.Wrap(err, "failed to write digest data") + } + + return dgst, nil +} + +// Delete removes content and metadata files associated with the digest. +func (s *fs) Delete(dgst digest.Digest) error { + s.Lock() + defer s.Unlock() + + if err := os.RemoveAll(s.metadataDir(dgst)); err != nil { + return err + } + return os.Remove(s.contentFile(dgst)) +} + +// SetMetadata sets metadata for a given ID. It fails if there's no base file. +func (s *fs) SetMetadata(dgst digest.Digest, key string, data []byte) error { + s.Lock() + defer s.Unlock() + if _, err := s.get(dgst); err != nil { + return err + } + + baseDir := filepath.Join(s.metadataDir(dgst)) + if err := os.MkdirAll(baseDir, 0700); err != nil { + return err + } + return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(dgst), key), data, 0600) +} + +// GetMetadata returns metadata for a given digest.
+func (s *fs) GetMetadata(dgst digest.Digest, key string) ([]byte, error) { + s.RLock() + defer s.RUnlock() + + if _, err := s.get(dgst); err != nil { + return nil, err + } + bytes, err := ioutil.ReadFile(filepath.Join(s.metadataDir(dgst), key)) + if err != nil { + return nil, errors.Wrap(err, "failed to read metadata") + } + return bytes, nil +} + +// DeleteMetadata removes the metadata associated with a digest. +func (s *fs) DeleteMetadata(dgst digest.Digest, key string) error { + s.Lock() + defer s.Unlock() + + return os.RemoveAll(filepath.Join(s.metadataDir(dgst), key)) +} diff --git a/vendor/github.com/docker/docker/image/image.go b/vendor/github.com/docker/docker/image/image.go new file mode 100644 index 0000000000..7e0646f072 --- /dev/null +++ b/vendor/github.com/docker/docker/image/image.go @@ -0,0 +1,232 @@ +package image // import "github.com/docker/docker/image" + +import ( + "encoding/json" + "errors" + "io" + "runtime" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/dockerversion" + "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" +) + +// ID is the content-addressable ID of an image. +type ID digest.Digest + +func (id ID) String() string { + return id.Digest().String() +} + +// Digest converts ID into a digest +func (id ID) Digest() digest.Digest { + return digest.Digest(id) +} + +// IDFromDigest creates an ID from a digest +func IDFromDigest(digest digest.Digest) ID { + return ID(digest) +} + +// V1Image stores the V1 image configuration. +type V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig container.Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *container.Config `json:"config,omitempty"` + // Architecture is the hardware that the image is built and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Image stores the image configuration +type Image struct { + V1Image + Parent ID `json:"parent,omitempty"` + RootFS *RootFS `json:"rootfs,omitempty"` + History []History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. 
+ computedID ID +} + +// RawJSON returns the immutable JSON associated with the image. +func (img *Image) RawJSON() []byte { + return img.rawJSON +} + +// ID returns the image's content-addressable ID. +func (img *Image) ID() ID { + return img.computedID +} + +// ImageID stringifies ID. +func (img *Image) ImageID() string { + return img.ID().String() +} + +// RunConfig returns the image's container config. +func (img *Image) RunConfig() *container.Config { + return img.Config +} + +// BaseImgArch returns the image's architecture. If not populated, defaults to the host runtime arch. +func (img *Image) BaseImgArch() string { + arch := img.Architecture + if arch == "" { + arch = runtime.GOARCH + } + return arch +} + +// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS. +func (img *Image) OperatingSystem() string { + os := img.OS + if os == "" { + os = runtime.GOOS + } + return os +} + +// MarshalJSON serializes the image to JSON. It sorts the top-level keys so +// that JSON that's been manipulated by a push/pull cycle with a legacy +// registry won't end up with a different key order. +func (img *Image) MarshalJSON() ([]byte, error) { + type MarshalImage Image + + pass1, err := json.Marshal(MarshalImage(*img)) + if err != nil { + return nil, err + } + + var c map[string]*json.RawMessage + if err := json.Unmarshal(pass1, &c); err != nil { + return nil, err + } + return json.Marshal(c) +} + +// ChildConfig is the configuration to apply to an Image to create a new +// Child image. Other properties of the image are copied from the parent. +type ChildConfig struct { + ContainerID string + Author string + Comment string + DiffID layer.DiffID + ContainerConfig *container.Config + Config *container.Config +} + +// NewChildImage creates a new Image as a child of this image. +func NewChildImage(img *Image, child ChildConfig, platform string) *Image { + isEmptyLayer := layer.IsEmpty(child.DiffID) + var rootFS *RootFS + if img.RootFS != nil { + rootFS = img.RootFS.Clone() + } else { + rootFS = NewRootFS() + } + + if !isEmptyLayer { + rootFS.Append(child.DiffID) + } + imgHistory := NewHistory( + child.Author, + child.Comment, + strings.Join(child.ContainerConfig.Cmd, " "), + isEmptyLayer) + + return &Image{ + V1Image: V1Image{ + DockerVersion: dockerversion.Version, + Config: child.Config, + Architecture: img.BaseImgArch(), + OS: platform, + Container: child.ContainerID, + ContainerConfig: *child.ContainerConfig, + Author: child.Author, + Created: imgHistory.Created, + }, + RootFS: rootFS, + History: append(img.History, imgHistory), + OSFeatures: img.OSFeatures, + OSVersion: img.OSVersion, + } +} + +// History stores build commands that were used to create an image +type History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. 
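+ // Metadata-only Dockerfile instructions such as ENV or LABEL produce + // history entries with EmptyLayer set.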
+ EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// NewHistory creates a new history struct from arguments, and sets the created +// time to the current time in UTC +func NewHistory(author, comment, createdBy string, isEmptyLayer bool) History { + return History{ + Author: author, + Created: time.Now().UTC(), + CreatedBy: createdBy, + Comment: comment, + EmptyLayer: isEmptyLayer, + } +} + +// Exporter provides an interface for loading and saving images +type Exporter interface { + Load(io.ReadCloser, io.Writer, bool) error + // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error + Save([]string, io.Writer) error +} + +// NewFromJSON creates an Image configuration from json. +func NewFromJSON(src []byte) (*Image, error) { + img := &Image{} + + if err := json.Unmarshal(src, img); err != nil { + return nil, err + } + if img.RootFS == nil { + return nil, errors.New("invalid image JSON, no RootFS key") + } + + img.rawJSON = src + + return img, nil +} diff --git a/vendor/github.com/docker/docker/image/rootfs.go b/vendor/github.com/docker/docker/image/rootfs.go new file mode 100644 index 0000000000..84843e10c6 --- /dev/null +++ b/vendor/github.com/docker/docker/image/rootfs.go @@ -0,0 +1,52 @@ +package image // import "github.com/docker/docker/image" + +import ( + "runtime" + + "github.com/docker/docker/layer" + "github.com/sirupsen/logrus" +) + +// TypeLayers is used for RootFS.Type for filesystems organized into layers. +const TypeLayers = "layers" + +// typeLayersWithBase is an older format used by Windows up to v1.12. We +// explicitly handle this as an error case to ensure that a daemon which still +// has an older image like this on disk can still start, even though the +// image itself is not usable. See https://github.com/docker/docker/pull/25806. +const typeLayersWithBase = "layers+base" + +// RootFS describes an image's root filesystem. +// This is currently a placeholder that only supports layers. In the future +// this can be made into an interface that supports different implementations. +type RootFS struct { + Type string `json:"type"` + DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` +} + +// NewRootFS returns an empty RootFS struct +func NewRootFS() *RootFS { + return &RootFS{Type: TypeLayers} +} + +// Append appends a new diffID to rootfs +func (r *RootFS) Append(id layer.DiffID) { + r.DiffIDs = append(r.DiffIDs, id) +} + +// Clone returns a copy of the RootFS +func (r *RootFS) Clone() *RootFS { + newRoot := NewRootFS() + newRoot.Type = r.Type + // Copy the slice so that appends to the clone cannot alias the + // original's backing array. + newRoot.DiffIDs = make([]layer.DiffID, len(r.DiffIDs)) + copy(newRoot.DiffIDs, r.DiffIDs) + return newRoot +} + +// ChainID returns the ChainID for the top layer in RootFS. +func (r *RootFS) ChainID() layer.ChainID { + if runtime.GOOS == "windows" && r.Type == typeLayersWithBase { + logrus.Warnf("Layer type is unsupported on this platform. 
DiffIDs: '%v'", r.DiffIDs) + return "" + } + return layer.CreateChainID(r.DiffIDs) +} diff --git a/vendor/github.com/docker/docker/image/store.go b/vendor/github.com/docker/docker/image/store.go new file mode 100644 index 0000000000..9fd7d7dcf3 --- /dev/null +++ b/vendor/github.com/docker/docker/image/store.go @@ -0,0 +1,345 @@ +package image // import "github.com/docker/docker/image" + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/docker/distribution/digestset" + "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Store is an interface for creating and accessing images +type Store interface { + Create(config []byte) (ID, error) + Get(id ID) (*Image, error) + Delete(id ID) ([]layer.Metadata, error) + Search(partialID string) (ID, error) + SetParent(id ID, parent ID) error + GetParent(id ID) (ID, error) + SetLastUpdated(id ID) error + GetLastUpdated(id ID) (time.Time, error) + Children(id ID) []ID + Map() map[ID]*Image + Heads() map[ID]*Image + Len() int +} + +// LayerGetReleaser is a minimal interface for getting and releasing images. +type LayerGetReleaser interface { + Get(layer.ChainID) (layer.Layer, error) + Release(layer.Layer) ([]layer.Metadata, error) +} + +type imageMeta struct { + layer layer.Layer + children map[ID]struct{} +} + +type store struct { + sync.RWMutex + lss map[string]LayerGetReleaser + images map[ID]*imageMeta + fs StoreBackend + digestSet *digestset.Set +} + +// NewImageStore returns new store object for given set of layer stores +func NewImageStore(fs StoreBackend, lss map[string]LayerGetReleaser) (Store, error) { + is := &store{ + lss: lss, + images: make(map[ID]*imageMeta), + fs: fs, + digestSet: digestset.NewSet(), + } + + // load all current images and retain layers + if err := is.restore(); err != nil { + return nil, err + } + + return is, nil +} + +func (is *store) restore() error { + err := is.fs.Walk(func(dgst digest.Digest) error { + img, err := is.Get(IDFromDigest(dgst)) + if err != nil { + logrus.Errorf("invalid image %v, %v", dgst, err) + return nil + } + var l layer.Layer + if chainID := img.RootFS.ChainID(); chainID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + return system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(chainID) + if err != nil { + if err == layer.ErrLayerDoesNotExist { + logrus.Errorf("layer does not exist, not restoring image %v, %v, %s", dgst, chainID, img.OperatingSystem()) + return nil + } + return err + } + } + if err := is.digestSet.Add(dgst); err != nil { + return err + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[IDFromDigest(dgst)] = imageMeta + + return nil + }) + if err != nil { + return err + } + + // Second pass to fill in children maps + for id := range is.images { + if parent, err := is.GetParent(id); err == nil { + if parentMeta := is.images[parent]; parentMeta != nil { + parentMeta.children[id] = struct{}{} + } + } + } + + return nil +} + +func (is *store) Create(config []byte) (ID, error) { + var img Image + err := json.Unmarshal(config, &img) + if err != nil { + return "", err + } + + // Must reject any config that references diffIDs from the history + // which aren't among the rootfs layers. 
+ rootFSLayers := make(map[layer.DiffID]struct{}) + for _, diffID := range img.RootFS.DiffIDs { + rootFSLayers[diffID] = struct{}{} + } + + layerCounter := 0 + for _, h := range img.History { + if !h.EmptyLayer { + layerCounter++ + } + } + if layerCounter > len(img.RootFS.DiffIDs) { + return "", errors.New("too many non-empty layers in History section") + } + + dgst, err := is.fs.Set(config) + if err != nil { + return "", err + } + imageID := IDFromDigest(dgst) + + is.Lock() + defer is.Unlock() + + if _, exists := is.images[imageID]; exists { + return imageID, nil + } + + layerID := img.RootFS.ChainID() + + var l layer.Layer + if layerID != "" { + if !system.IsOSSupported(img.OperatingSystem()) { + return "", system.ErrNotSupportedOperatingSystem + } + l, err = is.lss[img.OperatingSystem()].Get(layerID) + if err != nil { + return "", errors.Wrapf(err, "failed to get layer %s", layerID) + } + } + + imageMeta := &imageMeta{ + layer: l, + children: make(map[ID]struct{}), + } + + is.images[imageID] = imageMeta + if err := is.digestSet.Add(imageID.Digest()); err != nil { + delete(is.images, imageID) + return "", err + } + + return imageID, nil +} + +type imageNotFoundError string + +func (e imageNotFoundError) Error() string { + return "No such image: " + string(e) +} + +func (imageNotFoundError) NotFound() {} + +func (is *store) Search(term string) (ID, error) { + dgst, err := is.digestSet.Lookup(term) + if err != nil { + if err == digestset.ErrDigestNotFound { + err = imageNotFoundError(term) + } + return "", errors.WithStack(err) + } + return IDFromDigest(dgst), nil +} + +func (is *store) Get(id ID) (*Image, error) { + // todo: Check if image is in images + // todo: Detect manual insertions and start using them + config, err := is.fs.Get(id.Digest()) + if err != nil { + return nil, err + } + + img, err := NewFromJSON(config) + if err != nil { + return nil, err + } + img.computedID = id + + img.Parent, err = is.GetParent(id) + if err != nil { + img.Parent = "" + } + + return img, nil +} + +func (is *store) Delete(id ID) ([]layer.Metadata, error) { + is.Lock() + defer is.Unlock() + + imageMeta := is.images[id] + if imageMeta == nil { + return nil, fmt.Errorf("unrecognized image ID %s", id.String()) + } + img, err := is.Get(id) + if err != nil { + return nil, fmt.Errorf("unrecognized image %s, %v", id.String(), err) + } + if !system.IsOSSupported(img.OperatingSystem()) { + return nil, fmt.Errorf("unsupported image operating system %q", img.OperatingSystem()) + } + for id := range imageMeta.children { + is.fs.DeleteMetadata(id.Digest(), "parent") + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + + if err := is.digestSet.Remove(id.Digest()); err != nil { + logrus.Errorf("error removing %s from digest set: %q", id, err) + } + delete(is.images, id) + is.fs.Delete(id.Digest()) + + if imageMeta.layer != nil { + return is.lss[img.OperatingSystem()].Release(imageMeta.layer) + } + return nil, nil +} + +func (is *store) SetParent(id, parent ID) error { + is.Lock() + defer is.Unlock() + parentMeta := is.images[parent] + if parentMeta == nil { + return fmt.Errorf("unknown parent image ID %s", parent.String()) + } + if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { + delete(is.images[parent].children, id) + } + parentMeta.children[id] = struct{}{} + return is.fs.SetMetadata(id.Digest(), "parent", []byte(parent)) +} + +func (is *store) GetParent(id ID) (ID, error) { + d, err := 
is.fs.GetMetadata(id.Digest(), "parent") + if err != nil { + return "", err + } + return ID(d), nil // todo: validate? +} + +// SetLastUpdated time for the image ID to the current time +func (is *store) SetLastUpdated(id ID) error { + lastUpdated := []byte(time.Now().Format(time.RFC3339Nano)) + return is.fs.SetMetadata(id.Digest(), "lastUpdated", lastUpdated) +} + +// GetLastUpdated time for the image ID +func (is *store) GetLastUpdated(id ID) (time.Time, error) { + bytes, err := is.fs.GetMetadata(id.Digest(), "lastUpdated") + if err != nil || len(bytes) == 0 { + // No lastUpdated time + return time.Time{}, nil + } + return time.Parse(time.RFC3339Nano, string(bytes)) +} + +func (is *store) Children(id ID) []ID { + is.RLock() + defer is.RUnlock() + + return is.children(id) +} + +func (is *store) children(id ID) []ID { + var ids []ID + if is.images[id] != nil { + for id := range is.images[id].children { + ids = append(ids, id) + } + } + return ids +} + +func (is *store) Heads() map[ID]*Image { + return is.imagesMap(false) +} + +func (is *store) Map() map[ID]*Image { + return is.imagesMap(true) +} + +func (is *store) imagesMap(all bool) map[ID]*Image { + is.RLock() + defer is.RUnlock() + + images := make(map[ID]*Image) + + for id := range is.images { + if !all && len(is.children(id)) > 0 { + continue + } + img, err := is.Get(id) + if err != nil { + logrus.Errorf("invalid image access: %q, error: %q", id, err) + continue + } + images[id] = img + } + return images +} + +func (is *store) Len() int { + is.RLock() + defer is.RUnlock() + return len(is.images) +} diff --git a/vendor/github.com/docker/docker/layer/empty.go b/vendor/github.com/docker/docker/layer/empty.go new file mode 100644 index 0000000000..c81c702140 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/empty.go @@ -0,0 +1,61 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" +) + +// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - +// (1024 NULL bytes) +const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") + +type emptyLayer struct{} + +// EmptyLayer is a layer that corresponds to empty tar. 
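+// Both its DiffID and ChainID equal DigestSHA256EmptyTar, and its size and +// diff size are zero (see the methods below).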
+var EmptyLayer = &emptyLayer{} + +func (el *emptyLayer) TarStream() (io.ReadCloser, error) { + buf := new(bytes.Buffer) + tarWriter := tar.NewWriter(buf) + tarWriter.Close() + return ioutil.NopCloser(buf), nil +} + +func (el *emptyLayer) TarStreamFrom(p ChainID) (io.ReadCloser, error) { + if p == "" { + return el.TarStream() + } + return nil, fmt.Errorf("can't get parent tar stream of an empty layer") +} + +func (el *emptyLayer) ChainID() ChainID { + return ChainID(DigestSHA256EmptyTar) +} + +func (el *emptyLayer) DiffID() DiffID { + return DigestSHA256EmptyTar +} + +func (el *emptyLayer) Parent() Layer { + return nil +} + +func (el *emptyLayer) Size() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) DiffSize() (size int64, err error) { + return 0, nil +} + +func (el *emptyLayer) Metadata() (map[string]string, error) { + return make(map[string]string), nil +} + +// IsEmpty returns true if the layer is an EmptyLayer +func IsEmpty(diffID DiffID) bool { + return diffID == DigestSHA256EmptyTar +} diff --git a/vendor/github.com/docker/docker/layer/filestore.go b/vendor/github.com/docker/docker/layer/filestore.go new file mode 100644 index 0000000000..b1cbb80166 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore.go @@ -0,0 +1,355 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/ioutils" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +var ( + stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) + supportedAlgorithms = []digest.Algorithm{ + digest.SHA256, + // digest.SHA384, // Currently not used + // digest.SHA512, // Currently not used + } +) + +type fileMetadataStore struct { + root string +} + +type fileMetadataTransaction struct { + store *fileMetadataStore + ws *ioutils.AtomicWriteSet +} + +// newFSMetadataStore returns an instance of a metadata store +// which is backed by files on disk using the provided root +// as the root of metadata files. 
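+// Layer metadata is stored under <root>/<algorithm>/<hex>/ and mount +// metadata under <root>/mounts/<name>/ (see the path helpers below).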
+func newFSMetadataStore(root string) (*fileMetadataStore, error) { + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + return &fileMetadataStore{ + root: root, + }, nil +} + +func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { + dgst := digest.Digest(layer) + return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) +} + +func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { + return filepath.Join(fms.getLayerDirectory(layer), filename) +} + +func (fms *fileMetadataStore) getMountDirectory(mount string) string { + return filepath.Join(fms.root, "mounts", mount) +} + +func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { + return filepath.Join(fms.getMountDirectory(mount), filename) +} + +func (fms *fileMetadataStore) StartTransaction() (*fileMetadataTransaction, error) { + tmpDir := filepath.Join(fms.root, "tmp") + if err := os.MkdirAll(tmpDir, 0755); err != nil { + return nil, err + } + ws, err := ioutils.NewAtomicWriteSet(tmpDir) + if err != nil { + return nil, err + } + + return &fileMetadataTransaction{ + store: fms, + ws: ws, + }, nil +} + +func (fm *fileMetadataTransaction) SetSize(size int64) error { + content := fmt.Sprintf("%d", size) + return fm.ws.WriteFile("size", []byte(content), 0644) +} + +func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { + return fm.ws.WriteFile("parent", []byte(digest.Digest(parent).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { + return fm.ws.WriteFile("diff", []byte(digest.Digest(diff).String()), 0644) +} + +func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { + return fm.ws.WriteFile("cache-id", []byte(cacheID), 0644) +} + +func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { + jsonRef, err := json.Marshal(ref) + if err != nil { + return err + } + return fm.ws.WriteFile("descriptor.json", jsonRef, 0644) +} + +func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { + f, err := fm.ws.FileWriter("tar-split.json.gz", os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return nil, err + } + var wc io.WriteCloser + if compressInput { + wc = gzip.NewWriter(f) + } else { + wc = f + } + + return ioutils.NewWriteCloserWrapper(wc, func() error { + wc.Close() + return f.Close() + }), nil +} + +func (fm *fileMetadataTransaction) Commit(layer ChainID) error { + finalDir := fm.store.getLayerDirectory(layer) + if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { + return err + } + + return fm.ws.Commit(finalDir) +} + +func (fm *fileMetadataTransaction) Cancel() error { + return fm.ws.Cancel() +} + +func (fm *fileMetadataTransaction) String() string { + return fm.ws.String() +} + +func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) + if err != nil { + return 0, err + } + + size, err := strconv.ParseInt(string(content), 10, 64) + if err != nil { + return 0, err + } + + return size, nil +} + +func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return ChainID(dgst), nil +} + +func (fms *fileMetadataStore) 
GetDiffID(layer ChainID) (DiffID, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) + if err != nil { + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return DiffID(dgst), nil +} + +func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid cache id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { + content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) + if err != nil { + if os.IsNotExist(err) { + // only return empty descriptor to represent what is stored + return distribution.Descriptor{}, nil + } + return distribution.Descriptor{}, err + } + + var ref distribution.Descriptor + err = json.Unmarshal(content, &ref) + if err != nil { + return distribution.Descriptor{}, err + } + return ref, err +} + +func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { + fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) + if err != nil { + return nil, err + } + f, err := gzip.NewReader(fz) + if err != nil { + fz.Close() + return nil, err + } + + return ioutils.NewReadCloserWrapper(f, func() error { + f.Close() + return fz.Close() + }), nil +} + +func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) +} + +func (fms *fileMetadataStore) SetInitID(mount string, init string) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) +} + +func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { + if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { + return err + } + return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) +} + +func (fms *fileMetadataStore) GetMountID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) + if err != nil { + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid mount id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if !stringIDRegexp.MatchString(content) { + return "", errors.New("invalid init id value") + } + + return content, nil +} + +func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { + content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + + dgst, err := digest.Parse(strings.TrimSpace(string(content))) + if err != nil { + return "", err + } + + return 
ChainID(dgst), nil +} + +func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { + var ids []ChainID + for _, algorithm := range supportedAlgorithms { + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) + if err != nil { + if os.IsNotExist(err) { + continue + } + return nil, nil, err + } + + for _, fi := range fileInfos { + if fi.IsDir() && fi.Name() != "mounts" { + dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) + if err := dgst.Validate(); err != nil { + logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) + } else { + ids = append(ids, ChainID(dgst)) + } + } + } + } + + fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) + if err != nil { + if os.IsNotExist(err) { + return ids, []string{}, nil + } + return nil, nil, err + } + + var mounts []string + for _, fi := range fileInfos { + if fi.IsDir() { + mounts = append(mounts, fi.Name()) + } + } + + return ids, mounts, nil +} + +func (fms *fileMetadataStore) Remove(layer ChainID) error { + return os.RemoveAll(fms.getLayerDirectory(layer)) +} + +func (fms *fileMetadataStore) RemoveMount(mount string) error { + return os.RemoveAll(fms.getMountDirectory(mount)) +} diff --git a/vendor/github.com/docker/docker/layer/filestore_unix.go b/vendor/github.com/docker/docker/layer/filestore_unix.go new file mode 100644 index 0000000000..68e7f90779 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package layer // import "github.com/docker/docker/layer" + +import "runtime" + +// setOS is a no-op on non-Windows platforms: the operating system is implied +// by the host, so no "os" file is written to the layer filestore. +func (fm *fileMetadataTransaction) setOS(os string) error { + return nil +} + +// getOS returns the host operating system on non-Windows platforms rather +// than reading an "os" file from the layer filestore. +func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { + return runtime.GOOS, nil +} diff --git a/vendor/github.com/docker/docker/layer/filestore_windows.go b/vendor/github.com/docker/docker/layer/filestore_windows.go new file mode 100644 index 0000000000..cecad426c8 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/filestore_windows.go @@ -0,0 +1,35 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io/ioutil" + "os" + "strings" +) + +// setOS writes the "os" file to the layer filestore +func (fm *fileMetadataTransaction) setOS(os string) error { + if os == "" { + return nil + } + return fm.ws.WriteFile("os", []byte(os), 0644) +} + +// getOS reads the "os" file from the layer filestore +func (fms *fileMetadataStore) getOS(layer ChainID) (string, error) { + contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os")) + if err != nil { + // For backwards compatibility, the os file may not exist. Default to "windows" if missing. + if os.IsNotExist(err) { + return "windows", nil + } + return "", err + } + content := strings.TrimSpace(string(contentBytes)) + + if content != "windows" && content != "linux" { + return "", fmt.Errorf("invalid operating system value: %s", content) + } + + return content, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer.go b/vendor/github.com/docker/docker/layer/layer.go new file mode 100644 index 0000000000..d0c7fa8608 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer.go @@ -0,0 +1,237 @@ +// Package layer is a package for managing read-only +// and read-write mounts on the union file system +// driver. Read-only mounts are referenced using a +// content hash and are protected from mutation in +// the exposed interface. 
The tar format is used +// to create read-only layers and export both +// read-only and writable layers. The exported +// tar data for a read-only layer should match +// the tar used to create the layer. +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" + "io" + + "github.com/docker/distribution" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +var ( + // ErrLayerDoesNotExist is used when an operation is + // attempted on a layer which does not exist. + ErrLayerDoesNotExist = errors.New("layer does not exist") + + // ErrLayerNotRetained is used when a release is + // attempted on a layer which is not retained. + ErrLayerNotRetained = errors.New("layer not retained") + + // ErrMountDoesNotExist is used when an operation is + // attempted on a mount layer which does not exist. + ErrMountDoesNotExist = errors.New("mount does not exist") + + // ErrMountNameConflict is used when a mount is attempted + // to be created but there is already a mount with the name + // used for creation. + ErrMountNameConflict = errors.New("mount already exists with name") + + // ErrActiveMount is used when an operation on a + // mount is attempted but the layer is still + // mounted and the operation cannot be performed. + ErrActiveMount = errors.New("mount still active") + + // ErrNotMounted is used when requesting an active + // mount but the layer is not mounted. + ErrNotMounted = errors.New("not mounted") + + // ErrMaxDepthExceeded is used when a layer is attempted + // to be created which would result in a layer depth + // greater than the 125 max. + ErrMaxDepthExceeded = errors.New("max depth exceeded") + + // ErrNotSupported is used when the action is not supported + // on the current host operating system. + ErrNotSupported = errors.New("not supported on this host operating system") +) + +// ChainID is the content-addressable ID of a layer. +type ChainID digest.Digest + +// String returns a string rendition of a layer ID +func (id ChainID) String() string { + return string(id) +} + +// DiffID is the hash of an individual layer tar. +type DiffID digest.Digest + +// String returns a string rendition of a layer DiffID +func (diffID DiffID) String() string { + return string(diffID) +} + +// TarStreamer represents an object which may +// have its contents exported as a tar stream. +type TarStreamer interface { + // TarStream returns a tar archive stream + // for the contents of a layer. + TarStream() (io.ReadCloser, error) +} + +// Layer represents a read-only layer +type Layer interface { + TarStreamer + + // TarStreamFrom returns a tar archive stream for all the layer chain with + // arbitrary depth. + TarStreamFrom(ChainID) (io.ReadCloser, error) + + // ChainID returns the content hash of the entire layer chain. The hash + // chain is made up of DiffID of top layer and all of its parents. + ChainID() ChainID + + // DiffID returns the content hash of the layer + // tar stream used to create this layer. + DiffID() DiffID + + // Parent returns the next layer in the layer chain. + Parent() Layer + + // Size returns the size of the entire layer chain. The size + // is calculated from the total size of all files in the layers. + Size() (int64, error) + + // DiffSize returns the size difference of the top layer + // from parent layer. + DiffSize() (int64, error) + + // Metadata returns the low level storage metadata associated + // with the layer. 
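+ // The map contents are driver-specific and intended for debugging output.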
+ Metadata() (map[string]string, error) +} + +// RWLayer represents a layer which is +// read and writable +type RWLayer interface { + TarStreamer + + // Name of mounted layer + Name() string + + // Parent returns the layer which the writable + // layer was created from. + Parent() Layer + + // Mount mounts the RWLayer and returns the filesystem path + // to the writable layer. + Mount(mountLabel string) (containerfs.ContainerFS, error) + + // Unmount unmounts the RWLayer. This should be called + // for every mount. If there are multiple mount calls + // this operation will only decrement the internal mount counter. + Unmount() error + + // Size represents the size of the writable layer + // as calculated by the total size of the files + // changed in the mutable layer. + Size() (int64, error) + + // Changes returns the set of changes for the mutable layer + // from the base layer. + Changes() ([]archive.Change, error) + + // Metadata returns the low level metadata for the mutable layer + Metadata() (map[string]string, error) +} + +// Metadata holds information about a +// read-only layer +type Metadata struct { + // ChainID is the content hash of the layer + ChainID ChainID + + // DiffID is the hash of the tar data used to + // create the layer + DiffID DiffID + + // Size is the size of the layer and all parents + Size int64 + + // DiffSize is the size of the top layer + DiffSize int64 +} + +// MountInit is a function to initialize a +// writable mount. Changes made here will +// not be included in the Tar stream of the +// RWLayer. +type MountInit func(root containerfs.ContainerFS) error + +// CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer +type CreateRWLayerOpts struct { + MountLabel string + InitFunc MountInit + StorageOpt map[string]string +} + +// Store represents a backend for managing both +// read-only and read-write layers. +type Store interface { + Register(io.Reader, ChainID) (Layer, error) + Get(ChainID) (Layer, error) + Map() map[ChainID]Layer + Release(Layer) ([]Metadata, error) + + CreateRWLayer(id string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) + GetRWLayer(id string) (RWLayer, error) + GetMountID(id string) (string, error) + ReleaseRWLayer(RWLayer) ([]Metadata, error) + + Cleanup() error + DriverStatus() [][2]string + DriverName() string +} + +// DescribableStore represents a layer store capable of storing +// descriptors for layers. +type DescribableStore interface { + RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) +} + +// CreateChainID returns the ChainID for a slice of layer DiffIDs +func CreateChainID(dgsts []DiffID) ChainID { + return createChainIDFromParent("", dgsts...) +} + +func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { + if len(dgsts) == 0 { + return parent + } + if parent == "" { + return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) + } + // chainID(n) = SHA256(chainID(n-1) + " " + diffID(n)) + dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) + return createChainIDFromParent(ChainID(dgst), dgsts[1:]...) 
+} + +// ReleaseAndLog releases the provided layer from the given layer +// store, logging any error and release metadata +func ReleaseAndLog(ls Store, l Layer) { + metadata, err := ls.Release(l) + if err != nil { + logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) + } + LogReleaseMetadata(metadata) +} + +// LogReleaseMetadata logs a metadata array, uses this to +// ensure consistent logging for release metadata +func LogReleaseMetadata(metadatas []Metadata) { + for _, metadata := range metadatas { + logrus.Infof("Layer %s cleaned up", metadata.ChainID) + } +} diff --git a/vendor/github.com/docker/docker/layer/layer_store.go b/vendor/github.com/docker/docker/layer/layer_store.go new file mode 100644 index 0000000000..bf0705afc5 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store.go @@ -0,0 +1,750 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "sync" + + "github.com/docker/distribution" + "github.com/docker/docker/daemon/graphdriver" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// maxLayerDepth represents the maximum number of +// layers which can be chained together. 125 was +// chosen to account for the 127 max in some +// graphdrivers plus the 2 additional layers +// used to create a rwlayer. +const maxLayerDepth = 125 + +type layerStore struct { + store *fileMetadataStore + driver graphdriver.Driver + useTarSplit bool + + layerMap map[ChainID]*roLayer + layerL sync.Mutex + + mounts map[string]*mountedLayer + mountL sync.Mutex + os string +} + +// StoreOptions are the options used to create a new Store instance +type StoreOptions struct { + Root string + MetadataStorePathTemplate string + GraphDriver string + GraphDriverOptions []string + IDMappings *idtools.IDMappings + PluginGetter plugingetter.PluginGetter + ExperimentalEnabled bool + OS string +} + +// NewStoreFromOptions creates a new Store instance +func NewStoreFromOptions(options StoreOptions) (Store, error) { + driver, err := graphdriver.New(options.GraphDriver, options.PluginGetter, graphdriver.Options{ + Root: options.Root, + DriverOptions: options.GraphDriverOptions, + UIDMaps: options.IDMappings.UIDs(), + GIDMaps: options.IDMappings.GIDs(), + ExperimentalEnabled: options.ExperimentalEnabled, + }) + if err != nil { + return nil, fmt.Errorf("error initializing graphdriver: %v", err) + } + logrus.Debugf("Initialized graph driver %s", driver) + + root := fmt.Sprintf(options.MetadataStorePathTemplate, driver) + + return newStoreFromGraphDriver(root, driver, options.OS) +} + +// newStoreFromGraphDriver creates a new Store instance using the provided +// metadata store and graph driver. The metadata store will be used to restore +// the Store. 
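+// Layers or mounts whose metadata cannot be loaded are skipped with a debug +// log rather than failing startup (see the restore loops below).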
+func newStoreFromGraphDriver(root string, driver graphdriver.Driver, os string) (Store, error) { + if !system.IsOSSupported(os) { + return nil, fmt.Errorf("failed to initialize layer store as operating system '%s' is not supported", os) + } + caps := graphdriver.Capabilities{} + if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok { + caps = capDriver.Capabilities() + } + + ms, err := newFSMetadataStore(root) + if err != nil { + return nil, err + } + + ls := &layerStore{ + store: ms, + driver: driver, + layerMap: map[ChainID]*roLayer{}, + mounts: map[string]*mountedLayer{}, + useTarSplit: !caps.ReproducesExactDiffs, + os: os, + } + + ids, mounts, err := ms.List() + if err != nil { + return nil, err + } + + for _, id := range ids { + l, err := ls.loadLayer(id) + if err != nil { + logrus.Debugf("Failed to load layer %s: %s", id, err) + continue + } + if l.parent != nil { + l.parent.referenceCount++ + } + } + + for _, mount := range mounts { + if err := ls.loadMount(mount); err != nil { + logrus.Debugf("Failed to load mount %s: %s", mount, err) + } + } + + return ls, nil +} + +func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { + cl, ok := ls.layerMap[layer] + if ok { + return cl, nil + } + + diff, err := ls.store.GetDiffID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) + } + + size, err := ls.store.GetSize(layer) + if err != nil { + return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) + } + + cacheID, err := ls.store.GetCacheID(layer) + if err != nil { + return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) + } + + parent, err := ls.store.GetParent(layer) + if err != nil { + return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) + } + + descriptor, err := ls.store.GetDescriptor(layer) + if err != nil { + return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) + } + + os, err := ls.store.getOS(layer) + if err != nil { + return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err) + } + + if os != ls.os { + return nil, fmt.Errorf("failed to load layer with os %s into layerstore for %s", os, ls.os) + } + + cl = &roLayer{ + chainID: layer, + diffID: diff, + size: size, + cacheID: cacheID, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return nil, err + } + cl.parent = p + } + + ls.layerMap[cl.chainID] = cl + + return cl, nil +} + +func (ls *layerStore) loadMount(mount string) error { + if _, ok := ls.mounts[mount]; ok { + return nil + } + + mountID, err := ls.store.GetMountID(mount) + if err != nil { + return err + } + + initID, err := ls.store.GetInitID(mount) + if err != nil { + return err + } + + parent, err := ls.store.GetMountParent(mount) + if err != nil { + return err + } + + ml := &mountedLayer{ + name: mount, + mountID: mountID, + initID: initID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if parent != "" { + p, err := ls.loadLayer(parent) + if err != nil { + return err + } + ml.parent = p + + p.referenceCount++ + } + + ls.mounts[ml.name] = ml + + return nil +} + +func (ls *layerStore) applyTar(tx *fileMetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { + digester := digest.Canonical.Digester() + tr := io.TeeReader(ts, digester.Hash()) + + rdr := tr + if ls.useTarSplit { + tsw, err := tx.TarSplitWriter(true) + if err != nil { + return err + } + metaPacker := 
storage.NewJSONPacker(tsw) + defer tsw.Close() + + // we're passing nil here for the file putter, because the ApplyDiff will + // handle the extraction of the archive + rdr, err = asm.NewInputTarStream(tr, metaPacker, nil) + if err != nil { + return err + } + } + + applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, rdr) + if err != nil { + return err + } + + // Discard trailing data but ensure metadata is picked up to reconstruct stream + io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed + + layer.size = applySize + layer.diffID = DiffID(digester.Digest()) + + logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) + + return nil +} + +func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) +} + +func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). + var err error + var pid string + var p *roLayer + + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + if p.depth() >= maxLayerDepth { + err = ErrMaxDepthExceeded + return nil, err + } + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: stringid.GenerateRandomID(), + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + descriptor: descriptor, + } + + if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil { + return nil, err + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) + if err := ls.driver.Remove(layer.cacheID); err != nil { + logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) + } + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + if err = ls.applyTar(tx, ts, pid, layer); err != nil { + return nil, err + } + + if layer.parent == nil { + layer.chainID = ChainID(layer.diffID) + } else { + layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return the error + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { + l, ok := ls.layerMap[layer] + if !ok { + return nil + } + + l.referenceCount++ + + return l +} + +func (ls *layerStore) get(l ChainID) *roLayer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + return ls.getWithoutLock(l) +} + +func (ls *layerStore) Get(l ChainID) (Layer, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layer := ls.getWithoutLock(l) + if layer == nil { + return nil, ErrLayerDoesNotExist + } + + return 
layer.getReference(), nil +} + +func (ls *layerStore) Map() map[ChainID]Layer { + ls.layerL.Lock() + defer ls.layerL.Unlock() + + layers := map[ChainID]Layer{} + + for k, v := range ls.layerMap { + layers[k] = v + } + + return layers +} + +func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { + err := ls.driver.Remove(layer.cacheID) + if err != nil { + return err + } + err = ls.store.Remove(layer.chainID) + if err != nil { + return err + } + metadata.DiffID = layer.diffID + metadata.ChainID = layer.chainID + metadata.Size, err = layer.Size() + if err != nil { + return err + } + metadata.DiffSize = layer.size + + return nil +} + +func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { + depth := 0 + removed := []Metadata{} + for { + if l.referenceCount == 0 { + panic("layer not retained") + } + l.referenceCount-- + if l.referenceCount != 0 { + return removed, nil + } + + if len(removed) == 0 && depth > 0 { + panic("cannot remove layer with child") + } + if l.hasReferences() { + panic("cannot delete referenced layer") + } + var metadata Metadata + if err := ls.deleteLayer(l, &metadata); err != nil { + return nil, err + } + + delete(ls.layerMap, l.chainID) + removed = append(removed, metadata) + + if l.parent == nil { + return removed, nil + } + + depth++ + l = l.parent + } +} + +func (ls *layerStore) Release(l Layer) ([]Metadata, error) { + ls.layerL.Lock() + defer ls.layerL.Unlock() + layer, ok := ls.layerMap[l.ChainID()] + if !ok { + return []Metadata{}, nil + } + if !layer.hasReference(l) { + return nil, ErrLayerNotRetained + } + + layer.deleteReference(l) + + return ls.releaseLayer(layer) +} + +func (ls *layerStore) CreateRWLayer(name string, parent ChainID, opts *CreateRWLayerOpts) (RWLayer, error) { + var ( + storageOpt map[string]string + initFunc MountInit + mountLabel string + ) + + if opts != nil { + mountLabel = opts.MountLabel + storageOpt = opts.StorageOpt + initFunc = opts.InitFunc + } + + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + return nil, ErrMountNameConflict + } + + var err error + var pid string + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + pid = p.cacheID + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + m = &mountedLayer{ + name: name, + parent: p, + mountID: ls.mountID(name), + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + if initFunc != nil { + pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) + if err != nil { + return nil, err + } + m.initID = pid + } + + createOpts := &graphdriver.CreateOpts{ + StorageOpt: storageOpt, + } + + if err = ls.driver.CreateReadWrite(m.mountID, pid, createOpts); err != nil { + return nil, err + } + if err = ls.saveMount(m); err != nil { + return nil, err + } + + return m.getReference(), nil +} + +func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return nil, ErrMountDoesNotExist + } + + return mount.getReference(), nil +} + +func (ls *layerStore) GetMountID(id string) (string, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + mount, ok := ls.mounts[id] + if !ok { + return "", ErrMountDoesNotExist + } + logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) + + return mount.mountID, nil +} + +func (ls *layerStore) 
ReleaseRWLayer(l RWLayer) ([]Metadata, error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[l.Name()] + if !ok { + return []Metadata{}, nil + } + + if err := m.deleteReference(l); err != nil { + return nil, err + } + + if m.hasReferences() { + return []Metadata{}, nil + } + + if err := ls.driver.Remove(m.mountID); err != nil { + logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + if m.initID != "" { + if err := ls.driver.Remove(m.initID); err != nil { + logrus.Errorf("Error removing init layer %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + } + + if err := ls.store.RemoveMount(m.name); err != nil { + logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) + m.retakeReference(l) + return nil, err + } + + delete(ls.mounts, m.Name()) + + ls.layerL.Lock() + defer ls.layerL.Unlock() + if m.parent != nil { + return ls.releaseLayer(m.parent) + } + + return []Metadata{}, nil +} + +func (ls *layerStore) saveMount(mount *mountedLayer) error { + if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { + return err + } + + if mount.initID != "" { + if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { + return err + } + } + + if mount.parent != nil { + if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { + return err + } + } + + ls.mounts[mount.name] = mount + + return nil +} + +func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { + // Use "-init" to maintain compatibility with graph drivers + // which are expecting this layer with this special name. If all + // graph drivers can be updated to not rely on knowing about this layer + // then the initID should be randomly generated. 
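+ // For example, a graph ID of "abc123" yields an init layer named + // "abc123-init".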
+ initID := fmt.Sprintf("%s-init", graphID) + + createOpts := &graphdriver.CreateOpts{ + MountLabel: mountLabel, + StorageOpt: storageOpt, + } + + if err := ls.driver.CreateReadWrite(initID, parent, createOpts); err != nil { + return "", err + } + p, err := ls.driver.Get(initID, "") + if err != nil { + return "", err + } + + if err := initFunc(p); err != nil { + ls.driver.Put(initID) + return "", err + } + + if err := ls.driver.Put(initID); err != nil { + return "", err + } + + return initID, nil +} + +func (ls *layerStore) getTarStream(rl *roLayer) (io.ReadCloser, error) { + if !ls.useTarSplit { + var parentCacheID string + if rl.parent != nil { + parentCacheID = rl.parent.cacheID + } + + return ls.driver.Diff(rl.cacheID, parentCacheID) + } + + r, err := ls.store.TarSplitReader(rl.chainID) + if err != nil { + return nil, err + } + + pr, pw := io.Pipe() + go func() { + err := ls.assembleTarTo(rl.cacheID, r, nil, pw) + if err != nil { + pw.CloseWithError(err) + } else { + pw.Close() + } + }() + + return pr, nil +} + +func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { + diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) + if !ok { + diffDriver = &naiveDiffPathDriver{ls.driver} + } + + defer metadata.Close() + + // get our relative path to the container + fileGetCloser, err := diffDriver.DiffGetter(graphID) + if err != nil { + return err + } + defer fileGetCloser.Close() + + metaUnpacker := storage.NewJSONUnpacker(metadata) + upackerCounter := &unpackSizeCounter{metaUnpacker, size} + logrus.Debugf("Assembling tar data for %s", graphID) + return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) +} + +func (ls *layerStore) Cleanup() error { + return ls.driver.Cleanup() +} + +func (ls *layerStore) DriverStatus() [][2]string { + return ls.driver.Status() +} + +func (ls *layerStore) DriverName() string { + return ls.driver.String() +} + +type naiveDiffPathDriver struct { + graphdriver.Driver +} + +type fileGetPutter struct { + storage.FileGetter + driver graphdriver.Driver + id string +} + +func (w *fileGetPutter) Close() error { + return w.driver.Put(w.id) +} + +func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { + p, err := n.Driver.Get(id, "") + if err != nil { + return nil, err + } + return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil +} diff --git a/vendor/github.com/docker/docker/layer/layer_store_windows.go b/vendor/github.com/docker/docker/layer/layer_store_windows.go new file mode 100644 index 0000000000..eca1f6a83b --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_store_windows.go @@ -0,0 +1,11 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + + "github.com/docker/distribution" +) + +func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { + return ls.registerWithDescriptor(ts, parent, descriptor) +} diff --git a/vendor/github.com/docker/docker/layer/layer_unix.go b/vendor/github.com/docker/docker/layer/layer_unix.go new file mode 100644 index 0000000000..002c7ff838 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd darwin openbsd + +package layer // import "github.com/docker/docker/layer" + +import "github.com/docker/docker/pkg/stringid" + +func (ls *layerStore) mountID(name string) string { + return stringid.GenerateRandomID() +} diff --git 
a/vendor/github.com/docker/docker/layer/layer_windows.go b/vendor/github.com/docker/docker/layer/layer_windows.go new file mode 100644 index 0000000000..25ef26afc1 --- /dev/null +++ b/vendor/github.com/docker/docker/layer/layer_windows.go @@ -0,0 +1,46 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "errors" +) + +// Getter is an interface to get the path to a layer on the host. +type Getter interface { + // GetLayerPath gets the path for the layer. This is different from Get() + // since that returns an interface to account for umountable layers. + GetLayerPath(id string) (string, error) +} + +// GetLayerPath returns the path to a layer +func GetLayerPath(s Store, layer ChainID) (string, error) { + ls, ok := s.(*layerStore) + if !ok { + return "", errors.New("unsupported layer store") + } + ls.layerL.Lock() + defer ls.layerL.Unlock() + + rl, ok := ls.layerMap[layer] + if !ok { + return "", ErrLayerDoesNotExist + } + + if layerGetter, ok := ls.driver.(Getter); ok { + return layerGetter.GetLayerPath(rl.cacheID) + } + path, err := ls.driver.Get(rl.cacheID, "") + if err != nil { + return "", err + } + + if err := ls.driver.Put(rl.cacheID); err != nil { + return "", err + } + + return path.Path(), nil +} + +func (ls *layerStore) mountID(name string) string { + // windows has issues if container ID doesn't match mount ID + return name +} diff --git a/vendor/github.com/docker/docker/layer/migration.go b/vendor/github.com/docker/docker/layer/migration.go new file mode 100644 index 0000000000..2668ea96bb --- /dev/null +++ b/vendor/github.com/docker/docker/layer/migration.go @@ -0,0 +1,252 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "compress/gzip" + "errors" + "fmt" + "io" + "os" + + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// CreateRWLayerByGraphID creates a RWLayer in the layer store using +// the provided name with the given graphID. To get the RWLayer +// after migration the layer may be retrieved by the given name. 
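+// It is only used on the migration path from the pre-layerstore graph +// directory; calling it again with a matching parent and graph ID is a no-op.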
+func (ls *layerStore) CreateRWLayerByGraphID(name, graphID string, parent ChainID) (err error) { + ls.mountL.Lock() + defer ls.mountL.Unlock() + m, ok := ls.mounts[name] + if ok { + if m.parent.chainID != parent { + return errors.New("name conflict, mismatched parent") + } + if m.mountID != graphID { + return errors.New("mount already exists") + } + + return nil + } + + if !ls.driver.Exists(graphID) { + return fmt.Errorf("graph ID does not exist: %q", graphID) + } + + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // TODO: Ensure graphID has correct parent + + m = &mountedLayer{ + name: name, + parent: p, + mountID: graphID, + layerStore: ls, + references: map[RWLayer]*referencedRWLayer{}, + } + + // Check for existing init layer + initID := fmt.Sprintf("%s-init", graphID) + if ls.driver.Exists(initID) { + m.initID = initID + } + + return ls.saveMount(m) +} + +func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { + defer func() { + if err != nil { + logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) + diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) + } + }() + + if oldTarDataPath == "" { + err = errors.New("no tar-split file") + return + } + + tarDataFile, err := os.Open(oldTarDataPath) + if err != nil { + return + } + defer tarDataFile.Close() + uncompressed, err := gzip.NewReader(tarDataFile) + if err != nil { + return + } + + dgst := digest.Canonical.Digester() + err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) + if err != nil { + return + } + + diffID = DiffID(dgst.Digest()) + err = os.RemoveAll(newTarDataPath) + if err != nil { + return + } + err = os.Link(oldTarDataPath, newTarDataPath) + + return +} + +func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { + rawarchive, err := ls.driver.Diff(id, parent) + if err != nil { + return + } + defer rawarchive.Close() + + f, err := os.Create(newTarDataPath) + if err != nil { + return + } + defer f.Close() + mfz := gzip.NewWriter(f) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + packerCounter := &packSizeCounter{metaPacker, &size} + + archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) + if err != nil { + return + } + dgst, err := digest.FromReader(archive) + if err != nil { + return + } + diffID = DiffID(dgst) + return +} + +func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { + // err is used to hold the error which will always trigger + // cleanup of creates sources but may not be an error returned + // to the caller (already exists). 
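+ // The deferred cleanup below inspects err, so it must hold the final + // error value when this function returns.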
+ var err error + var p *roLayer + if string(parent) != "" { + p = ls.get(parent) + if p == nil { + return nil, ErrLayerDoesNotExist + } + + // Release parent chain if error + defer func() { + if err != nil { + ls.layerL.Lock() + ls.releaseLayer(p) + ls.layerL.Unlock() + } + }() + } + + // Create new roLayer + layer := &roLayer{ + parent: p, + cacheID: graphID, + referenceCount: 1, + layerStore: ls, + references: map[Layer]struct{}{}, + diffID: diffID, + size: size, + chainID: createChainIDFromParent(parent, diffID), + } + + ls.layerL.Lock() + defer ls.layerL.Unlock() + + if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { + // Set error for cleanup, but do not return + err = errors.New("layer already exists") + return existingLayer.getReference(), nil + } + + tx, err := ls.store.StartTransaction() + if err != nil { + return nil, err + } + + defer func() { + if err != nil { + logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) + if err := tx.Cancel(); err != nil { + logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) + } + } + }() + + tsw, err := tx.TarSplitWriter(false) + if err != nil { + return nil, err + } + defer tsw.Close() + tdf, err := os.Open(tarDataFile) + if err != nil { + return nil, err + } + defer tdf.Close() + _, err = io.Copy(tsw, tdf) + if err != nil { + return nil, err + } + + if err = storeLayer(tx, layer); err != nil { + return nil, err + } + + if err = tx.Commit(layer.chainID); err != nil { + return nil, err + } + + ls.layerMap[layer.chainID] = layer + + return layer.getReference(), nil +} + +type unpackSizeCounter struct { + unpacker storage.Unpacker + size *int64 +} + +func (u *unpackSizeCounter) Next() (*storage.Entry, error) { + e, err := u.unpacker.Next() + if err == nil && u.size != nil { + *u.size += e.Size + } + return e, err +} + +type packSizeCounter struct { + packer storage.Packer + size *int64 +} + +func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { + n, err := p.packer.AddEntry(e) + if err == nil && p.size != nil { + *p.size += e.Size + } + return n, err +} diff --git a/vendor/github.com/docker/docker/layer/mounted_layer.go b/vendor/github.com/docker/docker/layer/mounted_layer.go new file mode 100644 index 0000000000..d6858c662c --- /dev/null +++ b/vendor/github.com/docker/docker/layer/mounted_layer.go @@ -0,0 +1,100 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/containerfs" +) + +type mountedLayer struct { + name string + mountID string + initID string + parent *roLayer + path string + layerStore *layerStore + + references map[RWLayer]*referencedRWLayer +} + +func (ml *mountedLayer) cacheParent() string { + if ml.initID != "" { + return ml.initID + } + if ml.parent != nil { + return ml.parent.cacheID + } + return "" +} + +func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { + return ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Name() string { + return ml.name +} + +func (ml *mountedLayer) Parent() Layer { + if ml.parent != nil { + return ml.parent + } + + // Return a nil interface instead of an interface wrapping a nil + // pointer. 
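	// (If this returned ml.parent unconditionally, a nil *roLayer would be
	// wrapped in a non-nil Layer interface value, and callers' `parent == nil`
	// checks would never fire; hence the explicit nil return below.)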
+ return nil +} + +func (ml *mountedLayer) Size() (int64, error) { + return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Changes() ([]archive.Change, error) { + return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) +} + +func (ml *mountedLayer) Metadata() (map[string]string, error) { + return ml.layerStore.driver.GetMetadata(ml.mountID) +} + +func (ml *mountedLayer) getReference() RWLayer { + ref := &referencedRWLayer{ + mountedLayer: ml, + } + ml.references[ref] = ref + + return ref +} + +func (ml *mountedLayer) hasReferences() bool { + return len(ml.references) > 0 +} + +func (ml *mountedLayer) deleteReference(ref RWLayer) error { + if _, ok := ml.references[ref]; !ok { + return ErrLayerNotRetained + } + delete(ml.references, ref) + return nil +} + +func (ml *mountedLayer) retakeReference(r RWLayer) { + if ref, ok := r.(*referencedRWLayer); ok { + ml.references[ref] = ref + } +} + +type referencedRWLayer struct { + *mountedLayer +} + +func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) { + return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) +} + +// Unmount decrements the activity count and unmounts the underlying layer +// Callers should only call `Unmount` once per call to `Mount`, even on error. +func (rl *referencedRWLayer) Unmount() error { + return rl.layerStore.driver.Put(rl.mountedLayer.mountID) +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer.go b/vendor/github.com/docker/docker/layer/ro_layer.go new file mode 100644 index 0000000000..bc0fe1dddf --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer.go @@ -0,0 +1,178 @@ +package layer // import "github.com/docker/docker/layer" + +import ( + "fmt" + "io" + + "github.com/docker/distribution" + "github.com/opencontainers/go-digest" +) + +type roLayer struct { + chainID ChainID + diffID DiffID + parent *roLayer + cacheID string + size int64 + layerStore *layerStore + descriptor distribution.Descriptor + + referenceCount int + references map[Layer]struct{} +} + +// TarStream for roLayer guarantees that the data that is produced is the exact +// data that the layer was registered with. +func (rl *roLayer) TarStream() (io.ReadCloser, error) { + rc, err := rl.layerStore.getTarStream(rl) + if err != nil { + return nil, err + } + + vrc, err := newVerifiedReadCloser(rc, digest.Digest(rl.diffID)) + if err != nil { + return nil, err + } + return vrc, nil +} + +// TarStreamFrom does not make any guarantees to the correctness of the produced +// data. As such it should not be used when the layer content must be verified +// to be an exact match to the registered layer. 
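+//
+// Illustrative call (a sketch only; the parent ChainID is assumed to be
+// already known to the caller):
+//
+//	rc, err := rl.TarStreamFrom(parentChainID)
+//	if err == nil {
+//		defer rc.Close()
+//		// consume the (unverified) diff stream
+//	}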
+func (rl *roLayer) TarStreamFrom(parent ChainID) (io.ReadCloser, error) { + var parentCacheID string + for pl := rl.parent; pl != nil; pl = pl.parent { + if pl.chainID == parent { + parentCacheID = pl.cacheID + break + } + } + + if parent != ChainID("") && parentCacheID == "" { + return nil, fmt.Errorf("layer ID '%s' is not a parent of the specified layer: cannot provide diff to non-parent", parent) + } + return rl.layerStore.driver.Diff(rl.cacheID, parentCacheID) +} + +func (rl *roLayer) ChainID() ChainID { + return rl.chainID +} + +func (rl *roLayer) DiffID() DiffID { + return rl.diffID +} + +func (rl *roLayer) Parent() Layer { + if rl.parent == nil { + return nil + } + return rl.parent +} + +func (rl *roLayer) Size() (size int64, err error) { + if rl.parent != nil { + size, err = rl.parent.Size() + if err != nil { + return + } + } + + return size + rl.size, nil +} + +func (rl *roLayer) DiffSize() (size int64, err error) { + return rl.size, nil +} + +func (rl *roLayer) Metadata() (map[string]string, error) { + return rl.layerStore.driver.GetMetadata(rl.cacheID) +} + +type referencedCacheLayer struct { + *roLayer +} + +func (rl *roLayer) getReference() Layer { + ref := &referencedCacheLayer{ + roLayer: rl, + } + rl.references[ref] = struct{}{} + + return ref +} + +func (rl *roLayer) hasReference(ref Layer) bool { + _, ok := rl.references[ref] + return ok +} + +func (rl *roLayer) hasReferences() bool { + return len(rl.references) > 0 +} + +func (rl *roLayer) deleteReference(ref Layer) { + delete(rl.references, ref) +} + +func (rl *roLayer) depth() int { + if rl.parent == nil { + return 1 + } + return rl.parent.depth() + 1 +} + +func storeLayer(tx *fileMetadataTransaction, layer *roLayer) error { + if err := tx.SetDiffID(layer.diffID); err != nil { + return err + } + if err := tx.SetSize(layer.size); err != nil { + return err + } + if err := tx.SetCacheID(layer.cacheID); err != nil { + return err + } + // Do not store empty descriptors + if layer.descriptor.Digest != "" { + if err := tx.SetDescriptor(layer.descriptor); err != nil { + return err + } + } + if layer.parent != nil { + if err := tx.SetParent(layer.parent.chainID); err != nil { + return err + } + } + return tx.setOS(layer.layerStore.os) +} + +func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { + return &verifiedReadCloser{ + rc: rc, + dgst: dgst, + verifier: dgst.Verifier(), + }, nil +} + +type verifiedReadCloser struct { + rc io.ReadCloser + dgst digest.Digest + verifier digest.Verifier +} + +func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { + n, err = vrc.rc.Read(p) + if n > 0 { + if n, err := vrc.verifier.Write(p[:n]); err != nil { + return n, err + } + } + if err == io.EOF { + if !vrc.verifier.Verified() { + err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. 
Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) + } + } + return +} +func (vrc *verifiedReadCloser) Close() error { + return vrc.rc.Close() +} diff --git a/vendor/github.com/docker/docker/layer/ro_layer_windows.go b/vendor/github.com/docker/docker/layer/ro_layer_windows.go new file mode 100644 index 0000000000..a4f0c8088e --- /dev/null +++ b/vendor/github.com/docker/docker/layer/ro_layer_windows.go @@ -0,0 +1,9 @@ +package layer // import "github.com/docker/docker/layer" + +import "github.com/docker/distribution" + +var _ distribution.Describable = &roLayer{} + +func (rl *roLayer) Descriptor() distribution.Descriptor { + return rl.descriptor +} diff --git a/vendor/github.com/docker/docker/oci/defaults.go b/vendor/github.com/docker/docker/oci/defaults.go new file mode 100644 index 0000000000..4145412dd4 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/defaults.go @@ -0,0 +1,211 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "os" + "runtime" + + "github.com/opencontainers/runtime-spec/specs-go" +) + +func iPtr(i int64) *int64 { return &i } +func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } +func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } + +func defaultCapabilities() []string { + return []string{ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + } +} + +// DefaultSpec returns the default spec used by docker for the current Platform +func DefaultSpec() specs.Spec { + return DefaultOSSpec(runtime.GOOS) +} + +// DefaultOSSpec returns the spec for a given OS +func DefaultOSSpec(osName string) specs.Spec { + if osName == "windows" { + return DefaultWindowsSpec() + } + return DefaultLinuxSpec() +} + +// DefaultWindowsSpec create a default spec for running Windows containers +func DefaultWindowsSpec() specs.Spec { + return specs.Spec{ + Version: specs.Version, + Windows: &specs.Windows{}, + Process: &specs.Process{}, + Root: &specs.Root{}, + } +} + +// DefaultLinuxSpec create a default spec for running Linux containers +func DefaultLinuxSpec() specs.Spec { + s := specs.Spec{ + Version: specs.Version, + Process: &specs.Process{ + Capabilities: &specs.LinuxCapabilities{ + Bounding: defaultCapabilities(), + Permitted: defaultCapabilities(), + Inheritable: defaultCapabilities(), + Effective: defaultCapabilities(), + }, + }, + Root: &specs.Root{}, + } + s.Mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "proc", + Source: "proc", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev", + Type: "tmpfs", + Source: "tmpfs", + Options: []string{"nosuid", "strictatime", "mode=755", "size=65536k"}, + }, + { + Destination: "/dev/pts", + Type: "devpts", + Source: "devpts", + Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, + }, + { + Destination: "/sys", + Type: "sysfs", + Source: "sysfs", + Options: []string{"nosuid", "noexec", "nodev", "ro"}, + }, + { + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"ro", "nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/mqueue", + Type: "mqueue", + Source: "mqueue", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + { + Destination: "/dev/shm", + Type: "tmpfs", + Source: "shm", + Options: []string{"nosuid", "noexec", "nodev", "mode=1777"}, + }, + } + + s.Linux = 
&specs.Linux{ + MaskedPaths: []string{ + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/proc/scsi", + "/sys/firmware", + }, + ReadonlyPaths: []string{ + "/proc/asound", + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger", + }, + Namespaces: []specs.LinuxNamespace{ + {Type: "mount"}, + {Type: "network"}, + {Type: "uts"}, + {Type: "pid"}, + {Type: "ipc"}, + }, + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind mount or symlink of the container's ptmx. + // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + Devices: []specs.LinuxDevice{}, + Resources: &specs.LinuxResources{ + Devices: []specs.LinuxDeviceCgroup{ + { + Allow: false, + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(5), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(3), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(9), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(1), + Minor: iPtr(8), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(0), + Access: "rwm", + }, + { + Allow: true, + Type: "c", + Major: iPtr(5), + Minor: iPtr(1), + Access: "rwm", + }, + { + Allow: false, + Type: "c", + Major: iPtr(10), + Minor: iPtr(229), + Access: "rwm", + }, + }, + }, + } + + // For LCOW support, populate a blank Windows spec + if runtime.GOOS == "windows" { + s.Windows = &specs.Windows{} + } + + return s +} diff --git a/vendor/github.com/docker/docker/oci/devices_linux.go b/vendor/github.com/docker/docker/oci/devices_linux.go new file mode 100644 index 0000000000..7f1658e53c --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_linux.go @@ -0,0 +1,86 @@ +package oci // import "github.com/docker/docker/oci" + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/devices" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.LinuxDevice object. +func Device(d *configs.Device) specs.LinuxDevice { + return specs.LinuxDevice{ + Type: string(d.Type), + Path: d.Path, + Major: d.Major, + Minor: d.Minor, + FileMode: fmPtr(int64(d.FileMode)), + UID: u32Ptr(int64(d.Uid)), + GID: u32Ptr(int64(d.Gid)), + } +} + +func deviceCgroup(d *configs.Device) specs.LinuxDeviceCgroup { + t := string(d.Type) + return specs.LinuxDeviceCgroup{ + Allow: true, + Type: t, + Major: &d.Major, + Minor: &d.Minor, + Access: d.Permissions, + } +} + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. 
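+//
+// For example (an illustrative sketch, not upstream code):
+//
+//	devs, perms, err := DevicesFromPath("/dev/fuse", "/dev/fuse", "rwm")
+//	// devs would populate spec.Linux.Devices, and perms
+//	// spec.Linux.Resources.Devices, for the container.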
+func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + resolvedPathOnHost := pathOnHost + + // check if it is a symbolic link + if src, e := os.Lstat(pathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, e := filepath.EvalSymlinks(pathOnHost); e == nil { + resolvedPathOnHost = linkedPathOnHost + } + } + + device, err := devices.DeviceFromPath(resolvedPathOnHost, cgroupPermissions) + // if there was no error, return the device + if err == nil { + device.Path = pathInContainer + return append(devs, Device(device)), append(devPermissions, deviceCgroup(device)), nil + } + + // if the device is not a device node + // try to see if it's a directory holding many devices + if err == devices.ErrNotADevice { + + // check if it is a directory + if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { + + // mount the internal devices recursively + filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { + childDevice, e := devices.DeviceFromPath(dpath, cgroupPermissions) + if e != nil { + // ignore the device + return nil + } + + // add the device to userSpecified devices + childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, pathInContainer, 1) + devs = append(devs, Device(childDevice)) + devPermissions = append(devPermissions, deviceCgroup(childDevice)) + + return nil + }) + } + } + + if len(devs) > 0 { + return devs, devPermissions, nil + } + + return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", pathOnHost, err) +} diff --git a/vendor/github.com/docker/docker/oci/devices_unsupported.go b/vendor/github.com/docker/docker/oci/devices_unsupported.go new file mode 100644 index 0000000000..af6dd3bda2 --- /dev/null +++ b/vendor/github.com/docker/docker/oci/devices_unsupported.go @@ -0,0 +1,20 @@ +// +build !linux + +package oci // import "github.com/docker/docker/oci" + +import ( + "errors" + + "github.com/opencontainers/runc/libcontainer/configs" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Device transforms a libcontainer configs.Device to a specs.Device object. +// Not implemented +func Device(d *configs.Device) specs.LinuxDevice { return specs.LinuxDevice{} } + +// DevicesFromPath computes a list of devices and device permissions from paths (pathOnHost and pathInContainer) and cgroup permissions. +// Not implemented +func DevicesFromPath(pathOnHost, pathInContainer, cgroupPermissions string) (devs []specs.LinuxDevice, devPermissions []specs.LinuxDeviceCgroup, err error) { + return nil, nil, errors.New("oci/devices: unsupported platform") +} diff --git a/vendor/github.com/docker/docker/oci/namespaces.go b/vendor/github.com/docker/docker/oci/namespaces.go new file mode 100644 index 0000000000..f32e489b4a --- /dev/null +++ b/vendor/github.com/docker/docker/oci/namespaces.go @@ -0,0 +1,13 @@ +package oci // import "github.com/docker/docker/oci" + +import specs "github.com/opencontainers/runtime-spec/specs-go" + +// RemoveNamespace removes the `nsType` namespace from OCI spec `s` +func RemoveNamespace(s *specs.Spec, nsType specs.LinuxNamespaceType) { + for i, n := range s.Linux.Namespaces { + if n.Type == nsType { + s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...) 
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/docker/docker/opts/address_pools.go
new file mode 100644
index 0000000000..99c89bb626
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/address_pools.go
@@ -0,0 +1,84 @@
+package opts
+
+import (
+	"encoding/csv"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+
+	types "github.com/docker/libnetwork/ipamutils"
+)
+
+// PoolsOpt is a Value type for parsing the default address pools definitions
+type PoolsOpt struct {
+	values []*types.NetworkToSplit
+}
+
+// UnmarshalJSON fills the values structure from JSON input
+func (p *PoolsOpt) UnmarshalJSON(raw []byte) error {
+	return json.Unmarshal(raw, &(p.values))
+}
+
+// Set predefined pools
+func (p *PoolsOpt) Set(value string) error {
+	csvReader := csv.NewReader(strings.NewReader(value))
+	fields, err := csvReader.Read()
+	if err != nil {
+		return err
+	}
+
+	poolsDef := types.NetworkToSplit{}
+
+	for _, field := range fields {
+		parts := strings.SplitN(field, "=", 2)
+		if len(parts) != 2 {
+			return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
+		}
+
+		key := strings.ToLower(parts[0])
+		value := strings.ToLower(parts[1])
+
+		switch key {
+		case "base":
+			poolsDef.Base = value
+		case "size":
+			size, err := strconv.Atoi(value)
+			if err != nil {
+				return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err)
+			}
+			poolsDef.Size = size
+		default:
+			return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
+		}
+	}
+
+	p.values = append(p.values, &poolsDef)
+
+	return nil
+}
+
+// Type returns the type of this option
+func (p *PoolsOpt) Type() string {
+	return "pool-options"
+}
+
+// String returns a string representation of this option
+func (p *PoolsOpt) String() string {
+	pools := []string{}
+	for _, pool := range p.values {
+		repr := fmt.Sprintf("%s %d", pool.Base, pool.Size)
+		pools = append(pools, repr)
+	}
+	return strings.Join(pools, ", ")
+}
+
+// Value returns the address pools
+func (p *PoolsOpt) Value() []*types.NetworkToSplit {
+	return p.values
+}
+
+// Name returns the flag name of this option
+func (p *PoolsOpt) Name() string {
+	return "default-address-pools"
+}
diff --git a/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/docker/docker/opts/env.go
new file mode 100644
index 0000000000..f6e5e9074d
--- /dev/null
+++ b/vendor/github.com/docker/docker/opts/env.go
@@ -0,0 +1,48 @@
+package opts // import "github.com/docker/docker/opts"
+
+import (
+	"fmt"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ValidateEnv validates an environment variable and returns it.
+// If no value is specified, it returns the current value using os.Getenv.
+//
+// As with ParseEnvFile and related to #16585, environment variable names
+// are not validated at all; it is up to the application inside docker
+// to validate them or not.
+//
+// The only validation here is to check that the name is not empty, per #25099
+func ValidateEnv(val string) (string, error) {
+	arr := strings.Split(val, "=")
+	if arr[0] == "" {
+		return "", errors.Errorf("invalid environment variable: %s", val)
+	}
+	if len(arr) > 1 {
+		return val, nil
+	}
+	if !doesEnvExist(val) {
+		return val, nil
+	}
+	return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
+}
+
+func doesEnvExist(name string) bool {
+	for _, entry := range os.Environ() {
+		parts := strings.SplitN(entry, "=", 2)
+		if runtime.GOOS == "windows" {
+			// Environment variables are case-insensitive on Windows; PaTh, path and PATH are equivalent.
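+			// e.g. with PATH set on Windows, doesEnvExist("PaTh") matches
+			// here; on other platforms only the exact match below counts.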
+ if strings.EqualFold(parts[0], name) { + return true + } + } + if parts[0] == name { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/docker/docker/opts/hosts.go new file mode 100644 index 0000000000..2adf4211d5 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,165 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" + "net/url" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp:// + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. + // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) + // DefaultNamedPipe defines the default named pipe used by docker on Windows + DefaultNamedPipe = `//./pipe/docker_engine` +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + host := strings.TrimSpace(val) + // The empty string means default and is not handled by parseDaemonHost + if host != "" { + _, err := parseDaemonHost(host) + if err != nil { + return val, err + } + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for TLS + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultToTLS bool, val string) (string, error) { + host := strings.TrimSpace(val) + if host == "" { + if defaultToTLS { + host = DefaultTLSHost + } else { + host = DefaultHost + } + } else { + var err error + host, err = parseDaemonHost(host) + if err != nil { + return val, err + } + } + return host, nil +} + +// parseDaemonHost parses the specified address and returns an address that will be used as the host. +// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. +func parseDaemonHost(addr string) (string, error) { + addrParts := strings.SplitN(addr, "://", 2) + if len(addrParts) == 1 && addrParts[0] != "" { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return ParseTCPAddr(addrParts[1], DefaultTCPHost) + case "unix": + return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) + case "npipe": + return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseSimpleProtoAddr parses and validates that the specified address is a valid +// socket address for simple protocols like unix and npipe. 
It returns a formatted +// socket address, either using the address parsed from addr, or the contents of +// defaultAddr if addr is a blank string. +func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, proto+"://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("%s://%s", proto, addr), nil +} + +// ParseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. +// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + // try port addition once + host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) + } + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} + +// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). 
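+//
+// Illustrative outcomes (not upstream code):
+//
+//	ValidateExtraHost("db:10.0.0.5") // ok
+//	ValidateExtraHost("db:::1")      // ok; only the first ':' splits, so IPv6 works
+//	ValidateExtraHost("10.0.0.5")    // error: missing name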
+func ValidateExtraHost(val string) (string, error) { + // allow for IPv6 addresses in extra hosts by only splitting on first ":" + arr := strings.SplitN(val, ":", 2) + if len(arr) != 2 || len(arr[0]) == 0 { + return "", fmt.Errorf("bad format for add-host: %q", val) + } + if _, err := ValidateIPAddress(arr[1]); err != nil { + return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) + } + return val, nil +} diff --git a/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/docker/docker/opts/hosts_unix.go new file mode 100644 index 0000000000..9d5bb64565 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package opts // import "github.com/docker/docker/opts" + +import "fmt" + +// DefaultHost constant defines the default host string used by docker on other hosts than Windows +var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/docker/docker/opts/hosts_windows.go new file mode 100644 index 0000000000..906eba53ee --- /dev/null +++ b/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -0,0 +1,4 @@ +package opts // import "github.com/docker/docker/opts" + +// DefaultHost constant defines the default host string used by docker on Windows +var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/docker/docker/opts/ip.go new file mode 100644 index 0000000000..cfbff3a9fd --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ip.go @@ -0,0 +1,47 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" +) + +// IPOpt holds an IP. It is used to store values from CLI flags. +type IPOpt struct { + *net.IP +} + +// NewIPOpt creates a new IPOpt from a reference net.IP and a +// string representation of an IP. If the string is not a valid +// IP it will fallback to the specified reference. +func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { + o := &IPOpt{ + IP: ref, + } + o.Set(defaultVal) + return o +} + +// Set sets an IPv4 or IPv6 address from a given string. If the given +// string is not parsable as an IP address it returns an error. +func (o *IPOpt) Set(val string) error { + ip := net.ParseIP(val) + if ip == nil { + return fmt.Errorf("%s is not an ip address", val) + } + *o.IP = ip + return nil +} + +// String returns the IP address stored in the IPOpt. If stored IP is a +// nil pointer, it returns an empty string. +func (o *IPOpt) String() string { + if *o.IP == nil { + return "" + } + return o.IP.String() +} + +// Type returns the type of the option +func (o *IPOpt) Type() string { + return "ip" +} diff --git a/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/docker/docker/opts/opts.go new file mode 100644 index 0000000000..bfdcb996b0 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts.go @@ -0,0 +1,337 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "net" + "path" + "regexp" + "strings" + + units "github.com/docker/go-units" +) + +var ( + alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) + domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) +) + +// ListOpts holds a list of values and a validation function. 
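+//
+// A short usage sketch (illustrative only):
+//
+//	labels := NewListOpts(ValidateLabel)
+//	_ = labels.Set("env=prod")         // validated, then appended
+//	err := labels.Set("not-a-label")   // rejected by ValidateLabel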
+type ListOpts struct {
+	values    *[]string
+	validator ValidatorFctType
+}
+
+// NewListOpts creates a new ListOpts with the specified validator.
+func NewListOpts(validator ValidatorFctType) ListOpts {
+	var values []string
+	return *NewListOptsRef(&values, validator)
+}
+
+// NewListOptsRef creates a new ListOpts with the specified values and validator.
+func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
+	return &ListOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+func (opts *ListOpts) String() string {
+	if len(*opts.values) == 0 {
+		return ""
+	}
+	return fmt.Sprintf("%v", *opts.values)
+}
+
+// Set runs the validator, if one is set, and then adds the value to the
+// internal slice.
+func (opts *ListOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	(*opts.values) = append((*opts.values), value)
+	return nil
+}
+
+// Delete removes the specified element from the slice.
+func (opts *ListOpts) Delete(key string) {
+	for i, k := range *opts.values {
+		if k == key {
+			(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
+			return
+		}
+	}
+}
+
+// GetMap returns the values as a map, which also removes any duplicates.
+func (opts *ListOpts) GetMap() map[string]struct{} {
+	ret := make(map[string]struct{})
+	for _, k := range *opts.values {
+		ret[k] = struct{}{}
+	}
+	return ret
+}
+
+// GetAll returns the values of the slice.
+func (opts *ListOpts) GetAll() []string {
+	return (*opts.values)
+}
+
+// GetAllOrEmpty returns the values of the slice
+// or an empty slice when there are no values.
+func (opts *ListOpts) GetAllOrEmpty() []string {
+	v := *opts.values
+	if v == nil {
+		return make([]string, 0)
+	}
+	return v
+}
+
+// Get checks the existence of the specified key.
+func (opts *ListOpts) Get(key string) bool {
+	for _, k := range *opts.values {
+		if k == key {
+			return true
+		}
+	}
+	return false
+}
+
+// Len returns the number of elements in the slice.
+func (opts *ListOpts) Len() int {
+	return len((*opts.values))
+}
+
+// Type returns a string name for this Option type
+func (opts *ListOpts) Type() string {
+	return "list"
+}
+
+// WithValidator returns the ListOpts with validator set.
+func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
+	opts.validator = validator
+	return opts
+}
+
+// NamedOption is an interface that list and map options
+// with names implement.
+type NamedOption interface {
+	Name() string
+}
+
+// NamedListOpts is a ListOpts with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedListOpts struct {
+	name string
+	ListOpts
+}
+
+var _ NamedOption = &NamedListOpts{}
+
+// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
+func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
+	return &NamedListOpts{
+		name:     name,
+		ListOpts: *NewListOptsRef(values, validator),
+	}
+}
+
+// Name returns the name of the NamedListOpts in the configuration.
+func (o *NamedListOpts) Name() string {
+	return o.name
+}
+
+// MapOpts holds a map of values and a validation function.
+type MapOpts struct {
+	values    map[string]string
+	validator ValidatorFctType
+}
+
+// Set runs the validator, if one is set, then splits the value on '=' and
+// adds the pair to the internal map.
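+// For example (illustrative): Set("FOO=bar") stores {"FOO": "bar"}, while
+// Set("FOO") stores {"FOO": ""}.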
+func (opts *MapOpts) Set(value string) error {
+	if opts.validator != nil {
+		v, err := opts.validator(value)
+		if err != nil {
+			return err
+		}
+		value = v
+	}
+	vals := strings.SplitN(value, "=", 2)
+	if len(vals) == 1 {
+		(opts.values)[vals[0]] = ""
+	} else {
+		(opts.values)[vals[0]] = vals[1]
+	}
+	return nil
+}
+
+// GetAll returns the values of MapOpts as a map.
+func (opts *MapOpts) GetAll() map[string]string {
+	return opts.values
+}
+
+func (opts *MapOpts) String() string {
+	return fmt.Sprintf("%v", opts.values)
+}
+
+// Type returns a string name for this Option type
+func (opts *MapOpts) Type() string {
+	return "map"
+}
+
+// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
+func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
+	if values == nil {
+		values = make(map[string]string)
+	}
+	return &MapOpts{
+		values:    values,
+		validator: validator,
+	}
+}
+
+// NamedMapOpts is a MapOpts struct with a configuration name.
+// This struct is useful to keep a reference to the assigned
+// field name in the internal configuration struct.
+type NamedMapOpts struct {
+	name string
+	MapOpts
+}
+
+var _ NamedOption = &NamedMapOpts{}
+
+// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
+func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
+	return &NamedMapOpts{
+		name:    name,
+		MapOpts: *NewMapOpts(values, validator),
+	}
+}
+
+// Name returns the name of the NamedMapOpts in the configuration.
+func (o *NamedMapOpts) Name() string {
+	return o.name
+}
+
+// ValidatorFctType defines a validator function that returns a validated string and/or an error.
+type ValidatorFctType func(val string) (string, error)
+
+// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error
+type ValidatorFctListType func(val string) ([]string, error)
+
+// ValidateIPAddress validates an IP address.
+func ValidateIPAddress(val string) (string, error) {
+	var ip = net.ParseIP(strings.TrimSpace(val))
+	if ip != nil {
+		return ip.String(), nil
+	}
+	return "", fmt.Errorf("%s is not an ip address", val)
+}
+
+// ValidateDNSSearch validates a domain for the resolvconf search configuration.
+// A zero-length domain is represented by a dot (.).
+func ValidateDNSSearch(val string) (string, error) {
+	if val = strings.Trim(val, " "); val == "." {
+		return val, nil
+	}
+	return validateDomain(val)
+}
+
+func validateDomain(val string) (string, error) {
+	if alphaRegexp.FindString(val) == "" {
+		return "", fmt.Errorf("%s is not a valid domain", val)
+	}
+	ns := domainRegexp.FindSubmatch([]byte(val))
+	if len(ns) > 0 && len(ns[1]) < 255 {
+		return string(ns[1]), nil
+	}
+	return "", fmt.Errorf("%s is not a valid domain", val)
+}
+
+// ValidateLabel validates that the specified string is a valid label, and returns it.
+// Labels are in the form of key=value.
+func ValidateLabel(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("bad attribute format: %s", val)
+	}
+	return val, nil
+}
+
+// ValidateSingleGenericResource validates that a single entry in the
+// generic resource list is valid.
+// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) + } + return val, nil +} + +// ParseLink parses and validates the specified string as a link format (name:alias) +func ParseLink(val string) (string, string, error) { + if val == "" { + return "", "", fmt.Errorf("empty string specified for links") + } + arr := strings.Split(val, ":") + if len(arr) > 2 { + return "", "", fmt.Errorf("bad format for links: %s", val) + } + if len(arr) == 1 { + return val, val, nil + } + // This is kept because we can actually get a HostConfig with links + // from an already created container and the format is not `foo:bar` + // but `/foo:/c1/bar` + if strings.HasPrefix(arr[0], "/") { + _, alias := path.Split(arr[1]) + return arr[0][1:], alias, nil + } + return arr[0], arr[1], nil +} + +// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) +type MemBytes int64 + +// String returns the string format of the human readable memory bytes +func (m *MemBytes) String() string { + // NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not. + // We return "0" in case value is 0 here so that the default value is hidden. + // (Sometimes "default 0 B" is actually misleading) + if m.Value() != 0 { + return units.BytesSize(float64(m.Value())) + } + return "0" +} + +// Set sets the value of the MemBytes by passing a string +func (m *MemBytes) Set(value string) error { + val, err := units.RAMInBytes(value) + *m = MemBytes(val) + return err +} + +// Type returns the type +func (m *MemBytes) Type() string { + return "bytes" +} + +// Value returns the value in int64 +func (m *MemBytes) Value() int64 { + return int64(*m) +} + +// UnmarshalJSON is the customized unmarshaler for MemBytes +func (m *MemBytes) UnmarshalJSON(s []byte) error { + if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' { + return fmt.Errorf("invalid size: %q", s) + } + val, err := units.RAMInBytes(string(s[1 : len(s)-1])) + *m = MemBytes(val) + return err +} diff --git a/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 0000000000..0c32367cb2 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts // import "github.com/docker/docker/opts" + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 0000000000..0e1b6c6d18 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts // import "github.com/docker/docker/opts" + +// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. +// @jhowardmsft, @swernli. +// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP5. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. 
For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... +// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/docker/docker/opts/quotedstring.go new file mode 100644 index 0000000000..6c889070e8 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/quotedstring.go @@ -0,0 +1,37 @@ +package opts // import "github.com/docker/docker/opts" + +// QuotedString is a string that may have extra quotes around the value. The +// quotes are stripped from the value. 
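+//
+// Illustrative behaviour (not upstream code):
+//
+//	v := ""
+//	s := NewQuotedString(&v)
+//	_ = s.Set(`'quoted'`)  // stored as: quoted
+//	_ = s.Set("plain")     // stored unchanged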
+type QuotedString struct { + value *string +} + +// Set sets a new value +func (s *QuotedString) Set(val string) error { + *s.value = trimQuotes(val) + return nil +} + +// Type returns the type of the value +func (s *QuotedString) Type() string { + return "string" +} + +func (s *QuotedString) String() string { + return *s.value +} + +func trimQuotes(value string) string { + lastIndex := len(value) - 1 + for _, char := range []byte{'\'', '"'} { + if value[0] == char && value[lastIndex] == char { + return value[1:lastIndex] + } + } + return value +} + +// NewQuotedString returns a new quoted string option +func NewQuotedString(value *string) *QuotedString { + return &QuotedString{value: value} +} diff --git a/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/docker/docker/opts/runtime.go new file mode 100644 index 0000000000..4b9babf0a5 --- /dev/null +++ b/vendor/github.com/docker/docker/opts/runtime.go @@ -0,0 +1,79 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types" +) + +// RuntimeOpt defines a map of Runtimes +type RuntimeOpt struct { + name string + stockRuntimeName string + values *map[string]types.Runtime +} + +// NewNamedRuntimeOpt creates a new RuntimeOpt +func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { + if ref == nil { + ref = &map[string]types.Runtime{} + } + return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} +} + +// Name returns the name of the NamedListOpts in the configuration. +func (o *RuntimeOpt) Name() string { + return o.name +} + +// Set validates and updates the list of Runtimes +func (o *RuntimeOpt) Set(val string) error { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.TrimSpace(parts[0]) + parts[1] = strings.TrimSpace(parts[1]) + if parts[0] == "" || parts[1] == "" { + return fmt.Errorf("invalid runtime argument: %s", val) + } + + parts[0] = strings.ToLower(parts[0]) + if parts[0] == o.stockRuntimeName { + return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) + } + + if _, ok := (*o.values)[parts[0]]; ok { + return fmt.Errorf("runtime '%s' was already defined", parts[0]) + } + + (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} + + return nil +} + +// String returns Runtime values as a string. 
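+//
+// For orientation (values are made up): after Set("kata=/usr/bin/kata-runtime"),
+// String() prints [kata] and GetMap() yields {"kata": {Path: "/usr/bin/kata-runtime"}};
+// a Set using the stock runtime name, e.g. "runc=/usr/local/bin/runc" when the
+// stock runtime is "runc", is rejected as reserved.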
+func (o *RuntimeOpt) String() string { + var out []string + for k := range *o.values { + out = append(out, k) + } + + return fmt.Sprintf("%v", out) +} + +// GetMap returns a map of Runtimes (name: path) +func (o *RuntimeOpt) GetMap() map[string]types.Runtime { + if o.values != nil { + return *o.values + } + + return map[string]types.Runtime{} +} + +// Type returns the type of the option +func (o *RuntimeOpt) Type() string { + return "runtime" +} diff --git a/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/docker/docker/opts/ulimit.go new file mode 100644 index 0000000000..0e2a36236c --- /dev/null +++ b/vendor/github.com/docker/docker/opts/ulimit.go @@ -0,0 +1,81 @@ +package opts // import "github.com/docker/docker/opts" + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UlimitOpt defines a map of Ulimits +type UlimitOpt struct { + values *map[string]*units.Ulimit +} + +// NewUlimitOpt creates a new UlimitOpt +func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &UlimitOpt{ref} +} + +// Set validates a Ulimit and sets its name as a key in UlimitOpt +func (o *UlimitOpt) Set(val string) error { + l, err := units.ParseUlimit(val) + if err != nil { + return err + } + + (*o.values)[l.Name] = l + + return nil +} + +// String returns Ulimit values as a string. +func (o *UlimitOpt) String() string { + var out []string + for _, v := range *o.values { + out = append(out, v.String()) + } + + return fmt.Sprintf("%v", out) +} + +// GetList returns a slice of pointers to Ulimits. +func (o *UlimitOpt) GetList() []*units.Ulimit { + var ulimits []*units.Ulimit + for _, v := range *o.values { + ulimits = append(ulimits, v) + } + + return ulimits +} + +// Type returns the option type +func (o *UlimitOpt) Type() string { + return "ulimit" +} + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index e8f50869d9..5fb3995e9b 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -17,6 +17,7 @@ import ( "strconv" "strings" "syscall" + "time" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" @@ -360,6 +361,10 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro if err != nil { return nil, err } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { @@ -1158,6 +1163,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { if err != nil { return err } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} hdr.Name = filepath.Base(dst) hdr.Mode = 
int64(chmodTarEntry(os.FileMode(hdr.Mode))) diff --git a/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go new file mode 100644 index 0000000000..6bb285123f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/broadcaster/unbuffered.go @@ -0,0 +1,49 @@ +package broadcaster // import "github.com/docker/docker/pkg/broadcaster" + +import ( + "io" + "sync" +) + +// Unbuffered accumulates multiple io.WriteCloser by stream. +type Unbuffered struct { + mu sync.Mutex + writers []io.WriteCloser +} + +// Add adds new io.WriteCloser. +func (w *Unbuffered) Add(writer io.WriteCloser) { + w.mu.Lock() + w.writers = append(w.writers, writer) + w.mu.Unlock() +} + +// Write writes bytes to all writers. Failed writers will be evicted during +// this call. +func (w *Unbuffered) Write(p []byte) (n int, err error) { + w.mu.Lock() + var evict []int + for i, sw := range w.writers { + if n, err := sw.Write(p); err != nil || n != len(p) { + // On error, evict the writer + evict = append(evict, i) + } + } + for n, i := range evict { + w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) + } + w.mu.Unlock() + return len(p), nil +} + +// Clean closes and removes all writers. Last non-eol-terminated part of data +// will be saved. +func (w *Unbuffered) Clean() error { + w.mu.Lock() + for _, sw := range w.writers { + sw.Close() + } + w.writers = nil + w.mu.Unlock() + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 0000000000..47c9a2b94c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,73 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{ + Untar: Untar, + IDMappingsVar: idMappings, + } +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
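+//
+// A minimal caller sketch (the paths are assumptions, not upstream code):
+//
+//	f, err := os.Open("/tmp/layer.tar")
+//	if err == nil {
+//		defer f.Close()
+//		err = UntarUncompressed(f, "/var/lib/app/rootfs", nil)
+//	}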
+func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 0000000000..5df8afd662 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,88 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 0000000000..f2973132a3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go similarity index 86% rename from vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go rename to vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go index e8bd22e36b..9802fad514 100644 --- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -1,4 +1,4 @@ -package chrootarchive +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" import ( "fmt" @@ -6,7 +6,7 @@ import ( "os" "path/filepath" - "github.com/containers/storage/pkg/mount" + "github.com/docker/docker/pkg/mount" rsystem "github.com/opencontainers/runc/libcontainer/system" "golang.org/x/sys/unix" ) @@ -26,8 +26,13 @@ func chroot(path string) (err error) { return fmt.Errorf("Error creating mount namespace before pivot: %v", err) } - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. 
+ if err := mount.MakeRSlave("/"); err != nil { return err } diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 0000000000..9a1ee58754 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 0000000000..7712cc17c8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 0000000000..d96a09f8fa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
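A minimal sketch of the re-exec pattern this entry-point relies on, for readers unfamiliar with it ("my-helper" is a hypothetical name; this package's real registrations are in init_unix.go further down):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Associate an argv[0] value with a handler, the same way this
	// package registers "docker-untar" and "docker-applyLayer".
	reexec.Register("my-helper", func() {
		fmt.Println("running in the re-exec'd child")
	})
}

func main() {
	// In the child, argv[0] is "my-helper", so Init runs the handler
	// and returns true; in the parent it returns false.
	if reexec.Init() {
		return
	}
	out, err := reexec.Command("my-helper").CombinedOutput()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("child said: %s", out)
}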
+func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. + response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 0000000000..8f3f3a4a8a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
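A minimal caller-side sketch of the exported entry points from diff.go above ("layer.tar" and "/tmp/rootfs" are hypothetical; the stream must be an uncompressed layer):

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
)

func main() {
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// On Linux this unpacks inside a chrooted, re-exec'd child process.
	size, err := chrootarchive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("applied %d bytes\n", size)
}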
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 0000000000..a15e4bb83c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 0000000000..15ed874e77 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +func init() { +} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/archiver.go b/vendor/github.com/docker/docker/pkg/containerfs/archiver.go new file mode 100644 index 0000000000..1fb7ff7bdc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/containerfs/archiver.go @@ -0,0 +1,203 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// TarFunc provides a function definition for a custom Tar function +type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error) + +// UntarFunc provides a function definition for a custom Untar function +type UntarFunc func(io.Reader, string, *archive.TarOptions) error + +// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction +type Archiver struct { + SrcDriver Driver + DstDriver Driver + Tar TarFunc + Untar UntarFunc + IDMappingsVar *idtools.IDMappings +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. 
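A minimal sketch of wiring up an Archiver from the pieces this type expects; the Tar/Untar functions come from docker's archive and chrootarchive packages, and empty ID maps (an identity mapping) are an assumption for illustration:

package main

import (
	"log"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/containerfs"
	"github.com/docker/docker/pkg/idtools"
)

func main() {
	a := &containerfs.Archiver{
		SrcDriver:     containerfs.NewLocalDriver(),
		DstDriver:     containerfs.NewLocalDriver(),
		Tar:           archive.TarWithOptions, // satisfies TarFunc
		Untar:         chrootarchive.Untar,    // satisfies UntarFunc
		IDMappingsVar: idtools.NewIDMappingsFromMaps(nil, nil),
	}
	if err := a.TarUntar("/tmp/src", "/tmp/dst"); err != nil {
		log.Fatal(err)
	}
}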
+func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed}) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + tarArchive, err := archiver.SrcDriver.Open(src) + if err != nil { + return err + } + defer tarArchive.Close() + options := &archive.TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(tarArchive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. +func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := archiver.SrcDriver.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappingsVar.RootPair() + // Create dst, copy src's content into it + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcDriver := archiver.SrcDriver + dstDriver := archiver.DstDriver + + srcSt, err := srcDriver.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. + if dst[len(dst)-1] == dstDriver.Separator() { + dst = dstDriver.Join(dst, srcDriver.Base(src)) + } + + // The original call was system.MkdirAll, which is just + // os.MkdirAll on not-Windows and changed for Windows. 
+	if dstDriver.OS() == "windows" {
+		// Now we are WCOW
+		if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+			return err
+		}
+	} else {
+		// We can just use the driver.MkdirAll function
+		if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil {
+			return err
+		}
+	}
+
+	r, w := io.Pipe()
+	errC := make(chan error, 1)
+
+	go func() {
+		defer close(errC)
+		errC <- func() error {
+			defer w.Close()
+
+			srcF, err := srcDriver.Open(src)
+			if err != nil {
+				return err
+			}
+			defer srcF.Close()
+
+			hdr, err := tar.FileInfoHeader(srcSt, "")
+			if err != nil {
+				return err
+			}
+			hdr.Format = tar.FormatPAX
+			hdr.ModTime = hdr.ModTime.Truncate(time.Second)
+			hdr.AccessTime = time.Time{}
+			hdr.ChangeTime = time.Time{}
+			hdr.Name = dstDriver.Base(dst)
+			if dstDriver.OS() == "windows" {
+				hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+			} else {
+				hdr.Mode = int64(os.FileMode(hdr.Mode))
+			}
+
+			if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+				return err
+			}
+
+			tw := tar.NewWriter(w)
+			defer tw.Close()
+			if err := tw.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if _, err := io.Copy(tw, srcF); err != nil {
+				return err
+			}
+			return nil
+		}()
+	}()
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, dstDriver.Dir(dst), nil)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+// IDMappings returns the IDMappings of the archiver.
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
+	return archiver.IDMappingsVar
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+	hdr.Uid, hdr.Gid = ids.UID, ids.GID
+	return err
+}
+
+// chmodTarEntry is used to adjust the file permissions used in the tar
+// header based on the platform on which the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	permPart := perm & os.ModePerm
+	noPermPart := perm &^ os.ModePerm
+	// Add the x bit: make everything +x from windows
+	permPart |= 0111
+	permPart &= 0755
+
+	return noPermPart | permPart
+}
diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go
new file mode 100644
index 0000000000..7bb1d8c369
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs.go
@@ -0,0 +1,87 @@
+package containerfs // import "github.com/docker/docker/pkg/containerfs"
+
+import (
+	"path/filepath"
+	"runtime"
+
+	"github.com/containerd/continuity/driver"
+	"github.com/containerd/continuity/pathdriver"
+	"github.com/docker/docker/pkg/symlink"
+)
+
+// ContainerFS is an interface that represents a root file system
+type ContainerFS interface {
+	// Path returns the path to the root. Note that this may not exist
+	// on the local system, so the continuity operations must be used
+	Path() string
+
+	// ResolveScopedPath evaluates the given path scoped to the root.
+	// For example, if root=/a, and path=/b/c, then this function would return /a/b/c.
+	// If rawPath is true, then the function will not perform any modifications
+	// before path resolution. Otherwise, the function will clean the given path
+	// by making it an absolute path.
+	ResolveScopedPath(path string, rawPath bool) (string, error)
+
+	Driver
+}
+
+// Driver combines continuity's Driver and PathDriver interfaces, along with
+// methods that report the platform the rootfs is located on.
+type Driver interface {
+	// OS returns the OS where the rootfs is located. Essentially,
+	// runtime.GOOS for everything aside from LCOW, which is "linux"
+	OS() string
+
+	// Architecture returns the hardware architecture where the
+	// container is located.
+	Architecture() string
+
+	// Driver & PathDriver provide methods to manipulate files & paths
+	driver.Driver
+	pathdriver.PathDriver
+}
+
+// NewLocalContainerFS is a helper function to implement daemon's Mount interface
+// when the graphdriver mount point is a local path on the machine.
+func NewLocalContainerFS(path string) ContainerFS {
+	return &local{
+		path:       path,
+		Driver:     driver.LocalDriver,
+		PathDriver: pathdriver.LocalPathDriver,
+	}
+}
+
+// NewLocalDriver provides file and path drivers for a local file system. They are
+// essentially a wrapper around the `os` and `filepath` functions.
+func NewLocalDriver() Driver {
+	return &local{
+		Driver:     driver.LocalDriver,
+		PathDriver: pathdriver.LocalPathDriver,
+	}
+}
+
+type local struct {
+	path string
+	driver.Driver
+	pathdriver.PathDriver
+}
+
+func (l *local) Path() string {
+	return l.path
+}
+
+func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) {
+	cleanedPath := path
+	if !rawPath {
+		cleanedPath = cleanScopedPath(path)
+	}
+	return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path)
+}
+
+func (l *local) OS() string {
+	return runtime.GOOS
+}
+
+func (l *local) Architecture() string {
+	return runtime.GOARCH
+}
diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go
new file mode 100644
index 0000000000..6a99459517
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package containerfs // import "github.com/docker/docker/pkg/containerfs"
+
+import "path/filepath"
+
+// cleanScopedPath prepends a path separator to the given path so that it can
+// safely be combined with a mount path.
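// For example (illustrative): cleanScopedPath("../a/../b") returns "/b",
// because filepath.Join cleans the joined path, squashing any "..".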
+func cleanScopedPath(path string) string { + return filepath.Join(string(filepath.Separator), path) +} diff --git a/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go new file mode 100644 index 0000000000..9fb7084628 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/containerfs/containerfs_windows.go @@ -0,0 +1,15 @@ +package containerfs // import "github.com/docker/docker/pkg/containerfs" + +import "path/filepath" + +// cleanScopedPath removes the C:\ syntax, and prepares to combine +// with a volume path +func cleanScopedPath(path string) string { + if len(path) >= 2 { + c := path[0] + if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { + path = path[2:] + } + } + return filepath.Join(string(filepath.Separator), path) +} diff --git a/vendor/github.com/containers/storage/pkg/directory/directory.go b/vendor/github.com/docker/docker/pkg/directory/directory.go similarity index 88% rename from vendor/github.com/containers/storage/pkg/directory/directory.go rename to vendor/github.com/docker/docker/pkg/directory/directory.go index 1715ef45d9..51d4a6ea22 100644 --- a/vendor/github.com/containers/storage/pkg/directory/directory.go +++ b/vendor/github.com/docker/docker/pkg/directory/directory.go @@ -1,4 +1,4 @@ -package directory +package directory // import "github.com/docker/docker/pkg/directory" import ( "io/ioutil" diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_unix.go b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go new file mode 100644 index 0000000000..60e6dfd7ea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_unix.go @@ -0,0 +1,54 @@ +// +build linux freebsd + +package directory // import "github.com/docker/docker/pkg/directory" + +import ( + "context" + "os" + "path/filepath" + "syscall" +) + +// Size walks a directory tree and returns its total size in bytes. +func Size(ctx context.Context, dir string) (size int64, err error) { + data := make(map[uint64]struct{}) + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + // Check inode to handle hard links correctly + inode := fileInfo.Sys().(*syscall.Stat_t).Ino + // inode is not a uint64 on all platforms. Cast it to avoid issues. + if _, exists := data[inode]; exists { + return nil + } + // inode is not a uint64 on all platforms. Cast it to avoid issues. + data[inode] = struct{}{} + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/directory/directory_windows.go b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go new file mode 100644 index 0000000000..f07f241880 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/directory/directory_windows.go @@ -0,0 +1,42 @@ +package directory // import "github.com/docker/docker/pkg/directory" + +import ( + "context" + "os" + "path/filepath" +) + +// Size walks a directory tree and returns its total size in bytes. 
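A minimal caller fragment, assuming a context with a deadline to bound the walk (the path is an arbitrary example):

ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()

n, err := directory.Size(ctx, "/var/lib/docker/tmp")
if err != nil {
	log.Fatal(err)
}
fmt.Printf("%d bytes\n", n)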
+func Size(ctx context.Context, dir string) (size int64, err error) { + err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { + if err != nil { + // if dir does not exist, Size() returns the error. + // if dir/x disappeared while walking, Size() ignores dir/x. + if os.IsNotExist(err) && d != dir { + return nil + } + return err + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Ignore directory sizes + if fileInfo == nil { + return nil + } + + s := fileInfo.Size() + if fileInfo.IsDir() || s == 0 { + return nil + } + + size += s + + return nil + }) + return +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/backends.go b/vendor/github.com/docker/docker/pkg/discovery/backends.go new file mode 100644 index 0000000000..1d038285ad --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/backends.go @@ -0,0 +1,107 @@ +package discovery // import "github.com/docker/docker/pkg/discovery" + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/sirupsen/logrus" +) + +var ( + // Backends is a global map of discovery backends indexed by their + // associated scheme. + backends = make(map[string]Backend) +) + +// Register makes a discovery backend available by the provided scheme. +// If Register is called twice with the same scheme an error is returned. +func Register(scheme string, d Backend) error { + if _, exists := backends[scheme]; exists { + return fmt.Errorf("scheme already registered %s", scheme) + } + logrus.WithField("name", scheme).Debugf("Registering discovery service") + backends[scheme] = d + return nil +} + +func parse(rawurl string) (string, string) { + parts := strings.SplitN(rawurl, "://", 2) + + // nodes:port,node2:port => nodes://node1:port,node2:port + if len(parts) == 1 { + return "nodes", parts[0] + } + return parts[0], parts[1] +} + +// ParseAdvertise parses the --cluster-advertise daemon config which accepts +// : or : +func ParseAdvertise(advertise string) (string, error) { + var ( + iface *net.Interface + addrs []net.Addr + err error + ) + + addr, port, err := net.SplitHostPort(advertise) + + if err != nil { + return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) + } + + ip := net.ParseIP(addr) + // If it is a valid ip-address, use it as is + if ip != nil { + return advertise, nil + } + + // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise + ifaceName := addr + iface, err = net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) + } + + addrs, err = iface.Addrs() + if err != nil { + return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) + } + + if len(addrs) == 0 { + return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) + } + + addr = "" + for _, a := range addrs { + ip, _, err := net.ParseCIDR(a.String()) + if err != nil { + return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) + } + if ip.To4() == nil || ip.IsLoopback() { + continue + } + addr = ip.String() + break + } + if addr == "" { + return "", fmt.Errorf("could not find a valid ip-address in interface %s", advertise) + } + + addr = net.JoinHostPort(addr, port) + return addr, nil +} + +// New returns a new Discovery given a URL, heartbeat and ttl settings. +// Returns an error if the URL scheme is not supported. 
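A minimal sketch of the scheme dispatch below; the "etcd" scheme assumes the kv backend added later in this change has been linked in via its side-effect registration:

backend, err := discovery.New("etcd://127.0.0.1:2379", 10*time.Second, 60*time.Second, nil)
if err != nil {
	log.Fatal(err) // ErrNotSupported for an unregistered scheme
}

stopCh := make(chan struct{})
entriesCh, errCh := backend.Watch(stopCh)
for {
	select {
	case entries := <-entriesCh:
		fmt.Printf("%d nodes\n", len(entries))
	case err := <-errCh:
		log.Print(err)
	}
}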
+func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { + scheme, uri := parse(rawurl) + if backend, exists := backends[scheme]; exists { + logrus.WithFields(logrus.Fields{"name": scheme, "uri": uri}).Debugf("Initializing discovery service") + err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) + return backend, err + } + + return nil, ErrNotSupported +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/discovery.go b/vendor/github.com/docker/docker/pkg/discovery/discovery.go new file mode 100644 index 0000000000..828c5ca488 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/discovery.go @@ -0,0 +1,35 @@ +package discovery // import "github.com/docker/docker/pkg/discovery" + +import ( + "errors" + "time" +) + +var ( + // ErrNotSupported is returned when a discovery service is not supported. + ErrNotSupported = errors.New("discovery service not supported") + + // ErrNotImplemented is returned when discovery feature is not implemented + // by discovery backend. + ErrNotImplemented = errors.New("not implemented in this discovery service") +) + +// Watcher provides watching over a cluster for nodes joining and leaving. +type Watcher interface { + // Watch the discovery for entry changes. + // Returns a channel that will receive changes or an error. + // Providing a non-nil stopCh can be used to stop watching. + Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) +} + +// Backend is implemented by discovery backends which manage cluster entries. +type Backend interface { + // Watcher must be provided by every backend. + Watcher + + // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. + Initialize(string, time.Duration, time.Duration, map[string]string) error + + // Register to the discovery. + Register(string) error +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/entry.go b/vendor/github.com/docker/docker/pkg/discovery/entry.go new file mode 100644 index 0000000000..be06c75787 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/entry.go @@ -0,0 +1,94 @@ +package discovery // import "github.com/docker/docker/pkg/discovery" + +import "net" + +// NewEntry creates a new entry. +func NewEntry(url string) (*Entry, error) { + host, port, err := net.SplitHostPort(url) + if err != nil { + return nil, err + } + return &Entry{host, port}, nil +} + +// An Entry represents a host. +type Entry struct { + Host string + Port string +} + +// Equals returns true if cmp contains the same data. +func (e *Entry) Equals(cmp *Entry) bool { + return e.Host == cmp.Host && e.Port == cmp.Port +} + +// String returns the string form of an entry. +func (e *Entry) String() string { + return net.JoinHostPort(e.Host, e.Port) +} + +// Entries is a list of *Entry with some helpers. +type Entries []*Entry + +// Equals returns true if cmp contains the same data. +func (e Entries) Equals(cmp Entries) bool { + // Check if the file has really changed. + if len(e) != len(cmp) { + return false + } + for i := range e { + if !e[i].Equals(cmp[i]) { + return false + } + } + return true +} + +// Contains returns true if the Entries contain a given Entry. +func (e Entries) Contains(entry *Entry) bool { + for _, curr := range e { + if curr.Equals(entry) { + return true + } + } + return false +} + +// Diff compares two entries and returns the added and removed entries. 
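// For example (illustrative): with e = [host1:2375 host2:2375] and
// cmp = [host2:2375 host3:2375], Diff returns added = [host3:2375] and
// removed = [host1:2375].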
+func (e Entries) Diff(cmp Entries) (Entries, Entries) { + added := Entries{} + for _, entry := range cmp { + if !e.Contains(entry) { + added = append(added, entry) + } + } + + removed := Entries{} + for _, entry := range e { + if !cmp.Contains(entry) { + removed = append(removed, entry) + } + } + + return added, removed +} + +// CreateEntries returns an array of entries based on the given addresses. +func CreateEntries(addrs []string) (Entries, error) { + entries := Entries{} + if addrs == nil { + return entries, nil + } + + for _, addr := range addrs { + if len(addr) == 0 { + continue + } + entry, err := NewEntry(addr) + if err != nil { + return nil, err + } + entries = append(entries, entry) + } + return entries, nil +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/generator.go b/vendor/github.com/docker/docker/pkg/discovery/generator.go new file mode 100644 index 0000000000..788015fe23 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/generator.go @@ -0,0 +1,35 @@ +package discovery // import "github.com/docker/docker/pkg/discovery" + +import ( + "fmt" + "regexp" + "strconv" +) + +// Generate takes care of IP generation +func Generate(pattern string) []string { + re, _ := regexp.Compile(`\[(.+):(.+)\]`) + submatch := re.FindStringSubmatch(pattern) + if submatch == nil { + return []string{pattern} + } + + from, err := strconv.Atoi(submatch[1]) + if err != nil { + return []string{pattern} + } + to, err := strconv.Atoi(submatch[2]) + if err != nil { + return []string{pattern} + } + + template := re.ReplaceAllString(pattern, "%d") + + var result []string + for val := from; val <= to; val++ { + entry := fmt.Sprintf(template, val) + result = append(result, entry) + } + + return result +} diff --git a/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go new file mode 100644 index 0000000000..30fe6714c8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/discovery/kv/kv.go @@ -0,0 +1,192 @@ +package kv // import "github.com/docker/docker/pkg/discovery/kv" + +import ( + "fmt" + "path" + "strings" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" + "github.com/sirupsen/logrus" +) + +const ( + defaultDiscoveryPath = "docker/nodes" +) + +// Discovery is exported +type Discovery struct { + backend store.Backend + store store.Store + heartbeat time.Duration + ttl time.Duration + prefix string + path string +} + +func init() { + Init() +} + +// Init is exported +func Init() { + // Register to libkv + zookeeper.Register() + consul.Register() + etcd.Register() + + // Register to internal discovery service + discovery.Register("zk", &Discovery{backend: store.ZK}) + discovery.Register("consul", &Discovery{backend: store.CONSUL}) + discovery.Register("etcd", &Discovery{backend: store.ETCD}) +} + +// Initialize is exported +func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { + var ( + parts = strings.SplitN(uris, "/", 2) + addrs = strings.Split(parts[0], ",") + err error + ) + + // A custom prefix to the path can be optionally used. 
+ if len(parts) == 2 { + s.prefix = parts[1] + } + + s.heartbeat = heartbeat + s.ttl = ttl + + // Use a custom path if specified in discovery options + dpath := defaultDiscoveryPath + if clusterOpts["kv.path"] != "" { + dpath = clusterOpts["kv.path"] + } + + s.path = path.Join(s.prefix, dpath) + + var config *store.Config + if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { + logrus.Info("Initializing discovery with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }) + if err != nil { + return err + } + config = &store.Config{ + // Set ClientTLS to trigger https (bug in libkv/etcd) + ClientTLS: &store.ClientTLSConfig{ + CACertFile: clusterOpts["kv.cacertfile"], + CertFile: clusterOpts["kv.certfile"], + KeyFile: clusterOpts["kv.keyfile"], + }, + // The actual TLS config that will be used + TLS: tlsConfig, + } + } else { + logrus.Info("Initializing discovery without TLS") + } + + // Creates a new store, will ignore options given + // if not supported by the chosen store + s.store, err = libkv.NewStore(s.backend, addrs, config) + return err +} + +// Watch the store until either there's a store error or we receive a stop request. +// Returns false if we shouldn't attempt watching the store anymore (stop request received). +func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { + for { + select { + case pairs := <-watchCh: + if pairs == nil { + return true + } + + logrus.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) + + // Convert `KVPair` into `discovery.Entry`. + addrs := make([]string, len(pairs)) + for _, pair := range pairs { + addrs = append(addrs, string(pair.Value)) + } + + entries, err := discovery.CreateEntries(addrs) + if err != nil { + errCh <- err + } else { + discoveryCh <- entries + } + case <-stopCh: + // We were requested to stop watching. + return false + } + } +} + +// Watch is exported +func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { + ch := make(chan discovery.Entries) + errCh := make(chan error) + + go func() { + defer close(ch) + defer close(errCh) + + // Forever: Create a store watch, watch until we get an error and then try again. + // Will only stop if we receive a stopCh request. + for { + // Create the path to watch if it does not exist yet + exists, err := s.store.Exists(s.path) + if err != nil { + errCh <- err + } + if !exists { + if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { + errCh <- err + } + } + + // Set up a watch. + watchCh, err := s.store.WatchTree(s.path, stopCh) + if err != nil { + errCh <- err + } else { + if !s.watchOnce(stopCh, watchCh, ch, errCh) { + return + } + } + + // If we get here it means the store watch channel was closed. This + // is unexpected so let's retry later. + errCh <- fmt.Errorf("Unexpected watch error") + time.Sleep(s.heartbeat) + } + }() + return ch, errCh +} + +// Register is exported +func (s *Discovery) Register(addr string) error { + opts := &store.WriteOptions{TTL: s.ttl} + return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) +} + +// Store returns the underlying store used by KV discovery. 
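A minimal sketch of how a node advertises itself through this backend, assuming a reachable consul agent (the address, prefix, and intervals are illustrative):

backend, err := discovery.New("consul://127.0.0.1:8500/custom/prefix", 10*time.Second, 30*time.Second, nil)
if err != nil {
	log.Fatal(err)
}
// Re-register within the TTL so the entry under
// custom/prefix/docker/nodes/<addr> does not expire.
for range time.Tick(10 * time.Second) {
	if err := backend.Register("192.168.0.10:2375"); err != nil {
		log.Print(err)
	}
}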
+func (s *Discovery) Store() store.Store { + return s.store +} + +// Prefix returns the store prefix +func (s *Discovery) Prefix() string { + return s.prefix +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go new file mode 100644 index 0000000000..8b6cb56f17 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/filenotify.go @@ -0,0 +1,40 @@ +// Package filenotify provides a mechanism for watching file(s) for changes. +// Generally leans on fsnotify, but provides a poll-based notifier which fsnotify does not support. +// These are wrapped up in a common interface so that either can be used interchangeably in your code. +package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import "github.com/fsnotify/fsnotify" + +// FileWatcher is an interface for implementing file notification watchers +type FileWatcher interface { + Events() <-chan fsnotify.Event + Errors() <-chan error + Add(name string) error + Remove(name string) error + Close() error +} + +// New tries to use an fs-event watcher, and falls back to the poller if there is an error +func New() (FileWatcher, error) { + if watcher, err := NewEventWatcher(); err == nil { + return watcher, nil + } + return NewPollingWatcher(), nil +} + +// NewPollingWatcher returns a poll-based file watcher +func NewPollingWatcher() FileWatcher { + return &filePoller{ + events: make(chan fsnotify.Event), + errors: make(chan error), + } +} + +// NewEventWatcher returns an fs-event based file watcher +func NewEventWatcher() (FileWatcher, error) { + watcher, err := fsnotify.NewWatcher() + if err != nil { + return nil, err + } + return &fsNotifyWatcher{watcher}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go new file mode 100644 index 0000000000..5a737d6530 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/fsnotify.go @@ -0,0 +1,18 @@ +package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import "github.com/fsnotify/fsnotify" + +// fsNotifyWatcher wraps the fsnotify package to satisfy the FileNotifier interface +type fsNotifyWatcher struct { + *fsnotify.Watcher +} + +// Events returns the fsnotify event channel receiver +func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { + return w.Watcher.Events +} + +// Errors returns the fsnotify error channel receiver +func (w *fsNotifyWatcher) Errors() <-chan error { + return w.Watcher.Errors +} diff --git a/vendor/github.com/docker/docker/pkg/filenotify/poller.go b/vendor/github.com/docker/docker/pkg/filenotify/poller.go new file mode 100644 index 0000000000..22f1897034 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/filenotify/poller.go @@ -0,0 +1,204 @@ +package filenotify // import "github.com/docker/docker/pkg/filenotify" + +import ( + "errors" + "fmt" + "os" + "sync" + "time" + + "github.com/sirupsen/logrus" + + "github.com/fsnotify/fsnotify" +) + +var ( + // errPollerClosed is returned when the poller is closed + errPollerClosed = errors.New("poller is closed") + // errNoSuchWatch is returned when trying to remove a watch that doesn't exist + errNoSuchWatch = errors.New("watch does not exist") +) + +// watchWaitTime is the time to wait between file poll loops +const watchWaitTime = 200 * time.Millisecond + +// filePoller is used to poll files for changes, especially in cases where fsnotify +// can't be run (e.g. 
when inotify handles are exhausted) +// filePoller satisfies the FileWatcher interface +type filePoller struct { + // watches is the list of files currently being polled, close the associated channel to stop the watch + watches map[string]chan struct{} + // events is the channel to listen to for watch events + events chan fsnotify.Event + // errors is the channel to listen to for watch errors + errors chan error + // mu locks the poller for modification + mu sync.Mutex + // closed is used to specify when the poller has already closed + closed bool +} + +// Add adds a filename to the list of watches +// once added the file is polled for changes in a separate goroutine +func (w *filePoller) Add(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return errPollerClosed + } + + f, err := os.Open(name) + if err != nil { + return err + } + fi, err := os.Stat(name) + if err != nil { + return err + } + + if w.watches == nil { + w.watches = make(map[string]chan struct{}) + } + if _, exists := w.watches[name]; exists { + return fmt.Errorf("watch exists") + } + chClose := make(chan struct{}) + w.watches[name] = chClose + + go w.watch(f, fi, chClose) + return nil +} + +// Remove stops and removes watch with the specified name +func (w *filePoller) Remove(name string) error { + w.mu.Lock() + defer w.mu.Unlock() + return w.remove(name) +} + +func (w *filePoller) remove(name string) error { + if w.closed { + return errPollerClosed + } + + chClose, exists := w.watches[name] + if !exists { + return errNoSuchWatch + } + close(chClose) + delete(w.watches, name) + return nil +} + +// Events returns the event channel +// This is used for notifications on events about watched files +func (w *filePoller) Events() <-chan fsnotify.Event { + return w.events +} + +// Errors returns the errors channel +// This is used for notifications about errors on watched files +func (w *filePoller) Errors() <-chan error { + return w.errors +} + +// Close closes the poller +// All watches are stopped, removed, and the poller cannot be added to +func (w *filePoller) Close() error { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil + } + + w.closed = true + for name := range w.watches { + w.remove(name) + delete(w.watches, name) + } + return nil +} + +// sendEvent publishes the specified event to the events channel +func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { + select { + case w.events <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// sendErr publishes the specified error to the errors channel +func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { + select { + case w.errors <- e: + case <-chClose: + return fmt.Errorf("closed") + } + return nil +} + +// watch is responsible for polling the specified file for changes +// upon finding changes to a file or errors, sendEvent/sendErr is called +func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { + defer f.Close() + for { + time.Sleep(watchWaitTime) + select { + case <-chClose: + logrus.Debugf("watch for %s closed", f.Name()) + return + default: + } + + fi, err := os.Stat(f.Name()) + if err != nil { + // if we got an error here and lastFi is not set, we can presume that nothing has changed + // This should be safe since before `watch()` is called, a stat is performed, there is any error `watch` is not called + if lastFi == nil { + continue + } + // If it doesn't exist at this point, it must have been removed + // no need to send the 
error here since this is a valid operation + if os.IsNotExist(err) { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { + return + } + lastFi = nil + continue + } + // at this point, send the error + if err := w.sendErr(err, chClose); err != nil { + return + } + continue + } + + if lastFi == nil { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.Mode() != lastFi.Mode() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + + if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { + if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { + return + } + lastFi = fi + continue + } + } +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go deleted file mode 100644 index ee15ed52b1..0000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - - "github.com/docker/docker/pkg/idtools" -) - -// GetStatic returns the home directory for the current user without calling -// os/user.Current(). This is useful for static-linked binary on glibc-based -// system, because a call to os/user.Current() in a static binary leads to -// segfault due to a glibc issue that won't be fixed in a short term. -// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341) -func GetStatic() (string, error) { - uid := os.Getuid() - usr, err := idtools.LookupUID(uid) - if err != nil { - return "", err - } - return usr.Home, nil -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go deleted file mode 100644 index 75ada2fe54..0000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "errors" -) - -// GetStatic is not needed for non-linux systems. -// (Precisely, it is needed only for glibc-based linux systems.) -func GetStatic() (string, error) { - return "", errors.New("homedir.GetStatic() is not supported on this system") -} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go deleted file mode 100644 index d85e124488..0000000000 --- a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build !windows - -package homedir // import "github.com/docker/docker/pkg/homedir" - -import ( - "os" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. 
-func Get() string {
-	home := os.Getenv(Key())
-	if home == "" {
-		if u, err := user.CurrentUser(); err == nil {
-			return u.Home
-		}
-	}
-	return home
-}
-
-// GetShortcutString returns the string that is shortcut to user's home directory
-// in the native shell of the platform running on.
-func GetShortcutString() string {
-	return "~"
-}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
deleted file mode 100644
index 2f81813b28..0000000000
--- a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package homedir // import "github.com/docker/docker/pkg/homedir"
-
-import (
-	"os"
-)
-
-// Key returns the env var name for the user's home dir based on
-// the platform being run on
-func Key() string {
-	return "USERPROFILE"
-}
-
-// Get returns the home directory of the current user with the help of
-// environment variables depending on the target operating system.
-// Returned path should be used with "path/filepath" to form new paths.
-func Get() string {
-	return os.Getenv(Key())
-}
-
-// GetShortcutString returns the string that is shortcut to user's home directory
-// in the native shell of the platform running on.
-func GetShortcutString() string {
-	return "%USERPROFILE%" // be careful while using in format functions
-}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
index 72f7f2319f..1f657bd3dc 100644
--- a/vendor/github.com/docker/docker/pkg/ioutils/readers.go
+++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go
@@ -1,11 +1,10 @@
 package ioutils // import "github.com/docker/docker/pkg/ioutils"
 
 import (
+	"context"
 	"crypto/sha256"
 	"encoding/hex"
 	"io"
-
-	"golang.org/x/net/context"
 )
 
 // ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000000..b9f40d3ef1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,335 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	gotty "github.com/Nvveen/Gotty"
+	"github.com/docker/docker/pkg/term"
+	units "github.com/docker/go-units"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message. `Code` is an integer error
+// code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.
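// An illustrative message as it appears on the wire (values made up; the
// field names match the JSON tags on JSONMessage below):
//
//	{"id":"4f4f...","status":"Extracting","progressDetail":{"current":512,"total":1024},"progress":"[=========>          ] 512B/1.024kB"}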
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` //deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` //deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. + Aux *json.RawMessage `json:"aux,omitempty"` +} + +/* Satisfied by gotty.TermInfo as well as noTermInfo from below */ +type termInfo interface { + Parse(attr string, params ...interface{}) (string, error) +} + +type noTermInfo struct{} // canary used when no terminfo. 
+ +func (ti *noTermInfo) Parse(attr string, params ...interface{}) (string, error) { + return "", fmt.Errorf("noTermInfo") +} + +func clearLine(out io.Writer, ti termInfo) { + // el2 (clear whole line) is not exposed by terminfo. + + // First clear line from beginning to cursor + if attr, err := ti.Parse("el1"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[1K") + } + // Then clear line from cursor to end + if attr, err := ti.Parse("el"); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[K") + } +} + +func cursorUp(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cuu", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dA", l) + } +} + +func cursorDown(out io.Writer, ti termInfo, l int) { + if l == 0 { // Should never be the case, but be tolerant + return + } + if attr, err := ti.Parse("cud", l); err == nil { + fmt.Fprintf(out, "%s", attr) + } else { + fmt.Fprintf(out, "\x1b[%dB", l) + } +} + +// Display displays the JSONMessage to `out`. `termInfo` is non-nil if `out` +// is a terminal. If this is the case, it will erase the entire current line +// when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, termInfo termInfo) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if termInfo != nil && jm.Stream == "" && jm.Progress != nil { + clearLine(out, termInfo) + endl = "\r" + fmt.Fprintf(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && termInfo != nil { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { //deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. 
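A minimal caller fragment, assuming body is such a stream (for example, the response body of a daemon-side image pull):

fd, isTerm := term.GetFdInfo(os.Stdout)
if err := jsonmessage.DisplayJSONMessagesStream(body, os.Stdout, fd, isTerm, nil); err != nil {
	log.Fatal(err)
}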
+func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
+	var (
+		dec = json.NewDecoder(in)
+		ids = make(map[string]int)
+	)
+
+	var termInfo termInfo
+
+	if isTerminal {
+		term := os.Getenv("TERM")
+		if term == "" {
+			term = "vt102"
+		}
+
+		var err error
+		if termInfo, err = gotty.OpenTermInfo(term); err != nil {
+			termInfo = &noTermInfo{}
+		}
+	}
+
+	for {
+		diff := 0
+		var jm JSONMessage
+		if err := dec.Decode(&jm); err != nil {
+			if err == io.EOF {
+				break
+			}
+			return err
+		}
+
+		if jm.Aux != nil {
+			if auxCallback != nil {
+				auxCallback(jm.Aux)
+			}
+			continue
+		}
+
+		if jm.Progress != nil {
+			jm.Progress.terminalFd = terminalFd
+		}
+		if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
+			line, ok := ids[jm.ID]
+			if !ok {
+				// NOTE: This approach of using len(ids) to
+				// figure out the number of lines of history
+				// only works as long as we clear the history
+				// when we output something that's not
+				// accounted for in the map, such as a line
+				// with no ID.
+				line = len(ids)
+				ids[jm.ID] = line
+				if termInfo != nil {
+					fmt.Fprintf(out, "\n")
+				}
+			}
+			diff = len(ids) - line
+			if termInfo != nil {
+				cursorUp(out, termInfo, diff)
+			}
+		} else {
+			// When outputting something that isn't progress
+			// output, clear the history of previous lines. We
+			// don't want progress entries from some previous
+			// operation to be updated (for example, pull -a
+			// with multiple tags).
+			ids = make(map[string]int)
+		}
+		err := jm.Display(out, termInfo)
+		if jm.ID != "" && termInfo != nil {
+			cursorDown(out, termInfo, diff)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type stream interface {
+	io.Writer
+	FD() uintptr
+	IsTerminal() bool
+}
+
+// DisplayJSONMessagesToStream prints json messages to the output stream
+func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(*json.RawMessage)) error {
+	return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback)
+}
diff --git a/vendor/github.com/containers/storage/pkg/locker/locker.go b/vendor/github.com/docker/docker/pkg/locker/locker.go
similarity index 97%
rename from vendor/github.com/containers/storage/pkg/locker/locker.go
rename to vendor/github.com/docker/docker/pkg/locker/locker.go
index 0b22ddfab8..dbd47fc465 100644
--- a/vendor/github.com/containers/storage/pkg/locker/locker.go
+++ b/vendor/github.com/docker/docker/pkg/locker/locker.go
@@ -11,7 +11,7 @@ created. Lock references are automatically cleaned up on `Unlock` if nothing
 else is waiting for the lock.
 */
-package locker
+package locker // import "github.com/docker/docker/pkg/locker"
 
 import (
 	"errors"
diff --git a/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/docker/docker/pkg/mount/mount.go
index 8ff4925d73..874aff6545 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mount.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mount.go
@@ -3,32 +3,64 @@ package mount // import "github.com/docker/docker/pkg/mount"
 
 import (
 	"sort"
 	"strings"
 	"syscall"
 
 	"github.com/sirupsen/logrus"
 )
 
-// GetMounts retrieves a list of mounts for the current running process.
-func GetMounts() ([]*Info, error) {
-	return parseMountTable()
+// FilterFunc is a type defining a callback function
+// to filter out unwanted entries.
+// to an Info struct (not fully populated, currently
+// only Mountpoint is filled in), and returns two booleans:
+//  - skip: true if the entry should be skipped
+//  - stop: true if parsing should be stopped after the entry
+type FilterFunc func(*Info) (skip, stop bool)
+
+// PrefixFilter discards all entries whose mount points
+// do not start with the specified prefix.
+func PrefixFilter(prefix string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		skip := !strings.HasPrefix(m.Mountpoint, prefix)
+		return skip, false
+	}
+}
+
+// SingleEntryFilter looks for a specific entry.
+func SingleEntryFilter(mp string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		if m.Mountpoint == mp {
+			return false, true // don't skip, stop now
+		}
+		return true, false // skip, keep going
+	}
+}
+
+// ParentsFilter returns all entries whose mount points
+// can be parents of the specified path, discarding others.
+// For example, given `/var/lib/docker/something`, entries
+// like `/var/lib/docker`, `/var` and `/` are returned.
+func ParentsFilter(path string) FilterFunc {
+	return func(m *Info) (bool, bool) {
+		skip := !strings.HasPrefix(path, m.Mountpoint)
+		return skip, false
+	}
+}
+
+// GetMounts retrieves a list of mounts for the current running process,
+// with an optional filter applied (use nil for no filter).
+func GetMounts(f FilterFunc) ([]*Info, error) {
+	return parseMountTable(f)
 }
 
 // Mounted determines if a specified mountpoint has been mounted.
 // On Linux it looks at /proc/self/mountinfo.
 func Mounted(mountpoint string) (bool, error) {
-	entries, err := parseMountTable()
+	entries, err := GetMounts(SingleEntryFilter(mountpoint))
 	if err != nil {
 		return false, err
 	}
-	// Search the table for the mountpoint
-	for _, e := range entries {
-		if e.Mountpoint == mountpoint {
-			return true, nil
-		}
-	}
-	return false, nil
+	return len(entries) > 0, nil
 }
 
 // Mount will mount filesystem according to the specified configuration, on the
@@ -57,27 +89,28 @@ func ForceMount(device, target, mType, options string) error {
 // Unmount lazily unmounts a filesystem on supported platforms, otherwise
 // does a normal unmount.
 func Unmount(target string) error {
-	if mounted, err := Mounted(target); err != nil || !mounted {
-		return err
+	err := unmount(target, mntDetach)
+	if err == syscall.EINVAL {
+		// ignore "not mounted" error
+		err = nil
 	}
-	return unmount(target, mntDetach)
+	return err
 }
 
 // RecursiveUnmount unmounts the target and all mounts underneath, starting with
 // the deepest mount first.
 func RecursiveUnmount(target string) error {
-	mounts, err := GetMounts()
+	mounts, err := parseMountTable(PrefixFilter(target))
 	if err != nil {
 		return err
 	}
 
 	// Make the deepest mount be first
-	sort.Sort(sort.Reverse(byMountpoint(mounts)))
+	sort.Slice(mounts, func(i, j int) bool {
+		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
+	})
 
 	for i, m := range mounts {
-		if !strings.HasPrefix(m.Mountpoint, target) {
-			continue
-		}
 		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
 		err = unmount(m.Mountpoint, mntDetach)
 		if err != nil {
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
index 05803938af..ecd03fc022 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo.go
@@ -38,17 +38,3 @@ type Info struct {
 	// VfsOpts represents per super block options.
 	VfsOpts string
 }
-
-type byMountpoint []*Info
-
-func (by byMountpoint) Len() int {
-	return len(by)
-}
-
-func (by byMountpoint) Less(i, j int) bool {
-	return by[i].Mountpoint < by[j].Mountpoint
-}
-
-func (by byMountpoint) Swap(i, j int) {
-	by[i], by[j] = by[j], by[i]
-}
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
index b86f4a97f6..36c89dc1a2 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go
@@ -15,7 +15,7 @@ import (
 
 // Parse /proc/self/mountinfo because comparing Dev and ino does not work from
 // bind mounts.
-func parseMountTable() ([]*Info, error) {
+func parseMountTable(filter FilterFunc) ([]*Info, error) {
 	var rawEntries *C.struct_statfs
 
 	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
@@ -32,10 +32,24 @@ func parseMountTable() ([]*Info, error) {
 	var out []*Info
 	for _, entry := range entries {
 		var mountinfo Info
+		var skip, stop bool
 		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+
+		if filter != nil {
+			// filter out entries we're not interested in
+			skip, stop = filter(&mountinfo)
+			if skip {
+				continue
+			}
+		}
+
 		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
 		mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
+
 		out = append(out, &mountinfo)
+		if stop {
+			break
+		}
 	}
 	return out, nil
 }
diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
index 4afa8d538d..c1dba01fc3 100644
--- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
+++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go
@@ -5,80 +5,119 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"strconv"
 	"strings"
 )
 
-const (
-	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
-	   (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
+func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) {
+	s := bufio.NewScanner(r)
+	out := []*Info{}
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		/*
+		   36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+		   (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
 
-	   (1) mount ID: unique identifier of the mount (may be reused after umount)
-	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
-	   (3) major:minor: value of st_dev for files on filesystem
-	   (4) root: root of the mount within the filesystem
-	   (5) mount point: mount point relative to the process's root
-	   (6) mount options: per mount options
-	   (7) optional fields: zero or more fields of the form "tag[:value]"
-	   (8) separator: marks the end of the optional fields
-	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
-	   (10) mount source: filesystem specific information or "none"
-	   (11) super options: per super block options*/
-	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
-)
+	   (1) mount ID: unique identifier of the mount (may be reused after umount)
+	   (2) parent ID: ID of parent (or of self for the top of the mount tree)
+	   (3) major:minor: value of st_dev for files on filesystem
+	   (4) root: root of the mount within the filesystem
+	   (5) mount point: mount point relative to the process's root
+	   (6) mount options: per mount options
+	   (7) optional fields: zero or more fields of the form "tag[:value]"
+	   (8) separator: marks the end of the optional fields
+	   (9) filesystem type: name of filesystem of the form "type[.subtype]"
+	   (10) mount source: filesystem specific information or "none"
+	   (11) super options: per super block options
+	*/
 
-// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
-// bind mounts
-func parseMountTable() ([]*Info, error) {
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
+		text := s.Text()
+		fields := strings.Split(text, " ")
+		numFields := len(fields)
+		if numFields < 10 {
+			// should be at least 10 fields
+			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields)
+		}
 
-	return parseInfoFile(f)
-}
+		p := &Info{}
+		// ignore any numbers parsing errors, as there should not be any
+		p.ID, _ = strconv.Atoi(fields[0])
+		p.Parent, _ = strconv.Atoi(fields[1])
+		mm := strings.Split(fields[2], ":")
+		if len(mm) != 2 {
+			return nil, fmt.Errorf("Parsing '%s' failed: unexpected major:minor pair %s", text, mm)
+		}
+		p.Major, _ = strconv.Atoi(mm[0])
+		p.Minor, _ = strconv.Atoi(mm[1])
 
-func parseInfoFile(r io.Reader) ([]*Info, error) {
-	var (
-		s   = bufio.NewScanner(r)
-		out = []*Info{}
-	)
+		p.Root = fields[3]
+		p.Mountpoint = fields[4]
+		p.Opts = fields[5]
 
-	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
+		var skip, stop bool
+		if filter != nil {
+			// filter out entries we're not interested in
+			skip, stop = filter(p)
+			if skip {
+				continue
+			}
 		}
 
-		var (
-			p              = &Info{}
-			text           = s.Text()
-			optionalFields string
-		)
-
-		if _, err := fmt.Sscanf(text, mountinfoFormat,
-			&p.ID, &p.Parent, &p.Major, &p.Minor,
-			&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
-			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+		// zero or more optional fields, terminated by a separator (-)
+		i := 6
+		for ; i < numFields && fields[i] != "-"; i++ {
+			switch i {
+			case 6:
+				p.Optional = fields[6]
+			default:
+				/* NOTE there might be more optional fields before the separator,
+				   such as fields[7]...fields[N] (where N < sepIndex), although
+				   as of Linux kernel 4.15 the only known ones are
+				   mount propagation flags in fields[6]. The correct
+				   behavior is to ignore any unknown optional fields.
+				*/
+				break
+			}
 		}
-		// Safe as mountinfo encodes mountpoints with spaces as \040.
-		index := strings.Index(text, " - ")
-		postSeparatorFields := strings.Fields(text[index+3:])
-		if len(postSeparatorFields) < 3 {
-			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+		if i == numFields {
+			return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text)
 		}
 
-		if optionalFields != "-" {
-			p.Optional = optionalFields
+		// There should be 3 fields after the separator...
+		if i+4 > numFields {
+			return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text)
 		}
+		// ... but in Linux <= 3.9 mounting a cifs with spaces in a share name
+		// (like "//serv/My Documents") _may_ end up having a space in the last field
+		// of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs
+		// option unc= is ignored, so a space should not appear. In here we ignore
+		// those "extra" fields caused by extra spaces.
+ p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") out = append(out, p) + if stop { + break + } } return out, nil } +// Parse /proc/self/mountinfo because comparing Dev and ino does not work from +// bind mounts +func parseMountTable(filter FilterFunc) ([]*Info, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + return parseInfoFile(f, filter) +} + // PidMountInfo collects the mounts for a specific process ID. If the process // ID is unknown, it is better to use `GetMounts` which will inspect // "/proc/self/mountinfo" instead. @@ -89,5 +128,5 @@ func PidMountInfo(pid int) ([]*Info, error) { } defer f.Close() - return parseInfoFile(f) + return parseInfoFile(f, nil) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 2ecc8baed8..fd16d3ed69 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -7,6 +7,6 @@ import ( "runtime" ) -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go index 7ecba7c13a..27e0f6976e 100644 --- a/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ b/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -1,6 +1,6 @@ package mount // import "github.com/docker/docker/pkg/mount" -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { // Do NOT return an error! return nil, nil } diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go similarity index 96% rename from vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go index 7738fc7411..94780ef610 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel.go @@ -2,7 +2,7 @@ // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "errors" diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go similarity index 94% rename from vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go index 71f205b285..6e599eebcc 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_darwin.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_darwin.go @@ -2,7 +2,7 @@ // Package kernel provides helper function to get, parse and compare kernel // versions for different platforms. 
-package kernel
+package kernel // import "github.com/docker/docker/pkg/parsers/kernel"
 
 import (
 	"fmt"
diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
similarity index 75%
rename from vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
rename to vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
index 76e1e499f3..8a9aa31225 100644
--- a/vendor/github.com/containers/storage/pkg/parsers/kernel/kernel_unix.go
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_unix.go
@@ -1,8 +1,8 @@
-// +build linux freebsd solaris openbsd
+// +build linux freebsd openbsd
 
 // Package kernel provides helper function to get, parse and compare kernel
 // versions for different platforms.
-package kernel
+package kernel // import "github.com/docker/docker/pkg/parsers/kernel"
 
 import (
 	"bytes"
@@ -17,18 +17,8 @@ func GetKernelVersion() (*VersionInfo, error) {
 		return nil, err
 	}
 
-	release := make([]byte, len(uts.Release))
-
-	i := 0
-	for _, c := range uts.Release {
-		release[i] = byte(c)
-		i++
-	}
-
-	// Remove the \x00 from the release for Atoi to parse correctly
-	release = release[:bytes.IndexByte(release, 0)]
-
-	return ParseRelease(string(release))
+	return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
 }
 
 // CheckKernelVersion checks if current kernel is newer than (or equal to)
diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
new file mode 100644
index 0000000000..b7b15a1fd2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/kernel_windows.go
@@ -0,0 +1,51 @@
+package kernel // import "github.com/docker/docker/pkg/parsers/kernel"
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/windows"
+	"golang.org/x/sys/windows/registry"
+)
+
+// VersionInfo holds information about the kernel.
+type VersionInfo struct {
+	kvi   string // Full version string of the kernel (e.g. "6.1.7601.17592")
+	major int    // Major part of the kernel version (e.g. 6.1.7601.17592 -> 6)
+	minor int    // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 1)
+	build int    // Build number of the kernel version (e.g. 6.1.7601.17592 -> 7601)
+}
+
+func (k *VersionInfo) String() string {
+	return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi)
+}
+
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() (*VersionInfo, error) {
+
+	KVI := &VersionInfo{"Unknown", 0, 0, 0}
+
+	k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+	if err != nil {
+		return KVI, err
+	}
+	defer k.Close()
+
+	blex, _, err := k.GetStringValue("BuildLabEx")
+	if err != nil {
+		return KVI, err
+	}
+	KVI.kvi = blex
+
+	// Important - docker.exe MUST be manifested for this API to return
+	// the correct information.
+ dwVersion, err := windows.GetVersion() + if err != nil { + return KVI, err + } + + KVI.major = int(dwVersion & 0xFF) + KVI.minor = int((dwVersion & 0XFF00) >> 8) + KVI.build = int((dwVersion & 0xFFFF0000) >> 16) + + return KVI, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go similarity index 83% rename from vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go index e913fad001..212ff4502b 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_linux.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_linux.go @@ -1,4 +1,4 @@ -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import "golang.org/x/sys/unix" diff --git a/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go new file mode 100644 index 0000000000..b2139b60e8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_solaris.go @@ -0,0 +1,14 @@ +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" + +import ( + "golang.org/x/sys/unix" +) + +func uname() (*unix.Utsname, error) { + uts := &unix.Utsname{} + + if err := unix.Uname(uts); err != nil { + return nil, err + } + return uts, nil +} diff --git a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go similarity index 78% rename from vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go rename to vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go index 1da3f239fa..97906e4cd7 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/kernel/uname_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/parsers/kernel/uname_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!solaris +// +build !linux -package kernel +package kernel // import "github.com/docker/docker/pkg/parsers/kernel" import ( "errors" diff --git a/vendor/github.com/containers/storage/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go similarity index 96% rename from vendor/github.com/containers/storage/pkg/parsers/parsers.go rename to vendor/github.com/docker/docker/pkg/parsers/parsers.go index acc897168f..c4186a4c0a 100644 --- a/vendor/github.com/containers/storage/pkg/parsers/parsers.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -1,7 +1,7 @@ // Package parsers provides helper functions to parse and validate different type // of string. It can be hosts, unix addresses, tcp addresses, filters, kernel // operating system versions. 
-package parsers +package parsers // import "github.com/docker/docker/pkg/parsers" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/plugingetter/getter.go b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go new file mode 100644 index 0000000000..0e1699d913 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugingetter/getter.go @@ -0,0 +1,37 @@ +package plugingetter // import "github.com/docker/docker/pkg/plugingetter" + +import ( + "github.com/docker/docker/pkg/plugins" +) + +const ( + // Lookup doesn't update RefCount + Lookup = 0 + // Acquire increments RefCount + Acquire = 1 + // Release decrements RefCount + Release = -1 +) + +// CompatPlugin is an abstraction to handle both v2(new) and v1(legacy) plugins. +type CompatPlugin interface { + Client() *plugins.Client + Name() string + ScopedPath(string) string + IsV1() bool +} + +// CountedPlugin is a plugin which is reference counted. +type CountedPlugin interface { + Acquire() + Release() + CompatPlugin +} + +// PluginGetter is the interface implemented by Store +type PluginGetter interface { + Get(name, capability string, mode int) (CompatPlugin, error) + GetAllByCap(capability string) ([]CompatPlugin, error) + GetAllManagedPluginsByCap(capability string) []CompatPlugin + Handle(capability string, callback func(string, *plugins.Client)) +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go new file mode 100644 index 0000000000..0353305358 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -0,0 +1,242 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "bytes" + "context" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/plugins/transport" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +const ( + defaultTimeOut = 30 +) + +func newTransport(addr string, tlsConfig *tlsconfig.Options) (transport.Transport, error) { + tr := &http.Transport{} + + if tlsConfig != nil { + c, err := tlsconfig.Client(*tlsConfig) + if err != nil { + return nil, err + } + tr.TLSClientConfig = c + } + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + socket := u.Host + if socket == "" { + // valid local socket addresses have the host empty. + socket = u.Path + } + if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { + return nil, err + } + scheme := httpScheme(u) + + return transport.NewHTTPTransport(tr, scheme, socket), nil +} + +// NewClient creates a new plugin client (http). +func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, 0), nil +} + +// NewClientWithTimeout creates a new plugin client (http). +func NewClientWithTimeout(addr string, tlsConfig *tlsconfig.Options, timeout time.Duration) (*Client, error) { + clientTransport, err := newTransport(addr, tlsConfig) + if err != nil { + return nil, err + } + return newClientWithTransport(clientTransport, timeout), nil +} + +// newClientWithTransport creates a new plugin client with a given transport. 
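+//
+// Both exported constructors above funnel into this helper. A rough usage
+// sketch (an editor's illustration; the socket path is hypothetical):
+//
+//	c, err := NewClient("unix:///run/docker/plugins/example.sock", nil)
+//	if err != nil {
+//		return err
+//	}
+//	var m Manifest // reply type depends on the service method called
+//	err = c.Call("Plugin.Activate", nil, &m)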
+func newClientWithTransport(tr transport.Transport, timeout time.Duration) *Client { + return &Client{ + http: &http.Client{ + Transport: tr, + Timeout: timeout, + }, + requestFactory: tr, + } +} + +// Client represents a plugin client. +type Client struct { + http *http.Client // http client to use + requestFactory transport.RequestFactory +} + +// RequestOpts is the set of options that can be passed into a request +type RequestOpts struct { + Timeout time.Duration +} + +// WithRequestTimeout sets a timeout duration for plugin requests +func WithRequestTimeout(t time.Duration) func(*RequestOpts) { + return func(o *RequestOpts) { + o.Timeout = t + } +} + +// Call calls the specified method with the specified arguments for the plugin. +// It will retry for 30 seconds if a failure occurs when calling. +func (c *Client) Call(serviceMethod string, args, ret interface{}) error { + return c.CallWithOptions(serviceMethod, args, ret) +} + +// CallWithOptions is just like call except it takes options +func (c *Client) CallWithOptions(serviceMethod string, args interface{}, ret interface{}, opts ...func(*RequestOpts)) error { + var buf bytes.Buffer + if args != nil { + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return err + } + } + body, err := c.callWithRetry(serviceMethod, &buf, true, opts...) + if err != nil { + return err + } + defer body.Close() + if ret != nil { + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + } + return nil +} + +// Stream calls the specified method with the specified arguments for the plugin and returns the response body +func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(args); err != nil { + return nil, err + } + return c.callWithRetry(serviceMethod, &buf, true) +} + +// SendFile calls the specified method, and passes through the IO stream +func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { + body, err := c.callWithRetry(serviceMethod, data, true) + if err != nil { + return err + } + defer body.Close() + if err := json.NewDecoder(body).Decode(&ret); err != nil { + logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) + return err + } + return nil +} + +func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool, reqOpts ...func(*RequestOpts)) (io.ReadCloser, error) { + var retries int + start := time.Now() + + var opts RequestOpts + for _, o := range reqOpts { + o(&opts) + } + + for { + req, err := c.requestFactory.NewRequest(serviceMethod, data) + if err != nil { + return nil, err + } + + cancelRequest := func() {} + if opts.Timeout > 0 { + var ctx context.Context + ctx, cancelRequest = context.WithTimeout(req.Context(), opts.Timeout) + req = req.WithContext(ctx) + } + + resp, err := c.http.Do(req) + if err != nil { + cancelRequest() + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) + time.Sleep(timeOff) + continue + } + + if resp.StatusCode != http.StatusOK { + b, err := ioutil.ReadAll(resp.Body) + resp.Body.Close() + cancelRequest() + if err != nil { + return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} + } + + // Plugins' Response(s) should have an Err field 
indicating what went + // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just + // return the string(body) + type responseErr struct { + Err string + } + remoteErr := responseErr{} + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} + } + } + // old way... + return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} + } + return ioutils.NewReadCloserWrapper(resp.Body, func() error { + err := resp.Body.Close() + cancelRequest() + return err + }), nil + } +} + +func backoff(retries int) time.Duration { + b, max := 1, defaultTimeOut + for b < max && retries > 0 { + b *= 2 + retries-- + } + if b > max { + b = max + } + return time.Duration(b) * time.Second +} + +func abort(start time.Time, timeOff time.Duration) bool { + return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second +} + +func httpScheme(u *url.URL) string { + scheme := u.Scheme + if scheme != "https" { + scheme = "http" + } + return scheme +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go new file mode 100644 index 0000000000..4b79bd29ad --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -0,0 +1,154 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/pkg/errors" +) + +var ( + // ErrNotFound plugin not found + ErrNotFound = errors.New("plugin not found") + socketsPath = "/run/docker/plugins" +) + +// localRegistry defines a registry that is local (using unix socket). +type localRegistry struct{} + +func newLocalRegistry() localRegistry { + return localRegistry{} +} + +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + dirEntries, err := ioutil.ReadDir(socketsPath) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + fi, err = os.Stat(filepath.Join(socketsPath, fi.Name(), fi.Name()+".sock")) + if err != nil { + continue + } + } + + if fi.Mode()&os.ModeSocket != 0 { + names = append(names, strings.TrimSuffix(filepath.Base(fi.Name()), filepath.Ext(fi.Name()))) + } + } + + for _, p := range specsPaths { + dirEntries, err := ioutil.ReadDir(p) + if err != nil && !os.IsNotExist(err) { + return nil, errors.Wrap(err, "error reading dir entries") + } + + for _, fi := range dirEntries { + if fi.IsDir() { + infos, err := ioutil.ReadDir(filepath.Join(p, fi.Name())) + if err != nil { + continue + } + + for _, info := range infos { + if strings.TrimSuffix(info.Name(), filepath.Ext(info.Name())) == fi.Name() { + fi = info + break + } + } + } + + ext := filepath.Ext(fi.Name()) + switch ext { + case ".spec", ".json": + plugin := strings.TrimSuffix(fi.Name(), ext) + names = append(names, plugin) + default: + } + } + } + return names, nil +} + +// Plugin returns the plugin registered with the given name (or returns an error). 
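+//
+// For example (an editor's sketch; the plugin name is hypothetical), a plugin
+// named "example" could ship a spec file /etc/docker/plugins/example.spec
+// whose entire content is a single address line:
+//
+//	unix:///run/docker/plugins/example.sock
+//
+// and a lookup would then be:
+//
+//	r := newLocalRegistry()
+//	p, err := r.Plugin("example") // probes *.sock paths first, then *.spec / *.json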
+func (l *localRegistry) Plugin(name string) (*Plugin, error) { + socketpaths := pluginPaths(socketsPath, name, ".sock") + + for _, p := range socketpaths { + if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { + return NewLocalPlugin(name, "unix://"+p), nil + } + } + + var txtspecpaths []string + for _, p := range specsPaths { + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) + txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) + } + + for _, p := range txtspecpaths { + if _, err := os.Stat(p); err == nil { + if strings.HasSuffix(p, ".json") { + return readPluginJSONInfo(name, p) + } + return readPluginInfo(name, p) + } + } + return nil, errors.Wrapf(ErrNotFound, "could not find plugin %s in v1 plugin registry", name) +} + +func readPluginInfo(name, path string) (*Plugin, error) { + content, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + addr := strings.TrimSpace(string(content)) + + u, err := url.Parse(addr) + if err != nil { + return nil, err + } + + if len(u.Scheme) == 0 { + return nil, fmt.Errorf("Unknown protocol") + } + + return NewLocalPlugin(name, addr), nil +} + +func readPluginJSONInfo(name, path string) (*Plugin, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + var p Plugin + if err := json.NewDecoder(f).Decode(&p); err != nil { + return nil, err + } + p.name = name + if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { + p.TLSConfig.InsecureSkipVerify = true + } + p.activateWait = sync.NewCond(&sync.Mutex{}) + + return &p, nil +} + +func pluginPaths(base, name, ext string) []string { + return []string{ + filepath.Join(base, name+ext), + filepath.Join(base, name, name+ext), + } +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go new file mode 100644 index 0000000000..58058f2828 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_unix.go @@ -0,0 +1,5 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +var specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go new file mode 100644 index 0000000000..f0af3477f4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery_windows.go @@ -0,0 +1,8 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "os" + "path/filepath" +) + +var specsPaths = []string{filepath.Join(os.Getenv("programdata"), "docker", "plugins")} diff --git a/vendor/github.com/docker/docker/pkg/plugins/errors.go b/vendor/github.com/docker/docker/pkg/plugins/errors.go new file mode 100644 index 0000000000..6735c304bf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/errors.go @@ -0,0 +1,33 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +import ( + "fmt" + "net/http" +) + +type statusError struct { + status int + method string + err string +} + +// Error returns a formatted string for this error type +func (e *statusError) Error() string { + return fmt.Sprintf("%s: %v", e.method, e.err) +} + +// IsNotFound indicates if the passed in error is from an http.StatusNotFound from the plugin +func IsNotFound(err error) bool { + return isStatusError(err, http.StatusNotFound) +} + +func isStatusError(err error, status int) bool { + if err == nil { + return 
false
+	}
+	e, ok := err.(*statusError)
+	if !ok {
+		return false
+	}
+	return e.status == status
+}
diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
new file mode 100644
index 0000000000..3ee4720a19
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go
@@ -0,0 +1,329 @@
+// Package plugins provides structures and helper functions to manage Docker
+// plugins.
+//
+// Docker discovers plugins by looking for them in the plugin directory whenever
+// a user or container tries to use one by name. UNIX domain socket files must
+// be located under /run/docker/plugins, whereas spec files can be located
+// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled
+// by the Registry interface, which lets you list all plugins or get a plugin by
+// its name if it exists.
+//
+// The plugins need to implement an HTTP server and bind this to the UNIX socket
+// or the address specified in the spec files.
+// A handshake is sent at /Plugin.Activate, and plugins are expected to return
+// a Manifest with a list of Docker subsystems which this plugin implements.
+//
+// In order to use a plugin, you can use the ``Get`` function with the name of the
+// plugin and the subsystem it implements.
+//
+//	plugin, err := plugins.Get("example", "VolumeDriver")
+//	if err != nil {
+//		return fmt.Errorf("Error looking up volume plugin example: %v", err)
+//	}
+package plugins // import "github.com/docker/docker/pkg/plugins"
+
+import (
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// ErrNotImplements is returned if the plugin does not implement the requested driver.
+	ErrNotImplements = errors.New("Plugin does not implement the requested driver")
+)
+
+type plugins struct {
+	sync.Mutex
+	plugins map[string]*Plugin
+}
+
+type extpointHandlers struct {
+	sync.RWMutex
+	extpointHandlers map[string][]func(string, *Client)
+}
+
+var (
+	storage  = plugins{plugins: make(map[string]*Plugin)}
+	handlers = extpointHandlers{extpointHandlers: make(map[string][]func(string, *Client))}
+)
+
+// Manifest lists what a plugin implements.
+type Manifest struct {
+	// List of subsystem the plugin implements.
+	Implements []string
+}
+
+// Plugin is the definition of a docker plugin.
+type Plugin struct {
+	// Name of the plugin
+	name string
+	// Address of the plugin
+	Addr string
+	// TLS configuration of the plugin
+	TLSConfig *tlsconfig.Options
+	// Client attached to the plugin
+	client *Client
+	// Manifest of the plugin (see above)
+	Manifest *Manifest `json:"-"`
+
+	// wait for activation to finish
+	activateWait *sync.Cond
+	// error produced by activation
+	activateErr error
+	// keeps track of callback handlers run against this plugin
+	handlersRun bool
+}
+
+// Name returns the name of the plugin.
+func (p *Plugin) Name() string {
+	return p.name
+}
+
+// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
+func (p *Plugin) Client() *Client {
+	return p.client
+}
+
+// IsV1 returns true for V1 plugins and false otherwise.
+func (p *Plugin) IsV1() bool {
+	return true
+}
+
+// NewLocalPlugin creates a new local plugin.
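+//
+// A rough sketch of its use (an editor's illustration; the name and socket
+// path are hypothetical):
+//
+//	p := NewLocalPlugin("example", "unix:///run/docker/plugins/example.sock")
+//	err := p.activate() // dials the address and calls Plugin.Activate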
+func NewLocalPlugin(name, addr string) *Plugin { + return &Plugin{ + name: name, + Addr: addr, + // TODO: change to nil + TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, + activateWait: sync.NewCond(&sync.Mutex{}), + } +} + +func (p *Plugin) activate() error { + p.activateWait.L.Lock() + + if p.activated() { + p.runHandlers() + p.activateWait.L.Unlock() + return p.activateErr + } + + p.activateErr = p.activateWithLock() + + p.runHandlers() + p.activateWait.L.Unlock() + p.activateWait.Broadcast() + return p.activateErr +} + +// runHandlers runs the registered handlers for the implemented plugin types +// This should only be run after activation, and while the activation lock is held. +func (p *Plugin) runHandlers() { + if !p.activated() { + return + } + + handlers.RLock() + if !p.handlersRun { + for _, iface := range p.Manifest.Implements { + hdlrs, handled := handlers.extpointHandlers[iface] + if !handled { + continue + } + for _, handler := range hdlrs { + handler(p.name, p.client) + } + } + p.handlersRun = true + } + handlers.RUnlock() + +} + +// activated returns if the plugin has already been activated. +// This should only be called with the activation lock held +func (p *Plugin) activated() bool { + return p.Manifest != nil +} + +func (p *Plugin) activateWithLock() error { + c, err := NewClient(p.Addr, p.TLSConfig) + if err != nil { + return err + } + p.client = c + + m := new(Manifest) + if err = p.client.Call("Plugin.Activate", nil, m); err != nil { + return err + } + + p.Manifest = m + return nil +} + +func (p *Plugin) waitActive() error { + p.activateWait.L.Lock() + for !p.activated() && p.activateErr == nil { + p.activateWait.Wait() + } + p.activateWait.L.Unlock() + return p.activateErr +} + +func (p *Plugin) implements(kind string) bool { + if p.Manifest == nil { + return false + } + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + +func load(name string) (*Plugin, error) { + return loadWithRetry(name, true) +} + +func loadWithRetry(name string, retry bool) (*Plugin, error) { + registry := newLocalRegistry() + start := time.Now() + + var retries int + for { + pl, err := registry.Plugin(name) + if err != nil { + if !retry { + return nil, err + } + + timeOff := backoff(retries) + if abort(start, timeOff) { + return nil, err + } + retries++ + logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) + time.Sleep(timeOff) + continue + } + + storage.Lock() + if pl, exists := storage.plugins[name]; exists { + storage.Unlock() + return pl, pl.activate() + } + storage.plugins[name] = pl + storage.Unlock() + + err = pl.activate() + + if err != nil { + storage.Lock() + delete(storage.plugins, name) + storage.Unlock() + } + + return pl, err + } +} + +func get(name string) (*Plugin, error) { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + return pl, pl.activate() + } + return load(name) +} + +// Get returns the plugin given the specified name and requested implementation. +func Get(name, imp string) (*Plugin, error) { + pl, err := get(name) + if err != nil { + return nil, err + } + if err := pl.waitActive(); err == nil && pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil + } + return nil, ErrNotImplements +} + +// Handle adds the specified function to the extpointHandlers. 
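+//
+// For example (an editor's sketch; the callback body is illustrative), a
+// subsystem can be notified of every activated volume plugin:
+//
+//	plugins.Handle("VolumeDriver", func(name string, c *plugins.Client) {
+//		logrus.Debugf("volume plugin %s activated", name)
+//	})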
+func Handle(iface string, fn func(string, *Client)) { + handlers.Lock() + hdlrs, ok := handlers.extpointHandlers[iface] + if !ok { + hdlrs = []func(string, *Client){} + } + + hdlrs = append(hdlrs, fn) + handlers.extpointHandlers[iface] = hdlrs + + storage.Lock() + for _, p := range storage.plugins { + p.activateWait.L.Lock() + if p.activated() && p.implements(iface) { + p.handlersRun = false + } + p.activateWait.L.Unlock() + } + storage.Unlock() + + handlers.Unlock() +} + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan *plLoad, len(pluginNames)) + var wg sync.WaitGroup + for _, name := range pluginNames { + storage.Lock() + pl, ok := storage.plugins[name] + storage.Unlock() + if ok { + chPl <- &plLoad{pl, nil} + continue + } + + wg.Add(1) + go func(name string) { + defer wg.Done() + pl, err := loadWithRetry(name, false) + chPl <- &plLoad{pl, err} + }(name) + } + + wg.Wait() + close(chPl) + + var out []*Plugin + for pl := range chPl { + if pl.err != nil { + logrus.Error(pl.err) + continue + } + if err := pl.pl.waitActive(); err == nil && pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go new file mode 100644 index 0000000000..cdfbe93458 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go new file mode 100644 index 0000000000..ddf1d786c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins_windows.go @@ -0,0 +1,7 @@ +package plugins // import "github.com/docker/docker/pkg/plugins" + +// ScopedPath returns the path scoped to the plugin's rootfs. +// For v1 plugins, this always returns the path unchanged as v1 plugins run directly on the host. +func (p *Plugin) ScopedPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/http.go b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go new file mode 100644 index 0000000000..76d3bdb712 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/http.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" +) + +// httpTransport holds an http.RoundTripper +// and information about the scheme and address the transport +// sends request to. +type httpTransport struct { + http.RoundTripper + scheme string + addr string +} + +// NewHTTPTransport creates a new httpTransport. +func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { + return httpTransport{ + RoundTripper: r, + scheme: scheme, + addr: addr, + } +} + +// NewRequest creates a new http.Request and sets the URL +// scheme and address with the transport's fields. 
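+//
+// A minimal sketch (an editor's illustration; the address and payload are
+// hypothetical):
+//
+//	t := NewHTTPTransport(&http.Transport{}, "http", "example.sock")
+//	req, err := t.NewRequest("/Plugin.Activate", bytes.NewReader(payload))
+//	// req is a POST to http://example.sock/Plugin.Activate with the
+//	// plugin Accept header already set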
+func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { + req, err := newHTTPRequest(path, data) + if err != nil { + return nil, err + } + req.URL.Scheme = t.scheme + req.URL.Host = t.addr + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go new file mode 100644 index 0000000000..9cb13335a8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/plugins/transport/transport.go @@ -0,0 +1,36 @@ +package transport // import "github.com/docker/docker/pkg/plugins/transport" + +import ( + "io" + "net/http" + "strings" +) + +// VersionMimetype is the Content-Type the engine sends to plugins. +const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" + +// RequestFactory defines an interface that +// transports can implement to create new requests. +type RequestFactory interface { + NewRequest(path string, data io.Reader) (*http.Request, error) +} + +// Transport defines an interface that plugin transports +// must implement. +type Transport interface { + http.RoundTripper + RequestFactory +} + +// newHTTPRequest creates a new request with a path and a body. +func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + req, err := http.NewRequest("POST", path, data) + if err != nil { + return nil, err + } + req.Header.Add("Accept", VersionMimetype) + return req, nil +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progress.go b/vendor/github.com/docker/docker/pkg/progress/progress.go new file mode 100644 index 0000000000..9aea591954 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progress.go @@ -0,0 +1,89 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "fmt" +) + +// Progress represents the progress of a transfer. +type Progress struct { + ID string + + // Progress contains a Message or... + Message string + + // ...progress of an action + Action string + Current int64 + Total int64 + + // If true, don't show xB/yB + HideCounts bool + // If not empty, use units instead of bytes for counts + Units string + + // Aux contains extra information not presented to the user, such as + // digests for push signing. + Aux interface{} + + LastUpdate bool +} + +// Output is an interface for writing progress information. It's +// like a writer for progress, but we don't call it Writer because +// that would be confusing next to ProgressReader (also, because it +// doesn't implement the io.Writer interface). +type Output interface { + WriteProgress(Progress) error +} + +type chanOutput chan<- Progress + +func (out chanOutput) WriteProgress(p Progress) error { + out <- p + return nil +} + +// ChanOutput returns an Output that writes progress updates to the +// supplied channel. +func ChanOutput(progressChan chan<- Progress) Output { + return chanOutput(progressChan) +} + +type discardOutput struct{} + +func (discardOutput) WriteProgress(Progress) error { + return nil +} + +// DiscardOutput returns an Output that discards progress +func DiscardOutput() Output { + return discardOutput{} +} + +// Update is a convenience function to write a progress update to the channel. +func Update(out Output, id, action string) { + out.WriteProgress(Progress{ID: id, Action: action}) +} + +// Updatef is a convenience function to write a printf-formatted progress update +// to the channel. 
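+//
+// A short sketch of the channel-backed plumbing these helpers assume (an
+// editor's illustration; the ID and action strings are arbitrary):
+//
+//	ch := make(chan Progress, 100)
+//	out := ChanOutput(ch)
+//	Update(out, "layer0", "Downloading")
+//	Updatef(out, "layer0", "Downloaded %d%%", 42)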
+func Updatef(out Output, id, format string, a ...interface{}) { + Update(out, id, fmt.Sprintf(format, a...)) +} + +// Message is a convenience function to write a progress message to the channel. +func Message(out Output, id, message string) { + out.WriteProgress(Progress{ID: id, Message: message}) +} + +// Messagef is a convenience function to write a printf-formatted progress +// message to the channel. +func Messagef(out Output, id, format string, a ...interface{}) { + Message(out, id, fmt.Sprintf(format, a...)) +} + +// Aux sends auxiliary information over a progress interface, which will not be +// formatted for the UI. This is used for things such as push signing. +func Aux(out Output, a interface{}) { + out.WriteProgress(Progress{Aux: a}) +} diff --git a/vendor/github.com/docker/docker/pkg/progress/progressreader.go b/vendor/github.com/docker/docker/pkg/progress/progressreader.go new file mode 100644 index 0000000000..7ca07dc640 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/progress/progressreader.go @@ -0,0 +1,66 @@ +package progress // import "github.com/docker/docker/pkg/progress" + +import ( + "io" + "time" + + "golang.org/x/time/rate" +) + +// Reader is a Reader with progress bar. +type Reader struct { + in io.ReadCloser // Stream to read from + out Output // Where to send progress bar to + size int64 + current int64 + lastUpdate int64 + id string + action string + rateLimiter *rate.Limiter +} + +// NewProgressReader creates a new ProgressReader. +func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { + return &Reader{ + in: in, + out: out, + size: size, + id: id, + action: action, + rateLimiter: rate.NewLimiter(rate.Every(100*time.Millisecond), 1), + } +} + +func (p *Reader) Read(buf []byte) (n int, err error) { + read, err := p.in.Read(buf) + p.current += int64(read) + updateEvery := int64(1024 * 512) //512kB + if p.size > 0 { + // Update progress for every 1% read if 1% < 512kB + if increment := int64(0.01 * float64(p.size)); increment < updateEvery { + updateEvery = increment + } + } + if p.current-p.lastUpdate > updateEvery || err != nil { + p.updateProgress(err != nil && read == 0) + p.lastUpdate = p.current + } + + return read, err +} + +// Close closes the progress reader and its underlying reader. +func (p *Reader) Close() error { + if p.current < p.size { + // print a full progress bar when closing prematurely + p.current = p.size + p.updateProgress(false) + } + return p.in.Close() +} + +func (p *Reader) updateProgress(last bool) { + if last || p.current == p.size || p.rateLimiter.Allow() { + p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) + } +} diff --git a/vendor/github.com/docker/docker/pkg/pubsub/publisher.go b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go new file mode 100644 index 0000000000..76033ed9e4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pubsub/publisher.go @@ -0,0 +1,121 @@ +package pubsub // import "github.com/docker/docker/pkg/pubsub" + +import ( + "sync" + "time" +) + +var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} + +// NewPublisher creates a new pub/sub publisher to broadcast messages. +// The duration is used as the send timeout as to not block the publisher publishing +// messages to other clients if one client is slow or unresponsive. +// The buffer is used when creating new channels for subscribers. 
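+//
+// A minimal usage sketch (an editor's illustration):
+//
+//	p := NewPublisher(100*time.Millisecond, 10)
+//	sub := p.Subscribe()
+//	go p.Publish("hello")
+//	msg := <-sub // "hello"
+//	p.Evict(sub)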
+func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { + return &Publisher{ + buffer: buffer, + timeout: publishTimeout, + subscribers: make(map[subscriber]topicFunc), + } +} + +type subscriber chan interface{} +type topicFunc func(v interface{}) bool + +// Publisher is basic pub/sub structure. Allows to send events and subscribe +// to them. Can be safely used from multiple goroutines. +type Publisher struct { + m sync.RWMutex + buffer int + timeout time.Duration + subscribers map[subscriber]topicFunc +} + +// Len returns the number of subscribers for the publisher +func (p *Publisher) Len() int { + p.m.RLock() + i := len(p.subscribers) + p.m.RUnlock() + return i +} + +// Subscribe adds a new subscriber to the publisher returning the channel. +func (p *Publisher) Subscribe() chan interface{} { + return p.SubscribeTopic(nil) +} + +// SubscribeTopic adds a new subscriber that filters messages sent by a topic. +func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { + ch := make(chan interface{}, p.buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// SubscribeTopicWithBuffer adds a new subscriber that filters messages sent by a topic. +// The returned channel has a buffer of the specified size. +func (p *Publisher) SubscribeTopicWithBuffer(topic topicFunc, buffer int) chan interface{} { + ch := make(chan interface{}, buffer) + p.m.Lock() + p.subscribers[ch] = topic + p.m.Unlock() + return ch +} + +// Evict removes the specified subscriber from receiving any more messages. +func (p *Publisher) Evict(sub chan interface{}) { + p.m.Lock() + delete(p.subscribers, sub) + close(sub) + p.m.Unlock() +} + +// Publish sends the data in v to all subscribers currently registered with the publisher. +func (p *Publisher) Publish(v interface{}) { + p.m.RLock() + if len(p.subscribers) == 0 { + p.m.RUnlock() + return + } + + wg := wgPool.Get().(*sync.WaitGroup) + for sub, topic := range p.subscribers { + wg.Add(1) + go p.sendTopic(sub, topic, v, wg) + } + wg.Wait() + wgPool.Put(wg) + p.m.RUnlock() +} + +// Close closes the channels to all subscribers registered with the publisher. 
+func (p *Publisher) Close() {
+	p.m.Lock()
+	for sub := range p.subscribers {
+		delete(p.subscribers, sub)
+		close(sub)
+	}
+	p.m.Unlock()
+}
+
+func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) {
+	defer wg.Done()
+	if topic != nil && !topic(v) {
+		return
+	}
+
+	// send under a select as to not block if the receiver is unavailable
+	if p.timeout > 0 {
+		select {
+		case sub <- v:
+		case <-time.After(p.timeout):
+		}
+		return
+	}
+
+	select {
+	case sub <- v:
+	default:
+	}
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
similarity index 90%
rename from vendor/github.com/containers/storage/pkg/reexec/command_linux.go
rename to vendor/github.com/docker/docker/pkg/reexec/command_linux.go
index 05319eacc9..efea71794f 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go
@@ -1,6 +1,4 @@
-// +build linux
-
-package reexec
+package reexec // import "github.com/docker/docker/pkg/reexec"
 
 import (
 	"os/exec"
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
similarity index 82%
rename from vendor/github.com/containers/storage/pkg/reexec/command_unix.go
rename to vendor/github.com/docker/docker/pkg/reexec/command_unix.go
index 778a720e3b..ceaabbdeee 100644
--- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go
@@ -1,6 +1,6 @@
-// +build freebsd solaris darwin
+// +build freebsd darwin
 
-package reexec
+package reexec // import "github.com/docker/docker/pkg/reexec"
 
 import (
 	"os/exec"
diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
new file mode 100644
index 0000000000..09fb4b2d29
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
@@ -0,0 +1,12 @@
+// +build !linux,!windows,!freebsd,!darwin
+
+package reexec // import "github.com/docker/docker/pkg/reexec"
+
+import (
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD and Darwin.
+func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go similarity index 86% rename from vendor/github.com/containers/storage/pkg/reexec/command_windows.go rename to vendor/github.com/docker/docker/pkg/reexec/command_windows.go index ca871c4227..438226890f 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "os/exec" diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go similarity index 94% rename from vendor/github.com/containers/storage/pkg/reexec/reexec.go rename to vendor/github.com/docker/docker/pkg/reexec/reexec.go index c56671d919..f8ccddd599 100644 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -1,4 +1,4 @@ -package reexec +package reexec // import "github.com/docker/docker/pkg/reexec" import ( "fmt" diff --git a/vendor/github.com/docker/docker/pkg/signal/signal.go b/vendor/github.com/docker/docker/pkg/signal/signal.go new file mode 100644 index 0000000000..6a663091a1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal.go @@ -0,0 +1,54 @@ +// Package signal provides helper functions for dealing with signals across +// various operating systems. +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" +) + +// CatchAll catches all signals and relays them to the specified channel. +func CatchAll(sigc chan os.Signal) { + handledSigs := []os.Signal{} + for _, s := range SignalMap { + handledSigs = append(handledSigs, s) + } + signal.Notify(sigc, handledSigs...) +} + +// StopCatch stops catching the signals and closes the specified channel. +func StopCatch(sigc chan os.Signal) { + signal.Stop(sigc) + close(sigc) +} + +// ParseSignal translates a string to a valid syscall signal. +// It returns an error if the signal map doesn't include the given signal. +func ParseSignal(rawSignal string) (syscall.Signal, error) { + s, err := strconv.Atoi(rawSignal) + if err == nil { + if s == 0 { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return syscall.Signal(s), nil + } + signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] + if !ok { + return -1, fmt.Errorf("Invalid signal: %s", rawSignal) + } + return signal, nil +} + +// ValidSignalForPlatform returns true if a signal is valid on the platform +func ValidSignalForPlatform(sig syscall.Signal) bool { + for _, v := range SignalMap { + if v == sig { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go new file mode 100644 index 0000000000..ee5501e3d9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_darwin.go @@ -0,0 +1,41 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// SignalMap is a map of Darwin signals. 
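+//
+// Keys are the signal names without the "SIG" prefix; ParseSignal (in
+// signal.go) strips that prefix before the lookup, so (an editor's note)
+// both of these resolve to syscall.SIGTERM:
+//
+//	sig, _ := signal.ParseSignal("SIGTERM")
+//	sig, _ = signal.ParseSignal("TERM")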
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
new file mode 100644
index 0000000000..764f90e264
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_freebsd.go
@@ -0,0 +1,43 @@
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+)
+
+// SignalMap is a map of FreeBSD signals.
+var SignalMap = map[string]syscall.Signal{
+	"ABRT":   syscall.SIGABRT,
+	"ALRM":   syscall.SIGALRM,
+	"BUS":    syscall.SIGBUS,
+	"CHLD":   syscall.SIGCHLD,
+	"CONT":   syscall.SIGCONT,
+	"EMT":    syscall.SIGEMT,
+	"FPE":    syscall.SIGFPE,
+	"HUP":    syscall.SIGHUP,
+	"ILL":    syscall.SIGILL,
+	"INFO":   syscall.SIGINFO,
+	"INT":    syscall.SIGINT,
+	"IO":     syscall.SIGIO,
+	"IOT":    syscall.SIGIOT,
+	"KILL":   syscall.SIGKILL,
+	"LWP":    syscall.SIGLWP,
+	"PIPE":   syscall.SIGPIPE,
+	"PROF":   syscall.SIGPROF,
+	"QUIT":   syscall.SIGQUIT,
+	"SEGV":   syscall.SIGSEGV,
+	"STOP":   syscall.SIGSTOP,
+	"SYS":    syscall.SIGSYS,
+	"TERM":   syscall.SIGTERM,
+	"THR":    syscall.SIGTHR,
+	"TRAP":   syscall.SIGTRAP,
+	"TSTP":   syscall.SIGTSTP,
+	"TTIN":   syscall.SIGTTIN,
+	"TTOU":   syscall.SIGTTOU,
+	"URG":    syscall.SIGURG,
+	"USR1":   syscall.SIGUSR1,
+	"USR2":   syscall.SIGUSR2,
+	"VTALRM": syscall.SIGVTALRM,
+	"WINCH":  syscall.SIGWINCH,
+	"XCPU":   syscall.SIGXCPU,
+	"XFSZ":   syscall.SIGXFSZ,
+}
diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_linux.go b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
new file mode 100644
index 0000000000..caed97c963
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/signal/signal_linux.go
@@ -0,0 +1,81 @@
+package signal // import "github.com/docker/docker/pkg/signal"
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	sigrtmin = 34
+	sigrtmax = 64
+)
+
+// SignalMap is a map of Linux signals.
+var SignalMap = map[string]syscall.Signal{ + "ABRT": unix.SIGABRT, + "ALRM": unix.SIGALRM, + "BUS": unix.SIGBUS, + "CHLD": unix.SIGCHLD, + "CLD": unix.SIGCLD, + "CONT": unix.SIGCONT, + "FPE": unix.SIGFPE, + "HUP": unix.SIGHUP, + "ILL": unix.SIGILL, + "INT": unix.SIGINT, + "IO": unix.SIGIO, + "IOT": unix.SIGIOT, + "KILL": unix.SIGKILL, + "PIPE": unix.SIGPIPE, + "POLL": unix.SIGPOLL, + "PROF": unix.SIGPROF, + "PWR": unix.SIGPWR, + "QUIT": unix.SIGQUIT, + "SEGV": unix.SIGSEGV, + "STKFLT": unix.SIGSTKFLT, + "STOP": unix.SIGSTOP, + "SYS": unix.SIGSYS, + "TERM": unix.SIGTERM, + "TRAP": unix.SIGTRAP, + "TSTP": unix.SIGTSTP, + "TTIN": unix.SIGTTIN, + "TTOU": unix.SIGTTOU, + "URG": unix.SIGURG, + "USR1": unix.SIGUSR1, + "USR2": unix.SIGUSR2, + "VTALRM": unix.SIGVTALRM, + "WINCH": unix.SIGWINCH, + "XCPU": unix.SIGXCPU, + "XFSZ": unix.SIGXFSZ, + "RTMIN": sigrtmin, + "RTMIN+1": sigrtmin + 1, + "RTMIN+2": sigrtmin + 2, + "RTMIN+3": sigrtmin + 3, + "RTMIN+4": sigrtmin + 4, + "RTMIN+5": sigrtmin + 5, + "RTMIN+6": sigrtmin + 6, + "RTMIN+7": sigrtmin + 7, + "RTMIN+8": sigrtmin + 8, + "RTMIN+9": sigrtmin + 9, + "RTMIN+10": sigrtmin + 10, + "RTMIN+11": sigrtmin + 11, + "RTMIN+12": sigrtmin + 12, + "RTMIN+13": sigrtmin + 13, + "RTMIN+14": sigrtmin + 14, + "RTMIN+15": sigrtmin + 15, + "RTMAX-14": sigrtmax - 14, + "RTMAX-13": sigrtmax - 13, + "RTMAX-12": sigrtmax - 12, + "RTMAX-11": sigrtmax - 11, + "RTMAX-10": sigrtmax - 10, + "RTMAX-9": sigrtmax - 9, + "RTMAX-8": sigrtmax - 8, + "RTMAX-7": sigrtmax - 7, + "RTMAX-6": sigrtmax - 6, + "RTMAX-5": sigrtmax - 5, + "RTMAX-4": sigrtmax - 4, + "RTMAX-3": sigrtmax - 3, + "RTMAX-2": sigrtmax - 2, + "RTMAX-1": sigrtmax - 1, + "RTMAX": sigrtmax, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unix.go b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go new file mode 100644 index 0000000000..a2aa4248fa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unix.go @@ -0,0 +1,21 @@ +// +build !windows + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) + +const ( + // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. + SIGCHLD = syscall.SIGCHLD + // SIGWINCH is a signal sent to a process when its controlling terminal changes its size + SIGWINCH = syscall.SIGWINCH + // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading + SIGPIPE = syscall.SIGPIPE + // DefaultStopSignal is the syscall signal used to stop a container in unix systems. + DefaultStopSignal = "SIGTERM" +) diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go new file mode 100644 index 0000000000..1fd25a83c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!darwin,!freebsd,!windows + +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// SignalMap is an empty map of signals for unsupported platform. 
+var SignalMap = map[string]syscall.Signal{} diff --git a/vendor/github.com/docker/docker/pkg/signal/signal_windows.go b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go new file mode 100644 index 0000000000..65752f24aa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/signal_windows.go @@ -0,0 +1,26 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "syscall" +) + +// Signals used in cli/command (no windows equivalent, use +// invalid signals so they don't get handled) +const ( + SIGCHLD = syscall.Signal(0xff) + SIGWINCH = syscall.Signal(0xff) + SIGPIPE = syscall.Signal(0xff) + // DefaultStopSignal is the syscall signal used to stop a container in windows systems. + DefaultStopSignal = "15" +) + +// SignalMap is a map of "supported" signals. As per the comment in GOLang's +// ztypes_windows.go: "More invented values for signals". Windows doesn't +// really support signals in any way, shape or form that Unix does. +// +// We have these so that docker kill can be used to gracefully (TERM) and +// forcibly (KILL) terminate a container on Windows. +var SignalMap = map[string]syscall.Signal{ + "KILL": syscall.SIGKILL, + "TERM": syscall.SIGTERM, +} diff --git a/vendor/github.com/docker/docker/pkg/signal/trap.go b/vendor/github.com/docker/docker/pkg/signal/trap.go new file mode 100644 index 0000000000..2a6e69fb50 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/signal/trap.go @@ -0,0 +1,104 @@ +package signal // import "github.com/docker/docker/pkg/signal" + +import ( + "fmt" + "os" + gosignal "os/signal" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "syscall" + "time" + + "github.com/pkg/errors" +) + +// Trap sets up a simplified signal "trap", appropriate for common +// behavior expected from a vanilla unix command-line tool in general +// (and the Docker engine in particular). +// +// * If SIGINT or SIGTERM are received, `cleanup` is called, then the process is terminated. +// * If SIGINT or SIGTERM are received 3 times before cleanup is complete, then cleanup is +// skipped and the process is terminated immediately (allows force quit of stuck daemon) +// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. +// * Ignore SIGPIPE events. These are generated by systemd when journald is restarted while +// the docker daemon is not restarted and also running under systemd. +// Fixes https://github.com/docker/docker/issues/19728 +// +func Trap(cleanup func(), logger interface { + Info(args ...interface{}) +}) { + c := make(chan os.Signal, 1) + // we will handle INT, TERM, QUIT, SIGPIPE here + signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} + gosignal.Notify(c, signals...) 
+ go func() { + interruptCount := uint32(0) + for sig := range c { + if sig == syscall.SIGPIPE { + continue + } + + go func(sig os.Signal) { + logger.Info(fmt.Sprintf("Processing signal '%v'", sig)) + switch sig { + case os.Interrupt, syscall.SIGTERM: + if atomic.LoadUint32(&interruptCount) < 3 { + // Initiate the cleanup only once + if atomic.AddUint32(&interruptCount, 1) == 1 { + // Call the provided cleanup handler + cleanup() + os.Exit(0) + } else { + return + } + } else { + // 3 SIGTERM/INT signals received; force exit without cleanup + logger.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") + } + case syscall.SIGQUIT: + DumpStacks("") + logger.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") + } + //for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal # + os.Exit(128 + int(sig.(syscall.Signal))) + }(sig) + } + }() +} + +const stacksLogNameTemplate = "goroutine-stacks-%s.log" + +// DumpStacks appends the runtime stack into file in dir and returns full path +// to that file. +func DumpStacks(dir string) (string, error) { + var ( + buf []byte + stackSize int + ) + bufferLen := 16384 + for stackSize == len(buf) { + buf = make([]byte, bufferLen) + stackSize = runtime.Stack(buf, true) + bufferLen *= 2 + } + buf = buf[:stackSize] + var f *os.File + if dir != "" { + path := filepath.Join(dir, fmt.Sprintf(stacksLogNameTemplate, strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1))) + var err error + f, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0666) + if err != nil { + return "", errors.Wrap(err, "failed to open file to write the goroutine stacks") + } + defer f.Close() + defer f.Sync() + } else { + f = os.Stderr + } + if _, err := f.Write(buf); err != nil { + return "", errors.Wrap(err, "failed to write goroutine stacks") + } + return f.Name(), nil +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go new file mode 100644 index 0000000000..2b5e713040 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamformatter.go @@ -0,0 +1,159 @@ +// Package streamformatter provides helper functions to format a stream. +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/progress" +) + +const streamNewline = "\r\n" + +type jsonProgressFormatter struct{} + +func appendNewline(source []byte) []byte { + return append(source, []byte(streamNewline)...) +} + +// FormatStatus formats the specified objects according to the specified format (and id). +func FormatStatus(id, format string, a ...interface{}) []byte { + str := fmt.Sprintf(format, a...) + b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// FormatError formats the error as a JSON object +func FormatError(err error) []byte { + jsonError, ok := err.(*jsonmessage.JSONError) + if !ok { + jsonError = &jsonmessage.JSONError{Message: err.Error()} + } + if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { + return appendNewline(b) + } + return []byte(`{"error":"format error"}` + streamNewline) +} + +func (sf *jsonProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte { + return FormatStatus(id, format, a...) 
+}
+
+// formatProgress formats the progress information for a specified action.
+func (sf *jsonProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	var auxJSON *json.RawMessage
+	if aux != nil {
+		auxJSONBytes, err := json.Marshal(aux)
+		if err != nil {
+			return nil
+		}
+		auxJSON = new(json.RawMessage)
+		*auxJSON = auxJSONBytes
+	}
+	b, err := json.Marshal(&jsonmessage.JSONMessage{
+		Status:          action,
+		ProgressMessage: progress.String(),
+		Progress:        progress,
+		ID:              id,
+		Aux:             auxJSON,
+	})
+	if err != nil {
+		return nil
+	}
+	return appendNewline(b)
+}
+
+type rawProgressFormatter struct{}
+
+func (sf *rawProgressFormatter) formatStatus(id, format string, a ...interface{}) []byte {
+	return []byte(fmt.Sprintf(format, a...) + streamNewline)
+}
+
+func (sf *rawProgressFormatter) formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte {
+	if progress == nil {
+		progress = &jsonmessage.JSONProgress{}
+	}
+	endl := "\r"
+	if progress.String() == "" {
+		endl += "\n"
+	}
+	return []byte(action + " " + progress.String() + endl)
+}
+
+// NewProgressOutput returns a progress.Output object that can be passed to
+// progress.NewProgressReader.
+func NewProgressOutput(out io.Writer) progress.Output {
+	return &progressOutput{sf: &rawProgressFormatter{}, out: out, newLines: true}
+}
+
+// NewJSONProgressOutput returns a progress.Output that formats output
+// using JSON objects.
+func NewJSONProgressOutput(out io.Writer, newLines bool) progress.Output {
+	return &progressOutput{sf: &jsonProgressFormatter{}, out: out, newLines: newLines}
+}
+
+type formatProgress interface {
+	formatStatus(id, format string, a ...interface{}) []byte
+	formatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte
+}
+
+type progressOutput struct {
+	sf       formatProgress
+	out      io.Writer
+	newLines bool
+}
+
+// WriteProgress formats progress information from a ProgressReader.
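
// [Editor's aside, not part of the vendored diff] A hedged sketch of how the
// two formatters are consumed: wrap a writer, then push progress updates
// through it. The IDs and byte counts are fabricated; "os" is assumed to be
// imported where this runs.
func exampleProgressOutput() {
	out := NewProgressOutput(os.Stdout)
	_ = out.WriteProgress(progress.Progress{ID: "layer1", Action: "Downloading", Current: 512, Total: 1024})
	_ = out.WriteProgress(progress.Progress{ID: "layer1", Action: "Download complete", LastUpdate: true})
}
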
+func (out *progressOutput) WriteProgress(prog progress.Progress) error { + var formatted []byte + if prog.Message != "" { + formatted = out.sf.formatStatus(prog.ID, prog.Message) + } else { + jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total, HideCounts: prog.HideCounts, Units: prog.Units} + formatted = out.sf.formatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) + } + _, err := out.out.Write(formatted) + if err != nil { + return err + } + + if out.newLines && prog.LastUpdate { + _, err = out.out.Write(out.sf.formatStatus("", "")) + return err + } + + return nil +} + +// AuxFormatter is a streamFormatter that writes aux progress messages +type AuxFormatter struct { + io.Writer +} + +// Emit emits the given interface as an aux progress message +func (sf *AuxFormatter) Emit(aux interface{}) error { + auxJSONBytes, err := json.Marshal(aux) + if err != nil { + return err + } + auxJSON := new(json.RawMessage) + *auxJSON = auxJSONBytes + msgJSON, err := json.Marshal(&jsonmessage.JSONMessage{Aux: auxJSON}) + if err != nil { + return err + } + msgJSON = appendNewline(msgJSON) + n, err := sf.Writer.Write(msgJSON) + if n != len(msgJSON) { + return io.ErrShortWrite + } + return err +} diff --git a/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go new file mode 100644 index 0000000000..1473ed974a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/streamformatter/streamwriter.go @@ -0,0 +1,47 @@ +package streamformatter // import "github.com/docker/docker/pkg/streamformatter" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/pkg/jsonmessage" +) + +type streamWriter struct { + io.Writer + lineFormat func([]byte) string +} + +func (sw *streamWriter) Write(buf []byte) (int, error) { + formattedBuf := sw.format(buf) + n, err := sw.Writer.Write(formattedBuf) + if n != len(formattedBuf) { + return n, io.ErrShortWrite + } + return len(buf), err +} + +func (sw *streamWriter) format(buf []byte) []byte { + msg := &jsonmessage.JSONMessage{Stream: sw.lineFormat(buf)} + b, err := json.Marshal(msg) + if err != nil { + return FormatError(err) + } + return appendNewline(b) +} + +// NewStdoutWriter returns a writer which formats the output as json message +// representing stdout lines +func NewStdoutWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return string(buf) + }} +} + +// NewStderrWriter returns a writer which formats the output as json message +// representing stderr lines +func NewStderrWriter(out io.Writer) io.Writer { + return &streamWriter{Writer: out, lineFormat: func(buf []byte) string { + return "\033[91m" + string(buf) + "\033[0m" + }} +} diff --git a/vendor/github.com/containers/storage/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go similarity index 97% rename from vendor/github.com/containers/storage/pkg/stringid/stringid.go rename to vendor/github.com/docker/docker/pkg/stringid/stringid.go index a0c7c42a05..fa7d9166eb 100644 --- a/vendor/github.com/containers/storage/pkg/stringid/stringid.go +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -1,5 +1,5 @@ // Package stringid provides helper functions for dealing with string identifiers -package stringid +package stringid // import "github.com/docker/docker/pkg/stringid" import ( cryptorand "crypto/rand" diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs.go 
b/vendor/github.com/docker/docker/pkg/symlink/fs.go
new file mode 100644
index 0000000000..7b894cde73
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/symlink/fs.go
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE.BSD file.
+
+// This code is a modified version of path/filepath/symlink.go from the Go standard library.
+
+package symlink // import "github.com/docker/docker/pkg/symlink"
+
+import (
+	"bytes"
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
+// absolute path. This function handles paths in a platform-agnostic manner.
+func FollowSymlinkInScope(path, root string) (string, error) {
+	path, err := filepath.Abs(filepath.FromSlash(path))
+	if err != nil {
+		return "", err
+	}
+	root, err = filepath.Abs(filepath.FromSlash(root))
+	if err != nil {
+		return "", err
+	}
+	return evalSymlinksInScope(path, root)
+}
+
+// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
+// a result guaranteed to be contained within the scope `root`, at the time of the call.
+// Symlinks in `root` are not evaluated and left as-is.
+// Errors encountered while attempting to evaluate symlinks in path will be returned.
+// Non-existing paths are valid and do not constitute an error.
+// `path` has to contain `root` as a prefix, or else an error will be returned.
+// Trying to break out from `root` does not constitute an error.
+//
+// Example:
+//   If /foo/bar -> /outside,
+//   FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
+//
+// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
+// are created, and not to subsequently create additional symlinks that could make a
+// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
+// would return "/foo/bar". If one subsequently makes /foo/bar a symlink to /baz, then "/foo/bar" should
+// no longer be considered safely contained in "/foo".
+func evalSymlinksInScope(path, root string) (string, error) { + root = filepath.Clean(root) + if path == root { + return path, nil + } + if !strings.HasPrefix(path, root) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + const maxIter = 255 + originalPath := path + // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" + path = path[len(root):] + if root == string(filepath.Separator) { + path = string(filepath.Separator) + path + } + if !strings.HasPrefix(path, string(filepath.Separator)) { + return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) + } + path = filepath.Clean(path) + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + // b here will always be considered to be the "current absolute path inside + // root" when we append paths to it, we also append a slash and use + // filepath.Clean after the loop to trim the trailing slash + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) + } + + // find next path component, p + i := strings.IndexRune(path, filepath.Separator) + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + continue + } + + // this takes a b.String() like "b/../" and a p like "c" and turns it + // into "/b/../c" which then gets filepath.Cleaned into "/c" and then + // root gets prepended and we Clean again (to remove any trailing slash + // if the first Clean gave us just "/") + cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) + if isDriveOrRoot(cleanP) { + // never Lstat "/" itself, or drive letters on Windows + b.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + fi, err := os.Lstat(fullP) + if os.IsNotExist(err) { + // if p does not exist, accept it + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(fullP) + if err != nil { + return "", err + } + if system.IsAbs(dest) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + + // see note above on "fullP := ..." for why this is double-cleaned and + // what's happening here + return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil +} + +// EvalSymlinks returns the path name after the evaluation of any symbolic +// links. +// If path is relative the result will be relative to the current directory, +// unless one of the components is an absolute symbolic link. +// This version has been updated to support long paths prepended with `\\?\`. 
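
// [Editor's aside, not part of the vendored diff] A sketch restating the doc
// comment's example: resolution never escapes the scope. The paths are
// hypothetical and "fmt" is assumed to be imported where this runs.
func exampleFollowSymlinkInScope() {
	// If /rootfs/etc/alias is a symlink to /outside, the result stays inside
	// the scope: /rootfs/outside rather than /outside.
	resolved, err := FollowSymlinkInScope("/rootfs/etc/alias", "/rootfs")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved)
}
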
+func EvalSymlinks(path string) (string, error) { + return evalSymlinks(path) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go new file mode 100644 index 0000000000..c6dafcb0b9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_unix.go @@ -0,0 +1,15 @@ +// +build !windows + +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "path/filepath" +) + +func evalSymlinks(path string) (string, error) { + return filepath.EvalSymlinks(path) +} + +func isDriveOrRoot(p string) bool { + return p == string(filepath.Separator) +} diff --git a/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go new file mode 100644 index 0000000000..754761717b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/fs_windows.go @@ -0,0 +1,169 @@ +package symlink // import "github.com/docker/docker/pkg/symlink" + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/longpath" + "golang.org/x/sys/windows" +) + +func toShort(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetShortPathName says we can reuse buffer + n, err := windows.GetShortPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + if _, err = windows.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} + +func toLong(path string) (string, error) { + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + n, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + b = b[:n] + return windows.UTF16ToString(b), nil +} + +func evalSymlinks(path string) (string, error) { + path, err := walkSymlinks(path) + if err != nil { + return "", err + } + + p, err := toShort(path) + if err != nil { + return "", err + } + p, err = toLong(p) + if err != nil { + return "", err + } + // windows.GetLongPathName does not change the case of the drive letter, + // but the result of EvalSymlinks must be unique, so we have + // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). + // Make drive letter upper case. + if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { + p = string(p[0]+'A'-'a') + p[1:] + } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { + p = p[:3] + string(p[4]+'A'-'a') + p[5:] + } + return filepath.Clean(p), nil +} + +const utf8RuneSelf = 0x80 + +func walkSymlinks(path string) (string, error) { + const maxIter = 255 + originalPath := path + // consume path by taking each frontmost path element, + // expanding it if it's a symlink, and appending it to b + var b bytes.Buffer + for n := 0; path != ""; n++ { + if n > maxIter { + return "", errors.New("EvalSymlinks: too many links in " + originalPath) + } + + // A path beginning with `\\?\` represents the root, so automatically + // skip that part and begin processing the next segment. 
+ if strings.HasPrefix(path, longpath.Prefix) { + b.WriteString(longpath.Prefix) + path = path[4:] + continue + } + + // find next path component, p + var i = -1 + for j, c := range path { + if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { + i = j + break + } + } + var p string + if i == -1 { + p, path = path, "" + } else { + p, path = path[:i], path[i+1:] + } + + if p == "" { + if b.Len() == 0 { + // must be absolute path + b.WriteRune(filepath.Separator) + } + continue + } + + // If this is the first segment after the long path prefix, accept the + // current segment as a volume root or UNC share and move on to the next. + if b.String() == longpath.Prefix { + b.WriteString(p) + b.WriteRune(filepath.Separator) + continue + } + + fi, err := os.Lstat(b.String() + p) + if err != nil { + return "", err + } + if fi.Mode()&os.ModeSymlink == 0 { + b.WriteString(p) + if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { + b.WriteRune(filepath.Separator) + } + continue + } + + // it's a symlink, put it at the front of path + dest, err := os.Readlink(b.String() + p) + if err != nil { + return "", err + } + if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { + b.Reset() + } + path = dest + string(filepath.Separator) + path + } + return filepath.Clean(b.String()), nil +} + +func isDriveOrRoot(p string) bool { + if p == string(filepath.Separator) { + return true + } + + length := len(p) + if length >= 2 { + if p[length-1] == ':' && (('a' <= p[length-2] && p[length-2] <= 'z') || ('A' <= p[length-2] && p[length-2] <= 'Z')) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go new file mode 100644 index 0000000000..eea2d25bf9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu.go @@ -0,0 +1,12 @@ +// +build !linux,!windows + +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" +) + +// NumCPU returns the number of CPUs +func NumCPU() int { + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go new file mode 100644 index 0000000000..5f6c6df8c4 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_linux.go @@ -0,0 +1,42 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/unix" +) + +// numCPU queries the system for the count of threads available +// for use to this process. +// +// Issues two syscalls. +// Returns 0 on errors. Use |runtime.NumCPU| in that case. +func numCPU() int { + // Gets the affinity mask for a process: The very one invoking this function. + pid, _, _ := unix.RawSyscall(unix.SYS_GETPID, 0, 0, 0) + + var mask [1024 / 64]uintptr + _, _, err := unix.RawSyscall(unix.SYS_SCHED_GETAFFINITY, pid, uintptr(len(mask)*8), uintptr(unsafe.Pointer(&mask[0]))) + if err != 0 { + return 0 + } + + // For every available thread a bit is set in the mask. 
+ ncpu := 0 + for _, e := range mask { + if e == 0 { + continue + } + ncpu += int(popcnt(uint64(e))) + } + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go new file mode 100644 index 0000000000..13523f671f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/numcpu_windows.go @@ -0,0 +1,35 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + kernel32 = windows.NewLazySystemDLL("kernel32.dll") + getCurrentProcess = kernel32.NewProc("GetCurrentProcess") + getProcessAffinityMask = kernel32.NewProc("GetProcessAffinityMask") +) + +func numCPU() int { + // Gets the affinity mask for a process + var mask, sysmask uintptr + currentProcess, _, _ := getCurrentProcess.Call() + ret, _, _ := getProcessAffinityMask.Call(currentProcess, uintptr(unsafe.Pointer(&mask)), uintptr(unsafe.Pointer(&sysmask))) + if ret == 0 { + return 0 + } + // For every available thread a bit is set in the mask. + ncpu := int(popcnt(uint64(mask))) + return ncpu +} + +// NumCPU returns the number of CPUs which are currently online +func NumCPU() int { + if ncpu := numCPU(); ncpu > 0 { + return ncpu + } + return runtime.NumCPU() +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go new file mode 100644 index 0000000000..8fc0ecc25e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo.go @@ -0,0 +1,144 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import "github.com/docker/docker/pkg/parsers" + +// SysInfo stores information about which features a kernel supports. +// TODO Windows: Factor out platform specific capabilities. 
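
// [Editor's aside, not part of the vendored diff] How an affinity mask maps to
// a CPU count in the numCPU implementations above: every set bit is one
// schedulable CPU, tallied with popcnt (defined later in this package). The
// mask value here is made up.
func exampleAffinityCount() int {
	mask := uint64(0x0B)     // bits 0, 1 and 3 set: CPUs 0, 1 and 3 are usable
	return int(popcnt(mask)) // 3
}
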
+type SysInfo struct {
+	// Whether the kernel supports AppArmor or not
+	AppArmor bool
+	// Whether the kernel supports Seccomp or not
+	Seccomp bool
+
+	cgroupMemInfo
+	cgroupCPUInfo
+	cgroupBlkioInfo
+	cgroupCpusetInfo
+	cgroupPids
+
+	// Whether IPv4 forwarding is supported or not; if disabled, networking will not work
+	IPv4ForwardingDisabled bool
+
+	// Whether bridge-nf-call-iptables is supported or not
+	BridgeNFCallIPTablesDisabled bool
+
+	// Whether bridge-nf-call-ip6tables is supported or not
+	BridgeNFCallIP6TablesDisabled bool
+
+	// Whether the cgroup has the mountpoint of "devices" or not
+	CgroupDevicesEnabled bool
+}
+
+type cgroupMemInfo struct {
+	// Whether memory limit is supported or not
+	MemoryLimit bool
+
+	// Whether swap limit is supported or not
+	SwapLimit bool
+
+	// Whether soft limit is supported or not
+	MemoryReservation bool
+
+	// Whether OOM killer disable is supported or not
+	OomKillDisable bool
+
+	// Whether memory swappiness is supported or not
+	MemorySwappiness bool
+
+	// Whether kernel memory limit is supported or not
+	KernelMemory bool
+}
+
+type cgroupCPUInfo struct {
+	// Whether CPU shares is supported or not
+	CPUShares bool
+
+	// Whether CPU CFS (Completely Fair Scheduler) period is supported or not
+	CPUCfsPeriod bool
+
+	// Whether CPU CFS (Completely Fair Scheduler) quota is supported or not
+	CPUCfsQuota bool
+
+	// Whether CPU real-time period is supported or not
+	CPURealtimePeriod bool
+
+	// Whether CPU real-time runtime is supported or not
+	CPURealtimeRuntime bool
+}
+
+type cgroupBlkioInfo struct {
+	// Whether Block IO weight is supported or not
+	BlkioWeight bool
+
+	// Whether Block IO weight_device is supported or not
+	BlkioWeightDevice bool
+
+	// Whether Block IO read limit in bytes per second is supported or not
+	BlkioReadBpsDevice bool
+
+	// Whether Block IO write limit in bytes per second is supported or not
+	BlkioWriteBpsDevice bool
+
+	// Whether Block IO read limit in IO per second is supported or not
+	BlkioReadIOpsDevice bool
+
+	// Whether Block IO write limit in IO per second is supported or not
+	BlkioWriteIOpsDevice bool
+}
+
+type cgroupCpusetInfo struct {
+	// Whether Cpuset is supported or not
+	Cpuset bool
+
+	// Available Cpuset's cpus
+	Cpus string
+
+	// Available Cpuset's memory nodes
+	Mems string
+}
+
+type cgroupPids struct {
+	// Whether Pids Limit is supported or not
+	PidsLimit bool
+}
+
+// IsCpusetCpusAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.cpus set, `false` otherwise.
+// If error is not nil a parsing error occurred.
+func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) {
+	return isCpusetListAvailable(provided, c.Cpus)
+}
+
+// IsCpusetMemsAvailable returns `true` if the provided string set is contained
+// in cgroup's cpuset.mems set, `false` otherwise.
+// If error is not nil a parsing error occurred.
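
// [Editor's aside, not part of the vendored diff] A hedged sketch of the
// cpuset check: a requested set is available only if every element falls
// inside the set exposed by the cgroup. Values are fabricated; "fmt" is
// assumed to be imported where this runs.
func exampleCpusetCheck() {
	info := cgroupCpusetInfo{Cpuset: true, Cpus: "0-3", Mems: "0"}
	ok, err := info.IsCpusetCpusAvailable("1,2")
	fmt.Println(ok, err) // true <nil>, since CPUs 1 and 2 fall within 0-3
}
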
+func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { + return isCpusetListAvailable(provided, c.Mems) +} + +func isCpusetListAvailable(provided, available string) (bool, error) { + parsedProvided, err := parsers.ParseUintList(provided) + if err != nil { + return false, err + } + parsedAvailable, err := parsers.ParseUintList(available) + if err != nil { + return false, err + } + for k := range parsedProvided { + if !parsedAvailable[k] { + return false, nil + } + } + return true, nil +} + +// Returns bit count of 1, used by NumCPU +func popcnt(x uint64) (n byte) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return byte(x >> 56) +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go new file mode 100644 index 0000000000..dde5be19bc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_linux.go @@ -0,0 +1,254 @@ +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/opencontainers/runc/libcontainer/cgroups" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +func findCgroupMountpoints() (map[string]string, error) { + cgMounts, err := cgroups.GetCgroupMounts(false) + if err != nil { + return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) + } + mps := make(map[string]string) + for _, m := range cgMounts { + for _, ss := range m.Subsystems { + mps[ss] = m.Mountpoint + } + } + return mps, nil +} + +// New returns a new SysInfo, using the filesystem to detect which features +// the kernel supports. If `quiet` is `false` warnings are printed in logs +// whenever an error occurs or misconfigurations are present. +func New(quiet bool) *SysInfo { + sysInfo := &SysInfo{} + cgMounts, err := findCgroupMountpoints() + if err != nil { + logrus.Warnf("Failed to parse cgroup information: %v", err) + } else { + sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) + sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) + sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) + sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) + sysInfo.cgroupPids = checkCgroupPids(quiet) + } + + _, ok := cgMounts["devices"] + sysInfo.CgroupDevicesEnabled = ok + + sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") + sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") + sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") + + // Check if AppArmor is supported. + if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { + sysInfo.AppArmor = true + } + + // Check if Seccomp is supported, via CONFIG_SECCOMP. + if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { + // Make sure the kernel has CONFIG_SECCOMP_FILTER. + if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { + sysInfo.Seccomp = true + } + } + + return sysInfo +} + +// checkCgroupMem reads the memory information from the memory cgroup mount point. 
+func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { + mountPoint, ok := cgMounts["memory"] + if !ok { + if !quiet { + logrus.Warn("Your kernel does not support cgroup memory limit") + } + return cgroupMemInfo{} + } + + swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") + if !quiet && !swapLimit { + logrus.Warn("Your kernel does not support swap memory limit") + } + memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") + if !quiet && !memoryReservation { + logrus.Warn("Your kernel does not support memory reservation") + } + oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") + if !quiet && !oomKillDisable { + logrus.Warn("Your kernel does not support oom control") + } + memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") + if !quiet && !memorySwappiness { + logrus.Warn("Your kernel does not support memory swappiness") + } + kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") + if !quiet && !kernelMemory { + logrus.Warn("Your kernel does not support kernel memory limit") + } + + return cgroupMemInfo{ + MemoryLimit: true, + SwapLimit: swapLimit, + MemoryReservation: memoryReservation, + OomKillDisable: oomKillDisable, + MemorySwappiness: memorySwappiness, + KernelMemory: kernelMemory, + } +} + +// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. +func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { + mountPoint, ok := cgMounts["cpu"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpu cgroup in mounts") + } + return cgroupCPUInfo{} + } + + cpuShares := cgroupEnabled(mountPoint, "cpu.shares") + if !quiet && !cpuShares { + logrus.Warn("Your kernel does not support cgroup cpu shares") + } + + cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") + if !quiet && !cpuCfsPeriod { + logrus.Warn("Your kernel does not support cgroup cfs period") + } + + cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") + if !quiet && !cpuCfsQuota { + logrus.Warn("Your kernel does not support cgroup cfs quotas") + } + + cpuRealtimePeriod := cgroupEnabled(mountPoint, "cpu.rt_period_us") + if !quiet && !cpuRealtimePeriod { + logrus.Warn("Your kernel does not support cgroup rt period") + } + + cpuRealtimeRuntime := cgroupEnabled(mountPoint, "cpu.rt_runtime_us") + if !quiet && !cpuRealtimeRuntime { + logrus.Warn("Your kernel does not support cgroup rt runtime") + } + + return cgroupCPUInfo{ + CPUShares: cpuShares, + CPUCfsPeriod: cpuCfsPeriod, + CPUCfsQuota: cpuCfsQuota, + CPURealtimePeriod: cpuRealtimePeriod, + CPURealtimeRuntime: cpuRealtimeRuntime, + } +} + +// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
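
// [Editor's aside, not part of the vendored diff] Each check above reduces to
// the same probe: does the control file exist under the subsystem mountpoint?
// A standalone sketch with a hypothetical mountpoint, using cgroupEnabled
// defined later in this file.
func exampleCgroupProbe() bool {
	// e.g. /sys/fs/cgroup/memory/memory.memsw.limit_in_bytes
	return cgroupEnabled("/sys/fs/cgroup/memory", "memory.memsw.limit_in_bytes")
}
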
+func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { + mountPoint, ok := cgMounts["blkio"] + if !ok { + if !quiet { + logrus.Warn("Unable to find blkio cgroup in mounts") + } + return cgroupBlkioInfo{} + } + + weight := cgroupEnabled(mountPoint, "blkio.weight") + if !quiet && !weight { + logrus.Warn("Your kernel does not support cgroup blkio weight") + } + + weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") + if !quiet && !weightDevice { + logrus.Warn("Your kernel does not support cgroup blkio weight_device") + } + + readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") + if !quiet && !readBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") + } + + writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") + if !quiet && !writeBpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") + } + readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") + if !quiet && !readIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") + } + + writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") + if !quiet && !writeIOpsDevice { + logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") + } + return cgroupBlkioInfo{ + BlkioWeight: weight, + BlkioWeightDevice: weightDevice, + BlkioReadBpsDevice: readBpsDevice, + BlkioWriteBpsDevice: writeBpsDevice, + BlkioReadIOpsDevice: readIOpsDevice, + BlkioWriteIOpsDevice: writeIOpsDevice, + } +} + +// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. +func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { + mountPoint, ok := cgMounts["cpuset"] + if !ok { + if !quiet { + logrus.Warn("Unable to find cpuset cgroup in mounts") + } + return cgroupCpusetInfo{} + } + + cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) + if err != nil { + return cgroupCpusetInfo{} + } + + mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) + if err != nil { + return cgroupCpusetInfo{} + } + + return cgroupCpusetInfo{ + Cpuset: true, + Cpus: strings.TrimSpace(string(cpus)), + Mems: strings.TrimSpace(string(mems)), + } +} + +// checkCgroupPids reads the pids information from the pids cgroup mount point. +func checkCgroupPids(quiet bool) cgroupPids { + _, err := cgroups.FindCgroupMountpoint("pids") + if err != nil { + if !quiet { + logrus.Warn(err) + } + return cgroupPids{} + } + + return cgroupPids{ + PidsLimit: true, + } +} + +func cgroupEnabled(mountPoint, name string) bool { + _, err := os.Stat(path.Join(mountPoint, name)) + return err == nil +} + +func readProcBool(path string) bool { + val, err := ioutil.ReadFile(path) + if err != nil { + return false + } + return strings.TrimSpace(string(val)) == "1" +} diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go new file mode 100644 index 0000000000..23cc695fb8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_unix.go @@ -0,0 +1,9 @@ +// +build !linux,!windows + +package sysinfo // import "github.com/docker/docker/pkg/sysinfo" + +// New returns an empty SysInfo for non linux for now. 
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	return sysInfo
+}
diff --git a/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
new file mode 100644
index 0000000000..5f68524e7e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/sysinfo/sysinfo_windows.go
@@ -0,0 +1,7 @@
+package sysinfo // import "github.com/docker/docker/pkg/sysinfo"
+
+// New returns an empty SysInfo for windows for now.
+func New(quiet bool) *SysInfo {
+	sysInfo := &SysInfo{}
+	return sysInfo
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
index 85e89a7eea..ee7e0256f3 100644
--- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
+++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go
@@ -1,6 +1,7 @@
 package system // import "github.com/docker/docker/pkg/system"
 
 import (
+	"fmt"
 	"unsafe"
 
 	"github.com/sirupsen/logrus"
@@ -53,6 +54,10 @@ func GetOSVersion() OSVersion {
 	return osv
 }
 
+func (osv OSVersion) ToString() string {
+	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
+}
+
 // IsWindowsClient returns true if the SKU is client
 // @engine maintainers - this function should not be removed or modified as it
 // is used to enforce licensing restrictions on Windows.
diff --git a/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
new file mode 100644
index 0000000000..e835893746
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tailfile/tailfile.go
@@ -0,0 +1,66 @@
+// Package tailfile provides helper functions to read the last n lines of any
+// ReadSeeker.
+package tailfile // import "github.com/docker/docker/pkg/tailfile"
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"os"
+)
+
+const blockSize = 1024
+
+var eol = []byte("\n")
+
+// ErrNonPositiveLinesNumber is the error returned when the requested number of lines is not positive.
+var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
+
+// TailFile returns the last n lines of reader f (which may be nil).
+func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
+	if n <= 0 {
+		return nil, ErrNonPositiveLinesNumber
+	}
+	size, err := f.Seek(0, os.SEEK_END)
+	if err != nil {
+		return nil, err
+	}
+	block := -1
+	var data []byte
+	var cnt int
+	for {
+		var b []byte
+		step := int64(block * blockSize)
+		left := size + step // how many bytes to beginning
+		if left < 0 {
+			if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+				return nil, err
+			}
+			b = make([]byte, blockSize+left)
+			if _, err := f.Read(b); err != nil {
+				return nil, err
+			}
+			data = append(b, data...)
+			break
+		} else {
+			b = make([]byte, blockSize)
+			if _, err := f.Seek(left, os.SEEK_SET); err != nil {
+				return nil, err
+			}
+			if _, err := f.Read(b); err != nil {
+				return nil, err
+			}
+			data = append(b, data...)
+		}
+		cnt += bytes.Count(b, eol)
+		if cnt > n {
+			break
+		}
+		block--
+	}
+	lines := bytes.Split(data, eol)
+	if n < len(lines) {
+		return lines[len(lines)-n-1 : len(lines)-1], nil
+	}
+	return lines[:len(lines)-1], nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
new file mode 100644
index 0000000000..bc7d84df4e
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/builder_context.go
@@ -0,0 +1,21 @@
+package tarsum // import "github.com/docker/docker/pkg/tarsum"
+
+// BuilderContext is an interface extending TarSum by adding the Remove method.
+// In general there was concern about adding this method to TarSum itself
+// so instead it is being added just to "BuilderContext" which will then
+// only be used during the .dockerignore file processing
+// - see builder/evaluator.go
+type BuilderContext interface {
+	TarSum
+	Remove(string)
+}
+
+func (bc *tarSum) Remove(filename string) {
+	for i, fis := range bc.sums {
+		if fis.Name() == filename {
+			bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
+			// Note, we don't just return because there could be
+			// more than one with this name
+		}
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
new file mode 100644
index 0000000000..01d4ed59b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/fileinfosums.go
@@ -0,0 +1,133 @@
+package tarsum // import "github.com/docker/docker/pkg/tarsum"
+
+import (
+	"runtime"
+	"sort"
+	"strings"
+)
+
+// FileInfoSumInterface provides an interface for accessing file checksum
+// information within a tar file. This info is accessed through an interface
+// so the actual name and sum cannot be modified.
+type FileInfoSumInterface interface {
+	// File name
+	Name() string
+	// Checksum of this particular file and its headers
+	Sum() string
+	// Position of file in the tar
+	Pos() int64
+}
+
+type fileInfoSum struct {
+	name string
+	sum  string
+	pos  int64
+}
+
+func (fis fileInfoSum) Name() string {
+	return fis.name
+}
+func (fis fileInfoSum) Sum() string {
+	return fis.sum
+}
+func (fis fileInfoSum) Pos() int64 {
+	return fis.pos
+}
+
+// FileInfoSums provides a list of FileInfoSumInterfaces.
+type FileInfoSums []FileInfoSumInterface
+
+// GetFile returns the first FileInfoSumInterface with a matching name.
+func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
+	// We do case insensitive matching on Windows as c:\APP and c:\app are
+	// the same. See issue #33107.
+	for i := range fis {
+		if (runtime.GOOS == "windows" && strings.EqualFold(fis[i].Name(), name)) ||
+			(runtime.GOOS != "windows" && fis[i].Name() == name) {
+			return fis[i]
+		}
+	}
+	return nil
+}
+
+// GetAllFile returns a FileInfoSums with all matching names.
+func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
+	f := FileInfoSums{}
+	for i := range fis {
+		if fis[i].Name() == name {
+			f = append(f, fis[i])
+		}
+	}
+	return f
+}
+
+// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
+func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
+	seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
+	for i := range fis {
+		f := fis[i]
+		if _, ok := seen[f.Name()]; ok {
+			dups = append(dups, f)
+		} else {
+			seen[f.Name()] = 0
+		}
+	}
+	return dups
+}
+
+// Len returns the size of the FileInfoSums.
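
// [Editor's aside, not part of the vendored diff] An illustrative sketch of
// the duplicate detection above; the entries are fabricated and "fmt" is
// assumed to be imported where this runs.
func exampleDuplicatePaths() {
	fis := FileInfoSums{
		fileInfoSum{name: "etc/passwd", sum: "aa", pos: 0},
		fileInfoSum{name: "etc/passwd", sum: "bb", pos: 1},
	}
	dups := fis.GetDuplicatePaths()
	fmt.Println(len(dups)) // 1: only the second "etc/passwd" entry is reported
}
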
+func (fis FileInfoSums) Len() int { return len(fis) }
+
+// Swap swaps two FileInfoSum values in a FileInfoSums list.
+func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
+
+// SortByPos sorts FileInfoSums content by position.
+func (fis FileInfoSums) SortByPos() {
+	sort.Sort(byPos{fis})
+}
+
+// SortByNames sorts FileInfoSums content by name.
+func (fis FileInfoSums) SortByNames() {
+	sort.Sort(byName{fis})
+}
+
+// SortBySums sorts FileInfoSums content by sums.
+func (fis FileInfoSums) SortBySums() {
+	dups := fis.GetDuplicatePaths()
+	if len(dups) > 0 {
+		sort.Sort(bySum{fis, dups})
+	} else {
+		sort.Sort(bySum{fis, nil})
+	}
+}
+
+// byName is a sort.Sort helper for sorting by file names.
+// If names are the same, order them by their appearance in the tar archive
+type byName struct{ FileInfoSums }
+
+func (bn byName) Less(i, j int) bool {
+	if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() {
+		return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos()
+	}
+	return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name()
+}
+
+// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive
+type bySum struct {
+	FileInfoSums
+	dups FileInfoSums
+}
+
+func (bs bySum) Less(i, j int) bool {
+	if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() {
+		return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos()
+	}
+	return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum()
+}
+
+// byPos is a sort.Sort helper for sorting the fileinfos by their original position in the tar archive
+type byPos struct{ FileInfoSums }
+
+func (bp byPos) Less(i, j int) bool {
+	return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos()
+}
diff --git a/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
new file mode 100644
index 0000000000..5542e1b2c0
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/tarsum/tarsum.go
@@ -0,0 +1,301 @@
+// Package tarsum provides algorithms to perform checksum calculation on
+// filesystem layers.
+//
+// The transportation of filesystems, regarding Docker, is done with tar(1)
+// archives. There are a variety of tar serialization formats [2], and a key
+// concern here is ensuring a repeatable checksum given a set of inputs from a
+// generic tar archive. Types of transportation include distribution to and from a
+// registry endpoint, saving and loading through commands or Docker daemon APIs,
+// transferring the build context from client to Docker daemon, and committing the
+// filesystem of a container to become an image.
+//
+// As tar archives are used for transit, but not preserved in many situations, the
+// focus of the algorithm is to ensure the integrity of the preserved filesystem,
+// while maintaining a deterministic accountability. This includes neither
+// constraining the ordering or manipulation of the files during the creation or
+// unpacking of the archive, nor including additional metadata state about the file
+// system attributes.
+package tarsum // import "github.com/docker/docker/pkg/tarsum"
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"crypto"
+	"crypto/sha256"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"path"
+	"strings"
+)
+
+const (
+	buf8K  = 8 * 1024
+	buf16K = 16 * 1024
+	buf32K = 32 * 1024
+)
+
+// NewTarSum creates a new interface for calculating a fixed time checksum of a
+// tar archive.
+//
+// This is used for calculating checksums of layers of an image, in some cases
+// including the byte payload of the image's json metadata as well, and for
+// calculating the checksums for buildcache.
+func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) {
+	return NewTarSumHash(r, dc, v, DefaultTHash)
+}
+
+// NewTarSumHash creates a new TarSum, providing a THash to use rather than
+// the DefaultTHash.
+func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) {
+	headerSelector, err := getTarHeaderSelector(v)
+	if err != nil {
+		return nil, err
+	}
+	ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash}
+	err = ts.initTarSum()
+	return ts, err
+}
+
+// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label.
+func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) {
+	parts := strings.SplitN(label, "+", 2)
+	if len(parts) != 2 {
+		return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}")
+	}
+
+	versionName, hashName := parts[0], parts[1]
+
+	version, ok := tarSumVersionsByName[versionName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum version name: %q", versionName)
+	}
+
+	hashConfig, ok := standardHashConfigs[hashName]
+	if !ok {
+		return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName)
+	}
+
+	tHash := NewTHash(hashConfig.name, hashConfig.hash.New)
+
+	return NewTarSumHash(r, disableCompression, version, tHash)
+}
+
+// TarSum is the generic interface for calculating fixed time
+// checksums of a tar archive.
+type TarSum interface {
+	io.Reader
+	GetSums() FileInfoSums
+	Sum([]byte) string
+	Version() Version
+	Hash() THash
+}
+
+// tarSum struct is the structure for a Version0 checksum calculation.
+type tarSum struct {
+	io.Reader
+	tarR               *tar.Reader
+	tarW               *tar.Writer
+	writer             writeCloseFlusher
+	bufTar             *bytes.Buffer
+	bufWriter          *bytes.Buffer
+	bufData            []byte
+	h                  hash.Hash
+	tHash              THash
+	sums               FileInfoSums
+	fileCounter        int64
+	currentFile        string
+	finished           bool
+	first              bool
+	DisableCompression bool              // false by default. When false, the output is gzip compressed.
+	tarSumVersion      Version           // this field is not exported so it cannot be mutated during use
+	headerSelector     tarHeaderSelector // handles selecting and ordering headers for files in the archive
+}
+
+func (ts tarSum) Hash() THash {
+	return ts.tHash
+}
+
+func (ts tarSum) Version() Version {
+	return ts.tarSumVersion
+}
+
+// THash provides a hash.Hash type generator and its name.
+type THash interface {
+	Hash() hash.Hash
+	Name() string
+}
+
+// NewTHash is a convenience method for creating a THash.
+func NewTHash(name string, h func() hash.Hash) THash {
+	return simpleTHash{n: name, h: h}
+}
+
+type tHashConfig struct {
+	name string
+	hash crypto.Hash
+}
+
+var (
+	// NOTE: DO NOT include MD5 or SHA1, which are considered insecure.
+	standardHashConfigs = map[string]tHashConfig{
+		"sha256": {name: "sha256", hash: crypto.SHA256},
+		"sha512": {name: "sha512", hash: crypto.SHA512},
+	}
+)
+
+// DefaultTHash is the default TarSum hashing algorithm - "sha256".
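
// [Editor's aside, not part of the vendored diff] A minimal sketch of
// computing a tarsum: the TarSum is an io.Reader, and the stream must be
// fully consumed before Sum is meaningful. Assumes tarBytes holds a valid tar
// archive; "bytes" and "io" are already imported by this file.
func exampleTarSum(tarBytes []byte) (string, error) {
	ts, err := NewTarSum(bytes.NewReader(tarBytes), true, Version1)
	if err != nil {
		return "", err
	}
	var sink bytes.Buffer
	if _, err := io.Copy(&sink, ts); err != nil { // drain the stream
		return "", err
	}
	return ts.Sum(nil), nil
}
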
+var DefaultTHash = NewTHash("sha256", sha256.New) + +type simpleTHash struct { + n string + h func() hash.Hash +} + +func (sth simpleTHash) Name() string { return sth.n } +func (sth simpleTHash) Hash() hash.Hash { return sth.h() } + +func (ts *tarSum) encodeHeader(h *tar.Header) error { + for _, elem := range ts.headerSelector.selectHeaders(h) { + // Ignore these headers to be compatible with versions + // before go 1.10 + if elem[0] == "gname" || elem[0] == "uname" { + elem[1] = "" + } + if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { + return err + } + } + return nil +} + +func (ts *tarSum) initTarSum() error { + ts.bufTar = bytes.NewBuffer([]byte{}) + ts.bufWriter = bytes.NewBuffer([]byte{}) + ts.tarR = tar.NewReader(ts.Reader) + ts.tarW = tar.NewWriter(ts.bufTar) + if !ts.DisableCompression { + ts.writer = gzip.NewWriter(ts.bufWriter) + } else { + ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} + } + if ts.tHash == nil { + ts.tHash = DefaultTHash + } + ts.h = ts.tHash.Hash() + ts.h.Reset() + ts.first = true + ts.sums = FileInfoSums{} + return nil +} + +func (ts *tarSum) Read(buf []byte) (int, error) { + if ts.finished { + return ts.bufWriter.Read(buf) + } + if len(ts.bufData) < len(buf) { + switch { + case len(buf) <= buf8K: + ts.bufData = make([]byte, buf8K) + case len(buf) <= buf16K: + ts.bufData = make([]byte, buf16K) + case len(buf) <= buf32K: + ts.bufData = make([]byte, buf32K) + default: + ts.bufData = make([]byte, len(buf)) + } + } + buf2 := ts.bufData[:len(buf)] + + n, err := ts.tarR.Read(buf2) + if err != nil { + if err == io.EOF { + if _, err := ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + if !ts.first { + ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) + ts.fileCounter++ + ts.h.Reset() + } else { + ts.first = false + } + + if _, err := ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + + currentHeader, err := ts.tarR.Next() + if err != nil { + if err == io.EOF { + if err := ts.tarW.Close(); err != nil { + return 0, err + } + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + if err := ts.writer.Close(); err != nil { + return 0, err + } + ts.finished = true + return ts.bufWriter.Read(buf) + } + return 0, err + } + + ts.currentFile = path.Join(".", path.Join("/", currentHeader.Name)) + if err := ts.encodeHeader(currentHeader); err != nil { + return 0, err + } + if err := ts.tarW.WriteHeader(currentHeader); err != nil { + return 0, err + } + + if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) + } + return 0, err + } + + // Filling the hash buffer + if _, err = ts.h.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the tar writer + if _, err = ts.tarW.Write(buf2[:n]); err != nil { + return 0, err + } + + // Filling the output writer + if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { + return 0, err + } + ts.writer.Flush() + + return ts.bufWriter.Read(buf) +} + +func (ts *tarSum) Sum(extra []byte) string { + ts.sums.SortBySums() + h := ts.tHash.Hash() + if extra != nil { + h.Write(extra) + } + for _, fis := range ts.sums { + h.Write([]byte(fis.Sum())) + } + checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) + return checksum +} + +func (ts *tarSum) GetSums() FileInfoSums { + return ts.sums +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/versioning.go 
b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go new file mode 100644 index 0000000000..aa1f171862 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/versioning.go @@ -0,0 +1,158 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "archive/tar" + "errors" + "io" + "sort" + "strconv" + "strings" +) + +// Version is used for versioning of the TarSum algorithm +// based on the prefix of the hash used +// i.e. "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" +type Version int + +// Prefix of "tarsum" +const ( + Version0 Version = iota + Version1 + // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation + VersionDev +) + +// WriteV1Header writes a tar header to a writer in V1 tarsum format. +func WriteV1Header(h *tar.Header, w io.Writer) { + for _, elem := range v1TarHeaderSelect(h) { + w.Write([]byte(elem[0] + elem[1])) + } +} + +// VersionLabelForChecksum returns the label for the given tarsum +// checksum, i.e., everything before the first `+` character in +// the string or an empty string if no label separator is found. +func VersionLabelForChecksum(checksum string) string { + // Checksums are in the form: {versionLabel}+{hashID}:{hex} + sepIndex := strings.Index(checksum, "+") + if sepIndex < 0 { + return "" + } + return checksum[:sepIndex] +} + +// GetVersions gets a list of all known tarsum versions. +func GetVersions() []Version { + v := []Version{} + for k := range tarSumVersions { + v = append(v, k) + } + return v +} + +var ( + tarSumVersions = map[Version]string{ + Version0: "tarsum", + Version1: "tarsum.v1", + VersionDev: "tarsum.dev", + } + tarSumVersionsByName = map[string]Version{ + "tarsum": Version0, + "tarsum.v1": Version1, + "tarsum.dev": VersionDev, + } +) + +func (tsv Version) String() string { + return tarSumVersions[tsv] +} + +// GetVersionFromTarsum returns the Version from the provided string. +func GetVersionFromTarsum(tarsum string) (Version, error) { + tsv := tarsum + if strings.Contains(tarsum, "+") { + tsv = strings.SplitN(tarsum, "+", 2)[0] + } + for v, s := range tarSumVersions { + if s == tsv { + return v, nil + } + } + return -1, ErrNotVersion +} + +// Errors that may be returned by functions in this package +var ( + ErrNotVersion = errors.New("string does not include a TarSum Version") + ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") +) + +// tarHeaderSelector is the interface which different versions +// of tarsum should use for selecting and ordering tar headers +// for each item in the archive. 
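Editor's note: a quick sketch of how the version helpers above pick apart a checksum string:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	sum := "tarsum.v1+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b"

	// Everything before the '+' is the version label.
	fmt.Println(tarsum.VersionLabelForChecksum(sum)) // "tarsum.v1"

	// The full checksum string maps back to a Version constant.
	v, err := tarsum.GetVersionFromTarsum(sum)
	if err != nil {
		panic(err)
	}
	fmt.Println(v == tarsum.Version1) // true
}
```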
+type tarHeaderSelector interface { + selectHeaders(h *tar.Header) (orderedHeaders [][2]string) +} + +type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) + +func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { + return f(h) +} + +func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + return [][2]string{ + {"name", h.Name}, + {"mode", strconv.FormatInt(h.Mode, 10)}, + {"uid", strconv.Itoa(h.Uid)}, + {"gid", strconv.Itoa(h.Gid)}, + {"size", strconv.FormatInt(h.Size, 10)}, + {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, + {"typeflag", string([]byte{h.Typeflag})}, + {"linkname", h.Linkname}, + {"uname", h.Uname}, + {"gname", h.Gname}, + {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, + {"devminor", strconv.FormatInt(h.Devminor, 10)}, + } +} + +func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { + // Get extended attributes. + xAttrKeys := make([]string, len(h.Xattrs)) + for k := range h.Xattrs { + xAttrKeys = append(xAttrKeys, k) + } + sort.Strings(xAttrKeys) + + // Make the slice with enough capacity to hold the 11 basic headers + // we want from the v0 selector plus however many xattrs we have. + orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) + + // Copy all headers from v0 excluding the 'mtime' header (the 5th element). + v0headers := v0TarHeaderSelect(h) + orderedHeaders = append(orderedHeaders, v0headers[0:5]...) + orderedHeaders = append(orderedHeaders, v0headers[6:]...) + + // Finally, append the sorted xattrs. + for _, k := range xAttrKeys { + orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) + } + + return +} + +var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ + Version0: v0TarHeaderSelect, + Version1: v1TarHeaderSelect, + VersionDev: v1TarHeaderSelect, +} + +func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { + headerSelector, ok := registeredHeaderSelectors[v] + if !ok { + return nil, ErrVersionNotImplemented + } + + return headerSelector, nil +} diff --git a/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go new file mode 100644 index 0000000000..c4c45a35e7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tarsum/writercloser.go @@ -0,0 +1,22 @@ +package tarsum // import "github.com/docker/docker/pkg/tarsum" + +import ( + "io" +) + +type writeCloseFlusher interface { + io.WriteCloser + Flush() error +} + +type nopCloseFlusher struct { + io.Writer +} + +func (n *nopCloseFlusher) Close() error { + return nil +} + +func (n *nopCloseFlusher) Flush() error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/ascii.go b/vendor/github.com/docker/docker/pkg/term/ascii.go new file mode 100644 index 0000000000..87bca8d4ac --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/ascii.go @@ -0,0 +1,66 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "fmt" + "strings" +) + +// ASCII list the possible supported ASCII key sequence +var ASCII = []string{ + "ctrl-@", + "ctrl-a", + "ctrl-b", + "ctrl-c", + "ctrl-d", + "ctrl-e", + "ctrl-f", + "ctrl-g", + "ctrl-h", + "ctrl-i", + "ctrl-j", + "ctrl-k", + "ctrl-l", + "ctrl-m", + "ctrl-n", + "ctrl-o", + "ctrl-p", + "ctrl-q", + "ctrl-r", + "ctrl-s", + "ctrl-t", + "ctrl-u", + "ctrl-v", + "ctrl-w", + "ctrl-x", + "ctrl-y", + "ctrl-z", + "ctrl-[", + "ctrl-\\", + "ctrl-]", + "ctrl-^", + "ctrl-_", +} + +// ToBytes converts a string representing a suite of key-sequence to the 
corresponding ASCII code. +func ToBytes(keys string) ([]byte, error) { + codes := []byte{} +next: + for _, key := range strings.Split(keys, ",") { + if len(key) != 1 { + for code, ctrl := range ASCII { + if ctrl == key { + codes = append(codes, byte(code)) + continue next + } + } + if key == "DEL" { + codes = append(codes, 127) + } else { + return nil, fmt.Errorf("Unknown character: '%s'", key) + } + } else { + codes = append(codes, key[0]) + } + } + return codes, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/proxy.go b/vendor/github.com/docker/docker/pkg/term/proxy.go new file mode 100644 index 0000000000..da733e5848 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/proxy.go @@ -0,0 +1,78 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" +) + +// EscapeError is special error which returned by a TTY proxy reader's Read() +// method in case its detach escape sequence is read. +type EscapeError struct{} + +func (EscapeError) Error() string { + return "read escape sequence" +} + +// escapeProxy is used only for attaches with a TTY. It is used to proxy +// stdin keypresses from the underlying reader and look for the passed in +// escape key sequence to signal a detach. +type escapeProxy struct { + escapeKeys []byte + escapeKeyPos int + r io.Reader +} + +// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader +// and detects when the specified escape keys are read, in which case the Read +// method will return an error of type EscapeError. +func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader { + return &escapeProxy{ + escapeKeys: escapeKeys, + r: r, + } +} + +func (r *escapeProxy) Read(buf []byte) (int, error) { + nr, err := r.r.Read(buf) + + if len(r.escapeKeys) == 0 { + return nr, err + } + + preserve := func() { + // this preserves the original key presses in the passed in buffer + nr += r.escapeKeyPos + preserve := make([]byte, 0, r.escapeKeyPos+len(buf)) + preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...) + preserve = append(preserve, buf...) + r.escapeKeyPos = 0 + copy(buf[0:nr], preserve) + } + + if nr != 1 || err != nil { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, err + } + + if buf[0] != r.escapeKeys[r.escapeKeyPos] { + if r.escapeKeyPos > 0 { + preserve() + } + return nr, nil + } + + if r.escapeKeyPos == len(r.escapeKeys)-1 { + return 0, EscapeError{} + } + + // Looks like we've got an escape key, but we need to match again on the next + // read. + // Store the current escape key we found so we can look for the next one on + // the next read. 
+ // Since this is an escape key, make sure we don't let the caller read it + // If later on we find that this is not the escape sequence, we'll add the + // keys back + r.escapeKeyPos++ + return nr - r.escapeKeyPos, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/tc.go b/vendor/github.com/docker/docker/pkg/term/tc.go new file mode 100644 index 0000000000..01bcaa8abb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/tc.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) + return err +} + +func tcset(fd uintptr, p *Termios) syscall.Errno { + _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) + return err +} diff --git a/vendor/github.com/docker/docker/pkg/term/term.go b/vendor/github.com/docker/docker/pkg/term/term.go new file mode 100644 index 0000000000..0589a95519 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term.go @@ -0,0 +1,124 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term // import "github.com/docker/docker/pkg/term" + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios Termios + return tcget(fd, &termios) == 0 +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + if err := tcset(fd, &state.termios); err != 0 { + return err + } + return nil +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var oldState State + if err := tcget(fd, &oldState.termios); err != 0 { + return nil, err + } + + return &oldState, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != 0 { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. 
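Editor's note: the escapeProxy above is easiest to understand end to end. A sketch wiring ToBytes and NewEscapeProxy together, using Docker's usual ctrl-p,ctrl-q detach sequence:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// "ctrl-p,ctrl-q" is Docker's default detach sequence.
	escapeKeys, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}

	// Feed one ordinary byte, then the detach sequence, through the proxy.
	input := bytes.NewReader(append([]byte{'a'}, escapeKeys...))
	proxy := term.NewEscapeProxy(input, escapeKeys)

	buf := make([]byte, 1)
	for {
		n, err := proxy.Read(buf)
		if n > 0 {
			fmt.Printf("passed through: %q\n", buf[:n])
		}
		if err != nil {
			if _, ok := err.(term.EscapeError); ok {
				fmt.Println("detach sequence read")
			} else if err != io.EOF {
				panic(err)
			}
			return
		}
	}
}
```

Note how the proxy withholds partially matched escape bytes from the caller, handing them back via preserve() only if the sequence turns out not to complete.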
On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + oldState, err := MakeRaw(fd) + if err != nil { + return nil, err + } + handleInterrupt(fd, oldState) + return oldState, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + return nil, nil +} + +func handleInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + go func() { + for range sigchan { + // quit cleanly and the new terminal item is on a new line + fmt.Println() + signal.Stop(sigchan) + close(sigchan) + RestoreTerminal(fd, state) + os.Exit(1) + } + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/term_windows.go b/vendor/github.com/docker/docker/pkg/term/term_windows.go new file mode 100644 index 0000000000..64ead3c53b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/term_windows.go @@ -0,0 +1,228 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "io" + "os" + "os/signal" + "syscall" // used for STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and STD_ERROR_HANDLE + + "github.com/Azure/go-ansiterm/winterm" + "github.com/docker/docker/pkg/term/windows" +) + +// State holds the console mode for the terminal. +type State struct { + mode uint32 +} + +// Winsize is used for window size. +type Winsize struct { + Height uint16 + Width uint16 +} + +// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console +var vtInputSupported bool + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + // Turn on VT handling on all std handles, if possible. This might + // fail, in which case we will fall back to terminal emulation. + var emulateStdin, emulateStdout, emulateStderr bool + fd := os.Stdin.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil { + emulateStdin = true + } else { + vtInputSupported = true + } + // Unconditionally set the console mode back even on failure because SetConsoleMode + // remembers invalid bits on input handles. + winterm.SetConsoleMode(fd, mode) + } + + fd = os.Stdout.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. + if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStdout = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + fd = os.Stderr.Fd() + if mode, err := winterm.GetConsoleMode(fd); err == nil { + // Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it. 
+ if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil { + emulateStderr = true + } else { + winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING) + } + } + + if os.Getenv("ConEmuANSI") == "ON" || os.Getenv("ConsoleZVersion") != "" { + // The ConEmu and ConsoleZ terminals emulate ANSI on output streams well. + emulateStdin = true + emulateStdout = false + emulateStderr = false + } + + // Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and + // STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as + // go-ansiterm hasn't switch to x/sys/windows. + // TODO: switch back to x/sys/windows once go-ansiterm has switched + if emulateStdin { + stdIn = windowsconsole.NewAnsiReader(syscall.STD_INPUT_HANDLE) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + stdOut = windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + stdErr = windowsconsole.NewAnsiWriter(syscall.STD_ERROR_HANDLE) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + return windowsconsole.IsConsole(fd) +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return winterm.SetConsoleMode(fd, state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + mode, e := winterm.GetConsoleMode(fd) + if e != nil { + return nil, e + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= winterm.ENABLE_ECHO_INPUT + mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT + err := winterm.SetConsoleMode(fd, mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. 
On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. + winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= winterm.ENABLE_ECHO_INPUT + mode &^= winterm.ENABLE_LINE_INPUT + mode &^= winterm.ENABLE_MOUSE_INPUT + mode &^= winterm.ENABLE_WINDOW_INPUT + mode &^= winterm.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= winterm.ENABLE_EXTENDED_FLAGS + mode |= winterm.ENABLE_INSERT_MODE + mode |= winterm.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = winterm.SetConsoleMode(fd, mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_bsd.go b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go new file mode 100644 index 0000000000..48b16f5203 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_bsd.go @@ -0,0 +1,42 @@ +// +build darwin freebsd openbsd netbsd + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "unsafe" + + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
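Editor's note: callers are expected to pair MakeRaw (or SetRawTerminal) with RestoreTerminal. A sketch of the usual save/raw/restore cycle:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd := os.Stdin.Fd()
	if !term.IsTerminal(fd) {
		fmt.Println("stdin is not a terminal")
		return
	}

	// Save the current state while switching to raw mode...
	state, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	// ...and always restore it on the way out.
	defer term.RestoreTerminal(fd, state)

	// Reads now return unbuffered, unechoed bytes; note that with
	// OPOST cleared, output needs explicit \r\n line endings.
	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err == nil {
		fmt.Printf("got byte %q\r\n", buf[0])
	}
}
```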
+func MakeRaw(fd uintptr) (*State, error) { + var oldState State + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { + return nil, err + } + + newState := oldState.termios + newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + newState.Oflag &^= unix.OPOST + newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + newState.Cflag &^= (unix.CSIZE | unix.PARENB) + newState.Cflag |= unix.CS8 + newState.Cc[unix.VMIN] = 1 + newState.Cc[unix.VTIME] = 0 + + if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { + return nil, err + } + + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/termios_linux.go b/vendor/github.com/docker/docker/pkg/term/termios_linux.go new file mode 100644 index 0000000000..6d4c63fdb7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/termios_linux.go @@ -0,0 +1,39 @@ +package term // import "github.com/docker/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) + +// Termios is the Unix API for terminal I/O. +type Termios unix.Termios + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd uintptr) (*State, error) { + termios, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + + var oldState State + oldState.termios = Termios(*termios) + + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(int(fd), setTermios, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go new file mode 100644 index 0000000000..1d7c452cc8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_reader.go @@ -0,0 +1,263 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + initLogger() + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. 
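Editor's note: both MakeRaw variants above clear termios bits with Go's `&^=` (AND NOT) assignment. A tiny self-contained illustration with stand-in flag values:

```go
package main

import "fmt"

func main() {
	const (
		flagA = uint32(0x2) // stand-in for a termios bit such as ICANON
		flagB = uint32(0x8) // stand-in for ECHO
	)
	lflag := uint32(0xF)      // several bits set
	lflag &^= (flagA | flagB) // clear exactly those bits, keep the rest
	fmt.Printf("%#x\n", lflag) // prints 0x5
}
```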
+func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. +func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + logger.Debugf("Reading previously cached bytes") + + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar.fd, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + logger.Debug("No input events detected") + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + logger.Debug("No key bytes returned from the translator") + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + logger.Debugf("Read p[%d]: % x", copiedLength, p) + logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. 
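Editor's note: the Sizeof cast used in readInputEvents below deserves a standalone illustration. unsafe.Sizeof is evaluated at compile time and never reads through its operand, so a pointer of the wrong provenance is safe there (stand-in struct, for illustration only):

```go
package main

import (
	"fmt"
	"unsafe"
)

// inputRecord is a stand-in for winterm.INPUT_RECORD.
type inputRecord struct {
	eventType uint16
	payload   [18]byte
}

func main() {
	maxBytes := 4096
	// unsafe.Sizeof needs an expression of the target type; casting the
	// address of an unrelated variable manufactures one. Nothing is read
	// through the pointer, because Sizeof is a compile-time constant.
	recordSize := int(unsafe.Sizeof(*((*inputRecord)(unsafe.Pointer(&maxBytes)))))
	fmt.Println(recordSize, maxBytes/recordSize)
}
```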
+ recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. +func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { + var buffer bytes.Buffer + for _, event := range events { + if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { + buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) + } + } + + return buffer.Bytes() +} + +// keyToString maps the given input event record to the corresponding string. +func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { + if keyEvent.UnicodeChar == 0 { + return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) + } + + _, alt, control := getControlKeys(keyEvent.ControlKeyState) + if control { + // TODO(azlinux): Implement following control sequences + // -D Signals the end of input from the keyboard; also exits current shell. + // -H Deletes the first character to the left of the cursor. Also called the ERASE key. + // -Q Restarts printing after it has been stopped with -s. + // -S Suspends printing on the screen (does not stop the program). + // -U Deletes all characters on the current line. Also called the KILL key. + // -E Quits current command and creates a core + + } + + // +Key generates ESC N Key + if !control && alt { + return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) + } + + return string(keyEvent.UnicodeChar) +} + +// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string. 
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { + shift, alt, control := getControlKeys(controlState) + modifier := getControlKeysModifier(shift, alt, control) + + if format, ok := arrowKeyMapPrefix[key]; ok { + return fmt.Sprintf(format, escapeSequence, modifier) + } + + if format, ok := keyMapPrefix[key]; ok { + return fmt.Sprintf(format, modifier) + } + + return "" +} + +// getControlKeys extracts the shift, alt, and ctrl key states. +func getControlKeys(controlState uint32) (shift, alt, control bool) { + shift = 0 != (controlState & winterm.SHIFT_PRESSED) + alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) + control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) + return shift, alt, control +} + +// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. +func getControlKeysModifier(shift, alt, control bool) string { + if shift && alt && control { + return ansiterm.KEY_CONTROL_PARAM_8 + } + if alt && control { + return ansiterm.KEY_CONTROL_PARAM_7 + } + if shift && control { + return ansiterm.KEY_CONTROL_PARAM_6 + } + if control { + return ansiterm.KEY_CONTROL_PARAM_5 + } + if shift && alt { + return ansiterm.KEY_CONTROL_PARAM_4 + } + if alt { + return ansiterm.KEY_CONTROL_PARAM_3 + } + if shift { + return ansiterm.KEY_CONTROL_PARAM_2 + } + return "" +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go new file mode 100644 index 0000000000..7799a03fc5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/ansi_writer.go @@ -0,0 +1,64 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "io" + "os" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. +type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + initLogger() + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + logger.Infof("newAnsiWriter: parser %p", parser) + + aw := &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } + + logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) + logger.Infof("newAnsiWriter: %v", aw) + return aw +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. 
+func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + logger.Infof("Write: % x", p) + logger.Infof("Write: %s", string(p)) + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/console.go b/vendor/github.com/docker/docker/pkg/term/windows/console.go new file mode 100644 index 0000000000..5274019758 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/console.go @@ -0,0 +1,35 @@ +// +build windows + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "os" + + "github.com/Azure/go-ansiterm/winterm" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = IsConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +func IsConsole(fd uintptr) bool { + _, e := winterm.GetConsoleMode(fd) + return e == nil +} diff --git a/vendor/github.com/docker/docker/pkg/term/windows/windows.go b/vendor/github.com/docker/docker/pkg/term/windows/windows.go new file mode 100644 index 0000000000..1f8965969c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/windows/windows.go @@ -0,0 +1,33 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole // import "github.com/docker/docker/pkg/term/windows" + +import ( + "io/ioutil" + "os" + "sync" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/sirupsen/logrus" +) + +var logger *logrus.Logger +var initOnce sync.Once + +func initLogger() { + initOnce.Do(func() { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("ansiReaderWriter.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + }) +} diff --git a/vendor/github.com/docker/docker/pkg/term/winsize.go b/vendor/github.com/docker/docker/pkg/term/winsize.go new file mode 100644 index 0000000000..a19663ad83 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/term/winsize.go @@ -0,0 +1,20 @@ +// +build !windows + +package term // import "github.com/docker/docker/pkg/term" + +import ( + "golang.org/x/sys/unix" +) + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. 
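Editor's note: GetWinsize and SetWinsize below are the Unix window-size helpers. Querying the current size looks like this:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd := os.Stdout.Fd()
	ws, err := term.GetWinsize(fd)
	if err != nil {
		panic(err) // e.g. stdout is not attached to a terminal
	}
	fmt.Printf("%d columns x %d rows\n", ws.Width, ws.Height)
}
```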
+func SetWinsize(fd uintptr, ws *Winsize) error { + uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) +} diff --git a/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go new file mode 100644 index 0000000000..eaf2535da3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/urlutil/urlutil.go @@ -0,0 +1,44 @@ +// Package urlutil provides helper function to check urls kind. +// It supports http urls, git urls and transport url (tcp://, …) +package urlutil // import "github.com/docker/docker/pkg/urlutil" + +import ( + "regexp" + "strings" +) + +var ( + validPrefixes = map[string][]string{ + "url": {"http://", "https://"}, + "git": {"git://", "github.com/", "git@"}, + "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, + } + urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") +) + +// IsURL returns true if the provided str is an HTTP(S) URL. +func IsURL(str string) bool { + return checkURL(str, "url") +} + +// IsGitURL returns true if the provided str is a git repository URL. +func IsGitURL(str string) bool { + if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { + return true + } + return checkURL(str, "git") +} + +// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. +func IsTransportURL(str string) bool { + return checkURL(str, "transport") +} + +func checkURL(str, kind string) bool { + for _, prefix := range validPrefixes[kind] { + if strings.HasPrefix(str, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/useragent/useragent.go b/vendor/github.com/docker/docker/pkg/useragent/useragent.go new file mode 100644 index 0000000000..22db82129b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/useragent/useragent.go @@ -0,0 +1,55 @@ +// Package useragent provides helper functions to pack +// version information into a single User-Agent header. +package useragent // import "github.com/docker/docker/pkg/useragent" + +import ( + "strings" +) + +// VersionInfo is used to model UserAgent versions. +type VersionInfo struct { + Name string + Version string +} + +func (vi *VersionInfo) isValid() bool { + const stopChars = " \t\r\n/" + name := vi.Name + vers := vi.Version + if len(name) == 0 || strings.ContainsAny(name, stopChars) { + return false + } + if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { + return false + } + return true +} + +// AppendVersions converts versions to a string and appends the string to the string base. +// +// Each VersionInfo will be converted to a string in the format of +// "product/version", where the "product" is get from the name field, while +// version is get from the version field. Several pieces of version information +// will be concatenated and separated by space. +// +// Example: +// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) +// results in "base foo/1.0 bar/2.0". 
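Editor's note: the example in the doc comment above, as a runnable program; it also shows that entries containing whitespace are silently dropped by isValid:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/useragent"
)

func main() {
	ua := useragent.AppendVersions("base",
		useragent.VersionInfo{Name: "foo", Version: "1.0"},
		useragent.VersionInfo{Name: "bar", Version: "2.0"},
		useragent.VersionInfo{Name: "bad name", Version: "3.0"}, // contains a space: skipped
	)
	fmt.Println(ua) // base foo/1.0 bar/2.0
}
```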
+func AppendVersions(base string, versions ...VersionInfo) string { + if len(versions) == 0 { + return base + } + + verstrs := make([]string, 0, 1+len(versions)) + if len(base) > 0 { + verstrs = append(verstrs, base) + } + + for _, v := range versions { + if !v.isValid() { + continue + } + verstrs = append(verstrs, v.Name+"/"+v.Version) + } + return strings.Join(verstrs, " ") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin.go b/vendor/github.com/docker/docker/plugin/v2/plugin.go new file mode 100644 index 0000000000..c00a9d0169 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin.go @@ -0,0 +1,266 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "fmt" + "path/filepath" + "strings" + "sync" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// Plugin represents an individual plugin. +type Plugin struct { + mu sync.RWMutex + PluginObj types.Plugin `json:"plugin"` // todo: embed struct + pClient *plugins.Client + refCount int + Rootfs string // TODO: make private + + Config digest.Digest + Blobsums []digest.Digest + + modifyRuntimeSpec func(*specs.Spec) + + SwarmServiceID string +} + +const defaultPluginRuntimeDestination = "/run/docker/plugins" + +// ErrInadequateCapability indicates that the plugin did not have the requested capability. +type ErrInadequateCapability struct { + cap string +} + +func (e ErrInadequateCapability) Error() string { + return fmt.Sprintf("plugin does not provide %q capability", e.cap) +} + +// ScopedPath returns the path scoped to the plugin rootfs +func (p *Plugin) ScopedPath(s string) string { + if p.PluginObj.Config.PropagatedMount != "" && strings.HasPrefix(s, p.PluginObj.Config.PropagatedMount) { + // re-scope to the propagated mount path on the host + return filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount", strings.TrimPrefix(s, p.PluginObj.Config.PropagatedMount)) + } + return filepath.Join(p.Rootfs, s) +} + +// Client returns the plugin client. +func (p *Plugin) Client() *plugins.Client { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.pClient +} + +// SetPClient set the plugin client. +func (p *Plugin) SetPClient(client *plugins.Client) { + p.mu.Lock() + defer p.mu.Unlock() + + p.pClient = client +} + +// IsV1 returns true for V1 plugins and false otherwise. +func (p *Plugin) IsV1() bool { + return false +} + +// Name returns the plugin name. +func (p *Plugin) Name() string { + return p.PluginObj.Name +} + +// FilterByCap query the plugin for a given capability. +func (p *Plugin) FilterByCap(capability string) (*Plugin, error) { + capability = strings.ToLower(capability) + for _, typ := range p.PluginObj.Config.Interface.Types { + if typ.Capability == capability && typ.Prefix == "docker" { + return p, nil + } + } + return nil, ErrInadequateCapability{capability} +} + +// InitEmptySettings initializes empty settings for a plugin. 
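Editor's note: a sketch of ScopedPath from above, with a hypothetical rootfs path and no propagated mount configured:

```go
package main

import (
	"fmt"

	v2 "github.com/docker/docker/plugin/v2"
)

func main() {
	// Hypothetical rootfs location, for illustration only.
	p := &v2.Plugin{Rootfs: "/var/lib/docker/plugins/deadbeef/rootfs"}

	// With no PropagatedMount configured, paths are simply re-rooted
	// under the plugin rootfs.
	fmt.Println(p.ScopedPath("/etc/config.json"))
	// /var/lib/docker/plugins/deadbeef/rootfs/etc/config.json
}
```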
+func (p *Plugin) InitEmptySettings() { + p.PluginObj.Settings.Mounts = make([]types.PluginMount, len(p.PluginObj.Config.Mounts)) + copy(p.PluginObj.Settings.Mounts, p.PluginObj.Config.Mounts) + p.PluginObj.Settings.Devices = make([]types.PluginDevice, len(p.PluginObj.Config.Linux.Devices)) + copy(p.PluginObj.Settings.Devices, p.PluginObj.Config.Linux.Devices) + p.PluginObj.Settings.Env = make([]string, 0, len(p.PluginObj.Config.Env)) + for _, env := range p.PluginObj.Config.Env { + if env.Value != nil { + p.PluginObj.Settings.Env = append(p.PluginObj.Settings.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) + } + } + p.PluginObj.Settings.Args = make([]string, len(p.PluginObj.Config.Args.Value)) + copy(p.PluginObj.Settings.Args, p.PluginObj.Config.Args.Value) +} + +// Set is used to pass arguments to the plugin. +func (p *Plugin) Set(args []string) error { + p.mu.Lock() + defer p.mu.Unlock() + + if p.PluginObj.Enabled { + return fmt.Errorf("cannot set on an active plugin, disable plugin before setting") + } + + sets, err := newSettables(args) + if err != nil { + return err + } + + // TODO(vieux): lots of code duplication here, needs to be refactored. + +next: + for _, s := range sets { + // range over all the envs in the config + for _, env := range p.PluginObj.Config.Env { + // found the env in the config + if env.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsEnv, env.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + // is it, so lets update the settings in memory + updateSettingsEnv(&p.PluginObj.Settings.Env, &s) + continue next + } + } + + // range over all the mounts in the config + for _, mount := range p.PluginObj.Config.Mounts { + // found the mount in the config + if mount.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsMounts, mount.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + if mount.Source == nil { + return fmt.Errorf("Plugin config has no mount source") + } + *mount.Source = s.value + continue next + } + } + + // range over all the devices in the config + for _, device := range p.PluginObj.Config.Linux.Devices { + // found the device in the config + if device.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsDevices, device.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + if device.Path == nil { + return fmt.Errorf("Plugin config has no device path") + } + *device.Path = s.value + continue next + } + } + + // found the name in the config + if p.PluginObj.Config.Args.Name == s.name { + // is it settable ? + if ok, err := s.isSettable(allowedSettableFieldsArgs, p.PluginObj.Config.Args.Settable); err != nil { + return err + } else if !ok { + return fmt.Errorf("%q is not settable", s.prettyName()) + } + + // it is, so lets update the settings in memory + p.PluginObj.Settings.Args = strings.Split(s.value, " ") + continue next + } + + return fmt.Errorf("setting %q not found in the plugin configuration", s.name) + } + + return nil +} + +// IsEnabled returns the active state of the plugin. +func (p *Plugin) IsEnabled() bool { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Enabled +} + +// GetID returns the plugin's ID. 
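Editor's note: Set (above) relies on the settable helpers vendored later in this diff, which parse arguments of the form name[.field][=value]. A simplified standalone re-implementation of that convention, for illustration only (the vendored newSettable also rejects a leading '='):

```go
package main

import (
	"fmt"
	"strings"
)

// parseSettable mimics the unexported newSettable: it splits an
// argument of the form name[.field][=value].
func parseSettable(arg string) (name, field, value string) {
	if i := strings.Index(arg, "="); i >= 0 {
		arg, value = arg[:i], arg[i+1:]
	}
	if i := strings.LastIndex(arg, "."); i > 0 {
		arg, field = arg[:i], arg[i+1:]
	}
	return arg, field, value
}

func main() {
	fmt.Println(parseSettable("mount.source=/var/lib/data"))
	// mount source /var/lib/data
	fmt.Println(parseSettable("DEBUG=1"))
	// DEBUG  1
}
```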
+func (p *Plugin) GetID() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.ID +} + +// GetSocket returns the plugin socket. +func (p *Plugin) GetSocket() string { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Socket +} + +// GetTypes returns the interface types of a plugin. +func (p *Plugin) GetTypes() []types.PluginInterfaceType { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.PluginObj.Config.Interface.Types +} + +// GetRefCount returns the reference count. +func (p *Plugin) GetRefCount() int { + p.mu.RLock() + defer p.mu.RUnlock() + + return p.refCount +} + +// AddRefCount adds to reference count. +func (p *Plugin) AddRefCount(count int) { + p.mu.Lock() + defer p.mu.Unlock() + + p.refCount += count +} + +// Acquire increments the plugin's reference count +// This should be followed up by `Release()` when the plugin is no longer in use. +func (p *Plugin) Acquire() { + p.AddRefCount(plugingetter.Acquire) +} + +// Release decrements the plugin's reference count +// This should only be called when the plugin is no longer in use, e.g. with +// via `Acquire()` or getter.Get("name", "type", plugingetter.Acquire) +func (p *Plugin) Release() { + p.AddRefCount(plugingetter.Release) +} + +// SetSpecOptModifier sets the function to use to modify the the generated +// runtime spec. +func (p *Plugin) SetSpecOptModifier(f func(*specs.Spec)) { + p.mu.Lock() + p.modifyRuntimeSpec = f + p.mu.Unlock() +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go new file mode 100644 index 0000000000..4ad582cd83 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_linux.go @@ -0,0 +1,141 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/oci" + "github.com/docker/docker/pkg/system" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + s := oci.DefaultSpec() + + s.Root = &specs.Root{ + Path: p.Rootfs, + Readonly: false, // TODO: all plugins should be readonly? settable in config? 
+ } + + userMounts := make(map[string]struct{}, len(p.PluginObj.Settings.Mounts)) + for _, m := range p.PluginObj.Settings.Mounts { + userMounts[m.Destination] = struct{}{} + } + + execRoot = filepath.Join(execRoot, p.PluginObj.ID) + if err := os.MkdirAll(execRoot, 0700); err != nil { + return nil, errors.WithStack(err) + } + + if p.PluginObj.Config.PropagatedMount != "" { + pRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount") + s.Mounts = append(s.Mounts, specs.Mount{ + Source: pRoot, + Destination: p.PluginObj.Config.PropagatedMount, + Type: "bind", + Options: []string{"rbind", "rw", "rshared"}, + }) + s.Linux.RootfsPropagation = "rshared" + } + + mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ + Source: &execRoot, + Destination: defaultPluginRuntimeDestination, + Type: "bind", + Options: []string{"rbind", "rshared"}, + }) + + if p.PluginObj.Config.Network.Type != "" { + // TODO: if net == bridge, use libnetwork controller to create a new plugin-specific bridge, bind mount /etc/hosts and /etc/resolv.conf look at the docker code (allocateNetwork, initialize) + if p.PluginObj.Config.Network.Type == "host" { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("network")) + } + etcHosts := "/etc/hosts" + resolvConf := "/etc/resolv.conf" + mounts = append(mounts, + types.PluginMount{ + Source: &etcHosts, + Destination: etcHosts, + Type: "bind", + Options: []string{"rbind", "ro"}, + }, + types.PluginMount{ + Source: &resolvConf, + Destination: resolvConf, + Type: "bind", + Options: []string{"rbind", "ro"}, + }) + } + if p.PluginObj.Config.PidHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("pid")) + } + + if p.PluginObj.Config.IpcHost { + oci.RemoveNamespace(&s, specs.LinuxNamespaceType("ipc")) + } + + for _, mnt := range mounts { + m := specs.Mount{ + Destination: mnt.Destination, + Type: mnt.Type, + Options: mnt.Options, + } + if mnt.Source == nil { + return nil, errors.New("mount source is not specified") + } + m.Source = *mnt.Source + s.Mounts = append(s.Mounts, m) + } + + for i, m := range s.Mounts { + if strings.HasPrefix(m.Destination, "/dev/") { + if _, ok := userMounts[m.Destination]; ok { + s.Mounts = append(s.Mounts[:i], s.Mounts[i+1:]...) + } + } + } + + if p.PluginObj.Config.Linux.AllowAllDevices { + s.Linux.Resources.Devices = []specs.LinuxDeviceCgroup{{Allow: true, Access: "rwm"}} + } + for _, dev := range p.PluginObj.Settings.Devices { + path := *dev.Path + d, dPermissions, err := oci.DevicesFromPath(path, path, "rwm") + if err != nil { + return nil, errors.WithStack(err) + } + s.Linux.Devices = append(s.Linux.Devices, d...) + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, dPermissions...) + } + + envs := make([]string, 1, len(p.PluginObj.Settings.Env)+1) + envs[0] = "PATH=" + system.DefaultPathEnv(runtime.GOOS) + envs = append(envs, p.PluginObj.Settings.Env...) + + args := append(p.PluginObj.Config.Entrypoint, p.PluginObj.Settings.Args...) + cwd := p.PluginObj.Config.WorkDir + if len(cwd) == 0 { + cwd = "/" + } + s.Process.Terminal = false + s.Process.Args = args + s.Process.Cwd = cwd + s.Process.Env = envs + + caps := s.Process.Capabilities + caps.Bounding = append(caps.Bounding, p.PluginObj.Config.Linux.Capabilities...) + caps.Permitted = append(caps.Permitted, p.PluginObj.Config.Linux.Capabilities...) + caps.Inheritable = append(caps.Inheritable, p.PluginObj.Config.Linux.Capabilities...) + caps.Effective = append(caps.Effective, p.PluginObj.Config.Linux.Capabilities...) 
+ + if p.modifyRuntimeSpec != nil { + p.modifyRuntimeSpec(&s) + } + + return &s, nil +} diff --git a/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go new file mode 100644 index 0000000000..734b2ac664 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/plugin_unsupported.go @@ -0,0 +1,14 @@ +// +build !linux + +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + + specs "github.com/opencontainers/runtime-spec/specs-go" +) + +// InitSpec creates an OCI spec from the plugin's config. +func (p *Plugin) InitSpec(execRoot string) (*specs.Spec, error) { + return nil, errors.New("not supported") +} diff --git a/vendor/github.com/docker/docker/plugin/v2/settable.go b/vendor/github.com/docker/docker/plugin/v2/settable.go new file mode 100644 index 0000000000..efda564705 --- /dev/null +++ b/vendor/github.com/docker/docker/plugin/v2/settable.go @@ -0,0 +1,102 @@ +package v2 // import "github.com/docker/docker/plugin/v2" + +import ( + "errors" + "fmt" + "strings" +) + +type settable struct { + name string + field string + value string +} + +var ( + allowedSettableFieldsEnv = []string{"value"} + allowedSettableFieldsArgs = []string{"value"} + allowedSettableFieldsDevices = []string{"path"} + allowedSettableFieldsMounts = []string{"source"} + + errMultipleFields = errors.New("multiple fields are settable, one must be specified") + errInvalidFormat = errors.New("invalid format, must be [.][=]") +) + +func newSettables(args []string) ([]settable, error) { + sets := make([]settable, 0, len(args)) + for _, arg := range args { + set, err := newSettable(arg) + if err != nil { + return nil, err + } + sets = append(sets, set) + } + return sets, nil +} + +func newSettable(arg string) (settable, error) { + var set settable + if i := strings.Index(arg, "="); i == 0 { + return set, errInvalidFormat + } else if i < 0 { + set.name = arg + } else { + set.name = arg[:i] + set.value = arg[i+1:] + } + + if i := strings.LastIndex(set.name, "."); i > 0 { + set.field = set.name[i+1:] + set.name = arg[:i] + } + + return set, nil +} + +// prettyName return name.field if there is a field, otherwise name. +func (set *settable) prettyName() string { + if set.field != "" { + return fmt.Sprintf("%s.%s", set.name, set.field) + } + return set.name +} + +func (set *settable) isSettable(allowedSettableFields []string, settable []string) (bool, error) { + if set.field == "" { + if len(settable) == 1 { + // if field is not specified and there only one settable, default to it. 
+ set.field = settable[0] + } else if len(settable) > 1 { + return false, errMultipleFields + } + } + + isAllowed := false + for _, allowedSettableField := range allowedSettableFields { + if set.field == allowedSettableField { + isAllowed = true + break + } + } + + if isAllowed { + for _, settableField := range settable { + if set.field == settableField { + return true, nil + } + } + } + + return false, nil +} + +func updateSettingsEnv(env *[]string, set *settable) { + for i, e := range *env { + if parts := strings.SplitN(e, "=", 2); parts[0] == set.name { + (*env)[i] = fmt.Sprintf("%s=%s", set.name, set.value) + return + } + } + + *env = append(*env, fmt.Sprintf("%s=%s", set.name, set.value)) +} diff --git a/vendor/github.com/docker/docker/restartmanager/restartmanager.go b/vendor/github.com/docker/docker/restartmanager/restartmanager.go new file mode 100644 index 0000000000..6468ccf7e6 --- /dev/null +++ b/vendor/github.com/docker/docker/restartmanager/restartmanager.go @@ -0,0 +1,133 @@ +package restartmanager // import "github.com/docker/docker/restartmanager" + +import ( + "errors" + "fmt" + "sync" + "time" + + "github.com/docker/docker/api/types/container" +) + +const ( + backoffMultiplier = 2 + defaultTimeout = 100 * time.Millisecond + maxRestartTimeout = 1 * time.Minute +) + +// ErrRestartCanceled is returned when the restart manager has been +// canceled and will no longer restart the container. +var ErrRestartCanceled = errors.New("restart canceled") + +// RestartManager defines object that controls container restarting rules. +type RestartManager interface { + Cancel() error + ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) +} + +type restartManager struct { + sync.Mutex + sync.Once + policy container.RestartPolicy + restartCount int + timeout time.Duration + active bool + cancel chan struct{} + canceled bool +} + +// New returns a new restartManager based on a policy. +func New(policy container.RestartPolicy, restartCount int) RestartManager { + return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})} +} + +func (rm *restartManager) SetPolicy(policy container.RestartPolicy) { + rm.Lock() + rm.policy = policy + rm.Unlock() +} + +func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) { + if rm.policy.IsNone() { + return false, nil, nil + } + rm.Lock() + unlockOnExit := true + defer func() { + if unlockOnExit { + rm.Unlock() + } + }() + + if rm.canceled { + return false, nil, ErrRestartCanceled + } + + if rm.active { + return false, nil, fmt.Errorf("invalid call on an active restart manager") + } + // if the container ran for more than 10s, regardless of status and policy reset the + // the timeout back to the default. 
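+	// A run of at least 10 seconds is treated as stable: the timeout is
+	// cleared, so the exponential backoff below (100ms, doubling up to a
+	// 1 minute cap) starts over from the default.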
+ if executionDuration.Seconds() >= 10 { + rm.timeout = 0 + } + switch { + case rm.timeout == 0: + rm.timeout = defaultTimeout + case rm.timeout < maxRestartTimeout: + rm.timeout *= backoffMultiplier + } + if rm.timeout > maxRestartTimeout { + rm.timeout = maxRestartTimeout + } + + var restart bool + switch { + case rm.policy.IsAlways(): + restart = true + case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: + restart = true + case rm.policy.IsOnFailure(): + // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count + if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { + restart = exitCode != 0 + } + } + + if !restart { + rm.active = false + return false, nil, nil + } + + rm.restartCount++ + + unlockOnExit = false + rm.active = true + rm.Unlock() + + ch := make(chan error) + go func() { + select { + case <-rm.cancel: + ch <- ErrRestartCanceled + close(ch) + case <-time.After(rm.timeout): + rm.Lock() + close(ch) + rm.active = false + rm.Unlock() + } + }() + + return true, ch, nil +} + +func (rm *restartManager) Cancel() error { + rm.Do(func() { + rm.Lock() + rm.canceled = true + close(rm.cancel) + rm.Unlock() + }) + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config.go b/vendor/github.com/docker/docker/runconfig/config.go new file mode 100644 index 0000000000..cbacf47df3 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config.go @@ -0,0 +1,81 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "encoding/json" + "io" + + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/pkg/sysinfo" +) + +// ContainerDecoder implements httputils.ContainerDecoder +// calling DecodeContainerConfig. +type ContainerDecoder struct{} + +// DecodeConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + return decodeContainerConfig(src) +} + +// DecodeHostConfig makes ContainerDecoder to implement httputils.ContainerDecoder +func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { + return decodeHostConfig(src) +} + +// decodeContainerConfig decodes a json encoded config into a ContainerConfigWrapper +// struct and returns both a Config and a HostConfig struct +// Be aware this function is not checking whether the resulted structs are nil, +// it's your business to do so +func decodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { + var w ContainerConfigWrapper + + decoder := json.NewDecoder(src) + if err := decoder.Decode(&w); err != nil { + return nil, nil, nil, err + } + + hc := w.getHostConfig() + + // Perform platform-specific processing of Volumes and Binds. + if w.Config != nil && hc != nil { + + // Initialize the volumes map if currently nil + if w.Config.Volumes == nil { + w.Config.Volumes = make(map[string]struct{}) + } + } + + // Certain parameters need daemon-side validation that cannot be done + // on the client, as only the daemon knows what is valid for the platform. 
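+	// The validators called below are implemented per platform; see
+	// hostconfig_unix.go and hostconfig_windows.go in this package.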
+ if err := validateNetMode(w.Config, hc); err != nil { + return nil, nil, nil, err + } + + // Validate isolation + if err := validateIsolation(hc); err != nil { + return nil, nil, nil, err + } + + // Validate QoS + if err := validateQoS(hc); err != nil { + return nil, nil, nil, err + } + + // Validate Resources + if err := validateResources(hc, sysinfo.New(true)); err != nil { + return nil, nil, nil, err + } + + // Validate Privileged + if err := validatePrivileged(hc); err != nil { + return nil, nil, nil, err + } + + // Validate ReadonlyRootfs + if err := validateReadonlyRootfs(hc); err != nil { + return nil, nil, nil, err + } + + return w.Config, hc, w.NetworkingConfig, nil +} diff --git a/vendor/github.com/docker/docker/runconfig/config_unix.go b/vendor/github.com/docker/docker/runconfig/config_unix.go new file mode 100644 index 0000000000..65e8d6fcd4 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_unix.go @@ -0,0 +1,59 @@ +// +build !windows + +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` + Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` + *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. +} + +// getHostConfig gets the HostConfig of the Config. +// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + hc := w.HostConfig + + if hc == nil && w.InnerHostConfig != nil { + hc = w.InnerHostConfig + } else if w.InnerHostConfig != nil { + if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { + w.InnerHostConfig.Memory = hc.Memory + } + if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { + w.InnerHostConfig.MemorySwap = hc.MemorySwap + } + if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { + w.InnerHostConfig.CPUShares = hc.CPUShares + } + if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { + w.InnerHostConfig.CpusetCpus = hc.CpusetCpus + } + + if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { + w.InnerHostConfig.VolumeDriver = hc.VolumeDriver + } + + hc = w.InnerHostConfig + } + + if hc != nil { + if w.Cpuset != "" && hc.CpusetCpus == "" { + hc.CpusetCpus = w.Cpuset + } + } + + // Make sure NetworkMode has an acceptable value. We do this to ensure + // backwards compatible API behavior. 
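+	// e.g. a request from an older client that leaves NetworkMode empty is
+	// normalized to "default" here rather than rejected.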
+ SetDefaultNetModeIfBlank(hc) + + return hc +} diff --git a/vendor/github.com/docker/docker/runconfig/config_windows.go b/vendor/github.com/docker/docker/runconfig/config_windows.go new file mode 100644 index 0000000000..cced59d4df --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/config_windows.go @@ -0,0 +1,19 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "github.com/docker/docker/api/types/container" + networktypes "github.com/docker/docker/api/types/network" +) + +// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) +// and the corresponding HostConfig (non-portable). +type ContainerConfigWrapper struct { + *container.Config + HostConfig *container.HostConfig `json:"HostConfig,omitempty"` + NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` +} + +// getHostConfig gets the HostConfig of the Config. +func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { + return w.HostConfig +} diff --git a/vendor/github.com/docker/docker/runconfig/errors.go b/vendor/github.com/docker/docker/runconfig/errors.go new file mode 100644 index 0000000000..038fe39660 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/errors.go @@ -0,0 +1,42 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +const ( + // ErrConflictContainerNetworkAndLinks conflict between --net=container and links + ErrConflictContainerNetworkAndLinks validationError = "conflicting options: container type network can't be used with links. This would result in undefined behavior" + // ErrConflictSharedNetwork conflict between private and other networks + ErrConflictSharedNetwork validationError = "container sharing network namespace with another container or host cannot be connected to any other network" + // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. + ErrConflictHostNetwork validationError = "container cannot be disconnected from host network or connected to host network" + // ErrConflictNoNetwork conflict between private and other networks + ErrConflictNoNetwork validationError = "container cannot be connected to multiple networks with one of the networks in private (none) mode" + // ErrConflictNetworkAndDNS conflict between --dns and the network mode + ErrConflictNetworkAndDNS validationError = "conflicting options: dns and the network mode" + // ErrConflictNetworkHostname conflict between the hostname and the network mode + ErrConflictNetworkHostname validationError = "conflicting options: hostname and the network mode" + // ErrConflictHostNetworkAndLinks conflict between --net=host and links + ErrConflictHostNetworkAndLinks validationError = "conflicting options: host type networking can't be used with links. 
This would result in undefined behavior" + // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode + ErrConflictContainerNetworkAndMac validationError = "conflicting options: mac-address and the network mode" + // ErrConflictNetworkHosts conflict between add-host and the network mode + ErrConflictNetworkHosts validationError = "conflicting options: custom host-to-IP mapping and the network mode" + // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode + ErrConflictNetworkPublishPorts validationError = "conflicting options: port publishing and the container type network mode" + // ErrConflictNetworkExposePorts conflict between the expose option and the network mode + ErrConflictNetworkExposePorts validationError = "conflicting options: port exposing and the container type network mode" + // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address + ErrUnsupportedNetworkAndIP validationError = "user specified IP address is supported on user defined networks only" + // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address + ErrUnsupportedNetworkNoSubnetAndIP validationError = "user specified IP address is supported only when connecting to networks with user configured subnets" + // ErrUnsupportedNetworkAndAlias conflict between network mode and alias + ErrUnsupportedNetworkAndAlias validationError = "network-scoped alias is supported only for containers in user defined networks" + // ErrConflictUTSHostname conflict between the hostname and the UTS mode + ErrConflictUTSHostname validationError = "conflicting options: hostname and the UTS mode" +) + +type validationError string + +func (e validationError) Error() string { + return string(e) +} + +func (e validationError) InvalidParameter() {} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig.go b/vendor/github.com/docker/docker/runconfig/hostconfig.go new file mode 100644 index 0000000000..7d99e5acfa --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig.go @@ -0,0 +1,79 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "encoding/json" + "io" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// DecodeHostConfig creates a HostConfig based on the specified Reader. +// It assumes the content of the reader will be JSON, and decodes it. +func decodeHostConfig(src io.Reader) (*container.HostConfig, error) { + decoder := json.NewDecoder(src) + + var w ContainerConfigWrapper + if err := decoder.Decode(&w); err != nil { + return nil, err + } + + hc := w.getHostConfig() + return hc, nil +} + +// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure +// to default if it is not populated. This ensures backwards compatibility after +// the validation of the network mode was moved from the docker CLI to the +// docker daemon. +func SetDefaultNetModeIfBlank(hc *container.HostConfig) { + if hc != nil { + if hc.NetworkMode == container.NetworkMode("") { + hc.NetworkMode = container.NetworkMode("default") + } + } +} + +// validateNetContainerMode ensures that the various combinations of requested +// network settings wrt container mode are valid. 
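+// For example, NetworkMode "container:web" is rejected when the config also
+// sets a hostname, links, DNS servers, extra hosts, a MAC address, or
+// published/exposed ports, since those belong to the joined container's
+// network namespace.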
+func validateNetContainerMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + parts := strings.Split(string(hc.NetworkMode), ":") + if parts[0] == "container" { + if len(parts) < 2 || parts[1] == "" { + return validationError("Invalid network mode: invalid container format container:") + } + } + + if hc.NetworkMode.IsContainer() && c.Hostname != "" { + return ErrConflictNetworkHostname + } + + if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { + return ErrConflictContainerNetworkAndLinks + } + + if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { + return ErrConflictNetworkAndDNS + } + + if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { + return ErrConflictNetworkHosts + } + + if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { + return ErrConflictContainerNetworkAndMac + } + + if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) { + return ErrConflictNetworkPublishPorts + } + + if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { + return ErrConflictNetworkExposePorts + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go new file mode 100644 index 0000000000..e579b06d9b --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_unix.go @@ -0,0 +1,110 @@ +// +build !windows + +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "fmt" + "runtime" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("bridge") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + n := container.NetworkMode(network) + return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. +func validateNetMode(c *container.Config, hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.UTSMode.IsHost() && c.Hostname != "" { + return ErrConflictUTSHostname + } + + if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { + return ErrConflictHostNetworkAndLinks + } + + return nil +} + +// validateIsolation performs platform specific validation of +// isolation in the hostconfig structure. 
Linux only supports "default" +// which is LXC container isolation +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) + } + return nil +} + +// validateQoS performs platform specific validation of the QoS settings +func validateQoS(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.IOMaximumBandwidth != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum bandwidth", runtime.GOOS) + } + + if hc.IOMaximumIOps != 0 { + return fmt.Errorf("Invalid QoS settings: %s does not support configuration of maximum IOPs", runtime.GOOS) + } + return nil +} + +// validateResources performs platform specific validation of the resource settings +// cpu-rt-runtime and cpu-rt-period can not be greater than their parent, cpu-rt-runtime requires sys_nice +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + + if hc.Resources.CPURealtimePeriod > 0 && !si.CPURealtimePeriod { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time period") + } + + if hc.Resources.CPURealtimeRuntime > 0 && !si.CPURealtimeRuntime { + return fmt.Errorf("Your kernel does not support cgroup cpu real-time runtime") + } + + if hc.Resources.CPURealtimePeriod != 0 && hc.Resources.CPURealtimeRuntime != 0 && hc.Resources.CPURealtimeRuntime > hc.Resources.CPURealtimePeriod { + return fmt.Errorf("cpu real-time runtime cannot be higher than cpu real-time period") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go new file mode 100644 index 0000000000..33a4668af1 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/hostconfig_windows.go @@ -0,0 +1,96 @@ +package runconfig // import "github.com/docker/docker/runconfig" + +import ( + "fmt" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/sysinfo" +) + +// DefaultDaemonNetworkMode returns the default network stack the daemon should +// use. +func DefaultDaemonNetworkMode() container.NetworkMode { + return container.NetworkMode("nat") +} + +// IsPreDefinedNetwork indicates if a network is predefined by the daemon +func IsPreDefinedNetwork(network string) bool { + return !container.NetworkMode(network).IsUserDefined() +} + +// validateNetMode ensures that the various combinations of requested +// network settings are valid. 
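+// On Windows it additionally rejects sharing another container's network
+// stack while Hyper-V isolation is in use (checked below).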
+func validateNetMode(c *container.Config, hc *container.HostConfig) error { + if hc == nil { + return nil + } + + err := validateNetContainerMode(c, hc) + if err != nil { + return err + } + + if hc.NetworkMode.IsContainer() && hc.Isolation.IsHyperV() { + return fmt.Errorf("Using the network stack of another container is not supported while using Hyper-V Containers") + } + + return nil +} + +// validateIsolation performs platform specific validation of the +// isolation in the hostconfig structure. Windows supports 'default' (or +// blank), 'process', or 'hyperv'. +func validateIsolation(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if !hc.Isolation.IsValid() { + return fmt.Errorf("Invalid isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) + } + return nil +} + +// validateQoS performs platform specific validation of the Qos settings +func validateQoS(hc *container.HostConfig) error { + return nil +} + +// validateResources performs platform specific validation of the resource settings +func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Resources.CPURealtimePeriod != 0 { + return fmt.Errorf("Windows does not support CPU real-time period") + } + if hc.Resources.CPURealtimeRuntime != 0 { + return fmt.Errorf("Windows does not support CPU real-time runtime") + } + return nil +} + +// validatePrivileged performs platform specific validation of the Privileged setting +func validatePrivileged(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.Privileged { + return fmt.Errorf("Windows does not support privileged mode") + } + return nil +} + +// validateReadonlyRootfs performs platform specific validation of the ReadonlyRootfs setting +func validateReadonlyRootfs(hc *container.HostConfig) error { + // We may not be passed a host config, such as in the case of docker commit + if hc == nil { + return nil + } + if hc.ReadonlyRootfs { + return fmt.Errorf("Windows does not support root filesystem in read-only mode") + } + return nil +} diff --git a/vendor/github.com/docker/docker/runconfig/opts/parse.go b/vendor/github.com/docker/docker/runconfig/opts/parse.go new file mode 100644 index 0000000000..8f7baeb637 --- /dev/null +++ b/vendor/github.com/docker/docker/runconfig/opts/parse.go @@ -0,0 +1,20 @@ +package opts // import "github.com/docker/docker/runconfig/opts" + +import ( + "strings" +) + +// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} +func ConvertKVStringsToMap(values []string) map[string]string { + result := make(map[string]string, len(values)) + for _, value := range values { + kv := strings.SplitN(value, "=", 2) + if len(kv) == 1 { + result[kv[0]] = "" + } else { + result[kv[0]] = kv[1] + } + } + + return result +} diff --git a/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go new file mode 100644 index 0000000000..bafb7b07f8 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go @@ -0,0 +1,34 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "path" + + "github.com/docker/docker/api/types/mount" +) + +var lcowSpecificValidators mountValidator = func(m 
*mount.Mount) error { + if path.Clean(m.Target) == "/" { + return ErrVolumeTargetIsRoot + } + if m.Type == mount.TypeNamedPipe { + return errors.New("Linux containers on Windows do not support named pipe mounts") + } + return nil +} + +type lcowParser struct { + windowsParser +} + +func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go new file mode 100644 index 0000000000..8e436aec0e --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -0,0 +1,417 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +type linuxParser struct { +} + +func linuxSplitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func linuxValidateNotRoot(p string) error { + p = path.Clean(strings.Replace(p, `\`, `/`, -1)) + if p == "/" { + return ErrVolumeTargetIsRoot + } + return nil +} +func linuxValidateAbsolute(p string) error { + p = strings.Replace(p, `\`, `/`, -1) + if path.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} +func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { + // there was something looking like a bug in existing codebase: + // - validateMountConfig on linux was called with options skipping bind source existence when calling ParseMountRaw + // - but not when calling ParseMountSpec directly... 
nor when the unit test called it directly + return p.validateMountConfigImpl(mnt, true) +} +func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := linuxValidateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if err := linuxValidateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { + if _, ok := linuxPropagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := linuxValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + if validateBindSourceExists { + exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source) + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + case mount.TypeTmpfs: + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var linuxLabelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var linuxConsistencyModes = map[mount.Consistency]bool{ + mount.ConsistencyFull: true, + mount.ConsistencyCached: true, + mount.ConsistencyDelegated: true, +} +var linuxPropagationModes = map[mount.Propagation]bool{ + mount.PropagationPrivate: true, + mount.PropagationRPrivate: true, + mount.PropagationSlave: true, + mount.PropagationRSlave: true, + mount.PropagationShared: true, + mount.PropagationRShared: true, +} + +const linuxDefaultPropagationMode = mount.PropagationRPrivate + +func linuxGetPropagation(mode string) mount.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mount.Propagation(o) + if linuxPropagationModes[prop] { + return prop + } + } + return linuxDefaultPropagationMode +} + +func linuxHasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if linuxPropagationModes[mount.Propagation(o)] { + return true + } + } + return false +} + +func linuxValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + consistencyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case linuxLabelModes[o]: + labelModeCount++ + case linuxPropagationModes[mount.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + case 
linuxConsistencyModes[mount.Consistency(o)]: + consistencyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { + return false + } + return true +} + +func (p *linuxParser) ReadWrite(mode string) bool { + if !linuxValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := linuxSplitRawSpec(raw) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if linuxValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !linuxValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if path.IsAbs(spec.Source) { + spec.Type = mount.TypeBind + } else { + spec.Type = mount.TypeVolume + } + + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if linuxHasPropagation(mode) { + spec.BindOptions = &mount.BindOptions{ + Propagation: linuxGetPropagation(mode), + } + } + + mp, err := p.parseMountSpec(spec, false) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} +func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, true) +} +func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { + if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: path.Clean(filepath.ToSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. 
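+			// (rprivate, per linuxDefaultPropagationMode above)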
+ mp.Propagation = linuxDefaultPropagationMode + } + case mount.TypeTmpfs: + // NOP + } + return mp, nil +} + +func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !linuxValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if linuxHasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *linuxParser) DefaultPropagationMode() mount.Propagation { + return linuxDefaultPropagationMode +} + +func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} + +func (p *linuxParser) DefaultCopyMode() bool { + return true +} +func (p *linuxParser) ValidateVolumeName(name string) error { + return nil +} + +func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { + return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName +} + +func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { + if err := linuxValidateNotRoot(dest); err != nil { + return err + } + return linuxValidateAbsolute(dest) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/mounts.go b/vendor/github.com/docker/docker/volume/mounts/mounts.go new file mode 100644 index 0000000000..8f255a5482 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/mounts.go @@ -0,0 +1,170 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. 
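+// For example, `-v /host/data:/data:ro` produces a MountPoint with Source
+// "/host/data", Destination "/data", RW false and Type "bind".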
+//
+// Note that this type is embedded in the `container.Container` object and persisted to disk.
+// Changes to this struct need to be synced with on-disk state.
+type MountPoint struct {
+	// Source is the source path of the mount.
+	// E.g. `mount --bind /foo /bar`, `/foo` is the `Source`.
+	Source string
+	// Destination is the path relative to the container root (`/`) to the mount point
+	// It is where the `Source` is mounted to
+	Destination string
+	// RW is set to true when the mountpoint should be mounted as read-write
+	RW bool
+	// Name is the name reference to the underlying data defined by `Source`
+	// e.g., the volume name
+	Name string
+	// Driver is the volume driver used to create the volume (if it is a volume)
+	Driver string
+	// Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount
+	Type mounttypes.Type `json:",omitempty"`
+	// Volume is the volume providing data to this mountpoint.
+	// This is nil unless `Type` is set to `TypeVolume`
+	Volume volume.Volume `json:"-"`
+
+	// Mode is the comma separated list of options supplied by the user when creating
+	// the bind/volume mount.
+	// Note Mode is not used on Windows
+	Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel`
+
+	// Propagation describes how the mounts are propagated from the host into the
+	// mount point, and vice-versa.
+	// See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt
+	// Note Propagation is not used on Windows
+	Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string
+
+	// Specifies if data should be copied from the container before the first mount
+	// Use a pointer here so we can tell if the user set this value explicitly
+	// This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated
+	CopyData bool `json:"-"`
+	// ID is the opaque ID used to pass to the volume driver.
+	// This should be set by calls to `Mount` and unset by calls to `Unmount`
+	ID string `json:",omitempty"`
+
+	// Spec is a copy of the API request that created this mount.
+	Spec mounttypes.Mount
+
+	// Track usage of this mountpoint
+	// Specifically needed for containers which are running and calls to `docker cp`
+	// because both these actions require mounting the volumes.
+	active int
+}
+
+// Cleanup frees resources used by the mountpoint
+func (m *MountPoint) Cleanup() error {
+	if m.Volume == nil || m.ID == "" {
+		return nil
+	}
+
+	if err := m.Volume.Unmount(m.ID); err != nil {
+		return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name())
+	}
+
+	m.active--
+	if m.active == 0 {
+		m.ID = ""
+	}
+	return nil
+}
+
+// Setup sets up a mount point by either mounting the volume if it is
+// configured, or creating the source directory if supplied.
+// The optional checkFun parameter allows additional checking
+// before creating the source directory on the host.
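+// (The daemon uses checkFun, for instance, to refuse creating its own socket
+// path as a directory while shutting down; see the checkFun call below.)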
+func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.IDPair, checkFun func(m *MountPoint) error) (path string, err error) { + defer func() { + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + var sourcePath string + sourcePath, err = filepath.EvalSymlinks(m.Source) + if err != nil { + path = "" + err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source) + return + } + err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode)) + if err == syscall.ENOTSUP { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath) + } + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateNonCryptoID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + + m.ID = id + m.active++ + return path, nil + } + + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + + if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if it's not nil. One of + // the use case is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. + if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } + // idtools.MkdirAllNewAs() produces an error if m.Source exists and is a file (not a directory) + // also, makes sure that if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +func errInvalidMode(mode string) error { + return errors.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return errors.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/parser.go b/vendor/github.com/docker/docker/volume/mounts/parser.go new file mode 100644 index 0000000000..73681750ea --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/parser.go @@ -0,0 +1,47 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "runtime" + + "github.com/docker/docker/api/types/mount" +) + +const ( + // OSLinux is the same as runtime.GOOS on linux + OSLinux = "linux" + // OSWindows is the same as runtime.GOOS on windows + OSWindows = "windows" +) + +// ErrVolumeTargetIsRoot is returned when the target destination is root. +// It's used by both LCOW and Linux parsers. 
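+// e.g. a raw spec such as `myvol:/` fails with this error, since mounting
+// over the container root is never valid.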
+var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't be '/'") + +// Parser represents a platform specific parser for mount expressions +type Parser interface { + ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) + ParseMountSpec(cfg mount.Mount) (*MountPoint, error) + ParseVolumesFrom(spec string) (string, string, error) + DefaultPropagationMode() mount.Propagation + ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) + DefaultCopyMode() bool + ValidateVolumeName(name string) error + ReadWrite(mode string) bool + IsBackwardCompatible(m *MountPoint) bool + HasResource(m *MountPoint, absPath string) bool + ValidateTmpfsMountDestination(dest string) error + ValidateMountConfig(mt *mount.Mount) error +} + +// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) +func NewParser(containerOS string) Parser { + switch containerOS { + case OSWindows: + return &windowsParser{} + } + if runtime.GOOS == OSWindows { + return &lcowParser{} + } + return &linuxParser{} +} diff --git a/vendor/github.com/docker/docker/volume/mounts/validate.go b/vendor/github.com/docker/docker/volume/mounts/validate.go new file mode 100644 index 0000000000..0b71526901 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/validate.go @@ -0,0 +1,28 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + + "github.com/docker/docker/api/types/mount" + "github.com/pkg/errors" +) + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errBindSourceDoesNotExist(path string) error { + return errors.Errorf("bind mount source path does not exist: %s", path) +} + +func errExtraField(name string) error { + return errors.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return errors.Errorf("field %s must not be empty", name) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_copy.go b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go new file mode 100644 index 0000000000..04056fa50a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go @@ -0,0 +1,23 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import "strings" + +// {=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// GetCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string, def bool) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return def, false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go new file mode 100644 index 0000000000..c6d51e0710 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go @@ -0,0 +1,18 @@ +// +build linux freebsd darwin + +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "path/filepath" + "strings" +) + +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".." 
&& !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator))
+}
+
+func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_windows.go b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go
new file mode 100644
index 0000000000..773e7db88a
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go
@@ -0,0 +1,8 @@
+package mounts // import "github.com/docker/docker/volume/mounts"
+
+func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool {
+	return false
+}
+func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool {
+	return false
+}
diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go
new file mode 100644
index 0000000000..ac61044043
--- /dev/null
+++ b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go
@@ -0,0 +1,456 @@
+package mounts // import "github.com/docker/docker/volume/mounts"
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+type windowsParser struct {
+}
+
+const (
+	// Spec should be in the format [source:]destination[:mode]
+	//
+	// Examples: c:\foo bar:d:rw
+	//           c:\foo:d:\bar
+	//           myname:d:
+	//           d:\
+	//
+	// Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See
+	// https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to
+	// test is https://regex-golang.appspot.com/assets/html/index.html
+	//
+	// Useful link for referencing named capturing groups:
+	// http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex
+	//
+	// There are three match groups: source, destination and mode.
+	//
+
+	// rxHostDir is the first option of a source
+	rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*`
+	// rxName is the second option of a source
+	rxName = `[^\\/:*?"<>|\r\n]+`
+
+	// RXReservedNames are reserved names not possible on Windows
+	rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
+
+	// rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \)
+	rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+`
+	// rxSource is the combined possibilities for a source
+	rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?`
+
+	// Source. Can be either a host directory, a name, or omitted:
+	//  HostDir:
+	//    - Essentially using the folder solution from
+	//      https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html
+	//      but adding case insensitivity.
+	//    - Must be an absolute path such as c:\path
+	//    - Can include spaces such as `c:\program files`
+	//    - And then followed by a colon which is not in the capture group
+	//    - And can be optional
+	//  Name:
+	//    - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
+	//    - And then followed by a colon which is not in the capture group
+	//    - And can be optional
+
+	// rxDestination is the regex expression for the mount destination
+	rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))`
+
+	rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)`
+	// Destination (aka container path):
+	//    - Variation on hostdir but can be a drive followed by colon as well
+	//    - If a path, must be absolute. Can include spaces
+	//    - Drive cannot be c: (explicitly checked in code, not RegEx)
+
+	// rxMode is the regex expression for the mode of the mount
+	// Mode (optional):
+	//    - Hopefully self explanatory in comparison to the regexes above.
+	//    - Colon is not in the capture group
+	rxMode = `(:(?P<mode>(?i)ro|rw))?`
+)
+
+type mountValidator func(mnt *mount.Mount) error
+
+func windowsSplitRawSpec(raw, destRegex string) ([]string, error) {
+	specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`)
+	match := specExp.FindStringSubmatch(strings.ToLower(raw))
+
+	// Must have something back
+	if len(match) == 0 {
+		return nil, errInvalidSpec(raw)
+	}
+
+	var split []string
+	matchgroups := make(map[string]string)
+	// Pull out the sub expressions from the named capture groups
+	for i, name := range specExp.SubexpNames() {
+		matchgroups[name] = strings.ToLower(match[i])
+	}
+	if source, exists := matchgroups["source"]; exists {
+		if source != "" {
+			split = append(split, source)
+		}
+	}
+	if destination, exists := matchgroups["destination"]; exists {
+		if destination != "" {
+			split = append(split, destination)
+		}
+	}
+	if mode, exists := matchgroups["mode"]; exists {
+		if mode != "" {
+			split = append(split, mode)
+		}
+	}
+	// Fix #26329. If the destination appears to be a file, and the source is null,
+	// it may be because we've fallen through the possible naming regex and hit a
+	// situation where the user intention was to map a file into a container through
+	// a local volume, but this is not supported by the platform.
+	if matchgroups["source"] == "" && matchgroups["destination"] != "" {
+		volExp := regexp.MustCompile(`^` + rxName + `$`)
+		reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`)
+
+		if volExp.MatchString(matchgroups["destination"]) {
+			if reservedNameExp.MatchString(matchgroups["destination"]) {
+				return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"])
+			}
+		} else {
+
+			exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"])
+			if exists && !isDir {
+				return nil, fmt.Errorf("file '%s' cannot be mapped.
Only directories can be mapped on this platform", matchgroups["destination"]) + + } + } + } + return split, nil +} + +func windowsValidMountMode(mode string) bool { + if mode == "" { + return true + } + return rwModes[strings.ToLower(mode)] +} +func windowsValidateNotRoot(p string) error { + p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { + return windowsValidateNotRoot(mnt.Target) +} + +func windowsValidateRegex(p, r string) error { + if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { + return nil + } + return fmt.Errorf("invalid mount path: '%s'", p) +} +func windowsValidateAbsolute(p string) error { + if err := windowsValidateRegex(p, rxDestination); err != nil { + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) + } + return nil +} + +func windowsDetectMountType(p string) mount.Type { + if strings.HasPrefix(p, `\\.\pipe\`) { + return mount.TypeNamedPipe + } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { + return mount.TypeBind + } else { + return mount.TypeVolume + } +} + +func (p *windowsParser) ReadWrite(mode string) bool { + return strings.ToLower(mode) != "ro" +} + +// IsVolumeNameValid checks a volume name in a platform specific manner. +func (p *windowsParser) ValidateVolumeName(name string) error { + nameExp := regexp.MustCompile(`^` + rxName + `$`) + if !nameExp.MatchString(name) { + return errors.New("invalid volume name") + } + nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) + if nameExp.MatchString(name) { + return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return nil +} +func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) +} + +type fileInfoProvider interface { + fileInfo(path string) (exist, isDir bool, err error) +} + +type defaultFileInfoProvider struct { +} + +func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { + fi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return false, false, err + } + return false, false, nil + } + return true, fi.IsDir(), nil +} + +var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} + +func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { + + for _, v := range additionalValidators { + if err := v(mnt); err != nil { + return &errMountConfig{mnt, err} + } + } + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := windowsValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + exists, isdir, err := 
currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + if !isdir { + return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if err := p.ValidateVolumeName(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeNamedPipe: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if mnt.ReadOnly { + return &errMountConfig{mnt, errExtraField("ReadOnly")} + } + + if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} + } + + if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} +func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) +} + +func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + arr, err := windowsSplitRawSpec(raw, destRegex) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if windowsValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + if convertTargetToBackslash { + spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) + } + + if !windowsValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + spec.Type = windowsDetectMountType(spec.Source) + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + + mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
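+	// mp may be non-nil even when err is set: the raw mode string is
+	// attached to it below before the error is wrapped.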
+ if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) +} +func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: cfg.Target, + Type: cfg.Type, + Spec: cfg, + } + if convertTargetToBackslash { + mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateNonCryptoID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + case mount.TypeNamedPipe: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + } + // cleanup trailing `\` except for paths like `c:\` + if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { + mp.Source = mp.Source[:len(mp.Source)-1] + } + if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { + mp.Destination = mp.Destination[:len(mp.Destination)-1] + } + return mp, nil +} + +func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !windowsValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *windowsParser) DefaultPropagationMode() mount.Propagation { + return mount.Propagation("") +} + +func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} +func (p *windowsParser) DefaultCopyMode() bool { + return false +} +func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { + return false +} + +func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { + return errors.New("Platform does not support tmpfs") +} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 0000000000..61c8243979 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,69 @@ +package volume // import "github.com/docker/docker/volume" + +import ( + "time" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define if a volume is cluster-wide (global) or local only. 
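+// A global volume is visible to every node in the cluster, while a local +// volume exists only on the host that created it.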
+// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given name. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // CreatedAt returns Volume Creation time + CreatedAt() (time.Time, error) + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} diff --git a/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 0000000000..6d630cf595 --- /dev/null +++ b/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 0000000000..5120078dfb --- /dev/null +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks. 
The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster returns a Broadcaster that dispatches events to the provided +// sinks. The broadcaster behavior will be affected by the properties of the +// sink. Generally, the sink should accept all messages and deal with +// reliability on its own; EventQueue and RetryingSink are appropriate +// wrappers here. +func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). The caller cedes the memory to the +// broadcaster and should not modify it after calling Write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. 
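+ // Scan the existing sinks first so that adding the same sink twice + // is a no-op.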
+ + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize a copy of this broadcaster without the sync.Once, to avoid + // a data race. + + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 0000000000..802cf51ffe --- /dev/null +++ b/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Done() is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that is closed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 0000000000..56db7c2510 --- /dev/null +++ b/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/docker/go-events/event.go b/vendor/github.com/docker/go-events/event.go new file mode 100644 index 0000000000..f0f1d9ea5f --- /dev/null +++ b/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. 
If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. + Close() error +} diff --git a/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 0000000000..e6c0eb69dd --- /dev/null +++ b/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on Filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that sends to dst only those events for +// which the matcher returns true. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. + if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 0000000000..4bb770afc2 --- /dev/null +++ b/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue that forwards events to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing any pending events. +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // nil event means the queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. 
+ // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return an event. When closed, nil will be returned. +func (eq *Queue) next() Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(Event) + eq.events.Remove(front) + + return block +} diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go new file mode 100644 index 0000000000..2df55d2160 --- /dev/null +++ b/vendor/github.com/docker/go-events/retry.go @@ -0,0 +1,260 @@ +package events + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" +) + +// RetryingSink retries the write until success or an ErrSinkClosed is +// returned. The underlying sink must have a nonzero probability of +// succeeding or the sink will block. Retry is configured with a +// RetryStrategy. Concurrent calls to a retrying sink are serialized through +// the sink, meaning that if one is in-flight, another will not proceed. +type RetryingSink struct { + sink Sink + strategy RetryStrategy + closed chan struct{} + once sync.Once +} + +// NewRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. The provided RetryStrategy controls when and for how long +// writes back off. +func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { + rs := &RetryingSink{ + sink: sink, + strategy: strategy, + closed: make(chan struct{}), + } + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *RetryingSink) Write(event Event) error { + logger := logrus.WithField("event", event) + +retry: + select { + case <-rs.closed: + return ErrSinkClosed + default: + } + + if backoff := rs.strategy.Proceed(event); backoff > 0 { + select { + case <-time.After(backoff): + // TODO(stevvooe): This branch holds up the next try. Before, we + // would simply break to the "retry" label and then possibly wait + // again. However, this requires all retry strategies to have a + // large probability of probing the sink for success, rather than + // just backing off and sending the request. + case <-rs.closed: + return ErrSinkClosed + } + } + + if err := rs.sink.Write(event); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logger := logger.WithError(err) // shadow!! + + if rs.strategy.Failure(event, err) { + logger.Errorf("retryingsink: dropped event") + return nil + } + + logger.Errorf("retryingsink: error writing event, retrying") + goto retry + } + + rs.strategy.Success(event) + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *RetryingSink) Close() error { + rs.once.Do(func() { + close(rs.closed) + }) + + return nil +} + +func (rs *RetryingSink) String() string { + // Serialize a copy of the RetryingSink without the sync.Once, to avoid + // a data race. 
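+ // (fmt.Sprint would otherwise reflect over the sync.Once, whose internal + // state may be mutated concurrently by Close.)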
+ rs2 := map[string]interface{}{ + "sink": rs.sink, + "strategy": rs.strategy, + "closed": rs.closed, + } + return fmt.Sprint(rs2) +} + +// RetryStrategy defines a strategy for retrying event sink writes. +// +// All methods should be goroutine safe. +type RetryStrategy interface { + // Proceed is called before every event send. If Proceed returns a + // non-zero duration, the retryer will back off by the provided + // duration. + // + // An event is provided, but may be ignored. + Proceed(event Event) time.Duration + + // Failure reports a failure to the strategy. If this method returns true, + // the event should be dropped. + Failure(event Event, err error) bool + + // Success should be called when an event is sent successfully. + Success(event Event) +} + +// Breaker implements a circuit breaker retry strategy. +// +// The current implementation never drops events. +type Breaker struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + mu sync.Mutex +} + +var _ RetryStrategy = &Breaker{} + +// NewBreaker returns a breaker that will back off after the threshold has +// been tripped. A Breaker is thread safe and may be shared by many goroutines. +func NewBreaker(threshold int, backoff time.Duration) *Breaker { + return &Breaker{ + threshold: threshold, + backoff: backoff, + } +} + +// Proceed checks the failures against the threshold. +func (b *Breaker) Proceed(event Event) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + + if b.recent < b.threshold { + return 0 + } + + return b.last.Add(b.backoff).Sub(time.Now()) +} + +// Success resets the breaker. +func (b *Breaker) Success(event Event) { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent = 0 + b.last = time.Time{} +} + +// Failure records the failure and latest failure time. +func (b *Breaker) Failure(event Event, err error) bool { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent++ + b.last = time.Now().UTC() + return false // never drop events. +} + +var ( + // DefaultExponentialBackoffConfig provides a default configuration for + // exponential backoff. + DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 20 * time.Second, + } +) + +// ExponentialBackoffConfig configures backoff parameters. +// +// Note that these parameters operate on the upper bound for choosing a random +// value. For example, at Base=1s, a random value in [0,1s) will be chosen for +// the backoff value. +type ExponentialBackoffConfig struct { + // Base is the minimum bound for backing off after failure. + Base time.Duration + + // Factor sets the amount of time by which the backoff grows with each + // failure. + Factor time.Duration + + // Max is the absolute maximum bound for a single backoff. + Max time.Duration +} + +// ExponentialBackoff implements random backoff with exponentially increasing +// bounds as the number of consecutive failures increases. +type ExponentialBackoff struct { + config ExponentialBackoffConfig + failures uint64 // consecutive failure counter. +} + +// NewExponentialBackoff returns an exponential backoff strategy with the +// desired config. If Factor or Max is zero, the corresponding value from +// DefaultExponentialBackoffConfig is used. +func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { + return &ExponentialBackoff{ + config: config, + } +} + +// Proceed returns the next randomly bound exponential backoff time. 
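+// The bound grows as Base plus Factor*2^(failures-1), capped at Max; the +// returned delay is drawn uniformly from [0, bound).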
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/docker/libkv/LICENSE.code b/vendor/github.com/docker/libkv/LICENSE.code new file mode 100644 index 0000000000..34c4ea7c50 --- /dev/null +++ b/vendor/github.com/docker/libkv/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/libkv/LICENSE.docs b/vendor/github.com/docker/libkv/LICENSE.docs new file mode 100644 index 0000000000..e26cd4fc8e --- /dev/null +++ b/vendor/github.com/docker/libkv/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/libkv/libkv.go b/vendor/github.com/docker/libkv/libkv.go
new file mode 100644
index 0000000000..bdb8c7529f
--- /dev/null
+++ b/vendor/github.com/docker/libkv/libkv.go
@@ -0,0 +1,40 @@
+package libkv
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/docker/libkv/store"
+)
+
+// Initialize creates a new Store object, initializing the client
+type Initialize func(addrs []string, options *store.Config) (store.Store, error)
+
+var (
+	// Backend initializers
+	initializers = make(map[store.Backend]Initialize)
+
+	supportedBackend = func() string {
+		keys := make([]string, 0, len(initializers))
+		for k := range initializers {
+			keys = append(keys, string(k))
+		}
+		sort.Strings(keys)
+		return strings.Join(keys, ", ")
+	}()
+)
+
+// NewStore creates an instance of store
+func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) {
+	if init, exists := initializers[backend]; exists {
+		return init(addrs, options)
+	}
+
+	return nil, fmt.Errorf("%s %s", store.ErrBackendNotSupported.Error(), supportedBackend)
+}
+
+// AddStore adds a new store backend to libkv
+func AddStore(store store.Backend, init Initialize) {
+	initializers[store] = init
+}
diff --git a/vendor/github.com/docker/libkv/store/boltdb/boltdb.go b/vendor/github.com/docker/libkv/store/boltdb/boltdb.go
new file mode 100644
index 0000000000..4026e0a20c
--- /dev/null
+++ b/vendor/github.com/docker/libkv/store/boltdb/boltdb.go
@@ -0,0 +1,469 @@
+package boltdb
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"os"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/docker/libkv"
+	"github.com/docker/libkv/store"
+)
+
+var (
+	// ErrMultipleEndpointsUnsupported is thrown when multiple endpoints are
+	// specified for BoltDB. The endpoint has to be a local file path
+	ErrMultipleEndpointsUnsupported = errors.New("boltdb supports one endpoint and should be a file path")
+	// ErrBoltBucketOptionMissing is thrown when the boltBucket config option is missing
+	ErrBoltBucketOptionMissing = errors.New("boltBucket config option missing")
+)
+
+const (
+	filePerm os.FileMode = 0644
+)
+
+// BoltDB type implements the Store interface
+type BoltDB struct {
+	client     *bolt.DB
+	boltBucket []byte
+	dbIndex    uint64
+	path       string
+	timeout    time.Duration
+	// By default libkv opens and closes the bolt DB connection for every
+	// get/put operation. This allows multiple apps to use a Bolt DB at the
+	// same time.
+	// The PersistConnection flag provides an option to override this behavior,
+	// i.e. open the connection in New and use it until Close is called.
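+	//
+	// A minimal sketch of opting in from the caller side (illustrative
+	// only; the ./libkv.db path and bucket name are placeholders):
+	//
+	//	boltdb.Register()
+	//	kv, err := libkv.NewStore(store.BOLTDB, []string{"./libkv.db"},
+	//		&store.Config{Bucket: "example", PersistConnection: true})
+	//	if err == nil {
+	//		defer kv.Close()
+	//	}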
+	PersistConnection bool
+	sync.Mutex
+}
+
+const (
+	libkvmetadatalen = 8
+	transientTimeout = time.Duration(10) * time.Second
+)
+
+// Register registers boltdb to libkv
+func Register() {
+	libkv.AddStore(store.BOLTDB, New)
+}
+
+// New opens a new BoltDB connection to the specified path and bucket
+func New(endpoints []string, options *store.Config) (store.Store, error) {
+	var (
+		db          *bolt.DB
+		err         error
+		boltOptions *bolt.Options
+	)
+
+	if len(endpoints) > 1 {
+		return nil, ErrMultipleEndpointsUnsupported
+	}
+
+	if (options == nil) || (len(options.Bucket) == 0) {
+		return nil, ErrBoltBucketOptionMissing
+	}
+
+	dir, _ := filepath.Split(endpoints[0])
+	if err = os.MkdirAll(dir, 0750); err != nil {
+		return nil, err
+	}
+
+	if options.PersistConnection {
+		boltOptions = &bolt.Options{Timeout: options.ConnectionTimeout}
+		db, err = bolt.Open(endpoints[0], filePerm, boltOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	b := &BoltDB{
+		client:            db,
+		path:              endpoints[0],
+		boltBucket:        []byte(options.Bucket),
+		timeout:           transientTimeout,
+		PersistConnection: options.PersistConnection,
+	}
+
+	return b, nil
+}
+
+func (b *BoltDB) reset() {
+	b.path = ""
+	b.boltBucket = []byte{}
+}
+
+func (b *BoltDB) getDBhandle() (*bolt.DB, error) {
+	var (
+		db  *bolt.DB
+		err error
+	)
+	if !b.PersistConnection {
+		boltOptions := &bolt.Options{Timeout: b.timeout}
+		if db, err = bolt.Open(b.path, filePerm, boltOptions); err != nil {
+			return nil, err
+		}
+		b.client = db
+	}
+
+	return b.client, nil
+}
+
+func (b *BoltDB) releaseDBhandle() {
+	if !b.PersistConnection {
+		b.client.Close()
+	}
+}
+
+// Get the value at "key". BoltDB doesn't provide a built-in last-modified index
+// with every kv pair; it is implemented by an atomic counter maintained by
+// libkv and appended to the value passed by the client.
+func (b *BoltDB) Get(key string) (*store.KVPair, error) {
+	var (
+		val []byte
+		db  *bolt.DB
+		err error
+	)
+	b.Lock()
+	defer b.Unlock()
+
+	if db, err = b.getDBhandle(); err != nil {
+		return nil, err
+	}
+	defer b.releaseDBhandle()
+
+	err = db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(b.boltBucket)
+		if bucket == nil {
+			return store.ErrKeyNotFound
+		}
+
+		v := bucket.Get([]byte(key))
+		val = make([]byte, len(v))
+		copy(val, v)
+
+		return nil
+	})
+
+	if len(val) == 0 {
+		return nil, store.ErrKeyNotFound
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen])
+	val = val[libkvmetadatalen:]
+
+	return &store.KVPair{Key: key, Value: val, LastIndex: dbIndex}, nil
+}
+
+// Put the key/value pair. Index number metadata is prepended to the value.
+func (b *BoltDB) Put(key string, value []byte, opts *store.WriteOptions) error {
+	var (
+		dbIndex uint64
+		db      *bolt.DB
+		err     error
+	)
+	b.Lock()
+	defer b.Unlock()
+
+	dbval := make([]byte, libkvmetadatalen)
+
+	if db, err = b.getDBhandle(); err != nil {
+		return err
+	}
+	defer b.releaseDBhandle()
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		bucket, err := tx.CreateBucketIfNotExists(b.boltBucket)
+		if err != nil {
+			return err
+		}
+
+		dbIndex = atomic.AddUint64(&b.dbIndex, 1)
+		binary.LittleEndian.PutUint64(dbval, dbIndex)
+		dbval = append(dbval, value...)
+
+		err = bucket.Put([]byte(key), dbval)
+		if err != nil {
+			return err
+		}
+		return nil
+	})
+	return err
+}
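+
+// As described in the Get and Put comments above, every stored value is
+// laid out as [8-byte little-endian index][payload]. A hedged sketch of
+// decoding it the way Get does (raw is illustrative, not a variable in
+// this file):
+//
+//	index := binary.LittleEndian.Uint64(raw[:libkvmetadatalen])
+//	payload := raw[libkvmetadatalen:]
+
+// Delete the value for the given key.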
+func (b *BoltDB) Delete(key string) error { + var ( + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + err := bucket.Delete([]byte(key)) + return err + }) + return err +} + +// Exists checks if the key exists inside the store +func (b *BoltDB) Exists(key string) (bool, error) { + var ( + val []byte + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if db, err = b.getDBhandle(); err != nil { + return false, err + } + defer b.releaseDBhandle() + + err = db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + val = bucket.Get([]byte(key)) + + return nil + }) + + if len(val) == 0 { + return false, err + } + return true, err +} + +// List returns the range of keys starting with the passed in prefix +func (b *BoltDB) List(keyPrefix string) ([]*store.KVPair, error) { + var ( + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + kv := []*store.KVPair{} + + if db, err = b.getDBhandle(); err != nil { + return nil, err + } + defer b.releaseDBhandle() + + err = db.View(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + cursor := bucket.Cursor() + prefix := []byte(keyPrefix) + + for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() { + + dbIndex := binary.LittleEndian.Uint64(v[:libkvmetadatalen]) + v = v[libkvmetadatalen:] + val := make([]byte, len(v)) + copy(val, v) + + kv = append(kv, &store.KVPair{ + Key: string(key), + Value: val, + LastIndex: dbIndex, + }) + } + return nil + }) + if len(kv) == 0 { + return nil, store.ErrKeyNotFound + } + return kv, err +} + +// AtomicDelete deletes a value at "key" if the key +// has not been modified in the meantime, throws an +// error if this is the case +func (b *BoltDB) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + var ( + val []byte + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + if db, err = b.getDBhandle(); err != nil { + return false, err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + return store.ErrKeyNotFound + } + + val = bucket.Get([]byte(key)) + if val == nil { + return store.ErrKeyNotFound + } + dbIndex := binary.LittleEndian.Uint64(val[:libkvmetadatalen]) + if dbIndex != previous.LastIndex { + return store.ErrKeyModified + } + err := bucket.Delete([]byte(key)) + return err + }) + if err != nil { + return false, err + } + return true, err +} + +// AtomicPut puts a value at "key" if the key has not been +// modified since the last Put, throws an error if this is the case +func (b *BoltDB) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + var ( + val []byte + dbIndex uint64 + db *bolt.DB + err error + ) + b.Lock() + defer b.Unlock() + + dbval := make([]byte, libkvmetadatalen) + + if db, err = b.getDBhandle(); err != nil { + return false, nil, err + } + defer b.releaseDBhandle() + + err = db.Update(func(tx *bolt.Tx) error { + var err error + bucket := tx.Bucket(b.boltBucket) + if bucket == nil { + if previous != nil { + return store.ErrKeyNotFound + } 
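+			// The bucket does not exist yet; previous == nil means the caller
+			// intends a plain create, so make the bucket on first use.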
+			bucket, err = tx.CreateBucket(b.boltBucket)
+			if err != nil {
+				return err
+			}
+		}
+		// AtomicPut is equivalent to Put if previous is nil and the key
+		// doesn't exist in the DB.
+		val = bucket.Get([]byte(key))
+		if previous == nil && len(val) != 0 {
+			return store.ErrKeyExists
+		}
+		if previous != nil {
+			if len(val) == 0 {
+				return store.ErrKeyNotFound
+			}
+			dbIndex = binary.LittleEndian.Uint64(val[:libkvmetadatalen])
+			if dbIndex != previous.LastIndex {
+				return store.ErrKeyModified
+			}
+		}
+		dbIndex = atomic.AddUint64(&b.dbIndex, 1)
+		// Use the freshly incremented local index rather than re-reading
+		// b.dbIndex without synchronization.
+		binary.LittleEndian.PutUint64(dbval, dbIndex)
+		dbval = append(dbval, value...)
+		return bucket.Put([]byte(key), dbval)
+	})
+	if err != nil {
+		return false, nil, err
+	}
+
+	updated := &store.KVPair{
+		Key:       key,
+		Value:     value,
+		LastIndex: dbIndex,
+	}
+
+	return true, updated, nil
+}
+
+// Close the db connection to the BoltDB
+func (b *BoltDB) Close() {
+	b.Lock()
+	defer b.Unlock()
+
+	if !b.PersistConnection {
+		b.reset()
+	} else {
+		b.client.Close()
+	}
+	return
+}
+
+// DeleteTree deletes a range of keys with a given prefix
+func (b *BoltDB) DeleteTree(keyPrefix string) error {
+	var (
+		db  *bolt.DB
+		err error
+	)
+	b.Lock()
+	defer b.Unlock()
+
+	if db, err = b.getDBhandle(); err != nil {
+		return err
+	}
+	defer b.releaseDBhandle()
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket(b.boltBucket)
+		if bucket == nil {
+			return store.ErrKeyNotFound
+		}
+
+		cursor := bucket.Cursor()
+		prefix := []byte(keyPrefix)
+
+		for key, _ := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, _ = cursor.Next() {
+			_ = bucket.Delete([]byte(key))
+		}
+		return nil
+	})
+
+	return err
+}
+
+// NewLock has to be implemented at the library level since it's not supported by BoltDB
+func (b *BoltDB) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+	return nil, store.ErrCallNotSupported
+}
+
+// Watch has to be implemented at the library level since it's not supported by BoltDB
+func (b *BoltDB) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) {
+	return nil, store.ErrCallNotSupported
+}
+
+// WatchTree has to be implemented at the library level since it's not supported by BoltDB
+func (b *BoltDB) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	return nil, store.ErrCallNotSupported
+}
diff --git a/vendor/github.com/docker/libkv/store/consul/consul.go b/vendor/github.com/docker/libkv/store/consul/consul.go
new file mode 100644
index 0000000000..cb64be72d6
--- /dev/null
+++ b/vendor/github.com/docker/libkv/store/consul/consul.go
@@ -0,0 +1,558 @@
+package consul
+
+import (
+	"crypto/tls"
+	"errors"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/libkv"
+	"github.com/docker/libkv/store"
+	api "github.com/hashicorp/consul/api"
+)
+
+const (
+	// DefaultWatchWaitTime is how long we block for at a
+	// time to check if the watched key has changed. This
+	// affects the minimum time it takes to cancel a watch.
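+	// With the 15-second default below, a watch that has been asked to
+	// stop is noticed within roughly one WaitTime, since each blocking
+	// query returns no later than that even when nothing changed.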
+ DefaultWatchWaitTime = 15 * time.Second + + // RenewSessionRetryMax is the number of time we should try + // to renew the session before giving up and throwing an error + RenewSessionRetryMax = 5 + + // MaxSessionDestroyAttempts is the maximum times we will try + // to explicitely destroy the session attached to a lock after + // the connectivity to the store has been lost + MaxSessionDestroyAttempts = 5 + + // defaultLockTTL is the default ttl for the consul lock + defaultLockTTL = 20 * time.Second +) + +var ( + // ErrMultipleEndpointsUnsupported is thrown when there are + // multiple endpoints specified for Consul + ErrMultipleEndpointsUnsupported = errors.New("consul does not support multiple endpoints") + + // ErrSessionRenew is thrown when the session can't be + // renewed because the Consul version does not support sessions + ErrSessionRenew = errors.New("cannot set or renew session for ttl, unable to operate on sessions") +) + +// Consul is the receiver type for the +// Store interface +type Consul struct { + sync.Mutex + config *api.Config + client *api.Client +} + +type consulLock struct { + lock *api.Lock + renewCh chan struct{} +} + +// Register registers consul to libkv +func Register() { + libkv.AddStore(store.CONSUL, New) +} + +// New creates a new Consul client given a list +// of endpoints and optional tls config +func New(endpoints []string, options *store.Config) (store.Store, error) { + if len(endpoints) > 1 { + return nil, ErrMultipleEndpointsUnsupported + } + + s := &Consul{} + + // Create Consul client + config := api.DefaultConfig() + s.config = config + config.HttpClient = http.DefaultClient + config.Address = endpoints[0] + config.Scheme = "http" + + // Set options + if options != nil { + if options.TLS != nil { + s.setTLS(options.TLS) + } + if options.ConnectionTimeout != 0 { + s.setTimeout(options.ConnectionTimeout) + } + } + + // Creates a new client + client, err := api.NewClient(config) + if err != nil { + return nil, err + } + s.client = client + + return s, nil +} + +// SetTLS sets Consul TLS options +func (s *Consul) setTLS(tls *tls.Config) { + s.config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: tls, + } + s.config.Scheme = "https" +} + +// SetTimeout sets the timeout for connecting to Consul +func (s *Consul) setTimeout(time time.Duration) { + s.config.WaitTime = time +} + +// Normalize the key for usage in Consul +func (s *Consul) normalize(key string) string { + key = store.Normalize(key) + return strings.TrimPrefix(key, "/") +} + +func (s *Consul) renewSession(pair *api.KVPair, ttl time.Duration) error { + // Check if there is any previous session with an active TTL + session, err := s.getActiveSession(pair.Key) + if err != nil { + return err + } + + if session == "" { + entry := &api.SessionEntry{ + Behavior: api.SessionBehaviorDelete, // Delete the key when the session expires + TTL: (ttl / 2).String(), // Consul multiplies the TTL by 2x + LockDelay: 1 * time.Millisecond, // Virtually disable lock delay + } + + // Create the key session + session, _, err = s.client.Session().Create(entry, nil) + if err != nil { + return err + } + + lockOpts := &api.LockOptions{ + Key: pair.Key, + Session: session, + } + + // Lock and ignore if lock is held + // It's just a placeholder for the + // ephemeral behavior + lock, _ := s.client.LockOpts(lockOpts) + if lock != nil { + lock.Lock(nil) + } + } + + _, _, err = s.client.Session().Renew(session, nil) + return err +} + +// getActiveSession checks if the key already has +// a session attached +func 
(s *Consul) getActiveSession(key string) (string, error) { + pair, _, err := s.client.KV().Get(key, nil) + if err != nil { + return "", err + } + if pair != nil && pair.Session != "" { + return pair.Session, nil + } + return "", nil +} + +// Get the value at "key", returns the last modified index +// to use in conjunction to CAS calls +func (s *Consul) Get(key string) (*store.KVPair, error) { + options := &api.QueryOptions{ + AllowStale: false, + RequireConsistent: true, + } + + pair, meta, err := s.client.KV().Get(s.normalize(key), options) + if err != nil { + return nil, err + } + + // If pair is nil then the key does not exist + if pair == nil { + return nil, store.ErrKeyNotFound + } + + return &store.KVPair{Key: pair.Key, Value: pair.Value, LastIndex: meta.LastIndex}, nil +} + +// Put a value at "key" +func (s *Consul) Put(key string, value []byte, opts *store.WriteOptions) error { + key = s.normalize(key) + + p := &api.KVPair{ + Key: key, + Value: value, + Flags: api.LockFlagValue, + } + + if opts != nil && opts.TTL > 0 { + // Create or renew a session holding a TTL. Operations on sessions + // are not deterministic: creating or renewing a session can fail + for retry := 1; retry <= RenewSessionRetryMax; retry++ { + err := s.renewSession(p, opts.TTL) + if err == nil { + break + } + if retry == RenewSessionRetryMax { + return ErrSessionRenew + } + } + } + + _, err := s.client.KV().Put(p, nil) + return err +} + +// Delete a value at "key" +func (s *Consul) Delete(key string) error { + if _, err := s.Get(key); err != nil { + return err + } + _, err := s.client.KV().Delete(s.normalize(key), nil) + return err +} + +// Exists checks that the key exists inside the store +func (s *Consul) Exists(key string) (bool, error) { + _, err := s.Get(key) + if err != nil { + if err == store.ErrKeyNotFound { + return false, nil + } + return false, err + } + return true, nil +} + +// List child nodes of a given directory +func (s *Consul) List(directory string) ([]*store.KVPair, error) { + pairs, _, err := s.client.KV().List(s.normalize(directory), nil) + if err != nil { + return nil, err + } + if len(pairs) == 0 { + return nil, store.ErrKeyNotFound + } + + kv := []*store.KVPair{} + + for _, pair := range pairs { + if pair.Key == directory { + continue + } + kv = append(kv, &store.KVPair{ + Key: pair.Key, + Value: pair.Value, + LastIndex: pair.ModifyIndex, + }) + } + + return kv, nil +} + +// DeleteTree deletes a range of keys under a given directory +func (s *Consul) DeleteTree(directory string) error { + if _, err := s.List(directory); err != nil { + return err + } + _, err := s.client.KV().DeleteTree(s.normalize(directory), nil) + return err +} + +// Watch for changes on a "key" +// It returns a channel that will receive changes or pass +// on errors. Upon creation, the current value will first +// be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Consul) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + kv := s.client.KV() + watchCh := make(chan *store.KVPair) + + go func() { + defer close(watchCh) + + // Use a wait time in order to check if we should quit + // from time to time. 
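+		// (That WaitTime is what makes the stop check effective: each
+		// blocking Get returns after at most DefaultWatchWaitTime even if
+		// the key never changes, so the select below runs at least that
+		// often.)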
+		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
+
+		for {
+			// Check if we should quit
+			select {
+			case <-stopCh:
+				return
+			default:
+			}
+
+			// Get the key
+			pair, meta, err := kv.Get(key, opts)
+			if err != nil {
+				return
+			}
+
+			// If LastIndex didn't change then it means `Get` returned
+			// because of the WaitTime and the key didn't change.
+			if opts.WaitIndex == meta.LastIndex {
+				continue
+			}
+			opts.WaitIndex = meta.LastIndex
+
+			// Return the value to the channel
+			// FIXME: What happens when a key is deleted?
+			if pair != nil {
+				watchCh <- &store.KVPair{
+					Key:       pair.Key,
+					Value:     pair.Value,
+					LastIndex: pair.ModifyIndex,
+				}
+			}
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// WatchTree watches for changes on a "directory"
+// It returns a channel that will receive changes or pass
+// on errors. Upon creating a watch, the current children's
+// values will be sent to the channel. Providing a non-nil
+// stopCh can be used to stop watching.
+func (s *Consul) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) {
+	kv := s.client.KV()
+	watchCh := make(chan []*store.KVPair)
+
+	go func() {
+		defer close(watchCh)
+
+		// Use a wait time in order to check if we should quit
+		// from time to time.
+		opts := &api.QueryOptions{WaitTime: DefaultWatchWaitTime}
+		for {
+			// Check if we should quit
+			select {
+			case <-stopCh:
+				return
+			default:
+			}
+
+			// Get all the children
+			pairs, meta, err := kv.List(directory, opts)
+			if err != nil {
+				return
+			}
+
+			// If LastIndex didn't change then it means `List` returned
+			// because of the WaitTime and the child keys didn't change.
+			if opts.WaitIndex == meta.LastIndex {
+				continue
+			}
+			opts.WaitIndex = meta.LastIndex
+
+			// Return children KV pairs to the channel
+			kvpairs := []*store.KVPair{}
+			for _, pair := range pairs {
+				if pair.Key == directory {
+					continue
+				}
+				kvpairs = append(kvpairs, &store.KVPair{
+					Key:       pair.Key,
+					Value:     pair.Value,
+					LastIndex: pair.ModifyIndex,
+				})
+			}
+			watchCh <- kvpairs
+		}
+	}()
+
+	return watchCh, nil
+}
+
+// NewLock returns a handle to a lock struct which can
+// be used to provide mutual exclusion on a key
+func (s *Consul) NewLock(key string, options *store.LockOptions) (store.Locker, error) {
+	lockOpts := &api.LockOptions{
+		Key: s.normalize(key),
+	}
+
+	lock := &consulLock{}
+
+	ttl := defaultLockTTL
+	var renewCh chan struct{}
+
+	if options != nil {
+		// Set optional TTL on Lock
+		if options.TTL != 0 {
+			ttl = options.TTL
+		}
+		// Set optional value on Lock
+		if options.Value != nil {
+			lockOpts.Value = options.Value
+		}
+		// Optional channel used to stop the session renewal
+		renewCh = options.RenewLock
+	}
+
+	entry := &api.SessionEntry{
+		Behavior:  api.SessionBehaviorRelease, // Release the lock when the session expires
+		TTL:       (ttl / 2).String(),         // Consul multiplies the TTL by 2x
+		LockDelay: 1 * time.Millisecond,       // Virtually disable lock delay
+	}
+
+	// Create the key session
+	session, _, err := s.client.Session().Create(entry, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// Place the session and renew chan on lock; the renewCh local guards
+	// against dereferencing a nil options here.
+	lockOpts.Session = session
+	lock.renewCh = renewCh
+
+	l, err := s.client.LockOpts(lockOpts)
+	if err != nil {
+		return nil, err
+	}
+
+	// Renew the session ttl lock periodically
+	s.renewLockSession(entry.TTL, session, renewCh)
+
+	lock.lock = l
+	return lock, nil
+}
+
+// renewLockSession is used to renew a session Lock; it takes
+// a stopRenew chan which is used to explicitly stop the session
+// renew process. The renew routine never stops until a signal is
+// sent to this channel.
If deleting the session fails because the
+// connection to the store is lost, it keeps trying to delete the
+// session periodically until it can contact the store. This ensures
+// that the lock is not maintained indefinitely, favoring liveness
+// over safety for the lock when the store becomes unavailable.
+func (s *Consul) renewLockSession(initialTTL string, id string, stopRenew chan struct{}) {
+	sessionDestroyAttempts := 0
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return
+	}
+	go func() {
+		for {
+			select {
+			case <-time.After(ttl / 2):
+				entry, _, err := s.client.Session().Renew(id, nil)
+				if err != nil {
+					// If an error occurs, continue until the
+					// session gets destroyed explicitly or
+					// the session ttl times out
+					continue
+				}
+				if entry == nil {
+					return
+				}
+
+				// Handle the server updating the TTL
+				ttl, _ = time.ParseDuration(entry.TTL)
+
+			case <-stopRenew:
+				// Attempt a session destroy
+				_, err := s.client.Session().Destroy(id, nil)
+				if err == nil {
+					return
+				}
+
+				if sessionDestroyAttempts >= MaxSessionDestroyAttempts {
+					return
+				}
+
+				// We can't destroy the session because the store
+				// is unavailable, wait for the session renew period
+				sessionDestroyAttempts++
+				time.Sleep(ttl / 2)
+			}
+		}
+	}()
+}
+
+// Lock attempts to acquire the lock and blocks while
+// doing so. It returns a channel that is closed if our
+// lock is lost or if an error occurs
+func (l *consulLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) {
+	return l.lock.Lock(stopChan)
+}
+
+// Unlock the "key". Calling unlock while
+// not holding the lock will throw an error
+func (l *consulLock) Unlock() error {
+	if l.renewCh != nil {
+		close(l.renewCh)
+	}
+	return l.lock.Unlock()
+}
+
+// AtomicPut puts a value at "key" if the key has not been
+// modified in the meantime, throws an error if this is the case
+func (s *Consul) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) {
+
+	p := &api.KVPair{Key: s.normalize(key), Value: value, Flags: api.LockFlagValue}
+
+	if previous == nil {
+		// Consul interprets ModifyIndex = 0 as new key.
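+		// A hedged sketch of create-if-absent through this path (kv is any
+		// store.Store backed by Consul; key and value are placeholders):
+		//
+		//	ok, _, err := kv.AtomicPut("some/key", []byte("v1"), nil, nil)
+		//	// ok is false and err is store.ErrKeyExists if the key was
+		//	// created concurrently.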
+ p.ModifyIndex = 0 + } else { + p.ModifyIndex = previous.LastIndex + } + + ok, _, err := s.client.KV().CAS(p, nil) + if err != nil { + return false, nil, err + } + if !ok { + if previous == nil { + return false, nil, store.ErrKeyExists + } + return false, nil, store.ErrKeyModified + } + + pair, err := s.Get(key) + if err != nil { + return false, nil, err + } + + return true, pair, nil +} + +// AtomicDelete deletes a value at "key" if the key has not +// been modified in the meantime, throws an error if this is the case +func (s *Consul) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + + p := &api.KVPair{Key: s.normalize(key), ModifyIndex: previous.LastIndex, Flags: api.LockFlagValue} + + // Extra Get operation to check on the key + _, err := s.Get(key) + if err != nil && err == store.ErrKeyNotFound { + return false, err + } + + if work, _, err := s.client.KV().DeleteCAS(p, nil); err != nil { + return false, err + } else if !work { + return false, store.ErrKeyModified + } + + return true, nil +} + +// Close closes the client connection +func (s *Consul) Close() { + return +} diff --git a/vendor/github.com/docker/libkv/store/etcd/etcd.go b/vendor/github.com/docker/libkv/store/etcd/etcd.go new file mode 100644 index 0000000000..c932ca665e --- /dev/null +++ b/vendor/github.com/docker/libkv/store/etcd/etcd.go @@ -0,0 +1,606 @@ +package etcd + +import ( + "crypto/tls" + "errors" + "log" + "net" + "net/http" + "strings" + "time" + + "golang.org/x/net/context" + + etcd "github.com/coreos/etcd/client" + "github.com/docker/libkv" + "github.com/docker/libkv/store" +) + +var ( + // ErrAbortTryLock is thrown when a user stops trying to seek the lock + // by sending a signal to the stop chan, this is used to verify if the + // operation succeeded + ErrAbortTryLock = errors.New("lock operation aborted") +) + +// Etcd is the receiver type for the +// Store interface +type Etcd struct { + client etcd.KeysAPI +} + +type etcdLock struct { + client etcd.KeysAPI + stopLock chan struct{} + stopRenew chan struct{} + key string + value string + last *etcd.Response + ttl time.Duration +} + +const ( + periodicSync = 5 * time.Minute + defaultLockTTL = 20 * time.Second + defaultUpdateTime = 5 * time.Second +) + +// Register registers etcd to libkv +func Register() { + libkv.AddStore(store.ETCD, New) +} + +// New creates a new Etcd client given a list +// of endpoints and an optional tls config +func New(addrs []string, options *store.Config) (store.Store, error) { + s := &Etcd{} + + var ( + entries []string + err error + ) + + entries = store.CreateEndpoints(addrs, "http") + cfg := &etcd.Config{ + Endpoints: entries, + Transport: etcd.DefaultTransport, + HeaderTimeoutPerRequest: 3 * time.Second, + } + + // Set options + if options != nil { + if options.TLS != nil { + setTLS(cfg, options.TLS, addrs) + } + if options.ConnectionTimeout != 0 { + setTimeout(cfg, options.ConnectionTimeout) + } + if options.Username != "" { + setCredentials(cfg, options.Username, options.Password) + } + } + + c, err := etcd.New(*cfg) + if err != nil { + log.Fatal(err) + } + + s.client = etcd.NewKeysAPI(c) + + // Periodic Cluster Sync + go func() { + for { + if err := c.AutoSync(context.Background(), periodicSync); err != nil { + return + } + } + }() + + return s, nil +} + +// SetTLS sets the tls configuration given a tls.Config scheme +func setTLS(cfg *etcd.Config, tls *tls.Config, addrs []string) { + entries := store.CreateEndpoints(addrs, "https") 
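+	// Re-derive the endpoint list with the https scheme so the TLS config
+	// set on the transport below applies to every address.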
+ cfg.Endpoints = entries + + // Set transport + t := http.Transport{ + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tls, + } + + cfg.Transport = &t +} + +// setTimeout sets the timeout used for connecting to the store +func setTimeout(cfg *etcd.Config, time time.Duration) { + cfg.HeaderTimeoutPerRequest = time +} + +// setCredentials sets the username/password credentials for connecting to Etcd +func setCredentials(cfg *etcd.Config, username, password string) { + cfg.Username = username + cfg.Password = password +} + +// Normalize the key for usage in Etcd +func (s *Etcd) normalize(key string) string { + key = store.Normalize(key) + return strings.TrimPrefix(key, "/") +} + +// keyNotFound checks on the error returned by the KeysAPI +// to verify if the key exists in the store or not +func keyNotFound(err error) bool { + if err != nil { + if etcdError, ok := err.(etcd.Error); ok { + if etcdError.Code == etcd.ErrorCodeKeyNotFound || + etcdError.Code == etcd.ErrorCodeNotFile || + etcdError.Code == etcd.ErrorCodeNotDir { + return true + } + } + } + return false +} + +// Get the value at "key", returns the last modified +// index to use in conjunction to Atomic calls +func (s *Etcd) Get(key string) (pair *store.KVPair, err error) { + getOpts := &etcd.GetOptions{ + Quorum: true, + } + + result, err := s.client.Get(context.Background(), s.normalize(key), getOpts) + if err != nil { + if keyNotFound(err) { + return nil, store.ErrKeyNotFound + } + return nil, err + } + + pair = &store.KVPair{ + Key: key, + Value: []byte(result.Node.Value), + LastIndex: result.Node.ModifiedIndex, + } + + return pair, nil +} + +// Put a value at "key" +func (s *Etcd) Put(key string, value []byte, opts *store.WriteOptions) error { + setOpts := &etcd.SetOptions{} + + // Set options + if opts != nil { + setOpts.Dir = opts.IsDir + setOpts.TTL = opts.TTL + } + + _, err := s.client.Set(context.Background(), s.normalize(key), string(value), setOpts) + return err +} + +// Delete a value at "key" +func (s *Etcd) Delete(key string) error { + opts := &etcd.DeleteOptions{ + Recursive: false, + } + + _, err := s.client.Delete(context.Background(), s.normalize(key), opts) + if keyNotFound(err) { + return store.ErrKeyNotFound + } + return err +} + +// Exists checks if the key exists inside the store +func (s *Etcd) Exists(key string) (bool, error) { + _, err := s.Get(key) + if err != nil { + if err == store.ErrKeyNotFound { + return false, nil + } + return false, err + } + return true, nil +} + +// Watch for changes on a "key" +// It returns a channel that will receive changes or pass +// on errors. Upon creation, the current value will first +// be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Etcd) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + opts := &etcd.WatcherOptions{Recursive: false} + watcher := s.client.Watcher(s.normalize(key), opts) + + // watchCh is sending back events to the caller + watchCh := make(chan *store.KVPair) + + go func() { + defer close(watchCh) + + // Get the current value + pair, err := s.Get(key) + if err != nil { + return + } + + // Push the current value through the channel. 
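+		// A hedged sketch of the caller side (stopCh and the store s are
+		// assumed to exist; illustrative only):
+		//
+		//	events, _ := s.Watch("some/key", stopCh)
+		//	for pair := range events {
+		//		log.Printf("%s changed, index %d", pair.Key, pair.LastIndex)
+		//	}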
+ watchCh <- pair + + for { + // Check if the watch was stopped by the caller + select { + case <-stopCh: + return + default: + } + + result, err := watcher.Next(context.Background()) + + if err != nil { + return + } + + watchCh <- &store.KVPair{ + Key: key, + Value: []byte(result.Node.Value), + LastIndex: result.Node.ModifiedIndex, + } + } + }() + + return watchCh, nil +} + +// WatchTree watches for changes on a "directory" +// It returns a channel that will receive changes or pass +// on errors. Upon creating a watch, the current childs values +// will be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Etcd) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + watchOpts := &etcd.WatcherOptions{Recursive: true} + watcher := s.client.Watcher(s.normalize(directory), watchOpts) + + // watchCh is sending back events to the caller + watchCh := make(chan []*store.KVPair) + + go func() { + defer close(watchCh) + + // Get child values + list, err := s.List(directory) + if err != nil { + return + } + + // Push the current value through the channel. + watchCh <- list + + for { + // Check if the watch was stopped by the caller + select { + case <-stopCh: + return + default: + } + + _, err := watcher.Next(context.Background()) + + if err != nil { + return + } + + list, err = s.List(directory) + if err != nil { + return + } + + watchCh <- list + } + }() + + return watchCh, nil +} + +// AtomicPut puts a value at "key" if the key has not been +// modified in the meantime, throws an error if this is the case +func (s *Etcd) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { + var ( + meta *etcd.Response + err error + ) + + setOpts := &etcd.SetOptions{} + + if previous != nil { + setOpts.PrevExist = etcd.PrevExist + setOpts.PrevIndex = previous.LastIndex + if previous.Value != nil { + setOpts.PrevValue = string(previous.Value) + } + } else { + setOpts.PrevExist = etcd.PrevNoExist + } + + if opts != nil { + if opts.TTL > 0 { + setOpts.TTL = opts.TTL + } + } + + meta, err = s.client.Set(context.Background(), s.normalize(key), string(value), setOpts) + if err != nil { + if etcdError, ok := err.(etcd.Error); ok { + // Compare failed + if etcdError.Code == etcd.ErrorCodeTestFailed { + return false, nil, store.ErrKeyModified + } + // Node exists error (when PrevNoExist) + if etcdError.Code == etcd.ErrorCodeNodeExist { + return false, nil, store.ErrKeyExists + } + } + return false, nil, err + } + + updated := &store.KVPair{ + Key: key, + Value: value, + LastIndex: meta.Node.ModifiedIndex, + } + + return true, updated, nil +} + +// AtomicDelete deletes a value at "key" if the key +// has not been modified in the meantime, throws an +// error if this is the case +func (s *Etcd) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + + delOpts := &etcd.DeleteOptions{} + + if previous != nil { + delOpts.PrevIndex = previous.LastIndex + if previous.Value != nil { + delOpts.PrevValue = string(previous.Value) + } + } + + _, err := s.client.Delete(context.Background(), s.normalize(key), delOpts) + if err != nil { + if etcdError, ok := err.(etcd.Error); ok { + // Key Not Found + if etcdError.Code == etcd.ErrorCodeKeyNotFound { + return false, store.ErrKeyNotFound + } + // Compare failed + if etcdError.Code == etcd.ErrorCodeTestFailed { + return false, store.ErrKeyModified + } + } + return false, 
err + } + + return true, nil +} + +// List child nodes of a given directory +func (s *Etcd) List(directory string) ([]*store.KVPair, error) { + getOpts := &etcd.GetOptions{ + Quorum: true, + Recursive: true, + Sort: true, + } + + resp, err := s.client.Get(context.Background(), s.normalize(directory), getOpts) + if err != nil { + if keyNotFound(err) { + return nil, store.ErrKeyNotFound + } + return nil, err + } + + kv := []*store.KVPair{} + for _, n := range resp.Node.Nodes { + kv = append(kv, &store.KVPair{ + Key: n.Key, + Value: []byte(n.Value), + LastIndex: n.ModifiedIndex, + }) + } + return kv, nil +} + +// DeleteTree deletes a range of keys under a given directory +func (s *Etcd) DeleteTree(directory string) error { + delOpts := &etcd.DeleteOptions{ + Recursive: true, + } + + _, err := s.client.Delete(context.Background(), s.normalize(directory), delOpts) + if keyNotFound(err) { + return store.ErrKeyNotFound + } + return err +} + +// NewLock returns a handle to a lock struct which can +// be used to provide mutual exclusion on a key +func (s *Etcd) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) { + var value string + ttl := defaultLockTTL + renewCh := make(chan struct{}) + + // Apply options on Lock + if options != nil { + if options.Value != nil { + value = string(options.Value) + } + if options.TTL != 0 { + ttl = options.TTL + } + if options.RenewLock != nil { + renewCh = options.RenewLock + } + } + + // Create lock object + lock = &etcdLock{ + client: s.client, + stopRenew: renewCh, + key: s.normalize(key), + value: value, + ttl: ttl, + } + + return lock, nil +} + +// Lock attempts to acquire the lock and blocks while +// doing so. It returns a channel that is closed if our +// lock is lost or if an error occurs +func (l *etcdLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) { + + // Lock holder channel + lockHeld := make(chan struct{}) + stopLocking := l.stopRenew + + setOpts := &etcd.SetOptions{ + TTL: l.ttl, + } + + for { + setOpts.PrevExist = etcd.PrevNoExist + resp, err := l.client.Set(context.Background(), l.key, l.value, setOpts) + if err != nil { + if etcdError, ok := err.(etcd.Error); ok { + if etcdError.Code != etcd.ErrorCodeNodeExist { + return nil, err + } + setOpts.PrevIndex = ^uint64(0) + } + } else { + setOpts.PrevIndex = resp.Node.ModifiedIndex + } + + setOpts.PrevExist = etcd.PrevExist + l.last, err = l.client.Set(context.Background(), l.key, l.value, setOpts) + + if err == nil { + // Leader section + l.stopLock = stopLocking + go l.holdLock(l.key, lockHeld, stopLocking) + break + } else { + // If this is a legitimate error, return + if etcdError, ok := err.(etcd.Error); ok { + if etcdError.Code != etcd.ErrorCodeTestFailed { + return nil, err + } + } + + // Seeker section + errorCh := make(chan error) + chWStop := make(chan bool) + free := make(chan bool) + + go l.waitLock(l.key, errorCh, chWStop, free) + + // Wait for the key to be available or for + // a signal to stop trying to lock the key + select { + case <-free: + break + case err := <-errorCh: + return nil, err + case <-stopChan: + return nil, ErrAbortTryLock + } + + // Delete or Expire event occurred + // Retry + } + } + + return lockHeld, nil +} + +// Hold the lock as long as we can +// Updates the key ttl periodically until we receive +// an explicit stop signal from the Unlock method +func (l *etcdLock) holdLock(key string, lockHeld chan struct{}, stopLocking <-chan struct{}) { + defer close(lockHeld) + + update := time.NewTicker(l.ttl / 3) + defer update.Stop() 
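+	// Refreshing at ttl/3 leaves two chances to renew before the key
+	// expires; with the default 20s TTL the key is re-Set about every 6.7s.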
+ + var err error + setOpts := &etcd.SetOptions{TTL: l.ttl} + + for { + select { + case <-update.C: + setOpts.PrevIndex = l.last.Node.ModifiedIndex + l.last, err = l.client.Set(context.Background(), key, l.value, setOpts) + if err != nil { + return + } + + case <-stopLocking: + return + } + } +} + +// WaitLock simply waits for the key to be available for creation +func (l *etcdLock) waitLock(key string, errorCh chan error, stopWatchCh chan bool, free chan<- bool) { + opts := &etcd.WatcherOptions{Recursive: false} + watcher := l.client.Watcher(key, opts) + + for { + event, err := watcher.Next(context.Background()) + if err != nil { + errorCh <- err + return + } + if event.Action == "delete" || event.Action == "expire" { + free <- true + return + } + } +} + +// Unlock the "key". Calling unlock while +// not holding the lock will throw an error +func (l *etcdLock) Unlock() error { + if l.stopLock != nil { + l.stopLock <- struct{}{} + } + if l.last != nil { + delOpts := &etcd.DeleteOptions{ + PrevIndex: l.last.Node.ModifiedIndex, + } + _, err := l.client.Delete(context.Background(), l.key, delOpts) + if err != nil { + return err + } + } + return nil +} + +// Close closes the client connection +func (s *Etcd) Close() { + return +} diff --git a/vendor/github.com/docker/libkv/store/helpers.go b/vendor/github.com/docker/libkv/store/helpers.go new file mode 100644 index 0000000000..0fb74c9ae1 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/helpers.go @@ -0,0 +1,47 @@ +package store + +import ( + "strings" +) + +// CreateEndpoints creates a list of endpoints given the right scheme +func CreateEndpoints(addrs []string, scheme string) (entries []string) { + for _, addr := range addrs { + entries = append(entries, scheme+"://"+addr) + } + return entries +} + +// Normalize the key for each store to the form: +// +// /path/to/key +// +func Normalize(key string) string { + return "/" + join(SplitKey(key)) +} + +// GetDirectory gets the full directory part of +// the key to the form: +// +// /path/to/ +// +func GetDirectory(key string) string { + parts := SplitKey(key) + parts = parts[:len(parts)-1] + return "/" + join(parts) +} + +// SplitKey splits the key to extract path informations +func SplitKey(key string) (path []string) { + if strings.Contains(key, "/") { + path = strings.Split(key, "/") + } else { + path = []string{key} + } + return path +} + +// join the path parts with '/' +func join(parts []string) string { + return strings.Join(parts, "/") +} diff --git a/vendor/github.com/docker/libkv/store/store.go b/vendor/github.com/docker/libkv/store/store.go new file mode 100644 index 0000000000..7a4850c019 --- /dev/null +++ b/vendor/github.com/docker/libkv/store/store.go @@ -0,0 +1,132 @@ +package store + +import ( + "crypto/tls" + "errors" + "time" +) + +// Backend represents a KV Store Backend +type Backend string + +const ( + // CONSUL backend + CONSUL Backend = "consul" + // ETCD backend + ETCD Backend = "etcd" + // ZK backend + ZK Backend = "zk" + // BOLTDB backend + BOLTDB Backend = "boltdb" +) + +var ( + // ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv + ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of") + // ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend + ErrCallNotSupported = errors.New("The current call is not supported with this backend") + // ErrNotReachable is thrown when the API cannot be reached for issuing common store operations + 
ErrNotReachable = errors.New("Api not reachable") + // ErrCannotLock is thrown when there is an error acquiring a lock on a key + ErrCannotLock = errors.New("Error acquiring the lock") + // ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store + ErrKeyModified = errors.New("Unable to complete atomic operation, key modified") + // ErrKeyNotFound is thrown when the key is not found in the store during a Get operation + ErrKeyNotFound = errors.New("Key not found in store") + // ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation + ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation") + // ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut + ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation") +) + +// Config contains the options for a storage client +type Config struct { + ClientTLS *ClientTLSConfig + TLS *tls.Config + ConnectionTimeout time.Duration + Bucket string + PersistConnection bool + Username string + Password string +} + +// ClientTLSConfig contains data for a Client TLS configuration in the form +// the etcd client wants it. Eventually we'll adapt it for ZK and Consul. +type ClientTLSConfig struct { + CertFile string + KeyFile string + CACertFile string +} + +// Store represents the backend K/V storage +// Each store should support every call listed +// here. Or it couldn't be implemented as a K/V +// backend for libkv +type Store interface { + // Put a value at the specified key + Put(key string, value []byte, options *WriteOptions) error + + // Get a value given its key + Get(key string) (*KVPair, error) + + // Delete the value at the specified key + Delete(key string) error + + // Verify if a Key exists in the store + Exists(key string) (bool, error) + + // Watch for changes on a key + Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) + + // WatchTree watches for changes on child nodes under + // a given directory + WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) + + // NewLock creates a lock for a given key. + // The returned Locker is not held and must be acquired + // with `.Lock`. The Value is optional. + NewLock(key string, options *LockOptions) (Locker, error) + + // List the content of a given prefix + List(directory string) ([]*KVPair, error) + + // DeleteTree deletes a range of keys under a given directory + DeleteTree(directory string) error + + // Atomic CAS operation on a single value. + // Pass previous = nil to create a new key. + AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) + + // Atomic delete of a single value + AtomicDelete(key string, previous *KVPair) (bool, error) + + // Close the store connection + Close() +} + +// KVPair represents {Key, Value, Lastindex} tuple +type KVPair struct { + Key string + Value []byte + LastIndex uint64 +} + +// WriteOptions contains optional request parameters +type WriteOptions struct { + IsDir bool + TTL time.Duration +} + +// LockOptions contains optional request parameters +type LockOptions struct { + Value []byte // Optional, value to associate with the lock + TTL time.Duration // Optional, expiration ttl associated with the lock + RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock +} + +// Locker provides locking mechanism on top of the store. 
+// Similar to `sync.Lock` except it may return errors. +type Locker interface { + Lock(stopChan chan struct{}) (<-chan struct{}, error) + Unlock() error +} diff --git a/vendor/github.com/docker/libkv/store/zookeeper/zookeeper.go b/vendor/github.com/docker/libkv/store/zookeeper/zookeeper.go new file mode 100644 index 0000000000..ff8d4ebe0f --- /dev/null +++ b/vendor/github.com/docker/libkv/store/zookeeper/zookeeper.go @@ -0,0 +1,429 @@ +package zookeeper + +import ( + "strings" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + zk "github.com/samuel/go-zookeeper/zk" +) + +const ( + // SOH control character + SOH = "\x01" + + defaultTimeout = 10 * time.Second +) + +// Zookeeper is the receiver type for +// the Store interface +type Zookeeper struct { + timeout time.Duration + client *zk.Conn +} + +type zookeeperLock struct { + client *zk.Conn + lock *zk.Lock + key string + value []byte +} + +// Register registers zookeeper to libkv +func Register() { + libkv.AddStore(store.ZK, New) +} + +// New creates a new Zookeeper client given a +// list of endpoints and an optional tls config +func New(endpoints []string, options *store.Config) (store.Store, error) { + s := &Zookeeper{} + s.timeout = defaultTimeout + + // Set options + if options != nil { + if options.ConnectionTimeout != 0 { + s.setTimeout(options.ConnectionTimeout) + } + } + + // Connect to Zookeeper + conn, _, err := zk.Connect(endpoints, s.timeout) + if err != nil { + return nil, err + } + s.client = conn + + return s, nil +} + +// setTimeout sets the timeout for connecting to Zookeeper +func (s *Zookeeper) setTimeout(time time.Duration) { + s.timeout = time +} + +// Get the value at "key", returns the last modified index +// to use in conjunction to Atomic calls +func (s *Zookeeper) Get(key string) (pair *store.KVPair, err error) { + resp, meta, err := s.client.Get(s.normalize(key)) + + if err != nil { + if err == zk.ErrNoNode { + return nil, store.ErrKeyNotFound + } + return nil, err + } + + // FIXME handle very rare cases where Get returns the + // SOH control character instead of the actual value + if string(resp) == SOH { + return s.Get(store.Normalize(key)) + } + + pair = &store.KVPair{ + Key: key, + Value: resp, + LastIndex: uint64(meta.Version), + } + + return pair, nil +} + +// createFullPath creates the entire path for a directory +// that does not exist +func (s *Zookeeper) createFullPath(path []string, ephemeral bool) error { + for i := 1; i <= len(path); i++ { + newpath := "/" + strings.Join(path[:i], "/") + if i == len(path) && ephemeral { + _, err := s.client.Create(newpath, []byte{}, zk.FlagEphemeral, zk.WorldACL(zk.PermAll)) + return err + } + _, err := s.client.Create(newpath, []byte{}, 0, zk.WorldACL(zk.PermAll)) + if err != nil { + // Skip if node already exists + if err != zk.ErrNodeExists { + return err + } + } + } + return nil +} + +// Put a value at "key" +func (s *Zookeeper) Put(key string, value []byte, opts *store.WriteOptions) error { + fkey := s.normalize(key) + + exists, err := s.Exists(key) + if err != nil { + return err + } + + if !exists { + if opts != nil && opts.TTL > 0 { + s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), true) + } else { + s.createFullPath(store.SplitKey(strings.TrimSuffix(key, "/")), false) + } + } + + _, err = s.client.Set(fkey, value, -1) + return err +} + +// Delete a value at "key" +func (s *Zookeeper) Delete(key string) error { + err := s.client.Delete(s.normalize(key), -1) + if err == zk.ErrNoNode { + return store.ErrKeyNotFound + } + 
return err +} + +// Exists checks if the key exists inside the store +func (s *Zookeeper) Exists(key string) (bool, error) { + exists, _, err := s.client.Exists(s.normalize(key)) + if err != nil { + return false, err + } + return exists, nil +} + +// Watch for changes on a "key" +// It returns a channel that will receive changes or pass +// on errors. Upon creation, the current value will first +// be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Zookeeper) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + // Get the key first + pair, err := s.Get(key) + if err != nil { + return nil, err + } + + // Catch zk notifications and fire changes into the channel. + watchCh := make(chan *store.KVPair) + go func() { + defer close(watchCh) + + // Get returns the current value to the channel prior + // to listening to any event that may occur on that key + watchCh <- pair + for { + _, _, eventCh, err := s.client.GetW(s.normalize(key)) + if err != nil { + return + } + select { + case e := <-eventCh: + if e.Type == zk.EventNodeDataChanged { + if entry, err := s.Get(key); err == nil { + watchCh <- entry + } + } + case <-stopCh: + // There is no way to stop GetW so just quit + return + } + } + }() + + return watchCh, nil +} + +// WatchTree watches for changes on a "directory" +// It returns a channel that will receive changes or pass +// on errors. Upon creating a watch, the current children's values +// will be sent to the channel. Providing a non-nil stopCh can +// be used to stop watching. +func (s *Zookeeper) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + // List the children first + entries, err := s.List(directory) + if err != nil { + return nil, err + } + + // Catch zk notifications and fire changes into the channel. + watchCh := make(chan []*store.KVPair) + go func() { + defer close(watchCh) + + // List returns the children's values to the channel + // prior to listening to any events that may occur + // on those keys + watchCh <- entries + + for { + _, _, eventCh, err := s.client.ChildrenW(s.normalize(directory)) + if err != nil { + return + } + select { + case e := <-eventCh: + if e.Type == zk.EventNodeChildrenChanged { + if kv, err := s.List(directory); err == nil { + watchCh <- kv + } + } + case <-stopCh: + // There is no way to stop ChildrenW so just quit + return + } + } + }() + + return watchCh, nil +} + +// List child nodes of a given directory +func (s *Zookeeper) List(directory string) ([]*store.KVPair, error) { + keys, stat, err := s.client.Children(s.normalize(directory)) + if err != nil { + if err == zk.ErrNoNode { + return nil, store.ErrKeyNotFound + } + return nil, err + } + + kv := []*store.KVPair{} + + // FIXME Costly Get request for each child key..
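+ // Editor's note (not part of the vendored file): ZooKeeper's Children() only + // returns child names, so each value below costs a separate Get round trip; + // listing a directory with N children therefore takes N+1 requests.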
+ for _, key := range keys { + pair, err := s.Get(strings.TrimSuffix(directory, "/") + s.normalize(key)) + if err != nil { + // If node is not found: List is out of date, retry + if err == store.ErrKeyNotFound { + return s.List(directory) + } + return nil, err + } + + kv = append(kv, &store.KVPair{ + Key: key, + Value: []byte(pair.Value), + LastIndex: uint64(stat.Version), + }) + } + + return kv, nil +} + +// DeleteTree deletes a range of keys under a given directory +func (s *Zookeeper) DeleteTree(directory string) error { + pairs, err := s.List(directory) + if err != nil { + return err + } + + var reqs []interface{} + + for _, pair := range pairs { + reqs = append(reqs, &zk.DeleteRequest{ + Path: s.normalize(directory + "/" + pair.Key), + Version: -1, + }) + } + + _, err = s.client.Multi(reqs...) + return err +} + +// AtomicPut put a value at "key" if the key has not been +// modified in the meantime, throws an error if this is the case +func (s *Zookeeper) AtomicPut(key string, value []byte, previous *store.KVPair, _ *store.WriteOptions) (bool, *store.KVPair, error) { + var lastIndex uint64 + + if previous != nil { + meta, err := s.client.Set(s.normalize(key), value, int32(previous.LastIndex)) + if err != nil { + // Compare Failed + if err == zk.ErrBadVersion { + return false, nil, store.ErrKeyModified + } + return false, nil, err + } + lastIndex = uint64(meta.Version) + } else { + // Interpret previous == nil as create operation. + _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)) + if err != nil { + // Directory does not exist + if err == zk.ErrNoNode { + + // Create the directory + parts := store.SplitKey(strings.TrimSuffix(key, "/")) + parts = parts[:len(parts)-1] + if err = s.createFullPath(parts, false); err != nil { + // Failed to create the directory. + return false, nil, err + } + + // Create the node + if _, err := s.client.Create(s.normalize(key), value, 0, zk.WorldACL(zk.PermAll)); err != nil { + // Node exist error (when previous nil) + if err == zk.ErrNodeExists { + return false, nil, store.ErrKeyExists + } + return false, nil, err + } + + } else { + // Node Exists error (when previous nil) + if err == zk.ErrNodeExists { + return false, nil, store.ErrKeyExists + } + + // Unhandled error + return false, nil, err + } + } + lastIndex = 0 // Newly created nodes have version 0. 
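+ // Editor's note (illustrative sketch, not part of the vendored file): callers + // typically drive AtomicPut as a check-and-set loop on a pair read earlier: + // + // ok, newPair, err := kv.AtomicPut(key, newValue, oldPair, nil) + // if err == store.ErrKeyModified { + // // lost the race: re-read the key and retry with the fresh pair + // }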
+ } + + pair := &store.KVPair{ + Key: key, + Value: value, + LastIndex: lastIndex, + } + + return true, pair, nil +} + +// AtomicDelete deletes a value at "key" if the key +// has not been modified in the meantime, throws an +// error if this is the case +func (s *Zookeeper) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + if previous == nil { + return false, store.ErrPreviousNotSpecified + } + + err := s.client.Delete(s.normalize(key), int32(previous.LastIndex)) + if err != nil { + // Key not found + if err == zk.ErrNoNode { + return false, store.ErrKeyNotFound + } + // Compare failed + if err == zk.ErrBadVersion { + return false, store.ErrKeyModified + } + // General store error + return false, err + } + return true, nil +} + +// NewLock returns a handle to a lock struct which can +// be used to provide mutual exclusion on a key +func (s *Zookeeper) NewLock(key string, options *store.LockOptions) (lock store.Locker, err error) { + value := []byte("") + + // Apply options + if options != nil { + if options.Value != nil { + value = options.Value + } + } + + lock = &zookeeperLock{ + client: s.client, + key: s.normalize(key), + value: value, + lock: zk.NewLock(s.client, s.normalize(key), zk.WorldACL(zk.PermAll)), + } + + return lock, err +} + +// Lock attempts to acquire the lock and blocks while +// doing so. It returns a channel that is closed if our +// lock is lost or if an error occurs +func (l *zookeeperLock) Lock(stopChan chan struct{}) (<-chan struct{}, error) { + err := l.lock.Lock() + + if err == nil { + // We hold the lock, we can set our value + // FIXME: The value is left behind + // (problematic for leader election) + _, err = l.client.Set(l.key, l.value, -1) + } + + return make(chan struct{}), err +} + +// Unlock the "key". Calling unlock while +// not holding the lock will throw an error +func (l *zookeeperLock) Unlock() error { + return l.lock.Unlock() +} + +// Close closes the client connection +func (s *Zookeeper) Close() { + s.client.Close() +} + +// Normalize the key for usage in Zookeeper +func (s *Zookeeper) normalize(key string) string { + key = store.Normalize(key) + return strings.TrimSuffix(key, "/") +} diff --git a/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/docker/libnetwork/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ diff --git a/vendor/github.com/docker/libnetwork/agent.go b/vendor/github.com/docker/libnetwork/agent.go new file mode 100644 index 0000000000..c90fa81ff8 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/agent.go @@ -0,0 +1,975 @@ +package libnetwork + +//go:generate protoc -I.:Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. agent.proto + +import ( + "encoding/json" + "fmt" + "net" + "sort" + "sync" + + "github.com/docker/go-events" + "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/networkdb" + "github.com/docker/libnetwork/types" + "github.com/gogo/protobuf/proto" + "github.com/sirupsen/logrus" +) + +const ( + subsysGossip = "networking:gossip" + subsysIPSec = "networking:ipsec" + keyringSize = 3 +) + +// ByTime implements sort.Interface for []*types.EncryptionKey based on +// the LamportTime field. +type ByTime []*types.EncryptionKey + +func (b ByTime) Len() int { return len(b) } +func (b ByTime) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b ByTime) Less(i, j int) bool { return b[i].LamportTime < b[j].LamportTime } + +type agent struct { + networkDB *networkdb.NetworkDB + bindAddr string + advertiseAddr string + dataPathAddr string + coreCancelFuncs []func() + driverCancelFuncs map[string][]func() + sync.Mutex +} + +func (a *agent) dataPathAddress() string { + a.Lock() + defer a.Unlock() + if a.dataPathAddr != "" { + return a.dataPathAddr + } + return a.advertiseAddr +} + +const libnetworkEPTable = "endpoint_table" + +func getBindAddr(ifaceName string) (string, error) { + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return "", fmt.Errorf("failed to find interface %s: %v", ifaceName, err) + } + + addrs, err := iface.Addrs() + if err != nil { + return "", fmt.Errorf("failed to get interface addresses: %v", err) + } + + for _, a := range addrs { + addr, ok := a.(*net.IPNet) + if !ok { + continue + } + addrIP := addr.IP + + if addrIP.IsLinkLocalUnicast() { + continue + } + + return addrIP.String(), nil + } + + return "", fmt.Errorf("failed to get bind address") +} + +func resolveAddr(addrOrInterface string) (string, error) { + // Try and see if this is a valid IP address + if net.ParseIP(addrOrInterface) != nil { + return addrOrInterface, nil + } + + addr, err := net.ResolveIPAddr("ip", addrOrInterface) + if err != nil { + // If not a valid IP address, it should be a valid interface + return getBindAddr(addrOrInterface) + } + return addr.String(), nil +} + +func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error { + drvEnc := discoverapi.DriverEncryptionUpdate{} + + a := c.getAgent() + if a == nil { + logrus.Debug("Skipping key change as agent is nil") + return nil + } + + // Find the deleted key. If the deleted key was the primary key, + // a new primary key should be set before removing it from the keyring.
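+ // Editor's note (not part of the vendored file): the cluster keeps up to + // keyringSize (3) keys per subsystem. On a rotation this function diffs the + // incoming set against c.keys, adds the new key to the keyring first + // (SetKey), then promotes the new primary (SetPrimaryKey), and only then + // drops the retired key (RemoveKey), so gossip encrypted under either key + // stays readable throughout the rotation.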
+ c.Lock() + added := []byte{} + deleted := []byte{} + j := len(c.keys) + for i := 0; i < j; { + same := false + for _, key := range keys { + if same = key.LamportTime == c.keys[i].LamportTime; same { + break + } + } + if !same { + cKey := c.keys[i] + if cKey.Subsystem == subsysGossip { + deleted = cKey.Key + } + + if cKey.Subsystem == subsysIPSec { + drvEnc.Prune = cKey.Key + drvEnc.PruneTag = cKey.LamportTime + } + c.keys[i], c.keys[j-1] = c.keys[j-1], c.keys[i] + c.keys[j-1] = nil + j-- + } + i++ + } + c.keys = c.keys[:j] + + // Find the new key and add it to the key ring + for _, key := range keys { + same := false + for _, cKey := range c.keys { + if same = cKey.LamportTime == key.LamportTime; same { + break + } + } + if !same { + c.keys = append(c.keys, key) + if key.Subsystem == subsysGossip { + added = key.Key + } + + if key.Subsystem == subsysIPSec { + drvEnc.Key = key.Key + drvEnc.Tag = key.LamportTime + } + } + } + c.Unlock() + + if len(added) > 0 { + a.networkDB.SetKey(added) + } + + key, _, err := c.getPrimaryKeyTag(subsysGossip) + if err != nil { + return err + } + a.networkDB.SetPrimaryKey(key) + + key, tag, err := c.getPrimaryKeyTag(subsysIPSec) + if err != nil { + return err + } + drvEnc.Primary = key + drvEnc.PrimaryTag = tag + + if len(deleted) > 0 { + a.networkDB.RemoveKey(deleted) + } + + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + err := driver.DiscoverNew(discoverapi.EncryptionKeysUpdate, drvEnc) + if err != nil { + logrus.Warnf("Failed to update datapath keys in driver %s: %v", name, err) + } + return false + }) + + return nil +} + +func (c *controller) agentSetup(clusterProvider cluster.Provider) error { + agent := c.getAgent() + + // If the agent is already present there is no need to try to initialize it again + if agent != nil { + return nil + } + + bindAddr := clusterProvider.GetLocalAddress() + advAddr := clusterProvider.GetAdvertiseAddress() + dataAddr := clusterProvider.GetDataPathAddress() + remoteList := clusterProvider.GetRemoteAddressList() + remoteAddrList := make([]string, 0, len(remoteList)) + for _, remote := range remoteList { + addr, _, _ := net.SplitHostPort(remote) + remoteAddrList = append(remoteAddrList, addr) + } + + listen := clusterProvider.GetListenAddress() + listenAddr, _, _ := net.SplitHostPort(listen) + + logrus.Infof("Initializing Libnetwork Agent Listen-Addr=%s Local-addr=%s Adv-addr=%s Data-addr=%s Remote-addr-list=%v MTU=%d", + listenAddr, bindAddr, advAddr, dataAddr, remoteAddrList, c.Config().Daemon.NetworkControlPlaneMTU) + if advAddr != "" && agent == nil { + if err := c.agentInit(listenAddr, bindAddr, advAddr, dataAddr); err != nil { + logrus.Errorf("error in agentInit: %v", err) + return err + } + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + if capability.ConnectivityScope == datastore.GlobalScope { + c.agentDriverNotify(driver) + } + return false + }) + } + + if len(remoteAddrList) > 0 { + if err := c.agentJoin(remoteAddrList); err != nil { + logrus.Errorf("Error in joining gossip cluster: %v (join will be retried in background)", err) + } + } + + return nil +} + +// For a given subsystem getKeys sorts the keys by Lamport time and returns a +// slice of keys and Lamport times, which can be used as a unique tag for the keys +func (c *controller) getKeys(subsys string) ([][]byte, []uint64) { + c.Lock() + defer c.Unlock() + + sort.Sort(ByTime(c.keys)) + + keys := [][]byte{} + tags := []uint64{} + for _, 
key := range c.keys { + if key.Subsystem == subsys { + keys = append(keys, key.Key) + tags = append(tags, key.LamportTime) + } + } + + keys[0], keys[1] = keys[1], keys[0] + tags[0], tags[1] = tags[1], tags[0] + return keys, tags +} + +// getPrimaryKeyTag returns the primary key for a given subsystem from the +// sorted list of keys, along with the associated tag +func (c *controller) getPrimaryKeyTag(subsys string) ([]byte, uint64, error) { + c.Lock() + defer c.Unlock() + sort.Sort(ByTime(c.keys)) + keys := []*types.EncryptionKey{} + for _, key := range c.keys { + if key.Subsystem == subsys { + keys = append(keys, key) + } + } + return keys[1].Key, keys[1].LamportTime, nil +} + +func (c *controller) agentInit(listenAddr, bindAddrOrInterface, advertiseAddr, dataPathAddr string) error { + bindAddr, err := resolveAddr(bindAddrOrInterface) + if err != nil { + return err + } + + keys, _ := c.getKeys(subsysGossip) + + netDBConf := networkdb.DefaultConfig() + netDBConf.BindAddr = listenAddr + netDBConf.AdvertiseAddr = advertiseAddr + netDBConf.Keys = keys + if c.Config().Daemon.NetworkControlPlaneMTU != 0 { + // Take the control-plane MTU and subtract the IP hdr (IPv4 or IPv6) and the TCP/UDP hdr. + // To be on the safe side, cut 100 bytes + netDBConf.PacketBufferSize = (c.Config().Daemon.NetworkControlPlaneMTU - 100) + logrus.Debugf("Control plane MTU: %d will initialize NetworkDB with: %d", + c.Config().Daemon.NetworkControlPlaneMTU, netDBConf.PacketBufferSize) + } + nDB, err := networkdb.New(netDBConf) + if err != nil { + return err + } + + // Register the diagnostic handlers + c.DiagnosticServer.RegisterHandler(nDB, networkdb.NetDbPaths2Func) + + var cancelList []func() + ch, cancel := nDB.Watch(libnetworkEPTable, "", "") + cancelList = append(cancelList, cancel) + nodeCh, cancel := nDB.Watch(networkdb.NodeTable, "", "") + cancelList = append(cancelList, cancel) + + c.Lock() + c.agent = &agent{ + networkDB: nDB, + bindAddr: bindAddr, + advertiseAddr: advertiseAddr, + dataPathAddr: dataPathAddr, + coreCancelFuncs: cancelList, + driverCancelFuncs: make(map[string][]func()), + } + c.Unlock() + + go c.handleTableEvents(ch, c.handleEpTableEvent) + go c.handleTableEvents(nodeCh, c.handleNodeTableEvent) + + drvEnc := discoverapi.DriverEncryptionConfig{} + keys, tags := c.getKeys(subsysIPSec) + drvEnc.Keys = keys + drvEnc.Tags = tags + + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc) + if err != nil { + logrus.Warnf("Failed to set datapath keys in driver %s: %v", name, err) + } + return false + }) + + c.WalkNetworks(joinCluster) + + return nil +} + +func (c *controller) agentJoin(remoteAddrList []string) error { + agent := c.getAgent() + if agent == nil { + return nil + } + return agent.networkDB.Join(remoteAddrList) +} + +func (c *controller) agentDriverNotify(d driverapi.Driver) { + agent := c.getAgent() + if agent == nil { + return + } + + if err := d.DiscoverNew(discoverapi.NodeDiscovery, discoverapi.NodeDiscoveryData{ + Address: agent.dataPathAddress(), + BindAddress: agent.bindAddr, + Self: true, + }); err != nil { + logrus.Warnf("Failed the node discovery in driver: %v", err) + } + + drvEnc := discoverapi.DriverEncryptionConfig{} + keys, tags := c.getKeys(subsysIPSec) + drvEnc.Keys = keys + drvEnc.Tags = tags + + if err := d.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc); err != nil { + logrus.Warnf("Failed to set datapath keys in driver: %v", err) + } +} + +func (c 
*controller) agentClose() { + // Acquire the current agent instance and reset its pointer, + // then run the closing functions + c.Lock() + agent := c.agent + c.agent = nil + c.Unlock() + + if agent == nil { + return + } + + var cancelList []func() + + agent.Lock() + for _, cancelFuncs := range agent.driverCancelFuncs { + cancelList = append(cancelList, cancelFuncs...) + } + + // Also add the cancel functions for the network db + cancelList = append(cancelList, agent.coreCancelFuncs...) + agent.Unlock() + + for _, cancel := range cancelList { + cancel() + } + + agent.networkDB.Close() +} + +// Task has the backend container details +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo has service specific details along with the list of backend tasks +type ServiceInfo struct { + VIP string + LocalLBIndex int + Tasks []Task + Ports []string +} + +type epRecord struct { + ep EndpointRecord + info map[string]string + lbIndex int +} + +func (n *network) Services() map[string]ServiceInfo { + eps := make(map[string]epRecord) + + if !n.isClusterEligible() { + return nil + } + agent := n.getController().getAgent() + if agent == nil { + return nil + } + + // Walk through libnetworkEPTable and fetch the driver agnostic endpoint info + entries := agent.networkDB.GetTableByNetwork(libnetworkEPTable, n.id) + for eid, value := range entries { + var epRec EndpointRecord + nid := n.ID() + if err := proto.Unmarshal(value.Value, &epRec); err != nil { + logrus.Errorf("Unmarshal of libnetworkEPTable failed for endpoint %s in network %s, %v", eid, nid, err) + continue + } + i := n.getController().getLBIndex(epRec.ServiceID, nid, epRec.IngressPorts) + eps[eid] = epRecord{ + ep: epRec, + lbIndex: i, + } + } + + // Walk through the driver's tables, have the driver decode the entries + // and return the tuple {ep ID, value}. value is a string that conveys + // relevant info about the endpoint.
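+ // Editor's note (illustrative, not part of the vendored file): drivers + // implement driverapi's DecodeTableEntry(tablename, key, value), which maps a + // raw gossip entry back to an endpoint ID plus a map of driver-specific + // details; those details are merged into the matching epRecord below.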
+ d, err := n.driver(true) + if err != nil { + logrus.Errorf("Could not resolve driver for network %s/%s while fetching services: %v", n.networkType, n.ID(), err) + return nil + } + for _, table := range n.driverTables { + if table.objType != driverapi.EndpointObject { + continue + } + entries := agent.networkDB.GetTableByNetwork(table.name, n.id) + for key, value := range entries { + epID, info := d.DecodeTableEntry(table.name, key, value.Value) + if ep, ok := eps[epID]; !ok { + logrus.Errorf("Inconsistent driver and libnetwork state for endpoint %s", epID) + } else { + ep.info = info + eps[epID] = ep + } + } + } + + // group the endpoints into a map keyed by the service name + sinfo := make(map[string]ServiceInfo) + for ep, epr := range eps { + var ( + s ServiceInfo + ok bool + ) + if s, ok = sinfo[epr.ep.ServiceName]; !ok { + s = ServiceInfo{ + VIP: epr.ep.VirtualIP, + LocalLBIndex: epr.lbIndex, + } + } + ports := []string{} + if s.Ports == nil { + for _, port := range epr.ep.IngressPorts { + p := fmt.Sprintf("Target: %d, Publish: %d", port.TargetPort, port.PublishedPort) + ports = append(ports, p) + } + s.Ports = ports + } + s.Tasks = append(s.Tasks, Task{ + Name: epr.ep.Name, + EndpointID: ep, + EndpointIP: epr.ep.EndpointIP, + Info: epr.info, + }) + sinfo[epr.ep.ServiceName] = s + } + return sinfo +} + +func (n *network) isClusterEligible() bool { + if n.scope != datastore.SwarmScope || !n.driverIsMultihost() { + return false + } + return n.getController().getAgent() != nil +} + +func (n *network) joinCluster() error { + if !n.isClusterEligible() { + return nil + } + + agent := n.getController().getAgent() + if agent == nil { + return nil + } + + return agent.networkDB.JoinNetwork(n.ID()) +} + +func (n *network) leaveCluster() error { + if !n.isClusterEligible() { + return nil + } + + agent := n.getController().getAgent() + if agent == nil { + return nil + } + + return agent.networkDB.LeaveNetwork(n.ID()) +} + +func (ep *endpoint) addDriverInfoToCluster() error { + n := ep.getNetwork() + if !n.isClusterEligible() { + return nil + } + if ep.joinInfo == nil { + return nil + } + + agent := n.getController().getAgent() + if agent == nil { + return nil + } + + for _, te := range ep.joinInfo.driverTableEntries { + if err := agent.networkDB.CreateEntry(te.tableName, n.ID(), te.key, te.value); err != nil { + return err + } + } + return nil +} + +func (ep *endpoint) deleteDriverInfoFromCluster() error { + n := ep.getNetwork() + if !n.isClusterEligible() { + return nil + } + if ep.joinInfo == nil { + return nil + } + + agent := n.getController().getAgent() + if agent == nil { + return nil + } + + for _, te := range ep.joinInfo.driverTableEntries { + if err := agent.networkDB.DeleteEntry(te.tableName, n.ID(), te.key); err != nil { + return err + } + } + return nil +} + +func (ep *endpoint) addServiceInfoToCluster(sb *sandbox) error { + if ep.isAnonymous() && len(ep.myAliases) == 0 || ep.Iface().Address() == nil { + return nil + } + + n := ep.getNetwork() + if !n.isClusterEligible() { + return nil + } + + sb.Service.Lock() + defer sb.Service.Unlock() + logrus.Debugf("addServiceInfoToCluster START for %s %s", ep.svcName, ep.ID()) + + // Check that the endpoint is still present on the sandbox before adding it to the service discovery. 
+ // This is to handle a race between EnableService and sbLeave. + // It is possible that EnableService starts, fetches the list of endpoints, and + // by the time addServiceInfoToCluster is called the endpoint has been removed from the sandbox. + // The risk is that deleteServiceInfoFromCluster happens before addServiceInfoToCluster. + // This check under the Service lock of the sandbox ensures the correct behavior. + // If addServiceInfoToCluster arrives first, it may or may not find the endpoint and will proceed or exit, + // but in any case deleteServiceInfoFromCluster will follow, doing the cleanup if needed. + // If deleteServiceInfoFromCluster arrives first, it runs after the endpoint has been + // removed from the list; in that situation the delete bails out, not finding any data to clean up, + // and the add bails out, not finding the endpoint on the sandbox. + if e := sb.getEndpoint(ep.ID()); e == nil { + logrus.Warnf("addServiceInfoToCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) + return nil + } + + c := n.getController() + agent := c.getAgent() + + name := ep.Name() + if ep.isAnonymous() { + name = ep.MyAliases()[0] + } + + var ingressPorts []*PortConfig + if ep.svcID != "" { + // This is a task part of a service + // Gossip ingress ports only in ingress network. + if n.ingress { + ingressPorts = ep.ingressPorts + } + if err := c.addServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), name, ep.virtualIP, ingressPorts, ep.svcAliases, ep.myAliases, ep.Iface().Address().IP, "addServiceInfoToCluster"); err != nil { + return err + } + } else { + // This is a container simply attached to an attachable network + if err := c.addContainerNameResolution(n.ID(), ep.ID(), name, ep.myAliases, ep.Iface().Address().IP, "addServiceInfoToCluster"); err != nil { + return err + } + } + + buf, err := proto.Marshal(&EndpointRecord{ + Name: name, + ServiceName: ep.svcName, + ServiceID: ep.svcID, + VirtualIP: ep.virtualIP.String(), + IngressPorts: ingressPorts, + Aliases: ep.svcAliases, + TaskAliases: ep.myAliases, + EndpointIP: ep.Iface().Address().IP.String(), + ServiceDisabled: false, + }) + if err != nil { + return err + } + + if agent != nil { + if err := agent.networkDB.CreateEntry(libnetworkEPTable, n.ID(), ep.ID(), buf); err != nil { + logrus.Warnf("addServiceInfoToCluster NetworkDB CreateEntry failed for %s %s err:%s", ep.id, n.id, err) + return err + } + } + + logrus.Debugf("addServiceInfoToCluster END for %s %s", ep.svcName, ep.ID()) + + return nil +} + +func (ep *endpoint) deleteServiceInfoFromCluster(sb *sandbox, fullRemove bool, method string) error { + if ep.isAnonymous() && len(ep.myAliases) == 0 { + return nil + } + + n := ep.getNetwork() + if !n.isClusterEligible() { + return nil + } + + sb.Service.Lock() + defer sb.Service.Unlock() + logrus.Debugf("deleteServiceInfoFromCluster from %s START for %s %s", method, ep.svcName, ep.ID()) + + // Avoid a race w/ a container that aborts preemptively. This would + // get caught in disableServiceInNetworkDB, but we check here to make the + // nature of the condition clearer.
+ // See comment in addServiceInfoToCluster() + if e := sb.getEndpoint(ep.ID()); e == nil { + logrus.Warnf("deleteServiceInfoFromCluster suppressing service resolution ep is not anymore in the sandbox %s", ep.ID()) + return nil + } + + c := n.getController() + agent := c.getAgent() + + name := ep.Name() + if ep.isAnonymous() { + name = ep.MyAliases()[0] + } + + if agent != nil { + // First update the networkDB, then locally + if fullRemove { + if err := agent.networkDB.DeleteEntry(libnetworkEPTable, n.ID(), ep.ID()); err != nil { + logrus.Warnf("deleteServiceInfoFromCluster NetworkDB DeleteEntry failed for %s %s err:%s", ep.id, n.id, err) + } + } else { + disableServiceInNetworkDB(agent, n, ep) + } + } + + if ep.Iface().Address() != nil { + if ep.svcID != "" { + // This is a task part of a service + var ingressPorts []*PortConfig + if n.ingress { + ingressPorts = ep.ingressPorts + } + if err := c.rmServiceBinding(ep.svcName, ep.svcID, n.ID(), ep.ID(), name, ep.virtualIP, ingressPorts, ep.svcAliases, ep.myAliases, ep.Iface().Address().IP, "deleteServiceInfoFromCluster", true, fullRemove); err != nil { + return err + } + } else { + // This is a container simply attached to an attachable network + if err := c.delContainerNameResolution(n.ID(), ep.ID(), name, ep.myAliases, ep.Iface().Address().IP, "deleteServiceInfoFromCluster"); err != nil { + return err + } + } + } + + logrus.Debugf("deleteServiceInfoFromCluster from %s END for %s %s", method, ep.svcName, ep.ID()) + + return nil +} + +func disableServiceInNetworkDB(a *agent, n *network, ep *endpoint) { + var epRec EndpointRecord + + logrus.Debugf("disableServiceInNetworkDB for %s %s", ep.svcName, ep.ID()) + + // Update existing record to indicate that the service is disabled + inBuf, err := a.networkDB.GetEntry(libnetworkEPTable, n.ID(), ep.ID()) + if err != nil { + logrus.Warnf("disableServiceInNetworkDB GetEntry failed for %s %s err:%s", ep.id, n.id, err) + return + } + // Should never fail + if err := proto.Unmarshal(inBuf, &epRec); err != nil { + logrus.Errorf("disableServiceInNetworkDB unmarshal failed for %s %s err:%s", ep.id, n.id, err) + return + } + epRec.ServiceDisabled = true + // Should never fail + outBuf, err := proto.Marshal(&epRec) + if err != nil { + logrus.Errorf("disableServiceInNetworkDB marshalling failed for %s %s err:%s", ep.id, n.id, err) + return + } + // Send update to the whole cluster + if err := a.networkDB.UpdateEntry(libnetworkEPTable, n.ID(), ep.ID(), outBuf); err != nil { + logrus.Warnf("disableServiceInNetworkDB UpdateEntry failed for %s %s err:%s", ep.id, n.id, err) + } +} + +func (n *network) addDriverWatches() { + if !n.isClusterEligible() { + return + } + + c := n.getController() + agent := c.getAgent() + if agent == nil { + return + } + for _, table := range n.driverTables { + ch, cancel := agent.networkDB.Watch(table.name, n.ID(), "") + agent.Lock() + agent.driverCancelFuncs[n.ID()] = append(agent.driverCancelFuncs[n.ID()], cancel) + agent.Unlock() + go c.handleTableEvents(ch, n.handleDriverTableEvent) + d, err := n.driver(false) + if err != nil { + logrus.Errorf("Could not resolve driver %s while walking driver table: %v", n.networkType, err) + return + } + + agent.networkDB.WalkTable(table.name, func(nid, key string, value []byte, deleted bool) bool { + // skip the entries that are marked for deletion; this is safe because this function is + // called at initialization time so there is no state to delete + if nid == n.ID() && !deleted { + d.EventNotify(driverapi.Create, nid, table.name, key, value) + } + 
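+ // Editor's note (not part of the vendored file): this walk replays entries + // that already existed before the watch above was registered, handing them + // to the driver as synthetic Create events; returning false lets the walk + // continue over the remaining entries.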
return false + }) + } +} + +func (n *network) cancelDriverWatches() { + if !n.isClusterEligible() { + return + } + + agent := n.getController().getAgent() + if agent == nil { + return + } + + agent.Lock() + cancelFuncs := agent.driverCancelFuncs[n.ID()] + delete(agent.driverCancelFuncs, n.ID()) + agent.Unlock() + + for _, cancel := range cancelFuncs { + cancel() + } +} + +func (c *controller) handleTableEvents(ch *events.Channel, fn func(events.Event)) { + for { + select { + case ev := <-ch.C: + fn(ev) + case <-ch.Done(): + return + } + } +} + +func (n *network) handleDriverTableEvent(ev events.Event) { + d, err := n.driver(false) + if err != nil { + logrus.Errorf("Could not resolve driver %s while handling driver table event: %v", n.networkType, err) + return + } + + var ( + etype driverapi.EventType + tname string + key string + value []byte + ) + + switch event := ev.(type) { + case networkdb.CreateEvent: + tname = event.Table + key = event.Key + value = event.Value + etype = driverapi.Create + case networkdb.DeleteEvent: + tname = event.Table + key = event.Key + value = event.Value + etype = driverapi.Delete + case networkdb.UpdateEvent: + tname = event.Table + key = event.Key + value = event.Value + etype = driverapi.Delete + } + + d.EventNotify(etype, n.ID(), tname, key, value) +} + +func (c *controller) handleNodeTableEvent(ev events.Event) { + var ( + value []byte + isAdd bool + nodeAddr networkdb.NodeAddr + ) + switch event := ev.(type) { + case networkdb.CreateEvent: + value = event.Value + isAdd = true + case networkdb.DeleteEvent: + value = event.Value + case networkdb.UpdateEvent: + logrus.Errorf("Unexpected update node table event = %#v", event) + } + + err := json.Unmarshal(value, &nodeAddr) + if err != nil { + logrus.Errorf("Error unmarshalling node table event %v", err) + return + } + c.processNodeDiscovery([]net.IP{nodeAddr.Addr}, isAdd) + +} + +func (c *controller) handleEpTableEvent(ev events.Event) { + var ( + nid string + eid string + value []byte + epRec EndpointRecord + ) + + switch event := ev.(type) { + case networkdb.CreateEvent: + nid = event.NetworkID + eid = event.Key + value = event.Value + case networkdb.DeleteEvent: + nid = event.NetworkID + eid = event.Key + value = event.Value + case networkdb.UpdateEvent: + nid = event.NetworkID + eid = event.Key + value = event.Value + default: + logrus.Errorf("Unexpected update service table event = %#v", event) + return + } + + err := proto.Unmarshal(value, &epRec) + if err != nil { + logrus.Errorf("Failed to unmarshal service table value: %v", err) + return + } + + containerName := epRec.Name + svcName := epRec.ServiceName + svcID := epRec.ServiceID + vip := net.ParseIP(epRec.VirtualIP) + ip := net.ParseIP(epRec.EndpointIP) + ingressPorts := epRec.IngressPorts + serviceAliases := epRec.Aliases + taskAliases := epRec.TaskAliases + + if containerName == "" || ip == nil { + logrus.Errorf("Invalid endpoint name/ip received while handling service table event %s", value) + return + } + + switch ev.(type) { + case networkdb.CreateEvent: + logrus.Debugf("handleEpTableEvent ADD %s R:%v", eid, epRec) + if svcID != "" { + // This is a remote task part of a service + if err := c.addServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent"); err != nil { + logrus.Errorf("failed adding service binding for %s epRec:%v err:%v", eid, epRec, err) + return + } + } else { + // This is a remote container simply attached to an attachable network + if err := 
c.addContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { + logrus.Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err) + } + } + + case networkdb.DeleteEvent: + logrus.Debugf("handleEpTableEvent DEL %s R:%v", eid, epRec) + if svcID != "" { + // This is a remote task part of a service + if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, true); err != nil { + logrus.Errorf("failed removing service binding for %s epRec:%v err:%v", eid, epRec, err) + return + } + } else { + // This is a remote container simply attached to an attachable network + if err := c.delContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil { + logrus.Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err) + } + } + case networkdb.UpdateEvent: + logrus.Debugf("handleEpTableEvent UPD %s R:%v", eid, epRec) + // We currently should only get these to inform us that an endpoint + // is disabled. Report if otherwise. + if svcID == "" || !epRec.ServiceDisabled { + logrus.Errorf("Unexpected update table event for %s epRec:%v", eid, epRec) + return + } + // This is a remote task that is part of a service that is now disabled + if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true, false); err != nil { + logrus.Errorf("failed disabling service binding for %s epRec:%v err:%v", eid, epRec, err) + return + } + } +} diff --git a/vendor/github.com/docker/libnetwork/agent.pb.go b/vendor/github.com/docker/libnetwork/agent.pb.go new file mode 100644 index 0000000000..4092973c9b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/agent.pb.go @@ -0,0 +1,1095 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: agent.proto + +/* + Package libnetwork is a generated protocol buffer package. + + It is generated from these files: + agent.proto + + It has these top-level messages: + EndpointRecord + PortConfig +*/ +package libnetwork + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
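+// Editor's note (illustrative sketch, not part of the generated file): these +// generated types are the gossip payload for the libnetwork endpoint table; +// agent.go round-trips them with the gogo/protobuf runtime, e.g.: +// +// buf, err := proto.Marshal(&EndpointRecord{Name: "web.1", ServiceName: "web"}) +// var rec EndpointRecord +// err = proto.Unmarshal(buf, &rec)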
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type PortConfig_Protocol int32 + +const ( + ProtocolTCP PortConfig_Protocol = 0 + ProtocolUDP PortConfig_Protocol = 1 + ProtocolSCTP PortConfig_Protocol = 2 +) + +var PortConfig_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} +var PortConfig_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x PortConfig_Protocol) String() string { + return proto.EnumName(PortConfig_Protocol_name, int32(x)) +} +func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1, 0} } + +// EndpointRecord specifies all the endpoint specific information that +// needs to be gossiped to nodes participating in the network. +type EndpointRecord struct { + // Name of the container + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Service name of the service to which this endpoint belongs. + ServiceName string `protobuf:"bytes,2,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"` + // Service ID of the service to which this endpoint belongs. + ServiceID string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Virtual IP of the service to which this endpoint belongs. + VirtualIP string `protobuf:"bytes,4,opt,name=virtual_ip,json=virtualIp,proto3" json:"virtual_ip,omitempty"` + // IP assigned to this endpoint. + EndpointIP string `protobuf:"bytes,5,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"` + // IngressPorts exposed by the service to which this endpoint belongs. + IngressPorts []*PortConfig `protobuf:"bytes,6,rep,name=ingress_ports,json=ingressPorts" json:"ingress_ports,omitempty"` + // A list of aliases which are alternate names for the service + Aliases []string `protobuf:"bytes,7,rep,name=aliases" json:"aliases,omitempty"` + // List of task specific aliases + TaskAliases []string `protobuf:"bytes,8,rep,name=task_aliases,json=taskAliases" json:"task_aliases,omitempty"` + // Whether this endpoint's service has been disabled + ServiceDisabled bool `protobuf:"varint,9,opt,name=service_disabled,json=serviceDisabled,proto3" json:"service_disabled,omitempty"` +} + +func (m *EndpointRecord) Reset() { *m = EndpointRecord{} } +func (*EndpointRecord) ProtoMessage() {} +func (*EndpointRecord) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{0} } + +func (m *EndpointRecord) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EndpointRecord) GetServiceName() string { + if m != nil { + return m.ServiceName + } + return "" +} + +func (m *EndpointRecord) GetServiceID() string { + if m != nil { + return m.ServiceID + } + return "" +} + +func (m *EndpointRecord) GetVirtualIP() string { + if m != nil { + return m.VirtualIP + } + return "" +} + +func (m *EndpointRecord) GetEndpointIP() string { + if m != nil { + return m.EndpointIP + } + return "" +} + +func (m *EndpointRecord) GetIngressPorts() []*PortConfig { + if m != nil { + return m.IngressPorts + } + return nil +} + +func (m *EndpointRecord) GetAliases() []string { + if m != nil { + return m.Aliases + } + return nil +} + +func (m *EndpointRecord) GetTaskAliases() []string { + if m != nil { + return m.TaskAliases + } + return nil +} + +func (m *EndpointRecord) GetServiceDisabled() bool { + if m != nil { + return m.ServiceDisabled + } + return false +} + +// PortConfig specifies an exposed port which can be +// addressed using 
the given name. This can be later queried +// using a service discovery api or a DNS SRV query. The node +// port specifies a port that can be used to address this +// service external to the cluster by sending a connection +// request to this port to any node on the cluster. +type PortConfig struct { + // Name for the port. If provided the port information can + // be queried using the name as in a DNS SRV query. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Protocol for the port which is exposed. + Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=libnetwork.PortConfig_Protocol" json:"protocol,omitempty"` + // The port which the application is exposing and is bound to. + TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"` + // PublishedPort specifies the port on which the service is + // exposed on all nodes on the cluster. If not specified an + // arbitrary port in the node port range is allocated by the + // system. If specified it should be within the node port + // range and it should be available. + PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"` +} + +func (m *PortConfig) Reset() { *m = PortConfig{} } +func (*PortConfig) ProtoMessage() {} +func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorAgent, []int{1} } + +func (m *PortConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PortConfig) GetProtocol() PortConfig_Protocol { + if m != nil { + return m.Protocol + } + return ProtocolTCP +} + +func (m *PortConfig) GetTargetPort() uint32 { + if m != nil { + return m.TargetPort + } + return 0 +} + +func (m *PortConfig) GetPublishedPort() uint32 { + if m != nil { + return m.PublishedPort + } + return 0 +} + +func init() { + proto.RegisterType((*EndpointRecord)(nil), "libnetwork.EndpointRecord") + proto.RegisterType((*PortConfig)(nil), "libnetwork.PortConfig") + proto.RegisterEnum("libnetwork.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value) +} +func (this *EndpointRecord) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 13) + s = append(s, "&libnetwork.EndpointRecord{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "ServiceName: "+fmt.Sprintf("%#v", this.ServiceName)+",\n") + s = append(s, "ServiceID: "+fmt.Sprintf("%#v", this.ServiceID)+",\n") + s = append(s, "VirtualIP: "+fmt.Sprintf("%#v", this.VirtualIP)+",\n") + s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n") + if this.IngressPorts != nil { + s = append(s, "IngressPorts: "+fmt.Sprintf("%#v", this.IngressPorts)+",\n") + } + s = append(s, "Aliases: "+fmt.Sprintf("%#v", this.Aliases)+",\n") + s = append(s, "TaskAliases: "+fmt.Sprintf("%#v", this.TaskAliases)+",\n") + s = append(s, "ServiceDisabled: "+fmt.Sprintf("%#v", this.ServiceDisabled)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *PortConfig) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&libnetwork.PortConfig{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Protocol: "+fmt.Sprintf("%#v", this.Protocol)+",\n") + s = append(s, "TargetPort: "+fmt.Sprintf("%#v", this.TargetPort)+",\n") + s = append(s, "PublishedPort: "+fmt.Sprintf("%#v", this.PublishedPort)+",\n") + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringAgent(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *EndpointRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.ServiceName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + } + if len(m.ServiceID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if len(m.VirtualIP) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.VirtualIP))) + i += copy(dAtA[i:], m.VirtualIP) + } + if len(m.EndpointIP) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.EndpointIP))) + i += copy(dAtA[i:], m.EndpointIP) + } + if len(m.IngressPorts) > 0 { + for _, msg := range m.IngressPorts { + dAtA[i] = 0x32 + i++ + i = encodeVarintAgent(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TaskAliases) > 0 { + for _, s := range m.TaskAliases { + dAtA[i] = 0x42 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ServiceDisabled { + dAtA[i] = 0x48 + i++ + if m.ServiceDisabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PortConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAgent(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Protocol != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.Protocol)) + } + if m.TargetPort != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintAgent(dAtA, i, uint64(m.PublishedPort)) + } + return i, nil +} + +func encodeVarintAgent(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *EndpointRecord) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAgent(uint64(l)) + } + l = len(m.ServiceName) + if l > 0 { + n += 1 + l + sovAgent(uint64(l)) + } + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovAgent(uint64(l)) + } + l = len(m.VirtualIP) + if l > 0 { + n += 1 + l + sovAgent(uint64(l)) + } + l = len(m.EndpointIP) + if l > 0 { + n 
+= 1 + l + sovAgent(uint64(l)) + } + if len(m.IngressPorts) > 0 { + for _, e := range m.IngressPorts { + l = e.Size() + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovAgent(uint64(l)) + } + } + if len(m.TaskAliases) > 0 { + for _, s := range m.TaskAliases { + l = len(s) + n += 1 + l + sovAgent(uint64(l)) + } + } + if m.ServiceDisabled { + n += 2 + } + return n +} + +func (m *PortConfig) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovAgent(uint64(l)) + } + if m.Protocol != 0 { + n += 1 + sovAgent(uint64(m.Protocol)) + } + if m.TargetPort != 0 { + n += 1 + sovAgent(uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + n += 1 + sovAgent(uint64(m.PublishedPort)) + } + return n +} + +func sovAgent(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAgent(x uint64) (n int) { + return sovAgent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EndpointRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointRecord{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `VirtualIP:` + fmt.Sprintf("%v", this.VirtualIP) + `,`, + `EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`, + `IngressPorts:` + strings.Replace(fmt.Sprintf("%v", this.IngressPorts), "PortConfig", "PortConfig", 1) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `TaskAliases:` + fmt.Sprintf("%v", this.TaskAliases) + `,`, + `ServiceDisabled:` + fmt.Sprintf("%v", this.ServiceDisabled) + `,`, + `}`, + }, "") + return s +} +func (this *PortConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`, + `}`, + }, "") + return s +} +func valueToStringAgent(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *EndpointRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IngressPorts = append(m.IngressPorts, &PortConfig{}) + if err := m.IngressPorts[len(m.IngressPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskAliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskAliases = append(m.TaskAliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceDisabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ServiceDisabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAgent + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType) + } + m.PublishedPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAgent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishedPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipAgent(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAgent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAgent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAgent + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAgent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAgent(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAgent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAgent = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("agent.proto", fileDescriptorAgent) } + +var fileDescriptorAgent = []byte{ + // 459 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x6f, 0xd3, 0x4c, + 0x18, 0xc7, 0xe3, 0xc4, 0x6f, 0x1b, 0x3f, 0x4e, 0x52, 0xeb, 0xf4, 0x0a, 0x59, 0x1e, 0x1c, 0x13, + 0x09, 0x29, 0x48, 0x28, 0x95, 0xca, 0xd8, 0x89, 0x26, 0x0c, 0x5e, 0x90, 0x75, 0x4d, 0x59, 0x83, + 0x13, 0x1f, 0xe6, 0x54, 0xe3, 0xb3, 0xee, 0xae, 0x65, 0x65, 0x03, 0xf5, 0x3b, 0x74, 0xe2, 0xcb, + 0x30, 0x32, 0x32, 0x55, 0xd4, 0x9f, 0x80, 0x95, 0x0d, 
0xdd, 0xf9, 0xae, 0x11, 0x52, 0xb7, 0xf3, + 0xef, 0xff, 0x3b, 0xeb, 0xb9, 0xff, 0x03, 0x7e, 0x5e, 0x92, 0x5a, 0x2e, 0x1a, 0xce, 0x24, 0x43, + 0x50, 0xd1, 0x6d, 0x4d, 0xe4, 0x27, 0xc6, 0x2f, 0xa3, 0xff, 0x4b, 0x56, 0x32, 0x8d, 0x8f, 0xd5, + 0xa9, 0x33, 0x66, 0x7f, 0xfa, 0x30, 0x79, 0x5d, 0x17, 0x0d, 0xa3, 0xb5, 0xc4, 0x64, 0xc7, 0x78, + 0x81, 0x10, 0xb8, 0x75, 0xfe, 0x91, 0x84, 0x4e, 0xe2, 0xcc, 0x3d, 0xac, 0xcf, 0xe8, 0x29, 0x8c, + 0x04, 0xe1, 0xd7, 0x74, 0x47, 0x36, 0x3a, 0xeb, 0xeb, 0xcc, 0x37, 0xec, 0x8d, 0x52, 0x5e, 0x00, + 0x58, 0x85, 0x16, 0xe1, 0x40, 0x09, 0x67, 0xe3, 0xf6, 0x6e, 0xea, 0x9d, 0x77, 0x34, 0x5d, 0x61, + 0xcf, 0x08, 0x69, 0xa1, 0xec, 0x6b, 0xca, 0xe5, 0x55, 0x5e, 0x6d, 0x68, 0x13, 0xba, 0x7b, 0xfb, + 0x6d, 0x47, 0xd3, 0x0c, 0x7b, 0x46, 0x48, 0x1b, 0x74, 0x0c, 0x3e, 0x31, 0x43, 0x2a, 0xfd, 0x3f, + 0xad, 0x4f, 0xda, 0xbb, 0x29, 0xd8, 0xd9, 0xd3, 0x0c, 0x83, 0x55, 0xd2, 0x06, 0x9d, 0xc2, 0x98, + 0xd6, 0x25, 0x27, 0x42, 0x6c, 0x1a, 0xc6, 0xa5, 0x08, 0x0f, 0x92, 0xc1, 0xdc, 0x3f, 0x79, 0xb2, + 0xd8, 0x17, 0xb2, 0xc8, 0x18, 0x97, 0x4b, 0x56, 0xbf, 0xa7, 0x25, 0x1e, 0x19, 0x59, 0x21, 0x81, + 0x42, 0x38, 0xcc, 0x2b, 0x9a, 0x0b, 0x22, 0xc2, 0xc3, 0x64, 0x30, 0xf7, 0xb0, 0xfd, 0x54, 0x35, + 0xc8, 0x5c, 0x5c, 0x6e, 0x6c, 0x3c, 0xd4, 0xb1, 0xaf, 0xd8, 0x2b, 0xa3, 0x3c, 0x87, 0xc0, 0xd6, + 0x50, 0x50, 0x91, 0x6f, 0x2b, 0x52, 0x84, 0x5e, 0xe2, 0xcc, 0x87, 0xf8, 0xc8, 0xf0, 0x95, 0xc1, + 0xb3, 0x2f, 0x7d, 0x80, 0xfd, 0x10, 0x8f, 0xf6, 0x7e, 0x0a, 0x43, 0xbd, 0xa7, 0x1d, 0xab, 0x74, + 0xe7, 0x93, 0x93, 0xe9, 0xe3, 0x4f, 0x58, 0x64, 0x46, 0xc3, 0x0f, 0x17, 0xd0, 0x14, 0x7c, 0x99, + 0xf3, 0x92, 0x48, 0xdd, 0x81, 0x5e, 0xc9, 0x18, 0x43, 0x87, 0xd4, 0x4d, 0xf4, 0x0c, 0x26, 0xcd, + 0xd5, 0xb6, 0xa2, 0xe2, 0x03, 0x29, 0x3a, 0xc7, 0xd5, 0xce, 0xf8, 0x81, 0x2a, 0x6d, 0xf6, 0x0e, + 0x86, 0xf6, 0xef, 0x28, 0x84, 0xc1, 0x7a, 0x99, 0x05, 0xbd, 0xe8, 0xe8, 0xe6, 0x36, 0xf1, 0x2d, + 0x5e, 0x2f, 0x33, 0x95, 0x5c, 0xac, 0xb2, 0xc0, 0xf9, 0x37, 0xb9, 0x58, 0x65, 0x28, 0x02, 0xf7, + 0x7c, 0xb9, 0xce, 0x82, 0x7e, 0x14, 0xdc, 0xdc, 0x26, 0x23, 0x1b, 0x29, 0x16, 0xb9, 0x5f, 0xbf, + 0xc5, 0xbd, 0xb3, 0xf0, 0xe7, 0x7d, 0xdc, 0xfb, 0x7d, 0x1f, 0x3b, 0x9f, 0xdb, 0xd8, 0xf9, 0xde, + 0xc6, 0xce, 0x8f, 0x36, 0x76, 0x7e, 0xb5, 0xb1, 0xb3, 0x3d, 0xd0, 0xaf, 0x79, 0xf9, 0x37, 0x00, + 0x00, 0xff, 0xff, 0x55, 0x29, 0x75, 0x5c, 0xd7, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/libnetwork/bitseq/sequence.go b/vendor/github.com/docker/libnetwork/bitseq/sequence.go new file mode 100644 index 0000000000..0069d495b7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/bitseq/sequence.go @@ -0,0 +1,737 @@ +// Package bitseq provides a structure and utilities for representing long bitmask +// as sequence of run-length encoded blocks. It operates directly on the encoded +// representation, it does not decode/encode. 
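The package comment above is the only prose description of the data structure, so a concrete illustration may help. The following is a minimal, self-contained sketch (the node type and the values are invented for illustration; the real, unexported sequence type is declared in the source below): a bitmask of 100 empty 32-bit blocks followed by one fully-set block is stored as just two run-length nodes.

    package main

    import "fmt"

    // node mirrors the shape of bitseq's sequence type: a 32-bit block
    // plus a count of how many consecutive times that block repeats.
    type node struct {
        block uint32
        count uint64
        next  *node
    }

    func main() {
        // 101 blocks (3232 bits) represented by two nodes.
        head := &node{block: 0x0, count: 100, next: &node{block: 0xFFFFFFFF, count: 1}}
        for n := head; n != nil; n = n.next {
            fmt.Printf("(0x%x, %d)->", n.block, n.count)
        }
        fmt.Println("end") // (0x0, 100)->(0xffffffff, 1)->end
    }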
+package bitseq + +import ( + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// block sequence constants +// If needed we can think of making these configurable +const ( + blockLen = uint32(32) + blockBytes = uint64(blockLen / 8) + blockMAX = uint32(1<<blockLen - 1) + blockFirstBit = uint32(1) << (blockLen - 1) + invalidPos = uint64(0xFFFFFFFFFFFFFFFF) +) + +var ( + // ErrNoBitAvailable is returned when no more bits are available to set + ErrNoBitAvailable = errors.New("no bit available") + // ErrBitAllocated is returned when the provided bit is already set + ErrBitAllocated = errors.New("requested bit is already allocated") +) + +// Handle contains the sequence representing the bitmask and its identifier +type Handle struct { + bits uint64 + unselected uint64 + head *sequence + app string + id string + dbIndex uint64 + dbExists bool + store datastore.DataStore + curr uint64 + sync.Mutex +} + +// NewHandle returns a thread-safe instance of the bitmask handler +func NewHandle(app string, ds datastore.DataStore, id string, numElements uint64) (*Handle, error) { + h := &Handle{ + app: app, + id: id, + store: ds, + bits: numElements, + unselected: numElements, + head: &sequence{ + block: 0x0, + count: getNumBlocks(numElements), + }, + } + + if h.store == nil { + return h, nil + } + + // Get the initial status from the ds if present. + if err := h.store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound { + return nil, err + } + + // If the handle is not in store, write it. + if !h.Exists() { + if err := h.writeToStore(); err != nil { + return nil, fmt.Errorf("failed to write bitsequence to store: %v", err) + } + } + + return h, nil +} + +// sequence represents a recurring sequence of 32 bits long bitmasks +type sequence struct { + block uint32 // block is a symbol representing 4 bytes long allocation bitmask + count uint64 // number of consecutive blocks (symbols) + next *sequence // next sequence +} + +// String returns a string representation of the block sequence starting from this block +func (s *sequence) toString() string { + var nextBlock string + if s.next == nil { + nextBlock = "end" + } else { + nextBlock = s.next.toString() + } + return fmt.Sprintf("(0x%x, %d)->%s", s.block, s.count, nextBlock) +} + +// GetAvailableBit returns the position of the first unset bit in the bitmask represented by this sequence +func (s *sequence) getAvailableBit(from uint64) (uint64, uint64, error) { + if s.block == blockMAX || s.count == 0 { + return invalidPos, invalidPos, ErrNoBitAvailable + } + bits := from + bitSel := blockFirstBit >> from + for bitSel > 0 && s.block&bitSel != 0 { + bitSel >>= 1 + bits++ + } + // Check if the loop exited because it could not + // find any available bit in the block starting from + // "from". Return invalid pos in that case. + if bitSel == 0 { + return invalidPos, invalidPos, ErrNoBitAvailable + } + return bits / 8, bits % 8, nil +} + +// GetCopy returns a copy of the linked list rooted at this node +func (s *sequence) getCopy() *sequence { + n := &sequence{block: s.block, count: s.count} + pn := n + ps := s.next + for ps != nil { + pn.next = &sequence{block: ps.block, count: ps.count} + pn = pn.next + ps = ps.next + } + return n +} + +// Equal checks if this sequence is equal to the passed one +func (s *sequence) equal(o *sequence) bool { + this := s + other := o + for this != nil { + if other == nil { + return false + } + if this.block != other.block || this.count != other.count { + return false + } + this = this.next + other = other.next + } + // Check if other is longer than this + if other != nil { + return false + } + return true +} + +// ToByteArray converts the sequence into a byte array +func (s *sequence) toByteArray() ([]byte, error) { + var bb []byte + + p := s + for p != nil { + b := make([]byte, 12) + binary.BigEndian.PutUint32(b[0:], p.block) + binary.BigEndian.PutUint64(b[4:], p.count) + bb = append(bb, b...) + p = p.next + } + + return bb, nil +} + +// fromByteArray constructs the sequence from the byte array +func (s *sequence) fromByteArray(data []byte) error { + l := len(data) + if l%12 != 0 { + return fmt.Errorf("cannot deserialize byte sequence of length %d (%v)", l, data) + } + + p := s + i := 0 + for { + p.block = binary.BigEndian.Uint32(data[i : i+4]) + p.count = binary.BigEndian.Uint64(data[i+4 : i+12]) + i += 12 + if i == l { + break + } + p.next = &sequence{} + p = p.next + } + + return nil +} + +func (h *Handle) getCopy() *Handle { + return &Handle{ + bits: h.bits, + unselected: h.unselected, + head: h.head.getCopy(), + app: h.app, + id: h.id, + dbIndex: h.dbIndex, + dbExists: h.dbExists, + store: h.store, + curr: h.curr, + } +} + +// SetAnyInRange atomically sets the first unset bit in the specified range in the sequence and returns the corresponding ordinal +func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) { + if end < start || end >= h.bits { + return invalidPos, fmt.Errorf("invalid bit range [%d, %d]", start, end) + } + if h.Unselected() == 0 { + return invalidPos, ErrNoBitAvailable + } + return h.set(0, start, end, true, false, serial) +} + +// SetAny atomically sets the first unset bit in the sequence and returns the corresponding ordinal +func (h *Handle) SetAny(serial bool) (uint64, error) { + if h.Unselected() == 0 { + return invalidPos, ErrNoBitAvailable + } + return h.set(0, 0, h.bits-1, true, false, serial) +} + +// Set atomically sets the corresponding bit in the sequence +func (h *Handle) Set(ordinal uint64) error { + if err := h.validateOrdinal(ordinal); err != nil { + return err + } + _, err := h.set(ordinal, 0, 0, false, false, false) + return err +} + +// Unset atomically unsets the corresponding bit in the sequence +func (h *Handle) Unset(ordinal uint64) error { + if err := h.validateOrdinal(ordinal); err != nil { + return err + } + _, err := h.set(ordinal, 0, 0, false, true, false) + return err +} + +// IsSet atomically checks if the ordinal bit is set. In case ordinal +// is outside of the bit sequence limits, false is returned. +func (h *Handle) IsSet(ordinal uint64) bool { + if err := h.validateOrdinal(ordinal); err != nil { + return false + } + h.Lock() + _, _, err := checkIfAvailable(h.head, ordinal) + h.Unlock() + return err != nil +} + +// runConsistencyCheck prunes empty (zero-count) nodes from the sequence list; it returns true if any were found +func (h *Handle) runConsistencyCheck() bool { + corrupted := false + for p, c := h.head, h.head.next; c != nil; c = c.next { + if c.count == 0 { + corrupted = true + p.next = c.next + continue // keep same p + } + p = c + } + return corrupted +}
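Taken together, Set/Unset/IsSet and the SetAny variants are the whole allocation surface of the package. A minimal usage sketch, assuming the NewHandle signature shown above and passing a nil datastore so that the handle stays purely in memory:

    package main

    import (
        "fmt"

        "github.com/docker/libnetwork/bitseq"
    )

    func main() {
        // A 1024-bit mask with no backing datastore.
        h, err := bitseq.NewHandle("example-app", nil, "example-id", 1024)
        if err != nil {
            panic(err)
        }
        ord, _ := h.SetAny(false)   // claims the first free bit: ordinal 0
        fmt.Println(h.IsSet(ord))   // true
        _ = h.Unset(ord)            // releases the bit again
        fmt.Println(h.Unselected()) // back to 1024
    }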
+// CheckConsistency checks if the bit sequence is in an inconsistent state and attempts to fix it. +// It looks for a corruption signature that may happen in docker 1.9.0 and 1.9.1. +func (h *Handle) CheckConsistency() error { + for { + h.Lock() + store := h.store + h.Unlock() + + if store != nil { + if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound { + return err + } + } + + h.Lock() + nh := h.getCopy() + h.Unlock() + + if !nh.runConsistencyCheck() { + return nil + } + + if err := nh.writeToStore(); err != nil { + if _, ok := err.(types.RetryError); !ok { + return fmt.Errorf("internal failure while fixing inconsistent bitsequence: %v", err) + } + continue + } + + logrus.Infof("Fixed inconsistent bit sequence in datastore:\n%s\n%s", h, nh) + + h.Lock() + h.head = nh.head + h.Unlock() + + return nil + } +} + +// set/reset the bit +func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) { + var ( + bitPos uint64 + bytePos uint64 + ret uint64 + err error + ) + + for { + var store datastore.DataStore + curr := uint64(0) + h.Lock() + store = h.store + if store != nil { + h.Unlock() // The lock is acquired in the GetObject + if err := store.GetObject(datastore.Key(h.Key()...), h); err != nil && err != datastore.ErrKeyNotFound { + return ret, err + } + h.Lock() // Acquire the lock back + } + logrus.Debugf("Received set for ordinal %v, start %v, end %v, any %t, release %t, serial:%v curr:%d \n", ordinal, start, end, any, release, serial, h.curr) + if serial { + curr = h.curr + } + // Get position if available + if release { + bytePos, bitPos = ordinalToPos(ordinal) + } else { + if any { + bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end) + ret = posToOrdinal(bytePos, bitPos) + if err == nil { + h.curr = ret + 1 + } + } else { + bytePos, bitPos, err = checkIfAvailable(h.head, ordinal) + ret = ordinal + } + } + if err != nil { + h.Unlock() + return ret, err + } + + // Create a private copy of h and work on it + nh := h.getCopy() + + nh.head = pushReservation(bytePos, bitPos, nh.head, release) + if release { + nh.unselected++ + } else { + nh.unselected-- + } + + if h.store != nil { + h.Unlock() + // Attempt to write private copy to store + if err := nh.writeToStore(); err != nil { + if _, ok := err.(types.RetryError); !ok { + return ret, fmt.Errorf("internal failure while setting the bit: %v", err) + } + // Retry + continue + } + h.Lock() + } + + // Previous atomic push was successful. Save private copy to local copy + h.unselected = nh.unselected + h.head = nh.head + h.dbExists = nh.dbExists + h.dbIndex = nh.dbIndex + h.Unlock() + return ret, nil + } +} + +// this check is needed to cover the case where the number of bits is not a multiple of blockLen +func (h *Handle) validateOrdinal(ordinal uint64) error { + h.Lock() + defer h.Unlock() + if ordinal >= h.bits { + return errors.New("bit does not belong to the sequence") + } + return nil +} + +// Destroy removes from the datastore the data belonging to this handle +func (h *Handle) Destroy() error { + for { + if err := h.deleteFromStore(); err != nil { + if _, ok := err.(types.RetryError); !ok { + return fmt.Errorf("internal failure while destroying the sequence: %v", err) + } + // Fetch latest + if err := h.store.GetObject(datastore.Key(h.Key()...), h); err != nil { + if err == datastore.ErrKeyNotFound { // already removed + return nil + } + return fmt.Errorf("failed to fetch from store when destroying the sequence: %v", err) + } + continue + } + return nil + } +} + +// ToByteArray converts this handle's data into a byte array +func (h *Handle) ToByteArray() ([]byte, error) { + + h.Lock() + defer h.Unlock() + ba := make([]byte, 16) + binary.BigEndian.PutUint64(ba[0:], h.bits) + binary.BigEndian.PutUint64(ba[8:], h.unselected) + bm, err := h.head.toByteArray() + if err != nil { + return nil, fmt.Errorf("failed to serialize head: %s", err.Error()) + } + ba = append(ba, bm...) + + return ba, nil +} + +// FromByteArray reads this handle's data from a byte array +func (h *Handle) FromByteArray(ba []byte) error { + if ba == nil { + return errors.New("nil byte array") + } + + nh := &sequence{} + err := nh.fromByteArray(ba[16:]) + if err != nil { + return fmt.Errorf("failed to deserialize head: %s", err.Error()) + } + + h.Lock() + h.head = nh + h.bits = binary.BigEndian.Uint64(ba[0:8]) + h.unselected = binary.BigEndian.Uint64(ba[8:16]) + h.Unlock() + + return nil +} + +// Bits returns the length of the bit sequence +func (h *Handle) Bits() uint64 { + return h.bits +} + +// Unselected returns the number of bits which are not selected +func (h *Handle) Unselected() uint64 { + h.Lock() + defer h.Unlock() + return h.unselected +} + +func (h *Handle) String() string { + h.Lock() + defer h.Unlock() + return fmt.Sprintf("App: %s, ID: %s, DBIndex: 0x%x, bits: %d, unselected: %d, sequence: %s", + h.app, h.id, h.dbIndex, h.bits, h.unselected, h.head.toString()) +} + +// MarshalJSON encodes Handle into json message +func (h *Handle) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "id": h.id, + } + + b, err := h.ToByteArray() + if err != nil { + return nil, err + } + m["sequence"] = b + return json.Marshal(m) +} + +// UnmarshalJSON decodes json message into Handle +func (h *Handle) UnmarshalJSON(data []byte) error { + var ( + m map[string]interface{} + b []byte + err error + ) + if err = json.Unmarshal(data, &m); err != nil { + return err + } + h.id = m["id"].(string) + bi, _ := json.Marshal(m["sequence"]) + if err := json.Unmarshal(bi, &b); err != nil { + return err + } + return h.FromByteArray(b) +} + +// getFirstAvailable looks for the first unset bit in the passed mask starting from start +func getFirstAvailable(head *sequence, start uint64) (uint64, uint64, error) { + // Find sequence which contains the start bit + byteStart, bitStart := ordinalToPos(start) + current, _, precBlocks, inBlockBytePos := findSequence(head, byteStart) + // Derive this sequence's offsets + byteOffset := byteStart - inBlockBytePos + bitOffset := inBlockBytePos*8 + bitStart + for current != nil { + if current.block != blockMAX { + // If the current block is not full, check if there is any available bit + // from the current bit onward in the current block. If not, before proceeding to the + // next block node, make sure we check for available bit in the next + // instance of the same block. Due to RLE, consecutive blocks with the same + // signature are compressed into one node. + retry: + bytePos, bitPos, err := current.getAvailableBit(bitOffset) + if err != nil && precBlocks == current.count-1 { + // This is the last instance in the same block node, + // so move to the next block. + goto next + } + if err != nil { + // There are some more instances of the same block, so add the offset + // and be optimistic that you will find the available bit in the next + // instance of the same block. + bitOffset = 0 + byteOffset += blockBytes + precBlocks++ + goto retry + } + return byteOffset + bytePos, bitPos, err + } + // Moving to next block: Reset bit offset. + next: + bitOffset = 0 + byteOffset += (current.count * blockBytes) - (precBlocks * blockBytes) + precBlocks = 0 + current = current.next + } + return invalidPos, invalidPos, ErrNoBitAvailable +} + +// getAvailableFromCurrent will look for an available ordinal starting from the current ordinal. +// If none is found, it will loop back to the start to check for an available bit. +// This can be further optimized to check from start till curr in case of a rollover +func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) { + var bytePos, bitPos uint64 + var err error + if curr != 0 && curr > start { + bytePos, bitPos, err = getFirstAvailable(head, curr) + ret := posToOrdinal(bytePos, bitPos) + if end < ret || err != nil { + goto begin + } + return bytePos, bitPos, nil + } + +begin: + bytePos, bitPos, err = getFirstAvailable(head, start) + ret := posToOrdinal(bytePos, bitPos) + if end < ret || err != nil { + return invalidPos, invalidPos, ErrNoBitAvailable + } + return bytePos, bitPos, nil +} + +// checkIfAvailable checks if the bit corresponding to the specified ordinal is unset +// If the ordinal is beyond the sequence limits, a negative response is returned +func checkIfAvailable(head *sequence, ordinal uint64) (uint64, uint64, error) { + bytePos, bitPos := ordinalToPos(ordinal) + + // Find the sequence containing this byte + current, _, _, inBlockBytePos := findSequence(head, bytePos) + if current != nil { + // Check whether the bit corresponding to the ordinal address is unset + bitSel := blockFirstBit >> (inBlockBytePos*8 + bitPos) + if current.block&bitSel == 0 { + return bytePos, bitPos, nil + } + } + + return invalidPos, invalidPos, ErrBitAllocated +} + +// Given the byte position and the sequences list head, return the pointer to the +// sequence containing the byte (current), the pointer to the previous sequence, +// the number of blocks preceding the block containing the byte inside the current sequence. +// If bytePos is outside of the list, the function will return (nil, nil, 0, invalidPos) +func findSequence(head *sequence, bytePos uint64) (*sequence, *sequence, uint64, uint64) { + // Find the sequence containing this byte + previous := head + current := head + n := bytePos + for current.next != nil && n >= (current.count*blockBytes) { // Nil check for less than 32 addresses masks + n -= (current.count * blockBytes) + previous = current + current = current.next + } + + // If byte is outside of the list, let caller know + if n >= (current.count * blockBytes) { + return nil, nil, 0, invalidPos + } + + // Find the byte position inside the block and the number of blocks + // preceding the block containing the byte inside this sequence + precBlocks := n / blockBytes + inBlockBytePos := bytePos % blockBytes + + return current, previous, precBlocks, inBlockBytePos +} + +// PushReservation pushes the bit reservation inside the bitmask. +// Given byte and bit positions, identify the sequence (current) which holds the block containing the affected bit. +// Create a new block with the modified bit according to the operation (allocate/release). +// Create a new sequence containing the new block and insert it in the proper position. +// Remove current sequence if empty. +// Check if new sequence can be merged with neighbour (previous/next) sequences. +// +// +// Identify "current" sequence containing block: +// [prev seq] [current seq] [next seq] +// +// Based on block position, resulting list of sequences can be any of three forms: +// +// block position Resulting list of sequences +// A) block is first in current: [prev seq] [new] [modified current seq] [next seq] +// B) block is last in current: [prev seq] [modified current seq] [new] [next seq] +// C) block is in the middle of current: [prev seq] [curr pre] [new] [curr post] [next seq] +func pushReservation(bytePos, bitPos uint64, head *sequence, release bool) *sequence { + // Store list's head + newHead := head + + // Find the sequence containing this byte + current, previous, precBlocks, inBlockBytePos := findSequence(head, bytePos) + if current == nil { + return newHead + } + + // Construct updated block + bitSel := blockFirstBit >> (inBlockBytePos*8 + bitPos) + newBlock := current.block + if release { + newBlock &^= bitSel + } else { + newBlock |= bitSel + } + + // Quit if it was a redundant request + if current.block == newBlock { + return newHead + } + + // Current sequence inevitably loses one block, update count + current.count-- + + // Create new sequence + newSequence := &sequence{block: newBlock, count: 1} + + // Insert the new sequence in the list based on block position + if precBlocks == 0 { // First in sequence (A) + newSequence.next = current + if current == head { + newHead = newSequence + previous = newHead + } else { + previous.next = newSequence + } + removeCurrentIfEmpty(&newHead, newSequence, current) + mergeSequences(previous) + } else if precBlocks == current.count { // Last in sequence (B) + newSequence.next = current.next + current.next = newSequence + mergeSequences(current) + } else { // In between the sequence (C) + currPre := &sequence{block: current.block, count: precBlocks, next: newSequence} + currPost := current + currPost.count -= precBlocks + newSequence.next = currPost + if currPost == head { + newHead = currPre + } else { + previous.next = currPre + } + // No merging or empty current possible here + } + + return newHead +}
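The three cases in the comment above are easier to follow with concrete values. Worked by hand (a sketch, not program output), in the (block, count) notation of toString, each starting from a fresh 128-bit mask whose list is (0x0, 4)->end:

    Set ordinal 0  -> block 0 is first in the run (case A):
                      (0x80000000, 1)->(0x0, 3)->end
    Set ordinal 96 -> block 3 is last in the run (case B):
                      (0x0, 3)->(0x80000000, 1)->end
    Set ordinal 32 -> block 1 is in the middle (case C):
                      (0x0, 1)->(0x80000000, 1)->(0x0, 2)->end

In every case the original four-block run gives up one block to a new single-block node; mergeSequences can later fold neighbouring nodes back together once their block signatures become identical again.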
+// Removes the current sequence from the list if empty, adjusting the head pointer if needed +func removeCurrentIfEmpty(head **sequence, previous, current *sequence) { + if current.count == 0 { + if current == *head { + *head = current.next + } else { + previous.next = current.next + current = current.next + } + } +} + +// Given a pointer to a sequence, it checks if it can be merged with any following sequences. +// It stops when no more merging is possible. +// TODO: Optimization: only attempt merge from start to end sequence, no need to scan till the end of the list +func mergeSequences(seq *sequence) { + if seq != nil { + // Merge whatever is possible starting from seq + for seq.next != nil && seq.block == seq.next.block { + seq.count += seq.next.count + seq.next = seq.next.next + } + // Move to next + mergeSequences(seq.next) + } +} + +func getNumBlocks(numBits uint64) uint64 { + numBlocks := numBits / uint64(blockLen) + if numBits%uint64(blockLen) != 0 { + numBlocks++ + } + return numBlocks +} + +func ordinalToPos(ordinal uint64) (uint64, uint64) { + return ordinal / 8, ordinal % 8 +} + +func posToOrdinal(bytePos, bitPos uint64) uint64 { + return bytePos*8 + bitPos +} diff --git a/vendor/github.com/docker/libnetwork/bitseq/store.go b/vendor/github.com/docker/libnetwork/bitseq/store.go new file mode 100644 index 0000000000..cdb7f04264 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/bitseq/store.go @@ -0,0 +1,142 @@ +package bitseq + +import ( + "encoding/json" + "fmt" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/types" +) + +// Key provides the Key to be used in KV Store +func (h *Handle) Key() []string { + h.Lock() + defer h.Unlock() + return []string{h.app, h.id} +} + +// KeyPrefix returns the immediate parent key that can be used for tree walk +func (h *Handle) KeyPrefix() []string { + h.Lock() + defer h.Unlock() + return []string{h.app} +} + +// Value marshals the data to be stored in the KV store +func (h *Handle) Value() []byte { + b, err := json.Marshal(h) + if err != nil { + return nil + } + return b +} + +// SetValue unmarshals the data from the KV store +func (h *Handle) SetValue(value []byte) error { + return json.Unmarshal(value, h) +} + +// Index returns the latest DB Index as seen by this object +func (h *Handle) Index() uint64 { + h.Lock() + defer h.Unlock() + return h.dbIndex +} + +// SetIndex method allows the datastore to store the latest DB Index into this object +func (h *Handle) SetIndex(index uint64) { + h.Lock() + h.dbIndex = index + h.dbExists = true + h.Unlock() +} + +// Exists method is true if this object has been stored in the DB.
+func (h *Handle) Exists() bool { + h.Lock() + defer h.Unlock() + return h.dbExists +} + +// New method returns a handle based on the receiver handle +func (h *Handle) New() datastore.KVObject { + h.Lock() + defer h.Unlock() + + return &Handle{ + app: h.app, + store: h.store, + } +} + +// CopyTo deep copies the handle into the passed destination object +func (h *Handle) CopyTo(o datastore.KVObject) error { + h.Lock() + defer h.Unlock() + + dstH := o.(*Handle) + if h == dstH { + return nil + } + dstH.Lock() + dstH.bits = h.bits + dstH.unselected = h.unselected + dstH.head = h.head.getCopy() + dstH.app = h.app + dstH.id = h.id + dstH.dbIndex = h.dbIndex + dstH.dbExists = h.dbExists + dstH.store = h.store + dstH.curr = h.curr + dstH.Unlock() + + return nil +} + +// Skip provides a way for a KV Object to avoid persisting it in the KV Store +func (h *Handle) Skip() bool { + return false +} + +// DataScope method returns the storage scope of the datastore +func (h *Handle) DataScope() string { + h.Lock() + defer h.Unlock() + + return h.store.Scope() +} + +func (h *Handle) fromDsValue(value []byte) error { + var ba []byte + if err := json.Unmarshal(value, &ba); err != nil { + return fmt.Errorf("failed to decode json: %s", err.Error()) + } + if err := h.FromByteArray(ba); err != nil { + return fmt.Errorf("failed to decode handle: %s", err.Error()) + } + return nil +} + +func (h *Handle) writeToStore() error { + h.Lock() + store := h.store + h.Unlock() + if store == nil { + return nil + } + err := store.PutObjectAtomic(h) + if err == datastore.ErrKeyModified { + return types.RetryErrorf("failed to perform atomic write (%v). Retry might fix the error", err) + } + return err +} + +func (h *Handle) deleteFromStore() error { + h.Lock() + store := h.store + h.Unlock() + if store == nil { + return nil + } + return store.DeleteObjectAtomic(h) +} diff --git a/vendor/github.com/containers/storage/pkg/mflag/LICENSE b/vendor/github.com/docker/libnetwork/client/mflag/LICENSE similarity index 100% rename from vendor/github.com/containers/storage/pkg/mflag/LICENSE rename to vendor/github.com/docker/libnetwork/client/mflag/LICENSE diff --git a/vendor/github.com/docker/libnetwork/cluster/provider.go b/vendor/github.com/docker/libnetwork/cluster/provider.go new file mode 100644 index 0000000000..0259eb7005 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/cluster/provider.go @@ -0,0 +1,37 @@ +package cluster + +import ( + "context" + + "github.com/docker/docker/api/types/network" +) + +const ( + // EventSocketChange control socket changed + EventSocketChange = iota + // EventNodeReady cluster node in ready state + EventNodeReady + // EventNodeLeave node is leaving the cluster + EventNodeLeave + // EventNetworkKeysAvailable network keys correctly configured in the networking layer + EventNetworkKeysAvailable +) + +// ConfigEventType type of the event produced by the cluster +type ConfigEventType uint8 + +// Provider provides clustering config details +type Provider interface { + IsManager() bool + IsAgent() bool + GetLocalAddress() string + GetListenAddress() string + GetAdvertiseAddress() string + GetDataPathAddress() string + GetRemoteAddressList() []string + ListenClusterEvents() <-chan ConfigEventType + AttachNetwork(string, string, []string) (*network.NetworkingConfig, error) + DetachNetwork(string, string) error + UpdateAttachment(string, string, *network.NetworkingConfig) error + WaitForDetachment(context.Context, string, string, string, string) error +} diff --git 
a/vendor/github.com/docker/libnetwork/common/caller.go b/vendor/github.com/docker/libnetwork/common/caller.go new file mode 100644 index 0000000000..0dec3bc0bc --- /dev/null +++ b/vendor/github.com/docker/libnetwork/common/caller.go @@ -0,0 +1,29 @@ +package common + +import ( + "runtime" + "strings" +) + +func callerInfo(i int) string { + ptr, _, _, ok := runtime.Caller(i) + fName := "unknown" + if ok { + f := runtime.FuncForPC(ptr) + if f != nil { + // f.Name() is like: github.com/docker/libnetwork/common.MethodName + tmp := strings.Split(f.Name(), ".") + if len(tmp) > 0 { + fName = tmp[len(tmp)-1] + } + } + } + + return fName +} + +// CallerName returns the name of the function at the specified level +// level == 0 means current method name +func CallerName(level int) string { + return callerInfo(2 + level) +} diff --git a/vendor/github.com/docker/libnetwork/common/setmatrix.go b/vendor/github.com/docker/libnetwork/common/setmatrix.go new file mode 100644 index 0000000000..72be5bbbfc --- /dev/null +++ b/vendor/github.com/docker/libnetwork/common/setmatrix.go @@ -0,0 +1,135 @@ +package common + +import ( + "sync" + + mapset "github.com/deckarep/golang-set" +) + +// SetMatrix is a map of Sets +type SetMatrix interface { + // Get returns the members of the set for a specific key as a slice. + Get(key string) ([]interface{}, bool) + // Contains is used to verify if an element is in a set for a specific key + // the first return is true if the element is in the set + // the second return is true if there is a set for the key + Contains(key string, value interface{}) (bool, bool) + // Insert inserts the value in the set of a key + // returns true if the value is inserted (was not already in the set), false otherwise + // returns also the length of the set for the key + Insert(key string, value interface{}) (bool, int) + // Remove removes the value in the set for a specific key + // returns true if the value is deleted, false otherwise + // returns also the length of the set for the key + Remove(key string, value interface{}) (bool, int) + // Cardinality returns the number of elements in the set for a key + // returns false if the set is not present + Cardinality(key string) (int, bool) + // String returns the string version of the set, empty otherwise + // returns false if the set is not present + String(key string) (string, bool) + // Keys returns all the keys in the map + Keys() []string +} + +type setMatrix struct { + matrix map[string]mapset.Set + + sync.Mutex +} + +// NewSetMatrix creates a new set matrix object +func NewSetMatrix() SetMatrix { + s := &setMatrix{} + s.init() + return s +} + +func (s *setMatrix) init() { + s.matrix = make(map[string]mapset.Set) +}
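A short usage sketch of the interface above (the key and values are invented for illustration; the paired booleans follow the contract documented on Contains):

    package main

    import (
        "fmt"

        "github.com/docker/libnetwork/common"
    )

    func main() {
        m := common.NewSetMatrix()

        inserted, size := m.Insert("svc1", "10.0.0.2")
        fmt.Println(inserted, size) // true 1

        inserted, size = m.Insert("svc1", "10.0.0.2")
        fmt.Println(inserted, size) // false 1: already in the set

        inSet, exists := m.Contains("svc1", "10.0.0.2")
        fmt.Println(inSet, exists) // true true

        removed, size := m.Remove("svc1", "10.0.0.2")
        fmt.Println(removed, size) // true 0: empty sets are dropped from the matrix
    }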
+func (s *setMatrix) Get(key string) ([]interface{}, bool) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + return nil, ok + } + return set.ToSlice(), ok +} + +func (s *setMatrix) Contains(key string, value interface{}) (bool, bool) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + return false, ok + } + return set.Contains(value), ok +} + +func (s *setMatrix) Insert(key string, value interface{}) (bool, int) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + s.matrix[key] = mapset.NewSet() + s.matrix[key].Add(value) + return true, 1 + } + + return set.Add(value), set.Cardinality() +} + +func (s *setMatrix) Remove(key string, value interface{}) (bool, int) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + return false, 0 + } + + var removed bool + if set.Contains(value) { + set.Remove(value) + removed = true + // If the set is empty remove it from the matrix + if set.Cardinality() == 0 { + delete(s.matrix, key) + } + } + + return removed, set.Cardinality() +} + +func (s *setMatrix) Cardinality(key string) (int, bool) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + return 0, ok + } + + return set.Cardinality(), ok +} + +func (s *setMatrix) String(key string) (string, bool) { + s.Lock() + defer s.Unlock() + set, ok := s.matrix[key] + if !ok { + return "", ok + } + return set.String(), ok +} + +func (s *setMatrix) Keys() []string { + s.Lock() + defer s.Unlock() + keys := make([]string, 0, len(s.matrix)) + for k := range s.matrix { + keys = append(keys, k) + } + return keys +} diff --git a/vendor/github.com/docker/libnetwork/config/config.go b/vendor/github.com/docker/libnetwork/config/config.go new file mode 100644 index 0000000000..4ba85a9c61 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/config/config.go @@ -0,0 +1,307 @@ +package config + +import ( + "strings" + + "github.com/BurntSushi/toml" + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/go-connections/tlsconfig" + "github.com/docker/libkv/store" + "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/ipamutils" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/osl" + "github.com/sirupsen/logrus" +) + +const ( + warningThNetworkControlPlaneMTU = 1500 + minimumNetworkControlPlaneMTU = 500 +) + +// Config encapsulates configurations of various Libnetwork components +type Config struct { + Daemon DaemonCfg + Cluster ClusterCfg + Scopes map[string]*datastore.ScopeCfg + ActiveSandboxes map[string]interface{} + PluginGetter plugingetter.PluginGetter +} + +// DaemonCfg represents libnetwork core configuration +type DaemonCfg struct { + Debug bool + Experimental bool + DataDir string + DefaultNetwork string + DefaultDriver string + Labels []string + DriverCfg map[string]interface{} + ClusterProvider cluster.Provider + NetworkControlPlaneMTU int + DefaultAddressPool []*ipamutils.NetworkToSplit +} + +// ClusterCfg represents cluster configuration +type ClusterCfg struct { + Watcher discovery.Watcher + Address string + Discovery string + Heartbeat uint64 +} + +// LoadDefaultScopes loads default scope configs for scopes which +// don't have explicit user-specified configs. +func (c *Config) LoadDefaultScopes(dataDir string) { + for k, v := range datastore.DefaultScopes(dataDir) { + if _, ok := c.Scopes[k]; !ok { + c.Scopes[k] = v + } + } +} + +// ParseConfig parses the libnetwork configuration file +func ParseConfig(tomlCfgFile string) (*Config, error) { + cfg := &Config{ + Scopes: map[string]*datastore.ScopeCfg{}, + } + + if _, err := toml.DecodeFile(tomlCfgFile, cfg); err != nil { + return nil, err + } + + cfg.LoadDefaultScopes(cfg.Daemon.DataDir) + return cfg, nil +} + +// ParseConfigOptions parses the configuration options and returns +// a reference to the corresponding Config structure +func ParseConfigOptions(cfgOptions ...Option) *Config { + cfg := &Config{ + Daemon: DaemonCfg{ + DriverCfg: make(map[string]interface{}), + }, + Scopes: make(map[string]*datastore.ScopeCfg), + } + + cfg.ProcessOptions(cfgOptions...)
+ cfg.LoadDefaultScopes(cfg.Daemon.DataDir) + + return cfg +} + +// Option is an option setter function type used to pass various configurations +// to the controller +type Option func(c *Config) + +// OptionDefaultNetwork function returns an option setter for a default network +func OptionDefaultNetwork(dn string) Option { + return func(c *Config) { + logrus.Debugf("Option DefaultNetwork: %s", dn) + c.Daemon.DefaultNetwork = strings.TrimSpace(dn) + } +} + +// OptionDefaultDriver function returns an option setter for default driver +func OptionDefaultDriver(dd string) Option { + return func(c *Config) { + logrus.Debugf("Option DefaultDriver: %s", dd) + c.Daemon.DefaultDriver = strings.TrimSpace(dd) + } +} + +// OptionDefaultAddressPoolConfig function returns an option setter for default address pool +func OptionDefaultAddressPoolConfig(addressPool []*ipamutils.NetworkToSplit) Option { + return func(c *Config) { + c.Daemon.DefaultAddressPool = addressPool + } +} + +// OptionDriverConfig returns an option setter for driver configuration. +func OptionDriverConfig(networkType string, config map[string]interface{}) Option { + return func(c *Config) { + c.Daemon.DriverCfg[networkType] = config + } +} + +// OptionLabels function returns an option setter for labels +func OptionLabels(labels []string) Option { + return func(c *Config) { + for _, label := range labels { + if strings.HasPrefix(label, netlabel.Prefix) { + c.Daemon.Labels = append(c.Daemon.Labels, label) + } + } + } +} + +// OptionKVProvider function returns an option setter for kvstore provider +func OptionKVProvider(provider string) Option { + return func(c *Config) { + logrus.Debugf("Option OptionKVProvider: %s", provider) + if _, ok := c.Scopes[datastore.GlobalScope]; !ok { + c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{} + } + c.Scopes[datastore.GlobalScope].Client.Provider = strings.TrimSpace(provider) + } +} + +// OptionKVProviderURL function returns an option setter for kvstore url +func OptionKVProviderURL(url string) Option { + return func(c *Config) { + logrus.Debugf("Option OptionKVProviderURL: %s", url) + if _, ok := c.Scopes[datastore.GlobalScope]; !ok { + c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{} + } + c.Scopes[datastore.GlobalScope].Client.Address = strings.TrimSpace(url) + } +} + +// OptionKVOpts function returns an option setter for kvstore options +func OptionKVOpts(opts map[string]string) Option { + return func(c *Config) { + if opts["kv.cacertfile"] != "" && opts["kv.certfile"] != "" && opts["kv.keyfile"] != "" { + logrus.Info("Option Initializing KV with TLS") + tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ + CAFile: opts["kv.cacertfile"], + CertFile: opts["kv.certfile"], + KeyFile: opts["kv.keyfile"], + }) + if err != nil { + logrus.Errorf("Unable to set up TLS: %s", err) + return + } + if _, ok := c.Scopes[datastore.GlobalScope]; !ok { + c.Scopes[datastore.GlobalScope] = &datastore.ScopeCfg{} + } + if c.Scopes[datastore.GlobalScope].Client.Config == nil { + c.Scopes[datastore.GlobalScope].Client.Config = &store.Config{TLS: tlsConfig} + } else { + c.Scopes[datastore.GlobalScope].Client.Config.TLS = tlsConfig + } + // Workaround libkv/etcd bug for https + c.Scopes[datastore.GlobalScope].Client.Config.ClientTLS = &store.ClientTLSConfig{ + CACertFile: opts["kv.cacertfile"], + CertFile: opts["kv.certfile"], + KeyFile: opts["kv.keyfile"], + } + } else { + logrus.Info("Option Initializing KV without TLS") + } + } +} + +// OptionDiscoveryWatcher function returns an option setter for 
discovery watcher +func OptionDiscoveryWatcher(watcher discovery.Watcher) Option { + return func(c *Config) { + c.Cluster.Watcher = watcher + } +} + +// OptionDiscoveryAddress function returns an option setter for self discovery address +func OptionDiscoveryAddress(address string) Option { + return func(c *Config) { + c.Cluster.Address = address + } +} + +// OptionDataDir function returns an option setter for data folder +func OptionDataDir(dataDir string) Option { + return func(c *Config) { + c.Daemon.DataDir = dataDir + } +} + +// OptionExecRoot function returns an option setter for exec root folder +func OptionExecRoot(execRoot string) Option { + return func(c *Config) { + osl.SetBasePath(execRoot) + } +} + +// OptionPluginGetter returns a plugingetter for remote drivers. +func OptionPluginGetter(pg plugingetter.PluginGetter) Option { + return func(c *Config) { + c.PluginGetter = pg + } +} + +// OptionExperimental function returns an option setter for experimental daemon +func OptionExperimental(exp bool) Option { + return func(c *Config) { + logrus.Debugf("Option Experimental: %v", exp) + c.Daemon.Experimental = exp + } +} + +// OptionNetworkControlPlaneMTU function returns an option setter for control plane MTU +func OptionNetworkControlPlaneMTU(exp int) Option { + return func(c *Config) { + logrus.Debugf("Network Control Plane MTU: %d", exp) + if exp < warningThNetworkControlPlaneMTU { + logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave,"+ + " defaulting to minimum value (%d)", exp, minimumNetworkControlPlaneMTU) + if exp < minimumNetworkControlPlaneMTU { + exp = minimumNetworkControlPlaneMTU + } + } + c.Daemon.NetworkControlPlaneMTU = exp + } +} + +// ProcessOptions processes options and stores it in config +func (c *Config) ProcessOptions(options ...Option) { + for _, opt := range options { + if opt != nil { + opt(c) + } + } +} + +// IsValidName validates configuration objects supported by libnetwork +func IsValidName(name string) bool { + return strings.TrimSpace(name) != "" +} + +// OptionLocalKVProvider function returns an option setter for kvstore provider +func OptionLocalKVProvider(provider string) Option { + return func(c *Config) { + logrus.Debugf("Option OptionLocalKVProvider: %s", provider) + if _, ok := c.Scopes[datastore.LocalScope]; !ok { + c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{} + } + c.Scopes[datastore.LocalScope].Client.Provider = strings.TrimSpace(provider) + } +} + +// OptionLocalKVProviderURL function returns an option setter for kvstore url +func OptionLocalKVProviderURL(url string) Option { + return func(c *Config) { + logrus.Debugf("Option OptionLocalKVProviderURL: %s", url) + if _, ok := c.Scopes[datastore.LocalScope]; !ok { + c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{} + } + c.Scopes[datastore.LocalScope].Client.Address = strings.TrimSpace(url) + } +} + +// OptionLocalKVProviderConfig function returns an option setter for kvstore config +func OptionLocalKVProviderConfig(config *store.Config) Option { + return func(c *Config) { + logrus.Debugf("Option OptionLocalKVProviderConfig: %v", config) + if _, ok := c.Scopes[datastore.LocalScope]; !ok { + c.Scopes[datastore.LocalScope] = &datastore.ScopeCfg{} + } + c.Scopes[datastore.LocalScope].Client.Config = config + } +} + +// OptionActiveSandboxes function returns an option setter for passing the sandboxes +// which were active during previous daemon life +func OptionActiveSandboxes(sandboxes map[string]interface{}) Option { + return 
func(c *Config) { + c.ActiveSandboxes = sandboxes + } +} diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go new file mode 100644 index 0000000000..225f7fa609 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/controller.go @@ -0,0 +1,1333 @@ +/* +Package libnetwork provides the basic functionality and extension points to +create network namespaces and allocate interfaces for containers to use. + + networkType := "bridge" + + // Create a new controller instance + driverOptions := options.Generic{} + genericOption := make(map[string]interface{}) + genericOption[netlabel.GenericData] = driverOptions + controller, err := libnetwork.New(config.OptionDriverConfig(networkType, genericOption)) + if err != nil { + return + } + + // Create a network for containers to join. + // NewNetwork accepts Variadic optional arguments that libnetwork and Drivers can make use of + network, err := controller.NewNetwork(networkType, "network1", "") + if err != nil { + return + } + + // For each new container: allocate IP and interfaces. The returned network + // settings will be used for container infos (inspect and such), as well as + // iptables rules for port publishing. This info is contained or accessible + // from the returned endpoint. + ep, err := network.CreateEndpoint("Endpoint1") + if err != nil { + return + } + + // Create the sandbox for the container. + // NewSandbox accepts Variadic optional arguments which libnetwork can use. + sbx, err := controller.NewSandbox("container1", + libnetwork.OptionHostname("test"), + libnetwork.OptionDomainname("docker.io")) + + // A sandbox can join the endpoint via the join api. + err = ep.Join(sbx) + if err != nil { + return + } +*/ +package libnetwork + +import ( + "container/heap" + "fmt" + "net" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/discovery" + "github.com/docker/docker/pkg/locker" + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/docker/pkg/plugins" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/libnetwork/cluster" + "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/diagnostic" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/drvregistry" + "github.com/docker/libnetwork/hostdiscovery" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// NetworkController provides the interface for controller instance which manages +// networks. +type NetworkController interface { + // ID provides a unique identity for the controller + ID() string + + // BuiltinDrivers returns list of builtin drivers + BuiltinDrivers() []string + + // BuiltinIPAMDrivers returns list of builtin ipam drivers + BuiltinIPAMDrivers() []string + + // Config method returns the bootup configuration for the controller + Config() config.Config + + // Create a new network. The options parameter carries network specific options. + NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error) + + // Networks returns the list of Network(s) managed by this controller. + Networks() []Network + + // WalkNetworks uses the provided function to walk the Network(s) managed by this controller. 
+	WalkNetworks(walker NetworkWalker)
+
+	// NetworkByName returns the Network which has the passed name. If not found, the error ErrNoSuchNetwork is returned.
+	NetworkByName(name string) (Network, error)
+
+	// NetworkByID returns the Network which has the passed id. If not found, the error ErrNoSuchNetwork is returned.
+	NetworkByID(id string) (Network, error)
+
+	// NewSandbox creates a new network sandbox for the passed container id
+	NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error)
+
+	// Sandboxes returns the list of Sandbox(s) managed by this controller.
+	Sandboxes() []Sandbox
+
+	// WalkSandboxes uses the provided function to walk the Sandbox(s) managed by this controller.
+	WalkSandboxes(walker SandboxWalker)
+
+	// SandboxByID returns the Sandbox which has the passed id. If not found, a types.NotFoundError is returned.
+	SandboxByID(id string) (Sandbox, error)
+
+	// SandboxDestroy destroys a sandbox given a container ID
+	SandboxDestroy(id string) error
+
+	// Stop stops the network controller
+	Stop()
+
+	// ReloadConfiguration updates the controller configuration
+	ReloadConfiguration(cfgOptions ...config.Option) error
+
+	// SetClusterProvider sets the cluster provider
+	SetClusterProvider(provider cluster.Provider)
+
+	// AgentInitWait waits for agent initialization to complete in the libnetwork controller
+	AgentInitWait()
+
+	// AgentStopWait waits for the agent to stop, if it is running
+	AgentStopWait()
+
+	// SetKeys configures the encryption key for gossip and the overlay data path
+	SetKeys(keys []*types.EncryptionKey) error
+
+	// StartDiagnostic starts the network diagnostic mode
+	StartDiagnostic(port int)
+	// StopDiagnostic stops the network diagnostic mode
+	StopDiagnostic()
+	// IsDiagnosticEnabled returns true if the diagnostic mode is enabled
+	IsDiagnosticEnabled() bool
+}
+
+// NetworkWalker is a client provided function which will be used to walk the Networks.
+// When the function returns true, the walk will stop.
+type NetworkWalker func(nw Network) bool
+
+// SandboxWalker is a client provided function which will be used to walk the Sandboxes.
+// When the function returns true, the walk will stop.
+type SandboxWalker func(sb Sandbox) bool
+
+type sandboxTable map[string]*sandbox
+
+type controller struct {
+	id                     string
+	drvRegistry            *drvregistry.DrvRegistry
+	sandboxes              sandboxTable
+	cfg                    *config.Config
+	stores                 []datastore.DataStore
+	discovery              hostdiscovery.HostDiscovery
+	extKeyListener         net.Listener
+	watchCh                chan *endpoint
+	unWatchCh              chan *endpoint
+	svcRecords             map[string]svcInfo
+	nmap                   map[string]*netWatch
+	serviceBindings        map[serviceKey]*service
+	defOsSbox              osl.Sandbox
+	ingressSandbox         *sandbox
+	sboxOnce               sync.Once
+	agent                  *agent
+	networkLocker          *locker.Locker
+	agentInitDone          chan struct{}
+	agentStopDone          chan struct{}
+	keys                   []*types.EncryptionKey
+	clusterConfigAvailable bool
+	DiagnosticServer       *diagnostic.Server
+	sync.Mutex
+}
+
+type initializer struct {
+	fn    drvregistry.InitFunc
+	ntype string
+}
+
+// New creates a new instance of network controller.
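+//
+// For illustration only, a minimal construction sketch (the data dir shown is
+// a hypothetical path; the setters are config.Option functions defined in the
+// config package above):
+//
+//	controller, err := libnetwork.New(
+//		config.OptionDataDir("/var/lib/docker"),
+//		config.OptionExperimental(false),
+//	)
+//	if err != nil {
+//		return err
+//	}
+//	defer controller.Stop()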
+func New(cfgOptions ...config.Option) (NetworkController, error) {
+	c := &controller{
+		id:               stringid.GenerateRandomID(),
+		cfg:              config.ParseConfigOptions(cfgOptions...),
+		sandboxes:        sandboxTable{},
+		svcRecords:       make(map[string]svcInfo),
+		serviceBindings:  make(map[serviceKey]*service),
+		agentInitDone:    make(chan struct{}),
+		networkLocker:    locker.New(),
+		DiagnosticServer: diagnostic.New(),
+	}
+	c.DiagnosticServer.Init()
+
+	if err := c.initStores(); err != nil {
+		return nil, err
+	}
+
+	drvRegistry, err := drvregistry.New(c.getStore(datastore.LocalScope), c.getStore(datastore.GlobalScope), c.RegisterDriver, nil, c.cfg.PluginGetter)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, i := range getInitializers(c.cfg.Daemon.Experimental) {
+		var dcfg map[string]interface{}
+
+		// External plugins don't need config passed through the daemon;
+		// they can bootstrap themselves.
+		if i.ntype != "remote" {
+			dcfg = c.makeDriverConfig(i.ntype)
+		}
+
+		if err := drvRegistry.AddDriver(i.ntype, i.fn, dcfg); err != nil {
+			return nil, err
+		}
+	}
+
+	if err = initIPAMDrivers(drvRegistry, nil, c.getStore(datastore.GlobalScope), c.cfg.Daemon.DefaultAddressPool); err != nil {
+		return nil, err
+	}
+
+	c.drvRegistry = drvRegistry
+
+	if c.cfg != nil && c.cfg.Cluster.Watcher != nil {
+		if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil {
+			// Failing to initialize discovery is a bad situation to be in,
+			// but it must not fail creation of the controller.
+			logrus.Errorf("Failed to initialize discovery: %v", err)
+		}
+	}
+
+	c.WalkNetworks(populateSpecial)
+
+	// Reserve pools first, before doing cleanup. Otherwise the
+	// endpoint/network and sandbox cleanups below will generate
+	// many unnecessary warnings.
+	c.reservePools()
+
+	// Clean up resources
+	c.sandboxCleanup(c.cfg.ActiveSandboxes)
+	c.cleanupLocalEndpoints()
+	c.networkCleanup()
+
+	if err := c.startExternalKeyListener(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
+
+func (c *controller) SetClusterProvider(provider cluster.Provider) {
+	var sameProvider bool
+	c.Lock()
+	// Avoid spawning multiple goroutines for the same cluster provider
+	if c.cfg.Daemon.ClusterProvider == provider {
+		// If the cluster provider is already set, there is already a goroutine
+		// spawned that is listening for events, so nothing to do here
+		sameProvider = true
+	} else {
+		c.cfg.Daemon.ClusterProvider = provider
+	}
+	c.Unlock()
+
+	if provider == nil || sameProvider {
+		return
+	}
+	// We don't want to spawn a new goroutine if the previous one has not exited yet
+	c.AgentStopWait()
+	go c.clusterAgentInit()
+}
+
+func isValidClusteringIP(addr string) bool {
+	return addr != "" && !net.ParseIP(addr).IsLoopback() && !net.ParseIP(addr).IsUnspecified()
+}
+
+// The libnetwork side of the agent depends on the keys. On the first receipt
+// of keys, set up the agent; for subsequent key sets, handle the key change.
+func (c *controller) SetKeys(keys []*types.EncryptionKey) error {
+	subsysKeys := make(map[string]int)
+	for _, key := range keys {
+		if key.Subsystem != subsysGossip &&
+			key.Subsystem != subsysIPSec {
+			return fmt.Errorf("key received for unrecognized subsystem")
+		}
+		subsysKeys[key.Subsystem]++
+	}
+	for s, count := range subsysKeys {
+		if count != keyringSize {
+			return fmt.Errorf("incorrect number of keys for subsystem %v", s)
+		}
+	}
+
+	agent := c.getAgent()
+
+	if agent == nil {
+		c.Lock()
+		c.keys = keys
+		c.Unlock()
+		return nil
+	}
+	return c.handleKeyChange(keys)
+}
+
+func (c *controller) getAgent() *agent {
+	c.Lock()
+	defer c.Unlock()
+	return c.agent
+}
+
+func (c *controller) clusterAgentInit() {
+	clusterProvider := c.cfg.Daemon.ClusterProvider
+	var keysAvailable bool
+	for {
+		eventType := <-clusterProvider.ListenClusterEvents()
+		// The events EventSocketChange, EventNodeReady and EventNetworkKeysAvailable are not ordered;
+		// when all the conditions for agent initialization are met, proceed with it
+		switch eventType {
+		case cluster.EventNetworkKeysAvailable:
+			// Validate that the keys are actually available before starting the initialization.
+			// This handles old spurious messages left on the channel
+			c.Lock()
+			keysAvailable = c.keys != nil
+			c.Unlock()
+			fallthrough
+		case cluster.EventSocketChange, cluster.EventNodeReady:
+			if keysAvailable && !c.isDistributedControl() {
+				c.agentOperationStart()
+				if err := c.agentSetup(clusterProvider); err != nil {
+					c.agentStopComplete()
+				} else {
+					c.agentInitComplete()
+				}
+			}
+		case cluster.EventNodeLeave:
+			keysAvailable = false
+			c.agentOperationStart()
+			c.Lock()
+			c.keys = nil
+			c.Unlock()
+
+			// We are leaving the cluster. Make sure we
+			// close the gossip so that we stop all
+			// incoming gossip updates before cleaning up
+			// any remaining service bindings. But do it
+			// before deleting the networks, since the
+			// networks should still be present while the
+			// service bindings are cleaned up
+			c.agentClose()
+			c.cleanupServiceDiscovery("")
+			c.cleanupServiceBindings("")
+
+			c.agentStopComplete()
+
+			return
+		}
+	}
+}
+
+// AgentInitWait waits for agent initialization to complete in the controller.
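+// It returns immediately when no agent initialization is in progress;
+// otherwise it blocks until agentInitComplete closes the init-done channel.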
+func (c *controller) AgentInitWait() { + c.Lock() + agentInitDone := c.agentInitDone + c.Unlock() + + if agentInitDone != nil { + <-agentInitDone + } +} + +// AgentStopWait waits for the Agent stop to be completed in the controller +func (c *controller) AgentStopWait() { + c.Lock() + agentStopDone := c.agentStopDone + c.Unlock() + if agentStopDone != nil { + <-agentStopDone + } +} + +// agentOperationStart marks the start of an Agent Init or Agent Stop +func (c *controller) agentOperationStart() { + c.Lock() + if c.agentInitDone == nil { + c.agentInitDone = make(chan struct{}) + } + if c.agentStopDone == nil { + c.agentStopDone = make(chan struct{}) + } + c.Unlock() +} + +// agentInitComplete notifies the successful completion of the Agent initialization +func (c *controller) agentInitComplete() { + c.Lock() + if c.agentInitDone != nil { + close(c.agentInitDone) + c.agentInitDone = nil + } + c.Unlock() +} + +// agentStopComplete notifies the successful completion of the Agent stop +func (c *controller) agentStopComplete() { + c.Lock() + if c.agentStopDone != nil { + close(c.agentStopDone) + c.agentStopDone = nil + } + c.Unlock() +} + +func (c *controller) makeDriverConfig(ntype string) map[string]interface{} { + if c.cfg == nil { + return nil + } + + config := make(map[string]interface{}) + + for _, label := range c.cfg.Daemon.Labels { + if !strings.HasPrefix(netlabel.Key(label), netlabel.DriverPrefix+"."+ntype) { + continue + } + + config[netlabel.Key(label)] = netlabel.Value(label) + } + + drvCfg, ok := c.cfg.Daemon.DriverCfg[ntype] + if ok { + for k, v := range drvCfg.(map[string]interface{}) { + config[k] = v + } + } + + for k, v := range c.cfg.Scopes { + if !v.IsValid() { + continue + } + config[netlabel.MakeKVClient(k)] = discoverapi.DatastoreConfigData{ + Scope: k, + Provider: v.Client.Provider, + Address: v.Client.Address, + Config: v.Client.Config, + } + } + + return config +} + +var procReloadConfig = make(chan (bool), 1) + +func (c *controller) ReloadConfiguration(cfgOptions ...config.Option) error { + procReloadConfig <- true + defer func() { <-procReloadConfig }() + + // For now we accept the configuration reload only as a mean to provide a global store config after boot. + // Refuse the configuration if it alters an existing datastore client configuration. + update := false + cfg := config.ParseConfigOptions(cfgOptions...) 
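+	// A reload may only add scopes: the checks below refuse any update that
+	// would drop or modify an existing datastore client configuration.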
+ + for s := range c.cfg.Scopes { + if _, ok := cfg.Scopes[s]; !ok { + return types.ForbiddenErrorf("cannot accept new configuration because it removes an existing datastore client") + } + } + for s, nSCfg := range cfg.Scopes { + if eSCfg, ok := c.cfg.Scopes[s]; ok { + if eSCfg.Client.Provider != nSCfg.Client.Provider || + eSCfg.Client.Address != nSCfg.Client.Address { + return types.ForbiddenErrorf("cannot accept new configuration because it modifies an existing datastore client") + } + } else { + if err := c.initScopedStore(s, nSCfg); err != nil { + return err + } + update = true + } + } + if !update { + return nil + } + + c.Lock() + c.cfg = cfg + c.Unlock() + + var dsConfig *discoverapi.DatastoreConfigData + for scope, sCfg := range cfg.Scopes { + if scope == datastore.LocalScope || !sCfg.IsValid() { + continue + } + dsConfig = &discoverapi.DatastoreConfigData{ + Scope: scope, + Provider: sCfg.Client.Provider, + Address: sCfg.Client.Address, + Config: sCfg.Client.Config, + } + break + } + if dsConfig == nil { + return nil + } + + c.drvRegistry.WalkIPAMs(func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool { + err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig) + if err != nil { + logrus.Errorf("Failed to set datastore in driver %s: %v", name, err) + } + return false + }) + + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + err := driver.DiscoverNew(discoverapi.DatastoreConfig, *dsConfig) + if err != nil { + logrus.Errorf("Failed to set datastore in driver %s: %v", name, err) + } + return false + }) + + if c.discovery == nil && c.cfg.Cluster.Watcher != nil { + if err := c.initDiscovery(c.cfg.Cluster.Watcher); err != nil { + logrus.Errorf("Failed to Initialize Discovery after configuration update: %v", err) + } + } + + return nil +} + +func (c *controller) ID() string { + return c.id +} + +func (c *controller) BuiltinDrivers() []string { + drivers := []string{} + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + if driver.IsBuiltIn() { + drivers = append(drivers, name) + } + return false + }) + return drivers +} + +func (c *controller) BuiltinIPAMDrivers() []string { + drivers := []string{} + c.drvRegistry.WalkIPAMs(func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool { + if driver.IsBuiltIn() { + drivers = append(drivers, name) + } + return false + }) + return drivers +} + +func (c *controller) validateHostDiscoveryConfig() bool { + if c.cfg == nil || c.cfg.Cluster.Discovery == "" || c.cfg.Cluster.Address == "" { + return false + } + return true +} + +func (c *controller) clusterHostID() string { + c.Lock() + defer c.Unlock() + if c.cfg == nil || c.cfg.Cluster.Address == "" { + return "" + } + addr := strings.Split(c.cfg.Cluster.Address, ":") + return addr[0] +} + +func (c *controller) isNodeAlive(node string) bool { + if c.discovery == nil { + return false + } + + nodes := c.discovery.Fetch() + for _, n := range nodes { + if n.String() == node { + return true + } + } + + return false +} + +func (c *controller) initDiscovery(watcher discovery.Watcher) error { + if c.cfg == nil { + return fmt.Errorf("discovery initialization requires a valid configuration") + } + + c.discovery = hostdiscovery.NewHostDiscovery(watcher) + return c.discovery.Watch(c.activeCallback, c.hostJoinCallback, c.hostLeaveCallback) +} + +func (c *controller) activeCallback() { + ds := c.getStore(datastore.GlobalScope) + if ds != nil && !ds.Active() { + 
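+		// The global store dropped out of the active state (for example
+		// because the backing KV store was restarted); re-arm the watches
+		// so updates start flowing again.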
ds.RestartWatch() + } +} + +func (c *controller) hostJoinCallback(nodes []net.IP) { + c.processNodeDiscovery(nodes, true) +} + +func (c *controller) hostLeaveCallback(nodes []net.IP) { + c.processNodeDiscovery(nodes, false) +} + +func (c *controller) processNodeDiscovery(nodes []net.IP, add bool) { + c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool { + c.pushNodeDiscovery(driver, capability, nodes, add) + return false + }) +} + +func (c *controller) pushNodeDiscovery(d driverapi.Driver, cap driverapi.Capability, nodes []net.IP, add bool) { + var self net.IP + if c.cfg != nil { + addr := strings.Split(c.cfg.Cluster.Address, ":") + self = net.ParseIP(addr[0]) + // if external kvstore is not configured, try swarm-mode config + if self == nil { + if agent := c.getAgent(); agent != nil { + self = net.ParseIP(agent.advertiseAddr) + } + } + } + + if d == nil || cap.ConnectivityScope != datastore.GlobalScope || nodes == nil { + return + } + + for _, node := range nodes { + nodeData := discoverapi.NodeDiscoveryData{Address: node.String(), Self: node.Equal(self)} + var err error + if add { + err = d.DiscoverNew(discoverapi.NodeDiscovery, nodeData) + } else { + err = d.DiscoverDelete(discoverapi.NodeDiscovery, nodeData) + } + if err != nil { + logrus.Debugf("discovery notification error: %v", err) + } + } +} + +func (c *controller) Config() config.Config { + c.Lock() + defer c.Unlock() + if c.cfg == nil { + return config.Config{} + } + return *c.cfg +} + +func (c *controller) isManager() bool { + c.Lock() + defer c.Unlock() + if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil { + return false + } + return c.cfg.Daemon.ClusterProvider.IsManager() +} + +func (c *controller) isAgent() bool { + c.Lock() + defer c.Unlock() + if c.cfg == nil || c.cfg.Daemon.ClusterProvider == nil { + return false + } + return c.cfg.Daemon.ClusterProvider.IsAgent() +} + +func (c *controller) isDistributedControl() bool { + return !c.isManager() && !c.isAgent() +} + +func (c *controller) GetPluginGetter() plugingetter.PluginGetter { + return c.drvRegistry.GetPluginGetter() +} + +func (c *controller) RegisterDriver(networkType string, driver driverapi.Driver, capability driverapi.Capability) error { + c.Lock() + hd := c.discovery + c.Unlock() + + if hd != nil { + c.pushNodeDiscovery(driver, capability, hd.Fetch(), true) + } + + c.agentDriverNotify(driver) + return nil +} + +// NewNetwork creates a new network of the specified network type. The options +// are network specific and modeled in a generic way. +func (c *controller) NewNetwork(networkType, name string, id string, options ...NetworkOption) (Network, error) { + if id != "" { + c.networkLocker.Lock(id) + defer c.networkLocker.Unlock(id) + + if _, err := c.NetworkByID(id); err == nil { + return nil, NetworkNameError(id) + } + } + + if !config.IsValidName(name) { + return nil, ErrInvalidName(name) + } + + if id == "" { + id = stringid.GenerateRandomID() + } + + defaultIpam := defaultIpamForNetworkType(networkType) + // Construct the network object + network := &network{ + name: name, + networkType: networkType, + generic: map[string]interface{}{netlabel.GenericData: make(map[string]string)}, + ipamType: defaultIpam, + id: id, + created: time.Now(), + ctrlr: c, + persist: true, + drvOnce: &sync.Once{}, + } + + network.processOptions(options...) 
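+
+	// The caller-supplied NetworkOptions have been applied above; validate
+	// the combined configuration before any driver or IPAM resources are
+	// allocated for the network.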
+ if err := network.validateConfiguration(); err != nil { + return nil, err + } + + var ( + cap *driverapi.Capability + err error + ) + + // Reset network types, force local scope and skip allocation and + // plumbing for configuration networks. Reset of the config-only + // network drivers is needed so that this special network is not + // usable by old engine versions. + if network.configOnly { + network.scope = datastore.LocalScope + network.networkType = "null" + goto addToStore + } + + _, cap, err = network.resolveDriver(network.networkType, true) + if err != nil { + return nil, err + } + + if network.scope == datastore.LocalScope && cap.DataScope == datastore.GlobalScope { + return nil, types.ForbiddenErrorf("cannot downgrade network scope for %s networks", networkType) + + } + if network.ingress && cap.DataScope != datastore.GlobalScope { + return nil, types.ForbiddenErrorf("Ingress network can only be global scope network") + } + + // At this point the network scope is still unknown if not set by user + if (cap.DataScope == datastore.GlobalScope || network.scope == datastore.SwarmScope) && + !c.isDistributedControl() && !network.dynamic { + if c.isManager() { + // For non-distributed controlled environment, globalscoped non-dynamic networks are redirected to Manager + return nil, ManagerRedirectError(name) + } + return nil, types.ForbiddenErrorf("Cannot create a multi-host network from a worker node. Please create the network from a manager node.") + } + + if network.scope == datastore.SwarmScope && c.isDistributedControl() { + return nil, types.ForbiddenErrorf("cannot create a swarm scoped network when swarm is not active") + } + + // Make sure we have a driver available for this network type + // before we allocate anything. + if _, err := network.driver(true); err != nil { + return nil, err + } + + // From this point on, we need the network specific configuration, + // which may come from a configuration-only network + if network.configFrom != "" { + t, err := c.getConfigNetwork(network.configFrom) + if err != nil { + return nil, types.NotFoundErrorf("configuration network %q does not exist", network.configFrom) + } + if err := t.applyConfigurationTo(network); err != nil { + return nil, types.InternalErrorf("Failed to apply configuration: %v", err) + } + defer func() { + if err == nil { + if err := t.getEpCnt().IncEndpointCnt(); err != nil { + logrus.Warnf("Failed to update reference count for configuration network %q on creation of network %q: %v", + t.Name(), network.Name(), err) + } + } + }() + } + + err = network.ipamAllocate() + if err != nil { + return nil, err + } + defer func() { + if err != nil { + network.ipamRelease() + } + }() + + err = c.addNetwork(network) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + if e := network.deleteNetwork(); e != nil { + logrus.Warnf("couldn't roll back driver network on network %s creation failure: %v", network.name, err) + } + } + }() + +addToStore: + // First store the endpoint count, then the network. To avoid to + // end up with a datastore containing a network and not an epCnt, + // in case of an ungraceful shutdown during this function call. 
+ epCnt := &endpointCnt{n: network} + if err = c.updateToStore(epCnt); err != nil { + return nil, err + } + defer func() { + if err != nil { + if e := c.deleteFromStore(epCnt); e != nil { + logrus.Warnf("could not rollback from store, epCnt %v on failure (%v): %v", epCnt, err, e) + } + } + }() + + network.epCnt = epCnt + if err = c.updateToStore(network); err != nil { + return nil, err + } + defer func() { + if err != nil { + if e := c.deleteFromStore(network); e != nil { + logrus.Warnf("could not rollback from store, network %v on failure (%v): %v", network, err, e) + } + } + }() + + if network.configOnly { + return network, nil + } + + joinCluster(network) + defer func() { + if err != nil { + network.cancelDriverWatches() + if e := network.leaveCluster(); e != nil { + logrus.Warnf("Failed to leave agent cluster on network %s on failure (%v): %v", network.name, err, e) + } + } + }() + + if len(network.loadBalancerIP) != 0 { + if err = network.createLoadBalancerSandbox(); err != nil { + return nil, err + } + } + + if !c.isDistributedControl() { + c.Lock() + arrangeIngressFilterRule() + c.Unlock() + } + + c.arrangeUserFilterRule() + + return network, nil +} + +var joinCluster NetworkWalker = func(nw Network) bool { + n := nw.(*network) + if n.configOnly { + return false + } + if err := n.joinCluster(); err != nil { + logrus.Errorf("Failed to join network %s (%s) into agent cluster: %v", n.Name(), n.ID(), err) + } + n.addDriverWatches() + return false +} + +func (c *controller) reservePools() { + networks, err := c.getNetworksForScope(datastore.LocalScope) + if err != nil { + logrus.Warnf("Could not retrieve networks from local store during ipam allocation for existing networks: %v", err) + return + } + + for _, n := range networks { + if n.configOnly { + continue + } + if !doReplayPoolReserve(n) { + continue + } + // Construct pseudo configs for the auto IP case + autoIPv4 := (len(n.ipamV4Config) == 0 || (len(n.ipamV4Config) == 1 && n.ipamV4Config[0].PreferredPool == "")) && len(n.ipamV4Info) > 0 + autoIPv6 := (len(n.ipamV6Config) == 0 || (len(n.ipamV6Config) == 1 && n.ipamV6Config[0].PreferredPool == "")) && len(n.ipamV6Info) > 0 + if autoIPv4 { + n.ipamV4Config = []*IpamConf{{PreferredPool: n.ipamV4Info[0].Pool.String()}} + } + if n.enableIPv6 && autoIPv6 { + n.ipamV6Config = []*IpamConf{{PreferredPool: n.ipamV6Info[0].Pool.String()}} + } + // Account current network gateways + for i, c := range n.ipamV4Config { + if c.Gateway == "" && n.ipamV4Info[i].Gateway != nil { + c.Gateway = n.ipamV4Info[i].Gateway.IP.String() + } + } + if n.enableIPv6 { + for i, c := range n.ipamV6Config { + if c.Gateway == "" && n.ipamV6Info[i].Gateway != nil { + c.Gateway = n.ipamV6Info[i].Gateway.IP.String() + } + } + } + // Reserve pools + if err := n.ipamAllocate(); err != nil { + logrus.Warnf("Failed to allocate ipam pool(s) for network %q (%s): %v", n.Name(), n.ID(), err) + } + // Reserve existing endpoints' addresses + ipam, _, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + logrus.Warnf("Failed to retrieve ipam driver for network %q (%s) during address reservation", n.Name(), n.ID()) + continue + } + epl, err := n.getEndpointsFromStore() + if err != nil { + logrus.Warnf("Failed to retrieve list of current endpoints on network %q (%s)", n.Name(), n.ID()) + continue + } + for _, ep := range epl { + if err := ep.assignAddress(ipam, true, ep.Iface().AddressIPv6() != nil); err != nil { + logrus.Warnf("Failed to reserve current address for endpoint %q (%s) on network %q (%s)", + 
ep.Name(), ep.ID(), n.Name(), n.ID()) + } + } + } +} + +func doReplayPoolReserve(n *network) bool { + _, caps, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + logrus.Warnf("Failed to retrieve ipam driver for network %q (%s): %v", n.Name(), n.ID(), err) + return false + } + return caps.RequiresRequestReplay +} + +func (c *controller) addNetwork(n *network) error { + d, err := n.driver(true) + if err != nil { + return err + } + + // Create the network + if err := d.CreateNetwork(n.id, n.generic, n, n.getIPData(4), n.getIPData(6)); err != nil { + return err + } + + n.startResolver() + + return nil +} + +func (c *controller) Networks() []Network { + var list []Network + + networks, err := c.getNetworksFromStore() + if err != nil { + logrus.Error(err) + } + + for _, n := range networks { + if n.inDelete { + continue + } + list = append(list, n) + } + + return list +} + +func (c *controller) WalkNetworks(walker NetworkWalker) { + for _, n := range c.Networks() { + if walker(n) { + return + } + } +} + +func (c *controller) NetworkByName(name string) (Network, error) { + if name == "" { + return nil, ErrInvalidName(name) + } + var n Network + + s := func(current Network) bool { + if current.Name() == name { + n = current + return true + } + return false + } + + c.WalkNetworks(s) + + if n == nil { + return nil, ErrNoSuchNetwork(name) + } + + return n, nil +} + +func (c *controller) NetworkByID(id string) (Network, error) { + if id == "" { + return nil, ErrInvalidID(id) + } + + n, err := c.getNetworkFromStore(id) + if err != nil { + return nil, ErrNoSuchNetwork(id) + } + + return n, nil +} + +// NewSandbox creates a new sandbox for the passed container id +func (c *controller) NewSandbox(containerID string, options ...SandboxOption) (Sandbox, error) { + if containerID == "" { + return nil, types.BadRequestErrorf("invalid container ID") + } + + var sb *sandbox + c.Lock() + for _, s := range c.sandboxes { + if s.containerID == containerID { + // If not a stub, then we already have a complete sandbox. + if !s.isStub { + sbID := s.ID() + c.Unlock() + return nil, types.ForbiddenErrorf("container %s is already present in sandbox %s", containerID, sbID) + } + + // We already have a stub sandbox from the + // store. Make use of it so that we don't lose + // the endpoints from store but reset the + // isStub flag. + sb = s + sb.isStub = false + break + } + } + c.Unlock() + + sandboxID := stringid.GenerateRandomID() + if runtime.GOOS == "windows" { + sandboxID = containerID + } + + // Create sandbox and process options first. Key generation depends on an option + if sb == nil { + sb = &sandbox{ + id: sandboxID, + containerID: containerID, + endpoints: epHeap{}, + epPriority: map[string]int{}, + populatedEndpoints: map[string]struct{}{}, + config: containerConfig{}, + controller: c, + extDNS: []extDNSEntry{}, + } + } + + heap.Init(&sb.endpoints) + + sb.processOptions(options...) 
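+	// Only a single ingress sandbox may exist per controller; the check
+	// below (taken under the controller lock) rejects a second one.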
+ + c.Lock() + if sb.ingress && c.ingressSandbox != nil { + c.Unlock() + return nil, types.ForbiddenErrorf("ingress sandbox already present") + } + + if sb.ingress { + c.ingressSandbox = sb + sb.config.hostsPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/hosts") + sb.config.resolvConfPath = filepath.Join(c.cfg.Daemon.DataDir, "/network/files/resolv.conf") + sb.id = "ingress_sbox" + } + c.Unlock() + + var err error + defer func() { + if err != nil { + c.Lock() + if sb.ingress { + c.ingressSandbox = nil + } + c.Unlock() + } + }() + + if err = sb.setupResolutionFiles(); err != nil { + return nil, err + } + + if sb.config.useDefaultSandBox { + c.sboxOnce.Do(func() { + c.defOsSbox, err = osl.NewSandbox(sb.Key(), false, false) + }) + + if err != nil { + c.sboxOnce = sync.Once{} + return nil, fmt.Errorf("failed to create default sandbox: %v", err) + } + + sb.osSbox = c.defOsSbox + } + + if sb.osSbox == nil && !sb.config.useExternalKey { + if sb.osSbox, err = osl.NewSandbox(sb.Key(), !sb.config.useDefaultSandBox, false); err != nil { + return nil, fmt.Errorf("failed to create new osl sandbox: %v", err) + } + } + + c.Lock() + c.sandboxes[sb.id] = sb + c.Unlock() + defer func() { + if err != nil { + c.Lock() + delete(c.sandboxes, sb.id) + c.Unlock() + } + }() + + err = sb.storeUpdate() + if err != nil { + return nil, fmt.Errorf("failed to update the store state of sandbox: %v", err) + } + + return sb, nil +} + +func (c *controller) Sandboxes() []Sandbox { + c.Lock() + defer c.Unlock() + + list := make([]Sandbox, 0, len(c.sandboxes)) + for _, s := range c.sandboxes { + // Hide stub sandboxes from libnetwork users + if s.isStub { + continue + } + + list = append(list, s) + } + + return list +} + +func (c *controller) WalkSandboxes(walker SandboxWalker) { + for _, sb := range c.Sandboxes() { + if walker(sb) { + return + } + } +} + +func (c *controller) SandboxByID(id string) (Sandbox, error) { + if id == "" { + return nil, ErrInvalidID(id) + } + c.Lock() + s, ok := c.sandboxes[id] + c.Unlock() + if !ok { + return nil, types.NotFoundErrorf("sandbox %s not found", id) + } + return s, nil +} + +// SandboxDestroy destroys a sandbox given a container ID +func (c *controller) SandboxDestroy(id string) error { + var sb *sandbox + c.Lock() + for _, s := range c.sandboxes { + if s.containerID == id { + sb = s + break + } + } + c.Unlock() + + // It is not an error if sandbox is not available + if sb == nil { + return nil + } + + return sb.Delete() +} + +// SandboxContainerWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed containerID +func SandboxContainerWalker(out *Sandbox, containerID string) SandboxWalker { + return func(sb Sandbox) bool { + if sb.ContainerID() == containerID { + *out = sb + return true + } + return false + } +} + +// SandboxKeyWalker returns a Sandbox Walker function which looks for an existing Sandbox with the passed key +func SandboxKeyWalker(out *Sandbox, key string) SandboxWalker { + return func(sb Sandbox) bool { + if sb.Key() == key { + *out = sb + return true + } + return false + } +} + +func (c *controller) loadDriver(networkType string) error { + var err error + + if pg := c.GetPluginGetter(); pg != nil { + _, err = pg.Get(networkType, driverapi.NetworkPluginEndpointType, plugingetter.Lookup) + } else { + _, err = plugins.Get(networkType, driverapi.NetworkPluginEndpointType) + } + + if err != nil { + if err == plugins.ErrNotFound { + return types.NotFoundErrorf(err.Error()) + } + return err + } + + return nil +} + +func (c 
*controller) loadIPAMDriver(name string) error {
+	var err error
+
+	if pg := c.GetPluginGetter(); pg != nil {
+		_, err = pg.Get(name, ipamapi.PluginEndpointType, plugingetter.Lookup)
+	} else {
+		_, err = plugins.Get(name, ipamapi.PluginEndpointType)
+	}
+
+	if err != nil {
+		if err == plugins.ErrNotFound {
+			return types.NotFoundErrorf(err.Error())
+		}
+		return err
+	}
+
+	return nil
+}
+
+func (c *controller) getIPAMDriver(name string) (ipamapi.Ipam, *ipamapi.Capability, error) {
+	id, cap := c.drvRegistry.IPAM(name)
+	if id == nil {
+		// Might be a plugin name. Try loading it
+		if err := c.loadIPAMDriver(name); err != nil {
+			return nil, nil, err
+		}
+
+		// Now that we resolved the plugin, try the registry lookup again
+		id, cap = c.drvRegistry.IPAM(name)
+		if id == nil {
+			return nil, nil, types.BadRequestErrorf("invalid ipam driver: %q", name)
+		}
+	}
+
+	return id, cap, nil
+}
+
+func (c *controller) Stop() {
+	c.closeStores()
+	c.stopExternalKeyListener()
+	osl.GC()
+}
+
+// StartDiagnostic starts the network diagnostic mode
+func (c *controller) StartDiagnostic(port int) {
+	c.Lock()
+	if !c.DiagnosticServer.IsDiagnosticEnabled() {
+		c.DiagnosticServer.EnableDiagnostic("127.0.0.1", port)
+	}
+	c.Unlock()
+}
+
+// StopDiagnostic stops the network diagnostic mode
+func (c *controller) StopDiagnostic() {
+	c.Lock()
+	if c.DiagnosticServer.IsDiagnosticEnabled() {
+		c.DiagnosticServer.DisableDiagnostic()
+	}
+	c.Unlock()
+}
+
+// IsDiagnosticEnabled returns true if the diagnostic mode is enabled
+func (c *controller) IsDiagnosticEnabled() bool {
+	c.Lock()
+	defer c.Unlock()
+	return c.DiagnosticServer.IsDiagnosticEnabled()
+}
diff --git a/vendor/github.com/docker/libnetwork/datastore/cache.go b/vendor/github.com/docker/libnetwork/datastore/cache.go
new file mode 100644
index 0000000000..49839ae8f2
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/datastore/cache.go
@@ -0,0 +1,178 @@
+package datastore
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/docker/libkv/store"
+)
+
+type kvMap map[string]KVObject
+
+type cache struct {
+	sync.Mutex
+	kmm map[string]kvMap
+	ds  *datastore
+}
+
+func newCache(ds *datastore) *cache {
+	return &cache{kmm: make(map[string]kvMap), ds: ds}
+}
+
+func (c *cache) kmap(kvObject KVObject) (kvMap, error) {
+	var err error
+
+	c.Lock()
+	keyPrefix := Key(kvObject.KeyPrefix()...)
+	kmap, ok := c.kmm[keyPrefix]
+	c.Unlock()
+
+	if ok {
+		return kmap, nil
+	}
+
+	kmap = kvMap{}
+
+	// Bail out right away if the kvObject does not implement KVConstructor
+	ctor, ok := kvObject.(KVConstructor)
+	if !ok {
+		return nil, errors.New("error while populating kmap, object does not implement KVConstructor interface")
+	}
+
+	kvList, err := c.ds.store.List(keyPrefix)
+	if err != nil {
+		if err == store.ErrKeyNotFound {
+			// If the store doesn't have anything then there is nothing to
+			// populate in the cache. Just bail out.
+			goto out
+		}
+
+		return nil, fmt.Errorf("error while populating kmap: %v", err)
+	}
+
+	for _, kvPair := range kvList {
+		// Ignore empty kvPair values
+		if len(kvPair.Value) == 0 {
+			continue
+		}
+
+		dstO := ctor.New()
+		err = dstO.SetValue(kvPair.Value)
+		if err != nil {
+			return nil, err
+		}
+
+		// Make sure the object has a correct view of the DB index in
+		// case we need to modify it and update the DB.
+		dstO.SetIndex(kvPair.LastIndex)
+
+		kmap[Key(dstO.Key()...)] = dstO
+	}
+
+out:
+	// There may be multiple goroutines racing to fill the
+	// cache. The one which places the kmap in c.kmm first
+	// wins.
The others should just use what the first populated. + c.Lock() + kmapNew, ok := c.kmm[keyPrefix] + if ok { + c.Unlock() + return kmapNew, nil + } + + c.kmm[keyPrefix] = kmap + c.Unlock() + + return kmap, nil +} + +func (c *cache) add(kvObject KVObject, atomic bool) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + // If atomic is true, cache needs to maintain its own index + // for atomicity and the add needs to be atomic. + if atomic { + if prev, ok := kmap[Key(kvObject.Key()...)]; ok { + if prev.Index() != kvObject.Index() { + c.Unlock() + return ErrKeyModified + } + } + + // Increment index + index := kvObject.Index() + index++ + kvObject.SetIndex(index) + } + + kmap[Key(kvObject.Key()...)] = kvObject + c.Unlock() + return nil +} + +func (c *cache) del(kvObject KVObject, atomic bool) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + // If atomic is true, cache needs to maintain its own index + // for atomicity and del needs to be atomic. + if atomic { + if prev, ok := kmap[Key(kvObject.Key()...)]; ok { + if prev.Index() != kvObject.Index() { + c.Unlock() + return ErrKeyModified + } + } + } + + delete(kmap, Key(kvObject.Key()...)) + c.Unlock() + return nil +} + +func (c *cache) get(key string, kvObject KVObject) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + o, ok := kmap[Key(kvObject.Key()...)] + if !ok { + return ErrKeyNotFound + } + + ctor, ok := o.(KVConstructor) + if !ok { + return errors.New("kvobject does not implement KVConstructor interface. could not get object") + } + + return ctor.CopyTo(kvObject) +} + +func (c *cache) list(kvObject KVObject) ([]KVObject, error) { + kmap, err := c.kmap(kvObject) + if err != nil { + return nil, err + } + + c.Lock() + defer c.Unlock() + + var kvol []KVObject + for _, v := range kmap { + kvol = append(kvol, v) + } + + return kvol, nil +} diff --git a/vendor/github.com/docker/libnetwork/datastore/datastore.go b/vendor/github.com/docker/libnetwork/datastore/datastore.go new file mode 100644 index 0000000000..82feef1c84 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/datastore/datastore.go @@ -0,0 +1,660 @@ +package datastore + +import ( + "fmt" + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/types" +) + +//DataStore exported +type DataStore interface { + // GetObject gets data from datastore and unmarshals to the specified object + GetObject(key string, o KVObject) error + // PutObject adds a new Record based on an object into the datastore + PutObject(kvObject KVObject) error + // PutObjectAtomic provides an atomic add and update operation for a Record + PutObjectAtomic(kvObject KVObject) error + // DeleteObject deletes a record + DeleteObject(kvObject KVObject) error + // DeleteObjectAtomic performs an atomic delete operation + DeleteObjectAtomic(kvObject KVObject) error + // DeleteTree deletes a record + DeleteTree(kvObject KVObject) error + // Watchable returns whether the store is watchable or not + Watchable() bool + // Watch for changes on a KVObject + Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) + // RestartWatch retriggers stopped Watches + RestartWatch() + // Active returns if the store is active + Active() bool + // List returns of a list of KVObjects belonging to the parent + // key. 
The caller must pass a KVObject of the same type as + // the objects that need to be listed + List(string, KVObject) ([]KVObject, error) + // Map returns a Map of KVObjects + Map(key string, kvObject KVObject) (map[string]KVObject, error) + // Scope returns the scope of the store + Scope() string + // KVStore returns access to the KV Store + KVStore() store.Store + // Close closes the data store + Close() +} + +// ErrKeyModified is raised for an atomic update when the update is working on a stale state +var ( + ErrKeyModified = store.ErrKeyModified + ErrKeyNotFound = store.ErrKeyNotFound +) + +type datastore struct { + scope string + store store.Store + cache *cache + watchCh chan struct{} + active bool + sequential bool + sync.Mutex +} + +// KVObject is Key/Value interface used by objects to be part of the DataStore +type KVObject interface { + // Key method lets an object provide the Key to be used in KV Store + Key() []string + // KeyPrefix method lets an object return immediate parent key that can be used for tree walk + KeyPrefix() []string + // Value method lets an object marshal its content to be stored in the KV store + Value() []byte + // SetValue is used by the datastore to set the object's value when loaded from the data store. + SetValue([]byte) error + // Index method returns the latest DB Index as seen by the object + Index() uint64 + // SetIndex method allows the datastore to store the latest DB Index into the object + SetIndex(uint64) + // True if the object exists in the datastore, false if it hasn't been stored yet. + // When SetIndex() is called, the object has been stored. + Exists() bool + // DataScope indicates the storage scope of the KV object + DataScope() string + // Skip provides a way for a KV Object to avoid persisting it in the KV Store + Skip() bool +} + +// KVConstructor interface defines methods which can construct a KVObject from another. +type KVConstructor interface { + // New returns a new object which is created based on the + // source object + New() KVObject + // CopyTo deep copies the contents of the implementing object + // to the passed destination object + CopyTo(KVObject) error +} + +// ScopeCfg represents Datastore configuration. +type ScopeCfg struct { + Client ScopeClientCfg +} + +// ScopeClientCfg represents Datastore Client-only mode configuration +type ScopeClientCfg struct { + Provider string + Address string + Config *store.Config +} + +const ( + // LocalScope indicates to store the KV object in local datastore such as boltdb + LocalScope = "local" + // GlobalScope indicates to store the KV object in global datastore such as consul/etcd/zookeeper + GlobalScope = "global" + // SwarmScope is not indicating a datastore location. It is defined here + // along with the other two scopes just for consistency. 
+ SwarmScope = "swarm" + defaultPrefix = "/var/lib/docker/network/files" +) + +const ( + // NetworkKeyPrefix is the prefix for network key in the kv store + NetworkKeyPrefix = "network" + // EndpointKeyPrefix is the prefix for endpoint key in the kv store + EndpointKeyPrefix = "endpoint" +) + +var ( + defaultScopes = makeDefaultScopes() +) + +func makeDefaultScopes() map[string]*ScopeCfg { + def := make(map[string]*ScopeCfg) + def[LocalScope] = &ScopeCfg{ + Client: ScopeClientCfg{ + Provider: string(store.BOLTDB), + Address: defaultPrefix + "/local-kv.db", + Config: &store.Config{ + Bucket: "libnetwork", + ConnectionTimeout: time.Minute, + }, + }, + } + + return def +} + +var defaultRootChain = []string{"docker", "network", "v1.0"} +var rootChain = defaultRootChain + +// DefaultScopes returns a map of default scopes and its config for clients to use. +func DefaultScopes(dataDir string) map[string]*ScopeCfg { + if dataDir != "" { + defaultScopes[LocalScope].Client.Address = dataDir + "/network/files/local-kv.db" + return defaultScopes + } + + defaultScopes[LocalScope].Client.Address = defaultPrefix + "/local-kv.db" + return defaultScopes +} + +// IsValid checks if the scope config has valid configuration. +func (cfg *ScopeCfg) IsValid() bool { + if cfg == nil || + strings.TrimSpace(cfg.Client.Provider) == "" || + strings.TrimSpace(cfg.Client.Address) == "" { + return false + } + + return true +} + +//Key provides convenient method to create a Key +func Key(key ...string) string { + keychain := append(rootChain, key...) + str := strings.Join(keychain, "/") + return str + "/" +} + +//ParseKey provides convenient method to unpack the key to complement the Key function +func ParseKey(key string) ([]string, error) { + chain := strings.Split(strings.Trim(key, "/"), "/") + + // The key must atleast be equal to the rootChain in order to be considered as valid + if len(chain) <= len(rootChain) || !reflect.DeepEqual(chain[0:len(rootChain)], rootChain) { + return nil, types.BadRequestErrorf("invalid Key : %s", key) + } + return chain[len(rootChain):], nil +} + +// newClient used to connect to KV Store +func newClient(scope string, kv string, addr string, config *store.Config, cached bool) (DataStore, error) { + + if cached && scope != LocalScope { + return nil, fmt.Errorf("caching supported only for scope %s", LocalScope) + } + sequential := false + if scope == LocalScope { + sequential = true + } + + if config == nil { + config = &store.Config{} + } + + var addrs []string + + if kv == string(store.BOLTDB) { + // Parse file path + addrs = strings.Split(addr, ",") + } else { + // Parse URI + parts := strings.SplitN(addr, "/", 2) + addrs = strings.Split(parts[0], ",") + + // Add the custom prefix to the root chain + if len(parts) == 2 { + rootChain = append([]string{parts[1]}, defaultRootChain...) 
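+			// With a hypothetical address such as "host1:8500/custom",
+			// keys are then stored under "custom/docker/network/v1.0/...".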
+ } + } + + store, err := libkv.NewStore(store.Backend(kv), addrs, config) + if err != nil { + return nil, err + } + + ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{}), sequential: sequential} + if cached { + ds.cache = newCache(ds) + } + + return ds, nil +} + +// NewDataStore creates a new instance of LibKV data store +func NewDataStore(scope string, cfg *ScopeCfg) (DataStore, error) { + if cfg == nil || cfg.Client.Provider == "" || cfg.Client.Address == "" { + c, ok := defaultScopes[scope] + if !ok || c.Client.Provider == "" || c.Client.Address == "" { + return nil, fmt.Errorf("unexpected scope %s without configuration passed", scope) + } + + cfg = c + } + + var cached bool + if scope == LocalScope { + cached = true + } + + return newClient(scope, cfg.Client.Provider, cfg.Client.Address, cfg.Client.Config, cached) +} + +// NewDataStoreFromConfig creates a new instance of LibKV data store starting from the datastore config data +func NewDataStoreFromConfig(dsc discoverapi.DatastoreConfigData) (DataStore, error) { + var ( + ok bool + sCfgP *store.Config + ) + + sCfgP, ok = dsc.Config.(*store.Config) + if !ok && dsc.Config != nil { + return nil, fmt.Errorf("cannot parse store configuration: %v", dsc.Config) + } + + scopeCfg := &ScopeCfg{ + Client: ScopeClientCfg{ + Address: dsc.Address, + Provider: dsc.Provider, + Config: sCfgP, + }, + } + + ds, err := NewDataStore(dsc.Scope, scopeCfg) + if err != nil { + return nil, fmt.Errorf("failed to construct datastore client from datastore configuration %v: %v", dsc, err) + } + + return ds, err +} + +func (ds *datastore) Close() { + ds.store.Close() +} + +func (ds *datastore) Scope() string { + return ds.scope +} + +func (ds *datastore) Active() bool { + return ds.active +} + +func (ds *datastore) Watchable() bool { + return ds.scope != LocalScope +} + +func (ds *datastore) Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) { + sCh := make(chan struct{}) + + ctor, ok := kvObject.(KVConstructor) + if !ok { + return nil, fmt.Errorf("error watching object type %T, object does not implement KVConstructor interface", kvObject) + } + + kvpCh, err := ds.store.Watch(Key(kvObject.Key()...), sCh) + if err != nil { + return nil, err + } + + kvoCh := make(chan KVObject) + + go func() { + retry_watch: + var err error + + // Make sure to get a new instance of watch channel + ds.Lock() + watchCh := ds.watchCh + ds.Unlock() + + loop: + for { + select { + case <-stopCh: + close(sCh) + return + case kvPair := <-kvpCh: + // If the backend KV store gets reset libkv's go routine + // for the watch can exit resulting in a nil value in + // channel. 
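+				// Mark the store inactive and drop to the wait below; the
+				// watch is re-established once RestartWatch fires.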
+ if kvPair == nil { + ds.Lock() + ds.active = false + ds.Unlock() + break loop + } + + dstO := ctor.New() + + if err = dstO.SetValue(kvPair.Value); err != nil { + log.Printf("Could not unmarshal kvpair value = %s", string(kvPair.Value)) + break + } + + dstO.SetIndex(kvPair.LastIndex) + kvoCh <- dstO + } + } + + // Wait on watch channel for a re-trigger when datastore becomes active + <-watchCh + + kvpCh, err = ds.store.Watch(Key(kvObject.Key()...), sCh) + if err != nil { + log.Printf("Could not watch the key %s in store: %v", Key(kvObject.Key()...), err) + } + + goto retry_watch + }() + + return kvoCh, nil +} + +func (ds *datastore) RestartWatch() { + ds.Lock() + defer ds.Unlock() + + ds.active = true + watchCh := ds.watchCh + ds.watchCh = make(chan struct{}) + close(watchCh) +} + +func (ds *datastore) KVStore() store.Store { + return ds.store +} + +// PutObjectAtomic adds a new Record based on an object into the datastore +func (ds *datastore) PutObjectAtomic(kvObject KVObject) error { + var ( + previous *store.KVPair + pair *store.KVPair + err error + ) + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + kvObjValue := kvObject.Value() + + if kvObjValue == nil { + return types.BadRequestErrorf("invalid KV Object with a nil Value for key %s", Key(kvObject.Key()...)) + } + + if kvObject.Skip() { + goto add_cache + } + + if kvObject.Exists() { + previous = &store.KVPair{Key: Key(kvObject.Key()...), LastIndex: kvObject.Index()} + } else { + previous = nil + } + + _, pair, err = ds.store.AtomicPut(Key(kvObject.Key()...), kvObjValue, previous, nil) + if err != nil { + if err == store.ErrKeyExists { + return ErrKeyModified + } + return err + } + + kvObject.SetIndex(pair.LastIndex) + +add_cache: + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + return ds.cache.add(kvObject, kvObject.Skip()) + } + + return nil +} + +// PutObject adds a new Record based on an object into the datastore +func (ds *datastore) PutObject(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + if kvObject.Skip() { + goto add_cache + } + + if err := ds.putObjectWithKey(kvObject, kvObject.Key()...); err != nil { + return err + } + +add_cache: + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + return ds.cache.add(kvObject, kvObject.Skip()) + } + + return nil +} + +func (ds *datastore) putObjectWithKey(kvObject KVObject, key ...string) error { + kvObjValue := kvObject.Value() + + if kvObjValue == nil { + return types.BadRequestErrorf("invalid KV Object with a nil Value for key %s", Key(kvObject.Key()...)) + } + return ds.store.Put(Key(key...), kvObjValue, nil) +} + +// GetObject returns a record matching the key +func (ds *datastore) GetObject(key string, o KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if ds.cache != nil { + return ds.cache.get(key, o) + } + + kvPair, err := ds.store.Get(key) + if err != nil { + return err + } + + if err := o.SetValue(kvPair.Value); err != nil { + return err + } + + // Make sure the object has a correct view of the DB index in + // case we need to modify it and update the DB. 
+ o.SetIndex(kvPair.LastIndex) + return nil +} + +func (ds *datastore) ensureParent(parent string) error { + exists, err := ds.store.Exists(parent) + if err != nil { + return err + } + if exists { + return nil + } + return ds.store.Put(parent, []byte{}, &store.WriteOptions{IsDir: true}) +} + +func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if ds.cache != nil { + return ds.cache.list(kvObject) + } + + var kvol []KVObject + cb := func(key string, val KVObject) { + kvol = append(kvol, val) + } + err := ds.iterateKVPairsFromStore(key, kvObject, cb) + if err != nil { + return nil, err + } + return kvol, nil +} + +func (ds *datastore) iterateKVPairsFromStore(key string, kvObject KVObject, callback func(string, KVObject)) error { + // Bail out right away if the kvObject does not implement KVConstructor + ctor, ok := kvObject.(KVConstructor) + if !ok { + return fmt.Errorf("error listing objects, object does not implement KVConstructor interface") + } + + // Make sure the parent key exists + if err := ds.ensureParent(key); err != nil { + return err + } + + kvList, err := ds.store.List(key) + if err != nil { + return err + } + + for _, kvPair := range kvList { + if len(kvPair.Value) == 0 { + continue + } + + dstO := ctor.New() + if err := dstO.SetValue(kvPair.Value); err != nil { + return err + } + + // Make sure the object has a correct view of the DB index in + // case we need to modify it and update the DB. + dstO.SetIndex(kvPair.LastIndex) + callback(kvPair.Key, dstO) + } + + return nil +} + +func (ds *datastore) Map(key string, kvObject KVObject) (map[string]KVObject, error) { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + kvol := make(map[string]KVObject) + cb := func(key string, val KVObject) { + // Trim the leading & trailing "/" to make it consistent across all stores + kvol[strings.Trim(key, "/")] = val + } + err := ds.iterateKVPairsFromStore(key, kvObject, cb) + if err != nil { + return nil, err + } + return kvol, nil +} + +// DeleteObject unconditionally deletes a record from the store +func (ds *datastore) DeleteObject(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + // cleaup the cache first + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + ds.cache.del(kvObject, kvObject.Skip()) + } + + if kvObject.Skip() { + return nil + } + + return ds.store.Delete(Key(kvObject.Key()...)) +} + +// DeleteObjectAtomic performs atomic delete on a record +func (ds *datastore) DeleteObjectAtomic(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + previous := &store.KVPair{Key: Key(kvObject.Key()...), LastIndex: kvObject.Index()} + + if kvObject.Skip() { + goto del_cache + } + + if _, err := ds.store.AtomicDelete(Key(kvObject.Key()...), previous); err != nil { + if err == store.ErrKeyExists { + return ErrKeyModified + } + return err + } + +del_cache: + // cleanup the cache only if AtomicDelete went through successfully + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. 
+ return ds.cache.del(kvObject, kvObject.Skip()) + } + + return nil +} + +// DeleteTree unconditionally deletes a record from the store +func (ds *datastore) DeleteTree(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + // cleaup the cache first + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + ds.cache.del(kvObject, kvObject.Skip()) + } + + if kvObject.Skip() { + return nil + } + + return ds.store.DeleteTree(Key(kvObject.KeyPrefix()...)) +} diff --git a/vendor/github.com/docker/libnetwork/datastore/mock_store.go b/vendor/github.com/docker/libnetwork/datastore/mock_store.go new file mode 100644 index 0000000000..0817339b6a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/datastore/mock_store.go @@ -0,0 +1,129 @@ +package datastore + +import ( + "errors" + + "github.com/docker/libkv/store" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrNotImplmented exported + ErrNotImplmented = errors.New("Functionality not implemented") +) + +// MockData exported +type MockData struct { + Data []byte + Index uint64 +} + +// MockStore exported +type MockStore struct { + db map[string]*MockData +} + +// NewMockStore creates a Map backed Datastore that is useful for mocking +func NewMockStore() *MockStore { + db := make(map[string]*MockData) + return &MockStore{db} +} + +// Get the value at "key", returns the last modified index +// to use in conjunction to CAS calls +func (s *MockStore) Get(key string) (*store.KVPair, error) { + mData := s.db[key] + if mData == nil { + return nil, nil + } + return &store.KVPair{Value: mData.Data, LastIndex: mData.Index}, nil + +} + +// Put a value at "key" +func (s *MockStore) Put(key string, value []byte, options *store.WriteOptions) error { + mData := s.db[key] + if mData == nil { + mData = &MockData{value, 0} + } + mData.Index = mData.Index + 1 + s.db[key] = mData + return nil +} + +// Delete a value at "key" +func (s *MockStore) Delete(key string) error { + delete(s.db, key) + return nil +} + +// Exists checks that the key exists inside the store +func (s *MockStore) Exists(key string) (bool, error) { + _, ok := s.db[key] + return ok, nil +} + +// List gets a range of values at "directory" +func (s *MockStore) List(prefix string) ([]*store.KVPair, error) { + return nil, ErrNotImplmented +} + +// DeleteTree deletes a range of values at "directory" +func (s *MockStore) DeleteTree(prefix string) error { + delete(s.db, prefix) + return nil +} + +// Watch a single key for modifications +func (s *MockStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, ErrNotImplmented +} + +// WatchTree triggers a watch on a range of values at "directory" +func (s *MockStore) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, ErrNotImplmented +} + +// NewLock exposed +func (s *MockStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, ErrNotImplmented +} + +// AtomicPut put a value at "key" if the key has not been +// modified in the meantime, throws an error if this is the case +func (s *MockStore) AtomicPut(key string, newValue []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + mData := s.db[key] + + if previous == nil { + if mData != nil { + return false, nil, types.BadRequestErrorf("atomic put failed because key exists") + } // Else OK. 
+	} else {
+		if mData == nil {
+			return false, nil, types.BadRequestErrorf("atomic put failed because the key does not exist")
+		}
+		if mData.Index != previous.LastIndex {
+			return false, nil, types.BadRequestErrorf("atomic put failed due to mismatched Index")
+		} // Else OK.
+	}
+	err := s.Put(key, newValue, nil)
+	if err != nil {
+		return false, nil, err
+	}
+	return true, &store.KVPair{Key: key, Value: newValue, LastIndex: s.db[key].Index}, nil
+}
+
+// AtomicDelete deletes a value at "key" if the key has not
+// been modified in the meantime, and returns an error if it has been
+func (s *MockStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) {
+	mData := s.db[key]
+	if mData != nil && mData.Index != previous.LastIndex {
+		return false, types.BadRequestErrorf("atomic delete failed due to mismatched Index")
+	}
+	return true, s.Delete(key)
+}
+
+// Close closes the client connection
+func (s *MockStore) Close() {
+	return
+}
diff --git a/vendor/github.com/docker/libnetwork/default_gateway.go b/vendor/github.com/docker/libnetwork/default_gateway.go
new file mode 100644
index 0000000000..9a60fd6758
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/default_gateway.go
@@ -0,0 +1,201 @@
+package libnetwork
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/types"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	gwEPlen = 12
+)
+
+var procGwNetwork = make(chan (bool), 1)
+
+/*
+   libnetwork creates a bridge network "docker_gw_bridge" to provide a default
+   gateway for containers if none of the container's endpoints have a GW set
+   by the driver. ICC is set to false for the GW_bridge network.
+
+   If a driver can't provide external connectivity it can choose not to set
+   the GW IP for the endpoint.
+
+   The endpoint on the GW_bridge network is managed dynamically by libnetwork,
+   i.e.:
+   - it is created when an endpoint without a GW joins the container
+   - it is deleted when an endpoint with a GW joins the container
+*/
+
+func (sb *sandbox) setupDefaultGW() error {
+
+	// Check if the container already has a GW endpoint
+	if ep := sb.getEndpointInGWNetwork(); ep != nil {
+		return nil
+	}
+
+	c := sb.controller
+
+	// Look for the default gw network. In case of error (including not found),
+	// retry and create it if needed in a serialized execution.
+	n, err := c.NetworkByName(libnGWNetwork)
+	if err != nil {
+		if n, err = c.defaultGwNetwork(); err != nil {
+			return err
+		}
+	}
+
+	createOptions := []EndpointOption{CreateOptionAnonymous()}
+
+	eplen := gwEPlen
+	if len(sb.containerID) < gwEPlen {
+		eplen = len(sb.containerID)
+	}
+
+	sbLabels := sb.Labels()
+
+	if sbLabels[netlabel.PortMap] != nil {
+		createOptions = append(createOptions, CreateOptionPortMapping(sbLabels[netlabel.PortMap].([]types.PortBinding)))
+	}
+
+	if sbLabels[netlabel.ExposedPorts] != nil {
+		createOptions = append(createOptions, CreateOptionExposedPorts(sbLabels[netlabel.ExposedPorts].([]types.TransportPort)))
+	}
+
+	epOption := getPlatformOption()
+	if epOption != nil {
+		createOptions = append(createOptions, epOption)
+	}
+
+	newEp, err := n.CreateEndpoint("gateway_"+sb.containerID[0:eplen], createOptions...)
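+	// The endpoint name embeds up to the first gwEPlen (12) characters of
+	// the container ID, e.g. "gateway_3f2a9c0d1b2e" for a hypothetical ID.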
+ if err != nil { + return fmt.Errorf("container %s: endpoint create on GW Network failed: %v", sb.containerID, err) + } + + defer func() { + if err != nil { + if err2 := newEp.Delete(true); err2 != nil { + logrus.Warnf("Failed to remove gw endpoint for container %s after failing to join the gateway network: %v", + sb.containerID, err2) + } + } + }() + + epLocal := newEp.(*endpoint) + + if err = epLocal.sbJoin(sb); err != nil { + return fmt.Errorf("container %s: endpoint join on GW Network failed: %v", sb.containerID, err) + } + + return nil +} + +// If present, detach and remove the endpoint connecting the sandbox to the default gw network. +func (sb *sandbox) clearDefaultGW() error { + var ep *endpoint + + if ep = sb.getEndpointInGWNetwork(); ep == nil { + return nil + } + if err := ep.sbLeave(sb, false); err != nil { + return fmt.Errorf("container %s: endpoint leaving GW Network failed: %v", sb.containerID, err) + } + if err := ep.Delete(false); err != nil { + return fmt.Errorf("container %s: deleting endpoint on GW Network failed: %v", sb.containerID, err) + } + return nil +} + +// Evaluate whether the sandbox requires a default gateway based +// on the endpoints to which it is connected. It does not account +// for the default gateway network endpoint. + +func (sb *sandbox) needDefaultGW() bool { + var needGW bool + + for _, ep := range sb.getConnectedEndpoints() { + if ep.endpointInGWNetwork() { + continue + } + if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { + continue + } + if ep.getNetwork().Internal() { + continue + } + // During stale sandbox cleanup, joinInfo may be nil + if ep.joinInfo != nil && ep.joinInfo.disableGatewayService { + continue + } + // TODO v6 needs to be handled. + if len(ep.Gateway()) > 0 { + return false + } + for _, r := range ep.StaticRoutes() { + if r.Destination != nil && r.Destination.String() == "0.0.0.0/0" { + return false + } + } + needGW = true + } + + return needGW +} + +func (sb *sandbox) getEndpointInGWNetwork() *endpoint { + for _, ep := range sb.getConnectedEndpoints() { + if ep.getNetwork().name == libnGWNetwork && strings.HasPrefix(ep.Name(), "gateway_") { + return ep + } + } + return nil +} + +func (ep *endpoint) endpointInGWNetwork() bool { + if ep.getNetwork().name == libnGWNetwork && strings.HasPrefix(ep.Name(), "gateway_") { + return true + } + return false +} + +func (sb *sandbox) getEPwithoutGateway() *endpoint { + for _, ep := range sb.getConnectedEndpoints() { + if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { + continue + } + if len(ep.Gateway()) == 0 { + return ep + } + } + return nil +} + +// Looks for the default gw network and creates it if not there. +// Parallel executions are serialized. 
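The "serialized execution" promised by the comment above is what defaultGwNetwork (next) implements with procGwNetwork, a buffered channel of capacity one used as a mutex: sending acquires the slot, receiving releases it. A standalone sketch of the idiom (names hypothetical, not the vendored types):

```go
package main

import "fmt"

// sem is a one-slot buffered channel used as a mutex, mirroring procGwNetwork:
// the send blocks while another goroutine holds the slot.
var sem = make(chan bool, 1)

func getOrCreate(networks map[string]string, name string) string {
	sem <- true              // acquire
	defer func() { <-sem }() // release

	if n, ok := networks[name]; ok {
		return n // another caller created it while we waited
	}
	networks[name] = "created:" + name
	return networks[name]
}

func main() {
	networks := map[string]string{}
	fmt.Println(getOrCreate(networks, "docker_gwbridge"))
	fmt.Println(getOrCreate(networks, "docker_gwbridge")) // second call reuses it
}
```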
+func (c *controller) defaultGwNetwork() (Network, error) { + procGwNetwork <- true + defer func() { <-procGwNetwork }() + + n, err := c.NetworkByName(libnGWNetwork) + if err != nil { + if _, ok := err.(types.NotFoundError); ok { + n, err = c.createGWNetwork() + } + } + return n, err +} + +// Returns the endpoint which is providing external connectivity to the sandbox +func (sb *sandbox) getGatewayEndpoint() *endpoint { + for _, ep := range sb.getConnectedEndpoints() { + if ep.getNetwork().Type() == "null" || ep.getNetwork().Type() == "host" { + continue + } + if len(ep.Gateway()) != 0 { + return ep + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go b/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go new file mode 100644 index 0000000000..dc4b1bd592 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/default_gateway_freebsd.go @@ -0,0 +1,13 @@ +package libnetwork + +import "github.com/docker/libnetwork/types" + +const libnGWNetwork = "docker_gwbridge" + +func getPlatformOption() EndpointOption { + return nil +} + +func (c *controller) createGWNetwork() (Network, error) { + return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in freebsd") +} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_linux.go b/vendor/github.com/docker/libnetwork/default_gateway_linux.go new file mode 100644 index 0000000000..60df856722 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/default_gateway_linux.go @@ -0,0 +1,32 @@ +package libnetwork + +import ( + "fmt" + "strconv" + + "github.com/docker/libnetwork/drivers/bridge" +) + +const libnGWNetwork = "docker_gwbridge" + +func getPlatformOption() EndpointOption { + return nil +} + +func (c *controller) createGWNetwork() (Network, error) { + netOption := map[string]string{ + bridge.BridgeName: libnGWNetwork, + bridge.EnableICC: strconv.FormatBool(false), + bridge.EnableIPMasquerade: strconv.FormatBool(true), + } + + n, err := c.NewNetwork("bridge", libnGWNetwork, "", + NetworkOptionDriverOpts(netOption), + NetworkOptionEnableIPv6(false), + ) + + if err != nil { + return nil, fmt.Errorf("error creating external connectivity network: %v", err) + } + return n, err +} diff --git a/vendor/github.com/docker/libnetwork/default_gateway_windows.go b/vendor/github.com/docker/libnetwork/default_gateway_windows.go new file mode 100644 index 0000000000..f4ba198e57 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/default_gateway_windows.go @@ -0,0 +1,22 @@ +package libnetwork + +import ( + windriver "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" +) + +const libnGWNetwork = "nat" + +func getPlatformOption() EndpointOption { + + epOption := options.Generic{ + windriver.DisableICC: true, + windriver.DisableDNS: true, + } + return EndpointOptionGeneric(epOption) +} + +func (c *controller) createGWNetwork() (Network, error) { + return nil, types.NotImplementedErrorf("default gateway functionality is not implemented in windows") +} diff --git a/vendor/github.com/docker/libnetwork/diagnostic/server.go b/vendor/github.com/docker/libnetwork/diagnostic/server.go new file mode 100644 index 0000000000..f351762bd1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/diagnostic/server.go @@ -0,0 +1,227 @@ +package diagnostic + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + "sync/atomic" + + stackdump "github.com/docker/docker/pkg/signal" + 
"github.com/docker/libnetwork/common" + "github.com/sirupsen/logrus" +) + +// HTTPHandlerFunc TODO +type HTTPHandlerFunc func(interface{}, http.ResponseWriter, *http.Request) + +type httpHandlerCustom struct { + ctx interface{} + F func(interface{}, http.ResponseWriter, *http.Request) +} + +// ServeHTTP TODO +func (h httpHandlerCustom) ServeHTTP(w http.ResponseWriter, r *http.Request) { + h.F(h.ctx, w, r) +} + +var diagPaths2Func = map[string]HTTPHandlerFunc{ + "/": notImplemented, + "/help": help, + "/ready": ready, + "/stackdump": stackTrace, +} + +// Server when the debug is enabled exposes a +// This data structure is protected by the Agent mutex so does not require and additional mutex here +type Server struct { + enable int32 + srv *http.Server + port int + mux *http.ServeMux + registeredHanders map[string]bool + sync.Mutex +} + +// New creates a new diagnostic server +func New() *Server { + return &Server{ + registeredHanders: make(map[string]bool), + } +} + +// Init initialize the mux for the http handling and register the base hooks +func (s *Server) Init() { + s.mux = http.NewServeMux() + + // Register local handlers + s.RegisterHandler(s, diagPaths2Func) +} + +// RegisterHandler allows to register new handlers to the mux and to a specific path +func (s *Server) RegisterHandler(ctx interface{}, hdlrs map[string]HTTPHandlerFunc) { + s.Lock() + defer s.Unlock() + for path, fun := range hdlrs { + if _, ok := s.registeredHanders[path]; ok { + continue + } + s.mux.Handle(path, httpHandlerCustom{ctx, fun}) + s.registeredHanders[path] = true + } +} + +// ServeHTTP this is the method called bu the ListenAndServe, and is needed to allow us to +// use our custom mux +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.mux.ServeHTTP(w, r) +} + +// EnableDiagnostic opens a TCP socket to debug the passed network DB +func (s *Server) EnableDiagnostic(ip string, port int) { + s.Lock() + defer s.Unlock() + + s.port = port + + if s.enable == 1 { + logrus.Info("The server is already up and running") + return + } + + logrus.Infof("Starting the diagnostic server listening on %d for commands", port) + srv := &http.Server{Addr: fmt.Sprintf("%s:%d", ip, port), Handler: s} + s.srv = srv + s.enable = 1 + go func(n *Server) { + // Ingore ErrServerClosed that is returned on the Shutdown call + if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + logrus.Errorf("ListenAndServe error: %s", err) + atomic.SwapInt32(&n.enable, 0) + } + }(s) +} + +// DisableDiagnostic stop the dubug and closes the tcp socket +func (s *Server) DisableDiagnostic() { + s.Lock() + defer s.Unlock() + + s.srv.Shutdown(context.Background()) + s.srv = nil + s.enable = 0 + logrus.Info("Disabling the diagnostic server") +} + +// IsDiagnosticEnabled returns true when the debug is enabled +func (s *Server) IsDiagnosticEnabled() bool { + s.Lock() + defer s.Unlock() + return s.enable == 1 +} + +func notImplemented(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + _, json := ParseHTTPFormOptions(r) + rsp := WrongCommand("not implemented", fmt.Sprintf("URL path: %s no method implemented check /help\n", r.URL.Path)) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("command not implemented done") + + HTTPReply(w, rsp, json) +} + +func help(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + _, json := 
ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("help done") + + n, ok := ctx.(*Server) + var result string + if ok { + for path := range n.registeredHanders { + result += fmt.Sprintf("%s\n", path) + } + HTTPReply(w, CommandSucceed(&StringCmd{Info: result}), json) + } +} + +func ready(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + _, json := ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("ready done") + HTTPReply(w, CommandSucceed(&StringCmd{Info: "OK"}), json) +} + +func stackTrace(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + _, json := ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("stack trace") + + path, err := stackdump.DumpStacks("/tmp/") + if err != nil { + log.WithError(err).Error("failed to write goroutines dump") + HTTPReply(w, FailCommand(err), json) + } else { + log.Info("stack trace done") + HTTPReply(w, CommandSucceed(&StringCmd{Info: fmt.Sprintf("goroutine stacks written to %s", path)}), json) + } +} + +// DebugHTTPForm helper to print the form url parameters +func DebugHTTPForm(r *http.Request) { + for k, v := range r.Form { + logrus.Debugf("Form[%q] = %q\n", k, v) + } +} + +// JSONOutput contains details on JSON output printing +type JSONOutput struct { + enable bool + prettyPrint bool +} + +// ParseHTTPFormOptions easily parse the JSON printing options +func ParseHTTPFormOptions(r *http.Request) (bool, *JSONOutput) { + _, unsafe := r.Form["unsafe"] + v, json := r.Form["json"] + var pretty bool + if len(v) > 0 { + pretty = v[0] == "pretty" + } + return unsafe, &JSONOutput{enable: json, prettyPrint: pretty} +} + +// HTTPReply helper function that takes care of sending the message out +func HTTPReply(w http.ResponseWriter, r *HTTPResult, j *JSONOutput) (int, error) { + var response []byte + if j.enable { + w.Header().Set("Content-Type", "application/json") + var err error + if j.prettyPrint { + response, err = json.MarshalIndent(r, "", " ") + if err != nil { + response, _ = json.MarshalIndent(FailCommand(err), "", " ") + } + } else { + response, err = json.Marshal(r) + if err != nil { + response, _ = json.Marshal(FailCommand(err)) + } + } + } else { + response = []byte(r.String()) + } + return fmt.Fprint(w, string(response)) +} diff --git a/vendor/github.com/docker/libnetwork/diagnostic/types.go b/vendor/github.com/docker/libnetwork/diagnostic/types.go new file mode 100644 index 0000000000..4eb4ca0d9f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/diagnostic/types.go @@ -0,0 +1,122 @@ +package diagnostic + +import "fmt" + +// StringInterface interface that has to be implemented by messages +type StringInterface interface { + String() string +} + +// CommandSucceed creates a success message +func CommandSucceed(result StringInterface) *HTTPResult { + return &HTTPResult{ + Message: "OK", + Details: result, + } +} + +// FailCommand creates a failure message with error +func FailCommand(err error) *HTTPResult { + return &HTTPResult{ + Message: "FAIL", + Details: &ErrorCmd{Error: err.Error()}, + } +} + +// WrongCommand creates a wrong command response 
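For reference, the reply envelope assembled by HTTPReply is simply HTTPResult serialized with encoding/json, so a client can decode, say, GET /ready?json with a couple of plain struct types. A hypothetical client-side sketch (address and port are illustrative; the port is whatever EnableDiagnostic was given):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// readyReply mirrors the wire shape of HTTPResult carrying a StringCmd payload.
type readyReply struct {
	Message string `json:"message"`
	Details struct {
		Info string `json:"info"`
	} `json:"details"`
}

func main() {
	// e.g. the body returned by: curl 'http://127.0.0.1:2000/ready?json'
	body := []byte(`{"message":"OK","details":{"info":"OK"}}`)

	var r readyReply
	if err := json.Unmarshal(body, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Message, r.Details.Info) // OK OK
}
```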
+func WrongCommand(message, usage string) *HTTPResult {
+	return &HTTPResult{
+		Message: message,
+		Details: &UsageCmd{Usage: usage},
+	}
+}
+
+// HTTPResult Diagnostic Server HTTP result operation
+type HTTPResult struct {
+	Message string          `json:"message"`
+	Details StringInterface `json:"details"`
+}
+
+func (h *HTTPResult) String() string {
+	rsp := h.Message
+	if h.Details != nil {
+		rsp += "\n" + h.Details.String()
+	}
+	return rsp
+}
+
+// UsageCmd command with usage field
+type UsageCmd struct {
+	Usage string `json:"usage"`
+}
+
+func (u *UsageCmd) String() string {
+	return "Usage: " + u.Usage
+}
+
+// StringCmd command with info string
+type StringCmd struct {
+	Info string `json:"info"`
+}
+
+func (s *StringCmd) String() string {
+	return s.Info
+}
+
+// ErrorCmd command with error
+type ErrorCmd struct {
+	Error string `json:"error"`
+}
+
+func (e *ErrorCmd) String() string {
+	return "Error: " + e.Error
+}
+
+// TableObj network db table object
+type TableObj struct {
+	Length   int               `json:"size"`
+	Elements []StringInterface `json:"entries"`
+}
+
+func (t *TableObj) String() string {
+	output := fmt.Sprintf("total entries: %d\n", t.Length)
+	for _, e := range t.Elements {
+		output += e.String()
+	}
+	return output
+}
+
+// PeerEntryObj entry in the networkdb peer table
+type PeerEntryObj struct {
+	Index int    `json:"-"`
+	Name  string `json:"name"`
+	IP    string `json:"ip"`
+}
+
+func (p *PeerEntryObj) String() string {
+	return fmt.Sprintf("%d) %s -> %s\n", p.Index, p.Name, p.IP)
+}
+
+// TableEntryObj network db table entry object
+type TableEntryObj struct {
+	Index int    `json:"-"`
+	Key   string `json:"key"`
+	Value string `json:"value"`
+	Owner string `json:"owner"`
+}
+
+func (t *TableEntryObj) String() string {
+	return fmt.Sprintf("%d) k:`%s` -> v:`%s` owner:`%s`\n", t.Index, t.Key, t.Value, t.Owner)
+}
+
+// TableEndpointsResult fully typed message for proper unmarshaling on the client side
+type TableEndpointsResult struct {
+	TableObj
+	Elements []TableEntryObj `json:"entries"`
+}
+
+// TablePeersResult fully typed message for proper unmarshaling on the client side
+type TablePeersResult struct {
+	TableObj
+	Elements []PeerEntryObj `json:"entries"`
+}
diff --git a/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go b/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go
new file mode 100644
index 0000000000..7ac36155db
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go
@@ -0,0 +1,60 @@
+package discoverapi
+
+// Discover is an interface to be implemented by the component interested in receiving discover events
+// like a new node joining the cluster or datastore updates
+type Discover interface {
+	// DiscoverNew is a notification for a new discovery event. Example: a new node joining a cluster
+	DiscoverNew(dType DiscoveryType, data interface{}) error
+
+	// DiscoverDelete is a notification for a discovery delete event. Example: a node leaving a cluster
+	DiscoverDelete(dType DiscoveryType, data interface{}) error
+}
+
+// DiscoveryType represents the type of discovery element the DiscoverNew function is invoked on
+type DiscoveryType int
+
+const (
+	// NodeDiscovery represents Node join/leave events provided by discovery
+	NodeDiscovery = iota + 1
+	// DatastoreConfig represents an add/remove datastore event
+	DatastoreConfig
+	// EncryptionKeysConfig represents the initial key(s) for performing datapath encryption
+	EncryptionKeysConfig
+	// EncryptionKeysUpdate represents an update to the datapath encryption key(s)
EncryptionKeysUpdate +) + +// NodeDiscoveryData represents the structure backing the node discovery data json string +type NodeDiscoveryData struct { + Address string + BindAddress string + Self bool +} + +// DatastoreConfigData is the data for the datastore update event message +type DatastoreConfigData struct { + Scope string + Provider string + Address string + Config interface{} +} + +// DriverEncryptionConfig contains the initial datapath encryption key(s) +// Key in first position is the primary key, the one to be used in tx. +// Original key and tag types are []byte and uint64 +type DriverEncryptionConfig struct { + Keys [][]byte + Tags []uint64 +} + +// DriverEncryptionUpdate carries an update to the encryption key(s) as: +// a new key and/or set a primary key and/or a removal of an existing key. +// Original key and tag types are []byte and uint64 +type DriverEncryptionUpdate struct { + Key []byte + Tag uint64 + Primary []byte + PrimaryTag uint64 + Prune []byte + PruneTag uint64 +} diff --git a/vendor/github.com/docker/libnetwork/driverapi/driverapi.go b/vendor/github.com/docker/libnetwork/driverapi/driverapi.go new file mode 100644 index 0000000000..48a14ae57a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/driverapi/driverapi.go @@ -0,0 +1,213 @@ +package driverapi + +import ( + "net" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/discoverapi" +) + +// NetworkPluginEndpointType represents the Endpoint Type used by Plugin system +const NetworkPluginEndpointType = "NetworkDriver" + +// Driver is an interface that every plugin driver needs to implement. +type Driver interface { + discoverapi.Discover + + // NetworkAllocate invokes the driver method to allocate network + // specific resources passing network id and network specific config. + // It returns a key,value pair of network specific driver allocations + // to the caller. + NetworkAllocate(nid string, options map[string]string, ipV4Data, ipV6Data []IPAMData) (map[string]string, error) + + // NetworkFree invokes the driver method to free network specific resources + // associated with a given network id. + NetworkFree(nid string) error + + // CreateNetwork invokes the driver method to create a network + // passing the network id and network specific config. The + // config mechanism will eventually be replaced with labels + // which are yet to be introduced. The driver can return a + // list of table names for which it is interested in receiving + // notification when a CRUD operation is performed on any + // entry in that table. This will be ignored for local scope + // drivers. + CreateNetwork(nid string, options map[string]interface{}, nInfo NetworkInfo, ipV4Data, ipV6Data []IPAMData) error + + // DeleteNetwork invokes the driver method to delete network passing + // the network id. + DeleteNetwork(nid string) error + + // CreateEndpoint invokes the driver method to create an endpoint + // passing the network id, endpoint id endpoint information and driver + // specific config. The endpoint information can be either consumed by + // the driver or populated by the driver. The config mechanism will + // eventually be replaced with labels which are yet to be introduced. + CreateEndpoint(nid, eid string, ifInfo InterfaceInfo, options map[string]interface{}) error + + // DeleteEndpoint invokes the driver method to delete an endpoint + // passing the network id and endpoint id. 
+	DeleteEndpoint(nid, eid string) error
+
+	// EndpointOperInfo retrieves from the driver the operational data related to the specified endpoint
+	EndpointOperInfo(nid, eid string) (map[string]interface{}, error)
+
+	// Join method is invoked when a Sandbox is attached to an endpoint.
+	Join(nid, eid string, sboxKey string, jinfo JoinInfo, options map[string]interface{}) error
+
+	// Leave method is invoked when a Sandbox detaches from an endpoint.
+	Leave(nid, eid string) error
+
+	// ProgramExternalConnectivity invokes the driver method which does the necessary
+	// programming to allow the external connectivity dictated by the passed options
+	ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error
+
+	// RevokeExternalConnectivity asks the driver to remove any external connectivity
+	// programming that was done so far
+	RevokeExternalConnectivity(nid, eid string) error
+
+	// EventNotify notifies the driver when a CRUD operation has
+	// happened on a table of its interest as soon as this node
+	// receives such an event in the gossip layer. This method is
+	// only invoked for the global scope driver.
+	EventNotify(event EventType, nid string, tableName string, key string, value []byte)
+
+	// DecodeTableEntry passes the driver a key, value pair from table it registered
+	// with libnetwork. Driver should return {object ID, map[string]string} tuple.
+	// If DecodeTableEntry is called for a table associated with NetworkObject or
+	// EndpointObject the return object ID should be the network id or endpoint id
+	// associated with that entry. The map should have information about the object that
+	// can be presented to the user.
+	// For example: the overlay driver returns the VTEP IP of the host that has the endpoint,
+	// which is shown in 'network inspect --verbose'
+	DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string)
+
+	// Type returns the type of this driver, the network type this driver manages
+	Type() string
+
+	// IsBuiltIn returns true if it is a built-in driver
+	IsBuiltIn() bool
+}
+
+// NetworkInfo provides a go interface for drivers to provide network
+// specific information to libnetwork.
+type NetworkInfo interface {
+	// TableEventRegister registers driver interest in a given
+	// table name.
+	TableEventRegister(tableName string, objType ObjectType) error
+}
+
+// InterfaceInfo provides a go interface for drivers to retrieve
+// network information related to interface resources.
+type InterfaceInfo interface {
+	// SetMacAddress allows the driver to set the mac address to the endpoint interface
+	// during the call to CreateEndpoint, if the mac address is not already set.
+	SetMacAddress(mac net.HardwareAddr) error
+
+	// SetIPAddress allows the driver to set the ip address to the endpoint interface
+	// during the call to CreateEndpoint, if the address is not already set.
+	// The API is to be used to assign both the IPv4 and IPv6 address types.
+	SetIPAddress(ip *net.IPNet) error
+
+	// MacAddress returns the MAC address.
+	MacAddress() net.HardwareAddr
+
+	// Address returns the IPv4 address.
+	Address() *net.IPNet
+
+	// AddressIPv6 returns the IPv6 address.
+	AddressIPv6() *net.IPNet
+}
+
+// InterfaceNameInfo provides a go interface for the drivers to assign names
+// to interfaces.
+type InterfaceNameInfo interface {
+	// SetNames method assigns the srcName and dstPrefix for the interface.
+ SetNames(srcName, dstPrefix string) error +} + +// JoinInfo represents a set of resources that the driver has the ability to provide during +// join time. +type JoinInfo interface { + // InterfaceName returns an InterfaceNameInfo go interface to facilitate + // setting the names for the interface. + InterfaceName() InterfaceNameInfo + + // SetGateway sets the default IPv4 gateway when a container joins the endpoint. + SetGateway(net.IP) error + + // SetGatewayIPv6 sets the default IPv6 gateway when a container joins the endpoint. + SetGatewayIPv6(net.IP) error + + // AddStaticRoute adds a route to the sandbox. + // It may be used in addition to or instead of a default gateway (as above). + AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error + + // DisableGatewayService tells libnetwork not to provide Default GW for the container + DisableGatewayService() + + // AddTableEntry adds a table entry to the gossip layer + // passing the table name, key and an opaque value. + AddTableEntry(tableName string, key string, value []byte) error +} + +// DriverCallback provides a Callback interface for Drivers into LibNetwork +type DriverCallback interface { + // GetPluginGetter returns the pluginv2 getter. + GetPluginGetter() plugingetter.PluginGetter + // RegisterDriver provides a way for Remote drivers to dynamically register new NetworkType and associate with a driver instance + RegisterDriver(name string, driver Driver, capability Capability) error +} + +// Capability represents the high level capabilities of the drivers which libnetwork can make use of +type Capability struct { + DataScope string + ConnectivityScope string +} + +// IPAMData represents the per-network ip related +// operational information libnetwork will send +// to the network driver during CreateNetwork() +type IPAMData struct { + AddressSpace string + Pool *net.IPNet + Gateway *net.IPNet + AuxAddresses map[string]*net.IPNet +} + +// EventType defines a type for the CRUD event +type EventType uint8 + +const ( + // Create event is generated when a table entry is created, + Create EventType = 1 + iota + // Update event is generated when a table entry is updated. + Update + // Delete event is generated when a table entry is deleted. 
+ Delete +) + +// ObjectType represents the type of object driver wants to store in libnetwork's networkDB +type ObjectType int + +const ( + // EndpointObject should be set for libnetwork endpoint object related data + EndpointObject ObjectType = 1 + iota + // NetworkObject should be set for libnetwork network object related data + NetworkObject + // OpaqueObject is for driver specific data with no corresponding libnetwork object + OpaqueObject +) + +// IsValidType validates the passed in type against the valid object types +func IsValidType(objType ObjectType) bool { + switch objType { + case EndpointObject: + fallthrough + case NetworkObject: + fallthrough + case OpaqueObject: + return true + } + return false +} diff --git a/vendor/github.com/docker/libnetwork/driverapi/errors.go b/vendor/github.com/docker/libnetwork/driverapi/errors.go new file mode 100644 index 0000000000..041ef41506 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/driverapi/errors.go @@ -0,0 +1,56 @@ +package driverapi + +import ( + "fmt" +) + +// ErrNoNetwork is returned if no network with the specified id exists +type ErrNoNetwork string + +func (enn ErrNoNetwork) Error() string { + return fmt.Sprintf("No network (%s) exists", string(enn)) +} + +// NotFound denotes the type of this error +func (enn ErrNoNetwork) NotFound() {} + +// ErrEndpointExists is returned if more than one endpoint is added to the network +type ErrEndpointExists string + +func (ee ErrEndpointExists) Error() string { + return fmt.Sprintf("Endpoint (%s) already exists (Only one endpoint allowed)", string(ee)) +} + +// Forbidden denotes the type of this error +func (ee ErrEndpointExists) Forbidden() {} + +// ErrNotImplemented is returned when a Driver has not implemented an API yet +type ErrNotImplemented struct{} + +func (eni *ErrNotImplemented) Error() string { + return "The API is not implemented yet" +} + +// NotImplemented denotes the type of this error +func (eni *ErrNotImplemented) NotImplemented() {} + +// ErrNoEndpoint is returned if no endpoint with the specified id exists +type ErrNoEndpoint string + +func (ene ErrNoEndpoint) Error() string { + return fmt.Sprintf("No endpoint (%s) exists", string(ene)) +} + +// NotFound denotes the type of this error +func (ene ErrNoEndpoint) NotFound() {} + +// ErrActiveRegistration represents an error when a driver is registered to a networkType that is previously registered +type ErrActiveRegistration string + +// Error interface for ErrActiveRegistration +func (ar ErrActiveRegistration) Error() string { + return fmt.Sprintf("Driver already registered for type %q", string(ar)) +} + +// Forbidden denotes the type of this error +func (ar ErrActiveRegistration) Forbidden() {} diff --git a/vendor/github.com/docker/libnetwork/driverapi/ipamdata.go b/vendor/github.com/docker/libnetwork/driverapi/ipamdata.go new file mode 100644 index 0000000000..fc1c2af441 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/driverapi/ipamdata.go @@ -0,0 +1,103 @@ +package driverapi + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/types" +) + +// MarshalJSON encodes IPAMData into json message +func (i *IPAMData) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{} + m["AddressSpace"] = i.AddressSpace + if i.Pool != nil { + m["Pool"] = i.Pool.String() + } + if i.Gateway != nil { + m["Gateway"] = i.Gateway.String() + } + if i.AuxAddresses != nil { + am := make(map[string]string, len(i.AuxAddresses)) + for k, v := range i.AuxAddresses { + am[k] = v.String() + } + 
m["AuxAddresses"] = am + } + return json.Marshal(m) +} + +// UnmarshalJSON decodes a json message into IPAMData +func (i *IPAMData) UnmarshalJSON(data []byte) error { + var ( + m map[string]interface{} + err error + ) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + i.AddressSpace = m["AddressSpace"].(string) + if v, ok := m["Pool"]; ok { + if i.Pool, err = types.ParseCIDR(v.(string)); err != nil { + return err + } + } + if v, ok := m["Gateway"]; ok { + if i.Gateway, err = types.ParseCIDR(v.(string)); err != nil { + return err + } + } + if v, ok := m["AuxAddresses"]; ok { + b, _ := json.Marshal(v) + var am map[string]string + if err = json.Unmarshal(b, &am); err != nil { + return err + } + i.AuxAddresses = make(map[string]*net.IPNet, len(am)) + for k, v := range am { + if i.AuxAddresses[k], err = types.ParseCIDR(v); err != nil { + return err + } + } + } + return nil +} + +// Validate checks whether the IPAMData structure contains congruent data +func (i *IPAMData) Validate() error { + var isV6 bool + if i.Pool == nil { + return types.BadRequestErrorf("invalid pool") + } + if i.Gateway == nil { + return types.BadRequestErrorf("invalid gateway address") + } + isV6 = i.IsV6() + if isV6 && i.Gateway.IP.To4() != nil || !isV6 && i.Gateway.IP.To4() == nil { + return types.BadRequestErrorf("incongruent ip versions for pool and gateway") + } + for k, sip := range i.AuxAddresses { + if isV6 && sip.IP.To4() != nil || !isV6 && sip.IP.To4() == nil { + return types.BadRequestErrorf("incongruent ip versions for pool and secondary ip address %s", k) + } + } + if !i.Pool.Contains(i.Gateway.IP) { + return types.BadRequestErrorf("invalid gateway address (%s) does not belong to the pool (%s)", i.Gateway, i.Pool) + } + for k, sip := range i.AuxAddresses { + if !i.Pool.Contains(sip.IP) { + return types.BadRequestErrorf("invalid secondary address %s (%s) does not belong to the pool (%s)", k, i.Gateway, i.Pool) + } + } + return nil +} + +// IsV6 returns whether this is an IPv6 IPAMData structure +func (i *IPAMData) IsV6() bool { + return nil == i.Pool.IP.To4() +} + +func (i *IPAMData) String() string { + return fmt.Sprintf("AddressSpace: %s\nPool: %v\nGateway: %v\nAddresses: %v", i.AddressSpace, i.Pool, i.Gateway, i.AuxAddresses) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go new file mode 100644 index 0000000000..783d45c113 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go @@ -0,0 +1,1508 @@ +package bridge + +import ( + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/portmapper" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +const ( + networkType = "bridge" + vethPrefix = "veth" + vethLen = 7 + defaultContainerVethPrefix = "eth" + maxAllocatePortAttempts = 10 +) + +const ( + // DefaultGatewayV4AuxKey represents the default-gateway configured by the user + DefaultGatewayV4AuxKey = "DefaultGatewayIPv4" + // DefaultGatewayV6AuxKey represents the ipv6 
+	DefaultGatewayV6AuxKey = "DefaultGatewayIPv6"
+)
+
+type defaultBridgeNetworkConflict struct {
+	ID string
+}
+
+func (d defaultBridgeNetworkConflict) Error() string {
+	return fmt.Sprintf("Stale default bridge network %s", d.ID)
+}
+
+type iptableCleanFunc func() error
+type iptablesCleanFuncs []iptableCleanFunc
+
+// configuration info for the "bridge" driver.
+type configuration struct {
+	EnableIPForwarding  bool
+	EnableIPTables      bool
+	EnableUserlandProxy bool
+	UserlandProxyPath   string
+}
+
+// networkConfiguration for network specific configuration
+type networkConfiguration struct {
+	ID                   string
+	BridgeName           string
+	EnableIPv6           bool
+	EnableIPMasquerade   bool
+	EnableICC            bool
+	Mtu                  int
+	DefaultBindingIP     net.IP
+	DefaultBridge        bool
+	ContainerIfacePrefix string
+	// Internal fields set after ipam data parsing
+	AddressIPv4        *net.IPNet
+	AddressIPv6        *net.IPNet
+	DefaultGatewayIPv4 net.IP
+	DefaultGatewayIPv6 net.IP
+	dbIndex            uint64
+	dbExists           bool
+	Internal           bool
+
+	BridgeIfaceCreator ifaceCreator
+}
+
+// ifaceCreator represents how the bridge interface was created
+type ifaceCreator int8
+
+const (
+	ifaceCreatorUnknown ifaceCreator = iota
+	ifaceCreatedByLibnetwork
+	ifaceCreatedByUser
+)
+
+// endpointConfiguration represents the user specified configuration for the sandbox endpoint
+type endpointConfiguration struct {
+	MacAddress net.HardwareAddr
+}
+
+// containerConfiguration represents the user specified configuration for a container
+type containerConfiguration struct {
+	ParentEndpoints []string
+	ChildEndpoints  []string
+}
+
+// connectivityConfiguration represents the user specified configuration regarding the external connectivity
+type connectivityConfiguration struct {
+	PortBindings []types.PortBinding
+	ExposedPorts []types.TransportPort
+}
+
+type bridgeEndpoint struct {
+	id              string
+	nid             string
+	srcName         string
+	addr            *net.IPNet
+	addrv6          *net.IPNet
+	macAddress      net.HardwareAddr
+	config          *endpointConfiguration // User specified parameters
+	containerConfig *containerConfiguration
+	extConnConfig   *connectivityConfiguration
+	portMapping     []types.PortBinding // Operational port bindings
+	dbIndex         uint64
+	dbExists        bool
+}
+
+type bridgeNetwork struct {
+	id            string
+	bridge        *bridgeInterface // The bridge's L3 interface
+	config        *networkConfiguration
+	endpoints     map[string]*bridgeEndpoint // key: endpoint id
+	portMapper    *portmapper.PortMapper
+	driver        *driver // The network's driver
+	iptCleanFuncs iptablesCleanFuncs
+	sync.Mutex
+}
+
+type driver struct {
+	config          *configuration
+	network         *bridgeNetwork
+	natChain        *iptables.ChainInfo
+	filterChain     *iptables.ChainInfo
+	isolationChain1 *iptables.ChainInfo
+	isolationChain2 *iptables.ChainInfo
+	networks        map[string]*bridgeNetwork
+	store           datastore.DataStore
+	nlh             *netlink.Handle
+	configNetwork   sync.Mutex
+	sync.Mutex
+}
+
+// New constructs a new bridge driver
+func newDriver() *driver {
+	return &driver{networks: map[string]*bridgeNetwork{}, config: &configuration{}}
+}
+
+// Init registers a new instance of bridge driver
+func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
+	d := newDriver()
+	if err := d.configure(config); err != nil {
+		return err
+	}
+
+	c := driverapi.Capability{
+		DataScope:         datastore.LocalScope,
+		ConnectivityScope: datastore.LocalScope,
+	}
+	return dc.RegisterDriver(networkType, d, c)
+}
+
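Much of the networkConfiguration above is populated from string labels (see fromLabels further down); the decoding pattern is plain strconv parsing with fail-fast errors. A reduced, self-contained sketch of that pattern (label keys and type names here are hypothetical, not the vendored ones):

```go
package main

import (
	"fmt"
	"strconv"
)

type cfg struct {
	BridgeName string
	Mtu        int
	EnableIPv6 bool
}

// decode maps string labels into typed fields, returning an error on the
// first unparsable value, as fromLabels does.
func decode(labels map[string]string) (*cfg, error) {
	c := &cfg{}
	var err error
	for k, v := range labels {
		switch k {
		case "bridge.name":
			c.BridgeName = v
		case "driver.mtu":
			if c.Mtu, err = strconv.Atoi(v); err != nil {
				return nil, fmt.Errorf("failed to parse %s value %q: %v", k, v, err)
			}
		case "enable_ipv6":
			if c.EnableIPv6, err = strconv.ParseBool(v); err != nil {
				return nil, fmt.Errorf("failed to parse %s value %q: %v", k, v, err)
			}
		}
	}
	return c, nil
}

func main() {
	c, err := decode(map[string]string{
		"bridge.name": "br-demo",
		"driver.mtu":  "1500",
		"enable_ipv6": "false",
	})
	fmt.Println(c, err)
}
```

+// Validate performs a static validation on the network configuration parameters.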
+// Whatever can be assessed a priori before attempting any programming. +func (c *networkConfiguration) Validate() error { + if c.Mtu < 0 { + return ErrInvalidMtu(c.Mtu) + } + + // If bridge v4 subnet is specified + if c.AddressIPv4 != nil { + // If default gw is specified, it must be part of bridge subnet + if c.DefaultGatewayIPv4 != nil { + if !c.AddressIPv4.Contains(c.DefaultGatewayIPv4) { + return &ErrInvalidGateway{} + } + } + } + + // If default v6 gw is specified, AddressIPv6 must be specified and gw must belong to AddressIPv6 subnet + if c.EnableIPv6 && c.DefaultGatewayIPv6 != nil { + if c.AddressIPv6 == nil || !c.AddressIPv6.Contains(c.DefaultGatewayIPv6) { + return &ErrInvalidGateway{} + } + } + return nil +} + +// Conflicts check if two NetworkConfiguration objects overlap +func (c *networkConfiguration) Conflicts(o *networkConfiguration) error { + if o == nil { + return errors.New("same configuration") + } + + // Also empty, because only one network with empty name is allowed + if c.BridgeName == o.BridgeName { + return errors.New("networks have same bridge name") + } + + // They must be in different subnets + if (c.AddressIPv4 != nil && o.AddressIPv4 != nil) && + (c.AddressIPv4.Contains(o.AddressIPv4.IP) || o.AddressIPv4.Contains(c.AddressIPv4.IP)) { + return errors.New("networks have overlapping IPv4") + } + + // They must be in different v6 subnets + if (c.AddressIPv6 != nil && o.AddressIPv6 != nil) && + (c.AddressIPv6.Contains(o.AddressIPv6.IP) || o.AddressIPv6.Contains(c.AddressIPv6.IP)) { + return errors.New("networks have overlapping IPv6") + } + + return nil +} + +func (c *networkConfiguration) fromLabels(labels map[string]string) error { + var err error + for label, value := range labels { + switch label { + case BridgeName: + c.BridgeName = value + case netlabel.DriverMTU: + if c.Mtu, err = strconv.Atoi(value); err != nil { + return parseErr(label, value, err.Error()) + } + case netlabel.EnableIPv6: + if c.EnableIPv6, err = strconv.ParseBool(value); err != nil { + return parseErr(label, value, err.Error()) + } + case EnableIPMasquerade: + if c.EnableIPMasquerade, err = strconv.ParseBool(value); err != nil { + return parseErr(label, value, err.Error()) + } + case EnableICC: + if c.EnableICC, err = strconv.ParseBool(value); err != nil { + return parseErr(label, value, err.Error()) + } + case DefaultBridge: + if c.DefaultBridge, err = strconv.ParseBool(value); err != nil { + return parseErr(label, value, err.Error()) + } + case DefaultBindingIP: + if c.DefaultBindingIP = net.ParseIP(value); c.DefaultBindingIP == nil { + return parseErr(label, value, "nil ip") + } + case netlabel.ContainerIfacePrefix: + c.ContainerIfacePrefix = value + } + } + + return nil +} + +func parseErr(label, value, errString string) error { + return types.BadRequestErrorf("failed to parse %s value: %v (%s)", label, value, errString) +} + +func (n *bridgeNetwork) registerIptCleanFunc(clean iptableCleanFunc) { + n.iptCleanFuncs = append(n.iptCleanFuncs, clean) +} + +func (n *bridgeNetwork) getDriverChains() (*iptables.ChainInfo, *iptables.ChainInfo, *iptables.ChainInfo, *iptables.ChainInfo, error) { + n.Lock() + defer n.Unlock() + + if n.driver == nil { + return nil, nil, nil, nil, types.BadRequestErrorf("no driver found") + } + + return n.driver.natChain, n.driver.filterChain, n.driver.isolationChain1, n.driver.isolationChain2, nil +} + +func (n *bridgeNetwork) getNetworkBridgeName() string { + n.Lock() + config := n.config + n.Unlock() + + return config.BridgeName +} + +func (n *bridgeNetwork) 
getEndpoint(eid string) (*bridgeEndpoint, error) { + n.Lock() + defer n.Unlock() + + if eid == "" { + return nil, InvalidEndpointIDError(eid) + } + + if ep, ok := n.endpoints[eid]; ok { + return ep, nil + } + + return nil, nil +} + +// Install/Removes the iptables rules needed to isolate this network +// from each of the other networks +func (n *bridgeNetwork) isolateNetwork(others []*bridgeNetwork, enable bool) error { + n.Lock() + thisConfig := n.config + n.Unlock() + + if thisConfig.Internal { + return nil + } + + // Install the rules to isolate this network against each of the other networks + return setINC(thisConfig.BridgeName, enable) +} + +func (d *driver) configure(option map[string]interface{}) error { + var ( + config *configuration + err error + natChain *iptables.ChainInfo + filterChain *iptables.ChainInfo + isolationChain1 *iptables.ChainInfo + isolationChain2 *iptables.ChainInfo + ) + + genericData, ok := option[netlabel.GenericData] + if !ok || genericData == nil { + return nil + } + + switch opt := genericData.(type) { + case options.Generic: + opaqueConfig, err := options.GenerateFromModel(opt, &configuration{}) + if err != nil { + return err + } + config = opaqueConfig.(*configuration) + case *configuration: + config = opt + default: + return &ErrInvalidDriverConfig{} + } + + if config.EnableIPTables { + if _, err := os.Stat("/proc/sys/net/bridge"); err != nil { + if out, err := exec.Command("modprobe", "-va", "bridge", "br_netfilter").CombinedOutput(); err != nil { + logrus.Warnf("Running modprobe bridge br_netfilter failed with message: %s, error: %v", out, err) + } + } + removeIPChains() + natChain, filterChain, isolationChain1, isolationChain2, err = setupIPChains(config) + if err != nil { + return err + } + // Make sure on firewall reload, first thing being re-played is chains creation + iptables.OnReloaded(func() { logrus.Debugf("Recreating iptables chains on firewall reload"); setupIPChains(config) }) + } + + if config.EnableIPForwarding { + err = setupIPForwarding(config.EnableIPTables) + if err != nil { + logrus.Warn(err) + return err + } + } + + d.Lock() + d.natChain = natChain + d.filterChain = filterChain + d.isolationChain1 = isolationChain1 + d.isolationChain2 = isolationChain2 + d.config = config + d.Unlock() + + err = d.initStore(option) + if err != nil { + return err + } + + return nil +} + +func (d *driver) getNetwork(id string) (*bridgeNetwork, error) { + d.Lock() + defer d.Unlock() + + if id == "" { + return nil, types.BadRequestErrorf("invalid network id: %s", id) + } + + if nw, ok := d.networks[id]; ok { + return nw, nil + } + + return nil, types.NotFoundErrorf("network not found: %s", id) +} + +func parseNetworkGenericOptions(data interface{}) (*networkConfiguration, error) { + var ( + err error + config *networkConfiguration + ) + + switch opt := data.(type) { + case *networkConfiguration: + config = opt + case map[string]string: + config = &networkConfiguration{ + EnableICC: true, + EnableIPMasquerade: true, + } + err = config.fromLabels(opt) + case options.Generic: + var opaqueConfig interface{} + if opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil { + config = opaqueConfig.(*networkConfiguration) + } + default: + err = types.BadRequestErrorf("do not recognize network configuration format: %T", opt) + } + + return config, err +} + +func (c *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error { + if len(ipamV4Data) > 1 || len(ipamV6Data) > 1 { + return types.ForbiddenErrorf("bridge 
driver doesn't support multiple subnets") + } + + if len(ipamV4Data) == 0 { + return types.BadRequestErrorf("bridge network %s requires ipv4 configuration", id) + } + + if ipamV4Data[0].Gateway != nil { + c.AddressIPv4 = types.GetIPNetCopy(ipamV4Data[0].Gateway) + } + + if gw, ok := ipamV4Data[0].AuxAddresses[DefaultGatewayV4AuxKey]; ok { + c.DefaultGatewayIPv4 = gw.IP + } + + if len(ipamV6Data) > 0 { + c.AddressIPv6 = ipamV6Data[0].Pool + + if ipamV6Data[0].Gateway != nil { + c.AddressIPv6 = types.GetIPNetCopy(ipamV6Data[0].Gateway) + } + + if gw, ok := ipamV6Data[0].AuxAddresses[DefaultGatewayV6AuxKey]; ok { + c.DefaultGatewayIPv6 = gw.IP + } + } + + return nil +} + +func parseNetworkOptions(id string, option options.Generic) (*networkConfiguration, error) { + var ( + err error + config = &networkConfiguration{} + ) + + // Parse generic label first, config will be re-assigned + if genData, ok := option[netlabel.GenericData]; ok && genData != nil { + if config, err = parseNetworkGenericOptions(genData); err != nil { + return nil, err + } + } + + // Process well-known labels next + if val, ok := option[netlabel.EnableIPv6]; ok { + config.EnableIPv6 = val.(bool) + } + + if val, ok := option[netlabel.Internal]; ok { + if internal, ok := val.(bool); ok && internal { + config.Internal = true + } + } + + // Finally validate the configuration + if err = config.Validate(); err != nil { + return nil, err + } + + if config.BridgeName == "" && config.DefaultBridge == false { + config.BridgeName = "br-" + id[:12] + } + + exists, err := bridgeInterfaceExists(config.BridgeName) + if err != nil { + return nil, err + } + + if !exists { + config.BridgeIfaceCreator = ifaceCreatedByLibnetwork + } else { + config.BridgeIfaceCreator = ifaceCreatedByUser + } + + config.ID = id + return config, nil +} + +// Returns the non link-local IPv6 subnet for the containers attached to this bridge if found, nil otherwise +func getV6Network(config *networkConfiguration, i *bridgeInterface) *net.IPNet { + if config.AddressIPv6 != nil { + return config.AddressIPv6 + } + if i.bridgeIPv6 != nil && i.bridgeIPv6.IP != nil && !i.bridgeIPv6.IP.IsLinkLocalUnicast() { + return i.bridgeIPv6 + } + + return nil +} + +// Return a slice of networks over which caller can iterate safely +func (d *driver) getNetworks() []*bridgeNetwork { + d.Lock() + defer d.Unlock() + + ls := make([]*bridgeNetwork, 0, len(d.networks)) + for _, nw := range d.networks { + ls = append(ls, nw) + } + return ls +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +// Create a new network using bridge plugin +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" { + return types.BadRequestErrorf("ipv4 pool is empty") + } + // Sanity checks + d.Lock() + if _, ok := d.networks[id]; ok { + d.Unlock() + return types.ForbiddenErrorf("network %s exists", id) + } + d.Unlock() + + // Parse and validate the config. 
It should not conflict with existing networks' config
+	config, err := parseNetworkOptions(id, option)
+	if err != nil {
+		return err
+	}
+
+	if err = config.processIPAM(id, ipV4Data, ipV6Data); err != nil {
+		return err
+	}
+
+	// start the critical section; from this point onward we are dealing with the list of networks,
+	// so to be consistent we cannot allow the list to change
+	d.configNetwork.Lock()
+	defer d.configNetwork.Unlock()
+
+	// check network conflicts
+	if err = d.checkConflict(config); err != nil {
+		nerr, ok := err.(defaultBridgeNetworkConflict)
+		if !ok {
+			return err
+		}
+		// Got a conflict with a stale default network, clean that up and continue
+		logrus.Warn(nerr)
+		d.deleteNetwork(nerr.ID)
+	}
+
+	// there is no conflict, now create the network
+	if err = d.createNetwork(config); err != nil {
+		return err
+	}
+
+	return d.storeUpdate(config)
+}
+
+func (d *driver) checkConflict(config *networkConfiguration) error {
+	networkList := d.getNetworks()
+	for _, nw := range networkList {
+		nw.Lock()
+		nwConfig := nw.config
+		nw.Unlock()
+		if err := nwConfig.Conflicts(config); err != nil {
+			if config.DefaultBridge {
+				// We encountered and identified a stale default network
+				// We must delete it as libnetwork is the source of truth
+				// The default network being created must be the only one
+				// This can happen only from docker 1.12 onward
+				logrus.Infof("Found stale default bridge network %s (%s)", nwConfig.ID, nwConfig.BridgeName)
+				return defaultBridgeNetworkConflict{nwConfig.ID}
+			}
+
+			return types.ForbiddenErrorf("cannot create network %s (%s): conflicts with network %s (%s): %s",
+				config.ID, config.BridgeName, nwConfig.ID, nwConfig.BridgeName, err.Error())
+		}
+	}
+	return nil
+}
+
+func (d *driver) createNetwork(config *networkConfiguration) error {
+	var err error
+
+	defer osl.InitOSContext()()
+
+	networkList := d.getNetworks()
+
+	// Initialize handle when needed
+	d.Lock()
+	if d.nlh == nil {
+		d.nlh = ns.NlHandle()
+	}
+	d.Unlock()
+
+	// Create or retrieve the bridge L3 interface
+	bridgeIface, err := newInterface(d.nlh, config)
+	if err != nil {
+		return err
+	}
+
+	// Create and set network handler in driver
+	network := &bridgeNetwork{
+		id:         config.ID,
+		endpoints:  make(map[string]*bridgeEndpoint),
+		config:     config,
+		portMapper: portmapper.New(d.config.UserlandProxyPath),
+		bridge:     bridgeIface,
+		driver:     d,
+	}
+
+	d.Lock()
+	d.networks[config.ID] = network
+	d.Unlock()
+
+	// On failure make sure to reset driver network handler to nil
+	defer func() {
+		if err != nil {
+			d.Lock()
+			delete(d.networks, config.ID)
+			d.Unlock()
+		}
+	}()
+
+	// Add inter-network communication rules.
+	setupNetworkIsolationRules := func(config *networkConfiguration, i *bridgeInterface) error {
+		if err := network.isolateNetwork(networkList, true); err != nil {
+			if err = network.isolateNetwork(networkList, false); err != nil {
+				logrus.Warnf("Failed on removing the inter-network iptables rules on cleanup: %v", err)
+			}
+			return err
+		}
+		// register the cleanup function
+		network.registerIptCleanFunc(func() error {
+			nwList := d.getNetworks()
+			return network.isolateNetwork(nwList, false)
+		})
+		return nil
+	}
+
+	// Prepare the bridge setup configuration
+	bridgeSetup := newBridgeSetup(config, bridgeIface)
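createNetwork drives all host-side programming through a queue of setup steps that are applied in order, aborting at the first failure; the conditional {Condition, Fn} table below feeds that queue. A minimal sketch of the pattern (types hypothetical, not the vendored bridgeSetup):

```go
package main

import (
	"errors"
	"fmt"
)

type setupStep func(bridgeName string) error

type setup struct {
	bridgeName string
	steps      []setupStep
}

func (s *setup) queueStep(st setupStep) { s.steps = append(s.steps, st) }

// apply runs the queued steps in order and aborts at the first error.
func (s *setup) apply() error {
	for _, st := range s.steps {
		if err := st(s.bridgeName); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	enableIPv6 := false
	s := &setup{bridgeName: "br-0123456789ab"}

	s.queueStep(func(b string) error { fmt.Println("create device", b); return nil })
	s.queueStep(func(b string) error { fmt.Println("assign IPv4 to", b); return nil })

	// Steps are queued conditionally, like the {Condition, Fn} table below.
	if enableIPv6 {
		s.queueStep(func(b string) error { return errors.New("ipv6 not configured") })
	}

	if err := s.apply(); err != nil {
		fmt.Println("setup aborted:", err)
	}
}
```

+
+	// If the bridge interface doesn't exist, we need to start the setup steps
+	// by creating a new device and assigning it an IPv4 address.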
+	bridgeAlreadyExists := bridgeIface.exists()
+	if !bridgeAlreadyExists {
+		bridgeSetup.queueStep(setupDevice)
+	}
+
+	// Even if a bridge exists try to setup IPv4.
+	bridgeSetup.queueStep(setupBridgeIPv4)
+
+	enableIPv6Forwarding := d.config.EnableIPForwarding && config.AddressIPv6 != nil
+
+	// Conditionally queue setup steps depending on configuration values.
+	for _, step := range []struct {
+		Condition bool
+		Fn        setupStep
+	}{
+		// Enable IPv6 on the bridge if required. We do this even for a
+		// previously existing bridge, as it may be here from a previous
+		// installation where IPv6 wasn't supported yet and needs to be
+		// assigned an IPv6 link-local address.
+		{config.EnableIPv6, setupBridgeIPv6},
+
+		// We ensure that the bridge has the expected IPv4 and IPv6 addresses in
+		// the case of a previously existing device.
+		{bridgeAlreadyExists, setupVerifyAndReconcile},
+
+		// Enable IPv6 Forwarding
+		{enableIPv6Forwarding, setupIPv6Forwarding},
+
+		// Setup Loopback Addresses Routing
+		{!d.config.EnableUserlandProxy, setupLoopbackAdressesRouting},
+
+		// Setup IPTables.
+		{d.config.EnableIPTables, network.setupIPTables},
+
+		// We want to track firewalld configuration so that
+		// if it is started/reloaded, the rules can be applied correctly
+		{d.config.EnableIPTables, network.setupFirewalld},
+
+		// Setup DefaultGatewayIPv4
+		{config.DefaultGatewayIPv4 != nil, setupGatewayIPv4},
+
+		// Setup DefaultGatewayIPv6
+		{config.DefaultGatewayIPv6 != nil, setupGatewayIPv6},
+
+		// Add inter-network communication rules.
+		{d.config.EnableIPTables, setupNetworkIsolationRules},
+
+		// Configure bridge networking filtering if ICC is off and IP tables are enabled
+		{!config.EnableICC && d.config.EnableIPTables, setupBridgeNetFiltering},
+	} {
+		if step.Condition {
+			bridgeSetup.queueStep(step.Fn)
+		}
+	}
+
+	// Apply the prepared list of steps, and abort at the first error.
+	bridgeSetup.queueStep(setupDeviceUp)
+	return bridgeSetup.apply()
+}
+
+func (d *driver) DeleteNetwork(nid string) error {
+
+	d.configNetwork.Lock()
+	defer d.configNetwork.Unlock()
+
+	return d.deleteNetwork(nid)
+}
+
+func (d *driver) deleteNetwork(nid string) error {
+	var err error
+
+	defer osl.InitOSContext()()
+	// Get network handler and remove it from driver
+	d.Lock()
+	n, ok := d.networks[nid]
+	d.Unlock()
+
+	if !ok {
+		return types.InternalMaskableErrorf("network %s does not exist", nid)
+	}
+
+	n.Lock()
+	config := n.config
+	n.Unlock()
+
+	// delete endpoints belonging to this network
+	for _, ep := range n.endpoints {
+		if err := n.releasePorts(ep); err != nil {
+			logrus.Warn(err)
+		}
+		if link, err := d.nlh.LinkByName(ep.srcName); err == nil {
+			if err := d.nlh.LinkDel(link); err != nil {
+				logrus.WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id)
+			}
+		}
+
+		if err := d.storeDelete(ep); err != nil {
+			logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err)
+		}
+	}
+
+	d.Lock()
+	delete(d.networks, nid)
+	d.Unlock()
+
+	// On failure set network handler back in driver, but
+	// only if it is not already taken over by some other thread
+	defer func() {
+		if err != nil {
+			d.Lock()
+			if _, ok := d.networks[nid]; !ok {
+				d.networks[nid] = n
+			}
+			d.Unlock()
+		}
+	}()
+
+	switch config.BridgeIfaceCreator {
+	case ifaceCreatedByLibnetwork, ifaceCreatorUnknown:
+		// We only delete the bridge if it was created by the bridge driver and
+		// it is not the default one (to keep the backward-compatible behavior)
+ if !config.DefaultBridge { + if err := d.nlh.LinkDel(n.bridge.Link); err != nil { + logrus.Warnf("Failed to remove bridge interface %s on network %s delete: %v", config.BridgeName, nid, err) + } + } + case ifaceCreatedByUser: + // Don't delete the bridge interface if it was not created by libnetwork. + } + + // clean all relevant iptables rules + for _, cleanFunc := range n.iptCleanFuncs { + if errClean := cleanFunc(); errClean != nil { + logrus.Warnf("Failed to clean iptables rules for bridge network: %v", errClean) + } + } + return d.storeDelete(config) +} + +func addToBridge(nlh *netlink.Handle, ifaceName, bridgeName string) error { + link, err := nlh.LinkByName(ifaceName) + if err != nil { + return fmt.Errorf("could not find interface %s: %v", ifaceName, err) + } + if err = nlh.LinkSetMaster(link, + &netlink.Bridge{LinkAttrs: netlink.LinkAttrs{Name: bridgeName}}); err != nil { + logrus.Debugf("Failed to add %s to bridge via netlink.Trying ioctl: %v", ifaceName, err) + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return fmt.Errorf("could not find network interface %s: %v", ifaceName, err) + } + + master, err := net.InterfaceByName(bridgeName) + if err != nil { + return fmt.Errorf("could not find bridge %s: %v", bridgeName, err) + } + + return ioctlAddToBridge(iface, master) + } + return nil +} + +func setHairpinMode(nlh *netlink.Handle, link netlink.Link, enable bool) error { + err := nlh.LinkSetHairpin(link, enable) + if err != nil && err != syscall.EINVAL { + // If error is not EINVAL something else went wrong, bail out right away + return fmt.Errorf("unable to set hairpin mode on %s via netlink: %v", + link.Attrs().Name, err) + } + + // Hairpin mode successfully set up + if err == nil { + return nil + } + + // The netlink method failed with EINVAL which is probably because of an older + // kernel. Try one more time via the sysfs method. 
+ path := filepath.Join("/sys/class/net", link.Attrs().Name, "brport/hairpin_mode") + + var val []byte + if enable { + val = []byte{'1', '\n'} + } else { + val = []byte{'0', '\n'} + } + + if err := ioutil.WriteFile(path, val, 0644); err != nil { + return fmt.Errorf("unable to set hairpin mode on %s via sysfs: %v", link.Attrs().Name, err) + } + + return nil +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error { + defer osl.InitOSContext()() + + if ifInfo == nil { + return errors.New("invalid interface info passed") + } + + // Get the network handler and make sure it exists + d.Lock() + n, ok := d.networks[nid] + dconfig := d.config + d.Unlock() + + if !ok { + return types.NotFoundErrorf("network %s does not exist", nid) + } + if n == nil { + return driverapi.ErrNoNetwork(nid) + } + + // Sanity check + n.Lock() + if n.id != nid { + n.Unlock() + return InvalidNetworkIDError(nid) + } + n.Unlock() + + // Check if endpoint id is good and retrieve correspondent endpoint + ep, err := n.getEndpoint(eid) + if err != nil { + return err + } + + // Endpoint with that id exists either on desired or other sandbox + if ep != nil { + return driverapi.ErrEndpointExists(eid) + } + + // Try to convert the options to endpoint configuration + epConfig, err := parseEndpointOptions(epOptions) + if err != nil { + return err + } + + // Create and add the endpoint + n.Lock() + endpoint := &bridgeEndpoint{id: eid, nid: nid, config: epConfig} + n.endpoints[eid] = endpoint + n.Unlock() + + // On failure make sure to remove the endpoint + defer func() { + if err != nil { + n.Lock() + delete(n.endpoints, eid) + n.Unlock() + } + }() + + // Generate a name for what will be the host side pipe interface + hostIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen) + if err != nil { + return err + } + + // Generate a name for what will be the sandbox side pipe interface + containerIfName, err := netutils.GenerateIfaceName(d.nlh, vethPrefix, vethLen) + if err != nil { + return err + } + + // Generate and add the interface pipe host <-> sandbox + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: hostIfName, TxQLen: 0}, + PeerName: containerIfName} + if err = d.nlh.LinkAdd(veth); err != nil { + return types.InternalErrorf("failed to add the host (%s) <=> sandbox (%s) pair interfaces: %v", hostIfName, containerIfName, err) + } + + // Get the host side pipe interface handler + host, err := d.nlh.LinkByName(hostIfName) + if err != nil { + return types.InternalErrorf("failed to find host side interface %s: %v", hostIfName, err) + } + defer func() { + if err != nil { + if err := d.nlh.LinkDel(host); err != nil { + logrus.WithError(err).Warnf("Failed to delete host side interface (%s)'s link", hostIfName) + } + } + }() + + // Get the sandbox side pipe interface handler + sbox, err := d.nlh.LinkByName(containerIfName) + if err != nil { + return types.InternalErrorf("failed to find sandbox side interface %s: %v", containerIfName, err) + } + defer func() { + if err != nil { + if err := d.nlh.LinkDel(sbox); err != nil { + logrus.WithError(err).Warnf("Failed to delete sandbox side interface (%s)'s link", containerIfName) + } + } + }() + + n.Lock() + config := n.config + n.Unlock() + + // Add bridge inherited attributes to pipe interfaces + if config.Mtu != 0 { + err = d.nlh.LinkSetMTU(host, config.Mtu) + if err != nil { + return types.InternalErrorf("failed to set MTU on host interface %s: %v", hostIfName, err) + } + err = 
d.nlh.LinkSetMTU(sbox, config.Mtu) + if err != nil { + return types.InternalErrorf("failed to set MTU on sandbox interface %s: %v", containerIfName, err) + } + } + + // Attach host side pipe interface into the bridge + if err = addToBridge(d.nlh, hostIfName, config.BridgeName); err != nil { + return fmt.Errorf("adding interface %s to bridge %s failed: %v", hostIfName, config.BridgeName, err) + } + + if !dconfig.EnableUserlandProxy { + err = setHairpinMode(d.nlh, host, true) + if err != nil { + return err + } + } + + // Store the sandbox side pipe interface parameters + endpoint.srcName = containerIfName + endpoint.macAddress = ifInfo.MacAddress() + endpoint.addr = ifInfo.Address() + endpoint.addrv6 = ifInfo.AddressIPv6() + + // Set the sbox's MAC if not provided. If specified, use the one configured by user, otherwise generate one based on IP. + if endpoint.macAddress == nil { + endpoint.macAddress = electMacAddress(epConfig, endpoint.addr.IP) + if err = ifInfo.SetMacAddress(endpoint.macAddress); err != nil { + return err + } + } + + // Up the host interface after finishing all netlink configuration + if err = d.nlh.LinkSetUp(host); err != nil { + return fmt.Errorf("could not set link up for host interface %s: %v", hostIfName, err) + } + + if endpoint.addrv6 == nil && config.EnableIPv6 { + var ip6 net.IP + network := n.bridge.bridgeIPv6 + if config.AddressIPv6 != nil { + network = config.AddressIPv6 + } + + ones, _ := network.Mask.Size() + if ones > 80 { + err = types.ForbiddenErrorf("Cannot self generate an IPv6 address on network %v: At least 48 host bits are needed.", network) + return err + } + + ip6 = make(net.IP, len(network.IP)) + copy(ip6, network.IP) + for i, h := range endpoint.macAddress { + ip6[i+10] = h + } + + endpoint.addrv6 = &net.IPNet{IP: ip6, Mask: network.Mask} + if err = ifInfo.SetIPAddress(endpoint.addrv6); err != nil { + return err + } + } + + if err = d.storeUpdate(endpoint); err != nil { + return fmt.Errorf("failed to save bridge endpoint %s to store: %v", endpoint.id[0:7], err) + } + + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + var err error + + defer osl.InitOSContext()() + + // Get the network handler and make sure it exists + d.Lock() + n, ok := d.networks[nid] + d.Unlock() + + if !ok { + return types.InternalMaskableErrorf("network %s does not exist", nid) + } + if n == nil { + return driverapi.ErrNoNetwork(nid) + } + + // Sanity Check + n.Lock() + if n.id != nid { + n.Unlock() + return InvalidNetworkIDError(nid) + } + n.Unlock() + + // Check endpoint id and if an endpoint is actually there + ep, err := n.getEndpoint(eid) + if err != nil { + return err + } + if ep == nil { + return EndpointNotFoundError(eid) + } + + // Remove it + n.Lock() + delete(n.endpoints, eid) + n.Unlock() + + // On failure make sure to set back ep in n.endpoints, but only + // if it hasn't been taken over already by some other thread. + defer func() { + if err != nil { + n.Lock() + if _, ok := n.endpoints[eid]; !ok { + n.endpoints[eid] = ep + } + n.Unlock() + } + }() + + // Try removal of link. Discard error: it is a best effort. + // Also make sure defer does not see this error either. 
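+	// The := in the if statement below deliberately shadows the function-level
+	// err, so the deferred re-insertion of the endpoint above never observes
+	// this best-effort failure.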
+ if link, err := d.nlh.LinkByName(ep.srcName); err == nil { + if err := d.nlh.LinkDel(link); err != nil { + logrus.WithError(err).Errorf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + } + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err) + } + + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + // Get the network handler and make sure it exists + d.Lock() + n, ok := d.networks[nid] + d.Unlock() + if !ok { + return nil, types.NotFoundErrorf("network %s does not exist", nid) + } + if n == nil { + return nil, driverapi.ErrNoNetwork(nid) + } + + // Sanity check + n.Lock() + if n.id != nid { + n.Unlock() + return nil, InvalidNetworkIDError(nid) + } + n.Unlock() + + // Check if endpoint id is good and retrieve correspondent endpoint + ep, err := n.getEndpoint(eid) + if err != nil { + return nil, err + } + if ep == nil { + return nil, driverapi.ErrNoEndpoint(eid) + } + + m := make(map[string]interface{}) + + if ep.extConnConfig != nil && ep.extConnConfig.ExposedPorts != nil { + // Return a copy of the config data + epc := make([]types.TransportPort, 0, len(ep.extConnConfig.ExposedPorts)) + for _, tp := range ep.extConnConfig.ExposedPorts { + epc = append(epc, tp.GetCopy()) + } + m[netlabel.ExposedPorts] = epc + } + + if ep.portMapping != nil { + // Return a copy of the operational data + pmc := make([]types.PortBinding, 0, len(ep.portMapping)) + for _, pm := range ep.portMapping { + pmc = append(pmc, pm.GetCopy()) + } + m[netlabel.PortMap] = pmc + } + + if len(ep.macAddress) != 0 { + m[netlabel.MacAddress] = ep.macAddress + } + + return m, nil +} + +// Join method is invoked when a Sandbox is attached to an endpoint. +func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + defer osl.InitOSContext()() + + network, err := d.getNetwork(nid) + if err != nil { + return err + } + + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + if endpoint == nil { + return EndpointNotFoundError(eid) + } + + endpoint.containerConfig, err = parseContainerOptions(options) + if err != nil { + return err + } + + iNames := jinfo.InterfaceName() + containerVethPrefix := defaultContainerVethPrefix + if network.config.ContainerIfacePrefix != "" { + containerVethPrefix = network.config.ContainerIfacePrefix + } + err = iNames.SetNames(endpoint.srcName, containerVethPrefix) + if err != nil { + return err + } + + err = jinfo.SetGateway(network.bridge.gatewayIPv4) + if err != nil { + return err + } + + err = jinfo.SetGatewayIPv6(network.bridge.gatewayIPv6) + if err != nil { + return err + } + + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. 
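+// When inter-container communication is disabled on the network, it also
+// removes the iptables rules that were set up for container links.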
+func (d *driver) Leave(nid, eid string) error { + defer osl.InitOSContext()() + + network, err := d.getNetwork(nid) + if err != nil { + return types.InternalMaskableErrorf("%s", err) + } + + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + if endpoint == nil { + return EndpointNotFoundError(eid) + } + + if !network.config.EnableICC { + if err = d.link(network, endpoint, false); err != nil { + return err + } + } + + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + defer osl.InitOSContext()() + + network, err := d.getNetwork(nid) + if err != nil { + return err + } + + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + if endpoint == nil { + return EndpointNotFoundError(eid) + } + + endpoint.extConnConfig, err = parseConnectivityOptions(options) + if err != nil { + return err + } + + // Program any required port mapping and store them in the endpoint + endpoint.portMapping, err = network.allocatePorts(endpoint, network.config.DefaultBindingIP, d.config.EnableUserlandProxy) + if err != nil { + return err + } + + defer func() { + if err != nil { + if e := network.releasePorts(endpoint); e != nil { + logrus.Errorf("Failed to release ports allocated for the bridge endpoint %s on failure %v because of %v", + eid, err, e) + } + endpoint.portMapping = nil + } + }() + + if err = d.storeUpdate(endpoint); err != nil { + return fmt.Errorf("failed to update bridge endpoint %s to store: %v", endpoint.id[0:7], err) + } + + if !network.config.EnableICC { + return d.link(network, endpoint, true) + } + + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + defer osl.InitOSContext()() + + network, err := d.getNetwork(nid) + if err != nil { + return err + } + + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + if endpoint == nil { + return EndpointNotFoundError(eid) + } + + err = network.releasePorts(endpoint) + if err != nil { + logrus.Warn(err) + } + + endpoint.portMapping = nil + + // Clean the connection tracker state of the host for the specific endpoint + // The host kernel keeps track of the connections (TCP and UDP), so if a new endpoint gets the same IP of + // this one (that is going down), is possible that some of the packets would not be routed correctly inside + // the new endpoint + // Deeper details: https://github.com/docker/docker/issues/8795 + clearEndpointConnections(d.nlh, endpoint) + + if err = d.storeUpdate(endpoint); err != nil { + return fmt.Errorf("failed to update bridge endpoint %s to store: %v", endpoint.id[0:7], err) + } + + return nil +} + +func (d *driver) link(network *bridgeNetwork, endpoint *bridgeEndpoint, enable bool) error { + var err error + + cc := endpoint.containerConfig + if cc == nil { + return nil + } + ec := endpoint.extConnConfig + if ec == nil { + return nil + } + + if ec.ExposedPorts != nil { + for _, p := range cc.ParentEndpoints { + var parentEndpoint *bridgeEndpoint + parentEndpoint, err = network.getEndpoint(p) + if err != nil { + return err + } + if parentEndpoint == nil { + err = InvalidEndpointIDError(p) + return err + } + + l := newLink(parentEndpoint.addr.IP.String(), + endpoint.addr.IP.String(), + ec.ExposedPorts, network.config.BridgeName) + if enable { + err = l.Enable() + if err != nil { + return err + } + defer func() { + if err != nil { + l.Disable() + } + }() + } else { + l.Disable() + } + } + } + + for _, c := range cc.ChildEndpoints { + var 
childEndpoint *bridgeEndpoint + childEndpoint, err = network.getEndpoint(c) + if err != nil { + return err + } + if childEndpoint == nil { + err = InvalidEndpointIDError(c) + return err + } + if childEndpoint.extConnConfig == nil || childEndpoint.extConnConfig.ExposedPorts == nil { + continue + } + + l := newLink(endpoint.addr.IP.String(), + childEndpoint.addr.IP.String(), + childEndpoint.extConnConfig.ExposedPorts, network.config.BridgeName) + if enable { + err = l.Enable() + if err != nil { + return err + } + defer func() { + if err != nil { + l.Disable() + } + }() + } else { + l.Disable() + } + } + + return nil +} + +func (d *driver) Type() string { + return networkType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func parseEndpointOptions(epOptions map[string]interface{}) (*endpointConfiguration, error) { + if epOptions == nil { + return nil, nil + } + + ec := &endpointConfiguration{} + + if opt, ok := epOptions[netlabel.MacAddress]; ok { + if mac, ok := opt.(net.HardwareAddr); ok { + ec.MacAddress = mac + } else { + return nil, &ErrInvalidEndpointConfig{} + } + } + + return ec, nil +} + +func parseContainerOptions(cOptions map[string]interface{}) (*containerConfiguration, error) { + if cOptions == nil { + return nil, nil + } + genericData := cOptions[netlabel.GenericData] + if genericData == nil { + return nil, nil + } + switch opt := genericData.(type) { + case options.Generic: + opaqueConfig, err := options.GenerateFromModel(opt, &containerConfiguration{}) + if err != nil { + return nil, err + } + return opaqueConfig.(*containerConfiguration), nil + case *containerConfiguration: + return opt, nil + default: + return nil, nil + } +} + +func parseConnectivityOptions(cOptions map[string]interface{}) (*connectivityConfiguration, error) { + if cOptions == nil { + return nil, nil + } + + cc := &connectivityConfiguration{} + + if opt, ok := cOptions[netlabel.PortMap]; ok { + if pb, ok := opt.([]types.PortBinding); ok { + cc.PortBindings = pb + } else { + return nil, types.BadRequestErrorf("Invalid port mapping data in connectivity configuration: %v", opt) + } + } + + if opt, ok := cOptions[netlabel.ExposedPorts]; ok { + if ports, ok := opt.([]types.TransportPort); ok { + cc.ExposedPorts = ports + } else { + return nil, types.BadRequestErrorf("Invalid exposed ports data in connectivity configuration: %v", opt) + } + } + + return cc, nil +} + +func electMacAddress(epConfig *endpointConfiguration, ip net.IP) net.HardwareAddr { + if epConfig != nil && epConfig.MacAddress != nil { + return epConfig.MacAddress + } + return netutils.GenerateMACFromIP(ip) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge_store.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge_store.go new file mode 100644 index 0000000000..b0e4ff02b5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge_store.go @@ -0,0 +1,388 @@ +package bridge + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/netlabel" + 
"github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + // network config prefix was not specific enough. + // To be backward compatible, need custom endpoint + // prefix with different root + bridgePrefix = "bridge" + bridgeEndpointPrefix = "bridge-endpoint" +) + +func (d *driver) initStore(option map[string]interface{}) error { + if data, ok := option[netlabel.LocalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("bridge driver failed to initialize data store: %v", err) + } + + err = d.populateNetworks() + if err != nil { + return err + } + + err = d.populateEndpoints() + if err != nil { + return err + } + } + + return nil +} + +func (d *driver) populateNetworks() error { + kvol, err := d.store.List(datastore.Key(bridgePrefix), &networkConfiguration{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get bridge network configurations from store: %v", err) + } + + // It's normal for network configuration state to be empty. Just return. + if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ncfg := kvo.(*networkConfiguration) + if err = d.createNetwork(ncfg); err != nil { + logrus.Warnf("could not create bridge network for id %s bridge name %s while booting up from persistent state: %v", ncfg.ID, ncfg.BridgeName, err) + } + logrus.Debugf("Network (%s) restored", ncfg.ID[0:7]) + } + + return nil +} + +func (d *driver) populateEndpoints() error { + kvol, err := d.store.List(datastore.Key(bridgeEndpointPrefix), &bridgeEndpoint{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get bridge endpoints from store: %v", err) + } + + if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ep := kvo.(*bridgeEndpoint) + n, ok := d.networks[ep.nid] + if !ok { + logrus.Debugf("Network (%s) not found for restored bridge endpoint (%s)", ep.nid[0:7], ep.id[0:7]) + logrus.Debugf("Deleting stale bridge endpoint (%s) from store", ep.id[0:7]) + if err := d.storeDelete(ep); err != nil { + logrus.Debugf("Failed to delete stale bridge endpoint (%s) from store", ep.id[0:7]) + } + continue + } + n.endpoints[ep.id] = ep + n.restorePortAllocations(ep) + logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7]) + } + + return nil +} + +func (d *driver) storeUpdate(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Warnf("bridge store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + return nil + } + + if err := d.store.PutObjectAtomic(kvObject); err != nil { + return fmt.Errorf("failed to update bridge store for object type %T: %v", kvObject, err) + } + + return nil +} + +func (d *driver) storeDelete(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Debugf("bridge store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + return nil + } + +retry: + if err := d.store.DeleteObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { + return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) + } + goto retry + } + return err + } + + return nil +} + +func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) { + nMap := make(map[string]interface{}) + nMap["ID"] = ncfg.ID + nMap["BridgeName"] = ncfg.BridgeName + nMap["EnableIPv6"] = ncfg.EnableIPv6 + nMap["EnableIPMasquerade"] = ncfg.EnableIPMasquerade + nMap["EnableICC"] = ncfg.EnableICC + nMap["Mtu"] = ncfg.Mtu + nMap["Internal"] = ncfg.Internal + nMap["DefaultBridge"] = ncfg.DefaultBridge + nMap["DefaultBindingIP"] = ncfg.DefaultBindingIP.String() + nMap["DefaultGatewayIPv4"] = ncfg.DefaultGatewayIPv4.String() + nMap["DefaultGatewayIPv6"] = ncfg.DefaultGatewayIPv6.String() + nMap["ContainerIfacePrefix"] = ncfg.ContainerIfacePrefix + nMap["BridgeIfaceCreator"] = ncfg.BridgeIfaceCreator + + if ncfg.AddressIPv4 != nil { + nMap["AddressIPv4"] = ncfg.AddressIPv4.String() + } + + if ncfg.AddressIPv6 != nil { + nMap["AddressIPv6"] = ncfg.AddressIPv6.String() + } + + return json.Marshal(nMap) +} + +func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error { + var ( + err error + nMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &nMap); err != nil { + return err + } + + if v, ok := nMap["AddressIPv4"]; ok { + if ncfg.AddressIPv4, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode bridge network address IPv4 after json unmarshal: %s", v.(string)) + } + } + + if v, ok := nMap["AddressIPv6"]; ok { + if ncfg.AddressIPv6, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode bridge network address IPv6 after json unmarshal: %s", v.(string)) + } + } + + if v, ok := nMap["ContainerIfacePrefix"]; ok { + ncfg.ContainerIfacePrefix = v.(string) + } + + ncfg.DefaultBridge = nMap["DefaultBridge"].(bool) + ncfg.DefaultBindingIP = net.ParseIP(nMap["DefaultBindingIP"].(string)) + ncfg.DefaultGatewayIPv4 = net.ParseIP(nMap["DefaultGatewayIPv4"].(string)) + ncfg.DefaultGatewayIPv6 = net.ParseIP(nMap["DefaultGatewayIPv6"].(string)) + ncfg.ID = nMap["ID"].(string) + ncfg.BridgeName = nMap["BridgeName"].(string) + ncfg.EnableIPv6 = nMap["EnableIPv6"].(bool) + ncfg.EnableIPMasquerade = nMap["EnableIPMasquerade"].(bool) + ncfg.EnableICC = nMap["EnableICC"].(bool) + ncfg.Mtu = int(nMap["Mtu"].(float64)) + if v, ok := nMap["Internal"]; ok { + ncfg.Internal = v.(bool) + } + + if v, ok := nMap["BridgeIfaceCreator"]; ok { + ncfg.BridgeIfaceCreator = ifaceCreator(v.(float64)) + } + + return nil +} + +func (ncfg *networkConfiguration) Key() []string { + return []string{bridgePrefix, ncfg.ID} +} + +func (ncfg *networkConfiguration) KeyPrefix() []string { + return []string{bridgePrefix} +} + +func (ncfg *networkConfiguration) Value() []byte { + b, err := json.Marshal(ncfg) + if err != nil { + return nil + } + return b +} + +func (ncfg *networkConfiguration) SetValue(value []byte) error { + return json.Unmarshal(value, ncfg) +} + +func (ncfg *networkConfiguration) Index() uint64 { + return ncfg.dbIndex +} + +func (ncfg *networkConfiguration) SetIndex(index uint64) { + ncfg.dbIndex = index + ncfg.dbExists = true +} + +func (ncfg *networkConfiguration) Exists() bool { + return 
ncfg.dbExists +} + +func (ncfg *networkConfiguration) Skip() bool { + return false +} + +func (ncfg *networkConfiguration) New() datastore.KVObject { + return &networkConfiguration{} +} + +func (ncfg *networkConfiguration) CopyTo(o datastore.KVObject) error { + dstNcfg := o.(*networkConfiguration) + *dstNcfg = *ncfg + return nil +} + +func (ncfg *networkConfiguration) DataScope() string { + return datastore.LocalScope +} + +func (ep *bridgeEndpoint) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + epMap["id"] = ep.id + epMap["nid"] = ep.nid + epMap["SrcName"] = ep.srcName + epMap["MacAddress"] = ep.macAddress.String() + epMap["Addr"] = ep.addr.String() + if ep.addrv6 != nil { + epMap["Addrv6"] = ep.addrv6.String() + } + epMap["Config"] = ep.config + epMap["ContainerConfig"] = ep.containerConfig + epMap["ExternalConnConfig"] = ep.extConnConfig + epMap["PortMapping"] = ep.portMapping + + return json.Marshal(epMap) +} + +func (ep *bridgeEndpoint) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &epMap); err != nil { + return fmt.Errorf("Failed to unmarshal to bridge endpoint: %v", err) + } + + if v, ok := epMap["MacAddress"]; ok { + if ep.macAddress, err = net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode bridge endpoint MAC address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addr"]; ok { + if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode bridge endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addrv6"]; ok { + if ep.addrv6, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode bridge endpoint IPv6 address (%s) after json unmarshal: %v", v.(string), err) + } + } + ep.id = epMap["id"].(string) + ep.nid = epMap["nid"].(string) + ep.srcName = epMap["SrcName"].(string) + d, _ := json.Marshal(epMap["Config"]) + if err := json.Unmarshal(d, &ep.config); err != nil { + logrus.Warnf("Failed to decode endpoint config %v", err) + } + d, _ = json.Marshal(epMap["ContainerConfig"]) + if err := json.Unmarshal(d, &ep.containerConfig); err != nil { + logrus.Warnf("Failed to decode endpoint container config %v", err) + } + d, _ = json.Marshal(epMap["ExternalConnConfig"]) + if err := json.Unmarshal(d, &ep.extConnConfig); err != nil { + logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err) + } + d, _ = json.Marshal(epMap["PortMapping"]) + if err := json.Unmarshal(d, &ep.portMapping); err != nil { + logrus.Warnf("Failed to decode endpoint port mapping %v", err) + } + + return nil +} + +func (ep *bridgeEndpoint) Key() []string { + return []string{bridgeEndpointPrefix, ep.id} +} + +func (ep *bridgeEndpoint) KeyPrefix() []string { + return []string{bridgeEndpointPrefix} +} + +func (ep *bridgeEndpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *bridgeEndpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *bridgeEndpoint) Index() uint64 { + return ep.dbIndex +} + +func (ep *bridgeEndpoint) SetIndex(index uint64) { + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *bridgeEndpoint) Exists() bool { + return ep.dbExists +} + +func (ep *bridgeEndpoint) Skip() bool { + return false +} + +func (ep *bridgeEndpoint) New() datastore.KVObject { + return &bridgeEndpoint{} +} + +func 
(ep *bridgeEndpoint) CopyTo(o datastore.KVObject) error {
+	dstEp := o.(*bridgeEndpoint)
+	*dstEp = *ep
+	return nil
+}
+
+func (ep *bridgeEndpoint) DataScope() string {
+	return datastore.LocalScope
+}
+
+func (n *bridgeNetwork) restorePortAllocations(ep *bridgeEndpoint) {
+	if ep.extConnConfig == nil ||
+		ep.extConnConfig.ExposedPorts == nil ||
+		ep.extConnConfig.PortBindings == nil {
+		return
+	}
+	tmp := ep.extConnConfig.PortBindings
+	ep.extConnConfig.PortBindings = ep.portMapping
+	_, err := n.allocatePorts(ep, n.config.DefaultBindingIP, n.driver.config.EnableUserlandProxy)
+	if err != nil {
+		logrus.Warnf("Failed to reserve existing port mapping for endpoint %s: %v", ep.id[0:7], err)
+	}
+	ep.extConnConfig.PortBindings = tmp
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/errors.go b/vendor/github.com/docker/libnetwork/drivers/bridge/errors.go
new file mode 100644
index 0000000000..93960794cb
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/errors.go
@@ -0,0 +1,341 @@
+package bridge
+
+import (
+	"fmt"
+	"net"
+)
+
+// ErrConfigExists error is returned when the driver already has a config applied.
+type ErrConfigExists struct{}
+
+func (ece *ErrConfigExists) Error() string {
+	return "configuration already exists, bridge configuration can be applied only once"
+}
+
+// Forbidden denotes the type of this error
+func (ece *ErrConfigExists) Forbidden() {}
+
+// ErrInvalidDriverConfig error is returned when the Bridge Driver is passed an invalid config.
+type ErrInvalidDriverConfig struct{}
+
+func (eidc *ErrInvalidDriverConfig) Error() string {
+	return "Invalid configuration passed to Bridge Driver"
+}
+
+// BadRequest denotes the type of this error
+func (eidc *ErrInvalidDriverConfig) BadRequest() {}
+
+// ErrInvalidNetworkConfig error is returned when a network is created on a driver without valid config.
+type ErrInvalidNetworkConfig struct{}
+
+func (einc *ErrInvalidNetworkConfig) Error() string {
+	return "trying to create a network on a driver without valid config"
+}
+
+// Forbidden denotes the type of this error
+func (einc *ErrInvalidNetworkConfig) Forbidden() {}
+
+// ErrInvalidContainerConfig error is returned when an endpoint create is attempted with an invalid configuration.
+type ErrInvalidContainerConfig struct{}
+
+func (eicc *ErrInvalidContainerConfig) Error() string {
+	return "Error in joining a container due to invalid configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eicc *ErrInvalidContainerConfig) BadRequest() {}
+
+// ErrInvalidEndpointConfig error is returned when an endpoint create is attempted with an invalid endpoint configuration.
+type ErrInvalidEndpointConfig struct{}
+
+func (eiec *ErrInvalidEndpointConfig) Error() string {
+	return "trying to create an endpoint with an invalid endpoint configuration"
+}
+
+// BadRequest denotes the type of this error
+func (eiec *ErrInvalidEndpointConfig) BadRequest() {}
+
+// ErrNetworkExists error is returned when a network already exists and another network is created.
+type ErrNetworkExists struct{}
+
+func (ene *ErrNetworkExists) Error() string {
+	return "network already exists, bridge can only have one network"
+}
+
+// Forbidden denotes the type of this error
+func (ene *ErrNetworkExists) Forbidden() {}
+
+// ErrIfaceName error is returned when a new name could not be generated.
+type ErrIfaceName struct{}
+
+func (ein *ErrIfaceName) Error() string {
+	return "failed to find name for new interface"
+}
+
+// InternalError denotes the type of this error
+func (ein *ErrIfaceName) InternalError() {}
+
+// ErrNoIPAddr error is returned when bridge has no IPv4 address configured.
+type ErrNoIPAddr struct{}
+
+func (enip *ErrNoIPAddr) Error() string {
+	return "bridge has no IPv4 address configured"
+}
+
+// InternalError denotes the type of this error
+func (enip *ErrNoIPAddr) InternalError() {}
+
+// ErrInvalidGateway is returned when the user-provided default gateway (v4/v6) is not valid.
+type ErrInvalidGateway struct{}
+
+func (eig *ErrInvalidGateway) Error() string {
+	return "default gateway ip must be part of the network"
+}
+
+// BadRequest denotes the type of this error
+func (eig *ErrInvalidGateway) BadRequest() {}
+
+// ErrInvalidContainerSubnet is returned when the container subnet (FixedCIDR) is not valid.
+type ErrInvalidContainerSubnet struct{}
+
+func (eis *ErrInvalidContainerSubnet) Error() string {
+	return "container subnet must be a subset of bridge network"
+}
+
+// BadRequest denotes the type of this error
+func (eis *ErrInvalidContainerSubnet) BadRequest() {}
+
+// ErrInvalidMtu is returned when the user-provided MTU is not valid.
+type ErrInvalidMtu int
+
+func (eim ErrInvalidMtu) Error() string {
+	return fmt.Sprintf("invalid MTU number: %d", int(eim))
+}
+
+// BadRequest denotes the type of this error
+func (eim ErrInvalidMtu) BadRequest() {}
+
+// ErrInvalidPort is returned when the container or host port specified in the port binding is not valid.
+type ErrInvalidPort string
+
+func (ip ErrInvalidPort) Error() string {
+	return fmt.Sprintf("invalid transport port: %s", string(ip))
+}
+
+// BadRequest denotes the type of this error
+func (ip ErrInvalidPort) BadRequest() {}
+
+// ErrUnsupportedAddressType is returned when the specified address type is not supported.
+type ErrUnsupportedAddressType string
+
+func (uat ErrUnsupportedAddressType) Error() string {
+	return fmt.Sprintf("unsupported address type: %s", string(uat))
+}
+
+// BadRequest denotes the type of this error
+func (uat ErrUnsupportedAddressType) BadRequest() {}
+
+// ErrInvalidAddressBinding is returned when the host address specified in the port binding is not valid.
+type ErrInvalidAddressBinding string
+
+func (iab ErrInvalidAddressBinding) Error() string {
+	return fmt.Sprintf("invalid host address in port binding: %s", string(iab))
+}
+
+// BadRequest denotes the type of this error
+func (iab ErrInvalidAddressBinding) BadRequest() {}
+
+// ActiveEndpointsError is returned when there are
+// still active endpoints in the network being deleted.
+type ActiveEndpointsError string
+
+func (aee ActiveEndpointsError) Error() string {
+	return fmt.Sprintf("network %s has active endpoint", string(aee))
+}
+
+// Forbidden denotes the type of this error
+func (aee ActiveEndpointsError) Forbidden() {}
+
+// InvalidNetworkIDError is returned when the passed
+// network id for an existing network is not a known id.
+type InvalidNetworkIDError string
+
+func (inie InvalidNetworkIDError) Error() string {
+	return fmt.Sprintf("invalid network id %s", string(inie))
+}
+
+// NotFound denotes the type of this error
+func (inie InvalidNetworkIDError) NotFound() {}
+
+// InvalidEndpointIDError is returned when the passed
+// endpoint id is not valid.
+type InvalidEndpointIDError string
+
+func (ieie InvalidEndpointIDError) Error() string {
+	return fmt.Sprintf("invalid endpoint id: %s", string(ieie))
+}
+
+// BadRequest denotes the type of this error
+func (ieie InvalidEndpointIDError) BadRequest() {}
+
+// InvalidSandboxIDError is returned when the passed
+// sandbox id is not valid.
+type InvalidSandboxIDError string
+
+func (isie InvalidSandboxIDError) Error() string {
+	return fmt.Sprintf("invalid sandbox id: %s", string(isie))
+}
+
+// BadRequest denotes the type of this error
+func (isie InvalidSandboxIDError) BadRequest() {}
+
+// EndpointNotFoundError is returned when no endpoint
+// with the passed endpoint id is found.
+type EndpointNotFoundError string
+
+func (enfe EndpointNotFoundError) Error() string {
+	return fmt.Sprintf("endpoint not found: %s", string(enfe))
+}
+
+// NotFound denotes the type of this error
+func (enfe EndpointNotFoundError) NotFound() {}
+
+// NonDefaultBridgeExistError is returned when a non-default
+// bridge config is passed but it does not already exist.
+type NonDefaultBridgeExistError string
+
+func (ndbee NonDefaultBridgeExistError) Error() string {
+	return fmt.Sprintf("bridge device with non default name %s must be created manually", string(ndbee))
+}
+
+// Forbidden denotes the type of this error
+func (ndbee NonDefaultBridgeExistError) Forbidden() {}
+
+// NonDefaultBridgeNeedsIPError is returned when a non-default
+// bridge config is passed but it has no IP address configured.
+type NonDefaultBridgeNeedsIPError string
+
+func (ndbee NonDefaultBridgeNeedsIPError) Error() string {
+	return fmt.Sprintf("bridge device with non default name %s must have a valid IP address", string(ndbee))
+}
+
+// Forbidden denotes the type of this error
+func (ndbee NonDefaultBridgeNeedsIPError) Forbidden() {}
+
+// FixedCIDRv4Error is returned when fixed-cidrv4 configuration
+// failed.
+type FixedCIDRv4Error struct {
+	Net    *net.IPNet
+	Subnet *net.IPNet
+	Err    error
+}
+
+func (fcv4 *FixedCIDRv4Error) Error() string {
+	return fmt.Sprintf("setup FixedCIDRv4 failed for subnet %s in %s: %v", fcv4.Subnet, fcv4.Net, fcv4.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv4 *FixedCIDRv4Error) InternalError() {}
+
+// FixedCIDRv6Error is returned when fixed-cidrv6 configuration
+// failed.
+type FixedCIDRv6Error struct {
+	Net *net.IPNet
+	Err error
+}
+
+func (fcv6 *FixedCIDRv6Error) Error() string {
+	return fmt.Sprintf("setup FixedCIDRv6 failed for subnet %s: %v", fcv6.Net, fcv6.Err)
+}
+
+// InternalError denotes the type of this error
+func (fcv6 *FixedCIDRv6Error) InternalError() {}
+
+// IPTableCfgError is returned when an unexpected iptables configuration is entered
+type IPTableCfgError string
+
+func (name IPTableCfgError) Error() string {
+	return fmt.Sprintf("unexpected request to set IP tables for interface: %s", string(name))
+}
+
+// BadRequest denotes the type of this error
+func (name IPTableCfgError) BadRequest() {}
+
+// InvalidIPTablesCfgError is returned when an invalid iptables configuration is entered
+type InvalidIPTablesCfgError string
+
+func (action InvalidIPTablesCfgError) Error() string {
+	return fmt.Sprintf("Invalid IPTables action '%s'", string(action))
+}
+
+// BadRequest denotes the type of this error
+func (action InvalidIPTablesCfgError) BadRequest() {}
+
+// IPv4AddrRangeError is returned when a valid IP address range couldn't be found.
+type IPv4AddrRangeError string + +func (name IPv4AddrRangeError) Error() string { + return fmt.Sprintf("can't find an address range for interface %q", string(name)) +} + +// BadRequest denotes the type of this error +func (name IPv4AddrRangeError) BadRequest() {} + +// IPv4AddrAddError is returned when IPv4 address could not be added to the bridge. +type IPv4AddrAddError struct { + IP *net.IPNet + Err error +} + +func (ipv4 *IPv4AddrAddError) Error() string { + return fmt.Sprintf("failed to add IPv4 address %s to bridge: %v", ipv4.IP, ipv4.Err) +} + +// InternalError denotes the type of this error +func (ipv4 *IPv4AddrAddError) InternalError() {} + +// IPv6AddrAddError is returned when IPv6 address could not be added to the bridge. +type IPv6AddrAddError struct { + IP *net.IPNet + Err error +} + +func (ipv6 *IPv6AddrAddError) Error() string { + return fmt.Sprintf("failed to add IPv6 address %s to bridge: %v", ipv6.IP, ipv6.Err) +} + +// InternalError denotes the type of this error +func (ipv6 *IPv6AddrAddError) InternalError() {} + +// IPv4AddrNoMatchError is returned when the bridge's IPv4 address does not match configured. +type IPv4AddrNoMatchError struct { + IP net.IP + CfgIP net.IP +} + +func (ipv4 *IPv4AddrNoMatchError) Error() string { + return fmt.Sprintf("bridge IPv4 (%s) does not match requested configuration %s", ipv4.IP, ipv4.CfgIP) +} + +// BadRequest denotes the type of this error +func (ipv4 *IPv4AddrNoMatchError) BadRequest() {} + +// IPv6AddrNoMatchError is returned when the bridge's IPv6 address does not match configured. +type IPv6AddrNoMatchError net.IPNet + +func (ipv6 *IPv6AddrNoMatchError) Error() string { + return fmt.Sprintf("bridge IPv6 addresses do not match the expected bridge configuration %s", (*net.IPNet)(ipv6).String()) +} + +// BadRequest denotes the type of this error +func (ipv6 *IPv6AddrNoMatchError) BadRequest() {} + +// InvalidLinkIPAddrError is returned when a link is configured to a container with an invalid ip address +type InvalidLinkIPAddrError string + +func (address InvalidLinkIPAddrError) Error() string { + return fmt.Sprintf("Cannot link to a container with Invalid IP Address '%s'", string(address)) +} + +// BadRequest denotes the type of this error +func (address InvalidLinkIPAddrError) BadRequest() {} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/interface.go b/vendor/github.com/docker/libnetwork/drivers/bridge/interface.go new file mode 100644 index 0000000000..c9f3e8dfb7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/interface.go @@ -0,0 +1,86 @@ +package bridge + +import ( + "fmt" + "net" + + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +const ( + // DefaultBridgeName is the default name for the bridge interface managed + // by the driver when unspecified by the caller. + DefaultBridgeName = "docker0" +) + +// Interface models the bridge network device. +type bridgeInterface struct { + Link netlink.Link + bridgeIPv4 *net.IPNet + bridgeIPv6 *net.IPNet + gatewayIPv4 net.IP + gatewayIPv6 net.IP + nlh *netlink.Handle +} + +// newInterface creates a new bridge interface structure. 
It attempts to find +// an already existing device identified by the configuration BridgeName field, +// or the default bridge name when unspecified, but doesn't attempt to create +// one when missing +func newInterface(nlh *netlink.Handle, config *networkConfiguration) (*bridgeInterface, error) { + var err error + i := &bridgeInterface{nlh: nlh} + + // Initialize the bridge name to the default if unspecified. + if config.BridgeName == "" { + config.BridgeName = DefaultBridgeName + } + + // Attempt to find an existing bridge named with the specified name. + i.Link, err = nlh.LinkByName(config.BridgeName) + if err != nil { + logrus.Debugf("Did not find any interface with name %s: %v", config.BridgeName, err) + } else if _, ok := i.Link.(*netlink.Bridge); !ok { + return nil, fmt.Errorf("existing interface %s is not a bridge", i.Link.Attrs().Name) + } + return i, nil +} + +// exists indicates if the existing bridge interface exists on the system. +func (i *bridgeInterface) exists() bool { + return i.Link != nil +} + +// addresses returns all IPv4 addresses and all IPv6 addresses for the bridge interface. +func (i *bridgeInterface) addresses() ([]netlink.Addr, []netlink.Addr, error) { + v4addr, err := i.nlh.AddrList(i.Link, netlink.FAMILY_V4) + if err != nil { + return nil, nil, fmt.Errorf("Failed to retrieve V4 addresses: %v", err) + } + + v6addr, err := i.nlh.AddrList(i.Link, netlink.FAMILY_V6) + if err != nil { + return nil, nil, fmt.Errorf("Failed to retrieve V6 addresses: %v", err) + } + + if len(v4addr) == 0 { + return nil, v6addr, nil + } + return v4addr, v6addr, nil +} + +func (i *bridgeInterface) programIPv6Address() error { + _, nlAddressList, err := i.addresses() + if err != nil { + return &IPv6AddrAddError{IP: i.bridgeIPv6, Err: fmt.Errorf("failed to retrieve address list: %v", err)} + } + nlAddr := netlink.Addr{IPNet: i.bridgeIPv6} + if findIPv6Address(nlAddr, nlAddressList) { + return nil + } + if err := i.nlh.AddrAdd(i.Link, &nlAddr); err != nil { + return &IPv6AddrAddError{IP: i.bridgeIPv6, Err: err} + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/labels.go b/vendor/github.com/docker/libnetwork/drivers/bridge/labels.go new file mode 100644 index 0000000000..7447bd3f93 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/labels.go @@ -0,0 +1,18 @@ +package bridge + +const ( + // BridgeName label for bridge driver + BridgeName = "com.docker.network.bridge.name" + + // EnableIPMasquerade label for bridge driver + EnableIPMasquerade = "com.docker.network.bridge.enable_ip_masquerade" + + // EnableICC label + EnableICC = "com.docker.network.bridge.enable_icc" + + // DefaultBindingIP label + DefaultBindingIP = "com.docker.network.bridge.host_binding_ipv4" + + // DefaultBridge label + DefaultBridge = "com.docker.network.bridge.default_bridge" +) diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/link.go b/vendor/github.com/docker/libnetwork/drivers/bridge/link.go new file mode 100644 index 0000000000..d364516f1a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/link.go @@ -0,0 +1,85 @@ +package bridge + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type link struct { + parentIP string + childIP string + ports []types.TransportPort + bridge string +} + +func (l *link) String() string { + return fmt.Sprintf("%s <-> %s [%v] on %s", l.parentIP, l.childIP, l.ports, l.bridge) +} + +func newLink(parentIP, 
childIP string, ports []types.TransportPort, bridge string) *link { + return &link{ + childIP: childIP, + parentIP: parentIP, + ports: ports, + bridge: bridge, + } + +} + +func (l *link) Enable() error { + // -A == iptables append flag + linkFunction := func() error { + return linkContainers("-A", l.parentIP, l.childIP, l.ports, l.bridge, false) + } + + iptables.OnReloaded(func() { linkFunction() }) + return linkFunction() +} + +func (l *link) Disable() { + // -D == iptables delete flag + err := linkContainers("-D", l.parentIP, l.childIP, l.ports, l.bridge, true) + if err != nil { + logrus.Errorf("Error removing IPTables rules for a link %s due to %s", l.String(), err.Error()) + } + // Return proper error once we move to use a proper iptables package + // that returns typed errors +} + +func linkContainers(action, parentIP, childIP string, ports []types.TransportPort, bridge string, + ignoreErrors bool) error { + var nfAction iptables.Action + + switch action { + case "-A": + nfAction = iptables.Append + case "-I": + nfAction = iptables.Insert + case "-D": + nfAction = iptables.Delete + default: + return InvalidIPTablesCfgError(action) + } + + ip1 := net.ParseIP(parentIP) + if ip1 == nil { + return InvalidLinkIPAddrError(parentIP) + } + ip2 := net.ParseIP(childIP) + if ip2 == nil { + return InvalidLinkIPAddrError(childIP) + } + + chain := iptables.ChainInfo{Name: DockerChain} + for _, port := range ports { + err := chain.Link(nfAction, ip1, ip2, int(port.Port), port.Proto.String(), bridge) + if !ignoreErrors && err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go new file mode 100644 index 0000000000..6b49efa166 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go @@ -0,0 +1,131 @@ +package bridge + +import ( + "fmt" + "math/rand" + "net" + "syscall" + "time" + "unsafe" + + "github.com/docker/libnetwork/netutils" +) + +const ( + ifNameSize = 16 + ioctlBrAdd = 0x89a0 + ioctlBrAddIf = 0x89a2 +) + +type ifreqIndex struct { + IfrnName [ifNameSize]byte + IfruIndex int32 +} + +type ifreqHwaddr struct { + IfrnName [ifNameSize]byte + IfruHwaddr syscall.RawSockaddr +} + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + +// THIS CODE DOES NOT COMMUNICATE WITH KERNEL VIA RTNETLINK INTERFACE +// IT IS HERE FOR BACKWARDS COMPATIBILITY WITH OLDER LINUX KERNELS +// WHICH SHIP WITH OLDER NOT ENTIRELY FUNCTIONAL VERSION OF NETLINK +func getIfSocket() (fd int, err error) { + for _, socket := range []int{ + syscall.AF_INET, + syscall.AF_PACKET, + syscall.AF_INET6, + } { + if fd, err = syscall.Socket(socket, syscall.SOCK_DGRAM, 0); err == nil { + break + } + } + if err == nil { + return fd, nil + } + return -1, err +} + +func ifIoctBridge(iface, master *net.Interface, op uintptr) error { + if len(master.Name) >= ifNameSize { + return fmt.Errorf("Interface name %s too long", master.Name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqIndex{} + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], master.Name) + ifr.IfruIndex = int32(iface.Index) + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), op, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + + return nil +} + +// Add a slave to a bridge device. This is more backward-compatible than +// netlink.NetworkSetMaster and works on RHEL 6. 
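+// Concretely, this issues the classic SIOCBRADDIF ioctl (ioctlBrAddIf above,
+// 0x89a2) on a best-effort socket obtained from getIfSocket.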
+func ioctlAddToBridge(iface, master *net.Interface) error { + return ifIoctBridge(iface, master, ioctlBrAddIf) +} + +func ioctlSetMacAddress(name, addr string) error { + if len(name) >= ifNameSize { + return fmt.Errorf("Interface name %s too long", name) + } + + hw, err := net.ParseMAC(addr) + if err != nil { + return err + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + ifr := ifreqHwaddr{} + ifr.IfruHwaddr.Family = syscall.ARPHRD_ETHER + copy(ifr.IfrnName[:len(ifr.IfrnName)-1], name) + + for i := 0; i < 6; i++ { + ifr.IfruHwaddr.Data[i] = ifrDataByte(hw[i]) + } + + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), syscall.SIOCSIFHWADDR, uintptr(unsafe.Pointer(&ifr))); err != 0 { + return err + } + return nil +} + +func ioctlCreateBridge(name string, setMacAddr bool) error { + if len(name) >= ifNameSize { + return fmt.Errorf("Interface name %s too long", name) + } + + s, err := getIfSocket() + if err != nil { + return err + } + defer syscall.Close(s) + + nameBytePtr, err := syscall.BytePtrFromString(name) + if err != nil { + return err + } + if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(s), ioctlBrAdd, uintptr(unsafe.Pointer(nameBytePtr))); err != 0 { + return err + } + if setMacAddr { + return ioctlSetMacAddress(name, netutils.GenerateRandomMAC().String()) + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go new file mode 100644 index 0000000000..739d9c6ba3 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go @@ -0,0 +1,7 @@ +// +build arm ppc64 ppc64le + +package bridge + +func ifrDataByte(b byte) uint8 { + return uint8(b) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go new file mode 100644 index 0000000000..df526952f7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go @@ -0,0 +1,7 @@ +// +build !arm,!ppc64,!ppc64le + +package bridge + +func ifrDataByte(b byte) int8 { + return int8(b) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go new file mode 100644 index 0000000000..7e2d57b660 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go @@ -0,0 +1,18 @@ +// +build !linux + +package bridge + +import ( + "errors" + "net" +) + +// Add a slave to a bridge device. This is more backward-compatible than +// netlink.NetworkSetMaster and works on RHEL 6. 
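+// On platforms other than Linux these bridge ioctls are unavailable, so the
+// stubs below always report the operation as not implemented.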
+func ioctlAddToBridge(iface, master *net.Interface) error { + return errors.New("not implemented") +} + +func ioctlCreateBridge(name string, setMacAddr bool) error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/port_mapping.go b/vendor/github.com/docker/libnetwork/drivers/bridge/port_mapping.go new file mode 100644 index 0000000000..853129fc27 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/port_mapping.go @@ -0,0 +1,132 @@ +package bridge + +import ( + "bytes" + "errors" + "fmt" + "net" + + "github.com/docker/libnetwork/types" + "github.com/ishidawataru/sctp" + "github.com/sirupsen/logrus" +) + +var ( + defaultBindingIP = net.IPv4(0, 0, 0, 0) +) + +func (n *bridgeNetwork) allocatePorts(ep *bridgeEndpoint, reqDefBindIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) { + if ep.extConnConfig == nil || ep.extConnConfig.PortBindings == nil { + return nil, nil + } + + defHostIP := defaultBindingIP + if reqDefBindIP != nil { + defHostIP = reqDefBindIP + } + + return n.allocatePortsInternal(ep.extConnConfig.PortBindings, ep.addr.IP, defHostIP, ulPxyEnabled) +} + +func (n *bridgeNetwork) allocatePortsInternal(bindings []types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) ([]types.PortBinding, error) { + bs := make([]types.PortBinding, 0, len(bindings)) + for _, c := range bindings { + b := c.GetCopy() + if err := n.allocatePort(&b, containerIP, defHostIP, ulPxyEnabled); err != nil { + // On allocation failure, release previously allocated ports. On cleanup error, just log a warning message + if cuErr := n.releasePortsInternal(bs); cuErr != nil { + logrus.Warnf("Upon allocation failure for %v, failed to clear previously allocated port bindings: %v", b, cuErr) + } + return nil, err + } + bs = append(bs, b) + } + return bs, nil +} + +func (n *bridgeNetwork) allocatePort(bnd *types.PortBinding, containerIP, defHostIP net.IP, ulPxyEnabled bool) error { + var ( + host net.Addr + err error + ) + + // Store the container interface address in the operational binding + bnd.IP = containerIP + + // Adjust the host address in the operational binding + if len(bnd.HostIP) == 0 { + bnd.HostIP = defHostIP + } + + // Adjust HostPortEnd if this is not a range. + if bnd.HostPortEnd == 0 { + bnd.HostPortEnd = bnd.HostPort + } + + // Construct the container side transport address + container, err := bnd.ContainerAddr() + if err != nil { + return err + } + + // Try up to maxAllocatePortAttempts times to get a port that's not already allocated. + for i := 0; i < maxAllocatePortAttempts; i++ { + if host, err = n.portMapper.MapRange(container, bnd.HostIP, int(bnd.HostPort), int(bnd.HostPortEnd), ulPxyEnabled); err == nil { + break + } + // There is no point in immediately retrying to map an explicitly chosen port. 
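+		// A retry can only help when HostPort is 0, in which case the port
+		// mapper is free to pick a different ephemeral port on the next attempt.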
+ if bnd.HostPort != 0 { + logrus.Warnf("Failed to allocate and map port %d-%d: %s", bnd.HostPort, bnd.HostPortEnd, err) + break + } + logrus.Warnf("Failed to allocate and map port: %s, retry: %d", err, i+1) + } + if err != nil { + return err + } + + // Save the host port (regardless it was or not specified in the binding) + switch netAddr := host.(type) { + case *net.TCPAddr: + bnd.HostPort = uint16(host.(*net.TCPAddr).Port) + return nil + case *net.UDPAddr: + bnd.HostPort = uint16(host.(*net.UDPAddr).Port) + return nil + case *sctp.SCTPAddr: + bnd.HostPort = uint16(host.(*sctp.SCTPAddr).Port) + return nil + default: + // For completeness + return ErrUnsupportedAddressType(fmt.Sprintf("%T", netAddr)) + } +} + +func (n *bridgeNetwork) releasePorts(ep *bridgeEndpoint) error { + return n.releasePortsInternal(ep.portMapping) +} + +func (n *bridgeNetwork) releasePortsInternal(bindings []types.PortBinding) error { + var errorBuf bytes.Buffer + + // Attempt to release all port bindings, do not stop on failure + for _, m := range bindings { + if err := n.releasePort(m); err != nil { + errorBuf.WriteString(fmt.Sprintf("\ncould not release %v because of %v", m, err)) + } + } + + if errorBuf.Len() != 0 { + return errors.New(errorBuf.String()) + } + return nil +} + +func (n *bridgeNetwork) releasePort(bnd types.PortBinding) error { + // Construct the host side transport address + host, err := bnd.HostAddr() + if err != nil { + return err + } + return n.portMapper.Unmap(host) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup.go new file mode 100644 index 0000000000..eeb3611b78 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup.go @@ -0,0 +1,26 @@ +package bridge + +type setupStep func(*networkConfiguration, *bridgeInterface) error + +type bridgeSetup struct { + config *networkConfiguration + bridge *bridgeInterface + steps []setupStep +} + +func newBridgeSetup(c *networkConfiguration, i *bridgeInterface) *bridgeSetup { + return &bridgeSetup{config: c, bridge: i} +} + +func (b *bridgeSetup) apply() error { + for _, fn := range b.steps { + if err := fn(b.config, b.bridge); err != nil { + return err + } + } + return nil +} + +func (b *bridgeSetup) queueStep(step setupStep) { + b.steps = append(b.steps, step) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go new file mode 100644 index 0000000000..9b90acfac2 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go @@ -0,0 +1,163 @@ +package bridge + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "syscall" + + "github.com/sirupsen/logrus" +) + +// Enumeration type saying which versions of IP protocol to process. 
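+// The values are bit flags: ipv4 and ipv6 can be combined with |, and
+// ipvboth equals ipv4|ipv6.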
+type ipVersion int
+
+const (
+	ipvnone ipVersion = iota
+	ipv4
+	ipv6
+	ipvboth
+)
+
+// Gets the IP version in use ([ipv4], [ipv6] or [ipv4 and ipv6]).
+func getIPVersion(config *networkConfiguration) ipVersion {
+	ipVersion := ipv4
+	if config.AddressIPv6 != nil || config.EnableIPv6 {
+		ipVersion |= ipv6
+	}
+	return ipVersion
+}
+
+func setupBridgeNetFiltering(config *networkConfiguration, i *bridgeInterface) error {
+	err := checkBridgeNetFiltering(config, i)
+	if err != nil {
+		if ptherr, ok := err.(*os.PathError); ok {
+			if errno, ok := ptherr.Err.(syscall.Errno); ok && errno == syscall.ENOENT {
+				if isRunningInContainer() {
+					logrus.Warnf("running inside docker container, ignoring missing kernel params: %v", err)
+					err = nil
+				} else {
+					err = errors.New("please ensure that br_netfilter kernel module is loaded")
+				}
+			}
+		}
+		if err != nil {
+			return fmt.Errorf("cannot restrict inter-container communication: %v", err)
+		}
+	}
+	return nil
+}
+
+// Enable bridge net filtering if IP forwarding is enabled. See github issue #11404.
+func checkBridgeNetFiltering(config *networkConfiguration, i *bridgeInterface) error {
+	ipVer := getIPVersion(config)
+	iface := config.BridgeName
+	doEnable := func(ipVer ipVersion) error {
+		var ipVerName string
+		if ipVer == ipv4 {
+			ipVerName = "IPv4"
+		} else {
+			ipVerName = "IPv6"
+		}
+		enabled, err := isPacketForwardingEnabled(ipVer, iface)
+		if err != nil {
+			logrus.Warnf("failed to check %s forwarding: %v", ipVerName, err)
+		} else if enabled {
+			enabled, err := getKernelBoolParam(getBridgeNFKernelParam(ipVer))
+			if err != nil || enabled {
+				return err
+			}
+			return setKernelBoolParam(getBridgeNFKernelParam(ipVer), true)
+		}
+		return nil
+	}
+
+	switch ipVer {
+	case ipv4, ipv6:
+		return doEnable(ipVer)
+	case ipvboth:
+		v4err := doEnable(ipv4)
+		v6err := doEnable(ipv6)
+		if v4err == nil {
+			return v6err
+		}
+		return v4err
+	default:
+		return nil
+	}
+}
+
+// Get the kernel param path saying whether IPv${ipVer} traffic is being forwarded
+// on a particular interface. The interface may be specified for IPv6 only. If
+// `iface` is empty, `default` will be assumed, which represents the default value
+// for new interfaces.
+func getForwardingKernelParam(ipVer ipVersion, iface string) string {
+	switch ipVer {
+	case ipv4:
+		return "/proc/sys/net/ipv4/ip_forward"
+	case ipv6:
+		if iface == "" {
+			iface = "default"
+		}
+		return fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/forwarding", iface)
+	default:
+		return ""
+	}
+}
+
+// Get the kernel param path saying whether bridged IPv${ipVer} traffic shall be
+// passed to ip${ipVer}tables' chains.
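+// These are the sysctls exposed by the br_netfilter kernel module.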
+func getBridgeNFKernelParam(ipVer ipVersion) string {
+	switch ipVer {
+	case ipv4:
+		return "/proc/sys/net/bridge/bridge-nf-call-iptables"
+	case ipv6:
+		return "/proc/sys/net/bridge/bridge-nf-call-ip6tables"
+	default:
+		return ""
+	}
+}
+
+// Gets the value of the kernel parameter located at the given path.
+func getKernelBoolParam(path string) (bool, error) {
+	enabled := false
+	line, err := ioutil.ReadFile(path)
+	if err != nil {
+		return false, err
+	}
+	if len(line) > 0 {
+		enabled = line[0] == '1'
+	}
+	return enabled, err
+}
+
+// Sets the value of the kernel parameter located at the given path.
+func setKernelBoolParam(path string, on bool) error {
+	value := byte('0')
+	if on {
+		value = byte('1')
+	}
+	return ioutil.WriteFile(path, []byte{value, '\n'}, 0644)
+}
+
+// Checks to see if packet forwarding is enabled.
+func isPacketForwardingEnabled(ipVer ipVersion, iface string) (bool, error) {
+	switch ipVer {
+	case ipv4, ipv6:
+		return getKernelBoolParam(getForwardingKernelParam(ipVer, iface))
+	case ipvboth:
+		enabled, err := getKernelBoolParam(getForwardingKernelParam(ipv4, ""))
+		if err != nil || !enabled {
+			return enabled, err
+		}
+		return getKernelBoolParam(getForwardingKernelParam(ipv6, iface))
+	default:
+		return true, nil
+	}
+}
+
+func isRunningInContainer() bool {
+	_, err := os.Stat("/.dockerenv")
+	return !os.IsNotExist(err)
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_device.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_device.go
new file mode 100644
index 0000000000..a9dfd06771
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_device.go
@@ -0,0 +1,68 @@
+package bridge
+
+import (
+	"fmt"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/docker/libnetwork/netutils"
+	"github.com/sirupsen/logrus"
+	"github.com/vishvananda/netlink"
+)
+
+// setupDevice creates a new bridge interface.
+func setupDevice(config *networkConfiguration, i *bridgeInterface) error {
+	var setMac bool
+
+	// We only attempt to create the bridge when the requested device name is
+	// the default one.
+	if config.BridgeName != DefaultBridgeName && config.DefaultBridge {
+		return NonDefaultBridgeExistError(config.BridgeName)
+	}
+
+	// Set the bridgeInterface netlink.Bridge.
+	i.Link = &netlink.Bridge{
+		LinkAttrs: netlink.LinkAttrs{
+			Name: config.BridgeName,
+		},
+	}
+
+	// Only set the bridge's MAC address if the kernel version is > 3.3, as it
+	// was not supported before that.
+	kv, err := kernel.GetKernelVersion()
+	if err != nil {
+		logrus.Errorf("Failed to check kernel version: %v. Will not assign a MAC address to the bridge interface", err)
+	} else {
+		setMac = kv.Kernel > 3 || (kv.Kernel == 3 && kv.Major >= 3)
+	}
+
+	if err = i.nlh.LinkAdd(i.Link); err != nil {
+		logrus.Debugf("Failed to create bridge %s via netlink. Trying ioctl", config.BridgeName)
+		return ioctlCreateBridge(config.BridgeName, setMac)
+	}
+
+	if setMac {
+		hwAddr := netutils.GenerateRandomMAC()
+		if err = i.nlh.LinkSetHardwareAddr(i.Link, hwAddr); err != nil {
+			return fmt.Errorf("failed to set bridge mac-address %s: %s", hwAddr, err.Error())
+		}
+		logrus.Debugf("Setting bridge mac address to %s", hwAddr)
+	}
+	return err
+}
+
+// setupDeviceUp brings the given bridge interface up.
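+// It is the netlink counterpart of running `ip link set <bridge-name> up`.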
+func setupDeviceUp(config *networkConfiguration, i *bridgeInterface) error { + err := i.nlh.LinkSetUp(i.Link) + if err != nil { + return fmt.Errorf("Failed to set link up for %s: %v", config.BridgeName, err) + } + + // Attempt to update the bridge interface to refresh the flags status, + // ignoring any failure to do so. + if lnk, err := i.nlh.LinkByName(config.BridgeName); err == nil { + i.Link = lnk + } else { + logrus.Warnf("Failed to retrieve link for interface (%s): %v", config.BridgeName, err) + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go new file mode 100644 index 0000000000..50cbdb1ddc --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go @@ -0,0 +1,20 @@ +package bridge + +import "github.com/docker/libnetwork/iptables" + +func (n *bridgeNetwork) setupFirewalld(config *networkConfiguration, i *bridgeInterface) error { + d := n.driver + d.Lock() + driverConfig := d.config + d.Unlock() + + // Sanity check. + if !driverConfig.EnableIPTables { + return IPTableCfgError(config.BridgeName) + } + + iptables.OnReloaded(func() { n.setupIPTables(config, i) }) + iptables.OnReloaded(n.portMapper.ReMapAll) + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go new file mode 100644 index 0000000000..355a14d996 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go @@ -0,0 +1,56 @@ +package bridge + +import ( + "fmt" + "io/ioutil" + + "github.com/docker/libnetwork/iptables" + "github.com/sirupsen/logrus" +) + +const ( + ipv4ForwardConf = "/proc/sys/net/ipv4/ip_forward" + ipv4ForwardConfPerm = 0644 +) + +func configureIPForwarding(enable bool) error { + val := byte('0') + if enable { + val = '1' + } + return ioutil.WriteFile(ipv4ForwardConf, []byte{val, '\n'}, ipv4ForwardConfPerm) +} + +func setupIPForwarding(enableIPTables bool) error { + // Get current IPv4 forward setup + ipv4ForwardData, err := ioutil.ReadFile(ipv4ForwardConf) + if err != nil { + return fmt.Errorf("Cannot read IP forwarding setup: %v", err) + } + + // Enable IPv4 forwarding only if it is not already enabled + if ipv4ForwardData[0] != '1' { + // Enable IPv4 forwarding + if err := configureIPForwarding(true); err != nil { + return fmt.Errorf("Enabling IP forwarding failed: %v", err) + } + // When enabling ip_forward set the default policy on forward chain to + // drop only if the daemon option iptables is not set to false.
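+ // (enableIPTables mirrors the daemon's iptables setting; when it is + // false, the default DROP policy below is skipped.)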
+ if !enableIPTables { + return nil + } + if err := iptables.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { + if err := configureIPForwarding(false); err != nil { + logrus.Errorf("Disabling IP forwarding failed, %v", err) + } + return err + } + iptables.OnReloaded(func() { + logrus.Debug("Setting the default DROP policy on firewall reload") + if err := iptables.SetDefaultPolicy(iptables.Filter, "FORWARD", iptables.Drop); err != nil { + logrus.Warnf("Setting the default DROP policy on firewall reload failed, %v", err) + } + }) + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go new file mode 100644 index 0000000000..5865a18f18 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go @@ -0,0 +1,363 @@ +package bridge + +import ( + "errors" + "fmt" + "net" + + "github.com/docker/libnetwork/iptables" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +// DockerChain: DOCKER iptables chain name +const ( + DockerChain = "DOCKER" + // Isolation between bridge networks is achieved in two stages by means + // of the following two chains in the filter table. The first chain matches + // on the source interface being a bridge network's bridge and the + // destination being a different interface. A positive match leads to the + // second isolation chain. No match returns to the parent chain. The second + // isolation chain matches on destination interface being a bridge network's + // bridge. A positive match identifies a packet originated from one bridge + // network's bridge destined to another bridge network's bridge and will + // result in the packet being dropped. No match returns to the parent chain. + IsolationChain1 = "DOCKER-ISOLATION-STAGE-1" + IsolationChain2 = "DOCKER-ISOLATION-STAGE-2" +) + +func setupIPChains(config *configuration) (*iptables.ChainInfo, *iptables.ChainInfo, *iptables.ChainInfo, *iptables.ChainInfo, error) { + // Sanity check.
+ if !config.EnableIPTables { + return nil, nil, nil, nil, errors.New("cannot create new chains, EnableIPTables is disabled") + } + + hairpinMode := !config.EnableUserlandProxy + + natChain, err := iptables.NewChain(DockerChain, iptables.Nat, hairpinMode) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to create NAT chain %s: %v", DockerChain, err) + } + defer func() { + if err != nil { + if err := iptables.RemoveExistingChain(DockerChain, iptables.Nat); err != nil { + logrus.Warnf("failed on removing iptables NAT chain %s on cleanup: %v", DockerChain, err) + } + } + }() + + filterChain, err := iptables.NewChain(DockerChain, iptables.Filter, false) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to create FILTER chain %s: %v", DockerChain, err) + } + defer func() { + if err != nil { + if err := iptables.RemoveExistingChain(DockerChain, iptables.Filter); err != nil { + logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", DockerChain, err) + } + } + }() + + isolationChain1, err := iptables.NewChain(IsolationChain1, iptables.Filter, false) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to create FILTER isolation chain: %v", err) + } + defer func() { + if err != nil { + if err := iptables.RemoveExistingChain(IsolationChain1, iptables.Filter); err != nil { + logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain1, err) + } + } + }() + + isolationChain2, err := iptables.NewChain(IsolationChain2, iptables.Filter, false) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to create FILTER isolation chain: %v", err) + } + defer func() { + if err != nil { + if err := iptables.RemoveExistingChain(IsolationChain2, iptables.Filter); err != nil { + logrus.Warnf("failed on removing iptables FILTER chain %s on cleanup: %v", IsolationChain2, err) + } + } + }() + + if err := iptables.AddReturnRule(IsolationChain1); err != nil { + return nil, nil, nil, nil, err + } + + if err := iptables.AddReturnRule(IsolationChain2); err != nil { + return nil, nil, nil, nil, err + } + + return natChain, filterChain, isolationChain1, isolationChain2, nil +} + +func (n *bridgeNetwork) setupIPTables(config *networkConfiguration, i *bridgeInterface) error { + var err error + + d := n.driver + d.Lock() + driverConfig := d.config + d.Unlock() + + // Sanity check.
+ if !driverConfig.EnableIPTables { + return errors.New("Cannot program chains, EnableIPTables is disabled") + } + + // Pickup this configuration option from driver + hairpinMode := !driverConfig.EnableUserlandProxy + + maskedAddrv4 := &net.IPNet{ + IP: i.bridgeIPv4.IP.Mask(i.bridgeIPv4.Mask), + Mask: i.bridgeIPv4.Mask, + } + if config.Internal { + if err = setupInternalNetworkRules(config.BridgeName, maskedAddrv4, config.EnableICC, true); err != nil { + return fmt.Errorf("Failed to Setup IP tables: %s", err.Error()) + } + n.registerIptCleanFunc(func() error { + return setupInternalNetworkRules(config.BridgeName, maskedAddrv4, config.EnableICC, false) + }) + } else { + if err = setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, true); err != nil { + return fmt.Errorf("Failed to Setup IP tables: %s", err.Error()) + } + n.registerIptCleanFunc(func() error { + return setupIPTablesInternal(config.BridgeName, maskedAddrv4, config.EnableICC, config.EnableIPMasquerade, hairpinMode, false) + }) + natChain, filterChain, _, _, err := n.getDriverChains() + if err != nil { + return fmt.Errorf("Failed to setup IP tables, cannot acquire chain info %s", err.Error()) + } + + err = iptables.ProgramChain(natChain, config.BridgeName, hairpinMode, true) + if err != nil { + return fmt.Errorf("Failed to program NAT chain: %s", err.Error()) + } + + err = iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, true) + if err != nil { + return fmt.Errorf("Failed to program FILTER chain: %s", err.Error()) + } + + n.registerIptCleanFunc(func() error { + return iptables.ProgramChain(filterChain, config.BridgeName, hairpinMode, false) + }) + + n.portMapper.SetIptablesChain(natChain, n.getNetworkBridgeName()) + } + + d.Lock() + err = iptables.EnsureJumpRule("FORWARD", IsolationChain1) + d.Unlock() + if err != nil { + return err + } + + return nil +} + +type iptRule struct { + table iptables.Table + chain string + preArgs []string + args []string +} + +func setupIPTablesInternal(bridgeIface string, addr net.Addr, icc, ipmasq, hairpin, enable bool) error { + + var ( + address = addr.String() + natRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-s", address, "!", "-o", bridgeIface, "-j", "MASQUERADE"}} + hpNatRule = iptRule{table: iptables.Nat, chain: "POSTROUTING", preArgs: []string{"-t", "nat"}, args: []string{"-m", "addrtype", "--src-type", "LOCAL", "-o", bridgeIface, "-j", "MASQUERADE"}} + skipDNAT = iptRule{table: iptables.Nat, chain: DockerChain, preArgs: []string{"-t", "nat"}, args: []string{"-i", bridgeIface, "-j", "RETURN"}} + outRule = iptRule{table: iptables.Filter, chain: "FORWARD", args: []string{"-i", bridgeIface, "!", "-o", bridgeIface, "-j", "ACCEPT"}} + ) + + // Set NAT. + if ipmasq { + if err := programChainRule(natRule, "NAT", enable); err != nil { + return err + } + } + + if ipmasq && !hairpin { + if err := programChainRule(skipDNAT, "SKIP DNAT", enable); err != nil { + return err + } + } + + // In hairpin mode, masquerade traffic from localhost + if hairpin { + if err := programChainRule(hpNatRule, "MASQ LOCAL HOST", enable); err != nil { + return err + } + } + + // Set Inter Container Communication. + if err := setIcc(bridgeIface, icc, enable); err != nil { + return err + } + + // Set Accept on all non-intercontainer outgoing packets.
+ return programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable) +} + +func programChainRule(rule iptRule, ruleDescr string, insert bool) error { + var ( + prefix []string + operation string + condition bool + doesExist = iptables.Exists(rule.table, rule.chain, rule.args...) + ) + + if insert { + condition = !doesExist + prefix = []string{"-I", rule.chain} + operation = "enable" + } else { + condition = doesExist + prefix = []string{"-D", rule.chain} + operation = "disable" + } + if rule.preArgs != nil { + prefix = append(rule.preArgs, prefix...) + } + + if condition { + if err := iptables.RawCombinedOutput(append(prefix, rule.args...)...); err != nil { + return fmt.Errorf("Unable to %s %s rule: %s", operation, ruleDescr, err.Error()) + } + } + + return nil +} + +func setIcc(bridgeIface string, iccEnable, insert bool) error { + var ( + table = iptables.Filter + chain = "FORWARD" + args = []string{"-i", bridgeIface, "-o", bridgeIface, "-j"} + acceptArgs = append(args, "ACCEPT") + dropArgs = append(args, "DROP") + ) + + if insert { + if !iccEnable { + iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...) + + if !iptables.Exists(table, chain, dropArgs...) { + if err := iptables.RawCombinedOutput(append([]string{"-A", chain}, dropArgs...)...); err != nil { + return fmt.Errorf("Unable to prevent intercontainer communication: %s", err.Error()) + } + } + } else { + iptables.Raw(append([]string{"-D", chain}, dropArgs...)...) + + if !iptables.Exists(table, chain, acceptArgs...) { + if err := iptables.RawCombinedOutput(append([]string{"-I", chain}, acceptArgs...)...); err != nil { + return fmt.Errorf("Unable to allow intercontainer communication: %s", err.Error()) + } + } + } + } else { + // Remove any ICC rule. + if !iccEnable { + if iptables.Exists(table, chain, dropArgs...) { + iptables.Raw(append([]string{"-D", chain}, dropArgs...)...) + } + } else { + if iptables.Exists(table, chain, acceptArgs...) { + iptables.Raw(append([]string{"-D", chain}, acceptArgs...)...) + } + } + } + + return nil +} + +// Control Inter Network Communication. Install[Remove] only if it is [not] present. 
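+// The stage-1 rule matches traffic leaving this bridge for another interface +// and jumps to stage 2, where traffic destined to any bridge network's bridge +// is dropped.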
+func setINC(iface string, enable bool) error { + var ( + action = iptables.Insert + actionMsg = "add" + chains = []string{IsolationChain1, IsolationChain2} + rules = [][]string{ + {"-i", iface, "!", "-o", iface, "-j", IsolationChain2}, + {"-o", iface, "-j", "DROP"}, + } + ) + + if !enable { + action = iptables.Delete + actionMsg = "remove" + } + + for i, chain := range chains { + if err := iptables.ProgramRule(iptables.Filter, chain, action, rules[i]); err != nil { + msg := fmt.Sprintf("unable to %s inter-network communication rule: %v", actionMsg, err) + if enable { + if i == 1 { + // Rollback the rule installed on first chain + if err2 := iptables.ProgramRule(iptables.Filter, chains[0], iptables.Delete, rules[0]); err2 != nil { + logrus.Warnf("Failed to rollback iptables rule after failure (%v): %v", err, err2) + } + } + return errors.New(msg) + } + logrus.Warn(msg) + } + } + + return nil +} + +// Obsolete chain from previous docker versions +const oldIsolationChain = "DOCKER-ISOLATION" + +func removeIPChains() { + // Remove obsolete rules from default chains + iptables.ProgramRule(iptables.Filter, "FORWARD", iptables.Delete, []string{"-j", oldIsolationChain}) + + // Remove chains + for _, chainInfo := range []iptables.ChainInfo{ + {Name: DockerChain, Table: iptables.Nat}, + {Name: DockerChain, Table: iptables.Filter}, + {Name: IsolationChain1, Table: iptables.Filter}, + {Name: IsolationChain2, Table: iptables.Filter}, + {Name: oldIsolationChain, Table: iptables.Filter}, + } { + if err := chainInfo.Remove(); err != nil { + logrus.Warnf("Failed to remove existing iptables entries in table %s chain %s : %v", chainInfo.Table, chainInfo.Name, err) + } + } +} + +func setupInternalNetworkRules(bridgeIface string, addr net.Addr, icc, insert bool) error { + var ( + inDropRule = iptRule{table: iptables.Filter, chain: IsolationChain1, args: []string{"-i", bridgeIface, "!", "-d", addr.String(), "-j", "DROP"}} + outDropRule = iptRule{table: iptables.Filter, chain: IsolationChain1, args: []string{"-o", bridgeIface, "!", "-s", addr.String(), "-j", "DROP"}} + ) + if err := programChainRule(inDropRule, "DROP INCOMING", insert); err != nil { + return err + } + if err := programChainRule(outDropRule, "DROP OUTGOING", insert); err != nil { + return err + } + // Set Inter Container Communication.
+ return setIcc(bridgeIface, icc, insert) +} + +func clearEndpointConnections(nlh *netlink.Handle, ep *bridgeEndpoint) { + var ipv4List []net.IP + var ipv6List []net.IP + if ep.addr != nil { + ipv4List = append(ipv4List, ep.addr.IP) + } + if ep.addrv6 != nil { + ipv6List = append(ipv6List, ep.addrv6.IP) + } + iptables.DeleteConntrackEntries(nlh, ipv4List, ipv6List) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go new file mode 100644 index 0000000000..671bd3302e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go @@ -0,0 +1,80 @@ +package bridge + +import ( + "errors" + "fmt" + "io/ioutil" + "net" + "path/filepath" + + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +func selectIPv4Address(addresses []netlink.Addr, selector *net.IPNet) (netlink.Addr, error) { + if len(addresses) == 0 { + return netlink.Addr{}, errors.New("unable to select an address as the address pool is empty") + } + if selector != nil { + for _, addr := range addresses { + if selector.Contains(addr.IP) { + return addr, nil + } + } + } + return addresses[0], nil +} + +func setupBridgeIPv4(config *networkConfiguration, i *bridgeInterface) error { + addrv4List, _, err := i.addresses() + if err != nil { + return fmt.Errorf("failed to retrieve bridge interface addresses: %v", err) + } + + addrv4, _ := selectIPv4Address(addrv4List, config.AddressIPv4) + + if !types.CompareIPNet(addrv4.IPNet, config.AddressIPv4) { + if addrv4.IPNet != nil { + if err := i.nlh.AddrDel(i.Link, &addrv4); err != nil { + return fmt.Errorf("failed to remove current ip address from bridge: %v", err) + } + } + logrus.Debugf("Assigning address to bridge interface %s: %s", config.BridgeName, config.AddressIPv4) + if err := i.nlh.AddrAdd(i.Link, &netlink.Addr{IPNet: config.AddressIPv4}); err != nil { + return &IPv4AddrAddError{IP: config.AddressIPv4, Err: err} + } + } + + // Store bridge network and default gateway + i.bridgeIPv4 = config.AddressIPv4 + i.gatewayIPv4 = config.AddressIPv4.IP + + return nil +} + +func setupGatewayIPv4(config *networkConfiguration, i *bridgeInterface) error { + if !i.bridgeIPv4.Contains(config.DefaultGatewayIPv4) { + return &ErrInvalidGateway{} + } + + // Store requested default gateway + i.gatewayIPv4 = config.DefaultGatewayIPv4 + + return nil +} + +func setupLoopbackAdressesRouting(config *networkConfiguration, i *bridgeInterface) error { + sysPath := filepath.Join("/proc/sys/net/ipv4/conf", config.BridgeName, "route_localnet") + ipv4LoRoutingData, err := ioutil.ReadFile(sysPath) + if err != nil { + return fmt.Errorf("Cannot read IPv4 local routing setup: %v", err) + } + // Enable loopback addresses routing only if it isn't already enabled + if ipv4LoRoutingData[0] != '1' { + if err := ioutil.WriteFile(sysPath, []byte{'1', '\n'}, 0644); err != nil { + return fmt.Errorf("Unable to enable local routing for hairpin mode: %v", err) + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go new file mode 100644 index 0000000000..b944be081e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go @@ -0,0 +1,119 @@ +package bridge + +import ( + "fmt" + "io/ioutil" + "net" + "os" + + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +var bridgeIPv6
*net.IPNet + +const ( + bridgeIPv6Str = "fe80::1/64" + ipv6ForwardConfPerm = 0644 + ipv6ForwardConfDefault = "/proc/sys/net/ipv6/conf/default/forwarding" + ipv6ForwardConfAll = "/proc/sys/net/ipv6/conf/all/forwarding" +) + +func init() { + // We allow ourselves to panic in this special case because we indicate a + // failure to parse a compile-time defined constant. + var err error + if bridgeIPv6, err = types.ParseCIDR(bridgeIPv6Str); err != nil { + panic(fmt.Sprintf("Cannot parse default bridge IPv6 address %q: %v", bridgeIPv6Str, err)) + } +} + +func setupBridgeIPv6(config *networkConfiguration, i *bridgeInterface) error { + procFile := "/proc/sys/net/ipv6/conf/" + config.BridgeName + "/disable_ipv6" + ipv6BridgeData, err := ioutil.ReadFile(procFile) + if err != nil { + return fmt.Errorf("Cannot read IPv6 setup for bridge %v: %v", config.BridgeName, err) + } + // Enable IPv6 on the bridge only if it isn't already enabled + if ipv6BridgeData[0] != '0' { + if err := ioutil.WriteFile(procFile, []byte{'0', '\n'}, ipv6ForwardConfPerm); err != nil { + return fmt.Errorf("Unable to enable IPv6 addresses on bridge: %v", err) + } + } + + // Store bridge network and default gateway + i.bridgeIPv6 = bridgeIPv6 + i.gatewayIPv6 = i.bridgeIPv6.IP + + if err := i.programIPv6Address(); err != nil { + return err + } + + if config.AddressIPv6 == nil { + return nil + } + + // Store the user specified bridge network and network gateway and program it + i.bridgeIPv6 = config.AddressIPv6 + i.gatewayIPv6 = config.AddressIPv6.IP + + if err := i.programIPv6Address(); err != nil { + return err + } + + // Setting route to global IPv6 subnet + logrus.Debugf("Adding route to IPv6 network %s via device %s", config.AddressIPv6.String(), config.BridgeName) + err = i.nlh.RouteAdd(&netlink.Route{ + Scope: netlink.SCOPE_UNIVERSE, + LinkIndex: i.Link.Attrs().Index, + Dst: config.AddressIPv6, + }) + if err != nil && !os.IsExist(err) { + logrus.Errorf("Could not add route to IPv6 network %s via device %s", config.AddressIPv6.String(), config.BridgeName) + } + + return nil +} + +func setupGatewayIPv6(config *networkConfiguration, i *bridgeInterface) error { + if config.AddressIPv6 == nil { + return &ErrInvalidContainerSubnet{} + } + if !config.AddressIPv6.Contains(config.DefaultGatewayIPv6) { + return &ErrInvalidGateway{} + } + + // Store requested default gateway + i.gatewayIPv6 = config.DefaultGatewayIPv6 + + return nil +} + +func setupIPv6Forwarding(config *networkConfiguration, i *bridgeInterface) error { + // Get current IPv6 default forwarding setup + ipv6ForwardDataDefault, err := ioutil.ReadFile(ipv6ForwardConfDefault) + if err != nil { + return fmt.Errorf("Cannot read IPv6 default forwarding setup: %v", err) + } + // Enable IPv6 default forwarding only if it is not already enabled + if ipv6ForwardDataDefault[0] != '1' { + if err := ioutil.WriteFile(ipv6ForwardConfDefault, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil { + logrus.Warnf("Unable to enable IPv6 default forwarding: %v", err) + } + } + + // Get current IPv6 all forwarding setup + ipv6ForwardDataAll, err := ioutil.ReadFile(ipv6ForwardConfAll) + if err != nil { + return fmt.Errorf("Cannot read IPv6 all forwarding setup: %v", err) + } + // Enable IPv6 all forwarding only if it is not already enabled + if ipv6ForwardDataAll[0] != '1' { + if err := ioutil.WriteFile(ipv6ForwardConfAll, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil { + logrus.Warnf("Unable to enable IPv6 all forwarding: %v", err) + } + } + + return nil +} diff --git
a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_verify.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_verify.go new file mode 100644 index 0000000000..de77c38a66 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_verify.go @@ -0,0 +1,73 @@ +package bridge + +import ( + "fmt" + "strings" + + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +func setupVerifyAndReconcile(config *networkConfiguration, i *bridgeInterface) error { + // Fetch a slice of IPv4 addresses and a slice of IPv6 addresses from the bridge. + addrsv4, addrsv6, err := i.addresses() + if err != nil { + return fmt.Errorf("Failed to verify ip addresses: %v", err) + } + + addrv4, _ := selectIPv4Address(addrsv4, config.AddressIPv4) + + // Verify that the bridge does have an IPv4 address. + if addrv4.IPNet == nil { + return &ErrNoIPAddr{} + } + + // Verify that the bridge IPv4 address matches the requested configuration. + if config.AddressIPv4 != nil && !addrv4.IP.Equal(config.AddressIPv4.IP) { + return &IPv4AddrNoMatchError{IP: addrv4.IP, CfgIP: config.AddressIPv4.IP} + } + + // Verify that one of the bridge IPv6 addresses matches the requested + // configuration. + if config.EnableIPv6 && !findIPv6Address(netlink.Addr{IPNet: bridgeIPv6}, addrsv6) { + return (*IPv6AddrNoMatchError)(bridgeIPv6) + } + + // Release any residual IPv6 address that might be there because of older daemon instances + for _, addrv6 := range addrsv6 { + if addrv6.IP.IsGlobalUnicast() && !types.CompareIPNet(addrv6.IPNet, i.bridgeIPv6) { + if err := i.nlh.AddrDel(i.Link, &addrv6); err != nil { + logrus.Warnf("Failed to remove residual IPv6 address %s from bridge: %v", addrv6.IPNet, err) + } + } + } + + return nil +} + +func findIPv6Address(addr netlink.Addr, addresses []netlink.Addr) bool { + for _, addrv6 := range addresses { + if addrv6.String() == addr.String() { + return true + } + } + return false +} + +func bridgeInterfaceExists(name string) (bool, error) { + nlh := ns.NlHandle() + link, err := nlh.LinkByName(name) + if err != nil { + if strings.Contains(err.Error(), "Link not found") { + return false, nil + } + return false, fmt.Errorf("failed to check bridge interface existence: %v", err) + } + + if link.Type() == "bridge" { + return true, nil + } + return false, fmt.Errorf("existing interface %s is not a bridge", name) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/host/host.go b/vendor/github.com/docker/libnetwork/drivers/host/host.go new file mode 100644 index 0000000000..a71d461380 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/host/host.go @@ -0,0 +1,106 @@ +package host + +import ( + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/types" +) + +const networkType = "host" + +type driver struct { + network string + sync.Mutex +} + +// Init registers a new instance of host driver +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + c := driverapi.Capability{ + DataScope: datastore.LocalScope, + ConnectivityScope: datastore.LocalScope, + } + return dc.RegisterDriver(networkType, &driver{}, c) +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) 
NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + d.Lock() + defer d.Unlock() + + if d.network != "" { + return types.ForbiddenErrorf("only one instance of \"%s\" network is allowed", networkType) + } + + d.network = id + + return nil +} + +func (d *driver) DeleteNetwork(nid string) error { + return types.ForbiddenErrorf("network of type \"%s\" cannot be deleted", networkType) +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error { + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + return make(map[string]interface{}, 0), nil +} + +// Join method is invoked when a Sandbox is attached to an endpoint. +func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +func (d *driver) Type() string { + return networkType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go new file mode 100644 index 0000000000..c64ad555a3 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go @@ -0,0 +1,115 @@ +package ipvlan + +import ( + "net" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" +) + +const ( + vethLen = 7 + containerVethPrefix = "eth" + vethPrefix = "veth" + ipvlanType = "ipvlan" // driver type name + modeL2 = "l2" // ipvlan mode l2 is the default + modeL3 = "l3" // ipvlan L3 mode + parentOpt = "parent" // parent interface -o parent + modeOpt = "_mode" // ipvlan mode ux opt suffix +) + +var driverModeOpt = ipvlanType + modeOpt // mode -o ipvlan_mode + +type endpointTable map[string]*endpoint + +type networkTable map[string]*network + +type driver struct { + networks networkTable + sync.Once + sync.Mutex + store datastore.DataStore +} + +type endpoint struct { + id string + nid string + mac net.HardwareAddr + addr *net.IPNet + addrv6 *net.IPNet + srcName string + dbIndex uint64 + dbExists bool +} + +type network struct { 
+ id string + sbox osl.Sandbox + endpoints endpointTable + driver *driver + config *configuration + sync.Mutex +} + +// Init initializes and registers the libnetwork ipvlan driver +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + c := driverapi.Capability{ + DataScope: datastore.LocalScope, + ConnectivityScope: datastore.GlobalScope, + } + d := &driver{ + networks: networkTable{}, + } + d.initStore(config) + + return dc.RegisterDriver(ipvlanType, d, c) +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + return make(map[string]interface{}, 0), nil +} + +func (d *driver) Type() string { + return ipvlanType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +// DiscoverNew is a notification for a new discovery event. +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event. +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go new file mode 100644 index 0000000000..139cbaf8b7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go @@ -0,0 +1,89 @@ +package ipvlan + +import ( + "fmt" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// CreateEndpoint assigns the mac, ip and endpoint id for the new container +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, + epOptions map[string]interface{}) error { + defer osl.InitOSContext()() + + if err := validateID(nid, eid); err != nil { + return err + } + n, err := d.getNetwork(nid) + if err != nil { + return fmt.Errorf("network id %q not found", nid) + } + if ifInfo.MacAddress() != nil { + return fmt.Errorf("%s interfaces do not support custom mac address assignment", ipvlanType) + } + ep := &endpoint{ + id: eid, + nid: nid, + addr: ifInfo.Address(), + addrv6: ifInfo.AddressIPv6(), + } + if ep.addr == nil { + return fmt.Errorf("create endpoint was not passed an IP address") + } + // disallow port mapping -p + if opt, ok := epOptions[netlabel.PortMap]; ok { + if _, ok := opt.([]types.PortBinding); ok { + if len(opt.([]types.PortBinding)) > 0 { + logrus.Warnf("%s driver does not support port mappings", ipvlanType) + } + } + } + // disallow port exposure --expose + if opt, ok := epOptions[netlabel.ExposedPorts]; ok { + if _, ok := opt.([]types.TransportPort); ok {
+ if len(opt.([]types.TransportPort)) > 0 { + logrus.Warnf("%s driver does not support port exposures", ipvlanType) + } + } + } + + if err := d.storeUpdate(ep); err != nil { + return fmt.Errorf("failed to save ipvlan endpoint %s to store: %v", ep.id[0:7], err) + } + + n.addEndpoint(ep) + + return nil +} + +// DeleteEndpoint removes the endpoint and associated netlink interface +func (d *driver) DeleteEndpoint(nid, eid string) error { + defer osl.InitOSContext()() + if err := validateID(nid, eid); err != nil { + return err + } + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("endpoint id %q not found", eid) + } + if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { + if err := ns.NlHandle().LinkDel(link); err != nil { + logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + } + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err) + } + n.deleteEndpoint(ep.id) + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go new file mode 100644 index 0000000000..9d229a245e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go @@ -0,0 +1,199 @@ +package ipvlan + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type staticRoute struct { + Destination *net.IPNet + RouteType int + NextHop net.IP +} + +const ( + defaultV4RouteCidr = "0.0.0.0/0" + defaultV6RouteCidr = "::/0" +) + +// Join method is invoked when a Sandbox is attached to an endpoint.
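+// In L3 mode it disables the gateway service and installs default routes via +// the ipvlan device itself; in L2 mode it resolves the endpoint's gateway from +// the matching configured subnet.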
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + defer osl.InitOSContext()() + n, err := d.getNetwork(nid) + if err != nil { + return err + } + endpoint := n.endpoint(eid) + if endpoint == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + // generate a name for the iface that will be renamed to eth0 in the sbox + containerIfName, err := netutils.GenerateIfaceName(ns.NlHandle(), vethPrefix, vethLen) + if err != nil { + return fmt.Errorf("error generating an interface name: %v", err) + } + // create the netlink ipvlan interface + vethName, err := createIPVlan(containerIfName, n.config.Parent, n.config.IpvlanMode) + if err != nil { + return err + } + // bind the generated iface name to the endpoint + endpoint.srcName = vethName + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + if n.config.IpvlanMode == modeL3 { + // disable gateway services to add a default gw using dev eth0 only + jinfo.DisableGatewayService() + defaultRoute, err := ifaceGateway(defaultV4RouteCidr) + if err != nil { + return err + } + if err := jinfo.AddStaticRoute(defaultRoute.Destination, defaultRoute.RouteType, defaultRoute.NextHop); err != nil { + return fmt.Errorf("failed to set an ipvlan l3 mode ipv4 default gateway: %v", err) + } + logrus.Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Ipvlan_Mode: %s, Parent: %s", + ep.addr.IP.String(), n.config.IpvlanMode, n.config.Parent) + // If the endpoint has a v6 address, set a v6 default route + if ep.addrv6 != nil { + default6Route, err := ifaceGateway(defaultV6RouteCidr) + if err != nil { + return err + } + if err = jinfo.AddStaticRoute(default6Route.Destination, default6Route.RouteType, default6Route.NextHop); err != nil { + return fmt.Errorf("failed to set an ipvlan l3 mode ipv6 default gateway: %v", err) + } + logrus.Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Ipvlan_Mode: %s, Parent: %s", + ep.addrv6.IP.String(), n.config.IpvlanMode, n.config.Parent) + } + } + if n.config.IpvlanMode == modeL2 { + // parse and correlate the endpoint v4 address with the available v4 subnets + if len(n.config.Ipv4Subnets) > 0 { + s := n.getSubnetforIPv4(ep.addr) + if s == nil { + return fmt.Errorf("could not find a valid ipv4 subnet for endpoint %s", eid) + } + v4gw, _, err := net.ParseCIDR(s.GwIP) + if err != nil { + return fmt.Errorf("gateway %s is not a valid ipv4 address: %v", s.GwIP, err) + } + err = jinfo.SetGateway(v4gw) + if err != nil { + return err + } + logrus.Debugf("Ipvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", + ep.addr.IP.String(), v4gw.String(), n.config.IpvlanMode, n.config.Parent) + } + // parse and correlate the endpoint v6 address with the available v6 subnets + if len(n.config.Ipv6Subnets) > 0 { + s := n.getSubnetforIPv6(ep.addrv6) + if s == nil { + return fmt.Errorf("could not find a valid ipv6 subnet for endpoint %s", eid) + } + v6gw, _, err := net.ParseCIDR(s.GwIP) + if err != nil { + return fmt.Errorf("gateway %s is not a valid ipv6 address: %v", s.GwIP, err) + } + err = jinfo.SetGatewayIPv6(v6gw) + if err != nil { + return err + } + logrus.Debugf("Ipvlan Endpoint Joined with IPv6_Addr: %s, Gateway: %s, Ipvlan_Mode: %s, Parent: %s", + ep.addrv6.IP.String(), v6gw.String(), n.config.IpvlanMode, n.config.Parent) + } + } + iNames := jinfo.InterfaceName() + err = iNames.SetNames(vethName, containerVethPrefix) + if err != nil { + return err + } + if err =
d.storeUpdate(ep); err != nil { + return fmt.Errorf("failed to save ipvlan endpoint %s to store: %v", ep.id[0:7], err) + } + + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + defer osl.InitOSContext()() + network, err := d.getNetwork(nid) + if err != nil { + return err + } + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + if endpoint == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + + return nil +} + +// ifaceGateway returns a static route for either v4/v6 to be set to the container eth0 +func ifaceGateway(dfNet string) (*staticRoute, error) { + nh, dst, err := net.ParseCIDR(dfNet) + if err != nil { + return nil, fmt.Errorf("unable to parse default route %v", err) + } + defaultRoute := &staticRoute{ + Destination: dst, + RouteType: types.CONNECTED, + NextHop: nh, + } + + return defaultRoute, nil +} + +// getSubnetforIPv4 returns the ipv4 subnet to which the given IP belongs +func (n *network) getSubnetforIPv4(ip *net.IPNet) *ipv4Subnet { + for _, s := range n.config.Ipv4Subnets { + _, snet, err := net.ParseCIDR(s.SubnetIP) + if err != nil { + return nil + } + // first check if the mask lengths are the same + i, _ := snet.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if snet.Contains(ip.IP) { + return s + } + } + + return nil +} + +// getSubnetforIPv6 returns the ipv6 subnet to which the given IP belongs +func (n *network) getSubnetforIPv6(ip *net.IPNet) *ipv6Subnet { + for _, s := range n.config.Ipv6Subnets { + _, snet, err := net.ParseCIDR(s.SubnetIP) + if err != nil { + return nil + } + // first check if the mask lengths are the same + i, _ := snet.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if snet.Contains(ip.IP) { + return s + } + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go new file mode 100644 index 0000000000..437919cf9d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go @@ -0,0 +1,252 @@ +package ipvlan + +import ( + "fmt" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// CreateNetwork creates the network for the specified driver type +func (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + defer osl.InitOSContext()() + kv, err := kernel.GetKernelVersion() + if err != nil { + return fmt.Errorf("Failed to check kernel version for %s driver support: %v", ipvlanType, err) + } + // ensure Kernel version is >= v4.2 for ipvlan support + if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { + return fmt.Errorf("kernel version failed to meet the minimum ipvlan kernel requirement of %d.%d, found %d.%d.%d", + ipvlanKernelVer, ipvlanMajorVer, kv.Kernel, kv.Major, kv.Minor) + } + // reject a null v4 network + if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" { + return fmt.Errorf("ipv4 pool is empty") + } + // parse and validate the config and bind to networkConfiguration +
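 // (options such as -o parent=eth0.10 and -o ipvlan_mode=l2 arrive here + // through the netlabel.GenericData map) +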
config, err := parseNetworkOptions(nid, option) + if err != nil { + return err + } + config.ID = nid + err = config.processIPAM(nid, ipV4Data, ipV6Data) + if err != nil { + return err + } + // verify the ipvlan mode from -o ipvlan_mode option + switch config.IpvlanMode { + case "", modeL2: + // default to ipvlan L2 mode if -o ipvlan_mode is empty + config.IpvlanMode = modeL2 + case modeL3: + config.IpvlanMode = modeL3 + default: + return fmt.Errorf("requested ipvlan mode '%s' is not valid, 'l2' mode is the ipvlan driver default", config.IpvlanMode) + } + // loopback is not a valid parent link + if config.Parent == "lo" { + return fmt.Errorf("loopback interface is not a valid %s parent link", ipvlanType) + } + // if parent interface not specified, create a dummy type link to use named dummy+net_id + if config.Parent == "" { + config.Parent = getDummyName(stringid.TruncateID(config.ID)) + // empty parent and --internal are handled the same. Set here to update k/v + config.Internal = true + } + err = d.createNetwork(config) + if err != nil { + return err + } + // update persistent db, rollback on fail + err = d.storeUpdate(config) + if err != nil { + d.deleteNetwork(config.ID) + logrus.Debugf("encountered an error rolling back a network create for %s : %v", config.ID, err) + return err + } + + return nil +} + +// createNetwork is used by new network callbacks and persistent network cache +func (d *driver) createNetwork(config *configuration) error { + networkList := d.getNetworks() + for _, nw := range networkList { + if config.Parent == nw.config.Parent { + return fmt.Errorf("network %s is already using parent interface %s", + getDummyName(stringid.TruncateID(nw.config.ID)), config.Parent) + } + } + if !parentExists(config.Parent) { + // if the --internal flag is set, create a dummy link + if config.Internal { + err := createDummyLink(config.Parent, getDummyName(stringid.TruncateID(config.ID))) + if err != nil { + return err + } + config.CreatedSlaveLink = true + // notify the user in logs they have limited communications + if config.Parent == getDummyName(stringid.TruncateID(config.ID)) { + logrus.Debugf("Empty -o parent= and --internal flags limit communications to other containers inside of network: %s", + config.Parent) + } + } else { + // if the subinterface parent_iface.vlan_id checks do not pass, return err.
+ // a valid example is 'eth0.10' for a parent iface 'eth0' with a vlan id '10' + err := createVlanLink(config.Parent) + if err != nil { + return err + } + // if the driver created the network's slave link, record it for future deletion + config.CreatedSlaveLink = true + } + } + n := &network{ + id: config.ID, + driver: d, + endpoints: endpointTable{}, + config: config, + } + // add the *network + d.addNetwork(n) + + return nil +} + +// DeleteNetwork deletes the network for the specified driver type +func (d *driver) DeleteNetwork(nid string) error { + defer osl.InitOSContext()() + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %s not found", nid) + } + // if the driver created the slave interface, delete it, otherwise leave it + if ok := n.config.CreatedSlaveLink; ok { + // if the interface exists, only delete if it matches iface.vlan or dummy.net_id naming + if ok := parentExists(n.config.Parent); ok { + // only delete the link if it is named the net_id + if n.config.Parent == getDummyName(stringid.TruncateID(nid)) { + err := delDummyLink(n.config.Parent) + if err != nil { + logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + n.config.Parent, err) + } + } else { + // only delete the link if it matches iface.vlan naming + err := delVlanLink(n.config.Parent) + if err != nil { + logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + n.config.Parent, err) + } + } + } + } + for _, ep := range n.endpoints { + if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { + if err := ns.NlHandle().LinkDel(link); err != nil { + logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + } + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove ipvlan endpoint %s from store: %v", ep.id[0:7], err) + } + } + // delete the *network + d.deleteNetwork(nid) + // delete the network record from persistent cache + err := d.storeDelete(n.config) + if err != nil { + return fmt.Errorf("error deleting id %s from datastore: %v", nid, err) + } + return nil +} + +// parseNetworkOptions parses docker network options +func parseNetworkOptions(id string, option options.Generic) (*configuration, error) { + var ( + err error + config = &configuration{} + ) + // parse generic labels first + if genData, ok := option[netlabel.GenericData]; ok && genData != nil { + if config, err = parseNetworkGenericOptions(genData); err != nil { + return nil, err + } + } + // setting the parent to "" will trigger an isolated network dummy parent link + if _, ok := option[netlabel.Internal]; ok { + config.Internal = true + // empty --parent= and --internal are handled the same.
+ config.Parent = "" + } + return config, nil +} + +// parseNetworkGenericOptions parses generic driver docker network options +func parseNetworkGenericOptions(data interface{}) (*configuration, error) { + var ( + err error + config *configuration + ) + switch opt := data.(type) { + case *configuration: + config = opt + case map[string]string: + config = &configuration{} + err = config.fromOptions(opt) + case options.Generic: + var opaqueConfig interface{} + if opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil { + config = opaqueConfig.(*configuration) + } + default: + err = types.BadRequestErrorf("unrecognized network configuration format: %v", opt) + } + return config, err +} + +// fromOptions binds the generic options to networkConfiguration to cache +func (config *configuration) fromOptions(labels map[string]string) error { + for label, value := range labels { + switch label { + case parentOpt: + // parse driver option '-o parent' + config.Parent = value + case driverModeOpt: + // parse driver option '-o ipvlan_mode' + config.IpvlanMode = value + } + } + return nil +} + +// processIPAM parses v4 and v6 IP information and binds it to the network configuration +func (config *configuration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error { + if len(ipamV4Data) > 0 { + for _, ipd := range ipamV4Data { + s := &ipv4Subnet{ + SubnetIP: ipd.Pool.String(), + GwIP: ipd.Gateway.String(), + } + config.Ipv4Subnets = append(config.Ipv4Subnets, s) + } + } + if len(ipamV6Data) > 0 { + for _, ipd := range ipamV6Data { + s := &ipv6Subnet{ + SubnetIP: ipd.Pool.String(), + GwIP: ipd.Gateway.String(), + } + config.Ipv6Subnets = append(config.Ipv6Subnets, s) + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go new file mode 100644 index 0000000000..28d6cca4e0 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go @@ -0,0 +1,205 @@ +package ipvlan + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/libnetwork/ns" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +const ( + dummyPrefix = "di-" // ipvlan prefix for dummy parent interface + ipvlanKernelVer = 4 // minimum ipvlan kernel support + ipvlanMajorVer = 2 // minimum ipvlan major kernel support +) + +// createIPVlan creates the ipvlan slave, specifying the source name +func createIPVlan(containerIfName, parent, ipvlanMode string) (string, error) { + // Set the ipvlan mode.
Default is L2 mode + mode, err := setIPVlanMode(ipvlanMode) + if err != nil { + return "", fmt.Errorf("Unsupported %s ipvlan mode: %v", ipvlanMode, err) + } + // verify the Docker host interface acting as the ipvlan parent iface exists + if !parentExists(parent) { + return "", fmt.Errorf("the requested parent interface %s was not found on the Docker host", parent) + } + // Get the link for the master index (Example: the docker host eth iface) + parentLink, err := ns.NlHandle().LinkByName(parent) + if err != nil { + return "", fmt.Errorf("error occurred looking up the %s parent iface %s error: %s", ipvlanType, parent, err) + } + // Create an ipvlan link + ipvlan := &netlink.IPVlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: containerIfName, + ParentIndex: parentLink.Attrs().Index, + }, + Mode: mode, + } + if err := ns.NlHandle().LinkAdd(ipvlan); err != nil { + // If a user creates a macvlan and ipvlan on same parent, only one slave iface can be active at a time. + return "", fmt.Errorf("failed to create the %s port: %v", ipvlanType, err) + } + + return ipvlan.Attrs().Name, nil +} + +// setIPVlanMode setter for one of the two ipvlan port types +func setIPVlanMode(mode string) (netlink.IPVlanMode, error) { + switch mode { + case modeL2: + return netlink.IPVLAN_MODE_L2, nil + case modeL3: + return netlink.IPVLAN_MODE_L3, nil + default: + return 0, fmt.Errorf("Unknown ipvlan mode: %s", mode) + } +} + +// parentExists checks if the specified interface exists in the default namespace +func parentExists(ifaceStr string) bool { + _, err := ns.NlHandle().LinkByName(ifaceStr) + if err != nil { + return false + } + + return true +} + +// createVlanLink parses sub-interfaces and vlan id for creation +func createVlanLink(parentName string) error { + if strings.Contains(parentName, ".") { + parent, vidInt, err := parseVlan(parentName) + if err != nil { + return err + } + // VLAN identifier or VID is a 12-bit field specifying the VLAN to which the frame belongs + if vidInt > 4094 || vidInt < 1 { + return fmt.Errorf("vlan id must be between 1-4094, received: %d", vidInt) + } + // get the parent link to attach a vlan subinterface + parentLink, err := ns.NlHandle().LinkByName(parent) + if err != nil { + return fmt.Errorf("failed to find master interface %s on the Docker host: %v", parent, err) + } + vlanLink := &netlink.Vlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: parentName, + ParentIndex: parentLink.Attrs().Index, + }, + VlanId: vidInt, + } + // create the subinterface + if err := ns.NlHandle().LinkAdd(vlanLink); err != nil { + return fmt.Errorf("failed to create %s vlan link: %v", vlanLink.Name, err) + } + // Bring the new netlink iface up + if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil { + return fmt.Errorf("failed to enable the ipvlan parent link %s: %v", vlanLink.Name, err) + } + logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) + return nil + } + + return fmt.Errorf("invalid subinterface vlan name %s, example formatting is eth0.10", parentName) +} + +// delVlanLink verifies only sub-interfaces with a vlan id get deleted +func delVlanLink(linkName string) error { + if strings.Contains(linkName, ".") { + _, _, err := parseVlan(linkName) + if err != nil { + return err + } + // delete the vlan subinterface + vlanLink, err := ns.NlHandle().LinkByName(linkName) + if err != nil { + return fmt.Errorf("failed to find interface %s on the Docker host : %v", linkName, err) + } + // verify a parent interface isn't being deleted + if
vlanLink.Attrs().ParentIndex == 0 { + return fmt.Errorf("interface %s does not appear to be a slave device: %v", linkName, err) + } + // delete the ipvlan slave device + if err := ns.NlHandle().LinkDel(vlanLink); err != nil { + return fmt.Errorf("failed to delete %s link: %v", linkName, err) + } + logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) + } + // if the subinterface doesn't parse to iface.vlan_id leave the interface in + // place since it could be a user specified name not created by the driver. + return nil +} + +// parseVlan parses and verifies a slave interface name: -o parent=eth0.10 +func parseVlan(linkName string) (string, int, error) { + // parse -o parent=eth0.10 + splitName := strings.Split(linkName, ".") + if len(splitName) != 2 { + return "", 0, fmt.Errorf("required interface name format is: name.vlan_id, ex. eth0.10 for vlan 10, instead received %s", linkName) + } + parent, vidStr := splitName[0], splitName[1] + // validate type and convert vlan id to int + vidInt, err := strconv.Atoi(vidStr) + if err != nil { + return "", 0, fmt.Errorf("unable to parse a valid vlan id from: %s (ex. eth0.10 for vlan 10)", vidStr) + } + // Check if the interface exists + if !parentExists(parent) { + return "", 0, fmt.Errorf("-o parent interface was not found on the host: %s", parent) + } + + return parent, vidInt, nil +} + +// createDummyLink creates a dummy0 parent link +func createDummyLink(dummyName, truncNetID string) error { + // create a parent interface since one was not specified + parent := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: dummyName, + }, + } + if err := ns.NlHandle().LinkAdd(parent); err != nil { + return err + } + parentDummyLink, err := ns.NlHandle().LinkByName(dummyName) + if err != nil { + return fmt.Errorf("error occurred looking up the %s parent iface %s error: %s", ipvlanType, dummyName, err) + } + // bring the new netlink iface up + if err := ns.NlHandle().LinkSetUp(parentDummyLink); err != nil { + return fmt.Errorf("failed to enable the ipvlan parent link %s: %v", dummyName, err) + } + + return nil +} + +// delDummyLink deletes the link type dummy used when -o parent is not passed +func delDummyLink(linkName string) error { + // delete the vlan subinterface + dummyLink, err := ns.NlHandle().LinkByName(linkName) + if err != nil { + return fmt.Errorf("failed to find link %s on the Docker host : %v", linkName, err) + } + // verify a parent interface is being deleted + if dummyLink.Attrs().ParentIndex != 0 { + return fmt.Errorf("link %s is not a parent dummy interface", linkName) + } + // delete the ipvlan dummy device + if err := ns.NlHandle().LinkDel(dummyLink); err != nil { + return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err) + } + logrus.Debugf("Deleted a dummy parent link: %s", linkName) + + return nil +} + +// getDummyName returns the name of a dummy parent with truncated net ID and driver prefix +func getDummyName(netID string) string { + return dummyPrefix + netID +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go new file mode 100644 index 0000000000..2a4ad25b40 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go @@ -0,0 +1,115 @@ +package ipvlan + +import ( + "fmt" + + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +func (d *driver) network(nid string) *network { + d.Lock() + n, ok :=
d.networks[nid] + d.Unlock() + if !ok { + logrus.Errorf("network id %s not found", nid) + } + + return n +} + +func (d *driver) addNetwork(n *network) { + d.Lock() + d.networks[n.id] = n + d.Unlock() +} + +func (d *driver) deleteNetwork(nid string) { + d.Lock() + delete(d.networks, nid) + d.Unlock() +} + +// getNetworks safely returns a slice of existing networks +func (d *driver) getNetworks() []*network { + d.Lock() + defer d.Unlock() + + ls := make([]*network, 0, len(d.networks)) + for _, nw := range d.networks { + ls = append(ls, nw) + } + + return ls +} + +func (n *network) endpoint(eid string) *endpoint { + n.Lock() + defer n.Unlock() + + return n.endpoints[eid] +} + +func (n *network) addEndpoint(ep *endpoint) { + n.Lock() + n.endpoints[ep.id] = ep + n.Unlock() +} + +func (n *network) deleteEndpoint(eid string) { + n.Lock() + delete(n.endpoints, eid) + n.Unlock() +} + +func (n *network) getEndpoint(eid string) (*endpoint, error) { + n.Lock() + defer n.Unlock() + if eid == "" { + return nil, fmt.Errorf("endpoint id %s not found", eid) + } + if ep, ok := n.endpoints[eid]; ok { + return ep, nil + } + + return nil, nil +} + +func validateID(nid, eid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + if eid == "" { + return fmt.Errorf("invalid endpoint id") + } + + return nil +} + +func (n *network) sandbox() osl.Sandbox { + n.Lock() + defer n.Unlock() + + return n.sbox +} + +func (n *network) setSandbox(sbox osl.Sandbox) { + n.Lock() + n.sbox = sbox + n.Unlock() +} + +func (d *driver) getNetwork(id string) (*network, error) { + d.Lock() + defer d.Unlock() + if id == "" { + return nil, types.BadRequestErrorf("invalid network id: %s", id) + } + + if nw, ok := d.networks[id]; ok { + return nw, nil + } + + return nil, types.NotFoundErrorf("network not found: %s", id) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go new file mode 100644 index 0000000000..197e29966a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go @@ -0,0 +1,349 @@ +package ipvlan + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + ipvlanPrefix = "ipvlan" + ipvlanNetworkPrefix = ipvlanPrefix + "/network" + ipvlanEndpointPrefix = ipvlanPrefix + "/endpoint" +) + +// configuration holds this driver's network specific configuration +type configuration struct { + ID string + Mtu int + dbIndex uint64 + dbExists bool + Internal bool + Parent string + IpvlanMode string + CreatedSlaveLink bool + Ipv4Subnets []*ipv4Subnet + Ipv6Subnets []*ipv6Subnet +} + +type ipv4Subnet struct { + SubnetIP string + GwIP string +} + +type ipv6Subnet struct { + SubnetIP string + GwIP string +} + +// initStore drivers are responsible for caching their own persistent state +func (d *driver) initStore(option map[string]interface{}) error { + if data, ok := option[netlabel.LocalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("ipvlan driver failed to initialize data store: %v", err) + } + + return d.populateNetworks() + } + + return nil +} +
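+// Store layout: network records are keyed under ipvlan/network/<id> and +// endpoint records under ipvlan/endpoint/<id> (cf. the prefix constants above). +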
+// populateNetworks is invoked at driver init to recreate persistently stored networks +func (d *driver) populateNetworks() error { + kvol, err := d.store.List(datastore.Key(ipvlanNetworkPrefix), &configuration{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get ipvlan network configurations from store: %v", err) + } + // If empty it simply means no ipvlan networks have been created yet + if err == datastore.ErrKeyNotFound { + return nil + } + for _, kvo := range kvol { + config := kvo.(*configuration) + if err = d.createNetwork(config); err != nil { + logrus.Warnf("could not create ipvlan network for id %s from persistent state", config.ID) + } + } + + return nil +} + +func (d *driver) populateEndpoints() error { + kvol, err := d.store.List(datastore.Key(ipvlanEndpointPrefix), &endpoint{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get ipvlan endpoints from store: %v", err) + } + + if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ep := kvo.(*endpoint) + n, ok := d.networks[ep.nid] + if !ok { + logrus.Debugf("Network (%s) not found for restored ipvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7]) + logrus.Debugf("Deleting stale ipvlan endpoint (%s) from store", ep.id[0:7]) + if err := d.storeDelete(ep); err != nil { + logrus.Debugf("Failed to delete stale ipvlan endpoint (%s) from store", ep.id[0:7]) + } + continue + } + n.endpoints[ep.id] = ep + logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7]) + } + + return nil +} + +// storeUpdate used to update persistent ipvlan network records as they are created +func (d *driver) storeUpdate(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Warnf("ipvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + return nil + } + if err := d.store.PutObjectAtomic(kvObject); err != nil { + return fmt.Errorf("failed to update ipvlan store for object type %T: %v", kvObject, err) + } + + return nil +} + +// storeDelete used to delete ipvlan network records from persistent cache as they are deleted +func (d *driver) storeDelete(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Debugf("ipvlan store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + return nil + } +retry: + if err := d.store.DeleteObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { + return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) + } + goto retry + } + return err + } + + return nil +} + +func (config *configuration) MarshalJSON() ([]byte, error) { + nMap := make(map[string]interface{}) + nMap["ID"] = config.ID + nMap["Mtu"] = config.Mtu + nMap["Parent"] = config.Parent + nMap["IpvlanMode"] = config.IpvlanMode + nMap["Internal"] = config.Internal + nMap["CreatedSubIface"] = config.CreatedSlaveLink + if len(config.Ipv4Subnets) > 0 { + iis, err := json.Marshal(config.Ipv4Subnets) + if err != nil { + return nil, err + } + nMap["Ipv4Subnets"] = string(iis) + } + if len(config.Ipv6Subnets) > 0 { + iis, err := json.Marshal(config.Ipv6Subnets) + if err != nil { + return nil, err + } + nMap["Ipv6Subnets"] = string(iis) + } + + return json.Marshal(nMap) +} + +func (config *configuration) UnmarshalJSON(b []byte) error { + var ( + err error + nMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &nMap); err != nil { + return err + } + config.ID = nMap["ID"].(string) + config.Mtu = int(nMap["Mtu"].(float64)) + config.Parent = nMap["Parent"].(string) + config.IpvlanMode = nMap["IpvlanMode"].(string) + config.Internal = nMap["Internal"].(bool) + config.CreatedSlaveLink = nMap["CreatedSubIface"].(bool) + if v, ok := nMap["Ipv4Subnets"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &config.Ipv4Subnets); err != nil { + return err + } + } + if v, ok := nMap["Ipv6Subnets"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &config.Ipv6Subnets); err != nil { + return err + } + } + + return nil +} + +func (config *configuration) Key() []string { + return []string{ipvlanNetworkPrefix, config.ID} +} + +func (config *configuration) KeyPrefix() []string { + return []string{ipvlanNetworkPrefix} +} + +func (config *configuration) Value() []byte { + b, err := json.Marshal(config) + if err != nil { + return nil + } + return b +} + +func (config *configuration) SetValue(value []byte) error { + return json.Unmarshal(value, config) +} + +func (config *configuration) Index() uint64 { + return config.dbIndex +} + +func (config *configuration) SetIndex(index uint64) { + config.dbIndex = index + config.dbExists = true +} + +func (config *configuration) Exists() bool { + return config.dbExists +} + +func (config *configuration) Skip() bool { + return false +} + +func (config *configuration) New() datastore.KVObject { + return &configuration{} +} + +func (config *configuration) CopyTo(o datastore.KVObject) error { + dstNcfg := o.(*configuration) + *dstNcfg = *config + return nil +} + +func (config *configuration) DataScope() string { + return datastore.LocalScope +} + +func (ep *endpoint) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + epMap["id"] = ep.id + epMap["nid"] = ep.nid + epMap["SrcName"] = ep.srcName + if len(ep.mac) != 0 { + epMap["MacAddress"] = ep.mac.String() + } + if ep.addr != nil { + epMap["Addr"] = ep.addr.String() + } + if ep.addrv6 != nil { + epMap["Addrv6"] = ep.addrv6.String() + } + return json.Marshal(epMap) +} + +func (ep *endpoint) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &epMap); err != nil { + return fmt.Errorf("Failed to unmarshal 
to ipvlan endpoint: %v", err) + } + + if v, ok := epMap["MacAddress"]; ok { + if ep.mac, err = net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode ipvlan endpoint MAC address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addr"]; ok { + if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode ipvlan endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addrv6"]; ok { + if ep.addrv6, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode ipvlan endpoint IPv6 address (%s) after json unmarshal: %v", v.(string), err) + } + } + ep.id = epMap["id"].(string) + ep.nid = epMap["nid"].(string) + ep.srcName = epMap["SrcName"].(string) + + return nil +} + +func (ep *endpoint) Key() []string { + return []string{ipvlanEndpointPrefix, ep.id} +} + +func (ep *endpoint) KeyPrefix() []string { + return []string{ipvlanEndpointPrefix} +} + +func (ep *endpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *endpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *endpoint) Index() uint64 { + return ep.dbIndex +} + +func (ep *endpoint) SetIndex(index uint64) { + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *endpoint) Exists() bool { + return ep.dbExists +} + +func (ep *endpoint) Skip() bool { + return false +} + +func (ep *endpoint) New() datastore.KVObject { + return &endpoint{} +} + +func (ep *endpoint) CopyTo(o datastore.KVObject) error { + dstEp := o.(*endpoint) + *dstEp = *ep + return nil +} + +func (ep *endpoint) DataScope() string { + return datastore.LocalScope +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan.go new file mode 100644 index 0000000000..872e6f3ec1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan.go @@ -0,0 +1,117 @@ +package macvlan + +import ( + "net" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" +) + +const ( + vethLen = 7 + containerVethPrefix = "eth" + vethPrefix = "veth" + macvlanType = "macvlan" // driver type name + modePrivate = "private" // macvlan mode private + modeVepa = "vepa" // macvlan mode vepa + modeBridge = "bridge" // macvlan mode bridge + modePassthru = "passthru" // macvlan mode passthrough + parentOpt = "parent" // parent interface -o parent + modeOpt = "_mode" // macvlan mode ux opt suffix +) + +var driverModeOpt = macvlanType + modeOpt // mode --option macvlan_mode + +type endpointTable map[string]*endpoint + +type networkTable map[string]*network + +type driver struct { + networks networkTable + sync.Once + sync.Mutex + store datastore.DataStore +} + +type endpoint struct { + id string + nid string + mac net.HardwareAddr + addr *net.IPNet + addrv6 *net.IPNet + srcName string + dbIndex uint64 + dbExists bool +} + +type network struct { + id string + sbox osl.Sandbox + endpoints endpointTable + driver *driver + config *configuration + sync.Mutex +} + +// Init initializes and registers the libnetwork macvlan driver +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + c := driverapi.Capability{ + DataScope: datastore.LocalScope, + ConnectivityScope: 
datastore.GlobalScope, + } + d := &driver{ + networks: networkTable{}, + } + d.initStore(config) + + return dc.RegisterDriver(macvlanType, d, c) +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + return make(map[string]interface{}, 0), nil +} + +func (d *driver) Type() string { + return macvlanType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +// DiscoverNew is a notification for a new discovery event +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go new file mode 100644 index 0000000000..bf3e0bbd14 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go @@ -0,0 +1,96 @@ +package macvlan + +import ( + "fmt" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// CreateEndpoint assigns the mac, ip and endpoint id for the new container +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, + epOptions map[string]interface{}) error { + defer osl.InitOSContext()() + + if err := validateID(nid, eid); err != nil { + return err + } + n, err := d.getNetwork(nid) + if err != nil { + return fmt.Errorf("network id %q not found", nid) + } + ep := &endpoint{ + id: eid, + nid: nid, + addr: ifInfo.Address(), + addrv6: ifInfo.AddressIPv6(), + mac: ifInfo.MacAddress(), + } + if ep.addr == nil { + return fmt.Errorf("create endpoint was not passed an IP address") + } + if ep.mac == nil { + ep.mac = netutils.GenerateMACFromIP(ep.addr.IP) + if err := ifInfo.SetMacAddress(ep.mac); err != nil { + return err + } + } + // disallow portmapping -p + if opt, ok := epOptions[netlabel.PortMap]; ok { + if _, ok := opt.([]types.PortBinding); ok { + if len(opt.([]types.PortBinding)) > 0 { + logrus.Warnf("%s driver does not support port mappings", macvlanType) + } + } + } + // disallow port exposure --expose + if opt, ok := epOptions[netlabel.ExposedPorts]; ok { + if _, ok := opt.([]types.TransportPort); ok { + if len(opt.([]types.TransportPort)) > 0 { + logrus.Warnf("%s driver does not support port exposures", macvlanType) + } + } + } + + if err := d.storeUpdate(ep); err != nil { + return fmt.Errorf("failed to save macvlan endpoint %s to store: %v", 
ep.id[0:7], err) + } + + n.addEndpoint(ep) + + return nil +} + +// DeleteEndpoint removes the endpoint and associated netlink interface +func (d *driver) DeleteEndpoint(nid, eid string) error { + defer osl.InitOSContext()() + if err := validateID(nid, eid); err != nil { + return err + } + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("endpoint id %q not found", eid) + } + if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { + if err := ns.NlHandle().LinkDel(link); err != nil { + logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + } + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove macvlan endpoint %s from store: %v", ep.id[0:7], err) + } + + n.deleteEndpoint(ep.id) + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go new file mode 100644 index 0000000000..778613db77 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go @@ -0,0 +1,144 @@ +package macvlan + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/sirupsen/logrus" +) + +// Join method is invoked when a Sandbox is attached to an endpoint. +func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + defer osl.InitOSContext()() + n, err := d.getNetwork(nid) + if err != nil { + return err + } + endpoint := n.endpoint(eid) + if endpoint == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + // generate a name for the iface that will be renamed to eth0 in the sbox + containerIfName, err := netutils.GenerateIfaceName(ns.NlHandle(), vethPrefix, vethLen) + if err != nil { + return fmt.Errorf("error generating an interface name: %s", err) + } + // create the netlink macvlan interface + vethName, err := createMacVlan(containerIfName, n.config.Parent, n.config.MacvlanMode) + if err != nil { + return err + } + // bind the generated iface name to the endpoint + endpoint.srcName = vethName + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + // parse and match the endpoint address with the available v4 subnets + if len(n.config.Ipv4Subnets) > 0 { + s := n.getSubnetforIPv4(ep.addr) + if s == nil { + return fmt.Errorf("could not find a valid ipv4 subnet for endpoint %s", eid) + } + v4gw, _, err := net.ParseCIDR(s.GwIP) + if err != nil { + return fmt.Errorf("gateway %s is not a valid ipv4 address: %v", s.GwIP, err) + } + err = jinfo.SetGateway(v4gw) + if err != nil { + return err + } + logrus.Debugf("Macvlan Endpoint Joined with IPv4_Addr: %s, Gateway: %s, MacVlan_Mode: %s, Parent: %s", + ep.addr.IP.String(), v4gw.String(), n.config.MacvlanMode, n.config.Parent) + } + // parse and match the endpoint address with the available v6 subnets + if len(n.config.Ipv6Subnets) > 0 { + s := n.getSubnetforIPv6(ep.addrv6) + if s == nil { + return fmt.Errorf("could not find a valid ipv6 subnet for endpoint %s", eid) + } + v6gw, _, err := net.ParseCIDR(s.GwIP) + if err != nil { + return fmt.Errorf("gateway %s is not a valid ipv6 address: %v", s.GwIP, err) + } + err = jinfo.SetGatewayIPv6(v6gw) + if err != 
nil { + return err + } + logrus.Debugf("Macvlan Endpoint Joined with IPv6_Addr: %s Gateway: %s MacVlan_Mode: %s, Parent: %s", + ep.addrv6.IP.String(), v6gw.String(), n.config.MacvlanMode, n.config.Parent) + } + iNames := jinfo.InterfaceName() + err = iNames.SetNames(vethName, containerVethPrefix) + if err != nil { + return err + } + if err := d.storeUpdate(ep); err != nil { + return fmt.Errorf("failed to save macvlan endpoint %s to store: %v", ep.id[0:7], err) + } + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + defer osl.InitOSContext()() + network, err := d.getNetwork(nid) + if err != nil { + return err + } + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + if endpoint == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + + return nil +} + +// getSubnetforIPv4 returns the ipv4 subnet to which the given IP belongs +func (n *network) getSubnetforIPv4(ip *net.IPNet) *ipv4Subnet { + for _, s := range n.config.Ipv4Subnets { + _, snet, err := net.ParseCIDR(s.SubnetIP) + if err != nil { + return nil + } + // first check if the mask lengths are the same + i, _ := snet.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if snet.Contains(ip.IP) { + return s + } + } + + return nil +} + +// getSubnetforIPv6 returns the ipv6 subnet to which the given IP belongs +func (n *network) getSubnetforIPv6(ip *net.IPNet) *ipv6Subnet { + for _, s := range n.config.Ipv6Subnets { + _, snet, err := net.ParseCIDR(s.SubnetIP) + if err != nil { + return nil + } + // first check if the mask lengths are the same + i, _ := snet.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if snet.Contains(ip.IP) { + return s + } + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_network.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_network.go new file mode 100644 index 0000000000..be4cf0f445 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_network.go @@ -0,0 +1,260 @@ +package macvlan + +import ( + "fmt" + + "github.com/docker/docker/pkg/parsers/kernel" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// CreateNetwork creates the network for the specified driver type +func (d *driver) CreateNetwork(nid string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + defer osl.InitOSContext()() + kv, err := kernel.GetKernelVersion() + if err != nil { + return fmt.Errorf("failed to check kernel version for %s driver support: %v", macvlanType, err) + } + // ensure Kernel version is >= v3.9 for macvlan support + if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { + return fmt.Errorf("kernel version failed to meet the minimum macvlan kernel requirement of %d.%d, found %d.%d.%d", + macvlanKernelVer, macvlanMajorVer, kv.Kernel, kv.Major, kv.Minor) + } + // reject a null v4 network + if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" { + return fmt.Errorf("ipv4 pool is empty") + } + // parse and validate the config and bind to networkConfiguration + config, err := parseNetworkOptions(nid, option) + if err 
!= nil { + return err + } + config.ID = nid + err = config.processIPAM(nid, ipV4Data, ipV6Data) + if err != nil { + return err + } + // verify the macvlan mode from -o macvlan_mode option + switch config.MacvlanMode { + case "", modeBridge: + // default to macvlan bridge mode if -o macvlan_mode is empty + config.MacvlanMode = modeBridge + case modePrivate: + config.MacvlanMode = modePrivate + case modePassthru: + config.MacvlanMode = modePassthru + case modeVepa: + config.MacvlanMode = modeVepa + default: + return fmt.Errorf("requested macvlan mode '%s' is not valid, 'bridge' mode is the macvlan driver default", config.MacvlanMode) + } + // loopback is not a valid parent link + if config.Parent == "lo" { + return fmt.Errorf("loopback interface is not a valid %s parent link", macvlanType) + } + // if parent interface not specified, create a dummy type link to use named dummy+net_id + if config.Parent == "" { + config.Parent = getDummyName(stringid.TruncateID(config.ID)) + // empty parent and --internal are handled the same. Set here to update k/v + config.Internal = true + } + err = d.createNetwork(config) + if err != nil { + return err + } + // update persistent db, rollback on fail + err = d.storeUpdate(config) + if err != nil { + d.deleteNetwork(config.ID) + logrus.Debugf("encountered an error rolling back a network create for %s: %v", config.ID, err) + return err + } + + return nil +} + +// createNetwork is used by new network callbacks and persistent network cache +func (d *driver) createNetwork(config *configuration) error { + networkList := d.getNetworks() + for _, nw := range networkList { + if config.Parent == nw.config.Parent { + return fmt.Errorf("network %s is already using parent interface %s", + getDummyName(stringid.TruncateID(nw.config.ID)), config.Parent) + } + } + if !parentExists(config.Parent) { + // if the --internal flag is set, create a dummy link + if config.Internal { + err := createDummyLink(config.Parent, getDummyName(stringid.TruncateID(config.ID))) + if err != nil { + return err + } + config.CreatedSlaveLink = true + // notify the user in logs they have limited communications + if config.Parent == getDummyName(stringid.TruncateID(config.ID)) { + logrus.Debugf("Empty -o parent= and --internal flags limit communications to other containers inside of network: %s", + config.Parent) + } + } else { + // if the subinterface parent_iface.vlan_id checks do not pass, return err. 
+ // a valid example is 'eth0.10' for a parent iface 'eth0' with a vlan id '10' + err := createVlanLink(config.Parent) + if err != nil { + return err + } + // if driver created the network's slave link, record it for future deletion + config.CreatedSlaveLink = true + } + } + n := &network{ + id: config.ID, + driver: d, + endpoints: endpointTable{}, + config: config, + } + // add the *network + d.addNetwork(n) + + return nil +} + +// DeleteNetwork deletes the network for the specified driver type +func (d *driver) DeleteNetwork(nid string) error { + defer osl.InitOSContext()() + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %s not found", nid) + } + // if the driver created the slave interface, delete it, otherwise leave it + if ok := n.config.CreatedSlaveLink; ok { + // if the interface exists, only delete if it matches iface.vlan or dummy.net_id naming + if ok := parentExists(n.config.Parent); ok { + // only delete the link if it is named the net_id + if n.config.Parent == getDummyName(stringid.TruncateID(nid)) { + err := delDummyLink(n.config.Parent) + if err != nil { + logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + n.config.Parent, err) + } + } else { + // only delete the link if it matches iface.vlan naming + err := delVlanLink(n.config.Parent) + if err != nil { + logrus.Debugf("link %s was not deleted, continuing the delete network operation: %v", + n.config.Parent, err) + } + } + } + } + for _, ep := range n.endpoints { + if link, err := ns.NlHandle().LinkByName(ep.srcName); err == nil { + if err := ns.NlHandle().LinkDel(link); err != nil { + logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.srcName, ep.id) + } + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove macvlan endpoint %s from store: %v", ep.id[0:7], err) + } + } + // delete the *network + d.deleteNetwork(nid) + // delete the network record from persistent cache + err := d.storeDelete(n.config) + if err != nil { + return fmt.Errorf("error deleting id %s from datastore: %v", nid, err) + } + return nil +} + +// parseNetworkOptions parses docker network options +func parseNetworkOptions(id string, option options.Generic) (*configuration, error) { + var ( + err error + config = &configuration{} + ) + // parse generic labels first + if genData, ok := option[netlabel.GenericData]; ok && genData != nil { + if config, err = parseNetworkGenericOptions(genData); err != nil { + return nil, err + } + } + // setting the parent to "" will trigger an isolated network dummy parent link + if _, ok := option[netlabel.Internal]; ok { + config.Internal = true + // empty --parent= and --internal are handled the same. 
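+ // Editorial note: clearing Parent here steers CreateNetwork into its config.Parent == "" branch, + // where the driver substitutes a dm-<truncated net id> dummy link, so an --internal macvlan + // network never attaches to a real host interface.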
+ config.Parent = "" + } + + return config, nil +} + +// parseNetworkGenericOptions parses generic driver docker network options +func parseNetworkGenericOptions(data interface{}) (*configuration, error) { + var ( + err error + config *configuration + ) + switch opt := data.(type) { + case *configuration: + config = opt + case map[string]string: + config = &configuration{} + err = config.fromOptions(opt) + case options.Generic: + var opaqueConfig interface{} + if opaqueConfig, err = options.GenerateFromModel(opt, config); err == nil { + config = opaqueConfig.(*configuration) + } + default: + err = types.BadRequestErrorf("unrecognized network configuration format: %v", opt) + } + + return config, err +} + +// fromOptions binds the generic options to networkConfiguration to cache +func (config *configuration) fromOptions(labels map[string]string) error { + for label, value := range labels { + switch label { + case parentOpt: + // parse driver option '-o parent' + config.Parent = value + case driverModeOpt: + // parse driver option '-o macvlan_mode' + config.MacvlanMode = value + } + } + + return nil +} + +// processIPAM parses v4 and v6 IP information and binds it to the network configuration +func (config *configuration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error { + if len(ipamV4Data) > 0 { + for _, ipd := range ipamV4Data { + s := &ipv4Subnet{ + SubnetIP: ipd.Pool.String(), + GwIP: ipd.Gateway.String(), + } + config.Ipv4Subnets = append(config.Ipv4Subnets, s) + } + } + if len(ipamV6Data) > 0 { + for _, ipd := range ipamV6Data { + s := &ipv6Subnet{ + SubnetIP: ipd.Pool.String(), + GwIP: ipd.Gateway.String(), + } + config.Ipv6Subnets = append(config.Ipv6Subnets, s) + } + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go new file mode 100644 index 0000000000..98d4bd3832 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go @@ -0,0 +1,209 @@ +package macvlan + +import ( + "fmt" + "strconv" + "strings" + + "github.com/docker/libnetwork/ns" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +const ( + dummyPrefix = "dm-" // macvlan prefix for dummy parent interface + macvlanKernelVer = 3 // minimum macvlan kernel support + macvlanMajorVer = 9 // minimum macvlan major kernel support +) + +// Create the macvlan slave specifying the source name +func createMacVlan(containerIfName, parent, macvlanMode string) (string, error) { + // Set the macvlan mode. 
Default is bridge mode + mode, err := setMacVlanMode(macvlanMode) + if err != nil { + return "", fmt.Errorf("Unsupported %s macvlan mode: %v", macvlanMode, err) + } + // verify the Docker host interface acting as the macvlan parent iface exists + if !parentExists(parent) { + return "", fmt.Errorf("the requested parent interface %s was not found on the Docker host", parent) + } + // Get the link for the master index (Example: the docker host eth iface) + parentLink, err := ns.NlHandle().LinkByName(parent) + if err != nil { + return "", fmt.Errorf("error occurred looking up the %s parent iface %s error: %s", macvlanType, parent, err) + } + // Create a macvlan link + macvlan := &netlink.Macvlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: containerIfName, + ParentIndex: parentLink.Attrs().Index, + }, + Mode: mode, + } + if err := ns.NlHandle().LinkAdd(macvlan); err != nil { + // If a user creates a macvlan and ipvlan on same parent, only one slave iface can be active at a time. + return "", fmt.Errorf("failed to create the %s port: %v", macvlanType, err) + } + + return macvlan.Attrs().Name, nil +} + +// setMacVlanMode setter for one of the four macvlan port types +func setMacVlanMode(mode string) (netlink.MacvlanMode, error) { + switch mode { + case modePrivate: + return netlink.MACVLAN_MODE_PRIVATE, nil + case modeVepa: + return netlink.MACVLAN_MODE_VEPA, nil + case modeBridge: + return netlink.MACVLAN_MODE_BRIDGE, nil + case modePassthru: + return netlink.MACVLAN_MODE_PASSTHRU, nil + default: + return 0, fmt.Errorf("unknown macvlan mode: %s", mode) + } +} + +// parentExists checks if the specified interface exists in the default namespace +func parentExists(ifaceStr string) bool { + _, err := ns.NlHandle().LinkByName(ifaceStr) + if err != nil { + return false + } + + return true +} + +// createVlanLink parses sub-interfaces and vlan id for creation +func createVlanLink(parentName string) error { + if strings.Contains(parentName, ".") { + parent, vidInt, err := parseVlan(parentName) + if err != nil { + return err + } + // VLAN identifier or VID is a 12-bit field specifying the VLAN to which the frame belongs + if vidInt > 4094 || vidInt < 1 { + return fmt.Errorf("vlan id must be between 1-4094, received: %d", vidInt) + } + // get the parent link to attach a vlan subinterface + parentLink, err := ns.NlHandle().LinkByName(parent) + if err != nil { + return fmt.Errorf("failed to find master interface %s on the Docker host: %v", parent, err) + } + vlanLink := &netlink.Vlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: parentName, + ParentIndex: parentLink.Attrs().Index, + }, + VlanId: vidInt, + } + // create the subinterface + if err := ns.NlHandle().LinkAdd(vlanLink); err != nil { + return fmt.Errorf("failed to create %s vlan link: %v", vlanLink.Name, err) + } + // Bring the new netlink iface up + if err := ns.NlHandle().LinkSetUp(vlanLink); err != nil { + return fmt.Errorf("failed to enable %s the macvlan parent link: %v", vlanLink.Name, err) + } + logrus.Debugf("Added a vlan tagged netlink subinterface: %s with a vlan id: %d", parentName, vidInt) + return nil + } + + return fmt.Errorf("invalid subinterface vlan name %s, example formatting is eth0.10", parentName) +} + +// delVlanLink verifies only sub-interfaces with a vlan id get deleted +func delVlanLink(linkName string) error { + if strings.Contains(linkName, ".") { + _, _, err := parseVlan(linkName) + if err != nil { + return err + } + // delete the vlan subinterface + vlanLink, err := ns.NlHandle().LinkByName(linkName) + if err != nil { + return 
fmt.Errorf("failed to find interface %s on the Docker host : %v", linkName, err) + } + // verify a parent interface isn't being deleted + if vlanLink.Attrs().ParentIndex == 0 { + return fmt.Errorf("interface %s does not appear to be a slave device: %v", linkName, err) + } + // delete the macvlan slave device + if err := ns.NlHandle().LinkDel(vlanLink); err != nil { + return fmt.Errorf("failed to delete %s link: %v", linkName, err) + } + logrus.Debugf("Deleted a vlan tagged netlink subinterface: %s", linkName) + } + // if the subinterface doesn't parse to iface.vlan_id leave the interface in + // place since it could be a user specified name not created by the driver. + return nil +} + +// parseVlan parses and verifies a slave interface name: -o parent=eth0.10 +func parseVlan(linkName string) (string, int, error) { + // parse -o parent=eth0.10 + splitName := strings.Split(linkName, ".") + if len(splitName) != 2 { + return "", 0, fmt.Errorf("required interface name format is: name.vlan_id, ex. eth0.10 for vlan 10, instead received %s", linkName) + } + parent, vidStr := splitName[0], splitName[1] + // validate type and convert vlan id to int + vidInt, err := strconv.Atoi(vidStr) + if err != nil { + return "", 0, fmt.Errorf("unable to parse a valid vlan id from: %s (ex. eth0.10 for vlan 10)", vidStr) + } + // Check if the interface exists + if !parentExists(parent) { + return "", 0, fmt.Errorf("-o parent interface does was not found on the host: %s", parent) + } + + return parent, vidInt, nil +} + +// createDummyLink creates a dummy0 parent link +func createDummyLink(dummyName, truncNetID string) error { + // create a parent interface since one was not specified + parent := &netlink.Dummy{ + LinkAttrs: netlink.LinkAttrs{ + Name: dummyName, + }, + } + if err := ns.NlHandle().LinkAdd(parent); err != nil { + return err + } + parentDummyLink, err := ns.NlHandle().LinkByName(dummyName) + if err != nil { + return fmt.Errorf("error occoured looking up the %s parent iface %s error: %s", macvlanType, dummyName, err) + } + // bring the new netlink iface up + if err := ns.NlHandle().LinkSetUp(parentDummyLink); err != nil { + return fmt.Errorf("failed to enable %s the macvlan parent link: %v", dummyName, err) + } + + return nil +} + +// delDummyLink deletes the link type dummy used when -o parent is not passed +func delDummyLink(linkName string) error { + // delete the vlan subinterface + dummyLink, err := ns.NlHandle().LinkByName(linkName) + if err != nil { + return fmt.Errorf("failed to find link %s on the Docker host : %v", linkName, err) + } + // verify a parent interface is being deleted + if dummyLink.Attrs().ParentIndex != 0 { + return fmt.Errorf("link %s is not a parent dummy interface", linkName) + } + // delete the macvlan dummy device + if err := ns.NlHandle().LinkDel(dummyLink); err != nil { + return fmt.Errorf("failed to delete the dummy %s link: %v", linkName, err) + } + logrus.Debugf("Deleted a dummy parent link: %s", linkName) + + return nil +} + +// getDummyName returns the name of a dummy parent with truncated net ID and driver prefix +func getDummyName(netID string) string { + return dummyPrefix + netID +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_state.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_state.go new file mode 100644 index 0000000000..8fd1a9e4dc --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_state.go @@ -0,0 +1,113 @@ +package macvlan + +import ( + "fmt" + + "github.com/docker/libnetwork/osl" + 
"github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +func (d *driver) network(nid string) *network { + d.Lock() + n, ok := d.networks[nid] + d.Unlock() + if !ok { + logrus.Errorf("network id %s not found", nid) + } + + return n +} + +func (d *driver) addNetwork(n *network) { + d.Lock() + d.networks[n.id] = n + d.Unlock() +} + +func (d *driver) deleteNetwork(nid string) { + d.Lock() + delete(d.networks, nid) + d.Unlock() +} + +// getNetworks Safely returns a slice of existing networks +func (d *driver) getNetworks() []*network { + d.Lock() + defer d.Unlock() + + ls := make([]*network, 0, len(d.networks)) + for _, nw := range d.networks { + ls = append(ls, nw) + } + + return ls +} + +func (n *network) endpoint(eid string) *endpoint { + n.Lock() + defer n.Unlock() + + return n.endpoints[eid] +} + +func (n *network) addEndpoint(ep *endpoint) { + n.Lock() + n.endpoints[ep.id] = ep + n.Unlock() +} + +func (n *network) deleteEndpoint(eid string) { + n.Lock() + delete(n.endpoints, eid) + n.Unlock() +} + +func (n *network) getEndpoint(eid string) (*endpoint, error) { + n.Lock() + defer n.Unlock() + if eid == "" { + return nil, fmt.Errorf("endpoint id %s not found", eid) + } + if ep, ok := n.endpoints[eid]; ok { + return ep, nil + } + + return nil, nil +} + +func validateID(nid, eid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + if eid == "" { + return fmt.Errorf("invalid endpoint id") + } + return nil +} + +func (n *network) sandbox() osl.Sandbox { + n.Lock() + defer n.Unlock() + + return n.sbox +} + +func (n *network) setSandbox(sbox osl.Sandbox) { + n.Lock() + n.sbox = sbox + n.Unlock() +} + +func (d *driver) getNetwork(id string) (*network, error) { + d.Lock() + defer d.Unlock() + if id == "" { + return nil, types.BadRequestErrorf("invalid network id: %s", id) + } + if nw, ok := d.networks[id]; ok { + return nw, nil + } + + return nil, types.NotFoundErrorf("network not found: %s", id) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_store.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_store.go new file mode 100644 index 0000000000..655a49c08b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_store.go @@ -0,0 +1,351 @@ +package macvlan + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + macvlanPrefix = "macvlan" + macvlanNetworkPrefix = macvlanPrefix + "/network" + macvlanEndpointPrefix = macvlanPrefix + "/endpoint" +) + +// networkConfiguration for this driver's network specific configuration +type configuration struct { + ID string + Mtu int + dbIndex uint64 + dbExists bool + Internal bool + Parent string + MacvlanMode string + CreatedSlaveLink bool + Ipv4Subnets []*ipv4Subnet + Ipv6Subnets []*ipv6Subnet +} + +type ipv4Subnet struct { + SubnetIP string + GwIP string +} + +type ipv6Subnet struct { + SubnetIP string + GwIP string +} + +// initStore drivers are responsible for caching their own persistent state +func (d *driver) initStore(option map[string]interface{}) error { + if data, ok := option[netlabel.LocalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != 
nil { + return types.InternalErrorf("macvlan driver failed to initialize data store: %v", err) + } + + return d.populateNetworks() + } + + return nil +} + +// populateNetworks is invoked at driver init to recreate persistently stored networks +func (d *driver) populateNetworks() error { + kvol, err := d.store.List(datastore.Key(macvlanPrefix), &configuration{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get macvlan network configurations from store: %v", err) + } + // If empty it simply means no macvlan networks have been created yet + if err == datastore.ErrKeyNotFound { + return nil + } + for _, kvo := range kvol { + config := kvo.(*configuration) + if err = d.createNetwork(config); err != nil { + logrus.Warnf("Could not create macvlan network for id %s from persistent state", config.ID) + } + } + + return nil +} + +func (d *driver) populateEndpoints() error { + kvol, err := d.store.List(datastore.Key(macvlanEndpointPrefix), &endpoint{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get macvlan endpoints from store: %v", err) + } + + if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ep := kvo.(*endpoint) + n, ok := d.networks[ep.nid] + if !ok { + logrus.Debugf("Network (%s) not found for restored macvlan endpoint (%s)", ep.nid[0:7], ep.id[0:7]) + logrus.Debugf("Deleting stale macvlan endpoint (%s) from store", ep.id[0:7]) + if err := d.storeDelete(ep); err != nil { + logrus.Debugf("Failed to delete stale macvlan endpoint (%s) from store", ep.id[0:7]) + } + continue + } + n.endpoints[ep.id] = ep + logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7]) + } + + return nil +} + +// storeUpdate used to update persistent macvlan network records as they are created +func (d *driver) storeUpdate(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Warnf("macvlan store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + return nil + } + if err := d.store.PutObjectAtomic(kvObject); err != nil { + return fmt.Errorf("failed to update macvlan store for object type %T: %v", kvObject, err) + } + + return nil +} + +// storeDelete used to delete macvlan records from persistent cache as they are deleted +func (d *driver) storeDelete(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Debugf("macvlan store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + return nil + } +retry: + if err := d.store.DeleteObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { + return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) + } + goto retry + } + return err + } + + return nil +} + +func (config *configuration) MarshalJSON() ([]byte, error) { + nMap := make(map[string]interface{}) + nMap["ID"] = config.ID + nMap["Mtu"] = config.Mtu + nMap["Parent"] = config.Parent + nMap["MacvlanMode"] = config.MacvlanMode + nMap["Internal"] = config.Internal + nMap["CreatedSubIface"] = config.CreatedSlaveLink + if len(config.Ipv4Subnets) > 0 { + iis, err := json.Marshal(config.Ipv4Subnets) + if err != nil { + return nil, err + } + nMap["Ipv4Subnets"] = string(iis) + } + if len(config.Ipv6Subnets) > 0 { + iis, err := json.Marshal(config.Ipv6Subnets) + if err != nil { + return nil, err + } + nMap["Ipv6Subnets"] = string(iis) + } + + return json.Marshal(nMap) +} + +func (config *configuration) UnmarshalJSON(b []byte) error { + var ( + err error + nMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &nMap); err != nil { + return err + } + config.ID = nMap["ID"].(string) + config.Mtu = int(nMap["Mtu"].(float64)) + config.Parent = nMap["Parent"].(string) + config.MacvlanMode = nMap["MacvlanMode"].(string) + config.Internal = nMap["Internal"].(bool) + config.CreatedSlaveLink = nMap["CreatedSubIface"].(bool) + if v, ok := nMap["Ipv4Subnets"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &config.Ipv4Subnets); err != nil { + return err + } + } + if v, ok := nMap["Ipv6Subnets"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &config.Ipv6Subnets); err != nil { + return err + } + } + + return nil +} + +func (config *configuration) Key() []string { + return []string{macvlanNetworkPrefix, config.ID} +} + +func (config *configuration) KeyPrefix() []string { + return []string{macvlanNetworkPrefix} +} + +func (config *configuration) Value() []byte { + b, err := json.Marshal(config) + if err != nil { + return nil + } + + return b +} + +func (config *configuration) SetValue(value []byte) error { + return json.Unmarshal(value, config) +} + +func (config *configuration) Index() uint64 { + return config.dbIndex +} + +func (config *configuration) SetIndex(index uint64) { + config.dbIndex = index + config.dbExists = true +} + +func (config *configuration) Exists() bool { + return config.dbExists +} + +func (config *configuration) Skip() bool { + return false +} + +func (config *configuration) New() datastore.KVObject { + return &configuration{} +} + +func (config *configuration) CopyTo(o datastore.KVObject) error { + dstNcfg := o.(*configuration) + *dstNcfg = *config + + return nil +} + +func (config *configuration) DataScope() string { + return datastore.LocalScope +} + +func (ep *endpoint) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + epMap["id"] = ep.id + epMap["nid"] = ep.nid + epMap["SrcName"] = ep.srcName + if len(ep.mac) != 0 { + epMap["MacAddress"] = ep.mac.String() + } + if ep.addr != nil { + epMap["Addr"] = ep.addr.String() + } + if ep.addrv6 != nil { + epMap["Addrv6"] = ep.addrv6.String() + } + return json.Marshal(epMap) +} + +func (ep *endpoint) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &epMap); err != nil { + return fmt.Errorf("Failed to 
unmarshal to macvlan endpoint: %v", err) + } + + if v, ok := epMap["MacAddress"]; ok { + if ep.mac, err = net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode macvlan endpoint MAC address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addr"]; ok { + if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode macvlan endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addrv6"]; ok { + if ep.addrv6, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode macvlan endpoint IPv6 address (%s) after json unmarshal: %v", v.(string), err) + } + } + ep.id = epMap["id"].(string) + ep.nid = epMap["nid"].(string) + ep.srcName = epMap["SrcName"].(string) + + return nil +} + +func (ep *endpoint) Key() []string { + return []string{macvlanEndpointPrefix, ep.id} +} + +func (ep *endpoint) KeyPrefix() []string { + return []string{macvlanEndpointPrefix} +} + +func (ep *endpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *endpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *endpoint) Index() uint64 { + return ep.dbIndex +} + +func (ep *endpoint) SetIndex(index uint64) { + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *endpoint) Exists() bool { + return ep.dbExists +} + +func (ep *endpoint) Skip() bool { + return false +} + +func (ep *endpoint) New() datastore.KVObject { + return &endpoint{} +} + +func (ep *endpoint) CopyTo(o datastore.KVObject) error { + dstEp := o.(*endpoint) + *dstEp = *ep + return nil +} + +func (ep *endpoint) DataScope() string { + return datastore.LocalScope +} diff --git a/vendor/github.com/docker/libnetwork/drivers/null/null.go b/vendor/github.com/docker/libnetwork/drivers/null/null.go new file mode 100644 index 0000000000..7f2a5e32f7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/null/null.go @@ -0,0 +1,105 @@ +package null + +import ( + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/types" +) + +const networkType = "null" + +type driver struct { + network string + sync.Mutex +} + +// Init registers a new instance of null driver +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + c := driverapi.Capability{ + DataScope: datastore.LocalScope, + } + return dc.RegisterDriver(networkType, &driver{}, c) +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + d.Lock() + defer d.Unlock() + + if d.network != "" { + return types.ForbiddenErrorf("only one instance of \"%s\" network is allowed", networkType) + } + + d.network = id + + return nil +} + +func (d *driver) 
DeleteNetwork(nid string) error { + return types.ForbiddenErrorf("network of type \"%s\" cannot be deleted", networkType) +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error { + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + return make(map[string]interface{}, 0), nil +} + +// Join method is invoked when a Sandbox is attached to an endpoint. +func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +func (d *driver) Type() string { + return networkType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go b/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go new file mode 100644 index 0000000000..bcae0bd4e5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go @@ -0,0 +1,639 @@ +package overlay + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "hash/fnv" + "net" + "sync" + "syscall" + + "strconv" + + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +const ( + r = 0xD0C4E3 + pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8) +) + +const ( + forward = iota + 1 + reverse + bidir +) + +var spMark = netlink.XfrmMark{Value: uint32(r), Mask: 0xffffffff} + +type key struct { + value []byte + tag uint32 +} + +func (k *key) String() string { + if k != nil { + return fmt.Sprintf("(key: %s, tag: 0x%x)", hex.EncodeToString(k.value)[0:5], k.tag) + } + return "" +} + +type spi struct { + forward int + reverse int +} + +func (s *spi) String() string { + return fmt.Sprintf("SPI(FWD: 0x%x, REV: 0x%x)", uint32(s.forward), uint32(s.reverse)) +} + +type encrMap struct { + nodes map[string][]*spi + sync.Mutex +} + +func (e *encrMap) String() string { + e.Lock() + defer e.Unlock() + b := new(bytes.Buffer) + for k, v := range e.nodes { + b.WriteString("\n") + b.WriteString(k) + b.WriteString(":") + b.WriteString("[") + for _, s := range v { + b.WriteString(s.String()) + b.WriteString(",") + } + b.WriteString("]") + + } + return b.String() +} + +func (d *driver) checkEncryption(nid string, rIP net.IP, vxlanID uint32, isLocal, add bool) error { + logrus.Debugf("checkEncryption(%s, %v, %d, %t)", nid[0:7], rIP, vxlanID, isLocal) + + n := d.network(nid) + if n == nil || !n.secure { + return nil + } + + if len(d.keys) == 0 { + return types.ForbiddenErrorf("encryption key is 
not present") + } + + lIP := net.ParseIP(d.bindAddress) + aIP := net.ParseIP(d.advertiseAddress) + nodes := map[string]net.IP{} + + switch { + case isLocal: + if err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool { + if !aIP.Equal(pEntry.vtep) { + nodes[pEntry.vtep.String()] = pEntry.vtep + } + return false + }); err != nil { + logrus.Warnf("Failed to retrieve list of participating nodes in overlay network %s: %v", nid[0:5], err) + } + default: + if len(d.network(nid).endpoints) > 0 { + nodes[rIP.String()] = rIP + } + } + + logrus.Debugf("List of nodes: %s", nodes) + + if add { + for _, rIP := range nodes { + if err := setupEncryption(lIP, aIP, rIP, vxlanID, d.secMap, d.keys); err != nil { + logrus.Warnf("Failed to program network encryption between %s and %s: %v", lIP, rIP, err) + } + } + } else { + if len(nodes) == 0 { + if err := removeEncryption(lIP, rIP, d.secMap); err != nil { + logrus.Warnf("Failed to remove network encryption between %s and %s: %v", lIP, rIP, err) + } + } + } + + return nil +} + +func setupEncryption(localIP, advIP, remoteIP net.IP, vni uint32, em *encrMap, keys []*key) error { + logrus.Debugf("Programming encryption for vxlan %d between %s and %s", vni, localIP, remoteIP) + rIPs := remoteIP.String() + + indices := make([]*spi, 0, len(keys)) + + err := programMangle(vni, true) + if err != nil { + logrus.Warn(err) + } + + err = programInput(vni, true) + if err != nil { + logrus.Warn(err) + } + + for i, k := range keys { + spis := &spi{buildSPI(advIP, remoteIP, k.tag), buildSPI(remoteIP, advIP, k.tag)} + dir := reverse + if i == 0 { + dir = bidir + } + fSA, rSA, err := programSA(localIP, remoteIP, spis, k, dir, true) + if err != nil { + logrus.Warn(err) + } + indices = append(indices, spis) + if i != 0 { + continue + } + err = programSP(fSA, rSA, true) + if err != nil { + logrus.Warn(err) + } + } + + em.Lock() + em.nodes[rIPs] = indices + em.Unlock() + + return nil +} + +func removeEncryption(localIP, remoteIP net.IP, em *encrMap) error { + em.Lock() + indices, ok := em.nodes[remoteIP.String()] + em.Unlock() + if !ok { + return nil + } + for i, idxs := range indices { + dir := reverse + if i == 0 { + dir = bidir + } + fSA, rSA, err := programSA(localIP, remoteIP, idxs, nil, dir, false) + if err != nil { + logrus.Warn(err) + } + if i != 0 { + continue + } + err = programSP(fSA, rSA, false) + if err != nil { + logrus.Warn(err) + } + } + return nil +} + +func programMangle(vni uint32, add bool) (err error) { + var ( + p = strconv.FormatUint(uint64(vxlanPort), 10) + c = fmt.Sprintf("0>>22&0x3C@12&0xFFFFFF00=%d", int(vni)<<8) + m = strconv.FormatUint(uint64(r), 10) + chain = "OUTPUT" + rule = []string{"-p", "udp", "--dport", p, "-m", "u32", "--u32", c, "-j", "MARK", "--set-mark", m} + a = "-A" + action = "install" + ) + + if add == iptables.Exists(iptables.Mangle, chain, rule...) { + return + } + + if !add { + a = "-D" + action = "remove" + } + + if err = iptables.RawCombinedOutput(append([]string{"-t", string(iptables.Mangle), a, chain}, rule...)...); err != nil { + logrus.Warnf("could not %s mangle rule: %v", action, err) + } + + return +} + +func programInput(vni uint32, add bool) (err error) { + var ( + port = strconv.FormatUint(uint64(vxlanPort), 10) + vniMatch = fmt.Sprintf("0>>22&0x3C@12&0xFFFFFF00=%d", int(vni)<<8) + plainVxlan = []string{"-p", "udp", "--dport", port, "-m", "u32", "--u32", vniMatch, "-j"} + ipsecVxlan = append([]string{"-m", "policy", "--dir", "in", "--pol", "ipsec"}, plainVxlan...) 
+ block = append(plainVxlan, "DROP") + accept = append(ipsecVxlan, "ACCEPT") + chain = "INPUT" + action = iptables.Append + msg = "add" + ) + + if !add { + action = iptables.Delete + msg = "remove" + } + + if err := iptables.ProgramRule(iptables.Filter, chain, action, accept); err != nil { + logrus.Errorf("could not %s input rule: %v. Please do it manually.", msg, err) + } + + if err := iptables.ProgramRule(iptables.Filter, chain, action, block); err != nil { + logrus.Errorf("could not %s input rule: %v. Please do it manually.", msg, err) + } + + return +} + +func programSA(localIP, remoteIP net.IP, spi *spi, k *key, dir int, add bool) (fSA *netlink.XfrmState, rSA *netlink.XfrmState, err error) { + var ( + action = "Removing" + xfrmProgram = ns.NlHandle().XfrmStateDel + ) + + if add { + action = "Adding" + xfrmProgram = ns.NlHandle().XfrmStateAdd + } + + if dir&reverse > 0 { + rSA = &netlink.XfrmState{ + Src: remoteIP, + Dst: localIP, + Proto: netlink.XFRM_PROTO_ESP, + Spi: spi.reverse, + Mode: netlink.XFRM_MODE_TRANSPORT, + Reqid: r, + } + if add { + rSA.Aead = buildAeadAlgo(k, spi.reverse) + } + + exists, err := saExists(rSA) + if err != nil { + exists = !add + } + + if add != exists { + logrus.Debugf("%s: rSA{%s}", action, rSA) + if err := xfrmProgram(rSA); err != nil { + logrus.Warnf("Failed %s rSA{%s}: %v", action, rSA, err) + } + } + } + + if dir&forward > 0 { + fSA = &netlink.XfrmState{ + Src: localIP, + Dst: remoteIP, + Proto: netlink.XFRM_PROTO_ESP, + Spi: spi.forward, + Mode: netlink.XFRM_MODE_TRANSPORT, + Reqid: r, + } + if add { + fSA.Aead = buildAeadAlgo(k, spi.forward) + } + + exists, err := saExists(fSA) + if err != nil { + exists = !add + } + + if add != exists { + logrus.Debugf("%s fSA{%s}", action, fSA) + if err := xfrmProgram(fSA); err != nil { + logrus.Warnf("Failed %s fSA{%s}: %v.", action, fSA, err) + } + } + } + + return +} + +func programSP(fSA *netlink.XfrmState, rSA *netlink.XfrmState, add bool) error { + action := "Removing" + xfrmProgram := ns.NlHandle().XfrmPolicyDel + if add { + action = "Adding" + xfrmProgram = ns.NlHandle().XfrmPolicyAdd + } + + // Create a congruent cidr + s := types.GetMinimalIP(fSA.Src) + d := types.GetMinimalIP(fSA.Dst) + fullMask := net.CIDRMask(8*len(s), 8*len(s)) + + fPol := &netlink.XfrmPolicy{ + Src: &net.IPNet{IP: s, Mask: fullMask}, + Dst: &net.IPNet{IP: d, Mask: fullMask}, + Dir: netlink.XFRM_DIR_OUT, + Proto: 17, + DstPort: 4789, + Mark: &spMark, + Tmpls: []netlink.XfrmPolicyTmpl{ + { + Src: fSA.Src, + Dst: fSA.Dst, + Proto: netlink.XFRM_PROTO_ESP, + Mode: netlink.XFRM_MODE_TRANSPORT, + Spi: fSA.Spi, + Reqid: r, + }, + }, + } + + exists, err := spExists(fPol) + if err != nil { + exists = !add + } + + if add != exists { + logrus.Debugf("%s fSP{%s}", action, fPol) + if err := xfrmProgram(fPol); err != nil { + logrus.Warnf("%s fSP{%s}: %v", action, fPol, err) + } + } + + return nil +} + +func saExists(sa *netlink.XfrmState) (bool, error) { + _, err := ns.NlHandle().XfrmStateGet(sa) + switch err { + case nil: + return true, nil + case syscall.ESRCH: + return false, nil + default: + err = fmt.Errorf("Error while checking for SA existence: %v", err) + logrus.Warn(err) + return false, err + } +} + +func spExists(sp *netlink.XfrmPolicy) (bool, error) { + _, err := ns.NlHandle().XfrmPolicyGet(sp) + switch err { + case nil: + return true, nil + case syscall.ENOENT: + return false, nil + default: + err = fmt.Errorf("Error while checking for SP existence: %v", err) + logrus.Warn(err) + return false, err + } +} + +func buildSPI(src, dst 
net.IP, st uint32) int { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, st) + h := fnv.New32a() + h.Write(src) + h.Write(b) + h.Write(dst) + return int(binary.BigEndian.Uint32(h.Sum(nil))) +} + +// buildAeadAlgo returns the rfc4106 AES-GCM algorithm descriptor for an SA; +// the 4-byte salt derived from the SPI is appended to the raw key material. +func buildAeadAlgo(k *key, s int) *netlink.XfrmStateAlgo { + salt := make([]byte, 4) + binary.BigEndian.PutUint32(salt, uint32(s)) + return &netlink.XfrmStateAlgo{ + Name: "rfc4106(gcm(aes))", + Key: append(k.value, salt...), + ICVLen: 64, + } +} + +func (d *driver) secMapWalk(f func(string, []*spi) ([]*spi, bool)) error { + d.secMap.Lock() + for node, indices := range d.secMap.nodes { + idxs, stop := f(node, indices) + if idxs != nil { + d.secMap.nodes[node] = idxs + } + if stop { + break + } + } + d.secMap.Unlock() + return nil +} + +func (d *driver) setKeys(keys []*key) error { + // Remove any stale policy, state + clearEncryptionStates() + // Accept the encryption keys and clear any stale encryption map + d.Lock() + d.keys = keys + d.secMap = &encrMap{nodes: map[string][]*spi{}} + d.Unlock() + logrus.Debugf("Initial encryption keys: %v", keys) + return nil +} + +// updateKeys allows adding a new key and/or changing the primary key and/or pruning an existing key. +// The primary key is the key used in transmission and will go in the first position in the list. +func (d *driver) updateKeys(newKey, primary, pruneKey *key) error { + logrus.Debugf("Updating Keys. New: %v, Primary: %v, Pruned: %v", newKey, primary, pruneKey) + + logrus.Debugf("Current: %v", d.keys) + + var ( + newIdx = -1 + priIdx = -1 + delIdx = -1 + lIP = net.ParseIP(d.bindAddress) + aIP = net.ParseIP(d.advertiseAddress) + ) + + d.Lock() + defer d.Unlock() + + // add new + if newKey != nil { + d.keys = append(d.keys, newKey) + // newIdx ends up as the index of the just-appended key (-1 + new length) + newIdx += len(d.keys) + } + for i, k := range d.keys { + if primary != nil && k.tag == primary.tag { + priIdx = i + } + if pruneKey != nil && k.tag == pruneKey.tag { + delIdx = i + } + } + + if (newKey != nil && newIdx == -1) || + (primary != nil && priIdx == -1) || + (pruneKey != nil && delIdx == -1) { + return types.BadRequestErrorf("cannot find proper key indices while processing key update:"+ + "(newIdx,priIdx,delIdx):(%d, %d, %d)", newIdx, priIdx, delIdx) + } + + if priIdx != -1 && priIdx == delIdx { + return types.BadRequestErrorf("attempting to both make a key (index %d) primary and delete it", priIdx) + } + + d.secMapWalk(func(rIPs string, spis []*spi) ([]*spi, bool) { + rIP := net.ParseIP(rIPs) + return updateNodeKey(lIP, aIP, rIP, spis, d.keys, newIdx, priIdx, delIdx), false + }) + + // swap primary + if priIdx != -1 { + d.keys[0], d.keys[priIdx] = d.keys[priIdx], d.keys[0] + } + // prune + if delIdx != -1 { + if delIdx == 0 { + delIdx = priIdx + } + d.keys = append(d.keys[:delIdx], d.keys[delIdx+1:]...)
+ } + + logrus.Debugf("Updated: %v", d.keys) + + return nil +} + +/******************************************************** + * Steady state: rSA0, rSA1, rSA2, fSA1, fSP1 + * Rotation --> -rSA0, +rSA3, +fSA2, +fSP2/-fSP1, -fSA1 + * Steady state: rSA1, rSA2, rSA3, fSA2, fSP2 + *********************************************************/ + +// Spis and keys are sorted in such a way that the one in position 0 is the primary +func updateNodeKey(lIP, aIP, rIP net.IP, idxs []*spi, curKeys []*key, newIdx, priIdx, delIdx int) []*spi { + logrus.Debugf("Updating keys for node: %s (%d,%d,%d)", rIP, newIdx, priIdx, delIdx) + + spis := idxs + logrus.Debugf("Current: %v", spis) + + // add new + if newIdx != -1 { + spis = append(spis, &spi{ + forward: buildSPI(aIP, rIP, curKeys[newIdx].tag), + reverse: buildSPI(rIP, aIP, curKeys[newIdx].tag), + }) + } + + if delIdx != -1 { + // -rSA0 + programSA(lIP, rIP, spis[delIdx], nil, reverse, false) + } + + if newIdx > -1 { + // +rSA2 + programSA(lIP, rIP, spis[newIdx], curKeys[newIdx], reverse, true) + } + + if priIdx > 0 { + // +fSA2 + fSA2, _, _ := programSA(lIP, rIP, spis[priIdx], curKeys[priIdx], forward, true) + + // +fSP2, -fSP1 + s := types.GetMinimalIP(fSA2.Src) + d := types.GetMinimalIP(fSA2.Dst) + fullMask := net.CIDRMask(8*len(s), 8*len(s)) + + fSP1 := &netlink.XfrmPolicy{ + Src: &net.IPNet{IP: s, Mask: fullMask}, + Dst: &net.IPNet{IP: d, Mask: fullMask}, + Dir: netlink.XFRM_DIR_OUT, + Proto: 17, + DstPort: 4789, + Mark: &spMark, + Tmpls: []netlink.XfrmPolicyTmpl{ + { + Src: fSA2.Src, + Dst: fSA2.Dst, + Proto: netlink.XFRM_PROTO_ESP, + Mode: netlink.XFRM_MODE_TRANSPORT, + Spi: fSA2.Spi, + Reqid: r, + }, + }, + } + logrus.Debugf("Updating fSP{%s}", fSP1) + if err := ns.NlHandle().XfrmPolicyUpdate(fSP1); err != nil { + logrus.Warnf("Failed to update fSP{%s}: %v", fSP1, err) + } + + // -fSA1 + programSA(lIP, rIP, spis[0], nil, forward, false) + } + + // swap + if priIdx > 0 { + swp := spis[0] + spis[0] = spis[priIdx] + spis[priIdx] = swp + } + // prune + if delIdx != -1 { + if delIdx == 0 { + delIdx = priIdx + } + spis = append(spis[:delIdx], spis[delIdx+1:]...)
+ } + + logrus.Debugf("Updated: %v", spis) + + return spis +} + +func (n *network) maxMTU() int { + mtu := 1500 + if n.mtu != 0 { + mtu = n.mtu + } + mtu -= vxlanEncap + if n.secure { + // In case of encryption account for the + // ESP packet expansion and padding + mtu -= pktExpansion + mtu -= (mtu % 4) // align to a 4-byte boundary + } + return mtu +} + +func clearEncryptionStates() { + nlh := ns.NlHandle() + spList, err := nlh.XfrmPolicyList(netlink.FAMILY_ALL) + if err != nil { + logrus.Warnf("Failed to retrieve SP list for cleanup: %v", err) + } + saList, err := nlh.XfrmStateList(netlink.FAMILY_ALL) + if err != nil { + logrus.Warnf("Failed to retrieve SA list for cleanup: %v", err) + } + for _, sp := range spList { + if sp.Mark != nil && sp.Mark.Value == spMark.Value { + if err := nlh.XfrmPolicyDel(&sp); err != nil { + logrus.Warnf("Failed to delete stale SP %s: %v", sp, err) + continue + } + logrus.Debugf("Removed stale SP: %s", sp) + } + } + for _, sa := range saList { + if sa.Reqid == r { + if err := nlh.XfrmStateDel(&sa); err != nil { + logrus.Warnf("Failed to delete stale SA %s: %v", sa, err) + continue + } + logrus.Debugf("Removed stale SA: %s", sa) + } + } +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/filter.go b/vendor/github.com/docker/libnetwork/drivers/overlay/filter.go new file mode 100644 index 0000000000..1601803aa0 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/filter.go @@ -0,0 +1,142 @@ +package overlay + +import ( + "fmt" + "sync" + + "github.com/docker/libnetwork/iptables" + "github.com/sirupsen/logrus" +) + +const globalChain = "DOCKER-OVERLAY" + +var filterOnce sync.Once + +var filterChan = make(chan struct{}, 1) + +func filterWait() func() { + filterChan <- struct{}{} + return func() { <-filterChan } +} + +func chainExists(cname string) bool { + if _, err := iptables.Raw("-L", cname); err != nil { + return false + } + + return true +} + +func setupGlobalChain() { + // Because of an ungraceful shutdown, chain could already be present + if !chainExists(globalChain) { + if err := iptables.RawCombinedOutput("-N", globalChain); err != nil { + logrus.Errorf("could not create global overlay chain: %v", err) + return + } + } + + if !iptables.Exists(iptables.Filter, globalChain, "-j", "RETURN") { + if err := iptables.RawCombinedOutput("-A", globalChain, "-j", "RETURN"); err != nil { + logrus.Errorf("could not install default return chain in the overlay global chain: %v", err) + } + } +} + +func setNetworkChain(cname string, remove bool) error { + // Initialize the onetime global overlay chain + filterOnce.Do(setupGlobalChain) + + exists := chainExists(cname) + + opt := "-N" + // In case of remove, make sure to flush the rules in the chain + if remove && exists { + if err := iptables.RawCombinedOutput("-F", cname); err != nil { + return fmt.Errorf("failed to flush overlay network chain %s rules: %v", cname, err) + } + opt = "-X" + } + + if (!remove && !exists) || (remove && exists) { + if err := iptables.RawCombinedOutput(opt, cname); err != nil { + return fmt.Errorf("failed network chain operation %q for chain %s: %v", opt, cname, err) + } + } + + if !remove { + if !iptables.Exists(iptables.Filter, cname, "-j", "DROP") { + if err := iptables.RawCombinedOutput("-A", cname, "-j", "DROP"); err != nil { + return fmt.Errorf("failed adding default drop rule to overlay network chain %s: %v", cname, err) + } + } + } + + return nil +} + +func addNetworkChain(cname string) error { + defer filterWait()() + + return setNetworkChain(cname, false) +} + +func
removeNetworkChain(cname string) error { + defer filterWait()() + + return setNetworkChain(cname, true) +} + +func setFilters(cname, brName string, remove bool) error { + opt := "-I" + if remove { + opt = "-D" + } + + // Every time we set filters for a new subnet make sure to move the global overlay hook to the top of both the OUTPUT and FORWARD chains + if !remove { + for _, chain := range []string{"OUTPUT", "FORWARD"} { + exists := iptables.Exists(iptables.Filter, chain, "-j", globalChain) + if exists { + if err := iptables.RawCombinedOutput("-D", chain, "-j", globalChain); err != nil { + return fmt.Errorf("failed to delete overlay hook in chain %s while moving the hook: %v", chain, err) + } + } + + if err := iptables.RawCombinedOutput("-I", chain, "-j", globalChain); err != nil { + return fmt.Errorf("failed to insert overlay hook in chain %s: %v", chain, err) + } + } + } + + // Insert/Delete the rule to jump to per-bridge chain + exists := iptables.Exists(iptables.Filter, globalChain, "-o", brName, "-j", cname) + if (!remove && !exists) || (remove && exists) { + if err := iptables.RawCombinedOutput(opt, globalChain, "-o", brName, "-j", cname); err != nil { + return fmt.Errorf("failed to add per-bridge filter rule for bridge %s, network chain %s: %v", brName, cname, err) + } + } + + exists = iptables.Exists(iptables.Filter, cname, "-i", brName, "-j", "ACCEPT") + if (!remove && exists) || (remove && !exists) { + return nil + } + + if err := iptables.RawCombinedOutput(opt, cname, "-i", brName, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("failed to add overlay filter rule for network chain %s, bridge %s: %v", cname, brName, err) + } + + return nil +} + +func addFilters(cname, brName string) error { + defer filterWait()() + + return setFilters(cname, brName, false) +} + +func removeFilters(cname, brName string) error { + defer filterWait()() + + return setFilters(cname, brName, true) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go b/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go new file mode 100644 index 0000000000..0770513e7d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go @@ -0,0 +1,240 @@ +package overlay + +import ( + "fmt" + "net" + "syscall" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/gogo/protobuf/proto" + "github.com/sirupsen/logrus" +) + +// Join method is invoked when a Sandbox is attached to an endpoint.
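+// It wires the endpoint into the network's sandbox: it creates a veth pair, +// attaches one end to the bridge of the endpoint's subnet inside the sandbox, +// sizes both ends to n.maxMTU() to leave room for the vxlan encapsulation, and +// then advertises the endpoint to the other nodes.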
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("could not find network with id %s", nid) + } + + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + + if n.secure && len(d.keys) == 0 { + return fmt.Errorf("cannot join secure network: encryption keys not present") + } + + nlh := ns.NlHandle() + + if n.secure && !nlh.SupportsNetlinkFamily(syscall.NETLINK_XFRM) { + return fmt.Errorf("cannot join secure network: required modules to install IPSEC rules are missing on host") + } + + s := n.getSubnetforIP(ep.addr) + if s == nil { + return fmt.Errorf("could not find subnet for endpoint %s", eid) + } + + if err := n.obtainVxlanID(s); err != nil { + return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err) + } + + if err := n.joinSandbox(false); err != nil { + return fmt.Errorf("network sandbox join failed: %v", err) + } + + if err := n.joinSubnetSandbox(s, false); err != nil { + return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err) + } + + // joinSubnetSandbox gets called when an endpoint comes up on a new subnet in the + // overlay network. Hence the Endpoint count should be updated outside joinSubnetSandbox + n.incEndpointCount() + + sbox := n.sandbox() + + overlayIfName, containerIfName, err := createVethPair() + if err != nil { + return err + } + + ep.ifName = containerIfName + + if err = d.writeEndpointToStore(ep); err != nil { + return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err) + } + + // Set the container interface and its peer MTU to the network's maximum + // (1500 by default, less the 50 bytes of vxlan encap: inner eth header(14) + + // outer IP(20) + outer UDP(8) + vxlan header(8), and less again when encrypted) + mtu := n.maxMTU() + + veth, err := nlh.LinkByName(overlayIfName) + if err != nil { + return fmt.Errorf("could not find link by name %s: %v", overlayIfName, err) + } + err = nlh.LinkSetMTU(veth, mtu) + if err != nil { + return err + } + + if err = sbox.AddInterface(overlayIfName, "veth", + sbox.InterfaceOptions().Master(s.brName)); err != nil { + return fmt.Errorf("could not add veth pair inside the network sandbox: %v", err) + } + + veth, err = nlh.LinkByName(containerIfName) + if err != nil { + return fmt.Errorf("could not find link by name %s: %v", containerIfName, err) + } + err = nlh.LinkSetMTU(veth, mtu) + if err != nil { + return err + } + + if err = nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil { + return fmt.Errorf("could not set mac address (%v) to the container interface: %v", ep.mac, err) + } + + for _, sub := range n.subnets { + if sub == s { + continue + } + if err = jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil { + logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id) + } + } + + if iNames := jinfo.InterfaceName(); iNames != nil { + err = iNames.SetNames(containerIfName, "eth") + if err != nil { + return err + } + } + + d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), false, false, true) + + if err = d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil { + logrus.Warn(err) + } + + buf, err := proto.Marshal(&PeerRecord{ + EndpointIP: ep.addr.String(), + EndpointMAC: ep.mac.String(), + TunnelEndpointIP: d.advertiseAddress, + }) + if err != nil { + return err + } + +
if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil { + logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err) + } + + d.pushLocalEndpointEvent("join", nid, eid) + + return nil +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + if tablename != ovPeerTable { + logrus.Errorf("DecodeTableEntry: unexpected table name %s", tablename) + return "", nil + } + + var peer PeerRecord + if err := proto.Unmarshal(value, &peer); err != nil { + logrus.Errorf("DecodeTableEntry: failed to unmarshal peer record for key %s: %v", key, err) + return "", nil + } + + return key, map[string]string{ + "Host IP": peer.TunnelEndpointIP, + } +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { + if tableName != ovPeerTable { + logrus.Errorf("Unexpected table notification for table %s received", tableName) + return + } + + eid := key + + var peer PeerRecord + if err := proto.Unmarshal(value, &peer); err != nil { + logrus.Errorf("Failed to unmarshal peer record: %v", err) + return + } + + // Ignore local peers. We already know about them and they + // should not be added to vxlan fdb. + if peer.TunnelEndpointIP == d.advertiseAddress { + return + } + + addr, err := types.ParseCIDR(peer.EndpointIP) + if err != nil { + logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) + return + } + + mac, err := net.ParseMAC(peer.EndpointMAC) + if err != nil { + logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) + return + } + + vtep := net.ParseIP(peer.TunnelEndpointIP) + if vtep == nil { + logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) + return + } + + if etype == driverapi.Delete { + d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, false) + return + } + + d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, false, false, false) +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. 
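+// It withdraws the endpoint from the cluster (via d.notifyCh when serf is +// active), deletes the corresponding peer entry, and decrements the network's +// sandbox join count; the last leaver tears the sandbox down.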
+func (d *driver) Leave(nid, eid string) error { + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("could not find network with id %s", nid) + } + + ep := n.endpoint(eid) + + if ep == nil { + return types.InternalMaskableErrorf("could not find endpoint with id %s", eid) + } + + if d.notifyCh != nil { + d.notifyCh <- ovNotify{ + action: "leave", + nw: n, + ep: ep, + } + } + + d.peerDelete(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true) + + n.leaveSandbox() + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_linux.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_linux.go new file mode 100644 index 0000000000..68f1ee9cee --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_linux.go @@ -0,0 +1,72 @@ +package overlay + +import ( + "io/ioutil" + "path" + "strconv" + "strings" + + "github.com/sirupsen/logrus" +) + +type conditionalCheck func(val1, val2 string) bool + +type osValue struct { + value string + checkFn conditionalCheck +} + +var osConfig = map[string]osValue{ + "net.ipv4.neigh.default.gc_thresh1": {"8192", checkHigher}, + "net.ipv4.neigh.default.gc_thresh2": {"49152", checkHigher}, + "net.ipv4.neigh.default.gc_thresh3": {"65536", checkHigher}, +} + +func propertyIsValid(val1, val2 string, check conditionalCheck) bool { + if check == nil || check(val1, val2) { + return true + } + return false +} + +func checkHigher(val1, val2 string) bool { + val1Int, _ := strconv.ParseInt(val1, 10, 32) + val2Int, _ := strconv.ParseInt(val2, 10, 32) + return val1Int < val2Int +} + +// writeSystemProperty writes the value to a path under /proc/sys as determined from the key. +// For example, net.ipv4.ip_forward translates to /proc/sys/net/ipv4/ip_forward.
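+// A call such as writeSystemProperty("net.ipv4.neigh.default.gc_thresh1", "8192") +// would therefore write /proc/sys/net/ipv4/neigh/default/gc_thresh1, which is +// how applyOStweaks below applies the thresholds listed in osConfig.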
+func writeSystemProperty(key, value string) error { + keyPath := strings.Replace(key, ".", "/", -1) + return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644) +} + +func readSystemProperty(key string) (string, error) { + keyPath := strings.Replace(key, ".", "/", -1) + value, err := ioutil.ReadFile(path.Join("/proc/sys", keyPath)) + if err != nil { + return "", err + } + return string(value), nil +} + +func applyOStweaks() { + for k, v := range osConfig { + // read the existing property from disk + oldv, err := readSystemProperty(k) + if err != nil { + logrus.Errorf("error reading the kernel parameter %s, error: %s", k, err) + continue + } + + if propertyIsValid(oldv, v.value, v.checkFn) { + // write new prop value to disk + if err := writeSystemProperty(k, v.value); err != nil { + logrus.Errorf("error setting the kernel parameter %s = %s, (leaving as %s) error: %s", k, v.value, oldv, err) + continue + } + logrus.Debugf("updated kernel parameter %s = %s (was %s)", k, v.value, oldv) + } + } +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_unsupported.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_unsupported.go new file mode 100644 index 0000000000..a5e8d91083 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ostweaks_unsupported.go @@ -0,0 +1,5 @@ +// +build !linux + +package overlay + +func applyOStweaks() {} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go new file mode 100644 index 0000000000..bb08de465c --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go @@ -0,0 +1,252 @@ +package overlay + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type endpointTable map[string]*endpoint + +const overlayEndpointPrefix = "overlay/endpoint" + +type endpoint struct { + id string + nid string + ifName string + mac net.HardwareAddr + addr *net.IPNet + dbExists bool + dbIndex uint64 +} + +func (n *network) endpoint(eid string) *endpoint { + n.Lock() + defer n.Unlock() + + return n.endpoints[eid] +} + +func (n *network) addEndpoint(ep *endpoint) { + n.Lock() + n.endpoints[ep.id] = ep + n.Unlock() +} + +func (n *network) deleteEndpoint(eid string) { + n.Lock() + delete(n.endpoints, eid) + n.Unlock() +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, + epOptions map[string]interface{}) error { + var err error + + if err = validateID(nid, eid); err != nil { + return err + } + + // Since we perform lazy configuration make sure we try + // configuring the driver when we enter CreateEndpoint since + // CreateNetwork may not be called in every node. 
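+ // (CreateNetwork and DeleteNetwork call d.configure() the same way, so + // whichever entry point runs first on a node performs the one-time setup.)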
+ if err := d.configure(); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + + ep := &endpoint{ + id: eid, + nid: n.id, + addr: ifInfo.Address(), + mac: ifInfo.MacAddress(), + } + if ep.addr == nil { + return fmt.Errorf("create endpoint was not passed interface IP address") + } + + if s := n.getSubnetforIP(ep.addr); s == nil { + return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid) + } + + if ep.mac == nil { + ep.mac = netutils.GenerateMACFromIP(ep.addr.IP) + if err := ifInfo.SetMacAddress(ep.mac); err != nil { + return err + } + } + + n.addEndpoint(ep) + + if err := d.writeEndpointToStore(ep); err != nil { + return fmt.Errorf("failed to update overlay endpoint %s to local store: %v", ep.id[0:7], err) + } + + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + nlh := ns.NlHandle() + + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("endpoint id %q not found", eid) + } + + n.deleteEndpoint(eid) + + if err := d.deleteEndpointFromStore(ep); err != nil { + logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err) + } + + if ep.ifName == "" { + return nil + } + + link, err := nlh.LinkByName(ep.ifName) + if err != nil { + logrus.Debugf("Failed to retrieve interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + return nil + } + if err := nlh.LinkDel(link); err != nil { + logrus.Debugf("Failed to delete interface (%s)'s link on endpoint (%s) delete: %v", ep.ifName, ep.id, err) + } + + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + return make(map[string]interface{}, 0), nil +} + +func (d *driver) deleteEndpointFromStore(e *endpoint) error { + if d.localStore == nil { + return fmt.Errorf("overlay local store not initialized, ep not deleted") + } + + return d.localStore.DeleteObjectAtomic(e) +} + +func (d *driver) writeEndpointToStore(e *endpoint) error { + if d.localStore == nil { + return fmt.Errorf("overlay local store not initialized, ep not added") + } + + return d.localStore.PutObjectAtomic(e) +} + +func (ep *endpoint) DataScope() string { + return datastore.LocalScope +} + +func (ep *endpoint) New() datastore.KVObject { + return &endpoint{} +} + +func (ep *endpoint) CopyTo(o datastore.KVObject) error { + dstep := o.(*endpoint) + *dstep = *ep + return nil +} + +func (ep *endpoint) Key() []string { + return []string{overlayEndpointPrefix, ep.id} +} + +func (ep *endpoint) KeyPrefix() []string { + return []string{overlayEndpointPrefix} +} + +func (ep *endpoint) Index() uint64 { + return ep.dbIndex +} + +func (ep *endpoint) SetIndex(index uint64) { + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *endpoint) Exists() bool { + return ep.dbExists +} + +func (ep *endpoint) Skip() bool { + return false +} + +func (ep *endpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *endpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *endpoint) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + + epMap["id"] = ep.id + epMap["nid"] = ep.nid + if ep.ifName != "" { + epMap["ifName"] = ep.ifName + } + if ep.addr != nil { + epMap["addr"] = ep.addr.String() + } + if len(ep.mac) != 0 { + 
epMap["mac"] = ep.mac.String() + } + + return json.Marshal(epMap) +} + +func (ep *endpoint) UnmarshalJSON(value []byte) error { + var ( + err error + epMap map[string]interface{} + ) + + json.Unmarshal(value, &epMap) + + ep.id = epMap["id"].(string) + ep.nid = epMap["nid"].(string) + if v, ok := epMap["mac"]; ok { + if ep.mac, err = net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string)) + } + } + if v, ok := epMap["addr"]; ok { + if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err) + } + } + if v, ok := epMap["ifName"]; ok { + ep.ifName = v.(string) + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go new file mode 100644 index 0000000000..9c83127d4f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go @@ -0,0 +1,1128 @@ +package overlay + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +var ( + hostMode bool + networkOnce sync.Once + networkMu sync.Mutex + vniTbl = make(map[uint32]string) +) + +type networkTable map[string]*network + +type subnet struct { + once *sync.Once + vxlanName string + brName string + vni uint32 + initErr error + subnetIP *net.IPNet + gwIP *net.IPNet +} + +type subnetJSON struct { + SubnetIP string + GwIP string + Vni uint32 +} + +type network struct { + id string + dbIndex uint64 + dbExists bool + sbox osl.Sandbox + nlSocket *nl.NetlinkSocket + endpoints endpointTable + driver *driver + joinCnt int + once *sync.Once + initEpoch int + initErr error + subnets []*subnet + secure bool + mtu int + sync.Mutex +} + +func init() { + reexec.Register("set-default-vlan", setDefaultVlan) +} + +func setDefaultVlan() { + if len(os.Args) < 3 { + logrus.Error("insufficient number of arguments") + os.Exit(1) + } + + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + nsPath := os.Args[1] + ns, err := netns.GetFromPath(nsPath) + if err != nil { + logrus.Errorf("overlay namespace get failed, %v", err) + os.Exit(1) + } + if err = netns.Set(ns); err != nil { + logrus.Errorf("setting into overlay namespace failed, %v", err) + os.Exit(1) + } + + // make sure the sysfs mount doesn't propagate back + if err = syscall.Unshare(syscall.CLONE_NEWNS); err != nil { + logrus.Errorf("unshare failed, %v", err) + os.Exit(1) + } + + flag := syscall.MS_PRIVATE | syscall.MS_REC + if err = syscall.Mount("", "/", "", uintptr(flag), ""); err != nil { + logrus.Errorf("root mount failed, %v", err) + os.Exit(1) + } + + if err = syscall.Mount("sysfs", "/sys", "sysfs", 0, ""); err != nil { + logrus.Errorf("mounting sysfs failed, %v", err) + os.Exit(1) + } + + brName := os.Args[2] + path := filepath.Join("/sys/class/net", brName, 
"bridge/default_pvid") + data := []byte{'0', '\n'} + + if err = ioutil.WriteFile(path, data, 0644); err != nil { + logrus.Errorf("enabling default vlan on bridge %s failed %v", brName, err) + os.Exit(1) + } + os.Exit(0) +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + if id == "" { + return fmt.Errorf("invalid network id") + } + if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" { + return types.BadRequestErrorf("ipv4 pool is empty") + } + + // Since we perform lazy configuration make sure we try + // configuring the driver when we enter CreateNetwork + if err := d.configure(); err != nil { + return err + } + + n := &network{ + id: id, + driver: d, + endpoints: endpointTable{}, + once: &sync.Once{}, + subnets: []*subnet{}, + } + + vnis := make([]uint32, 0, len(ipV4Data)) + if gval, ok := option[netlabel.GenericData]; ok { + optMap := gval.(map[string]string) + if val, ok := optMap[netlabel.OverlayVxlanIDList]; ok { + logrus.Debugf("overlay: Received vxlan IDs: %s", val) + vniStrings := strings.Split(val, ",") + for _, vniStr := range vniStrings { + vni, err := strconv.Atoi(vniStr) + if err != nil { + return fmt.Errorf("invalid vxlan id value %q passed", vniStr) + } + + vnis = append(vnis, uint32(vni)) + } + } + if _, ok := optMap[secureOption]; ok { + n.secure = true + } + if val, ok := optMap[netlabel.DriverMTU]; ok { + var err error + if n.mtu, err = strconv.Atoi(val); err != nil { + return fmt.Errorf("failed to parse %v: %v", val, err) + } + if n.mtu < 0 { + return fmt.Errorf("invalid MTU value: %v", n.mtu) + } + } + } + + // If we are getting vnis from libnetwork, either we get for + // all subnets or none. + if len(vnis) != 0 && len(vnis) < len(ipV4Data) { + return fmt.Errorf("insufficient vnis(%d) passed to overlay", len(vnis)) + } + + for i, ipd := range ipV4Data { + s := &subnet{ + subnetIP: ipd.Pool, + gwIP: ipd.Gateway, + once: &sync.Once{}, + } + + if len(vnis) != 0 { + s.vni = vnis[i] + } + + n.subnets = append(n.subnets, s) + } + + d.Lock() + defer d.Unlock() + if d.networks[n.id] != nil { + return fmt.Errorf("attempt to create overlay network %v that already exists", n.id) + } + + if err := n.writeToStore(); err != nil { + return fmt.Errorf("failed to update data store for network %v: %v", n.id, err) + } + + // Make sure no rule is on the way from any stale secure network + if !n.secure { + for _, vni := range vnis { + programMangle(vni, false) + programInput(vni, false) + } + } + + if nInfo != nil { + if err := nInfo.TableEventRegister(ovPeerTable, driverapi.EndpointObject); err != nil { + // XXX Undo writeToStore? No method to so. Why? + return err + } + } + + d.networks[id] = n + + return nil +} + +func (d *driver) DeleteNetwork(nid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + + // Make sure driver resources are initialized before proceeding + if err := d.configure(); err != nil { + return err + } + + d.Lock() + defer d.Unlock() + + // This is similar to d.network(), but we need to keep holding the lock + // until we are done removing this network. 
+ n, ok := d.networks[nid] + if !ok { + n = d.restoreNetworkFromStore(nid) + } + if n == nil { + return fmt.Errorf("could not find network with id %s", nid) + } + + for _, ep := range n.endpoints { + if ep.ifName != "" { + if link, err := ns.NlHandle().LinkByName(ep.ifName); err == nil { + if err := ns.NlHandle().LinkDel(link); err != nil { + logrus.WithError(err).Warnf("Failed to delete interface (%s)'s link on endpoint (%s) delete", ep.ifName, ep.id) + } + } + } + + if err := d.deleteEndpointFromStore(ep); err != nil { + logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err) + } + } + // flush the peerDB entries + d.peerFlush(nid) + delete(d.networks, nid) + + vnis, err := n.releaseVxlanID() + if err != nil { + return err + } + + if n.secure { + for _, vni := range vnis { + programMangle(vni, false) + programInput(vni, false) + } + } + + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +func (n *network) incEndpointCount() { + n.Lock() + defer n.Unlock() + n.joinCnt++ +} + +func (n *network) joinSandbox(restore bool) error { + // If there is a race between two go routines here only one will win; + // the other will wait. + n.once.Do(func() { + // save the error status of initSandbox in n.initErr so that + // all the racing go routines are able to know the status. + n.initErr = n.initSandbox(restore) + }) + + return n.initErr +} + +func (n *network) joinSubnetSandbox(s *subnet, restore bool) error { + s.once.Do(func() { + s.initErr = n.initSubnetSandbox(s, restore) + }) + return s.initErr +} + +func (n *network) leaveSandbox() { + n.Lock() + defer n.Unlock() + n.joinCnt-- + if n.joinCnt != 0 { + return + } + + // We are about to destroy the sandbox since the container is leaving the network. + // Reinitialize the once variable so that we will be able to trigger one-time + // sandbox initialization (again) when another container joins subsequently.
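+ // Holding the network lock makes the reset safe: the next + // joinSandbox/joinSubnetSandbox call will run initSandbox through the + // fresh sync.Once instances below.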
+ n.once = &sync.Once{} + for _, s := range n.subnets { + s.once = &sync.Once{} + } + + n.destroySandbox() +} + +// to be called while holding network lock +func (n *network) destroySandbox() { + if n.sbox != nil { + for _, iface := range n.sbox.Info().Interfaces() { + if err := iface.Remove(); err != nil { + logrus.Debugf("Remove interface %s failed: %v", iface.SrcName(), err) + } + } + + for _, s := range n.subnets { + if hostMode { + if err := removeFilters(n.id[:12], s.brName); err != nil { + logrus.Warnf("Could not remove overlay filters: %v", err) + } + } + + if s.vxlanName != "" { + err := deleteInterface(s.vxlanName) + if err != nil { + logrus.Warnf("could not cleanup sandbox properly: %v", err) + } + } + } + + if hostMode { + if err := removeNetworkChain(n.id[:12]); err != nil { + logrus.Warnf("could not remove network chain: %v", err) + } + } + + // Close the netlink socket, this will also release the watchMiss goroutine that is using it + if n.nlSocket != nil { + n.nlSocket.Close() + n.nlSocket = nil + } + + n.sbox.Destroy() + n.sbox = nil + } +} + +func populateVNITbl() { + filepath.Walk(filepath.Dir(osl.GenerateKey("walk")), + func(path string, info os.FileInfo, err error) error { + _, fname := filepath.Split(path) + + if len(strings.Split(fname, "-")) <= 1 { + return nil + } + + ns, err := netns.GetFromPath(path) + if err != nil { + logrus.Errorf("Could not open namespace path %s during vni population: %v", path, err) + return nil + } + defer ns.Close() + + nlh, err := netlink.NewHandleAt(ns, syscall.NETLINK_ROUTE) + if err != nil { + logrus.Errorf("Could not open netlink handle during vni population for ns %s: %v", path, err) + return nil + } + defer nlh.Delete() + + err = nlh.SetSocketTimeout(soTimeout) + if err != nil { + logrus.Warnf("Failed to set the timeout on the netlink handle sockets for vni table population: %v", err) + } + + links, err := nlh.LinkList() + if err != nil { + logrus.Errorf("Failed to list interfaces during vni population for ns %s: %v", path, err) + return nil + } + + for _, l := range links { + if l.Type() == "vxlan" { + vniTbl[uint32(l.(*netlink.Vxlan).VxlanId)] = path + } + } + + return nil + }) +} + +func networkOnceInit() { + populateVNITbl() + + if os.Getenv("_OVERLAY_HOST_MODE") != "" { + hostMode = true + return + } + + err := createVxlan("testvxlan", 1, 0) + if err != nil { + logrus.Errorf("Failed to create testvxlan interface: %v", err) + return + } + + defer deleteInterface("testvxlan") + + path := "/proc/self/ns/net" + hNs, err := netns.GetFromPath(path) + if err != nil { + logrus.Errorf("Failed to get network namespace from path %s while setting host mode: %v", path, err) + return + } + defer hNs.Close() + + nlh := ns.NlHandle() + + iface, err := nlh.LinkByName("testvxlan") + if err != nil { + logrus.Errorf("Failed to get link testvxlan while setting host mode: %v", err) + return + } + + // If we are not able to move the vxlan interface to a namespace + // then fallback to host mode + if err := nlh.LinkSetNsFd(iface, int(hNs)); err != nil { + hostMode = true + } +} + +func (n *network) generateVxlanName(s *subnet) string { + id := n.id + if len(n.id) > 5 { + id = n.id[:5] + } + + return "vx-" + fmt.Sprintf("%06x", n.vxlanID(s)) + "-" + id +} + +func (n *network) generateBridgeName(s *subnet) string { + id := n.id + if len(n.id) > 5 { + id = n.id[:5] + } + + return n.getBridgeNamePrefix(s) + "-" + id +} + +func (n *network) getBridgeNamePrefix(s *subnet) string { + return "ov-" + fmt.Sprintf("%06x", n.vxlanID(s)) +} + +func 
checkOverlap(nw *net.IPNet) error { + var nameservers []string + + if rc, err := resolvconf.Get(); err == nil { + nameservers = resolvconf.GetNameserversAsCIDR(rc.Content) + } + + if err := netutils.CheckNameserverOverlaps(nameservers, nw); err != nil { + return fmt.Errorf("overlay subnet %s failed check with nameserver: %v: %v", nw.String(), nameservers, err) + } + + if err := netutils.CheckRouteOverlaps(nw); err != nil { + return fmt.Errorf("overlay subnet %s failed check with host route table: %v", nw.String(), err) + } + + return nil +} + +func (n *network) restoreSubnetSandbox(s *subnet, brName, vxlanName string) error { + sbox := n.sandbox() + + // restore overlay osl sandbox + Ifaces := make(map[string][]osl.IfaceOption) + brIfaceOption := make([]osl.IfaceOption, 0, 2) + brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP)) + brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true)) + Ifaces[brName+"+br"] = brIfaceOption + + err := sbox.Restore(Ifaces, nil, nil, nil) + if err != nil { + return err + } + + Ifaces = make(map[string][]osl.IfaceOption) + vxlanIfaceOption := make([]osl.IfaceOption, 0, 1) + vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName)) + Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption + return sbox.Restore(Ifaces, nil, nil, nil) +} + +func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error { + + if hostMode { + // Try to delete stale bridge interface if it exists + if err := deleteInterface(brName); err != nil { + deleteInterfaceBySubnet(n.getBridgeNamePrefix(s), s) + } + // Try to delete the vxlan interface by vni if already present + deleteVxlanByVNI("", n.vxlanID(s)) + + if err := checkOverlap(s.subnetIP); err != nil { + return err + } + } + + if !hostMode { + // Try to find if this subnet's vni is being used in some + // other namespace by looking at vniTbl, which we just + // populated in the once init. If a hit is found then + // it must be a stale namespace from a previous + // life. Destroy it completely and reclaim resources.
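+ // (vniTbl, filled by populateVNITbl in the once init, maps vni to + // namespace path, so a hit names the stale namespace owning this vni.)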
+ networkMu.Lock() + path, ok := vniTbl[n.vxlanID(s)] + networkMu.Unlock() + + if ok { + deleteVxlanByVNI(path, n.vxlanID(s)) + if err := syscall.Unmount(path, syscall.MNT_FORCE); err != nil { + logrus.Errorf("unmount of %s failed: %v", path, err) + } + os.Remove(path) + + networkMu.Lock() + delete(vniTbl, n.vxlanID(s)) + networkMu.Unlock() + } + } + + // create a bridge and vxlan device for this subnet and move it to the sandbox + sbox := n.sandbox() + + if err := sbox.AddInterface(brName, "br", + sbox.InterfaceOptions().Address(s.gwIP), + sbox.InterfaceOptions().Bridge(true)); err != nil { + return fmt.Errorf("bridge creation in sandbox failed for subnet %q: %v", s.subnetIP.String(), err) + } + + err := createVxlan(vxlanName, n.vxlanID(s), n.maxMTU()) + if err != nil { + return err + } + + if err := sbox.AddInterface(vxlanName, "vxlan", + sbox.InterfaceOptions().Master(brName)); err != nil { + return fmt.Errorf("vxlan interface creation failed for subnet %q: %v", s.subnetIP.String(), err) + } + + if !hostMode { + var name string + for _, i := range sbox.Info().Interfaces() { + if i.Bridge() { + name = i.DstName() + } + } + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: []string{"set-default-vlan", sbox.Key(), name}, + Stdout: os.Stdout, + Stderr: os.Stderr, + } + if err := cmd.Run(); err != nil { + // not a fatal error + logrus.Errorf("reexec to set bridge default vlan failed %v", err) + } + } + + if hostMode { + if err := addFilters(n.id[:12], brName); err != nil { + return err + } + } + + return nil +} + +func (n *network) initSubnetSandbox(s *subnet, restore bool) error { + brName := n.generateBridgeName(s) + vxlanName := n.generateVxlanName(s) + + if restore { + if err := n.restoreSubnetSandbox(s, brName, vxlanName); err != nil { + return err + } + } else { + if err := n.setupSubnetSandbox(s, brName, vxlanName); err != nil { + return err + } + } + + n.Lock() + s.vxlanName = vxlanName + s.brName = brName + n.Unlock() + + return nil +} + +func (n *network) cleanupStaleSandboxes() { + filepath.Walk(filepath.Dir(osl.GenerateKey("walk")), + func(path string, info os.FileInfo, err error) error { + _, fname := filepath.Split(path) + + pList := strings.Split(fname, "-") + if len(pList) <= 1 { + return nil + } + + pattern := pList[1] + if strings.Contains(n.id, pattern) { + // Delete all vnis + deleteVxlanByVNI(path, 0) + syscall.Unmount(path, syscall.MNT_DETACH) + os.Remove(path) + + // Now that we have destroyed this + // sandbox, remove all references to + // it in vniTbl so that we don't + // inadvertently destroy the sandbox + // created in this life. + networkMu.Lock() + for vni, tblPath := range vniTbl { + if tblPath == path { + delete(vniTbl, vni) + } + } + networkMu.Unlock() + } + + return nil + }) +} + +func (n *network) initSandbox(restore bool) error { + n.Lock() + n.initEpoch++ + n.Unlock() + + networkOnce.Do(networkOnceInit) + + if !restore { + if hostMode { + if err := addNetworkChain(n.id[:12]); err != nil { + return err + } + } + + // If there are any stale sandboxes related to this network + // from a previous daemon life, clean them up here + n.cleanupStaleSandboxes() + } + + // In the restore case the network sandbox already exists, but we don't know + // what epoch number it was created with. It has to be retrieved by + // searching the net namespaces.
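+ // osl.GenerateKey derives the sandbox path from the passed string, so the + // restore path matches on "-<id>" while a fresh sandbox embeds the epoch, + // e.g. "2-<id>" for the second initialization.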
+ var key string + if restore { + key = osl.GenerateKey("-" + n.id) + } else { + key = osl.GenerateKey(fmt.Sprintf("%d-", n.initEpoch) + n.id) + } + + sbox, err := osl.NewSandbox(key, !hostMode, restore) + if err != nil { + return fmt.Errorf("could not get network sandbox (oper %t): %v", restore, err) + } + + // this is needed to let the peerAdd configure the sandbox + n.setSandbox(sbox) + + if !restore { + // Initialize the sandbox with all the peers previously received from networkdb + n.driver.initSandboxPeerDB(n.id) + } + + // If we are in swarm mode, we no longer need the watchMiss routine. + // This will save 1 thread and 1 netlink socket per network + if !n.driver.isSerfAlive() { + return nil + } + + var nlSock *nl.NetlinkSocket + sbox.InvokeFunc(func() { + nlSock, err = nl.Subscribe(syscall.NETLINK_ROUTE, syscall.RTNLGRP_NEIGH) + if err != nil { + return + } + // set the receive timeout to not remain stuck on the RecvFrom if the fd gets closed + tv := syscall.NsecToTimeval(soTimeout.Nanoseconds()) + err = nlSock.SetReceiveTimeout(&tv) + }) + n.setNetlinkSocket(nlSock) + + if err == nil { + go n.watchMiss(nlSock, key) + } else { + logrus.Errorf("failed to subscribe to neighbor group netlink messages for overlay network %s in sbox %s: %v", + n.id, sbox.Key(), err) + } + + return nil +} + +func (n *network) watchMiss(nlSock *nl.NetlinkSocket, nsPath string) { + // With the new version of the netlink library the deserialize function makes + // requests about the interface of the netlink message. This can succeed only + // if this goroutine is in the target namespace. For this reason, below we + // lock the thread to that namespace + runtime.LockOSThread() + defer runtime.UnlockOSThread() + newNs, err := netns.GetFromPath(nsPath) + if err != nil { + logrus.WithError(err).Errorf("failed to get the namespace %s", nsPath) + return + } + defer newNs.Close() + if err = netns.Set(newNs); err != nil { + logrus.WithError(err).Errorf("failed to enter the namespace %s", nsPath) + return + } + for { + msgs, err := nlSock.Receive() + if err != nil { + n.Lock() + nlFd := nlSock.GetFd() + n.Unlock() + if nlFd == -1 { + // The netlink socket got closed, simply exit to not leak this goroutine + return + } + // When the receive timeout expires the receive will return EAGAIN + if err == syscall.EAGAIN { + // we continue here to avoid spam for timeouts + continue + } + logrus.Errorf("Failed to receive from netlink: %v ", err) + continue + } + + for _, msg := range msgs { + if msg.Header.Type != syscall.RTM_GETNEIGH && msg.Header.Type != syscall.RTM_NEWNEIGH { + continue + } + + neigh, err := netlink.NeighDeserialize(msg.Data) + if err != nil { + logrus.Errorf("Failed to deserialize netlink ndmsg: %v", err) + continue + } + + var ( + ip net.IP + mac net.HardwareAddr + l2Miss, l3Miss bool + ) + if neigh.IP.To4() != nil { + ip = neigh.IP + l3Miss = true + } else if neigh.HardwareAddr != nil { + mac = []byte(neigh.HardwareAddr) + ip = net.IP(mac[2:]) + l2Miss = true + } else { + continue + } + + // Not any of the network's subnets. Ignore.
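+ // (An L3 miss carries the destination IP directly; an L2 miss encodes the + // IP in the last four bytes of the synthetic MAC, recovered above as mac[2:].)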
+ if !n.contains(ip) { + continue + } + + if neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 { + continue + } + + logrus.Debugf("miss notification: dest IP %v, dest MAC %v", ip, mac) + mac, IPmask, vtep, err := n.driver.resolvePeer(n.id, ip) + if err != nil { + logrus.Errorf("could not resolve peer %q: %v", ip, err) + continue + } + n.driver.peerAdd(n.id, "dummy", ip, IPmask, mac, vtep, l2Miss, l3Miss, false) + } + } +} + +// Restore a network from the store to the driver if it is present. +// Must be called with the driver locked! +func (d *driver) restoreNetworkFromStore(nid string) *network { + n := d.getNetworkFromStore(nid) + if n != nil { + n.driver = d + n.endpoints = endpointTable{} + n.once = &sync.Once{} + d.networks[nid] = n + } + return n +} + +func (d *driver) network(nid string) *network { + d.Lock() + defer d.Unlock() + n, ok := d.networks[nid] + if !ok { + n = d.restoreNetworkFromStore(nid) + } + + return n +} + +func (d *driver) getNetworkFromStore(nid string) *network { + if d.store == nil { + return nil + } + + n := &network{id: nid} + if err := d.store.GetObject(datastore.Key(n.Key()...), n); err != nil { + return nil + } + + return n +} + +func (n *network) sandbox() osl.Sandbox { + n.Lock() + defer n.Unlock() + + return n.sbox +} + +func (n *network) setSandbox(sbox osl.Sandbox) { + n.Lock() + n.sbox = sbox + n.Unlock() +} + +func (n *network) setNetlinkSocket(nlSk *nl.NetlinkSocket) { + n.Lock() + n.nlSocket = nlSk + n.Unlock() +} + +func (n *network) vxlanID(s *subnet) uint32 { + n.Lock() + defer n.Unlock() + + return s.vni +} + +func (n *network) setVxlanID(s *subnet, vni uint32) { + n.Lock() + s.vni = vni + n.Unlock() +} + +func (n *network) Key() []string { + return []string{"overlay", "network", n.id} +} + +func (n *network) KeyPrefix() []string { + return []string{"overlay", "network"} +} + +func (n *network) Value() []byte { + m := map[string]interface{}{} + + netJSON := []*subnetJSON{} + + for _, s := range n.subnets { + sj := &subnetJSON{ + SubnetIP: s.subnetIP.String(), + GwIP: s.gwIP.String(), + Vni: s.vni, + } + netJSON = append(netJSON, sj) + } + + m["secure"] = n.secure + m["subnets"] = netJSON + m["mtu"] = n.mtu + b, err := json.Marshal(m) + if err != nil { + return []byte{} + } + + return b +} + +func (n *network) Index() uint64 { + return n.dbIndex +} + +func (n *network) SetIndex(index uint64) { + n.dbIndex = index + n.dbExists = true +} + +func (n *network) Exists() bool { + return n.dbExists +} + +func (n *network) Skip() bool { + return false +} + +func (n *network) SetValue(value []byte) error { + var ( + m map[string]interface{} + newNet bool + isMap = true + netJSON = []*subnetJSON{} + ) + + if err := json.Unmarshal(value, &m); err != nil { + err := json.Unmarshal(value, &netJSON) + if err != nil { + return err + } + isMap = false + } + + if len(n.subnets) == 0 { + newNet = true + } + + if isMap { + if val, ok := m["secure"]; ok { + n.secure = val.(bool) + } + if val, ok := m["mtu"]; ok { + n.mtu = int(val.(float64)) + } + bytes, err := json.Marshal(m["subnets"]) + if err != nil { + return err + } + if err := json.Unmarshal(bytes, &netJSON); err != nil { + return err + } + } + + for _, sj := range netJSON { + subnetIPstr := sj.SubnetIP + gwIPstr := sj.GwIP + vni := sj.Vni + + subnetIP, _ := types.ParseCIDR(subnetIPstr) + gwIP, _ := types.ParseCIDR(gwIPstr) + + if newNet { + s := &subnet{ + subnetIP: subnetIP, + gwIP: gwIP, + vni: vni, + once: &sync.Once{}, + } + n.subnets = append(n.subnets, s) + } else { + sNet := 
n.getMatchingSubnet(subnetIP) + if sNet != nil { + sNet.vni = vni + } + } + } + return nil +} + +func (n *network) DataScope() string { + return datastore.GlobalScope +} + +func (n *network) writeToStore() error { + if n.driver.store == nil { + return nil + } + + return n.driver.store.PutObjectAtomic(n) +} + +func (n *network) releaseVxlanID() ([]uint32, error) { + if len(n.subnets) == 0 { + return nil, nil + } + + if n.driver.store != nil { + if err := n.driver.store.DeleteObjectAtomic(n); err != nil { + if err == datastore.ErrKeyModified || err == datastore.ErrKeyNotFound { + // In both the above cases we can safely assume that the key has been removed by some other + // instance and so simply get out of here + return nil, nil + } + + return nil, fmt.Errorf("failed to delete network to vxlan id map: %v", err) + } + } + var vnis []uint32 + for _, s := range n.subnets { + if n.driver.vxlanIdm != nil { + vni := n.vxlanID(s) + vnis = append(vnis, vni) + n.driver.vxlanIdm.Release(uint64(vni)) + } + + n.setVxlanID(s, 0) + } + + return vnis, nil +} + +func (n *network) obtainVxlanID(s *subnet) error { + // return if the subnet already has a vxlan id assigned + if s.vni != 0 { + return nil + } + + if n.driver.store == nil { + return fmt.Errorf("no valid vxlan id and no datastore configured, cannot obtain vxlan id") + } + + for { + if err := n.driver.store.GetObject(datastore.Key(n.Key()...), n); err != nil { + return fmt.Errorf("getting network %q from datastore failed %v", n.id, err) + } + + if s.vni == 0 { + vxlanID, err := n.driver.vxlanIdm.GetID(true) + if err != nil { + return fmt.Errorf("failed to allocate vxlan id: %v", err) + } + + n.setVxlanID(s, uint32(vxlanID)) + if err := n.writeToStore(); err != nil { + n.driver.vxlanIdm.Release(uint64(n.vxlanID(s))) + n.setVxlanID(s, 0) + if err == datastore.ErrKeyModified { + continue + } + return fmt.Errorf("network %q failed to update data store: %v", n.id, err) + } + return nil + } + return nil + } +} + +// contains returns true if the passed ip belongs to one of the network's +// subnets +func (n *network) contains(ip net.IP) bool { + for _, s := range n.subnets { + if s.subnetIP.Contains(ip) { + return true + } + } + + return false +} + +// getSubnetforIP returns the subnet to which the given IP belongs +func (n *network) getSubnetforIP(ip *net.IPNet) *subnet { + for _, s := range n.subnets { + // first check if the mask lengths are the same + i, _ := s.subnetIP.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if s.subnetIP.Contains(ip.IP) { + return s + } + } + return nil +} + +// getMatchingSubnet returns the network's subnet that matches the input +func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet { + if ip == nil { + return nil + } + for _, s := range n.subnets { + // first check if the mask lengths are the same + i, _ := s.subnetIP.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if s.subnetIP.IP.Equal(ip.IP) { + return s + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go new file mode 100644 index 0000000000..f644799afd --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go @@ -0,0 +1,229 @@ +package overlay + +import ( + "fmt" + "net" + "strings" + "time" + + "github.com/hashicorp/serf/serf" + "github.com/sirupsen/logrus" +) + +type ovNotify struct { + action string + ep *endpoint + nw *network +} + +type logWriter struct{} + +func (l *logWriter) Write(p
[]byte) (int, error) { + str := string(p) + + switch { + case strings.Contains(str, "[WARN]"): + logrus.Warn(str) + case strings.Contains(str, "[DEBUG]"): + logrus.Debug(str) + case strings.Contains(str, "[INFO]"): + logrus.Info(str) + case strings.Contains(str, "[ERR]"): + logrus.Error(str) + } + + return len(p), nil +} + +func (d *driver) serfInit() error { + var err error + + config := serf.DefaultConfig() + config.Init() + config.MemberlistConfig.BindAddr = d.advertiseAddress + + d.eventCh = make(chan serf.Event, 4) + config.EventCh = d.eventCh + config.UserCoalescePeriod = 1 * time.Second + config.UserQuiescentPeriod = 50 * time.Millisecond + + config.LogOutput = &logWriter{} + config.MemberlistConfig.LogOutput = config.LogOutput + + s, err := serf.Create(config) + if err != nil { + return fmt.Errorf("failed to create cluster node: %v", err) + } + defer func() { + if err != nil { + s.Shutdown() + } + }() + + d.serfInstance = s + + d.notifyCh = make(chan ovNotify) + d.exitCh = make(chan chan struct{}) + + go d.startSerfLoop(d.eventCh, d.notifyCh, d.exitCh) + return nil +} + +func (d *driver) serfJoin(neighIP string) error { + if neighIP == "" { + return fmt.Errorf("no neighbor to join") + } + if _, err := d.serfInstance.Join([]string{neighIP}, true); err != nil { + return fmt.Errorf("Failed to join the cluster at neigh IP %s: %v", + neighIP, err) + } + return nil +} + +func (d *driver) notifyEvent(event ovNotify) { + ep := event.ep + + ePayload := fmt.Sprintf("%s %s %s %s", event.action, ep.addr.IP.String(), + net.IP(ep.addr.Mask).String(), ep.mac.String()) + eName := fmt.Sprintf("jl %s %s %s", d.serfInstance.LocalMember().Addr.String(), + event.nw.id, ep.id) + + if err := d.serfInstance.UserEvent(eName, []byte(ePayload), true); err != nil { + logrus.Errorf("Sending user event failed: %v\n", err) + } +} + +func (d *driver) processEvent(u serf.UserEvent) { + logrus.Debugf("Received user event name:%s, payload:%s LTime:%d \n", u.Name, + string(u.Payload), uint64(u.LTime)) + + var dummy, action, vtepStr, nid, eid, ipStr, maskStr, macStr string + if _, err := fmt.Sscan(u.Name, &dummy, &vtepStr, &nid, &eid); err != nil { + fmt.Printf("Failed to scan name string: %v\n", err) + } + + if _, err := fmt.Sscan(string(u.Payload), &action, + &ipStr, &maskStr, &macStr); err != nil { + fmt.Printf("Failed to scan value string: %v\n", err) + } + + logrus.Debugf("Parsed data = %s/%s/%s/%s/%s/%s\n", nid, eid, vtepStr, ipStr, maskStr, macStr) + + mac, err := net.ParseMAC(macStr) + if err != nil { + logrus.Errorf("Failed to parse mac: %v\n", err) + } + + if d.serfInstance.LocalMember().Addr.String() == vtepStr { + return + } + + switch action { + case "join": + d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false, false, false) + case "leave": + d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false) + } +} + +func (d *driver) processQuery(q *serf.Query) { + logrus.Debugf("Received query name:%s, payload:%s\n", q.Name, + string(q.Payload)) + + var nid, ipStr string + if _, err := fmt.Sscan(string(q.Payload), &nid, &ipStr); err != nil { + fmt.Printf("Failed to scan query payload string: %v\n", err) + } + + pKey, pEntry, err := d.peerDbSearch(nid, net.ParseIP(ipStr)) + if err != nil { + return + } + + logrus.Debugf("Sending peer query resp mac %v, mask %s, vtep %s", pKey.peerMac, net.IP(pEntry.peerIPMask).String(), pEntry.vtep) + q.Respond([]byte(fmt.Sprintf("%s %s %s", 
pKey.peerMac.String(), net.IP(pEntry.peerIPMask).String(), pEntry.vtep.String()))) +} + +func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) { + if d.serfInstance == nil { + return nil, nil, nil, fmt.Errorf("could not resolve peer: serf instance not initialized") + } + + qPayload := fmt.Sprintf("%s %s", string(nid), peerIP.String()) + resp, err := d.serfInstance.Query("peerlookup", []byte(qPayload), nil) + if err != nil { + return nil, nil, nil, fmt.Errorf("resolving peer by querying the cluster failed: %v", err) + } + + respCh := resp.ResponseCh() + select { + case r := <-respCh: + var macStr, maskStr, vtepStr string + if _, err := fmt.Sscan(string(r.Payload), &macStr, &maskStr, &vtepStr); err != nil { + return nil, nil, nil, fmt.Errorf("bad response %q for the resolve query: %v", string(r.Payload), err) + } + + mac, err := net.ParseMAC(macStr) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed to parse mac: %v", err) + } + + logrus.Debugf("Received peer query response, mac %s, vtep %s, mask %s", macStr, vtepStr, maskStr) + return mac, net.IPMask(net.ParseIP(maskStr).To4()), net.ParseIP(vtepStr), nil + + case <-time.After(time.Second): + return nil, nil, nil, fmt.Errorf("timed out resolving peer by querying the cluster") + } +} + +func (d *driver) startSerfLoop(eventCh chan serf.Event, notifyCh chan ovNotify, + exitCh chan chan struct{}) { + + for { + select { + case notify, ok := <-notifyCh: + if !ok { + break + } + + d.notifyEvent(notify) + case ch, ok := <-exitCh: + if !ok { + break + } + + if err := d.serfInstance.Leave(); err != nil { + logrus.Errorf("failed leaving the cluster: %v\n", err) + } + + d.serfInstance.Shutdown() + close(ch) + return + case e, ok := <-eventCh: + if !ok { + break + } + + if e.EventType() == serf.EventQuery { + d.processQuery(e.(*serf.Query)) + break + } + + u, ok := e.(serf.UserEvent) + if !ok { + break + } + d.processEvent(u) + } + } +} + +func (d *driver) isSerfAlive() bool { + d.Lock() + serfInstance := d.serfInstance + d.Unlock() + if serfInstance == nil || serfInstance.State() != serf.SerfAlive { + return false + } + return true +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_utils.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_utils.go new file mode 100644 index 0000000000..27f57c1fe2 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_utils.go @@ -0,0 +1,161 @@ +package overlay + +import ( + "fmt" + "strings" + "syscall" + + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/osl" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +var soTimeout = ns.NetlinkSocketsTimeout + +func validateID(nid, eid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + + if eid == "" { + return fmt.Errorf("invalid endpoint id") + } + + return nil +} + +func createVethPair() (string, string, error) { + defer osl.InitOSContext()() + nlh := ns.NlHandle() + + // Generate a name for what will be the host side pipe interface + name1, err := netutils.GenerateIfaceName(nlh, vethPrefix, vethLen) + if err != nil { + return "", "", fmt.Errorf("error generating veth name1: %v", err) + } + + // Generate a name for what will be the sandbox side pipe interface + name2, err := netutils.GenerateIfaceName(nlh, vethPrefix, vethLen) + if err != nil { + return "", "", fmt.Errorf("error generating veth name2: %v", err) + } + + // Generate 
and add the interface pipe host <-> sandbox + veth := &netlink.Veth{ + LinkAttrs: netlink.LinkAttrs{Name: name1, TxQLen: 0}, + PeerName: name2} + if err := nlh.LinkAdd(veth); err != nil { + return "", "", fmt.Errorf("error creating veth pair: %v", err) + } + + return name1, name2, nil +} + +func createVxlan(name string, vni uint32, mtu int) error { + defer osl.InitOSContext()() + + vxlan := &netlink.Vxlan{ + LinkAttrs: netlink.LinkAttrs{Name: name, MTU: mtu}, + VxlanId: int(vni), + Learning: true, + Port: vxlanPort, + Proxy: true, + L3miss: true, + L2miss: true, + } + + if err := ns.NlHandle().LinkAdd(vxlan); err != nil { + return fmt.Errorf("error creating vxlan interface: %v", err) + } + + return nil +} + +func deleteInterfaceBySubnet(brPrefix string, s *subnet) error { + defer osl.InitOSContext()() + + nlh := ns.NlHandle() + links, err := nlh.LinkList() + if err != nil { + return fmt.Errorf("failed to list interfaces while deleting bridge interface by subnet: %v", err) + } + + for _, l := range links { + name := l.Attrs().Name + if _, ok := l.(*netlink.Bridge); ok && strings.HasPrefix(name, brPrefix) { + addrList, err := nlh.AddrList(l, netlink.FAMILY_V4) + if err != nil { + logrus.Errorf("error getting AddressList for bridge %s", name) + continue + } + for _, addr := range addrList { + if netutils.NetworkOverlaps(addr.IPNet, s.subnetIP) { + err = nlh.LinkDel(l) + if err != nil { + logrus.Errorf("error deleting bridge (%s) with subnet %v: %v", name, addr.IPNet, err) + } + } + } + } + } + return nil + +} + +func deleteInterface(name string) error { + defer osl.InitOSContext()() + + link, err := ns.NlHandle().LinkByName(name) + if err != nil { + return fmt.Errorf("failed to find interface with name %s: %v", name, err) + } + + if err := ns.NlHandle().LinkDel(link); err != nil { + return fmt.Errorf("error deleting interface with name %s: %v", name, err) + } + + return nil +} + +func deleteVxlanByVNI(path string, vni uint32) error { + defer osl.InitOSContext()() + + nlh := ns.NlHandle() + if path != "" { + ns, err := netns.GetFromPath(path) + if err != nil { + return fmt.Errorf("failed to get ns handle for %s: %v", path, err) + } + defer ns.Close() + + nlh, err = netlink.NewHandleAt(ns, syscall.NETLINK_ROUTE) + if err != nil { + return fmt.Errorf("failed to get netlink handle for ns %s: %v", path, err) + } + defer nlh.Delete() + err = nlh.SetSocketTimeout(soTimeout) + if err != nil { + logrus.Warnf("Failed to set the timeout on the netlink handle sockets for vxlan deletion: %v", err) + } + } + + links, err := nlh.LinkList() + if err != nil { + return fmt.Errorf("failed to list interfaces while deleting vxlan interface by vni: %v", err) + } + + for _, l := range links { + if l.Type() == "vxlan" && (vni == 0 || l.(*netlink.Vxlan).VxlanId == int(vni)) { + err = nlh.LinkDel(l) + if err != nil { + return fmt.Errorf("error deleting vxlan interface with id %d: %v", vni, err) + } + return nil + } + } + + return fmt.Errorf("could not find a vxlan interface to delete with id %d", vni) +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go new file mode 100644 index 0000000000..f029c5cce4 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go @@ -0,0 +1,407 @@ +package overlay + +//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. 
overlay.proto
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "sync"
+
+ "github.com/docker/libnetwork/datastore"
+ "github.com/docker/libnetwork/discoverapi"
+ "github.com/docker/libnetwork/driverapi"
+ "github.com/docker/libnetwork/idm"
+ "github.com/docker/libnetwork/netlabel"
+ "github.com/docker/libnetwork/osl"
+ "github.com/docker/libnetwork/types"
+ "github.com/hashicorp/serf/serf"
+ "github.com/sirupsen/logrus"
+)
+
+const (
+ networkType = "overlay"
+ vethPrefix = "veth"
+ vethLen = 7
+ vxlanIDStart = 256
+ vxlanIDEnd = (1 << 24) - 1
+ vxlanPort = 4789
+ vxlanEncap = 50
+ secureOption = "encrypted"
+)
+
+var initVxlanIdm = make(chan (bool), 1)
+
+type driver struct {
+ eventCh chan serf.Event
+ notifyCh chan ovNotify
+ exitCh chan chan struct{}
+ bindAddress string
+ advertiseAddress string
+ neighIP string
+ config map[string]interface{}
+ peerDb peerNetworkMap
+ secMap *encrMap
+ serfInstance *serf.Serf
+ networks networkTable
+ store datastore.DataStore
+ localStore datastore.DataStore
+ vxlanIdm *idm.Idm
+ initOS sync.Once
+ joinOnce sync.Once
+ localJoinOnce sync.Once
+ keys []*key
+ peerOpCh chan *peerOperation
+ peerOpCancel context.CancelFunc
+ sync.Mutex
+}
+
+// Init registers a new instance of the overlay driver
+func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
+ c := driverapi.Capability{
+ DataScope: datastore.GlobalScope,
+ ConnectivityScope: datastore.GlobalScope,
+ }
+ d := &driver{
+ networks: networkTable{},
+ peerDb: peerNetworkMap{
+ mp: map[string]*peerMap{},
+ },
+ secMap: &encrMap{nodes: map[string][]*spi{}},
+ config: config,
+ peerOpCh: make(chan *peerOperation),
+ }
+
+ // Launch the goroutine for processing peer operations
+ ctx, cancel := context.WithCancel(context.Background())
+ d.peerOpCancel = cancel
+ go d.peerOpRoutine(ctx, d.peerOpCh)
+
+ if data, ok := config[netlabel.GlobalKVClient]; ok {
+ var err error
+ dsc, ok := data.(discoverapi.DatastoreConfigData)
+ if !ok {
+ return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
+ }
+ d.store, err = datastore.NewDataStoreFromConfig(dsc)
+ if err != nil {
+ return types.InternalErrorf("failed to initialize data store: %v", err)
+ }
+ }
+
+ if data, ok := config[netlabel.LocalKVClient]; ok {
+ var err error
+ dsc, ok := data.(discoverapi.DatastoreConfigData)
+ if !ok {
+ return types.InternalErrorf("incorrect data in datastore configuration: %v", data)
+ }
+ d.localStore, err = datastore.NewDataStoreFromConfig(dsc)
+ if err != nil {
+ return types.InternalErrorf("failed to initialize local data store: %v", err)
+ }
+ }
+
+ if err := d.restoreEndpoints(); err != nil {
+ logrus.Warnf("Failure during overlay endpoints restore: %v", err)
+ }
+
+ // If an error happened when the network joined the sandbox during the endpoint restore,
+ // we should reset it now, along with the once variable, so that subsequent endpoint joins
+ // outside of the restore path can potentially fix the network join and succeed.
+ for nid, n := range d.networks {
+ if n.initErr != nil {
+ logrus.Infof("resetting init error and once variable for network %s after unsuccessful endpoint restore: %v", nid, n.initErr)
+ n.initErr = nil
+ n.once = &sync.Once{}
+ }
+ }
+
+ return dc.RegisterDriver(networkType, d, c)
+}
+
+// Endpoints are stored in the local store.
Restore them and reconstruct the overlay sandbox +func (d *driver) restoreEndpoints() error { + if d.localStore == nil { + logrus.Warn("Cannot restore overlay endpoints because local datastore is missing") + return nil + } + kvol, err := d.localStore.List(datastore.Key(overlayEndpointPrefix), &endpoint{}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to read overlay endpoint from store: %v", err) + } + + if err == datastore.ErrKeyNotFound { + return nil + } + for _, kvo := range kvol { + ep := kvo.(*endpoint) + n := d.network(ep.nid) + if n == nil { + logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7]) + logrus.Debugf("Deleting stale overlay endpoint (%s) from store", ep.id[0:7]) + if err := d.deleteEndpointFromStore(ep); err != nil { + logrus.Debugf("Failed to delete stale overlay endpoint (%s) from store", ep.id[0:7]) + } + continue + } + n.addEndpoint(ep) + + s := n.getSubnetforIP(ep.addr) + if s == nil { + return fmt.Errorf("could not find subnet for endpoint %s", ep.id) + } + + if err := n.joinSandbox(true); err != nil { + return fmt.Errorf("restore network sandbox failed: %v", err) + } + + if err := n.joinSubnetSandbox(s, true); err != nil { + return fmt.Errorf("restore subnet sandbox failed for %q: %v", s.subnetIP.String(), err) + } + + Ifaces := make(map[string][]osl.IfaceOption) + vethIfaceOption := make([]osl.IfaceOption, 1) + vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName)) + Ifaces["veth+veth"] = vethIfaceOption + + err := n.sbox.Restore(Ifaces, nil, nil, nil) + if err != nil { + return fmt.Errorf("failed to restore overlay sandbox: %v", err) + } + + n.incEndpointCount() + d.peerAdd(ep.nid, ep.id, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), false, false, true) + } + return nil +} + +// Fini cleans up the driver resources +func Fini(drv driverapi.Driver) { + d := drv.(*driver) + + // Notify the peer go routine to return + if d.peerOpCancel != nil { + d.peerOpCancel() + } + + if d.exitCh != nil { + waitCh := make(chan struct{}) + + d.exitCh <- waitCh + + <-waitCh + } +} + +func (d *driver) configure() error { + + // Apply OS specific kernel configs if needed + d.initOS.Do(applyOStweaks) + + if d.store == nil { + return nil + } + + if d.vxlanIdm == nil { + return d.initializeVxlanIdm() + } + + return nil +} + +func (d *driver) initializeVxlanIdm() error { + var err error + + initVxlanIdm <- true + defer func() { <-initVxlanIdm }() + + if d.vxlanIdm != nil { + return nil + } + + d.vxlanIdm, err = idm.New(d.store, "vxlan-id", vxlanIDStart, vxlanIDEnd) + if err != nil { + return fmt.Errorf("failed to initialize vxlan id manager: %v", err) + } + + return nil +} + +func (d *driver) Type() string { + return networkType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +func validateSelf(node string) error { + advIP := net.ParseIP(node) + if advIP == nil { + return fmt.Errorf("invalid self address (%s)", node) + } + + addrs, err := net.InterfaceAddrs() + if err != nil { + return fmt.Errorf("Unable to get interface addresses %v", err) + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err == nil && ip.Equal(advIP) { + return nil + } + } + return fmt.Errorf("Multi-Host overlay networking requires cluster-advertise(%s) to be configured with a local ip-address that is reachable within the cluster", advIP.String()) +} + +func (d *driver) nodeJoin(advertiseAddress, bindAddress string, self bool) { + if self && 
!d.isSerfAlive() { + d.Lock() + d.advertiseAddress = advertiseAddress + d.bindAddress = bindAddress + d.Unlock() + + // If containers are already running on this network update the + // advertise address in the peerDB + d.localJoinOnce.Do(func() { + d.peerDBUpdateSelf() + }) + + // If there is no cluster store there is no need to start serf. + if d.store != nil { + if err := validateSelf(advertiseAddress); err != nil { + logrus.Warn(err.Error()) + } + err := d.serfInit() + if err != nil { + logrus.Errorf("initializing serf instance failed: %v", err) + d.Lock() + d.advertiseAddress = "" + d.bindAddress = "" + d.Unlock() + return + } + } + } + + d.Lock() + if !self { + d.neighIP = advertiseAddress + } + neighIP := d.neighIP + d.Unlock() + + if d.serfInstance != nil && neighIP != "" { + var err error + d.joinOnce.Do(func() { + err = d.serfJoin(neighIP) + if err == nil { + d.pushLocalDb() + } + }) + if err != nil { + logrus.Errorf("joining serf neighbor %s failed: %v", advertiseAddress, err) + d.Lock() + d.joinOnce = sync.Once{} + d.Unlock() + return + } + } +} + +func (d *driver) pushLocalEndpointEvent(action, nid, eid string) { + n := d.network(nid) + if n == nil { + logrus.Debugf("Error pushing local endpoint event for network %s", nid) + return + } + ep := n.endpoint(eid) + if ep == nil { + logrus.Debugf("Error pushing local endpoint event for ep %s / %s", nid, eid) + return + } + + if !d.isSerfAlive() { + return + } + d.notifyCh <- ovNotify{ + action: "join", + nw: n, + ep: ep, + } +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + var err error + switch dType { + case discoverapi.NodeDiscovery: + nodeData, ok := data.(discoverapi.NodeDiscoveryData) + if !ok || nodeData.Address == "" { + return fmt.Errorf("invalid discovery data") + } + d.nodeJoin(nodeData.Address, nodeData.BindAddress, nodeData.Self) + case discoverapi.DatastoreConfig: + if d.store != nil { + return types.ForbiddenErrorf("cannot accept datastore configuration: Overlay driver has a datastore configured already") + } + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("failed to initialize data store: %v", err) + } + case discoverapi.EncryptionKeysConfig: + encrData, ok := data.(discoverapi.DriverEncryptionConfig) + if !ok { + return fmt.Errorf("invalid encryption key notification data") + } + keys := make([]*key, 0, len(encrData.Keys)) + for i := 0; i < len(encrData.Keys); i++ { + k := &key{ + value: encrData.Keys[i], + tag: uint32(encrData.Tags[i]), + } + keys = append(keys, k) + } + if err := d.setKeys(keys); err != nil { + logrus.Warn(err) + } + case discoverapi.EncryptionKeysUpdate: + var newKey, delKey, priKey *key + encrData, ok := data.(discoverapi.DriverEncryptionUpdate) + if !ok { + return fmt.Errorf("invalid encryption key notification data") + } + if encrData.Key != nil { + newKey = &key{ + value: encrData.Key, + tag: uint32(encrData.Tag), + } + } + if encrData.Primary != nil { + priKey = &key{ + value: encrData.Primary, + tag: uint32(encrData.PrimaryTag), + } + } + if encrData.Prune != nil { + delKey = &key{ + value: encrData.Prune, + tag: uint32(encrData.PruneTag), + } + } + if err := d.updateKeys(newKey, priKey, delKey); err != nil { + logrus.Warn(err) + } + default: + 
} + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go new file mode 100644 index 0000000000..cfa0eeeae4 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go @@ -0,0 +1,468 @@ +// Code generated by protoc-gen-gogo. +// source: overlay.proto +// DO NOT EDIT! + +/* + Package overlay is a generated protocol buffer package. + + It is generated from these files: + overlay.proto + + It has these top-level messages: + PeerRecord +*/ +package overlay + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +// PeerRecord defines the information corresponding to a peer +// container in the overlay network. +type PeerRecord struct { + // Endpoint IP is the IP of the container attachment on the + // given overlay network. + EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"` + // Endpoint MAC is the mac address of the container attachment + // on the given overlay network. + EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"` + // Tunnel Endpoint IP defines the host IP for the host in + // which this container is running and can be reached by + // building a tunnel to that host IP. 
+ TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"` +} + +func (m *PeerRecord) Reset() { *m = PeerRecord{} } +func (*PeerRecord) ProtoMessage() {} +func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} } + +func init() { + proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord") +} +func (this *PeerRecord) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&overlay.PeerRecord{") + s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n") + s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n") + s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringOverlay(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string { + if e == nil { + return "nil" + } + s := "map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "}" + return s +} +func (m *PeerRecord) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PeerRecord) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.EndpointIP) > 0 { + data[i] = 0xa + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP))) + i += copy(data[i:], m.EndpointIP) + } + if len(m.EndpointMAC) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC))) + i += copy(data[i:], m.EndpointMAC) + } + if len(m.TunnelEndpointIP) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP))) + i += copy(data[i:], m.TunnelEndpointIP) + } + return i, nil +} + +func encodeFixed64Overlay(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Overlay(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintOverlay(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PeerRecord) Size() (n int) { + var l int + _ = l + l = len(m.EndpointIP) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + l = len(m.EndpointMAC) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + l = len(m.TunnelEndpointIP) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + return n +} + +func sovOverlay(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozOverlay(x 
uint64) (n int) { + return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PeerRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PeerRecord{`, + `EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`, + `EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`, + `TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`, + `}`, + }, "") + return s +} +func valueToStringOverlay(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PeerRecord) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointMAC = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TunnelEndpointIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOverlay(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOverlay + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipOverlay(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthOverlay + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipOverlay(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorOverlay = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d, + 0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40, + 0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a, + 0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf, + 0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21, + 0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e, + 0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25, + 0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84, + 0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, + 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2, + 0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go b/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go new file mode 100644 index 0000000000..bdd3cb12af --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go @@ -0,0 +1,525 @@ +package overlay + +import ( + "context" + "fmt" 
+ "net" + "sync" + "syscall" + + "github.com/docker/libnetwork/common" + "github.com/docker/libnetwork/osl" + "github.com/sirupsen/logrus" +) + +const ovPeerTable = "overlay_peer_table" + +type peerKey struct { + peerIP net.IP + peerMac net.HardwareAddr +} + +type peerEntry struct { + eid string + vtep net.IP + peerIPMask net.IPMask + isLocal bool +} + +func (p *peerEntry) MarshalDB() peerEntryDB { + ones, bits := p.peerIPMask.Size() + return peerEntryDB{ + eid: p.eid, + vtep: p.vtep.String(), + peerIPMaskOnes: ones, + peerIPMaskBits: bits, + isLocal: p.isLocal, + } +} + +// This the structure saved into the set (SetMatrix), due to the implementation of it +// the value inserted in the set has to be Hashable so the []byte had to be converted into +// strings +type peerEntryDB struct { + eid string + vtep string + peerIPMaskOnes int + peerIPMaskBits int + isLocal bool +} + +func (p *peerEntryDB) UnMarshalDB() peerEntry { + return peerEntry{ + eid: p.eid, + vtep: net.ParseIP(p.vtep), + peerIPMask: net.CIDRMask(p.peerIPMaskOnes, p.peerIPMaskBits), + isLocal: p.isLocal, + } +} + +type peerMap struct { + // set of peerEntry, note they have to be objects and not pointers to maintain the proper equality checks + mp common.SetMatrix + sync.Mutex +} + +type peerNetworkMap struct { + // map with key peerKey + mp map[string]*peerMap + sync.Mutex +} + +func (pKey peerKey) String() string { + return fmt.Sprintf("%s %s", pKey.peerIP, pKey.peerMac) +} + +func (pKey *peerKey) Scan(state fmt.ScanState, verb rune) error { + ipB, err := state.Token(true, nil) + if err != nil { + return err + } + + pKey.peerIP = net.ParseIP(string(ipB)) + + macB, err := state.Token(true, nil) + if err != nil { + return err + } + + pKey.peerMac, err = net.ParseMAC(string(macB)) + return err +} + +func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error { + d.peerDb.Lock() + nids := []string{} + for nid := range d.peerDb.mp { + nids = append(nids, nid) + } + d.peerDb.Unlock() + + for _, nid := range nids { + d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool { + return f(nid, pKey, pEntry) + }) + } + return nil +} + +func (d *driver) peerDbNetworkWalk(nid string, f func(*peerKey, *peerEntry) bool) error { + d.peerDb.Lock() + pMap, ok := d.peerDb.mp[nid] + d.peerDb.Unlock() + + if !ok { + return nil + } + + mp := map[string]peerEntry{} + pMap.Lock() + for _, pKeyStr := range pMap.mp.Keys() { + entryDBList, ok := pMap.mp.Get(pKeyStr) + if ok { + peerEntryDB := entryDBList[0].(peerEntryDB) + mp[pKeyStr] = peerEntryDB.UnMarshalDB() + } + } + pMap.Unlock() + + for pKeyStr, pEntry := range mp { + var pKey peerKey + if _, err := fmt.Sscan(pKeyStr, &pKey); err != nil { + logrus.Warnf("Peer key scan on network %s failed: %v", nid, err) + } + if f(&pKey, &pEntry) { + return nil + } + } + + return nil +} + +func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry, error) { + var pKeyMatched *peerKey + var pEntryMatched *peerEntry + err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool { + if pKey.peerIP.Equal(peerIP) { + pKeyMatched = pKey + pEntryMatched = pEntry + return true + } + + return false + }) + + if err != nil { + return nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err) + } + + if pKeyMatched == nil || pEntryMatched == nil { + return nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP) + } + + return pKeyMatched, pEntryMatched, nil +} + +func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask 
+func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+ peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
+
+ d.peerDb.Lock()
+ pMap, ok := d.peerDb.mp[nid]
+ if !ok {
+ d.peerDb.mp[nid] = &peerMap{
+ mp: common.NewSetMatrix(),
+ }
+
+ pMap = d.peerDb.mp[nid]
+ }
+ d.peerDb.Unlock()
+
+ pKey := peerKey{
+ peerIP: peerIP,
+ peerMac: peerMac,
+ }
+
+ pEntry := peerEntry{
+ eid: eid,
+ vtep: vtep,
+ peerIPMask: peerIPMask,
+ isLocal: isLocal,
+ }
+
+ pMap.Lock()
+ defer pMap.Unlock()
+ b, i := pMap.mp.Insert(pKey.String(), pEntry.MarshalDB())
+ if i != 1 {
+ // Transient case: there is more than one endpoint that is using the same IP,MAC pair
+ s, _ := pMap.mp.String(pKey.String())
+ logrus.Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
+ }
+ return b, i
+}
+
+func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+ peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
+
+ d.peerDb.Lock()
+ pMap, ok := d.peerDb.mp[nid]
+ if !ok {
+ d.peerDb.Unlock()
+ return false, 0
+ }
+ d.peerDb.Unlock()
+
+ pKey := peerKey{
+ peerIP: peerIP,
+ peerMac: peerMac,
+ }
+
+ pEntry := peerEntry{
+ eid: eid,
+ vtep: vtep,
+ peerIPMask: peerIPMask,
+ isLocal: isLocal,
+ }
+
+ pMap.Lock()
+ defer pMap.Unlock()
+ b, i := pMap.mp.Remove(pKey.String(), pEntry.MarshalDB())
+ if i != 0 {
+ // Transient case: there is more than one endpoint that is using the same IP,MAC pair
+ s, _ := pMap.mp.String(pKey.String())
+ logrus.Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
+ }
+ return b, i
+}
+
+// The overlay driver uses a lazy initialization approach: when a network is created and the
+// driver is registered, it does not allocate resources until a sandbox is actually created.
+// At the moment of this call, which happens when a sandbox is initialized, it is possible that
+// networkDB has already delivered events for peers that are already available on remote nodes.
+// Those peers are saved into the peerDB, and this function is used to properly configure the
+// network sandbox with all the peers that were previously notified.
+// Note also that this method sends a single message on the channel, and the goroutine on the
+// other side will atomically loop over the whole table of peers and program their state in one
+// single atomic operation. This is fundamental to guarantee consistency and to avoid having a
+// new peerAdd or peerDelete reordered during the sandbox init (a standalone sketch of this
+// serialization pattern follows).
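The serialization described in the comment above is a plain single-consumer channel pattern: every peer mutation is funneled through one goroutine via `peerOpCh`, so operations are applied strictly in order and can never interleave. A stripped-down, self-contained sketch of the same pattern, using simplified stand-in types rather than the driver's own, is:

```go
package main

import (
	"context"
	"fmt"
)

// operation stands in for the driver's peerOperation struct.
type operation struct {
	kind string
	nid  string
}

// opRoutine is the single consumer: operations are applied in the exact
// order they are received, so no two table mutations can interleave.
func opRoutine(ctx context.Context, ch <-chan *operation, ack chan<- struct{}) {
	for {
		select {
		case <-ctx.Done():
			return
		case op := <-ch:
			fmt.Printf("applying %s on network %s\n", op.kind, op.nid)
			ack <- struct{}{}
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // mirrors the role of d.peerOpCancel at driver shutdown

	ch := make(chan *operation)
	ack := make(chan struct{})
	go opRoutine(ctx, ch, ack)

	// Producers may be concurrent; each unbuffered send hands exactly one
	// operation to the single worker. The ack keeps this demo deterministic.
	for _, op := range []*operation{
		{kind: "init", nid: "n1"},
		{kind: "add", nid: "n1"},
		{kind: "delete", nid: "n1"},
	} {
		ch <- op
		<-ack
	}
}
```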
+func (d *driver) initSandboxPeerDB(nid string) { + d.peerInit(nid) +} + +type peerOperationType int32 + +const ( + peerOperationINIT peerOperationType = iota + peerOperationADD + peerOperationDELETE + peerOperationFLUSH +) + +type peerOperation struct { + opType peerOperationType + networkID string + endpointID string + peerIP net.IP + peerIPMask net.IPMask + peerMac net.HardwareAddr + vtepIP net.IP + l2Miss bool + l3Miss bool + localPeer bool + callerName string +} + +func (d *driver) peerOpRoutine(ctx context.Context, ch chan *peerOperation) { + var err error + for { + select { + case <-ctx.Done(): + return + case op := <-ch: + switch op.opType { + case peerOperationINIT: + err = d.peerInitOp(op.networkID) + case peerOperationADD: + err = d.peerAddOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.l2Miss, op.l3Miss, true, op.localPeer) + case peerOperationDELETE: + err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.localPeer) + case peerOperationFLUSH: + err = d.peerFlushOp(op.networkID) + } + if err != nil { + logrus.Warnf("Peer operation failed:%s op:%v", err, op) + } + } + } +} + +func (d *driver) peerInit(nid string) { + callerName := common.CallerName(1) + d.peerOpCh <- &peerOperation{ + opType: peerOperationINIT, + networkID: nid, + callerName: callerName, + } +} + +func (d *driver) peerInitOp(nid string) error { + return d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool { + // Local entries do not need to be added + if pEntry.isLocal { + return false + } + + d.peerAddOp(nid, pEntry.eid, pKey.peerIP, pEntry.peerIPMask, pKey.peerMac, pEntry.vtep, false, false, false, pEntry.isLocal) + // return false to loop on all entries + return false + }) +} + +func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, + peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, localPeer bool) { + d.peerOpCh <- &peerOperation{ + opType: peerOperationADD, + networkID: nid, + endpointID: eid, + peerIP: peerIP, + peerIPMask: peerIPMask, + peerMac: peerMac, + vtepIP: vtep, + l2Miss: l2Miss, + l3Miss: l3Miss, + localPeer: localPeer, + callerName: common.CallerName(1), + } +} + +func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, + peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, localPeer bool) error { + + if err := validateID(nid, eid); err != nil { + return err + } + + var dbEntries int + var inserted bool + if updateDB { + inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer) + if !inserted { + logrus.Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v", + nid, eid, peerIP, peerMac, localPeer, vtep) + } + } + + // Local peers do not need any further configuration + if localPeer { + return nil + } + + n := d.network(nid) + if n == nil { + return nil + } + + sbox := n.sandbox() + if sbox == nil { + // We are hitting this case for all the events that are arriving before that the sandbox + // is being created. 
is created. The peer was already added into the database, and the sandbox init will
+ // call the peerDbUpdateSandbox that will configure all these peers from the database
+ return nil
+ }
+
+ IP := &net.IPNet{
+ IP: peerIP,
+ Mask: peerIPMask,
+ }
+
+ s := n.getSubnetforIP(IP)
+ if s == nil {
+ return fmt.Errorf("couldn't find the subnet %q in network %q", IP.String(), n.id)
+ }
+
+ if err := n.obtainVxlanID(s); err != nil {
+ return fmt.Errorf("couldn't get vxlan id for %q: %v", s.subnetIP.String(), err)
+ }
+
+ if err := n.joinSubnetSandbox(s, false); err != nil {
+ return fmt.Errorf("subnet sandbox join failed for %q: %v", s.subnetIP.String(), err)
+ }
+
+ if err := d.checkEncryption(nid, vtep, n.vxlanID(s), false, true); err != nil {
+ logrus.Warn(err)
+ }
+
+ // Add neighbor entry for the peer IP
+ if err := sbox.AddNeighbor(peerIP, peerMac, l3Miss, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
+ if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 1 {
+ // We are in the transient case, so only the first configuration is programmed into the kernel.
+ // Upon deletion, if the active configuration is deleted, the next one from the database will be restored.
+ // Note that we also skip this next configuration here.
+ return nil
+ }
+ return fmt.Errorf("could not add neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+ }
+
+ // Add fdb entry to the bridge for the peer mac
+ if err := sbox.AddNeighbor(vtep, peerMac, l2Miss, sbox.NeighborOptions().LinkName(s.vxlanName),
+ sbox.NeighborOptions().Family(syscall.AF_BRIDGE)); err != nil {
+ return fmt.Errorf("could not add fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+ }
+
+ return nil
+}
+
+func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+ peerMac net.HardwareAddr, vtep net.IP, localPeer bool) {
+ d.peerOpCh <- &peerOperation{
+ opType: peerOperationDELETE,
+ networkID: nid,
+ endpointID: eid,
+ peerIP: peerIP,
+ peerIPMask: peerIPMask,
+ peerMac: peerMac,
+ vtepIP: vtep,
+ callerName: common.CallerName(1),
+ localPeer: localPeer,
+ }
+}
+
+func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
+ peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {
+
+ if err := validateID(nid, eid); err != nil {
+ return err
+ }
+
+ deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
+ if !deleted {
+ logrus.Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
+ nid, eid, peerIP, peerMac, localPeer, vtep)
+ }
+
+ n := d.network(nid)
+ if n == nil {
+ return nil
+ }
+
+ sbox := n.sandbox()
+ if sbox == nil {
+ return nil
+ }
+
+ if err := d.checkEncryption(nid, vtep, 0, localPeer, false); err != nil {
+ logrus.Warn(err)
+ }
+
+ // Local peers do not have any local configuration to delete
+ if !localPeer {
+ // Remove fdb entry to the bridge for the peer mac
+ if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
+ if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 0 {
+ // We fall in here if there is a transient state and the neighbor that is being deleted
+ // was never configured into the kernel (we allow only one configuration at a time per mapping)
+ return nil
+ }
+ return fmt.Errorf("could not delete fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+ }
+
+ // Delete neighbor entry for the peer IP
+ if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
+ return fmt.Errorf("could not delete neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+ }
+ }
+
+ if dbEntries == 0 {
+ return nil
+ }
+
+ // If there is still an entry in the database and the deletion went through without errors, it
+ // means that there is now no configuration active in the kernel.
+ // Restore one configuration for the peer directly from the database; note that it is guaranteed
+ // that there is one.
+ peerKey, peerEntry, err := d.peerDbSearch(nid, peerIP)
+ if err != nil {
+ logrus.Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err)
+ return err
+ }
+ return d.peerAddOp(nid, peerEntry.eid, peerIP, peerEntry.peerIPMask, peerKey.peerMac, peerEntry.vtep, false, false, false, peerEntry.isLocal)
+}
+
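peerAddOp and peerDeleteOp cooperate on the transient case in which two endpoints briefly share one (IP, MAC) pair: only the first database entry is programmed into the kernel, and when that active entry is deleted the surviving one is restored from the database. A simplified model of that bookkeeping, with plain maps standing in for the SetMatrix and for the kernel state (types and values here are illustrative, not the driver's own):

```go
package main

import "fmt"

// peerTable models the per-network set: each (IP, MAC) key can transiently
// hold several endpoint IDs, but only entries[key][0] is "programmed".
type peerTable struct {
	entries map[string][]string // key -> endpoint IDs, in insertion order
	kernel  map[string]string   // key -> endpoint ID currently in the kernel
}

func newPeerTable() *peerTable {
	return &peerTable{entries: map[string][]string{}, kernel: map[string]string{}}
}

func (t *peerTable) add(key, eid string) {
	t.entries[key] = append(t.entries[key], eid)
	if len(t.entries[key]) == 1 {
		t.kernel[key] = eid // only the first configuration reaches the kernel
	}
}

func (t *peerTable) delete(key, eid string) {
	ids := t.entries[key]
	for i, id := range ids {
		if id == eid {
			t.entries[key] = append(ids[:i], ids[i+1:]...)
			break
		}
	}
	// If the programmed entry was removed, restore the survivor, mirroring
	// peerDeleteOp's peerDbSearch + peerAddOp fallback.
	if t.kernel[key] == eid {
		delete(t.kernel, key)
		if rest := t.entries[key]; len(rest) > 0 {
			t.kernel[key] = rest[0]
		}
	}
}

func main() {
	t := newPeerTable()
	key := "10.0.0.2 02:42:0a:00:00:02"
	t.add(key, "ep1")
	t.add(key, "ep2") // transient duplicate: not programmed
	t.delete(key, "ep1")
	fmt.Println(t.kernel[key]) // "ep2": the surviving entry was restored
}
```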
+func (d *driver) peerFlush(nid string) {
+ d.peerOpCh <- &peerOperation{
+ opType: peerOperationFLUSH,
+ networkID: nid,
+ callerName: common.CallerName(1),
+ }
+}
+
+func (d *driver) peerFlushOp(nid string) error {
+ d.peerDb.Lock()
+ defer d.peerDb.Unlock()
+ _, ok := d.peerDb.mp[nid]
+ if !ok {
+ return fmt.Errorf("Unable to find the peerDB for nid:%s", nid)
+ }
+ delete(d.peerDb.mp, nid)
+ return nil
+}
+
+func (d *driver) pushLocalDb() {
+ d.peerDbWalk(func(nid string, pKey *peerKey, pEntry *peerEntry) bool {
+ if pEntry.isLocal {
+ d.pushLocalEndpointEvent("join", nid, pEntry.eid)
+ }
+ return false
+ })
+}
+
+func (d *driver) peerDBUpdateSelf() {
+ d.peerDbWalk(func(nid string, pkey *peerKey, pEntry *peerEntry) bool {
+ if pEntry.isLocal {
+ pEntry.vtep = net.ParseIP(d.advertiseAddress)
+ }
+ return false
+ })
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/remote/api/api.go b/vendor/github.com/docker/libnetwork/drivers/remote/api/api.go
new file mode 100644
index 0000000000..d24f190162
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/drivers/remote/api/api.go
@@ -0,0 +1,221 @@
+/*
+Package api represents all requests and responses suitable for conversation
+with a remote driver.
+*/
+package api
+
+import (
+ "net"
+
+ "github.com/docker/libnetwork/discoverapi"
+ "github.com/docker/libnetwork/driverapi"
+)
+
+// Response is the basic response structure used in all responses.
+type Response struct {
+ Err string
+}
+
+// GetError returns the error from the response, if any.
+func (r *Response) GetError() string {
+ return r.Err
+}
+
+// GetCapabilityResponse is the response to a GetCapability request
+type GetCapabilityResponse struct {
+ Response
+ Scope string
+ ConnectivityScope string
+}
+
+// AllocateNetworkRequest requests allocation of a new network by the manager
+type AllocateNetworkRequest struct {
+ // A network ID that remote plugins are expected to store for future
+ // reference.
+ NetworkID string
+
+ // A free form map->object interface for communication of options.
+ Options map[string]string
+
+ // IPAMData contains the address pool information for this network
+ IPv4Data, IPv6Data []driverapi.IPAMData
+}
+
+// AllocateNetworkResponse is the response to the AllocateNetworkRequest.
+type AllocateNetworkResponse struct {
+ Response
+ // A free form plugin specific string->string object to be sent in
+ // CreateNetworkRequest call in the libnetwork agents
+ Options map[string]string
+}
+
+// FreeNetworkRequest is the request to free an allocated network in the manager
+type FreeNetworkRequest struct {
+ // The ID of the network to be freed.
+ NetworkID string
+}
+
+// FreeNetworkResponse is the response to a request for freeing a network.
+type FreeNetworkResponse struct {
+ Response
+}
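Every response type in this package embeds Response, so a caller can decode any reply and consult the shared Err field through GetError; that is exactly what the remote driver's call helper does further below. A minimal sketch of decoding a failure reply (the JSON wire format shown is illustrative of the plugin transport, not a verbatim capture):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Response mirrors the envelope embedded by every reply in this package.
type Response struct {
	Err string
}

func (r *Response) GetError() string { return r.Err }

// FreeNetworkResponse embeds the envelope, as the real type does.
type FreeNetworkResponse struct {
	Response
}

func main() {
	// A failure reply as a remote driver might encode it.
	wire := []byte(`{"Err": "network busy"}`)

	var res FreeNetworkResponse
	if err := json.Unmarshal(wire, &res); err != nil {
		panic(err)
	}

	// The caller only ever consults the shared envelope.
	if e := res.GetError(); e != "" {
		fmt.Println("remote driver returned:", e)
	}
}
```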
+// CreateNetworkRequest requests a new network.
+type CreateNetworkRequest struct {
+ // A network ID that remote plugins are expected to store for future
+ // reference.
+ NetworkID string
+
+ // A free form map->object interface for communication of options.
+ Options map[string]interface{}
+
+ // IPAMData contains the address pool information for this network
+ IPv4Data, IPv6Data []driverapi.IPAMData
+}
+
+// CreateNetworkResponse is the response to the CreateNetworkRequest.
+type CreateNetworkResponse struct {
+ Response
+}
+
+// DeleteNetworkRequest is the request to delete an existing network.
+type DeleteNetworkRequest struct {
+ // The ID of the network to delete.
+ NetworkID string
+}
+
+// DeleteNetworkResponse is the response to a request for deleting a network.
+type DeleteNetworkResponse struct {
+ Response
+}
+
+// CreateEndpointRequest is the request to create an endpoint within a network.
+type CreateEndpointRequest struct {
+ // Provided at create time, this will be the network id referenced.
+ NetworkID string
+ // The ID of the endpoint for later reference.
+ EndpointID string
+ Interface *EndpointInterface
+ Options map[string]interface{}
+}
+
+// EndpointInterface represents an interface endpoint.
+type EndpointInterface struct {
+ Address string
+ AddressIPv6 string
+ MacAddress string
+}
+
+// CreateEndpointResponse is the response to the CreateEndpoint action.
+type CreateEndpointResponse struct {
+ Response
+ Interface *EndpointInterface
+}
+
+// Interface is the representation of a Linux interface.
+type Interface struct {
+ Address *net.IPNet
+ AddressIPv6 *net.IPNet
+ MacAddress net.HardwareAddr
+}
+
+// DeleteEndpointRequest describes the API for deleting an endpoint.
+type DeleteEndpointRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+// DeleteEndpointResponse is the response to the DeleteEndpoint action.
+type DeleteEndpointResponse struct {
+ Response
+}
+
+// EndpointInfoRequest retrieves information about the endpoint from the network driver.
+type EndpointInfoRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+// EndpointInfoResponse is the response to an EndpointInfoRequest.
+type EndpointInfoResponse struct {
+ Response
+ Value map[string]interface{}
+}
+
+// JoinRequest describes the API for joining an endpoint to a sandbox.
+type JoinRequest struct {
+ NetworkID string
+ EndpointID string
+ SandboxKey string
+ Options map[string]interface{}
+}
+
+// InterfaceName is the struct representation of a pair of devices with source
+// and destination, for the purposes of putting an endpoint into a container.
+type InterfaceName struct {
+ SrcName string
+ DstName string
+ DstPrefix string
+}
+
+// StaticRoute is the plain JSON representation of a static route.
+type StaticRoute struct {
+ Destination string
+ RouteType int
+ NextHop string
+}
+
+// JoinResponse is the response to a JoinRequest.
+type JoinResponse struct {
+ Response
+ InterfaceName *InterfaceName
+ Gateway string
+ GatewayIPv6 string
+ StaticRoutes []StaticRoute
+ DisableGatewayService bool
+}
+
+// LeaveRequest describes the API for detaching an endpoint from a sandbox.
+type LeaveRequest struct {
+ NetworkID string
+ EndpointID string
+}
+
+// LeaveResponse is the answer to LeaveRequest.
+type LeaveResponse struct {
+ Response
+}
+
+// ProgramExternalConnectivityRequest describes the API for programming the external connectivity for the given endpoint.
+type ProgramExternalConnectivityRequest struct { + NetworkID string + EndpointID string + Options map[string]interface{} +} + +// ProgramExternalConnectivityResponse is the answer to ProgramExternalConnectivityRequest. +type ProgramExternalConnectivityResponse struct { + Response +} + +// RevokeExternalConnectivityRequest describes the API for revoking the external connectivity for the given endpoint. +type RevokeExternalConnectivityRequest struct { + NetworkID string + EndpointID string +} + +// RevokeExternalConnectivityResponse is the answer to RevokeExternalConnectivityRequest. +type RevokeExternalConnectivityResponse struct { + Response +} + +// DiscoveryNotification represents a discovery notification +type DiscoveryNotification struct { + DiscoveryType discoverapi.DiscoveryType + DiscoveryData interface{} +} + +// DiscoveryResponse is used by libnetwork to log any plugin error processing the discovery notifications +type DiscoveryResponse struct { + Response +} diff --git a/vendor/github.com/docker/libnetwork/drivers/remote/driver.go b/vendor/github.com/docker/libnetwork/drivers/remote/driver.go new file mode 100644 index 0000000000..b52163025a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/remote/driver.go @@ -0,0 +1,409 @@ +package remote + +import ( + "errors" + "fmt" + "net" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/drivers/remote/api" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type driver struct { + endpoint *plugins.Client + networkType string +} + +type maybeError interface { + GetError() string +} + +func newDriver(name string, client *plugins.Client) driverapi.Driver { + return &driver{networkType: name, endpoint: client} +} + +// Init makes sure a remote driver is registered when a network driver +// plugin is activated. +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + newPluginHandler := func(name string, client *plugins.Client) { + // negotiate driver capability with client + d := newDriver(name, client) + c, err := d.(*driver).getCapabilities() + if err != nil { + logrus.Errorf("error getting capability for %s due to %v", name, err) + return + } + if err = dc.RegisterDriver(name, d, *c); err != nil { + logrus.Errorf("error registering driver for %s due to %v", name, err) + } + } + + // Unit test code is unaware of a true PluginStore. So we fall back to v1 plugins. 
+ handleFunc := plugins.Handle + if pg := dc.GetPluginGetter(); pg != nil { + handleFunc = pg.Handle + activePlugins := pg.GetAllManagedPluginsByCap(driverapi.NetworkPluginEndpointType) + for _, ap := range activePlugins { + newPluginHandler(ap.Name(), ap.Client()) + } + } + handleFunc(driverapi.NetworkPluginEndpointType, newPluginHandler) + + return nil +} + +// Get capability from client +func (d *driver) getCapabilities() (*driverapi.Capability, error) { + var capResp api.GetCapabilityResponse + if err := d.call("GetCapabilities", nil, &capResp); err != nil { + return nil, err + } + + c := &driverapi.Capability{} + switch capResp.Scope { + case "global": + c.DataScope = datastore.GlobalScope + case "local": + c.DataScope = datastore.LocalScope + default: + return nil, fmt.Errorf("invalid capability: expecting 'local' or 'global', got %s", capResp.Scope) + } + + switch capResp.ConnectivityScope { + case "global": + c.ConnectivityScope = datastore.GlobalScope + case "local": + c.ConnectivityScope = datastore.LocalScope + case "": + c.ConnectivityScope = c.DataScope + default: + return nil, fmt.Errorf("invalid capability: expecting 'local' or 'global', got %s", capResp.Scope) + } + + return c, nil +} + +// Config is not implemented for remote drivers, since it is assumed +// to be supplied to the remote process out-of-band (e.g., as command +// line arguments). +func (d *driver) Config(option map[string]interface{}) error { + return &driverapi.ErrNotImplemented{} +} + +func (d *driver) call(methodName string, arg interface{}, retVal maybeError) error { + method := driverapi.NetworkPluginEndpointType + "." + methodName + err := d.endpoint.Call(method, arg, retVal) + if err != nil { + return err + } + if e := retVal.GetError(); e != "" { + return fmt.Errorf("remote: %s", e) + } + return nil +} + +func (d *driver) NetworkAllocate(id string, options map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + create := &api.AllocateNetworkRequest{ + NetworkID: id, + Options: options, + IPv4Data: ipV4Data, + IPv6Data: ipV6Data, + } + retVal := api.AllocateNetworkResponse{} + err := d.call("AllocateNetwork", create, &retVal) + return retVal.Options, err +} + +func (d *driver) NetworkFree(id string) error { + fr := &api.FreeNetworkRequest{NetworkID: id} + return d.call("FreeNetwork", fr, &api.FreeNetworkResponse{}) +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +func (d *driver) CreateNetwork(id string, options map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + create := &api.CreateNetworkRequest{ + NetworkID: id, + Options: options, + IPv4Data: ipV4Data, + IPv6Data: ipV6Data, + } + return d.call("CreateNetwork", create, &api.CreateNetworkResponse{}) +} + +func (d *driver) DeleteNetwork(nid string) error { + delete := &api.DeleteNetworkRequest{NetworkID: nid} + return d.call("DeleteNetwork", delete, &api.DeleteNetworkResponse{}) +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error { + if ifInfo == nil { + return errors.New("must not be called with nil InterfaceInfo") + } + + reqIface := &api.EndpointInterface{} + if ifInfo.Address() != nil { + reqIface.Address = ifInfo.Address().String() + } + if ifInfo.AddressIPv6() != nil { + reqIface.AddressIPv6 = 
ifInfo.AddressIPv6().String() + } + if ifInfo.MacAddress() != nil { + reqIface.MacAddress = ifInfo.MacAddress().String() + } + + create := &api.CreateEndpointRequest{ + NetworkID: nid, + EndpointID: eid, + Interface: reqIface, + Options: epOptions, + } + var res api.CreateEndpointResponse + if err := d.call("CreateEndpoint", create, &res); err != nil { + return err + } + + inIface, err := parseInterface(res) + if err != nil { + return err + } + if inIface == nil { + // Remote driver did not set any field + return nil + } + + if inIface.MacAddress != nil { + if err := ifInfo.SetMacAddress(inIface.MacAddress); err != nil { + return errorWithRollback(fmt.Sprintf("driver modified interface MAC address: %v", err), d.DeleteEndpoint(nid, eid)) + } + } + if inIface.Address != nil { + if err := ifInfo.SetIPAddress(inIface.Address); err != nil { + return errorWithRollback(fmt.Sprintf("driver modified interface address: %v", err), d.DeleteEndpoint(nid, eid)) + } + } + if inIface.AddressIPv6 != nil { + if err := ifInfo.SetIPAddress(inIface.AddressIPv6); err != nil { + return errorWithRollback(fmt.Sprintf("driver modified interface address: %v", err), d.DeleteEndpoint(nid, eid)) + } + } + + return nil +} + +func errorWithRollback(msg string, err error) error { + rollback := "rolled back" + if err != nil { + rollback = "failed to roll back: " + err.Error() + } + return fmt.Errorf("%s; %s", msg, rollback) +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + delete := &api.DeleteEndpointRequest{ + NetworkID: nid, + EndpointID: eid, + } + return d.call("DeleteEndpoint", delete, &api.DeleteEndpointResponse{}) +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + info := &api.EndpointInfoRequest{ + NetworkID: nid, + EndpointID: eid, + } + var res api.EndpointInfoResponse + if err := d.call("EndpointOperInfo", info, &res); err != nil { + return nil, err + } + return res.Value, nil +} + +// Join method is invoked when a Sandbox is attached to an endpoint. 
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + join := &api.JoinRequest{ + NetworkID: nid, + EndpointID: eid, + SandboxKey: sboxKey, + Options: options, + } + var ( + res api.JoinResponse + err error + ) + if err = d.call("Join", join, &res); err != nil { + return err + } + + ifaceName := res.InterfaceName + if iface := jinfo.InterfaceName(); iface != nil && ifaceName != nil { + if err := iface.SetNames(ifaceName.SrcName, ifaceName.DstPrefix); err != nil { + return errorWithRollback(fmt.Sprintf("failed to set interface name: %s", err), d.Leave(nid, eid)) + } + } + + var addr net.IP + if res.Gateway != "" { + if addr = net.ParseIP(res.Gateway); addr == nil { + return fmt.Errorf(`unable to parse Gateway "%s"`, res.Gateway) + } + if jinfo.SetGateway(addr) != nil { + return errorWithRollback(fmt.Sprintf("failed to set gateway: %v", addr), d.Leave(nid, eid)) + } + } + if res.GatewayIPv6 != "" { + if addr = net.ParseIP(res.GatewayIPv6); addr == nil { + return fmt.Errorf(`unable to parse GatewayIPv6 "%s"`, res.GatewayIPv6) + } + if jinfo.SetGatewayIPv6(addr) != nil { + return errorWithRollback(fmt.Sprintf("failed to set gateway IPv6: %v", addr), d.Leave(nid, eid)) + } + } + if len(res.StaticRoutes) > 0 { + routes, err := parseStaticRoutes(res) + if err != nil { + return err + } + for _, route := range routes { + if jinfo.AddStaticRoute(route.Destination, route.RouteType, route.NextHop) != nil { + return errorWithRollback(fmt.Sprintf("failed to set static route: %v", route), d.Leave(nid, eid)) + } + } + } + if res.DisableGatewayService { + jinfo.DisableGatewayService() + } + return nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + leave := &api.LeaveRequest{ + NetworkID: nid, + EndpointID: eid, + } + return d.call("Leave", leave, &api.LeaveResponse{}) +} + +// ProgramExternalConnectivity is invoked to program the rules to allow external connectivity for the endpoint. +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + data := &api.ProgramExternalConnectivityRequest{ + NetworkID: nid, + EndpointID: eid, + Options: options, + } + err := d.call("ProgramExternalConnectivity", data, &api.ProgramExternalConnectivityResponse{}) + if err != nil && plugins.IsNotFound(err) { + // It is not mandatory yet to support this method + return nil + } + return err +} + +// RevokeExternalConnectivity method is invoked to remove any external connectivity programming related to the endpoint. 
+func (d *driver) RevokeExternalConnectivity(nid, eid string) error {
+	data := &api.RevokeExternalConnectivityRequest{
+		NetworkID:  nid,
+		EndpointID: eid,
+	}
+	err := d.call("RevokeExternalConnectivity", data, &api.RevokeExternalConnectivityResponse{})
+	if err != nil && plugins.IsNotFound(err) {
+		// It is not mandatory yet to support this method
+		return nil
+	}
+	return err
+}
+
+func (d *driver) Type() string {
+	return d.networkType
+}
+
+func (d *driver) IsBuiltIn() bool {
+	return false
+}
+
+// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
+func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
+	if dType != discoverapi.NodeDiscovery {
+		return nil
+	}
+	notif := &api.DiscoveryNotification{
+		DiscoveryType: dType,
+		DiscoveryData: data,
+	}
+	return d.call("DiscoverNew", notif, &api.DiscoveryResponse{})
+}
+
+// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
+func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
+	if dType != discoverapi.NodeDiscovery {
+		return nil
+	}
+	notif := &api.DiscoveryNotification{
+		DiscoveryType: dType,
+		DiscoveryData: data,
+	}
+	return d.call("DiscoverDelete", notif, &api.DiscoveryResponse{})
+}
+
+func parseStaticRoutes(r api.JoinResponse) ([]*types.StaticRoute, error) {
+	var routes = make([]*types.StaticRoute, len(r.StaticRoutes))
+	for i, inRoute := range r.StaticRoutes {
+		var err error
+		outRoute := &types.StaticRoute{RouteType: inRoute.RouteType}
+
+		if inRoute.Destination != "" {
+			if outRoute.Destination, err = types.ParseCIDR(inRoute.Destination); err != nil {
+				return nil, err
+			}
+		}
+
+		if inRoute.NextHop != "" {
+			outRoute.NextHop = net.ParseIP(inRoute.NextHop)
+			if outRoute.NextHop == nil {
+				return nil, fmt.Errorf("failed to parse nexthop IP %s", inRoute.NextHop)
+			}
+		}
+
+		routes[i] = outRoute
+	}
+	return routes, nil
+}
+
+// parseInterface validates all the parameters of an Interface and returns them.
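parseStaticRoutes above turns the string form of each route into net types before Join hands them to JoinInfo. A stdlib-only sketch of the same validation; libnetwork's types.ParseCIDR is replaced by net.ParseCIDR here, which returns the masked network rather than preserving host bits, so this is an approximation rather than the vendored behavior:

```go
package main

import (
	"fmt"
	"net"
)

type staticRoute struct {
	Destination *net.IPNet
	NextHop     net.IP
}

// parseRoute validates one destination/next-hop pair the way the remote
// driver does, failing fast on unparseable input.
func parseRoute(destination, nextHop string) (*staticRoute, error) {
	_, dst, err := net.ParseCIDR(destination)
	if err != nil {
		return nil, err
	}
	nh := net.ParseIP(nextHop)
	if nh == nil {
		return nil, fmt.Errorf("failed to parse nexthop IP %s", nextHop)
	}
	return &staticRoute{Destination: dst, NextHop: nh}, nil
}

func main() {
	r, err := parseRoute("10.10.0.0/16", "10.0.0.1")
	fmt.Println(r, err) // &{10.10.0.0/16 10.0.0.1} <nil>

	_, err = parseRoute("10.10.0.0/16", "not-an-ip")
	fmt.Println(err) // failed to parse nexthop IP not-an-ip
}
```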
+func parseInterface(r api.CreateEndpointResponse) (*api.Interface, error) { + var outIf *api.Interface + + inIf := r.Interface + if inIf != nil { + var err error + outIf = &api.Interface{} + if inIf.Address != "" { + if outIf.Address, err = types.ParseCIDR(inIf.Address); err != nil { + return nil, err + } + } + if inIf.AddressIPv6 != "" { + if outIf.AddressIPv6, err = types.ParseCIDR(inIf.AddressIPv6); err != nil { + return nil, err + } + } + if inIf.MacAddress != "" { + if outIf.MacAddress, err = net.ParseMAC(inIf.MacAddress); err != nil { + return nil, err + } + } + } + + return outIf, nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/labels.go b/vendor/github.com/docker/libnetwork/drivers/windows/labels.go new file mode 100644 index 0000000000..a4b23c1a22 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/labels.go @@ -0,0 +1,51 @@ +package windows + +const ( + // NetworkName label for bridge driver + NetworkName = "com.docker.network.windowsshim.networkname" + + // HNSID of the discovered network + HNSID = "com.docker.network.windowsshim.hnsid" + + // RoutingDomain of the network + RoutingDomain = "com.docker.network.windowsshim.routingdomain" + + // Interface of the network + Interface = "com.docker.network.windowsshim.interface" + + // QosPolicies of the endpoint + QosPolicies = "com.docker.endpoint.windowsshim.qospolicies" + + // VLAN of the network + VLAN = "com.docker.network.windowsshim.vlanid" + + // VSID of the network + VSID = "com.docker.network.windowsshim.vsid" + + // DNSSuffix of the network + DNSSuffix = "com.docker.network.windowsshim.dnssuffix" + + // DNSServers of the network + DNSServers = "com.docker.network.windowsshim.dnsservers" + + // MacPool of the network + MacPool = "com.docker.network.windowsshim.macpool" + + // SourceMac of the network + SourceMac = "com.docker.network.windowsshim.sourcemac" + + // DisableICC label + DisableICC = "com.docker.network.windowsshim.disableicc" + + // DisableDNS label + DisableDNS = "com.docker.network.windowsshim.disable_dns" + + // DisableGatewayDNS label + DisableGatewayDNS = "com.docker.network.windowsshim.disable_gatewaydns" + + // EnableOutboundNat label + EnableOutboundNat = "com.docker.network.windowsshim.enable_outboundnat" + + // OutboundNatExceptions label + OutboundNatExceptions = "com.docker.network.windowsshim.outboundnat_exceptions" +) diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go new file mode 100644 index 0000000000..83bee5ad93 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go @@ -0,0 +1,112 @@ +package overlay + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/types" + "github.com/gogo/protobuf/proto" + "github.com/sirupsen/logrus" +) + +// Join method is invoked when a Sandbox is attached to an endpoint. 
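The label constants above are plain strings carried in the network's generic options; the Windows drivers later in this diff read them with a switch over the well-known keys. A small sketch of that lookup, with the option map contents invented for illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

const (
	// Two of the well-known label keys defined above.
	networkNameLabel = "com.docker.network.windowsshim.networkname"
	vlanLabel        = "com.docker.network.windowsshim.vlanid"
)

func main() {
	// Generic options as they would arrive from the daemon (sample values).
	genData := map[string]string{
		networkNameLabel: "corp-net",
		vlanLabel:        "42",
	}

	var name string
	var vlan uint
	for label, value := range genData {
		switch label {
		case networkNameLabel:
			name = value
		case vlanLabel:
			v, err := strconv.ParseUint(value, 10, 32)
			if err != nil {
				fmt.Println("invalid vlan id:", err)
				return
			}
			vlan = uint(v)
		}
	}
	fmt.Println(name, vlan) // corp-net 42
}
```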
+func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("could not find network with id %s", nid) + } + + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + + buf, err := proto.Marshal(&PeerRecord{ + EndpointIP: ep.addr.String(), + EndpointMAC: ep.mac.String(), + TunnelEndpointIP: n.providerAddress, + }) + + if err != nil { + return err + } + + if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil { + logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err) + } + + if ep.disablegateway { + jinfo.DisableGatewayService() + } + + return nil +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { + if tableName != ovPeerTable { + logrus.Errorf("Unexpected table notification for table %s received", tableName) + return + } + + eid := key + + var peer PeerRecord + if err := proto.Unmarshal(value, &peer); err != nil { + logrus.Errorf("Failed to unmarshal peer record: %v", err) + return + } + + n := d.network(nid) + if n == nil { + return + } + + // Ignore local peers. We already know about them and they + // should not be added to vxlan fdb. + if peer.TunnelEndpointIP == n.providerAddress { + return + } + + addr, err := types.ParseCIDR(peer.EndpointIP) + if err != nil { + logrus.Errorf("Invalid peer IP %s received in event notify", peer.EndpointIP) + return + } + + mac, err := net.ParseMAC(peer.EndpointMAC) + if err != nil { + logrus.Errorf("Invalid mac %s received in event notify", peer.EndpointMAC) + return + } + + vtep := net.ParseIP(peer.TunnelEndpointIP) + if vtep == nil { + logrus.Errorf("Invalid VTEP %s received in event notify", peer.TunnelEndpointIP) + return + } + + if etype == driverapi.Delete { + d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, true) + return + } + + d.peerAdd(nid, eid, addr.IP, addr.Mask, mac, vtep, true) +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +// Leave method is invoked when a Sandbox detaches from an endpoint. 
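EventNotify above refuses to act on a gossip entry unless all three peer fields parse cleanly. A stdlib-only sketch of that validation step; the sample values are invented, and the real record arrives protobuf-encoded:

```go
package main

import (
	"fmt"
	"net"
)

// validatePeer mirrors the checks in EventNotify: the endpoint IP must be
// a CIDR, the MAC must parse, and the tunnel endpoint must be a bare IP.
func validatePeer(endpointIP, endpointMAC, tunnelEndpointIP string) error {
	if _, _, err := net.ParseCIDR(endpointIP); err != nil {
		return fmt.Errorf("invalid peer IP %s: %v", endpointIP, err)
	}
	if _, err := net.ParseMAC(endpointMAC); err != nil {
		return fmt.Errorf("invalid mac %s: %v", endpointMAC, err)
	}
	if net.ParseIP(tunnelEndpointIP) == nil {
		return fmt.Errorf("invalid VTEP %s", tunnelEndpointIP)
	}
	return nil
}

func main() {
	fmt.Println(validatePeer("10.0.0.5/24", "02:42:0a:00:00:05", "192.168.1.10")) // <nil>
	fmt.Println(validatePeer("10.0.0.5/24", "02:42:0a:00:00:05", "not-a-vtep"))   // invalid VTEP not-a-vtep
}
```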
+func (d *driver) Leave(nid, eid string) error { + if err := validateID(nid, eid); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go new file mode 100644 index 0000000000..b7bda4a6b2 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go @@ -0,0 +1,265 @@ +package overlay + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type endpointTable map[string]*endpoint + +const overlayEndpointPrefix = "overlay/endpoint" + +type endpoint struct { + id string + nid string + profileID string + remote bool + mac net.HardwareAddr + addr *net.IPNet + disablegateway bool + portMapping []types.PortBinding // Operation port bindings +} + +func validateID(nid, eid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + + if eid == "" { + return fmt.Errorf("invalid endpoint id") + } + + return nil +} + +func (n *network) endpoint(eid string) *endpoint { + n.Lock() + defer n.Unlock() + + return n.endpoints[eid] +} + +func (n *network) addEndpoint(ep *endpoint) { + n.Lock() + n.endpoints[ep.id] = ep + n.Unlock() +} + +func (n *network) deleteEndpoint(eid string) { + n.Lock() + delete(n.endpoints, eid) + n.Unlock() +} + +func (n *network) removeEndpointWithAddress(addr *net.IPNet) { + var networkEndpoint *endpoint + n.Lock() + for _, ep := range n.endpoints { + if ep.addr.IP.Equal(addr.IP) { + networkEndpoint = ep + break + } + } + + if networkEndpoint != nil { + delete(n.endpoints, networkEndpoint.id) + } + n.Unlock() + + if networkEndpoint != nil { + logrus.Debugf("Removing stale endpoint from HNS") + _, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileID, "") + + if err != nil { + logrus.Debugf("Failed to delete stale overlay endpoint (%s) from hns", networkEndpoint.id[0:7]) + } + } +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, + epOptions map[string]interface{}) error { + var err error + if err = validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + + ep := n.endpoint(eid) + if ep != nil { + logrus.Debugf("Deleting stale endpoint %s", eid) + n.deleteEndpoint(eid) + + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") + if err != nil { + return err + } + } + + ep = &endpoint{ + id: eid, + nid: n.id, + addr: ifInfo.Address(), + mac: ifInfo.MacAddress(), + } + + if ep.addr == nil { + return fmt.Errorf("create endpoint was not passed interface IP address") + } + + s := n.getSubnetforIP(ep.addr) + if s == nil { + return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid) + } + + // Todo: Add port bindings and qos policies here + + hnsEndpoint := &hcsshim.HNSEndpoint{ + Name: eid, + VirtualNetwork: n.hnsID, + IPAddress: ep.addr.IP, + EnableInternalDNS: true, + GatewayAddress: s.gwIP.String(), + } + + if ep.mac != nil { + hnsEndpoint.MacAddress = ep.mac.String() + } + + paPolicy, err := json.Marshal(hcsshim.PaPolicy{ + Type: "PA", + PA: n.providerAddress, + }) + + if err != nil { + return err + } + + 
hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy) + + if system.GetOSVersion().Build > 16236 { + natPolicy, err := json.Marshal(hcsshim.PaPolicy{ + Type: "OutBoundNAT", + }) + + if err != nil { + return err + } + + hnsEndpoint.Policies = append(hnsEndpoint.Policies, natPolicy) + + epConnectivity, err := windows.ParseEndpointConnectivity(epOptions) + if err != nil { + return err + } + + pbPolicy, err := windows.ConvertPortBindings(epConnectivity.PortBindings) + if err != nil { + return err + } + hnsEndpoint.Policies = append(hnsEndpoint.Policies, pbPolicy...) + + ep.disablegateway = true + } + + configurationb, err := json.Marshal(hnsEndpoint) + if err != nil { + return err + } + + hnsresponse, err := hcsshim.HNSEndpointRequest("POST", "", string(configurationb)) + if err != nil { + return err + } + + ep.profileID = hnsresponse.Id + + if ep.mac == nil { + ep.mac, err = net.ParseMAC(hnsresponse.MacAddress) + if err != nil { + return err + } + + if err := ifInfo.SetMacAddress(ep.mac); err != nil { + return err + } + } + + ep.portMapping, err = windows.ParsePortBindingPolicies(hnsresponse.Policies) + if err != nil { + hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "") + return err + } + + n.addEndpoint(ep) + + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return fmt.Errorf("network id %q not found", nid) + } + + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("endpoint id %q not found", eid) + } + + n.deleteEndpoint(eid) + + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") + if err != nil { + return err + } + + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + if err := validateID(nid, eid); err != nil { + return nil, err + } + + n := d.network(nid) + if n == nil { + return nil, fmt.Errorf("network id %q not found", nid) + } + + ep := n.endpoint(eid) + if ep == nil { + return nil, fmt.Errorf("endpoint id %q not found", eid) + } + + data := make(map[string]interface{}, 1) + data["hnsid"] = ep.profileID + data["AllowUnqualifiedDNSQuery"] = true + + if ep.portMapping != nil { + // Return a copy of the operational data + pmc := make([]types.PortBinding, 0, len(ep.portMapping)) + for _, pm := range ep.portMapping { + pmc = append(pmc, pm.GetCopy()) + } + data[netlabel.PortMap] = pmc + } + + return data, nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go new file mode 100644 index 0000000000..9cc46f8cfe --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go @@ -0,0 +1,381 @@ +package overlay + +import ( + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +var ( + hostMode bool + networkMu sync.Mutex +) + +type networkTable map[string]*network + +type subnet struct { + vni uint32 + subnetIP *net.IPNet + gwIP *net.IP +} + +type subnetJSON struct { + SubnetIP string + GwIP string + Vni uint32 +} + +type network struct { + id string + name string + hnsID string + providerAddress string + interfaceName string + endpoints endpointTable + driver *driver + initEpoch int + initErr error + 
subnets []*subnet + secure bool + sync.Mutex +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + var ( + networkName string + interfaceName string + staleNetworks []string + ) + + if id == "" { + return fmt.Errorf("invalid network id") + } + + if nInfo == nil { + return fmt.Errorf("invalid network info structure") + } + + if len(ipV4Data) == 0 || ipV4Data[0].Pool.String() == "0.0.0.0/0" { + return types.BadRequestErrorf("ipv4 pool is empty") + } + + staleNetworks = make([]string, 0) + vnis := make([]uint32, 0, len(ipV4Data)) + + existingNetwork := d.network(id) + if existingNetwork != nil { + logrus.Debugf("Network preexists. Deleting %s", id) + err := d.DeleteNetwork(id) + if err != nil { + logrus.Errorf("Error deleting stale network %s", err.Error()) + } + } + + n := &network{ + id: id, + driver: d, + endpoints: endpointTable{}, + subnets: []*subnet{}, + } + + genData, ok := option[netlabel.GenericData].(map[string]string) + + if !ok { + return fmt.Errorf("Unknown generic data option") + } + + for label, value := range genData { + switch label { + case "com.docker.network.windowsshim.networkname": + networkName = value + case "com.docker.network.windowsshim.interface": + interfaceName = value + case "com.docker.network.windowsshim.hnsid": + n.hnsID = value + case netlabel.OverlayVxlanIDList: + vniStrings := strings.Split(value, ",") + for _, vniStr := range vniStrings { + vni, err := strconv.Atoi(vniStr) + if err != nil { + return fmt.Errorf("invalid vxlan id value %q passed", vniStr) + } + + vnis = append(vnis, uint32(vni)) + } + } + } + + // If we are getting vnis from libnetwork, either we get for + // all subnets or none. + if len(vnis) < len(ipV4Data) { + return fmt.Errorf("insufficient vnis(%d) passed to overlay. 
Windows driver requires VNIs to be prepopulated", len(vnis)) + } + + for i, ipd := range ipV4Data { + s := &subnet{ + subnetIP: ipd.Pool, + gwIP: &ipd.Gateway.IP, + } + + if len(vnis) != 0 { + s.vni = vnis[i] + } + + d.Lock() + for _, network := range d.networks { + found := false + for _, sub := range network.subnets { + if sub.vni == s.vni { + staleNetworks = append(staleNetworks, network.id) + found = true + break + } + } + if found { + break + } + } + d.Unlock() + + n.subnets = append(n.subnets, s) + } + + for _, staleNetwork := range staleNetworks { + d.DeleteNetwork(staleNetwork) + } + + n.name = networkName + if n.name == "" { + n.name = id + } + + n.interfaceName = interfaceName + + if nInfo != nil { + if err := nInfo.TableEventRegister(ovPeerTable, driverapi.EndpointObject); err != nil { + return err + } + } + + d.addNetwork(n) + + err := d.createHnsNetwork(n) + + if err != nil { + d.deleteNetwork(id) + } else { + genData["com.docker.network.windowsshim.hnsid"] = n.hnsID + } + + return err +} + +func (d *driver) DeleteNetwork(nid string) error { + if nid == "" { + return fmt.Errorf("invalid network id") + } + + n := d.network(nid) + if n == nil { + return types.ForbiddenErrorf("could not find network with id %s", nid) + } + + _, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsID, "") + if err != nil { + return types.ForbiddenErrorf(err.Error()) + } + + d.deleteNetwork(nid) + + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +func (d *driver) addNetwork(n *network) { + d.Lock() + d.networks[n.id] = n + d.Unlock() +} + +func (d *driver) deleteNetwork(nid string) { + d.Lock() + delete(d.networks, nid) + d.Unlock() +} + +func (d *driver) network(nid string) *network { + d.Lock() + defer d.Unlock() + return d.networks[nid] +} + +// func (n *network) restoreNetworkEndpoints() error { +// logrus.Infof("Restoring endpoints for overlay network: %s", n.id) + +// hnsresponse, err := hcsshim.HNSListEndpointRequest("GET", "", "") +// if err != nil { +// return err +// } + +// for _, endpoint := range hnsresponse { +// if endpoint.VirtualNetwork != n.hnsID { +// continue +// } + +// ep := n.convertToOverlayEndpoint(&endpoint) + +// if ep != nil { +// logrus.Debugf("Restored endpoint:%s Remote:%t", ep.id, ep.remote) +// n.addEndpoint(ep) +// } +// } + +// return nil +// } + +func (n *network) convertToOverlayEndpoint(v *hcsshim.HNSEndpoint) *endpoint { + ep := &endpoint{ + id: v.Name, + profileID: v.Id, + nid: n.id, + remote: v.IsRemoteEndpoint, + } + + mac, err := net.ParseMAC(v.MacAddress) + + if err != nil { + return nil + } + + ep.mac = mac + ep.addr = &net.IPNet{ + IP: v.IPAddress, + Mask: net.CIDRMask(32, 32), + } + + return ep +} + +func (d *driver) createHnsNetwork(n *network) error { + + subnets := []hcsshim.Subnet{} + + for _, s := range n.subnets { + subnet := hcsshim.Subnet{ + AddressPrefix: s.subnetIP.String(), + } + + if s.gwIP != nil { + subnet.GatewayAddress = s.gwIP.String() + } + + vsidPolicy, err := json.Marshal(hcsshim.VsidPolicy{ + Type: "VSID", + VSID: uint(s.vni), + }) + + if err != nil { + return err + } + + subnet.Policies = append(subnet.Policies, vsidPolicy) + subnets = append(subnets, subnet) + } + + network := &hcsshim.HNSNetwork{ + Name: n.name, + Type: d.Type(), + Subnets: subnets, + NetworkAdapterName: n.interfaceName, + AutomaticDNS: true, + } + + configurationb, err := 
json.Marshal(network) + if err != nil { + return err + } + + configuration := string(configurationb) + logrus.Infof("HNSNetwork Request =%v", configuration) + + hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration) + if err != nil { + return err + } + + n.hnsID = hnsresponse.Id + n.providerAddress = hnsresponse.ManagementIP + + return nil +} + +// contains return true if the passed ip belongs to one the network's +// subnets +func (n *network) contains(ip net.IP) bool { + for _, s := range n.subnets { + if s.subnetIP.Contains(ip) { + return true + } + } + + return false +} + +// getSubnetforIP returns the subnet to which the given IP belongs +func (n *network) getSubnetforIP(ip *net.IPNet) *subnet { + for _, s := range n.subnets { + // first check if the mask lengths are the same + i, _ := s.subnetIP.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if s.subnetIP.Contains(ip.IP) { + return s + } + } + return nil +} + +// getMatchingSubnet return the network's subnet that matches the input +func (n *network) getMatchingSubnet(ip *net.IPNet) *subnet { + if ip == nil { + return nil + } + for _, s := range n.subnets { + // first check if the mask lengths are the same + i, _ := s.subnetIP.Mask.Size() + j, _ := ip.Mask.Size() + if i != j { + continue + } + if s.subnetIP.IP.Equal(ip.IP) { + return s + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.pb.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.pb.go new file mode 100644 index 0000000000..cfa0eeeae4 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay.pb.go @@ -0,0 +1,468 @@ +// Code generated by protoc-gen-gogo. +// source: overlay.proto +// DO NOT EDIT! + +/* + Package overlay is a generated protocol buffer package. + + It is generated from these files: + overlay.proto + + It has these top-level messages: + PeerRecord +*/ +package overlay + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import strings "strings" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +const _ = proto.GoGoProtoPackageIsVersion1 + +// PeerRecord defines the information corresponding to a peer +// container in the overlay network. +type PeerRecord struct { + // Endpoint IP is the IP of the container attachment on the + // given overlay network. + EndpointIP string `protobuf:"bytes,1,opt,name=endpoint_ip,json=endpointIp,proto3" json:"endpoint_ip,omitempty"` + // Endpoint MAC is the mac address of the container attachment + // on the given overlay network. + EndpointMAC string `protobuf:"bytes,2,opt,name=endpoint_mac,json=endpointMac,proto3" json:"endpoint_mac,omitempty"` + // Tunnel Endpoint IP defines the host IP for the host in + // which this container is running and can be reached by + // building a tunnel to that host IP. 
+ TunnelEndpointIP string `protobuf:"bytes,3,opt,name=tunnel_endpoint_ip,json=tunnelEndpointIp,proto3" json:"tunnel_endpoint_ip,omitempty"` +} + +func (m *PeerRecord) Reset() { *m = PeerRecord{} } +func (*PeerRecord) ProtoMessage() {} +func (*PeerRecord) Descriptor() ([]byte, []int) { return fileDescriptorOverlay, []int{0} } + +func init() { + proto.RegisterType((*PeerRecord)(nil), "overlay.PeerRecord") +} +func (this *PeerRecord) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&overlay.PeerRecord{") + s = append(s, "EndpointIP: "+fmt.Sprintf("%#v", this.EndpointIP)+",\n") + s = append(s, "EndpointMAC: "+fmt.Sprintf("%#v", this.EndpointMAC)+",\n") + s = append(s, "TunnelEndpointIP: "+fmt.Sprintf("%#v", this.TunnelEndpointIP)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringOverlay(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringOverlay(e map[int32]github_com_gogo_protobuf_proto.Extension) string { + if e == nil { + return "nil" + } + s := "map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "}" + return s +} +func (m *PeerRecord) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalTo(data) + if err != nil { + return nil, err + } + return data[:n], nil +} + +func (m *PeerRecord) MarshalTo(data []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.EndpointIP) > 0 { + data[i] = 0xa + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.EndpointIP))) + i += copy(data[i:], m.EndpointIP) + } + if len(m.EndpointMAC) > 0 { + data[i] = 0x12 + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.EndpointMAC))) + i += copy(data[i:], m.EndpointMAC) + } + if len(m.TunnelEndpointIP) > 0 { + data[i] = 0x1a + i++ + i = encodeVarintOverlay(data, i, uint64(len(m.TunnelEndpointIP))) + i += copy(data[i:], m.TunnelEndpointIP) + } + return i, nil +} + +func encodeFixed64Overlay(data []byte, offset int, v uint64) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + data[offset+4] = uint8(v >> 32) + data[offset+5] = uint8(v >> 40) + data[offset+6] = uint8(v >> 48) + data[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Overlay(data []byte, offset int, v uint32) int { + data[offset] = uint8(v) + data[offset+1] = uint8(v >> 8) + data[offset+2] = uint8(v >> 16) + data[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintOverlay(data []byte, offset int, v uint64) int { + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return offset + 1 +} +func (m *PeerRecord) Size() (n int) { + var l int + _ = l + l = len(m.EndpointIP) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + l = len(m.EndpointMAC) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + l = len(m.TunnelEndpointIP) + if l > 0 { + n += 1 + l + sovOverlay(uint64(l)) + } + return n +} + +func sovOverlay(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozOverlay(x 
uint64) (n int) { + return sovOverlay(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PeerRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PeerRecord{`, + `EndpointIP:` + fmt.Sprintf("%v", this.EndpointIP) + `,`, + `EndpointMAC:` + fmt.Sprintf("%v", this.EndpointMAC) + `,`, + `TunnelEndpointIP:` + fmt.Sprintf("%v", this.TunnelEndpointIP) + `,`, + `}`, + }, "") + return s +} +func valueToStringOverlay(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PeerRecord) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PeerRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PeerRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointMAC", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointMAC = string(data[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TunnelEndpointIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOverlay + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOverlay + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TunnelEndpointIP = string(data[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOverlay(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOverlay + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
skipOverlay(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthOverlay + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOverlay + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipOverlay(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthOverlay = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOverlay = fmt.Errorf("proto: integer overflow") +) + +var fileDescriptorOverlay = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xcd, 0x2f, 0x4b, 0x2d, + 0xca, 0x49, 0xac, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x87, 0x72, 0xa5, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x69, 0x2b, 0x23, 0x17, 0x57, 0x40, + 0x6a, 0x6a, 0x51, 0x50, 0x6a, 0x72, 0x7e, 0x51, 0x8a, 0x90, 0x3e, 0x17, 0x77, 0x6a, 0x5e, 0x4a, + 0x41, 0x7e, 0x66, 0x5e, 0x49, 0x7c, 0x66, 0x81, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x13, 0xdf, + 0xa3, 0x7b, 0xf2, 0x5c, 0xae, 0x50, 0x61, 0xcf, 0x80, 0x20, 0x2e, 0x98, 0x12, 0xcf, 0x02, 0x21, + 0x23, 0x2e, 0x1e, 0xb8, 0x86, 0xdc, 0xc4, 0x64, 0x09, 0x26, 0xb0, 0x0e, 0x7e, 0xa0, 0x0e, 0x6e, + 0x98, 0x0e, 0x5f, 0x47, 0xe7, 0x20, 0xb8, 0xa9, 0xbe, 0x89, 0xc9, 0x42, 0x4e, 0x5c, 0x42, 0x25, + 0xa5, 0x79, 0x79, 0xa9, 0x39, 0xf1, 0xc8, 0x76, 0x31, 0x83, 0x75, 0x8a, 0x00, 0x75, 0x0a, 0x84, + 0x80, 0x65, 0x91, 0x6c, 0x14, 0x28, 0x41, 0x15, 0x29, 0x70, 0x92, 0xb8, 0xf1, 0x50, 0x8e, 0xe1, + 0xc3, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x80, 0xf8, 0x02, 0x10, 0x3f, 0x00, 0xe2, + 0x24, 0x36, 0xb0, 0xc7, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xd7, 0x7d, 0x7d, 0x08, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go new file mode 100644 index 0000000000..65ad62ae0d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go @@ -0,0 +1,159 
@@ +package overlay + +//go:generate protoc -I.:../../Godeps/_workspace/src/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/drivers/overlay,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. overlay.proto + +import ( + "encoding/json" + "net" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + networkType = "overlay" + vethPrefix = "veth" + vethLen = 7 + secureOption = "encrypted" +) + +type driver struct { + config map[string]interface{} + networks networkTable + store datastore.DataStore + localStore datastore.DataStore + once sync.Once + joinOnce sync.Once + sync.Mutex +} + +// Init registers a new instance of overlay driver +func Init(dc driverapi.DriverCallback, config map[string]interface{}) error { + c := driverapi.Capability{ + DataScope: datastore.GlobalScope, + ConnectivityScope: datastore.GlobalScope, + } + + d := &driver{ + networks: networkTable{}, + config: config, + } + + if data, ok := config[netlabel.GlobalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("failed to initialize data store: %v", err) + } + } + + if data, ok := config[netlabel.LocalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.localStore, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("failed to initialize local data store: %v", err) + } + } + + d.restoreHNSNetworks() + + return dc.RegisterDriver(networkType, d, c) +} + +func (d *driver) restoreHNSNetworks() error { + logrus.Infof("Restoring existing overlay networks from HNS into docker") + + hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") + if err != nil { + return err + } + + for _, v := range hnsresponse { + if v.Type != networkType { + continue + } + + logrus.Infof("Restoring overlay network: %s", v.Name) + n := d.convertToOverlayNetwork(&v) + d.addNetwork(n) + + // + // We assume that any network will be recreated on daemon restart + // and therefore don't restore hns endpoints for now + // + //n.restoreNetworkEndpoints() + } + + return nil +} + +func (d *driver) convertToOverlayNetwork(v *hcsshim.HNSNetwork) *network { + n := &network{ + id: v.Name, + hnsID: v.Id, + driver: d, + endpoints: endpointTable{}, + subnets: []*subnet{}, + providerAddress: v.ManagementIP, + } + + for _, hnsSubnet := range v.Subnets { + vsidPolicy := &hcsshim.VsidPolicy{} + for _, policy := range hnsSubnet.Policies { + if err := json.Unmarshal([]byte(policy), &vsidPolicy); err == nil && vsidPolicy.Type == "VSID" { + break + } + } + + gwIP := net.ParseIP(hnsSubnet.GatewayAddress) + localsubnet := &subnet{ + vni: uint32(vsidPolicy.VSID), + gwIP: &gwIP, + } + + _, subnetIP, err := net.ParseCIDR(hnsSubnet.AddressPrefix) + + if err != nil { + logrus.Errorf("Error parsing subnet address %s ", hnsSubnet.AddressPrefix) + continue + } + + localsubnet.subnetIP = subnetIP + + n.subnets = append(n.subnets, localsubnet) + } + + return n +} + +func (d *driver) Type() string { 
+ return networkType +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return types.NotImplementedErrorf("not implemented") +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return types.NotImplementedErrorf("not implemented") +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go new file mode 100644 index 0000000000..159bfd6ed1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go @@ -0,0 +1,120 @@ +package overlay + +import ( + "fmt" + "net" + + "encoding/json" + + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim" +) + +const ovPeerTable = "overlay_peer_table" + +func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, + peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error { + + logrus.Debugf("WINOVERLAY: Enter peerAdd for ca ip %s with ca mac %s", peerIP.String(), peerMac.String()) + + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return nil + } + + if updateDb { + logrus.Info("WINOVERLAY: peerAdd: notifying HNS of the REMOTE endpoint") + + hnsEndpoint := &hcsshim.HNSEndpoint{ + Name: eid, + VirtualNetwork: n.hnsID, + MacAddress: peerMac.String(), + IPAddress: peerIP, + IsRemoteEndpoint: true, + } + + paPolicy, err := json.Marshal(hcsshim.PaPolicy{ + Type: "PA", + PA: vtep.String(), + }) + + if err != nil { + return err + } + + hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy) + + configurationb, err := json.Marshal(hnsEndpoint) + if err != nil { + return err + } + + // Temp: We have to create an endpoint object to keep track of the HNS ID for + // this endpoint so that we can retrieve it later when the endpoint is deleted. + // This seems unnecessary when we already have dockers EID. 
See if we can pass + // the global EID to HNS to use as it's ID, rather than having each HNS assign + // it's own local ID for the endpoint + + addr, err := types.ParseCIDR(peerIP.String() + "/32") + if err != nil { + return err + } + + n.removeEndpointWithAddress(addr) + + hnsresponse, err := hcsshim.HNSEndpointRequest("POST", "", string(configurationb)) + if err != nil { + return err + } + + ep := &endpoint{ + id: eid, + nid: nid, + addr: addr, + mac: peerMac, + profileID: hnsresponse.Id, + remote: true, + } + + n.addEndpoint(ep) + } + + return nil +} + +func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask, + peerMac net.HardwareAddr, vtep net.IP, updateDb bool) error { + + logrus.Infof("WINOVERLAY: Enter peerDelete for endpoint %s and peer ip %s", eid, peerIP.String()) + + if err := validateID(nid, eid); err != nil { + return err + } + + n := d.network(nid) + if n == nil { + return nil + } + + ep := n.endpoint(eid) + if ep == nil { + return fmt.Errorf("could not find endpoint with id %s", eid) + } + + if updateDb { + _, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") + if err != nil { + return err + } + + n.deleteEndpoint(eid) + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go new file mode 100644 index 0000000000..5927fd8560 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go @@ -0,0 +1,867 @@ +// +build windows + +// Shim for the Host Network Service (HNS) to manage networking for +// Windows Server containers and Hyper-V containers. This module +// is a basic libnetwork driver that passes all the calls to HNS +// It implements the 4 networking modes supported by HNS L2Bridge, +// L2Tunnel, NAT and Transparent(DHCP) +// +// The network are stored in memory and docker daemon ensures discovering +// and loading these networks on startup + +package windows + +import ( + "encoding/json" + "fmt" + "net" + "strconv" + "strings" + "sync" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/system" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// networkConfiguration for network specific configuration +type networkConfiguration struct { + ID string + Type string + Name string + HnsID string + RDID string + VLAN uint + VSID uint + DNSServers string + MacPools []hcsshim.MacPool + DNSSuffix string + SourceMac string + NetworkAdapterName string + dbIndex uint64 + dbExists bool + DisableGatewayDNS bool + EnableOutboundNat bool + OutboundNatExceptions []string +} + +// endpointConfiguration represents the user specified configuration for the sandbox endpoint +type endpointOption struct { + MacAddress net.HardwareAddr + QosPolicies []types.QosPolicy + DNSServers []string + DisableDNS bool + DisableICC bool +} + +// EndpointConnectivity stores the port bindings and exposed ports that the user has specified in epOptions. +type EndpointConnectivity struct { + PortBindings []types.PortBinding + ExposedPorts []types.TransportPort +} + +type hnsEndpoint struct { + id string + nid string + profileID string + Type string + //Note: Currently, the sandboxID is the same as the containerID since windows does + //not expose the sandboxID. 
+ //In the future, windows will support a proper sandboxID that is different + //than the containerID. + //Therefore, we are using sandboxID now, so that we won't have to change this code + //when windows properly supports a sandboxID. + sandboxID string + macAddress net.HardwareAddr + epOption *endpointOption // User specified parameters + epConnectivity *EndpointConnectivity // User specified parameters + portMapping []types.PortBinding // Operation port bindings + addr *net.IPNet + gateway net.IP + dbIndex uint64 + dbExists bool +} + +type hnsNetwork struct { + id string + created bool + config *networkConfiguration + endpoints map[string]*hnsEndpoint // key: endpoint id + driver *driver // The network's driver + sync.Mutex +} + +type driver struct { + name string + networks map[string]*hnsNetwork + store datastore.DataStore + sync.Mutex +} + +const ( + errNotFound = "HNS failed with error : The object identifier does not represent a valid object. " +) + +// IsBuiltinLocalDriver validates if network-type is a builtin local-scoped driver +func IsBuiltinLocalDriver(networkType string) bool { + if "l2bridge" == networkType || "l2tunnel" == networkType || "nat" == networkType || "ics" == networkType || "transparent" == networkType { + return true + } + + return false +} + +// New constructs a new bridge driver +func newDriver(networkType string) *driver { + return &driver{name: networkType, networks: map[string]*hnsNetwork{}} +} + +// GetInit returns an initializer for the given network type +func GetInit(networkType string) func(dc driverapi.DriverCallback, config map[string]interface{}) error { + return func(dc driverapi.DriverCallback, config map[string]interface{}) error { + if !IsBuiltinLocalDriver(networkType) { + return types.BadRequestErrorf("Network type not supported: %s", networkType) + } + + d := newDriver(networkType) + + err := d.initStore(config) + if err != nil { + return err + } + + return dc.RegisterDriver(networkType, d, driverapi.Capability{ + DataScope: datastore.LocalScope, + ConnectivityScope: datastore.LocalScope, + }) + } +} + +func (d *driver) getNetwork(id string) (*hnsNetwork, error) { + d.Lock() + defer d.Unlock() + + if nw, ok := d.networks[id]; ok { + return nw, nil + } + + return nil, types.NotFoundErrorf("network not found: %s", id) +} + +func (n *hnsNetwork) getEndpoint(eid string) (*hnsEndpoint, error) { + n.Lock() + defer n.Unlock() + + if ep, ok := n.endpoints[eid]; ok { + return ep, nil + } + + return nil, types.NotFoundErrorf("Endpoint not found: %s", eid) +} + +func (d *driver) parseNetworkOptions(id string, genericOptions map[string]string) (*networkConfiguration, error) { + config := &networkConfiguration{Type: d.name} + + for label, value := range genericOptions { + switch label { + case NetworkName: + config.Name = value + case HNSID: + config.HnsID = value + case RoutingDomain: + config.RDID = value + case Interface: + config.NetworkAdapterName = value + case DNSSuffix: + config.DNSSuffix = value + case DNSServers: + config.DNSServers = value + case DisableGatewayDNS: + b, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + config.DisableGatewayDNS = b + case MacPool: + config.MacPools = make([]hcsshim.MacPool, 0) + s := strings.Split(value, ",") + if len(s)%2 != 0 { + return nil, types.BadRequestErrorf("Invalid mac pool. 
You must specify both a start range and an end range") + } + for i := 0; i < len(s)-1; i += 2 { + config.MacPools = append(config.MacPools, hcsshim.MacPool{ + StartMacAddress: s[i], + EndMacAddress: s[i+1], + }) + } + case VLAN: + vlan, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return nil, err + } + config.VLAN = uint(vlan) + case VSID: + vsid, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return nil, err + } + config.VSID = uint(vsid) + case EnableOutboundNat: + if system.GetOSVersion().Build <= 16236 { + return nil, fmt.Errorf("Invalid network option. OutboundNat is not supported on this OS version") + } + b, err := strconv.ParseBool(value) + if err != nil { + return nil, err + } + config.EnableOutboundNat = b + case OutboundNatExceptions: + s := strings.Split(value, ",") + config.OutboundNatExceptions = s + } + } + + config.ID = id + config.Type = d.name + return config, nil +} + +func (c *networkConfiguration) processIPAM(id string, ipamV4Data, ipamV6Data []driverapi.IPAMData) error { + if len(ipamV6Data) > 0 { + return types.ForbiddenErrorf("windowsshim driver doesn't support v6 subnets") + } + + if len(ipamV4Data) == 0 { + return types.BadRequestErrorf("network %s requires ipv4 configuration", id) + } + + return nil +} + +func (d *driver) EventNotify(etype driverapi.EventType, nid, tableName, key string, value []byte) { +} + +func (d *driver) DecodeTableEntry(tablename string, key string, value []byte) (string, map[string]string) { + return "", nil +} + +func (d *driver) createNetwork(config *networkConfiguration) error { + network := &hnsNetwork{ + id: config.ID, + endpoints: make(map[string]*hnsEndpoint), + config: config, + driver: d, + } + + d.Lock() + d.networks[config.ID] = network + d.Unlock() + + return nil +} + +// Create a new network +func (d *driver) CreateNetwork(id string, option map[string]interface{}, nInfo driverapi.NetworkInfo, ipV4Data, ipV6Data []driverapi.IPAMData) error { + if _, err := d.getNetwork(id); err == nil { + return types.ForbiddenErrorf("network %s exists", id) + } + + genData, ok := option[netlabel.GenericData].(map[string]string) + if !ok { + return fmt.Errorf("Unknown generic data option") + } + + // Parse and validate the config. It should not conflict with existing networks' config + config, err := d.parseNetworkOptions(id, genData) + if err != nil { + return err + } + + err = config.processIPAM(id, ipV4Data, ipV6Data) + if err != nil { + return err + } + + err = d.createNetwork(config) + + if err != nil { + return err + } + + // A non blank hnsid indicates that the network was discovered + // from HNS. 
No need to call HNS if this network was discovered + // from HNS + if config.HnsID == "" { + subnets := []hcsshim.Subnet{} + + for _, ipData := range ipV4Data { + subnet := hcsshim.Subnet{ + AddressPrefix: ipData.Pool.String(), + } + + if ipData.Gateway != nil { + subnet.GatewayAddress = ipData.Gateway.IP.String() + } + + subnets = append(subnets, subnet) + } + + network := &hcsshim.HNSNetwork{ + Name: config.Name, + Type: d.name, + Subnets: subnets, + DNSServerList: config.DNSServers, + DNSSuffix: config.DNSSuffix, + MacPools: config.MacPools, + SourceMac: config.SourceMac, + NetworkAdapterName: config.NetworkAdapterName, + } + + if config.VLAN != 0 { + vlanPolicy, err := json.Marshal(hcsshim.VlanPolicy{ + Type: "VLAN", + VLAN: config.VLAN, + }) + + if err != nil { + return err + } + network.Policies = append(network.Policies, vlanPolicy) + } + + if config.VSID != 0 { + vsidPolicy, err := json.Marshal(hcsshim.VsidPolicy{ + Type: "VSID", + VSID: config.VSID, + }) + + if err != nil { + return err + } + network.Policies = append(network.Policies, vsidPolicy) + } + + if network.Name == "" { + network.Name = id + } + + configurationb, err := json.Marshal(network) + if err != nil { + return err + } + + configuration := string(configurationb) + logrus.Debugf("HNSNetwork Request =%v Address Space=%v", configuration, subnets) + + hnsresponse, err := hcsshim.HNSNetworkRequest("POST", "", configuration) + if err != nil { + return err + } + + config.HnsID = hnsresponse.Id + genData[HNSID] = config.HnsID + + } else { + // Delete any stale HNS endpoints for this network. + if endpoints, err := hcsshim.HNSListEndpointRequest(); err == nil { + for _, ep := range endpoints { + if ep.VirtualNetwork == config.HnsID { + logrus.Infof("Removing stale HNS endpoint %s", ep.Id) + _, err = hcsshim.HNSEndpointRequest("DELETE", ep.Id, "") + if err != nil { + logrus.Warnf("Error removing HNS endpoint %s", ep.Id) + } + } + } + } else { + logrus.Warnf("Error listing HNS endpoints for network %s", config.HnsID) + } + } + + n, err := d.getNetwork(id) + if err != nil { + return err + } + n.created = true + return d.storeUpdate(config) +} + +func (d *driver) DeleteNetwork(nid string) error { + n, err := d.getNetwork(nid) + if err != nil { + return types.InternalMaskableErrorf("%s", err) + } + + n.Lock() + config := n.config + n.Unlock() + + if n.created { + _, err = hcsshim.HNSNetworkRequest("DELETE", config.HnsID, "") + if err != nil && err.Error() != errNotFound { + return types.ForbiddenErrorf(err.Error()) + } + } + + d.Lock() + delete(d.networks, nid) + d.Unlock() + + // delele endpoints belong to this network + for _, ep := range n.endpoints { + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove bridge endpoint %s from store: %v", ep.id[0:7], err) + } + } + + return d.storeDelete(config) +} + +func convertQosPolicies(qosPolicies []types.QosPolicy) ([]json.RawMessage, error) { + var qps []json.RawMessage + + // Enumerate through the qos policies specified by the user and convert + // them into the internal structure matching the JSON blob that can be + // understood by the HCS. 
+ for _, elem := range qosPolicies { + encodedPolicy, err := json.Marshal(hcsshim.QosPolicy{ + Type: "QOS", + MaximumOutgoingBandwidthInBytes: elem.MaxEgressBandwidth, + }) + + if err != nil { + return nil, err + } + qps = append(qps, encodedPolicy) + } + return qps, nil +} + +// ConvertPortBindings converts PortBindings to JSON for HNS request +func ConvertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, error) { + var pbs []json.RawMessage + + // Enumerate through the port bindings specified by the user and convert + // them into the internal structure matching the JSON blob that can be + // understood by the HCS. + for _, elem := range portBindings { + proto := strings.ToUpper(elem.Proto.String()) + if proto != "TCP" && proto != "UDP" { + return nil, fmt.Errorf("invalid protocol %s", elem.Proto.String()) + } + + if elem.HostPort != elem.HostPortEnd { + return nil, fmt.Errorf("Windows does not support more than one host port in NAT settings") + } + + if len(elem.HostIP) != 0 { + return nil, fmt.Errorf("Windows does not support host IP addresses in NAT settings") + } + + encodedPolicy, err := json.Marshal(hcsshim.NatPolicy{ + Type: "NAT", + ExternalPort: elem.HostPort, + InternalPort: elem.Port, + Protocol: elem.Proto.String(), + }) + + if err != nil { + return nil, err + } + pbs = append(pbs, encodedPolicy) + } + return pbs, nil +} + +// ParsePortBindingPolicies parses HNS endpoint response message to PortBindings +func ParsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding, error) { + var bindings []types.PortBinding + hcsPolicy := &hcsshim.NatPolicy{} + + for _, elem := range policies { + + if err := json.Unmarshal([]byte(elem), &hcsPolicy); err != nil || hcsPolicy.Type != "NAT" { + continue + } + + binding := types.PortBinding{ + HostPort: hcsPolicy.ExternalPort, + HostPortEnd: hcsPolicy.ExternalPort, + Port: hcsPolicy.InternalPort, + Proto: types.ParseProtocol(hcsPolicy.Protocol), + HostIP: net.IPv4(0, 0, 0, 0), + } + + bindings = append(bindings, binding) + } + + return bindings, nil +} + +func parseEndpointOptions(epOptions map[string]interface{}) (*endpointOption, error) { + if epOptions == nil { + return nil, nil + } + + ec := &endpointOption{} + + if opt, ok := epOptions[netlabel.MacAddress]; ok { + if mac, ok := opt.(net.HardwareAddr); ok { + ec.MacAddress = mac + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + if opt, ok := epOptions[QosPolicies]; ok { + if policies, ok := opt.([]types.QosPolicy); ok { + ec.QosPolicies = policies + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + if opt, ok := epOptions[netlabel.DNSServers]; ok { + if dns, ok := opt.([]string); ok { + ec.DNSServers = dns + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + if opt, ok := epOptions[DisableICC]; ok { + if disableICC, ok := opt.(bool); ok { + ec.DisableICC = disableICC + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + if opt, ok := epOptions[DisableDNS]; ok { + if disableDNS, ok := opt.(bool); ok { + ec.DisableDNS = disableDNS + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + return ec, nil +} + +// ParseEndpointConnectivity parses options passed to CreateEndpoint, specifically port bindings, and store in a endpointConnectivity object. 
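ConvertPortBindings and ParsePortBindingPolicies above are plain JSON shuttling: each user port binding becomes one raw policy blob that HNS understands, and the response is filtered back by policy type. A self-contained sketch of that round trip, using a local natPolicy struct as a stand-in for the hcsshim policy shape:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// natPolicy is a local stand-in for the hcsshim NAT policy used above.
type natPolicy struct {
	Type         string
	Protocol     string
	InternalPort uint16
	ExternalPort uint16
}

func main() {
	// Encode: one port binding becomes one raw policy document.
	encoded, err := json.Marshal(natPolicy{
		Type: "NAT", Protocol: "TCP", InternalPort: 80, ExternalPort: 8080,
	})
	if err != nil {
		panic(err)
	}
	policies := []json.RawMessage{encoded}
	fmt.Println(string(policies[0]))

	// Decode: skip anything that is not a NAT policy, as the parser above does.
	for _, raw := range policies {
		var p natPolicy
		if err := json.Unmarshal(raw, &p); err != nil || p.Type != "NAT" {
			continue
		}
		fmt.Printf("host %d -> container %d (%s)\n", p.ExternalPort, p.InternalPort, p.Protocol)
	}
}
```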
+func ParseEndpointConnectivity(epOptions map[string]interface{}) (*EndpointConnectivity, error) { + if epOptions == nil { + return nil, nil + } + + ec := &EndpointConnectivity{} + + if opt, ok := epOptions[netlabel.PortMap]; ok { + if bs, ok := opt.([]types.PortBinding); ok { + ec.PortBindings = bs + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + + if opt, ok := epOptions[netlabel.ExposedPorts]; ok { + if ports, ok := opt.([]types.TransportPort); ok { + ec.ExposedPorts = ports + } else { + return nil, fmt.Errorf("Invalid endpoint configuration") + } + } + return ec, nil +} + +func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo, epOptions map[string]interface{}) error { + n, err := d.getNetwork(nid) + if err != nil { + return err + } + + // Check if endpoint id is good and retrieve corresponding endpoint + ep, err := n.getEndpoint(eid) + if err == nil && ep != nil { + return driverapi.ErrEndpointExists(eid) + } + + endpointStruct := &hcsshim.HNSEndpoint{ + VirtualNetwork: n.config.HnsID, + } + + epOption, err := parseEndpointOptions(epOptions) + if err != nil { + return err + } + epConnectivity, err := ParseEndpointConnectivity(epOptions) + if err != nil { + return err + } + + macAddress := ifInfo.MacAddress() + // Use the macaddress if it was provided + if macAddress != nil { + endpointStruct.MacAddress = strings.Replace(macAddress.String(), ":", "-", -1) + } + + endpointStruct.Policies, err = ConvertPortBindings(epConnectivity.PortBindings) + if err != nil { + return err + } + + qosPolicies, err := convertQosPolicies(epOption.QosPolicies) + if err != nil { + return err + } + endpointStruct.Policies = append(endpointStruct.Policies, qosPolicies...) + + if ifInfo.Address() != nil { + endpointStruct.IPAddress = ifInfo.Address().IP + } + + endpointStruct.DNSServerList = strings.Join(epOption.DNSServers, ",") + + // overwrite the ep DisableDNS option if DisableGatewayDNS was set to true during the network creation option + if n.config.DisableGatewayDNS { + logrus.Debugf("n.config.DisableGatewayDNS[%v] overwrites epOption.DisableDNS[%v]", n.config.DisableGatewayDNS, epOption.DisableDNS) + epOption.DisableDNS = n.config.DisableGatewayDNS + } + + if n.driver.name == "nat" && !epOption.DisableDNS { + logrus.Debugf("endpointStruct.EnableInternalDNS =[%v]", endpointStruct.EnableInternalDNS) + endpointStruct.EnableInternalDNS = true + } + + endpointStruct.DisableICC = epOption.DisableICC + + // Inherit OutboundNat policy from the network + if n.config.EnableOutboundNat { + outboundNatPolicy, err := json.Marshal(hcsshim.OutboundNatPolicy{ + Policy: hcsshim.Policy{Type: hcsshim.OutboundNat}, + Exceptions: n.config.OutboundNatExceptions, + }) + + if err != nil { + return err + } + endpointStruct.Policies = append(endpointStruct.Policies, outboundNatPolicy) + } + + configurationb, err := json.Marshal(endpointStruct) + if err != nil { + return err + } + + hnsresponse, err := hcsshim.HNSEndpointRequest("POST", "", string(configurationb)) + if err != nil { + return err + } + + mac, err := net.ParseMAC(hnsresponse.MacAddress) + if err != nil { + return err + } + + // TODO For now the ip mask is not in the info generated by HNS + endpoint := &hnsEndpoint{ + id: eid, + nid: n.id, + Type: d.name, + addr: &net.IPNet{IP: hnsresponse.IPAddress, Mask: hnsresponse.IPAddress.DefaultMask()}, + macAddress: mac, + } + + if hnsresponse.GatewayAddress != "" { + endpoint.gateway = net.ParseIP(hnsresponse.GatewayAddress) + } + + endpoint.profileID = hnsresponse.Id 
+ endpoint.epConnectivity = epConnectivity + endpoint.epOption = epOption + endpoint.portMapping, err = ParsePortBindingPolicies(hnsresponse.Policies) + + if err != nil { + hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "") + return err + } + + n.Lock() + n.endpoints[eid] = endpoint + n.Unlock() + + if ifInfo.Address() == nil { + ifInfo.SetIPAddress(endpoint.addr) + } + + if macAddress == nil { + ifInfo.SetMacAddress(endpoint.macAddress) + } + + if err = d.storeUpdate(endpoint); err != nil { + logrus.Errorf("Failed to save endpoint %s to store: %v", endpoint.id[0:7], err) + } + + return nil +} + +func (d *driver) DeleteEndpoint(nid, eid string) error { + n, err := d.getNetwork(nid) + if err != nil { + return types.InternalMaskableErrorf("%s", err) + } + + ep, err := n.getEndpoint(eid) + if err != nil { + return err + } + + n.Lock() + delete(n.endpoints, eid) + n.Unlock() + + _, err = hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "") + if err != nil && err.Error() != errNotFound { + return err + } + + if err := d.storeDelete(ep); err != nil { + logrus.Warnf("Failed to remove windows endpoint %s from store: %v", ep.id[0:7], err) + } + return nil +} + +func (d *driver) EndpointOperInfo(nid, eid string) (map[string]interface{}, error) { + network, err := d.getNetwork(nid) + if err != nil { + return nil, err + } + + ep, err := network.getEndpoint(eid) + if err != nil { + return nil, err + } + + data := make(map[string]interface{}, 1) + if network.driver.name == "nat" { + data["AllowUnqualifiedDNSQuery"] = true + } + + data["hnsid"] = ep.profileID + if ep.epConnectivity.ExposedPorts != nil { + // Return a copy of the config data + epc := make([]types.TransportPort, 0, len(ep.epConnectivity.ExposedPorts)) + for _, tp := range ep.epConnectivity.ExposedPorts { + epc = append(epc, tp.GetCopy()) + } + data[netlabel.ExposedPorts] = epc + } + + if ep.portMapping != nil { + // Return a copy of the operational data + pmc := make([]types.PortBinding, 0, len(ep.portMapping)) + for _, pm := range ep.portMapping { + pmc = append(pmc, pm.GetCopy()) + } + data[netlabel.PortMap] = pmc + } + + if len(ep.macAddress) != 0 { + data[netlabel.MacAddress] = ep.macAddress + } + return data, nil +} + +// Join method is invoked when a Sandbox is attached to an endpoint. +func (d *driver) Join(nid, eid string, sboxKey string, jinfo driverapi.JoinInfo, options map[string]interface{}) error { + network, err := d.getNetwork(nid) + if err != nil { + return err + } + + // Ensure that the endpoint exists + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + err = jinfo.SetGateway(endpoint.gateway) + if err != nil { + return err + } + + endpoint.sandboxID = sboxKey + + err = hcsshim.HotAttachEndpoint(endpoint.sandboxID, endpoint.profileID) + if err != nil { + // If the container doesn't exist in HCS, don't treat hot add/remove as an error + if err != hcsshim.ErrComputeSystemDoesNotExist { + return err + } + } + + jinfo.DisableGatewayService() + return nil +} +
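For reference, everything CreateEndpoint tells HNS travels as JSON policy documents embedded in the endpoint request. A minimal, self-contained sketch of that wire format, using local stand-ins for the hcsshim policy types (field names assumed from the OutboundNatPolicy usage above, not copied from hcsshim itself):

package main

import (
	"encoding/json"
	"fmt"
)

// Policy and OutboundNatPolicy are local stand-ins for the hcsshim types
// marshaled in CreateEndpoint; the real definitions live in
// github.com/Microsoft/hcsshim.
type Policy struct {
	Type string `json:"Type"`
}

type OutboundNatPolicy struct {
	Policy
	Exceptions []string `json:"ExceptionList,omitempty"`
}

func main() {
	// Build the same kind of document CreateEndpoint appends to
	// endpointStruct.Policies before POSTing to HNS.
	b, err := json.Marshal(OutboundNatPolicy{
		Policy:     Policy{Type: "OutBoundNAT"},
		Exceptions: []string{"10.0.0.0/8"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"Type":"OutBoundNAT","ExceptionList":["10.0.0.0/8"]}
}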
+// Leave method is invoked when a Sandbox detaches from an endpoint. +func (d *driver) Leave(nid, eid string) error { + network, err := d.getNetwork(nid) + if err != nil { + return types.InternalMaskableErrorf("%s", err) + } + + // Ensure that the endpoint exists + endpoint, err := network.getEndpoint(eid) + if err != nil { + return err + } + + err = hcsshim.HotDetachEndpoint(endpoint.sandboxID, endpoint.profileID) + if err != nil { + // If the container doesn't exist in HCS, don't treat hot add/remove as an error + if err != hcsshim.ErrComputeSystemDoesNotExist { + return err + } + } + return nil +} + +func (d *driver) ProgramExternalConnectivity(nid, eid string, options map[string]interface{}) error { + return nil +} + +func (d *driver) RevokeExternalConnectivity(nid, eid string) error { + return nil +} + +func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data, ipV6Data []driverapi.IPAMData) (map[string]string, error) { + return nil, types.NotImplementedErrorf("not implemented") +} + +func (d *driver) NetworkFree(id string) error { + return types.NotImplementedErrorf("not implemented") +} + +func (d *driver) Type() string { + return d.name +} + +func (d *driver) IsBuiltIn() bool { + return true +} + +// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster +func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go new file mode 100644 index 0000000000..9f8a7b18f5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows_store.go @@ -0,0 +1,339 @@ +// +build windows + +package windows + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + windowsPrefix = "windows" + windowsEndpointPrefix = "windows-endpoint" +) + +func (d *driver) initStore(option map[string]interface{}) error { + if data, ok := option[netlabel.LocalKVClient]; ok { + var err error + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore configuration: %v", data) + } + d.store, err = datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return types.InternalErrorf("windows driver failed to initialize data store: %v", err) + } + + err = d.populateNetworks() + if err != nil { + return err + } + + err = d.populateEndpoints() + if err != nil { + return err + } + } + + return nil +} + +func (d *driver) populateNetworks() error { + kvol, err := d.store.List(datastore.Key(windowsPrefix), &networkConfiguration{Type: d.name}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get windows network configurations from store: %v", err) + } + + // It's normal for network configuration state to be empty. Just return.
+ if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ncfg := kvo.(*networkConfiguration) + if ncfg.Type != d.name { + continue + } + if err = d.createNetwork(ncfg); err != nil { + logrus.Warnf("could not create windows network for id %s hnsid %s while booting up from persistent state: %v", ncfg.ID, ncfg.HnsID, err) + } + logrus.Debugf("Network %v (%s) restored", d.name, ncfg.ID[0:7]) + } + + return nil +} + +func (d *driver) populateEndpoints() error { + kvol, err := d.store.List(datastore.Key(windowsEndpointPrefix), &hnsEndpoint{Type: d.name}) + if err != nil && err != datastore.ErrKeyNotFound { + return fmt.Errorf("failed to get endpoints from store: %v", err) + } + + if err == datastore.ErrKeyNotFound { + return nil + } + + for _, kvo := range kvol { + ep := kvo.(*hnsEndpoint) + if ep.Type != d.name { + continue + } + n, ok := d.networks[ep.nid] + if !ok { + logrus.Debugf("Network (%s) not found for restored endpoint (%s)", ep.nid[0:7], ep.id[0:7]) + logrus.Debugf("Deleting stale endpoint (%s) from store", ep.id[0:7]) + if err := d.storeDelete(ep); err != nil { + logrus.Debugf("Failed to delete stale endpoint (%s) from store", ep.id[0:7]) + } + continue + } + n.endpoints[ep.id] = ep + logrus.Debugf("Endpoint (%s) restored to network (%s)", ep.id[0:7], ep.nid[0:7]) + } + + return nil +} + +func (d *driver) storeUpdate(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Warnf("store not initialized. kv object %s is not added to the store", datastore.Key(kvObject.Key()...)) + return nil + } + + if err := d.store.PutObjectAtomic(kvObject); err != nil { + return fmt.Errorf("failed to update store for object type %T: %v", kvObject, err) + } + + return nil +} + +func (d *driver) storeDelete(kvObject datastore.KVObject) error { + if d.store == nil { + logrus.Debugf("store not initialized. 
kv object %s is not deleted from store", datastore.Key(kvObject.Key()...)) + return nil + } + +retry: + if err := d.store.DeleteObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + if err := d.store.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { + return fmt.Errorf("could not update the kvobject to latest when trying to delete: %v", err) + } + goto retry + } + return err + } + + return nil +} + +func (ncfg *networkConfiguration) MarshalJSON() ([]byte, error) { + nMap := make(map[string]interface{}) + + nMap["ID"] = ncfg.ID + nMap["Type"] = ncfg.Type + nMap["Name"] = ncfg.Name + nMap["HnsID"] = ncfg.HnsID + nMap["VLAN"] = ncfg.VLAN + nMap["VSID"] = ncfg.VSID + nMap["DNSServers"] = ncfg.DNSServers + nMap["DNSSuffix"] = ncfg.DNSSuffix + nMap["SourceMac"] = ncfg.SourceMac + nMap["NetworkAdapterName"] = ncfg.NetworkAdapterName + + return json.Marshal(nMap) +} + +func (ncfg *networkConfiguration) UnmarshalJSON(b []byte) error { + var ( + err error + nMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &nMap); err != nil { + return err + } + + ncfg.ID = nMap["ID"].(string) + ncfg.Type = nMap["Type"].(string) + ncfg.Name = nMap["Name"].(string) + ncfg.HnsID = nMap["HnsID"].(string) + ncfg.VLAN = uint(nMap["VLAN"].(float64)) + ncfg.VSID = uint(nMap["VSID"].(float64)) + ncfg.DNSServers = nMap["DNSServers"].(string) + ncfg.DNSSuffix = nMap["DNSSuffix"].(string) + ncfg.SourceMac = nMap["SourceMac"].(string) + ncfg.NetworkAdapterName = nMap["NetworkAdapterName"].(string) + return nil +} + +func (ncfg *networkConfiguration) Key() []string { + return []string{windowsPrefix + ncfg.Type, ncfg.ID} +} + +func (ncfg *networkConfiguration) KeyPrefix() []string { + return []string{windowsPrefix + ncfg.Type} +} + +func (ncfg *networkConfiguration) Value() []byte { + b, err := json.Marshal(ncfg) + if err != nil { + return nil + } + return b +} + +func (ncfg *networkConfiguration) SetValue(value []byte) error { + return json.Unmarshal(value, ncfg) +} + +func (ncfg *networkConfiguration) Index() uint64 { + return ncfg.dbIndex +} + +func (ncfg *networkConfiguration) SetIndex(index uint64) { + ncfg.dbIndex = index + ncfg.dbExists = true +} + +func (ncfg *networkConfiguration) Exists() bool { + return ncfg.dbExists +} + +func (ncfg *networkConfiguration) Skip() bool { + return false +} + +func (ncfg *networkConfiguration) New() datastore.KVObject { + return &networkConfiguration{Type: ncfg.Type} +} + +func (ncfg *networkConfiguration) CopyTo(o datastore.KVObject) error { + dstNcfg := o.(*networkConfiguration) + *dstNcfg = *ncfg + return nil +} + +func (ncfg *networkConfiguration) DataScope() string { + return datastore.LocalScope +} + +func (ep *hnsEndpoint) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + epMap["id"] = ep.id + epMap["nid"] = ep.nid + epMap["Type"] = ep.Type + epMap["profileID"] = ep.profileID + epMap["MacAddress"] = ep.macAddress.String() + if ep.addr.IP != nil { + epMap["Addr"] = ep.addr.String() + } + if ep.gateway != nil { + epMap["gateway"] = ep.gateway.String() + } + epMap["epOption"] = ep.epOption + epMap["epConnectivity"] = ep.epConnectivity + epMap["PortMapping"] = ep.portMapping + + return json.Marshal(epMap) +} + +func (ep *hnsEndpoint) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + + if err = json.Unmarshal(b, &epMap); err != nil { + return fmt.Errorf("Failed to unmarshal to endpoint: %v", err) + } + if v, ok := epMap["MacAddress"]; ok { + if ep.macAddress, err = 
net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint MAC address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["Addr"]; ok { + if ep.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint IPv4 address (%s) after json unmarshal: %v", v.(string), err) + } + } + if v, ok := epMap["gateway"]; ok { + ep.gateway = net.ParseIP(v.(string)) + } + ep.id = epMap["id"].(string) + ep.Type = epMap["Type"].(string) + ep.nid = epMap["nid"].(string) + ep.profileID = epMap["profileID"].(string) + d, _ := json.Marshal(epMap["epOption"]) + if err := json.Unmarshal(d, &ep.epOption); err != nil { + logrus.Warnf("Failed to decode endpoint container config %v", err) + } + d, _ = json.Marshal(epMap["epConnectivity"]) + if err := json.Unmarshal(d, &ep.epConnectivity); err != nil { + logrus.Warnf("Failed to decode endpoint external connectivity configuration %v", err) + } + d, _ = json.Marshal(epMap["PortMapping"]) + if err := json.Unmarshal(d, &ep.portMapping); err != nil { + logrus.Warnf("Failed to decode endpoint port mapping %v", err) + } + + return nil +} + +func (ep *hnsEndpoint) Key() []string { + return []string{windowsEndpointPrefix + ep.Type, ep.id} +} + +func (ep *hnsEndpoint) KeyPrefix() []string { + return []string{windowsEndpointPrefix + ep.Type} +} + +func (ep *hnsEndpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *hnsEndpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *hnsEndpoint) Index() uint64 { + return ep.dbIndex +} + +func (ep *hnsEndpoint) SetIndex(index uint64) { + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *hnsEndpoint) Exists() bool { + return ep.dbExists +} + +func (ep *hnsEndpoint) Skip() bool { + return false +} + +func (ep *hnsEndpoint) New() datastore.KVObject { + return &hnsEndpoint{Type: ep.Type} +} + +func (ep *hnsEndpoint) CopyTo(o datastore.KVObject) error { + dstEp := o.(*hnsEndpoint) + *dstEp = *ep + return nil +} + +func (ep *hnsEndpoint) DataScope() string { + return datastore.LocalScope +} diff --git a/vendor/github.com/docker/libnetwork/drivers_experimental_linux.go b/vendor/github.com/docker/libnetwork/drivers_experimental_linux.go new file mode 100644 index 0000000000..4f540d4a87 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers_experimental_linux.go @@ -0,0 +1,9 @@ +package libnetwork + +import "github.com/docker/libnetwork/drivers/ipvlan" + +func additionalDrivers() []initializer { + return []initializer{ + {ipvlan.Init, "ipvlan"}, + } +} diff --git a/vendor/github.com/docker/libnetwork/drivers_freebsd.go b/vendor/github.com/docker/libnetwork/drivers_freebsd.go new file mode 100644 index 0000000000..d117c25780 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers_freebsd.go @@ -0,0 +1,13 @@ +package libnetwork + +import ( + "github.com/docker/libnetwork/drivers/null" + "github.com/docker/libnetwork/drivers/remote" +) + +func getInitializers(experimental bool) []initializer { + return []initializer{ + {null.Init, "null"}, + {remote.Init, "remote"}, + } +} diff --git a/vendor/github.com/docker/libnetwork/drivers_ipam.go b/vendor/github.com/docker/libnetwork/drivers_ipam.go new file mode 100644 index 0000000000..f47c01c714 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers_ipam.go @@ -0,0 +1,25 @@ +package libnetwork + +import ( + "github.com/docker/libnetwork/drvregistry" + 
"github.com/docker/libnetwork/ipamapi" + builtinIpam "github.com/docker/libnetwork/ipams/builtin" + nullIpam "github.com/docker/libnetwork/ipams/null" + remoteIpam "github.com/docker/libnetwork/ipams/remote" + "github.com/docker/libnetwork/ipamutils" +) + +func initIPAMDrivers(r *drvregistry.DrvRegistry, lDs, gDs interface{}, addressPool []*ipamutils.NetworkToSplit) error { + builtinIpam.SetDefaultIPAddressPool(addressPool) + for _, fn := range [](func(ipamapi.Callback, interface{}, interface{}) error){ + builtinIpam.Init, + remoteIpam.Init, + nullIpam.Init, + } { + if err := fn(r, lDs, gDs); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/drivers_linux.go b/vendor/github.com/docker/libnetwork/drivers_linux.go new file mode 100644 index 0000000000..c53d516fa6 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers_linux.go @@ -0,0 +1,26 @@ +package libnetwork + +import ( + "github.com/docker/libnetwork/drivers/bridge" + "github.com/docker/libnetwork/drivers/host" + "github.com/docker/libnetwork/drivers/macvlan" + "github.com/docker/libnetwork/drivers/null" + "github.com/docker/libnetwork/drivers/overlay" + "github.com/docker/libnetwork/drivers/remote" +) + +func getInitializers(experimental bool) []initializer { + in := []initializer{ + {bridge.Init, "bridge"}, + {host.Init, "host"}, + {macvlan.Init, "macvlan"}, + {null.Init, "null"}, + {remote.Init, "remote"}, + {overlay.Init, "overlay"}, + } + + if experimental { + in = append(in, additionalDrivers()...) + } + return in +} diff --git a/vendor/github.com/docker/libnetwork/drivers_windows.go b/vendor/github.com/docker/libnetwork/drivers_windows.go new file mode 100644 index 0000000000..a037c16efb --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drivers_windows.go @@ -0,0 +1,21 @@ +package libnetwork + +import ( + "github.com/docker/libnetwork/drivers/null" + "github.com/docker/libnetwork/drivers/remote" + "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/drivers/windows/overlay" +) + +func getInitializers(experimental bool) []initializer { + return []initializer{ + {null.Init, "null"}, + {overlay.Init, "overlay"}, + {remote.Init, "remote"}, + {windows.GetInit("transparent"), "transparent"}, + {windows.GetInit("l2bridge"), "l2bridge"}, + {windows.GetInit("l2tunnel"), "l2tunnel"}, + {windows.GetInit("nat"), "nat"}, + {windows.GetInit("ics"), "ics"}, + } +} diff --git a/vendor/github.com/docker/libnetwork/drvregistry/drvregistry.go b/vendor/github.com/docker/libnetwork/drvregistry/drvregistry.go new file mode 100644 index 0000000000..b3fe9bafcf --- /dev/null +++ b/vendor/github.com/docker/libnetwork/drvregistry/drvregistry.go @@ -0,0 +1,228 @@ +package drvregistry + +import ( + "errors" + "fmt" + "strings" + "sync" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/types" +) + +type driverData struct { + driver driverapi.Driver + capability driverapi.Capability +} + +type ipamData struct { + driver ipamapi.Ipam + capability *ipamapi.Capability + // default address spaces are provided by ipam driver at registration time + defaultLocalAddressSpace, defaultGlobalAddressSpace string +} + +type driverTable map[string]*driverData +type ipamTable map[string]*ipamData + +// DrvRegistry holds the registry of all network drivers and IPAM drivers that it knows about. 
+type DrvRegistry struct { + sync.Mutex + drivers driverTable + ipamDrivers ipamTable + dfn DriverNotifyFunc + ifn IPAMNotifyFunc + pluginGetter plugingetter.PluginGetter +} + +// Functors definition + +// InitFunc defines the driver initialization function signature. +type InitFunc func(driverapi.DriverCallback, map[string]interface{}) error + +// IPAMWalkFunc defines the IPAM driver table walker function signature. +type IPAMWalkFunc func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) bool + +// DriverWalkFunc defines the network driver table walker function signature. +type DriverWalkFunc func(name string, driver driverapi.Driver, capability driverapi.Capability) bool + +// IPAMNotifyFunc defines the notify function signature when a new IPAM driver gets registered. +type IPAMNotifyFunc func(name string, driver ipamapi.Ipam, cap *ipamapi.Capability) error + +// DriverNotifyFunc defines the notify function signature when a new network driver gets registered. +type DriverNotifyFunc func(name string, driver driverapi.Driver, capability driverapi.Capability) error + +// New returns a new driver registry handle. +func New(lDs, gDs interface{}, dfn DriverNotifyFunc, ifn IPAMNotifyFunc, pg plugingetter.PluginGetter) (*DrvRegistry, error) { + r := &DrvRegistry{ + drivers: make(driverTable), + ipamDrivers: make(ipamTable), + dfn: dfn, + ifn: ifn, + pluginGetter: pg, + } + + return r, nil +} + +// AddDriver adds a network driver to the registry. +func (r *DrvRegistry) AddDriver(ntype string, fn InitFunc, config map[string]interface{}) error { + return fn(r, config) +} + +// WalkIPAMs walks the IPAM drivers registered in the registry and invokes the passed walk function on each one of them. +func (r *DrvRegistry) WalkIPAMs(ifn IPAMWalkFunc) { + type ipamVal struct { + name string + data *ipamData + } + + r.Lock() + ivl := make([]ipamVal, 0, len(r.ipamDrivers)) + for k, v := range r.ipamDrivers { + ivl = append(ivl, ipamVal{name: k, data: v}) + } + r.Unlock() + + for _, iv := range ivl { + if ifn(iv.name, iv.data.driver, iv.data.capability) { + break + } + } +} + +// WalkDrivers walks the network drivers registered in the registry and invokes the passed walk function on each one of them. +func (r *DrvRegistry) WalkDrivers(dfn DriverWalkFunc) { + type driverVal struct { + name string + data *driverData + } + + r.Lock() + dvl := make([]driverVal, 0, len(r.drivers)) + for k, v := range r.drivers { + dvl = append(dvl, driverVal{name: k, data: v}) + } + r.Unlock() + + for _, dv := range dvl { + if dfn(dv.name, dv.data.driver, dv.data.capability) { + break + } + } +} + +// Driver returns the actual network driver instance and its capability registered under the passed name. +func (r *DrvRegistry) Driver(name string) (driverapi.Driver, *driverapi.Capability) { + r.Lock() + defer r.Unlock() + + d, ok := r.drivers[name] + if !ok { + return nil, nil + } + + return d.driver, &d.capability +} + +// IPAM returns the actual IPAM driver instance and its capability registered under the passed name. +func (r *DrvRegistry) IPAM(name string) (ipamapi.Ipam, *ipamapi.Capability) { + r.Lock() + defer r.Unlock() + + i, ok := r.ipamDrivers[name] + if !ok { + return nil, nil + } + + return i.driver, i.capability +} +
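Note that both walkers above snapshot the table under the mutex and only then invoke the callback, so a callback may safely re-enter registry methods that take the same lock. A self-contained sketch of that pattern (registry and walk are hypothetical stand-ins for DrvRegistry and WalkDrivers):

package main

import (
	"fmt"
	"sync"
)

// registry is a hypothetical stand-in for DrvRegistry: a mutex-guarded map
// that is only walked via callback.
type registry struct {
	sync.Mutex
	drivers map[string]string
}

// walk copies the map under the lock, then invokes fn outside it, so fn can
// call back into methods that take the same mutex without deadlocking.
func (r *registry) walk(fn func(name, value string) bool) {
	type entry struct{ name, value string }

	r.Lock()
	snapshot := make([]entry, 0, len(r.drivers))
	for k, v := range r.drivers {
		snapshot = append(snapshot, entry{k, v})
	}
	r.Unlock()

	for _, e := range snapshot {
		if fn(e.name, e.value) { // returning true stops the walk early
			break
		}
	}
}

func main() {
	r := &registry{drivers: map[string]string{"bridge": "builtin", "remote": "plugin"}}
	r.walk(func(name, value string) bool {
		fmt.Println(name, value)
		return false
	})
}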
+// IPAMDefaultAddressSpaces returns the default address space strings for the passed IPAM driver name. +func (r *DrvRegistry) IPAMDefaultAddressSpaces(name string) (string, string, error) { + r.Lock() + defer r.Unlock() + + i, ok := r.ipamDrivers[name] + if !ok { + return "", "", fmt.Errorf("ipam %s not found", name) + } + + return i.defaultLocalAddressSpace, i.defaultGlobalAddressSpace, nil +} + +// GetPluginGetter returns the plugingetter +func (r *DrvRegistry) GetPluginGetter() plugingetter.PluginGetter { + return r.pluginGetter +} + +// RegisterDriver registers the network driver when it gets discovered. +func (r *DrvRegistry) RegisterDriver(ntype string, driver driverapi.Driver, capability driverapi.Capability) error { + if strings.TrimSpace(ntype) == "" { + return errors.New("network type string cannot be empty") + } + + r.Lock() + dd, ok := r.drivers[ntype] + r.Unlock() + + if ok && dd.driver.IsBuiltIn() { + return driverapi.ErrActiveRegistration(ntype) + } + + if r.dfn != nil { + if err := r.dfn(ntype, driver, capability); err != nil { + return err + } + } + + dData := &driverData{driver, capability} + + r.Lock() + r.drivers[ntype] = dData + r.Unlock() + + return nil +} + +func (r *DrvRegistry) registerIpamDriver(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error { + if strings.TrimSpace(name) == "" { + return errors.New("ipam driver name string cannot be empty") + } + + r.Lock() + dd, ok := r.ipamDrivers[name] + r.Unlock() + if ok && dd.driver.IsBuiltIn() { + return types.ForbiddenErrorf("ipam driver %q already registered", name) + } + + locAS, glbAS, err := driver.GetDefaultAddressSpaces() + if err != nil { + return types.InternalErrorf("ipam driver %q failed to return default address spaces: %v", name, err) + } + + if r.ifn != nil { + if err := r.ifn(name, driver, caps); err != nil { + return err + } + } + + r.Lock() + r.ipamDrivers[name] = &ipamData{driver: driver, defaultLocalAddressSpace: locAS, defaultGlobalAddressSpace: glbAS, capability: caps} + r.Unlock() + + return nil +} + +// RegisterIpamDriver registers the IPAM driver discovered with default capabilities. +func (r *DrvRegistry) RegisterIpamDriver(name string, driver ipamapi.Ipam) error { + return r.registerIpamDriver(name, driver, &ipamapi.Capability{}) +} + +// RegisterIpamDriverWithCapabilities registers the IPAM driver discovered with specified capabilities. +func (r *DrvRegistry) RegisterIpamDriverWithCapabilities(name string, driver ipamapi.Ipam, caps *ipamapi.Capability) error { + return r.registerIpamDriver(name, driver, caps) +} diff --git a/vendor/github.com/docker/libnetwork/endpoint.go b/vendor/github.com/docker/libnetwork/endpoint.go new file mode 100644 index 0000000000..b2cb9b1a78 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/endpoint.go @@ -0,0 +1,1217 @@ +package libnetwork + +import ( + "container/heap" + "encoding/json" + "fmt" + "net" + "strings" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// Endpoint represents a logical connection between a network and a sandbox. +type Endpoint interface { + // ID returns a system-generated id for this endpoint. + ID() string + + // Name returns the name of this endpoint. + Name() string + + // Network returns the name of the network to which this endpoint is attached. + Network() string + + // Join joins the sandbox to the endpoint and populates into the sandbox + // the network resources allocated for the endpoint.
+ Join(sandbox Sandbox, options ...EndpointOption) error + + // Leave detaches the network resources populated in the sandbox. + Leave(sandbox Sandbox, options ...EndpointOption) error + + // Info returns certain operational data belonging to this endpoint + Info() EndpointInfo + + // DriverInfo returns a collection of driver operational data related to this endpoint retrieved from the driver + DriverInfo() (map[string]interface{}, error) + + // Delete detaches this endpoint from the network and deletes it. + Delete(force bool) error +} + +// EndpointOption is an option setter function type used to pass various options to Network +// and Endpoint interface methods. The various setter functions of type EndpointOption are +// provided by libnetwork, they look like Option[...](...) +type EndpointOption func(ep *endpoint) + +type endpoint struct { + name string + id string + network *network + iface *endpointInterface + joinInfo *endpointJoinInfo + sandboxID string + locator string + exposedPorts []types.TransportPort + anonymous bool + disableResolution bool + generic map[string]interface{} + joinLeaveDone chan struct{} + prefAddress net.IP + prefAddressV6 net.IP + ipamOptions map[string]string + aliases map[string]string + myAliases []string + svcID string + svcName string + virtualIP net.IP + svcAliases []string + ingressPorts []*PortConfig + dbIndex uint64 + dbExists bool + serviceEnabled bool + loadBalancer bool + sync.Mutex +} + +func (ep *endpoint) MarshalJSON() ([]byte, error) { + ep.Lock() + defer ep.Unlock() + + epMap := make(map[string]interface{}) + epMap["name"] = ep.name + epMap["id"] = ep.id + epMap["ep_iface"] = ep.iface + epMap["joinInfo"] = ep.joinInfo + epMap["exposed_ports"] = ep.exposedPorts + if ep.generic != nil { + epMap["generic"] = ep.generic + } + epMap["sandbox"] = ep.sandboxID + epMap["locator"] = ep.locator + epMap["anonymous"] = ep.anonymous + epMap["disableResolution"] = ep.disableResolution + epMap["myAliases"] = ep.myAliases + epMap["svcName"] = ep.svcName + epMap["svcID"] = ep.svcID + epMap["virtualIP"] = ep.virtualIP.String() + epMap["ingressPorts"] = ep.ingressPorts + epMap["svcAliases"] = ep.svcAliases + epMap["loadBalancer"] = ep.loadBalancer + + return json.Marshal(epMap) +} + +func (ep *endpoint) UnmarshalJSON(b []byte) (err error) { + ep.Lock() + defer ep.Unlock() + + var epMap map[string]interface{} + if err := json.Unmarshal(b, &epMap); err != nil { + return err + } + ep.name = epMap["name"].(string) + ep.id = epMap["id"].(string) + + ib, _ := json.Marshal(epMap["ep_iface"]) + json.Unmarshal(ib, &ep.iface) + + jb, _ := json.Marshal(epMap["joinInfo"]) + json.Unmarshal(jb, &ep.joinInfo) + + tb, _ := json.Marshal(epMap["exposed_ports"]) + var tPorts []types.TransportPort + json.Unmarshal(tb, &tPorts) + ep.exposedPorts = tPorts + + cb, _ := json.Marshal(epMap["sandbox"]) + json.Unmarshal(cb, &ep.sandboxID) + + if v, ok := epMap["generic"]; ok { + ep.generic = v.(map[string]interface{}) + + if opt, ok := ep.generic[netlabel.PortMap]; ok { + pblist := []types.PortBinding{} + + for i := 0; i < len(opt.([]interface{})); i++ { + pb := types.PortBinding{} + tmp := opt.([]interface{})[i].(map[string]interface{}) + + bytes, err := json.Marshal(tmp) + if err != nil { + logrus.Error(err) + break + } + err = json.Unmarshal(bytes, &pb) + if err != nil { + logrus.Error(err) + break + } + pblist = append(pblist, pb) + } + ep.generic[netlabel.PortMap] = pblist + } + + if opt, ok := ep.generic[netlabel.ExposedPorts]; ok { + tplist := []types.TransportPort{} + + for i := 0; i <
 len(opt.([]interface{})); i++ { + tp := types.TransportPort{} + tmp := opt.([]interface{})[i].(map[string]interface{}) + + bytes, err := json.Marshal(tmp) + if err != nil { + logrus.Error(err) + break + } + err = json.Unmarshal(bytes, &tp) + if err != nil { + logrus.Error(err) + break + } + tplist = append(tplist, tp) + } + ep.generic[netlabel.ExposedPorts] = tplist + + } + } + + if v, ok := epMap["anonymous"]; ok { + ep.anonymous = v.(bool) + } + if v, ok := epMap["disableResolution"]; ok { + ep.disableResolution = v.(bool) + } + if l, ok := epMap["locator"]; ok { + ep.locator = l.(string) + } + + if sn, ok := epMap["svcName"]; ok { + ep.svcName = sn.(string) + } + + if si, ok := epMap["svcID"]; ok { + ep.svcID = si.(string) + } + + if vip, ok := epMap["virtualIP"]; ok { + ep.virtualIP = net.ParseIP(vip.(string)) + } + + if v, ok := epMap["loadBalancer"]; ok { + ep.loadBalancer = v.(bool) + } + + sal, _ := json.Marshal(epMap["svcAliases"]) + var svcAliases []string + json.Unmarshal(sal, &svcAliases) + ep.svcAliases = svcAliases + + pc, _ := json.Marshal(epMap["ingressPorts"]) + var ingressPorts []*PortConfig + json.Unmarshal(pc, &ingressPorts) + ep.ingressPorts = ingressPorts + + ma, _ := json.Marshal(epMap["myAliases"]) + var myAliases []string + json.Unmarshal(ma, &myAliases) + ep.myAliases = myAliases + return nil +} + +func (ep *endpoint) New() datastore.KVObject { + return &endpoint{network: ep.getNetwork()} +} + +func (ep *endpoint) CopyTo(o datastore.KVObject) error { + ep.Lock() + defer ep.Unlock() + + dstEp := o.(*endpoint) + dstEp.name = ep.name + dstEp.id = ep.id + dstEp.sandboxID = ep.sandboxID + dstEp.locator = ep.locator + dstEp.dbIndex = ep.dbIndex + dstEp.dbExists = ep.dbExists + dstEp.anonymous = ep.anonymous + dstEp.disableResolution = ep.disableResolution + dstEp.svcName = ep.svcName + dstEp.svcID = ep.svcID + dstEp.virtualIP = ep.virtualIP + dstEp.loadBalancer = ep.loadBalancer + + dstEp.svcAliases = make([]string, len(ep.svcAliases)) + copy(dstEp.svcAliases, ep.svcAliases) + + dstEp.ingressPorts = make([]*PortConfig, len(ep.ingressPorts)) + copy(dstEp.ingressPorts, ep.ingressPorts) + + if ep.iface != nil { + dstEp.iface = &endpointInterface{} + ep.iface.CopyTo(dstEp.iface) + } + + if ep.joinInfo != nil { + dstEp.joinInfo = &endpointJoinInfo{} + ep.joinInfo.CopyTo(dstEp.joinInfo) + } + + dstEp.exposedPorts = make([]types.TransportPort, len(ep.exposedPorts)) + copy(dstEp.exposedPorts, ep.exposedPorts) + + dstEp.myAliases = make([]string, len(ep.myAliases)) + copy(dstEp.myAliases, ep.myAliases) + + dstEp.generic = options.Generic{} + for k, v := range ep.generic { + dstEp.generic[k] = v + } + + return nil +} + +func (ep *endpoint) ID() string { + ep.Lock() + defer ep.Unlock() + + return ep.id +} + +func (ep *endpoint) Name() string { + ep.Lock() + defer ep.Unlock() + + return ep.name +} + +func (ep *endpoint) MyAliases() []string { + ep.Lock() + defer ep.Unlock() + + return ep.myAliases +} + +func (ep *endpoint) Network() string { + if ep.network == nil { + return "" + } + + return ep.network.name +} + +func (ep *endpoint) isAnonymous() bool { + ep.Lock() + defer ep.Unlock() + return ep.anonymous +} + +// isServiceEnabled checks if the service is enabled on the endpoint +func (ep *endpoint) isServiceEnabled() bool { + ep.Lock() + defer ep.Unlock() + return ep.serviceEnabled +} + +// enableService marks the service as enabled on the endpoint +func (ep *endpoint) enableService() { + ep.Lock() + defer ep.Unlock() + ep.serviceEnabled = true +} + +// disableService disables the service on the
endpoint +func (ep *endpoint) disableService() { + ep.Lock() + defer ep.Unlock() + ep.serviceEnabled = false +} + +func (ep *endpoint) needResolver() bool { + ep.Lock() + defer ep.Unlock() + return !ep.disableResolution +} + +// endpoint Key structure : endpoint/network-id/endpoint-id +func (ep *endpoint) Key() []string { + if ep.network == nil { + return nil + } + + return []string{datastore.EndpointKeyPrefix, ep.network.id, ep.id} +} + +func (ep *endpoint) KeyPrefix() []string { + if ep.network == nil { + return nil + } + + return []string{datastore.EndpointKeyPrefix, ep.network.id} +} + +func (ep *endpoint) networkIDFromKey(key string) (string, error) { + // endpoint Key structure : docker/libnetwork/endpoint/${network-id}/${endpoint-id} + // it's an invalid key if the key doesn't have all the 5 key elements above + keyElements := strings.Split(key, "/") + if !strings.HasPrefix(key, datastore.Key(datastore.EndpointKeyPrefix)) || len(keyElements) < 5 { + return "", fmt.Errorf("invalid endpoint key : %v", key) + } + // network-id is placed at index=3. pls refer to endpoint.Key() method + return strings.Split(key, "/")[3], nil +} + +func (ep *endpoint) Value() []byte { + b, err := json.Marshal(ep) + if err != nil { + return nil + } + return b +} + +func (ep *endpoint) SetValue(value []byte) error { + return json.Unmarshal(value, ep) +} + +func (ep *endpoint) Index() uint64 { + ep.Lock() + defer ep.Unlock() + return ep.dbIndex +} + +func (ep *endpoint) SetIndex(index uint64) { + ep.Lock() + defer ep.Unlock() + ep.dbIndex = index + ep.dbExists = true +} + +func (ep *endpoint) Exists() bool { + ep.Lock() + defer ep.Unlock() + return ep.dbExists +} + +func (ep *endpoint) Skip() bool { + return ep.getNetwork().Skip() +} + +func (ep *endpoint) processOptions(options ...EndpointOption) { + ep.Lock() + defer ep.Unlock() + + for _, opt := range options { + if opt != nil { + opt(ep) + } + } +} + +func (ep *endpoint) getNetwork() *network { + ep.Lock() + defer ep.Unlock() + + return ep.network +} + +func (ep *endpoint) getNetworkFromStore() (*network, error) { + if ep.network == nil { + return nil, fmt.Errorf("invalid network object in endpoint %s", ep.Name()) + } + + return ep.network.getController().getNetworkFromStore(ep.network.id) +} + +func (ep *endpoint) Join(sbox Sandbox, options ...EndpointOption) error { + if sbox == nil { + return types.BadRequestErrorf("endpoint cannot be joined by nil container") + } + + sb, ok := sbox.(*sandbox) + if !ok { + return types.BadRequestErrorf("not a valid Sandbox interface") + } + + sb.joinLeaveStart() + defer sb.joinLeaveEnd() + + return ep.sbJoin(sb, options...) +} + +func (ep *endpoint) sbJoin(sb *sandbox, options ...EndpointOption) (err error) { + n, err := ep.getNetworkFromStore() + if err != nil { + return fmt.Errorf("failed to get network from store during join: %v", err) + } + + ep, err = n.getEndpointFromStore(ep.ID()) + if err != nil { + return fmt.Errorf("failed to get endpoint from store during join: %v", err) + } + + ep.Lock() + if ep.sandboxID != "" { + ep.Unlock() + return types.ForbiddenErrorf("another container is attached to the same network endpoint") + } + ep.network = n + ep.sandboxID = sb.ID() + ep.joinInfo = &endpointJoinInfo{} + epid := ep.id + ep.Unlock() + defer func() { + if err != nil { + ep.Lock() + ep.sandboxID = "" + ep.Unlock() + } + }() + + nid := n.ID() + + ep.processOptions(options...) 
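+ // From here on the join is transactional: each step that mutates driver, store, or cluster state registers a deferred rollback that runs only if a later step fails, so a failed join unwinds in reverse order.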
+ + d, err := n.driver(true) + if err != nil { + return fmt.Errorf("failed to get driver during join: %v", err) + } + + err = d.Join(nid, epid, sb.Key(), ep, sb.Labels()) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := d.Leave(nid, epid); e != nil { + logrus.Warnf("driver leave failed while rolling back join: %v", e) + } + } + }() + + // Watch for service records + if !n.getController().isAgent() { + n.getController().watchSvcRecord(ep) + } + + if doUpdateHostsFile(n, sb) { + address := "" + if ip := ep.getFirstInterfaceAddress(); ip != nil { + address = ip.String() + } + if err = sb.updateHostsFile(address); err != nil { + return err + } + } + if err = sb.updateDNS(n.enableIPv6); err != nil { + return err + } + + // Current endpoint providing external connectivity for the sandbox + extEp := sb.getGatewayEndpoint() + + sb.Lock() + heap.Push(&sb.endpoints, ep) + sb.Unlock() + defer func() { + if err != nil { + sb.removeEndpoint(ep) + } + }() + + if err = sb.populateNetworkResources(ep); err != nil { + return err + } + + if err = n.getController().updateToStore(ep); err != nil { + return err + } + + if err = ep.addDriverInfoToCluster(); err != nil { + return err + } + + defer func() { + if err != nil { + if e := ep.deleteDriverInfoFromCluster(); e != nil { + logrus.Errorf("Could not delete endpoint state for endpoint %s from cluster on join failure: %v", ep.Name(), e) + } + } + }() + + if sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil { + return sb.setupDefaultGW() + } + + moveExtConn := sb.getGatewayEndpoint() != extEp + + if moveExtConn { + if extEp != nil { + logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) + extN, err := extEp.getNetworkFromStore() + if err != nil { + return fmt.Errorf("failed to get network from store for revoking external connectivity during join: %v", err) + } + extD, err := extN.driver(true) + if err != nil { + return fmt.Errorf("failed to get driver for revoking external connectivity during join: %v", err) + } + if err = extD.RevokeExternalConnectivity(extEp.network.ID(), extEp.ID()); err != nil { + return types.InternalErrorf( + "driver failed revoking external connectivity on endpoint %s (%s): %v", + extEp.Name(), extEp.ID(), err) + } + defer func() { + if err != nil { + if e := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); e != nil { + logrus.Warnf("Failed to roll-back external connectivity on endpoint %s (%s): %v", + extEp.Name(), extEp.ID(), e) + } + } + }() + } + if !n.internal { + logrus.Debugf("Programming external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) + if err = d.ProgramExternalConnectivity(n.ID(), ep.ID(), sb.Labels()); err != nil { + return types.InternalErrorf( + "driver failed programming external connectivity on endpoint %s (%s): %v", + ep.Name(), ep.ID(), err) + } + } + + } + + if !sb.needDefaultGW() { + if e := sb.clearDefaultGW(); e != nil { + logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", + sb.ID(), sb.ContainerID(), e) + } + } + + return nil +} + +func doUpdateHostsFile(n *network, sb *sandbox) bool { + return !n.ingress && n.Name() != libnGWNetwork +} + +func (ep *endpoint) rename(name string) error { + var ( + err error + netWatch *netWatch + ok bool + ) + + n := ep.getNetwork() + if n == nil { + return fmt.Errorf("network not connected for ep %q", ep.name) + } + + c := n.getController() + + sb, ok := ep.getSandbox() + if !ok { + logrus.Warnf("rename for %s aborted, 
sandbox %s is no longer present", ep.ID(), ep.sandboxID) + return nil + } + + if c.isAgent() { + if err = ep.deleteServiceInfoFromCluster(sb, true, "rename"); err != nil { + return types.InternalErrorf("Could not delete service state for endpoint %s from cluster on rename: %v", ep.Name(), err) + } + } else { + c.Lock() + netWatch, ok = c.nmap[n.ID()] + c.Unlock() + if !ok { + return fmt.Errorf("watch null for network %q", n.Name()) + } + n.updateSvcRecord(ep, c.getLocalEps(netWatch), false) + } + + oldName := ep.name + oldAnonymous := ep.anonymous + ep.name = name + ep.anonymous = false + + if c.isAgent() { + if err = ep.addServiceInfoToCluster(sb); err != nil { + return types.InternalErrorf("Could not add service state for endpoint %s to cluster on rename: %v", ep.Name(), err) + } + defer func() { + if err != nil { + ep.deleteServiceInfoFromCluster(sb, true, "rename") + ep.name = oldName + ep.anonymous = oldAnonymous + ep.addServiceInfoToCluster(sb) + } + }() + } else { + n.updateSvcRecord(ep, c.getLocalEps(netWatch), true) + defer func() { + if err != nil { + n.updateSvcRecord(ep, c.getLocalEps(netWatch), false) + ep.name = oldName + ep.anonymous = oldAnonymous + n.updateSvcRecord(ep, c.getLocalEps(netWatch), true) + } + }() + } + + // Update the store with the updated name + if err = c.updateToStore(ep); err != nil { + return err + } + // After the name change do a dummy endpoint count update to + // trigger the service record update in the peer nodes + + // Ignore the error because an updateStore failure for EpCnt is + // benign. Besides there is no meaningful recovery that + // we can do. When the cluster recovers subsequent EpCnt update + // will force the peers to get the correct EP name. + n.getEpCnt().updateStore() + + return err +} + +func (ep *endpoint) hasInterface(iName string) bool { + ep.Lock() + defer ep.Unlock() + + return ep.iface != nil && ep.iface.srcName == iName +} + +func (ep *endpoint) Leave(sbox Sandbox, options ...EndpointOption) error { + if sbox == nil || sbox.ID() == "" || sbox.Key() == "" { + return types.BadRequestErrorf("invalid Sandbox passed to endpoint leave: %v", sbox) + } + + sb, ok := sbox.(*sandbox) + if !ok { + return types.BadRequestErrorf("not a valid Sandbox interface") + } + + sb.joinLeaveStart() + defer sb.joinLeaveEnd() + + return ep.sbLeave(sb, false, options...) +} + +func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption) error { + n, err := ep.getNetworkFromStore() + if err != nil { + return fmt.Errorf("failed to get network from store during leave: %v", err) + } + + ep, err = n.getEndpointFromStore(ep.ID()) + if err != nil { + return fmt.Errorf("failed to get endpoint from store during leave: %v", err) + } + + ep.Lock() + sid := ep.sandboxID + ep.Unlock() + + if sid == "" { + return types.ForbiddenErrorf("cannot leave endpoint with no attached sandbox") + } + if sid != sb.ID() { + return types.ForbiddenErrorf("unexpected sandbox ID in leave request. Expected %s. Got %s", ep.sandboxID, sb.ID()) + } + + ep.processOptions(options...)
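+ // Note that n.driver(false) (the forced-leave case) may return a nil driver without an error, which is why the driver calls below are guarded by a nil check so cleanup can proceed even when the driver is gone.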
+ + d, err := n.driver(!force) + if err != nil { + return fmt.Errorf("failed to get driver during endpoint leave: %v", err) + } + + ep.Lock() + ep.sandboxID = "" + ep.network = n + ep.Unlock() + + // Current endpoint providing external connectivity to the sandbox + extEp := sb.getGatewayEndpoint() + moveExtConn := extEp != nil && (extEp.ID() == ep.ID()) + + if d != nil { + if moveExtConn { + logrus.Debugf("Revoking external connectivity on endpoint %s (%s)", ep.Name(), ep.ID()) + if err := d.RevokeExternalConnectivity(n.id, ep.id); err != nil { + logrus.Warnf("driver failed revoking external connectivity on endpoint %s (%s): %v", + ep.Name(), ep.ID(), err) + } + } + + if err := d.Leave(n.id, ep.id); err != nil { + if _, ok := err.(types.MaskableError); !ok { + logrus.Warnf("driver error disconnecting container %s : %v", ep.name, err) + } + } + } + + if ep.svcID != "" { + if err := ep.deleteServiceInfoFromCluster(sb, true, "sbLeave"); err != nil { + logrus.Warnf("Failed to clean up service info on container %s disconnect: %v", ep.name, err) + } + } + + if err := sb.clearNetworkResources(ep); err != nil { + logrus.Warnf("Failed to clean up network resources on container %s disconnect: %v", ep.name, err) + } + + // Update the store about the sandbox detach only after we + // have completed sb.clearNetworkResources above. This avoids + // spurious logs when the sandbox is cleaned up after the daemon + // ungracefully exits and restarts before completing the sandbox + // detach, but after the store has been updated. + if err := n.getController().updateToStore(ep); err != nil { + return err + } + + if e := ep.deleteDriverInfoFromCluster(); e != nil { + logrus.Errorf("Failed to delete endpoint state for endpoint %s from cluster: %v", ep.Name(), e) + } + + sb.deleteHostsEntries(n.getSvcRecords(ep)) + if !sb.inDelete && sb.needDefaultGW() && sb.getEndpointInGWNetwork() == nil { + return sb.setupDefaultGW() + } + + // New endpoint providing external connectivity for the sandbox + extEp = sb.getGatewayEndpoint() + if moveExtConn && extEp != nil { + logrus.Debugf("Programming external connectivity on endpoint %s (%s)", extEp.Name(), extEp.ID()) + extN, err := extEp.getNetworkFromStore() + if err != nil { + return fmt.Errorf("failed to get network from store for programming external connectivity during leave: %v", err) + } + extD, err := extN.driver(true) + if err != nil { + return fmt.Errorf("failed to get driver for programming external connectivity during leave: %v", err) + } + if err := extD.ProgramExternalConnectivity(extEp.network.ID(), extEp.ID(), sb.Labels()); err != nil { + logrus.Warnf("driver failed programming external connectivity on endpoint %s: (%s) %v", + extEp.Name(), extEp.ID(), err) + } + } + + if !sb.needDefaultGW() { + if err := sb.clearDefaultGW(); err != nil { + logrus.Warnf("Failure while disconnecting sandbox %s (%s) from gateway network: %v", + sb.ID(), sb.ContainerID(), err) + } + } + + return nil +} + +func (ep *endpoint) Delete(force bool) error { + var err error + n, err := ep.getNetworkFromStore() + if err != nil { + return fmt.Errorf("failed to get network during Delete: %v", err) + } + + ep, err = n.getEndpointFromStore(ep.ID()) + if err != nil { + return fmt.Errorf("failed to get endpoint from store during Delete: %v", err) + } + + ep.Lock() + epid := ep.id + name := ep.name + sbid := ep.sandboxID + ep.Unlock() + + sb, _ := n.getController().SandboxByID(sbid) + if sb != nil && !force { + return &ActiveContainerError{name: name, id: epid} + } + + if sb != nil { + if e :=
ep.sbLeave(sb.(*sandbox), force); e != nil { + logrus.Warnf("failed to leave sandbox for endpoint %s : %v", name, e) + } + } + + if err = n.getController().deleteFromStore(ep); err != nil { + return err + } + + defer func() { + if err != nil && !force { + ep.dbExists = false + if e := n.getController().updateToStore(ep); e != nil { + logrus.Warnf("failed to recreate endpoint in store %s : %v", name, e) + } + } + }() + + // unwatch for service records + n.getController().unWatchSvcRecord(ep) + + if err = ep.deleteEndpoint(force); err != nil && !force { + return err + } + + ep.releaseAddress() + + if err := n.getEpCnt().DecEndpointCnt(); err != nil { + logrus.Warnf("failed to decrement endpoint count for ep %s: %v", ep.ID(), err) + } + + return nil +} + +func (ep *endpoint) deleteEndpoint(force bool) error { + ep.Lock() + n := ep.network + name := ep.name + epid := ep.id + ep.Unlock() + + driver, err := n.driver(!force) + if err != nil { + return fmt.Errorf("failed to delete endpoint: %v", err) + } + + if driver == nil { + return nil + } + + if err := driver.DeleteEndpoint(n.id, epid); err != nil { + if _, ok := err.(types.ForbiddenError); ok { + return err + } + + if _, ok := err.(types.MaskableError); !ok { + logrus.Warnf("driver error deleting endpoint %s : %v", name, err) + } + } + + return nil +} + +func (ep *endpoint) getSandbox() (*sandbox, bool) { + c := ep.network.getController() + ep.Lock() + sid := ep.sandboxID + ep.Unlock() + + c.Lock() + ps, ok := c.sandboxes[sid] + c.Unlock() + + return ps, ok +} + +func (ep *endpoint) getFirstInterfaceAddress() net.IP { + ep.Lock() + defer ep.Unlock() + + if ep.iface.addr != nil { + return ep.iface.addr.IP + } + + return nil +} + +// EndpointOptionGeneric function returns an option setter for a Generic option defined +// in a Dictionary of Key-Value pair +func EndpointOptionGeneric(generic map[string]interface{}) EndpointOption { + return func(ep *endpoint) { + for k, v := range generic { + ep.generic[k] = v + } + } +} + +var ( + linkLocalMask = net.CIDRMask(16, 32) + linkLocalMaskIPv6 = net.CIDRMask(64, 128) +) + +// CreateOptionIpam function returns an option setter for the ipam configuration for this endpoint +func CreateOptionIpam(ipV4, ipV6 net.IP, llIPs []net.IP, ipamOptions map[string]string) EndpointOption { + return func(ep *endpoint) { + ep.prefAddress = ipV4 + ep.prefAddressV6 = ipV6 + if len(llIPs) != 0 { + for _, ip := range llIPs { + nw := &net.IPNet{IP: ip, Mask: linkLocalMask} + if ip.To4() == nil { + nw.Mask = linkLocalMaskIPv6 + } + ep.iface.llAddrs = append(ep.iface.llAddrs, nw) + } + } + ep.ipamOptions = ipamOptions + } +} + +// CreateOptionExposedPorts function returns an option setter for the container exposed +// ports option to be passed to network.CreateEndpoint() method. +func CreateOptionExposedPorts(exposedPorts []types.TransportPort) EndpointOption { + return func(ep *endpoint) { + // Defensive copy + eps := make([]types.TransportPort, len(exposedPorts)) + copy(eps, exposedPorts) + // Store endpoint label and in generic because driver needs it + ep.exposedPorts = eps + ep.generic[netlabel.ExposedPorts] = eps + } +} + +// CreateOptionPortMapping function returns an option setter for the mapping +// ports option to be passed to network.CreateEndpoint() method. 
+func CreateOptionPortMapping(portBindings []types.PortBinding) EndpointOption { + return func(ep *endpoint) { + // Store a copy of the bindings as generic data to pass to the driver + pbs := make([]types.PortBinding, len(portBindings)) + copy(pbs, portBindings) + ep.generic[netlabel.PortMap] = pbs + } +} + +// CreateOptionDNS function returns an option setter for dns entry option to +// be passed to container Create method. +func CreateOptionDNS(dns []string) EndpointOption { + return func(ep *endpoint) { + ep.generic[netlabel.DNSServers] = dns + } +} + +// CreateOptionAnonymous function returns an option setter for setting +// this endpoint as anonymous +func CreateOptionAnonymous() EndpointOption { + return func(ep *endpoint) { + ep.anonymous = true + } +} + +// CreateOptionDisableResolution function returns an option setter to indicate +// this endpoint doesn't want embedded DNS server functionality +func CreateOptionDisableResolution() EndpointOption { + return func(ep *endpoint) { + ep.disableResolution = true + } +} + +// CreateOptionAlias function returns an option setter for setting endpoint alias +func CreateOptionAlias(name string, alias string) EndpointOption { + return func(ep *endpoint) { + if ep.aliases == nil { + ep.aliases = make(map[string]string) + } + ep.aliases[alias] = name + } +} + +// CreateOptionService function returns an option setter for setting service binding configuration +func CreateOptionService(name, id string, vip net.IP, ingressPorts []*PortConfig, aliases []string) EndpointOption { + return func(ep *endpoint) { + ep.svcName = name + ep.svcID = id + ep.virtualIP = vip + ep.ingressPorts = ingressPorts + ep.svcAliases = aliases + } +} + +// CreateOptionMyAlias function returns an option setter for setting endpoint's self alias +func CreateOptionMyAlias(alias string) EndpointOption { + return func(ep *endpoint) { + ep.myAliases = append(ep.myAliases, alias) + } +} + +// CreateOptionLoadBalancer function returns an option setter for denoting the endpoint is a load balancer for a network +func CreateOptionLoadBalancer() EndpointOption { + return func(ep *endpoint) { + ep.loadBalancer = true + } +} + +// JoinOptionPriority function returns an option setter for priority option to +// be passed to the endpoint.Join() method. 
+func JoinOptionPriority(ep Endpoint, prio int) EndpointOption { + return func(ep *endpoint) { + // ep lock already acquired + c := ep.network.getController() + c.Lock() + sb, ok := c.sandboxes[ep.sandboxID] + c.Unlock() + if !ok { + logrus.Errorf("Could not set endpoint priority value during Join to endpoint %s: No sandbox id present in endpoint", ep.id) + return + } + sb.epPriority[ep.id] = prio + } +} + +func (ep *endpoint) DataScope() string { + return ep.getNetwork().DataScope() +} + +func (ep *endpoint) assignAddress(ipam ipamapi.Ipam, assignIPv4, assignIPv6 bool) error { + var err error + + n := ep.getNetwork() + if n.hasSpecialDriver() { + return nil + } + + logrus.Debugf("Assigning addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) + + if assignIPv4 { + if err = ep.assignAddressVersion(4, ipam); err != nil { + return err + } + } + + if assignIPv6 { + err = ep.assignAddressVersion(6, ipam) + } + + return err +} + +func (ep *endpoint) assignAddressVersion(ipVer int, ipam ipamapi.Ipam) error { + var ( + poolID *string + address **net.IPNet + prefAdd net.IP + progAdd net.IP + ) + + n := ep.getNetwork() + switch ipVer { + case 4: + poolID = &ep.iface.v4PoolID + address = &ep.iface.addr + prefAdd = ep.prefAddress + case 6: + poolID = &ep.iface.v6PoolID + address = &ep.iface.addrv6 + prefAdd = ep.prefAddressV6 + default: + return types.InternalErrorf("incorrect ip version number passed: %d", ipVer) + } + + ipInfo := n.getIPInfo(ipVer) + + // ipv6 address is not mandatory + if len(ipInfo) == 0 && ipVer == 6 { + return nil + } + + // The address to program may be chosen by the user or by the network driver in one specific + // case to support backward compatibility with `docker daemon --fixed-cidrv6` use case + if prefAdd != nil { + progAdd = prefAdd + } else if *address != nil { + progAdd = (*address).IP + } + + for _, d := range ipInfo { + if progAdd != nil && !d.Pool.Contains(progAdd) { + continue + } + addr, _, err := ipam.RequestAddress(d.PoolID, progAdd, ep.ipamOptions) + if err == nil { + ep.Lock() + *address = addr + *poolID = d.PoolID + ep.Unlock() + return nil + } + if err != ipamapi.ErrNoAvailableIPs || progAdd != nil { + return err + } + } + if progAdd != nil { + return types.BadRequestErrorf("Invalid address %s: It does not belong to any of this network's subnets", prefAdd) + } + return fmt.Errorf("no available IPv%d addresses on this network's address pools: %s (%s)", ipVer, n.Name(), n.ID()) +} + +func (ep *endpoint) releaseAddress() { + n := ep.getNetwork() + if n.hasSpecialDriver() { + return + } + + logrus.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name()) + + ipam, _, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + logrus.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err) + return + } + + if ep.iface.addr != nil { + if err := ipam.ReleaseAddress(ep.iface.v4PoolID, ep.iface.addr.IP); err != nil { + logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err) + } + } + + if ep.iface.addrv6 != nil && ep.iface.addrv6.IP.IsGlobalUnicast() { + if err := ipam.ReleaseAddress(ep.iface.v6PoolID, ep.iface.addrv6.IP); err != nil { + logrus.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err) + } + } +} + +func (c *controller) cleanupLocalEndpoints() { + // Get used endpoints + eps := 
make(map[string]interface{}) + for _, sb := range c.sandboxes { + for _, ep := range sb.endpoints { + eps[ep.id] = true + } + } + nl, err := c.getNetworksForScope(datastore.LocalScope) + if err != nil { + logrus.Warnf("Could not get list of networks during endpoint cleanup: %v", err) + return + } + + for _, n := range nl { + if n.ConfigOnly() { + continue + } + epl, err := n.getEndpointsFromStore() + if err != nil { + logrus.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err) + continue + } + + for _, ep := range epl { + if _, ok := eps[ep.id]; ok { + continue + } + logrus.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id) + if err := ep.Delete(true); err != nil { + logrus.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err) + } + } + + epl, err = n.getEndpointsFromStore() + if err != nil { + logrus.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err) + continue + } + + epCnt := n.getEpCnt().EndpointCnt() + if epCnt != uint64(len(epl)) { + logrus.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt) + n.getEpCnt().setCnt(uint64(len(epl))) + } + } +} diff --git a/vendor/github.com/docker/libnetwork/endpoint_cnt.go b/vendor/github.com/docker/libnetwork/endpoint_cnt.go new file mode 100644 index 0000000000..7b7527426d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/endpoint_cnt.go @@ -0,0 +1,182 @@ +package libnetwork + +import ( + "encoding/json" + "fmt" + "sync" + + "github.com/docker/libnetwork/datastore" +) + +type endpointCnt struct { + n *network + Count uint64 + dbIndex uint64 + dbExists bool + sync.Mutex +} + +const epCntKeyPrefix = "endpoint_count" + +func (ec *endpointCnt) Key() []string { + ec.Lock() + defer ec.Unlock() + + return []string{epCntKeyPrefix, ec.n.id} +} + +func (ec *endpointCnt) KeyPrefix() []string { + ec.Lock() + defer ec.Unlock() + + return []string{epCntKeyPrefix, ec.n.id} +} + +func (ec *endpointCnt) Value() []byte { + ec.Lock() + defer ec.Unlock() + + b, err := json.Marshal(ec) + if err != nil { + return nil + } + return b +} + +func (ec *endpointCnt) SetValue(value []byte) error { + ec.Lock() + defer ec.Unlock() + + return json.Unmarshal(value, &ec) +} + +func (ec *endpointCnt) Index() uint64 { + ec.Lock() + defer ec.Unlock() + return ec.dbIndex +} + +func (ec *endpointCnt) SetIndex(index uint64) { + ec.Lock() + ec.dbIndex = index + ec.dbExists = true + ec.Unlock() +} + +func (ec *endpointCnt) Exists() bool { + ec.Lock() + defer ec.Unlock() + return ec.dbExists +} + +func (ec *endpointCnt) Skip() bool { + ec.Lock() + defer ec.Unlock() + return !ec.n.persist +} + +func (ec *endpointCnt) New() datastore.KVObject { + ec.Lock() + defer ec.Unlock() + + return &endpointCnt{ + n: ec.n, + } +} + +func (ec *endpointCnt) CopyTo(o datastore.KVObject) error { + ec.Lock() + defer ec.Unlock() + + dstEc := o.(*endpointCnt) + dstEc.n = ec.n + dstEc.Count = ec.Count + dstEc.dbExists = ec.dbExists + dstEc.dbIndex = ec.dbIndex + + return nil +} + +func (ec *endpointCnt) DataScope() string { + return ec.n.DataScope() +} + +func (ec *endpointCnt) EndpointCnt() uint64 { + ec.Lock() + defer ec.Unlock() + + return ec.Count +} + +func (ec *endpointCnt) updateStore() error { + store := ec.n.getController().getStore(ec.DataScope()) + if store == nil { + return fmt.Errorf("store not found for scope %s on endpoint count update", ec.DataScope()) + } + // make a copy of count and n to avoid being overwritten by 
store.GetObject + count := ec.EndpointCnt() + n := ec.n + for { + if err := ec.n.getController().updateToStore(ec); err == nil || err != datastore.ErrKeyModified { + return err + } + if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil { + return fmt.Errorf("could not update the kvobject to latest on endpoint count update: %v", err) + } + ec.Lock() + ec.Count = count + ec.n = n + ec.Unlock() + } +} + +func (ec *endpointCnt) setCnt(cnt uint64) error { + ec.Lock() + ec.Count = cnt + ec.Unlock() + return ec.updateStore() +} + +func (ec *endpointCnt) atomicIncDecEpCnt(inc bool) error { + store := ec.n.getController().getStore(ec.DataScope()) + if store == nil { + return fmt.Errorf("store not found for scope %s", ec.DataScope()) + } + + tmp := &endpointCnt{n: ec.n} + if err := store.GetObject(datastore.Key(ec.Key()...), tmp); err != nil { + return err + } +retry: + ec.Lock() + if inc { + ec.Count++ + } else { + if ec.Count > 0 { + ec.Count-- + } + } + ec.Unlock() + + if err := ec.n.getController().updateToStore(ec); err != nil { + if err == datastore.ErrKeyModified { + if err := store.GetObject(datastore.Key(ec.Key()...), ec); err != nil { + return fmt.Errorf("could not update the kvobject to latest when trying to atomic add endpoint count: %v", err) + } + + goto retry + } + + return err + } + + return nil +} + +func (ec *endpointCnt) IncEndpointCnt() error { + return ec.atomicIncDecEpCnt(true) +} + +func (ec *endpointCnt) DecEndpointCnt() error { + return ec.atomicIncDecEpCnt(false) +} diff --git a/vendor/github.com/docker/libnetwork/endpoint_info.go b/vendor/github.com/docker/libnetwork/endpoint_info.go new file mode 100644 index 0000000000..68d3e8673d --- /dev/null +++ b/vendor/github.com/docker/libnetwork/endpoint_info.go @@ -0,0 +1,452 @@ +package libnetwork + +import ( + "encoding/json" + "fmt" + "net" + + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/types" +) + +// EndpointInfo provides an interface to retrieve network resources bound to the endpoint. +type EndpointInfo interface { + // Iface returns InterfaceInfo, go interface that can be used + // to get more information on the interface which was assigned to + // the endpoint by the driver. This can be used after the + // endpoint has been created. + Iface() InterfaceInfo + + // Gateway returns the IPv4 gateway assigned by the driver. + // This will only return a valid value if a container has joined the endpoint. + Gateway() net.IP + + // GatewayIPv6 returns the IPv6 gateway assigned by the driver. + // This will only return a valid value if a container has joined the endpoint. + GatewayIPv6() net.IP + + // StaticRoutes returns the list of static routes configured by the network + // driver when the container joins a network + StaticRoutes() []*types.StaticRoute + + // Sandbox returns the attached sandbox if there, nil otherwise. + Sandbox() Sandbox + + // LoadBalancer returns whether the endpoint is the load balancer endpoint for the network. + LoadBalancer() bool +} + +// InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint. +type InterfaceInfo interface { + // MacAddress returns the MAC address assigned to the endpoint. + MacAddress() net.HardwareAddr + + // Address returns the IPv4 address assigned to the endpoint. + Address() *net.IPNet + + // AddressIPv6 returns the IPv6 address assigned to the endpoint. + AddressIPv6() *net.IPNet + + // LinkLocalAddresses returns the list of link-local (IPv4/IPv6) addresses assigned to the endpoint. 
+ LinkLocalAddresses() []*net.IPNet +} + +type endpointInterface struct { + mac net.HardwareAddr + addr *net.IPNet + addrv6 *net.IPNet + llAddrs []*net.IPNet + srcName string + dstPrefix string + routes []*net.IPNet + v4PoolID string + v6PoolID string +} + +func (epi *endpointInterface) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + if epi.mac != nil { + epMap["mac"] = epi.mac.String() + } + if epi.addr != nil { + epMap["addr"] = epi.addr.String() + } + if epi.addrv6 != nil { + epMap["addrv6"] = epi.addrv6.String() + } + if len(epi.llAddrs) != 0 { + list := make([]string, 0, len(epi.llAddrs)) + for _, ll := range epi.llAddrs { + list = append(list, ll.String()) + } + epMap["llAddrs"] = list + } + epMap["srcName"] = epi.srcName + epMap["dstPrefix"] = epi.dstPrefix + var routes []string + for _, route := range epi.routes { + routes = append(routes, route.String()) + } + epMap["routes"] = routes + epMap["v4PoolID"] = epi.v4PoolID + epMap["v6PoolID"] = epi.v6PoolID + return json.Marshal(epMap) +} + +func (epi *endpointInterface) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + if err = json.Unmarshal(b, &epMap); err != nil { + return err + } + if v, ok := epMap["mac"]; ok { + if epi.mac, err = net.ParseMAC(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint interface mac address after json unmarshal: %s", v.(string)) + } + } + if v, ok := epMap["addr"]; ok { + if epi.addr, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint interface ipv4 address after json unmarshal: %v", err) + } + } + if v, ok := epMap["addrv6"]; ok { + if epi.addrv6, err = types.ParseCIDR(v.(string)); err != nil { + return types.InternalErrorf("failed to decode endpoint interface ipv6 address after json unmarshal: %v", err) + } + } + if v, ok := epMap["llAddrs"]; ok { + list := v.([]interface{}) + epi.llAddrs = make([]*net.IPNet, 0, len(list)) + for _, llS := range list { + ll, err := types.ParseCIDR(llS.(string)) + if err != nil { + return types.InternalErrorf("failed to decode endpoint interface link-local address (%v) after json unmarshal: %v", llS, err) + } + epi.llAddrs = append(epi.llAddrs, ll) + } + } + epi.srcName = epMap["srcName"].(string) + epi.dstPrefix = epMap["dstPrefix"].(string) + + rb, _ := json.Marshal(epMap["routes"]) + var routes []string + json.Unmarshal(rb, &routes) + epi.routes = make([]*net.IPNet, 0) + for _, route := range routes { + ip, ipr, err := net.ParseCIDR(route) + if err == nil { + ipr.IP = ip + epi.routes = append(epi.routes, ipr) + } + } + epi.v4PoolID = epMap["v4PoolID"].(string) + epi.v6PoolID = epMap["v6PoolID"].(string) + + return nil +} + +func (epi *endpointInterface) CopyTo(dstEpi *endpointInterface) error { + dstEpi.mac = types.GetMacCopy(epi.mac) + dstEpi.addr = types.GetIPNetCopy(epi.addr) + dstEpi.addrv6 = types.GetIPNetCopy(epi.addrv6) + dstEpi.srcName = epi.srcName + dstEpi.dstPrefix = epi.dstPrefix + dstEpi.v4PoolID = epi.v4PoolID + dstEpi.v6PoolID = epi.v6PoolID + if len(epi.llAddrs) != 0 { + dstEpi.llAddrs = make([]*net.IPNet, 0, len(epi.llAddrs)) + dstEpi.llAddrs = append(dstEpi.llAddrs, epi.llAddrs...) 
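+ // (The appended *net.IPNet pointers are shared with the source interface,
+ // not deep-copied.)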
+ } + + for _, route := range epi.routes { + dstEpi.routes = append(dstEpi.routes, types.GetIPNetCopy(route)) + } + + return nil +} + +type endpointJoinInfo struct { + gw net.IP + gw6 net.IP + StaticRoutes []*types.StaticRoute + driverTableEntries []*tableEntry + disableGatewayService bool +} + +type tableEntry struct { + tableName string + key string + value []byte +} + +func (ep *endpoint) Info() EndpointInfo { + if ep.sandboxID != "" { + return ep + } + n, err := ep.getNetworkFromStore() + if err != nil { + return nil + } + + ep, err = n.getEndpointFromStore(ep.ID()) + if err != nil { + return nil + } + + sb, ok := ep.getSandbox() + if !ok { + // endpoint hasn't joined any sandbox. + // Just return the endpoint + return ep + } + + return sb.getEndpoint(ep.ID()) +} + +func (ep *endpoint) Iface() InterfaceInfo { + ep.Lock() + defer ep.Unlock() + + if ep.iface != nil { + return ep.iface + } + + return nil +} + +func (ep *endpoint) Interface() driverapi.InterfaceInfo { + ep.Lock() + defer ep.Unlock() + + if ep.iface != nil { + return ep.iface + } + + return nil +} + +func (epi *endpointInterface) SetMacAddress(mac net.HardwareAddr) error { + if epi.mac != nil { + return types.ForbiddenErrorf("endpoint interface MAC address present (%s). Cannot be modified with %s.", epi.mac, mac) + } + if mac == nil { + return types.BadRequestErrorf("tried to set nil MAC address to endpoint interface") + } + epi.mac = types.GetMacCopy(mac) + return nil +} + +func (epi *endpointInterface) SetIPAddress(address *net.IPNet) error { + if address.IP == nil { + return types.BadRequestErrorf("tried to set nil IP address to endpoint interface") + } + if address.IP.To4() == nil { + return setAddress(&epi.addrv6, address) + } + return setAddress(&epi.addr, address) +} + +func setAddress(ifaceAddr **net.IPNet, address *net.IPNet) error { + if *ifaceAddr != nil { + return types.ForbiddenErrorf("endpoint interface IP present (%s). Cannot be modified with (%s).", *ifaceAddr, address) + } + *ifaceAddr = types.GetIPNetCopy(address) + return nil +} + +func (epi *endpointInterface) MacAddress() net.HardwareAddr { + return types.GetMacCopy(epi.mac) +} + +func (epi *endpointInterface) Address() *net.IPNet { + return types.GetIPNetCopy(epi.addr) +} + +func (epi *endpointInterface) AddressIPv6() *net.IPNet { + return types.GetIPNetCopy(epi.addrv6) +} + +func (epi *endpointInterface) LinkLocalAddresses() []*net.IPNet { + return epi.llAddrs +} + +func (epi *endpointInterface) SetNames(srcName string, dstPrefix string) error { + epi.srcName = srcName + epi.dstPrefix = dstPrefix + return nil +} + +func (ep *endpoint) InterfaceName() driverapi.InterfaceNameInfo { + ep.Lock() + defer ep.Unlock() + + if ep.iface != nil { + return ep.iface + } + + return nil +} + +func (ep *endpoint) AddStaticRoute(destination *net.IPNet, routeType int, nextHop net.IP) error { + ep.Lock() + defer ep.Unlock() + + r := types.StaticRoute{Destination: destination, RouteType: routeType, NextHop: nextHop} + + if routeType == types.NEXTHOP { + // If the route specifies a next-hop, then it's loosely routed (i.e. not bound to a particular interface). + ep.joinInfo.StaticRoutes = append(ep.joinInfo.StaticRoutes, &r) + } else { + // If the route doesn't specify a next-hop, it must be a connected route, bound to an interface. 
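+ // (Illustrative: a destination such as 10.20.0.0/16 with a RouteType other
+ // than types.NEXTHOP takes this branch and is recorded as an interface route.)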
+ ep.iface.routes = append(ep.iface.routes, r.Destination) + } + return nil +} + +func (ep *endpoint) AddTableEntry(tableName, key string, value []byte) error { + ep.Lock() + defer ep.Unlock() + + ep.joinInfo.driverTableEntries = append(ep.joinInfo.driverTableEntries, &tableEntry{ + tableName: tableName, + key: key, + value: value, + }) + + return nil +} + +func (ep *endpoint) Sandbox() Sandbox { + cnt, ok := ep.getSandbox() + if !ok { + return nil + } + return cnt +} + +func (ep *endpoint) LoadBalancer() bool { + ep.Lock() + defer ep.Unlock() + return ep.loadBalancer +} + +func (ep *endpoint) StaticRoutes() []*types.StaticRoute { + ep.Lock() + defer ep.Unlock() + + if ep.joinInfo == nil { + return nil + } + + return ep.joinInfo.StaticRoutes +} + +func (ep *endpoint) Gateway() net.IP { + ep.Lock() + defer ep.Unlock() + + if ep.joinInfo == nil { + return net.IP{} + } + + return types.GetIPCopy(ep.joinInfo.gw) +} + +func (ep *endpoint) GatewayIPv6() net.IP { + ep.Lock() + defer ep.Unlock() + + if ep.joinInfo == nil { + return net.IP{} + } + + return types.GetIPCopy(ep.joinInfo.gw6) +} + +func (ep *endpoint) SetGateway(gw net.IP) error { + ep.Lock() + defer ep.Unlock() + + ep.joinInfo.gw = types.GetIPCopy(gw) + return nil +} + +func (ep *endpoint) SetGatewayIPv6(gw6 net.IP) error { + ep.Lock() + defer ep.Unlock() + + ep.joinInfo.gw6 = types.GetIPCopy(gw6) + return nil +} + +func (ep *endpoint) retrieveFromStore() (*endpoint, error) { + n, err := ep.getNetworkFromStore() + if err != nil { + return nil, fmt.Errorf("could not find network in store to get latest endpoint %s: %v", ep.Name(), err) + } + return n.getEndpointFromStore(ep.ID()) +} + +func (ep *endpoint) DisableGatewayService() { + ep.Lock() + defer ep.Unlock() + + ep.joinInfo.disableGatewayService = true +} + +func (epj *endpointJoinInfo) MarshalJSON() ([]byte, error) { + epMap := make(map[string]interface{}) + if epj.gw != nil { + epMap["gw"] = epj.gw.String() + } + if epj.gw6 != nil { + epMap["gw6"] = epj.gw6.String() + } + epMap["disableGatewayService"] = epj.disableGatewayService + epMap["StaticRoutes"] = epj.StaticRoutes + return json.Marshal(epMap) +} + +func (epj *endpointJoinInfo) UnmarshalJSON(b []byte) error { + var ( + err error + epMap map[string]interface{} + ) + if err = json.Unmarshal(b, &epMap); err != nil { + return err + } + if v, ok := epMap["gw"]; ok { + epj.gw = net.ParseIP(v.(string)) + } + if v, ok := epMap["gw6"]; ok { + epj.gw6 = net.ParseIP(v.(string)) + } + epj.disableGatewayService = epMap["disableGatewayService"].(bool) + + var tStaticRoute []types.StaticRoute + if v, ok := epMap["StaticRoutes"]; ok { + tb, _ := json.Marshal(v) + // Decode into the outer tStaticRoute; re-declaring the slice here + // would shadow it and silently discard the decoded routes. + json.Unmarshal(tb, &tStaticRoute) + } + var StaticRoutes []*types.StaticRoute + for _, r := range tStaticRoute { + r := r // copy the loop variable so each appended pointer is distinct + StaticRoutes = append(StaticRoutes, &r) + } + epj.StaticRoutes = StaticRoutes + + return nil +} + +func (epj *endpointJoinInfo) CopyTo(dstEpj *endpointJoinInfo) error { + dstEpj.disableGatewayService = epj.disableGatewayService + dstEpj.StaticRoutes = make([]*types.StaticRoute, len(epj.StaticRoutes)) + copy(dstEpj.StaticRoutes, epj.StaticRoutes) + dstEpj.driverTableEntries = make([]*tableEntry, len(epj.driverTableEntries)) + copy(dstEpj.driverTableEntries, epj.driverTableEntries) + dstEpj.gw = types.GetIPCopy(epj.gw) + dstEpj.gw6 = types.GetIPCopy(epj.gw6) + return nil +} diff --git a/vendor/github.com/docker/libnetwork/endpoint_info_unix.go b/vendor/github.com/docker/libnetwork/endpoint_info_unix.go new file mode 100644 index 
0000000000..f2534f4904 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/endpoint_info_unix.go @@ -0,0 +1,30 @@ +// +build !windows + +package libnetwork + +import "fmt" + +func (ep *endpoint) DriverInfo() (map[string]interface{}, error) { + ep, err := ep.retrieveFromStore() + if err != nil { + return nil, err + } + + if sb, ok := ep.getSandbox(); ok { + if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() { + return gwep.DriverInfo() + } + } + + n, err := ep.getNetworkFromStore() + if err != nil { + return nil, fmt.Errorf("could not find network in store for driver info: %v", err) + } + + driver, err := n.driver(true) + if err != nil { + return nil, fmt.Errorf("failed to get driver info: %v", err) + } + + return driver.EndpointOperInfo(n.ID(), ep.ID()) +} diff --git a/vendor/github.com/docker/libnetwork/endpoint_info_windows.go b/vendor/github.com/docker/libnetwork/endpoint_info_windows.go new file mode 100644 index 0000000000..93ad8330e9 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/endpoint_info_windows.go @@ -0,0 +1,45 @@ +// +build windows + +package libnetwork + +import "fmt" + +func (ep *endpoint) DriverInfo() (map[string]interface{}, error) { + ep, err := ep.retrieveFromStore() + if err != nil { + return nil, err + } + + var gwDriverInfo map[string]interface{} + if sb, ok := ep.getSandbox(); ok { + if gwep := sb.getEndpointInGWNetwork(); gwep != nil && gwep.ID() != ep.ID() { + + gwDriverInfo, err = gwep.DriverInfo() + if err != nil { + return nil, err + } + } + } + + n, err := ep.getNetworkFromStore() + if err != nil { + return nil, fmt.Errorf("could not find network in store for driver info: %v", err) + } + + driver, err := n.driver(true) + if err != nil { + return nil, fmt.Errorf("failed to get driver info: %v", err) + } + + epInfo, err := driver.EndpointOperInfo(n.ID(), ep.ID()) + if err != nil { + return nil, err + } + + if epInfo != nil { + epInfo["GW_INFO"] = gwDriverInfo + return epInfo, nil + } + + return gwDriverInfo, nil +} diff --git a/vendor/github.com/docker/libnetwork/error.go b/vendor/github.com/docker/libnetwork/error.go new file mode 100644 index 0000000000..5f00709ff9 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/error.go @@ -0,0 +1,193 @@ +package libnetwork + +import ( + "fmt" +) + +// ErrNoSuchNetwork is returned when a network query finds no result +type ErrNoSuchNetwork string + +func (nsn ErrNoSuchNetwork) Error() string { + return fmt.Sprintf("network %s not found", string(nsn)) +} + +// NotFound denotes the type of this error +func (nsn ErrNoSuchNetwork) NotFound() {} + +// ErrNoSuchEndpoint is returned when an endpoint query finds no result +type ErrNoSuchEndpoint string + +func (nse ErrNoSuchEndpoint) Error() string { + return fmt.Sprintf("endpoint %s not found", string(nse)) +} + +// NotFound denotes the type of this error +func (nse ErrNoSuchEndpoint) NotFound() {} + +// ErrInvalidNetworkDriver is returned if an invalid driver +// name is passed. +type ErrInvalidNetworkDriver string + +func (ind ErrInvalidNetworkDriver) Error() string { + return fmt.Sprintf("invalid driver bound to network: %s", string(ind)) +} + +// BadRequest denotes the type of this error +func (ind ErrInvalidNetworkDriver) BadRequest() {} + +// ErrInvalidJoin is returned if a join is attempted on an endpoint +// which already has a container joined. 
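+// (For example, a second Join attempted on this endpoint before the first
+// container has left it is rejected with this error.)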
+type ErrInvalidJoin struct{} + +func (ij ErrInvalidJoin) Error() string { + return "a container has already joined the endpoint" +} + +// BadRequest denotes the type of this error +func (ij ErrInvalidJoin) BadRequest() {} + +// ErrNoContainer is returned when the endpoint has no container +// attached to it. +type ErrNoContainer struct{} + +func (nc ErrNoContainer) Error() string { + return "no container is attached to the endpoint" +} + +// Maskable denotes the type of this error +func (nc ErrNoContainer) Maskable() {} + +// ErrInvalidID is returned when a query-by-id method is being invoked +// with an empty id parameter +type ErrInvalidID string + +func (ii ErrInvalidID) Error() string { + return fmt.Sprintf("invalid id: %s", string(ii)) +} + +// BadRequest denotes the type of this error +func (ii ErrInvalidID) BadRequest() {} + +// ErrInvalidName is returned when a query-by-name or resource create method is +// invoked with an empty name parameter +type ErrInvalidName string + +func (in ErrInvalidName) Error() string { + return fmt.Sprintf("invalid name: %s", string(in)) +} + +// BadRequest denotes the type of this error +func (in ErrInvalidName) BadRequest() {} + +// ErrInvalidConfigFile type is returned when an invalid LibNetwork config file is detected +type ErrInvalidConfigFile string + +func (cf ErrInvalidConfigFile) Error() string { + return fmt.Sprintf("Invalid Config file %q", string(cf)) +} + +// NetworkTypeError type is returned when the network type string is not +// known to libnetwork. +type NetworkTypeError string + +func (nt NetworkTypeError) Error() string { + return fmt.Sprintf("unknown driver %q", string(nt)) +} + +// NotFound denotes the type of this error +func (nt NetworkTypeError) NotFound() {} + +// NetworkNameError is returned when a network with the same name already exists. +type NetworkNameError string + +func (nnr NetworkNameError) Error() string { + return fmt.Sprintf("network with name %s already exists", string(nnr)) +} + +// Forbidden denotes the type of this error +func (nnr NetworkNameError) Forbidden() {} + +// UnknownNetworkError is returned when libnetwork could not find in its database +// a network with the same name and id. +type UnknownNetworkError struct { + name string + id string +} + +func (une *UnknownNetworkError) Error() string { + return fmt.Sprintf("unknown network %s id %s", une.name, une.id) +} + +// NotFound denotes the type of this error +func (une *UnknownNetworkError) NotFound() {} + +// ActiveEndpointsError is returned when a network is deleted which has active +// endpoints in it. +type ActiveEndpointsError struct { + name string + id string +} + +func (aee *ActiveEndpointsError) Error() string { + return fmt.Sprintf("network %s id %s has active endpoints", aee.name, aee.id) +} + +// Forbidden denotes the type of this error +func (aee *ActiveEndpointsError) Forbidden() {} + +// UnknownEndpointError is returned when libnetwork could not find in its database +// an endpoint with the same name and id. +type UnknownEndpointError struct { + name string + id string +} + +func (uee *UnknownEndpointError) Error() string { + return fmt.Sprintf("unknown endpoint %s id %s", uee.name, uee.id) +} + +// NotFound denotes the type of this error +func (uee *UnknownEndpointError) NotFound() {} + +// ActiveContainerError is returned when an endpoint is deleted which has active +// containers attached to it. 
+type ActiveContainerError struct { + name string + id string +} + +func (ace *ActiveContainerError) Error() string { + return fmt.Sprintf("endpoint with name %s id %s has active containers", ace.name, ace.id) +} + +// Forbidden denotes the type of this error +func (ace *ActiveContainerError) Forbidden() {} + +// InvalidContainerIDError is returned when an invalid container id is passed +// in Join/Leave +type InvalidContainerIDError string + +func (id InvalidContainerIDError) Error() string { + return fmt.Sprintf("invalid container id %s", string(id)) +} + +// BadRequest denotes the type of this error +func (id InvalidContainerIDError) BadRequest() {} + +// ManagerRedirectError is returned when the request should be redirected to Manager +type ManagerRedirectError string + +func (mr ManagerRedirectError) Error() string { + return "Redirect the request to the manager" +} + +// Maskable denotes the type of this error +func (mr ManagerRedirectError) Maskable() {} + +// ErrDataStoreNotInitialized is returned if an invalid data scope is passed +// for getting data store +type ErrDataStoreNotInitialized string + +func (dsni ErrDataStoreNotInitialized) Error() string { + return fmt.Sprintf("datastore for scope %q is not initialized", string(dsni)) +} diff --git a/vendor/github.com/docker/libnetwork/etchosts/etchosts.go b/vendor/github.com/docker/libnetwork/etchosts/etchosts.go new file mode 100644 index 0000000000..d55298af45 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/etchosts/etchosts.go @@ -0,0 +1,208 @@ +package etchosts + +import ( + "bufio" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strings" + "sync" +) + +// Record Structure for a single host record +type Record struct { + Hosts string + IP string +} + +// WriteTo writes record to file and returns bytes written or error +func (r Record) WriteTo(w io.Writer) (int64, error) { + n, err := fmt.Fprintf(w, "%s\t%s\n", r.IP, r.Hosts) + return int64(n), err +} + +var ( + // Default hosts config records slice + defaultContent = []Record{ + {Hosts: "localhost", IP: "127.0.0.1"}, + {Hosts: "localhost ip6-localhost ip6-loopback", IP: "::1"}, + {Hosts: "ip6-localnet", IP: "fe00::0"}, + {Hosts: "ip6-mcastprefix", IP: "ff00::0"}, + {Hosts: "ip6-allnodes", IP: "ff02::1"}, + {Hosts: "ip6-allrouters", IP: "ff02::2"}, + } + + // A cache of path level locks for synchronizing /etc/hosts + // updates on a file level + pathMap = make(map[string]*sync.Mutex) + + // A package level mutex to synchronize the cache itself + pathMutex sync.Mutex +) + +func pathLock(path string) func() { + pathMutex.Lock() + defer pathMutex.Unlock() + + pl, ok := pathMap[path] + if !ok { + pl = &sync.Mutex{} + pathMap[path] = pl + } + + pl.Lock() + return func() { + pl.Unlock() + } +} + +// Drop drops the path string from the path cache +func Drop(path string) { + pathMutex.Lock() + defer pathMutex.Unlock() + + delete(pathMap, path) +} + +// Build function +// path is path to host file string required +// IP, hostname, and domainname set main record leave empty for no master record +// extraContent is an array of extra host records. +func Build(path, IP, hostname, domainname string, extraContent []Record) error { + defer pathLock(path)() + + content := bytes.NewBuffer(nil) + if IP != "" { + //set main record + var mainRec Record + mainRec.IP = IP + // User might have provided a FQDN in hostname or split it across hostname + // and domainname. We want the FQDN and the bare hostname. 
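+ // (Worked example, assuming IP "10.0.0.2": hostname "foo" with domainname
+ // "example.com", or hostname "foo.example.com" with an empty domainname,
+ // both yield the record "10.0.0.2\tfoo.example.com foo".)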
+ fqdn := hostname + if domainname != "" { + fqdn = fmt.Sprintf("%s.%s", fqdn, domainname) + } + parts := strings.SplitN(fqdn, ".", 2) + if len(parts) == 2 { + mainRec.Hosts = fmt.Sprintf("%s %s", fqdn, parts[0]) + } else { + mainRec.Hosts = fqdn + } + if _, err := mainRec.WriteTo(content); err != nil { + return err + } + } + // Write defaultContent slice to buffer + for _, r := range defaultContent { + if _, err := r.WriteTo(content); err != nil { + return err + } + } + // Write extra content from function arguments + for _, r := range extraContent { + if _, err := r.WriteTo(content); err != nil { + return err + } + } + + return ioutil.WriteFile(path, content.Bytes(), 0644) +} + +// Add adds an arbitrary number of Records to an already existing /etc/hosts file +func Add(path string, recs []Record) error { + defer pathLock(path)() + + if len(recs) == 0 { + return nil + } + + b, err := mergeRecords(path, recs) + if err != nil { + return err + } + + return ioutil.WriteFile(path, b, 0644) +} + +func mergeRecords(path string, recs []Record) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + content := bytes.NewBuffer(nil) + + if _, err := content.ReadFrom(f); err != nil { + return nil, err + } + + for _, r := range recs { + if _, err := r.WriteTo(content); err != nil { + return nil, err + } + } + + return content.Bytes(), nil +} + +// Delete deletes an arbitrary number of Records already existing in /etc/hosts file +func Delete(path string, recs []Record) error { + defer pathLock(path)() + + if len(recs) == 0 { + return nil + } + old, err := os.Open(path) + if err != nil { + return err + } + + var buf bytes.Buffer + + s := bufio.NewScanner(old) + eol := []byte{'\n'} +loop: + for s.Scan() { + b := s.Bytes() + if len(b) == 0 { + continue + } + + if b[0] == '#' { + buf.Write(b) + buf.Write(eol) + continue + } + for _, r := range recs { + if bytes.HasSuffix(b, []byte("\t"+r.Hosts)) { + continue loop + } + } + buf.Write(b) + buf.Write(eol) + } + old.Close() + if err := s.Err(); err != nil { + return err + } + return ioutil.WriteFile(path, buf.Bytes(), 0644) +} + +// Update all IP addresses where hostname matches. +// path is path to host file +// IP is new IP address +// hostname is hostname to search for to replace IP +func Update(path, IP, hostname string) error { + defer pathLock(path)() + + old, err := ioutil.ReadFile(path) + if err != nil { + return err + } + var re = regexp.MustCompile(fmt.Sprintf("(\\S*)(\\t%s)(\\s|\\.)", regexp.QuoteMeta(hostname))) + return ioutil.WriteFile(path, re.ReplaceAll(old, []byte(IP+"$2"+"$3")), 0644) +} diff --git a/vendor/github.com/docker/libnetwork/firewall_linux.go b/vendor/github.com/docker/libnetwork/firewall_linux.go new file mode 100644 index 0000000000..54f9621f81 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/firewall_linux.go @@ -0,0 +1,40 @@ +package libnetwork + +import ( + "github.com/docker/libnetwork/iptables" + "github.com/sirupsen/logrus" +) + +const userChain = "DOCKER-USER" + +func (c *controller) arrangeUserFilterRule() { + c.Lock() + arrangeUserFilterRule() + c.Unlock() + iptables.OnReloaded(func() { + c.Lock() + arrangeUserFilterRule() + c.Unlock() + }) +} + +// This chain allows users to configure firewall policies in a way that persists +// across docker operations/restarts. Docker will not delete or modify any pre-existing +// rules from the DOCKER-USER filter chain. 
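+// The net effect of the three calls below is roughly this iptables layout
+// (a sketch, not literal commands run by the daemon):
+//
+//	iptables -N DOCKER-USER            # NewChain
+//	iptables -A DOCKER-USER -j RETURN  # AddReturnRule
+//	iptables -I FORWARD -j DOCKER-USER # EnsureJumpRule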
+func arrangeUserFilterRule() { + _, err := iptables.NewChain(userChain, iptables.Filter, false) + if err != nil { + logrus.Warnf("Failed to create %s chain: %v", userChain, err) + return + } + + if err = iptables.AddReturnRule(userChain); err != nil { + logrus.Warnf("Failed to add the RETURN rule for %s: %v", userChain, err) + return + } + + err = iptables.EnsureJumpRule("FORWARD", userChain) + if err != nil { + logrus.Warnf("Failed to ensure the jump rule for %s: %v", userChain, err) + } +} diff --git a/vendor/github.com/docker/libnetwork/firewall_others.go b/vendor/github.com/docker/libnetwork/firewall_others.go new file mode 100644 index 0000000000..901f568fed --- /dev/null +++ b/vendor/github.com/docker/libnetwork/firewall_others.go @@ -0,0 +1,6 @@ +// +build !linux + +package libnetwork + +func (c *controller) arrangeUserFilterRule() { +} diff --git a/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go b/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go new file mode 100644 index 0000000000..452b5628c1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go @@ -0,0 +1,121 @@ +package hostdiscovery + +import ( + "net" + "sync" + + "github.com/sirupsen/logrus" + + mapset "github.com/deckarep/golang-set" + "github.com/docker/docker/pkg/discovery" + // Including KV + _ "github.com/docker/docker/pkg/discovery/kv" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" + "github.com/docker/libnetwork/types" +) + +type hostDiscovery struct { + watcher discovery.Watcher + nodes mapset.Set + stopChan chan struct{} + sync.Mutex +} + +func init() { + consul.Register() + etcd.Register() + zookeeper.Register() +} + +// NewHostDiscovery function creates a host discovery object +func NewHostDiscovery(watcher discovery.Watcher) HostDiscovery { + return &hostDiscovery{watcher: watcher, nodes: mapset.NewSet(), stopChan: make(chan struct{})} +} + +func (h *hostDiscovery) Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error { + h.Lock() + d := h.watcher + h.Unlock() + if d == nil { + return types.BadRequestErrorf("invalid discovery watcher") + } + discoveryCh, errCh := d.Watch(h.stopChan) + go h.monitorDiscovery(discoveryCh, errCh, activeCallback, joinCallback, leaveCallback) + return nil +} + +func (h *hostDiscovery) monitorDiscovery(ch <-chan discovery.Entries, errCh <-chan error, + activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) { + for { + select { + case entries := <-ch: + h.processCallback(entries, activeCallback, joinCallback, leaveCallback) + case err := <-errCh: + if err != nil { + logrus.Errorf("discovery error: %v", err) + } + case <-h.stopChan: + return + } + } +} + +func (h *hostDiscovery) StopDiscovery() error { + h.Lock() + stopChan := h.stopChan + h.watcher = nil + h.Unlock() + + close(stopChan) + return nil +} + +func (h *hostDiscovery) processCallback(entries discovery.Entries, + activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) { + updated := hosts(entries) + h.Lock() + existing := h.nodes + added, removed := diff(existing, updated) + h.nodes = updated + h.Unlock() + + activeCallback() + if len(added) > 0 { + joinCallback(added) + } + if len(removed) > 0 { + leaveCallback(removed) + } +} + +func diff(existing mapset.Set, updated mapset.Set) (added []net.IP, removed []net.IP) { + addSlice := updated.Difference(existing).ToSlice() + 
removeSlice := existing.Difference(updated).ToSlice() + for _, ip := range addSlice { + added = append(added, net.ParseIP(ip.(string))) + } + for _, ip := range removeSlice { + removed = append(removed, net.ParseIP(ip.(string))) + } + return +} + +func (h *hostDiscovery) Fetch() []net.IP { + h.Lock() + defer h.Unlock() + ips := []net.IP{} + for _, ipstr := range h.nodes.ToSlice() { + ips = append(ips, net.ParseIP(ipstr.(string))) + } + return ips +} + +func hosts(entries discovery.Entries) mapset.Set { + hosts := mapset.NewSet() + for _, entry := range entries { + hosts.Add(entry.Host) + } + return hosts +} diff --git a/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go b/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go new file mode 100644 index 0000000000..f0ca40e31f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go @@ -0,0 +1,22 @@ +package hostdiscovery + +import "net" + +// JoinCallback provides a callback event for new node joining the cluster +type JoinCallback func(entries []net.IP) + +// ActiveCallback provides a callback event for active discovery event +type ActiveCallback func() + +// LeaveCallback provides a callback event for node leaving the cluster +type LeaveCallback func(entries []net.IP) + +// HostDiscovery primary interface +type HostDiscovery interface { + //Watch Node join and leave cluster events + Watch(activeCallback ActiveCallback, joinCallback JoinCallback, leaveCallback LeaveCallback) error + // StopDiscovery stops the discovery process + StopDiscovery() error + // Fetch returns a list of host IPs that are currently discovered + Fetch() []net.IP +} diff --git a/vendor/github.com/docker/libnetwork/idm/idm.go b/vendor/github.com/docker/libnetwork/idm/idm.go new file mode 100644 index 0000000000..d5843d4a58 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/idm/idm.go @@ -0,0 +1,76 @@ +// Package idm manages reservation/release of numerical ids from a configured set of contiguous ids +package idm + +import ( + "errors" + "fmt" + + "github.com/docker/libnetwork/bitseq" + "github.com/docker/libnetwork/datastore" +) + +// Idm manages the reservation/release of numerical ids from a contiguous set +type Idm struct { + start uint64 + end uint64 + handle *bitseq.Handle +} + +// New returns an instance of id manager for a [start,end] set of numerical ids +func New(ds datastore.DataStore, id string, start, end uint64) (*Idm, error) { + if id == "" { + return nil, errors.New("Invalid id") + } + if end <= start { + return nil, fmt.Errorf("Invalid set range: [%d, %d]", start, end) + } + + h, err := bitseq.NewHandle("idm", ds, id, 1+end-start) + if err != nil { + return nil, fmt.Errorf("failed to initialize bit sequence handler: %s", err.Error()) + } + + return &Idm{start: start, end: end, handle: h}, nil +} + +// GetID returns the first available id in the set +func (i *Idm) GetID(serial bool) (uint64, error) { + if i.handle == nil { + return 0, errors.New("ID set is not initialized") + } + ordinal, err := i.handle.SetAny(serial) + return i.start + ordinal, err +} + +// GetSpecificID tries to reserve the specified id +func (i *Idm) GetSpecificID(id uint64) error { + if i.handle == nil { + return errors.New("ID set is not initialized") + } + + if id < i.start || id > i.end { + return errors.New("Requested id does not belong to the set") + } + + return i.handle.Set(id - i.start) +} + +// GetIDInRange returns the first available id in the set within a [start,end] range +func (i *Idm) 
GetIDInRange(start, end uint64, serial bool) (uint64, error) { + if i.handle == nil { + return 0, errors.New("ID set is not initialized") + } + + if start < i.start || end > i.end { + return 0, errors.New("Requested range does not belong to the set") + } + + ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start, serial) + + return i.start + ordinal, err +} + +// Release releases the specified id +func (i *Idm) Release(id uint64) { + i.handle.Unset(id - i.start) +} diff --git a/vendor/github.com/docker/libnetwork/ipam/allocator.go b/vendor/github.com/docker/libnetwork/ipam/allocator.go new file mode 100644 index 0000000000..d1a91c077f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipam/allocator.go @@ -0,0 +1,609 @@ +package ipam + +import ( + "fmt" + "net" + "sort" + "sync" + + "github.com/docker/libnetwork/bitseq" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/ipamutils" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + localAddressSpace = "LocalDefault" + globalAddressSpace = "GlobalDefault" + // The biggest configurable host subnets + minNetSize = 8 + minNetSizeV6 = 64 + // datastore keys for ipam objects + dsConfigKey = "ipam/" + ipamapi.DefaultIPAM + "/config" + dsDataKey = "ipam/" + ipamapi.DefaultIPAM + "/data" +) + +// Allocator provides per address space ipv4/ipv6 bookkeeping +type Allocator struct { + // Predefined pools for default address spaces + predefined map[string][]*net.IPNet + addrSpaces map[string]*addrSpace + // stores []datastore.Datastore + // Allocated addresses in each address space's subnet + addresses map[SubnetKey]*bitseq.Handle + sync.Mutex +} + +// NewAllocator returns an instance of libnetwork ipam +func NewAllocator(lcDs, glDs datastore.DataStore) (*Allocator, error) { + a := &Allocator{} + + // Load predefined subnet pools + a.predefined = map[string][]*net.IPNet{ + localAddressSpace: ipamutils.PredefinedBroadNetworks, + globalAddressSpace: ipamutils.PredefinedGranularNetworks, + } + + // Initialize bitseq map + a.addresses = make(map[SubnetKey]*bitseq.Handle) + + // Initialize address spaces + a.addrSpaces = make(map[string]*addrSpace) + for _, aspc := range []struct { + as string + ds datastore.DataStore + }{ + {localAddressSpace, lcDs}, + {globalAddressSpace, glDs}, + } { + a.initializeAddressSpace(aspc.as, aspc.ds) + } + + return a, nil +} + +func (a *Allocator) refresh(as string) error { + aSpace, err := a.getAddressSpaceFromStore(as) + if err != nil { + return types.InternalErrorf("error getting pools config from store: %v", err) + } + + if aSpace == nil { + return nil + } + + a.Lock() + a.addrSpaces[as] = aSpace + a.Unlock() + + return nil +} + +func (a *Allocator) updateBitMasks(aSpace *addrSpace) error { + var inserterList []func() error + + aSpace.Lock() + for k, v := range aSpace.subnets { + if v.Range == nil { + kk := k + vv := v + inserterList = append(inserterList, func() error { return a.insertBitMask(kk, vv.Pool) }) + } + } + aSpace.Unlock() + + // Add the bitmasks (data could come from datastore) + if inserterList != nil { + for _, f := range inserterList { + if err := f(); err != nil { + return err + } + } + } + + return nil +} + +// Checks for and fixes damaged bitmask. 
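+// It is invoked once per address space from initializeAddressSpace below;
+// any inconsistency found by bitseq's CheckConsistency is logged as a
+// warning rather than returned to the caller.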
+func (a *Allocator) checkConsistency(as string) { + var sKeyList []SubnetKey + + // Retrieve this address space's configuration and bitmasks from the datastore + a.refresh(as) + a.Lock() + aSpace, ok := a.addrSpaces[as] + a.Unlock() + if !ok { + return + } + a.updateBitMasks(aSpace) + + aSpace.Lock() + for sk, pd := range aSpace.subnets { + if pd.Range != nil { + continue + } + sKeyList = append(sKeyList, sk) + } + aSpace.Unlock() + + for _, sk := range sKeyList { + a.Lock() + bm := a.addresses[sk] + a.Unlock() + if err := bm.CheckConsistency(); err != nil { + logrus.Warnf("Error while running consistency check for %s: %v", sk, err) + } + } +} + +func (a *Allocator) initializeAddressSpace(as string, ds datastore.DataStore) error { + scope := "" + if ds != nil { + scope = ds.Scope() + } + + a.Lock() + if currAS, ok := a.addrSpaces[as]; ok { + if currAS.ds != nil { + a.Unlock() + return types.ForbiddenErrorf("a datastore is already configured for the address space %s", as) + } + } + a.addrSpaces[as] = &addrSpace{ + subnets: map[SubnetKey]*PoolData{}, + id: dsConfigKey + "/" + as, + scope: scope, + ds: ds, + alloc: a, + } + a.Unlock() + + a.checkConsistency(as) + + return nil +} + +// DiscoverNew informs the allocator about a new global scope datastore +func (a *Allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + if dType != discoverapi.DatastoreConfig { + return nil + } + + dsc, ok := data.(discoverapi.DatastoreConfigData) + if !ok { + return types.InternalErrorf("incorrect data in datastore update notification: %v", data) + } + + ds, err := datastore.NewDataStoreFromConfig(dsc) + if err != nil { + return err + } + + return a.initializeAddressSpace(globalAddressSpace, ds) +} + +// DiscoverDelete is a notification of no interest for the allocator +func (a *Allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// GetDefaultAddressSpaces returns the local and global default address spaces +func (a *Allocator) GetDefaultAddressSpaces() (string, string, error) { + return localAddressSpace, globalAddressSpace, nil +} + +// RequestPool returns an address pool along with its unique id. 
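+// Illustrative call (example values only):
+//
+//	id, nw, _, err := a.RequestPool("LocalDefault", "10.10.0.0/16", "10.10.1.0/24", nil, false)
+//	// id -> "LocalDefault/10.10.0.0/16/10.10.1.0/24" (see SubnetKey.String)
+//	// nw -> the parsed master pool, 10.10.0.0/16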
+func (a *Allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) + + k, nw, ipr, err := a.parsePoolRequest(addressSpace, pool, subPool, v6) + if err != nil { + return "", nil, nil, types.InternalErrorf("failed to parse pool request for address space %q pool %q subpool %q: %v", addressSpace, pool, subPool, err) + } + + pdf := k == nil + +retry: + if pdf { + if nw, err = a.getPredefinedPool(addressSpace, v6); err != nil { + return "", nil, nil, err + } + k = &SubnetKey{AddressSpace: addressSpace, Subnet: nw.String()} + } + + if err := a.refresh(addressSpace); err != nil { + return "", nil, nil, err + } + + aSpace, err := a.getAddrSpace(addressSpace) + if err != nil { + return "", nil, nil, err + } + + insert, err := aSpace.updatePoolDBOnAdd(*k, nw, ipr, pdf) + if err != nil { + if _, ok := err.(types.MaskableError); ok { + logrus.Debugf("Retrying predefined pool search: %v", err) + goto retry + } + return "", nil, nil, err + } + + if err := a.writeToStore(aSpace); err != nil { + if _, ok := err.(types.RetryError); !ok { + return "", nil, nil, types.InternalErrorf("pool configuration failed because of %s", err.Error()) + } + + goto retry + } + + return k.String(), nw, nil, insert() +} + +// ReleasePool releases the address pool identified by the passed id +func (a *Allocator) ReleasePool(poolID string) error { + logrus.Debugf("ReleasePool(%s)", poolID) + k := SubnetKey{} + if err := k.FromString(poolID); err != nil { + return types.BadRequestErrorf("invalid pool id: %s", poolID) + } + +retry: + if err := a.refresh(k.AddressSpace); err != nil { + return err + } + + aSpace, err := a.getAddrSpace(k.AddressSpace) + if err != nil { + return err + } + + remove, err := aSpace.updatePoolDBOnRemoval(k) + if err != nil { + return err + } + + if err = a.writeToStore(aSpace); err != nil { + if _, ok := err.(types.RetryError); !ok { + return types.InternalErrorf("pool (%s) removal failed because of %v", poolID, err) + } + goto retry + } + + return remove() +} + +// Given the address space, returns the local or global PoolConfig based on the +// address space is local or global. AddressSpace locality is being registered with IPAM out of band. 
+func (a *Allocator) getAddrSpace(as string) (*addrSpace, error) { + a.Lock() + defer a.Unlock() + aSpace, ok := a.addrSpaces[as] + if !ok { + return nil, types.BadRequestErrorf("cannot find address space %s (most likely the backing datastore is not configured)", as) + } + return aSpace, nil +} + +func (a *Allocator) parsePoolRequest(addressSpace, pool, subPool string, v6 bool) (*SubnetKey, *net.IPNet, *AddressRange, error) { + var ( + nw *net.IPNet + ipr *AddressRange + err error + ) + + if addressSpace == "" { + return nil, nil, nil, ipamapi.ErrInvalidAddressSpace + } + + if pool == "" && subPool != "" { + return nil, nil, nil, ipamapi.ErrInvalidSubPool + } + + if pool == "" { + return nil, nil, nil, nil + } + + if _, nw, err = net.ParseCIDR(pool); err != nil { + return nil, nil, nil, ipamapi.ErrInvalidPool + } + + if subPool != "" { + if ipr, err = getAddressRange(subPool, nw); err != nil { + return nil, nil, nil, err + } + } + + return &SubnetKey{AddressSpace: addressSpace, Subnet: nw.String(), ChildSubnet: subPool}, nw, ipr, nil +} + +func (a *Allocator) insertBitMask(key SubnetKey, pool *net.IPNet) error { + //logrus.Debugf("Inserting bitmask (%s, %s)", key.String(), pool.String()) + + store := a.getStore(key.AddressSpace) + ipVer := getAddressVersion(pool.IP) + ones, bits := pool.Mask.Size() + numAddresses := uint64(1 << uint(bits-ones)) + + // Allow /64 subnet + if ipVer == v6 && numAddresses == 0 { + numAddresses-- + } + + // Generate the new address masks. AddressMask content may come from datastore + h, err := bitseq.NewHandle(dsDataKey, store, key.String(), numAddresses) + if err != nil { + return err + } + + // Do not let network identifier address be reserved + // Do the same for IPv6 so that bridge ip starts with XXXX...::1 + h.Set(0) + + // Do not let broadcast address be reserved + if ipVer == v4 { + h.Set(numAddresses - 1) + } + + a.Lock() + a.addresses[key] = h + a.Unlock() + return nil +} + +func (a *Allocator) retrieveBitmask(k SubnetKey, n *net.IPNet) (*bitseq.Handle, error) { + a.Lock() + bm, ok := a.addresses[k] + a.Unlock() + if !ok { + logrus.Debugf("Retrieving bitmask (%s, %s)", k.String(), n.String()) + if err := a.insertBitMask(k, n); err != nil { + return nil, types.InternalErrorf("could not find bitmask in datastore for %s", k.String()) + } + a.Lock() + bm = a.addresses[k] + a.Unlock() + } + return bm, nil +} + +func (a *Allocator) getPredefineds(as string) []*net.IPNet { + a.Lock() + defer a.Unlock() + l := make([]*net.IPNet, 0, len(a.predefined[as])) + for _, pool := range a.predefined[as] { + l = append(l, pool) + } + return l +} + +func (a *Allocator) getPredefinedPool(as string, ipV6 bool) (*net.IPNet, error) { + var v ipVersion + v = v4 + if ipV6 { + v = v6 + } + + if as != localAddressSpace && as != globalAddressSpace { + return nil, types.NotImplementedErrorf("no default pool available for non-default address spaces") + } + + aSpace, err := a.getAddrSpace(as) + if err != nil { + return nil, err + } + + for _, nw := range a.getPredefineds(as) { + if v != getAddressVersion(nw.IP) { + continue + } + aSpace.Lock() + if _, ok := aSpace.subnets[SubnetKey{AddressSpace: as, Subnet: nw.String()}]; ok { + aSpace.Unlock() + continue + } + if !aSpace.contains(as, nw) { + aSpace.Unlock() + return nw, nil + } + aSpace.Unlock() + } + + return nil, types.NotFoundErrorf("could not find an available, non-overlapping IPv%d address pool among the defaults to assign to the network", v) +} + +// RequestAddress returns an address from the specified pool ID +func (a 
*Allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { + logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) + k := SubnetKey{} + if err := k.FromString(poolID); err != nil { + return nil, nil, types.BadRequestErrorf("invalid pool id: %s", poolID) + } + + if err := a.refresh(k.AddressSpace); err != nil { + return nil, nil, err + } + + aSpace, err := a.getAddrSpace(k.AddressSpace) + if err != nil { + return nil, nil, err + } + + aSpace.Lock() + p, ok := aSpace.subnets[k] + if !ok { + aSpace.Unlock() + return nil, nil, types.NotFoundErrorf("cannot find address pool for poolID:%s", poolID) + } + + if prefAddress != nil && !p.Pool.Contains(prefAddress) { + aSpace.Unlock() + return nil, nil, ipamapi.ErrIPOutOfRange + } + + c := p + for c.Range != nil { + k = c.ParentKey + c = aSpace.subnets[k] + } + aSpace.Unlock() + + bm, err := a.retrieveBitmask(k, c.Pool) + if err != nil { + return nil, nil, types.InternalErrorf("could not find bitmask in datastore for %s on address %v request from pool %s: %v", + k.String(), prefAddress, poolID, err) + } + // In order to request for a serial ip address allocation, callers can pass in the option to request + // IP allocation serially or first available IP in the subnet + var serial bool + if opts != nil { + if val, ok := opts[ipamapi.AllocSerialPrefix]; ok { + serial = (val == "true") + } + } + ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range, serial) + if err != nil { + return nil, nil, err + } + + return &net.IPNet{IP: ip, Mask: p.Pool.Mask}, nil, nil +} + +// ReleaseAddress releases the address from the specified pool ID +func (a *Allocator) ReleaseAddress(poolID string, address net.IP) error { + logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address) + k := SubnetKey{} + if err := k.FromString(poolID); err != nil { + return types.BadRequestErrorf("invalid pool id: %s", poolID) + } + + if err := a.refresh(k.AddressSpace); err != nil { + return err + } + + aSpace, err := a.getAddrSpace(k.AddressSpace) + if err != nil { + return err + } + + aSpace.Lock() + p, ok := aSpace.subnets[k] + if !ok { + aSpace.Unlock() + return types.NotFoundErrorf("cannot find address pool for poolID:%s", poolID) + } + + if address == nil { + aSpace.Unlock() + return types.BadRequestErrorf("invalid address: nil") + } + + if !p.Pool.Contains(address) { + aSpace.Unlock() + return ipamapi.ErrIPOutOfRange + } + + c := p + for c.Range != nil { + k = c.ParentKey + c = aSpace.subnets[k] + } + aSpace.Unlock() + + mask := p.Pool.Mask + + h, err := types.GetHostPartIP(address, mask) + if err != nil { + return types.InternalErrorf("failed to release address %s: %v", address.String(), err) + } + + bm, err := a.retrieveBitmask(k, c.Pool) + if err != nil { + return types.InternalErrorf("could not find bitmask in datastore for %s on address %v release from pool %s: %v", + k.String(), address, poolID, err) + } + + return bm.Unset(ipToUint64(h)) +} + +func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange, serial bool) (net.IP, error) { + var ( + ordinal uint64 + err error + base *net.IPNet + ) + + base = types.GetIPNetCopy(nw) + + if bitmask.Unselected() <= 0 { + return nil, ipamapi.ErrNoAvailableIPs + } + if ipr == nil && prefAddress == nil { + ordinal, err = bitmask.SetAny(serial) + } else if prefAddress != nil { + hostPart, e := types.GetHostPartIP(prefAddress, base.Mask) + if e != nil { + return nil, types.InternalErrorf("failed to allocate 
requested address %s: %v", prefAddress.String(), e) + } + ordinal = ipToUint64(types.GetMinimalIP(hostPart)) + err = bitmask.Set(ordinal) + } else { + ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End, serial) + } + + switch err { + case nil: + // Convert IP ordinal for this subnet into IP address + return generateAddress(ordinal, base), nil + case bitseq.ErrBitAllocated: + return nil, ipamapi.ErrIPAlreadyAllocated + case bitseq.ErrNoBitAvailable: + return nil, ipamapi.ErrNoAvailableIPs + default: + return nil, err + } +} + +// DumpDatabase dumps the internal info +func (a *Allocator) DumpDatabase() string { + a.Lock() + aspaces := make(map[string]*addrSpace, len(a.addrSpaces)) + orderedAS := make([]string, 0, len(a.addrSpaces)) + for as, aSpace := range a.addrSpaces { + orderedAS = append(orderedAS, as) + aspaces[as] = aSpace + } + a.Unlock() + + sort.Strings(orderedAS) + + var s string + for _, as := range orderedAS { + aSpace := aspaces[as] + s = fmt.Sprintf("\n\n%s Config", as) + aSpace.Lock() + for k, config := range aSpace.subnets { + s += fmt.Sprintf("\n%v: %v", k, config) + if config.Range == nil { + a.retrieveBitmask(k, config.Pool) + } + } + aSpace.Unlock() + } + + s = fmt.Sprintf("%s\n\nBitmasks", s) + for k, bm := range a.addresses { + s += fmt.Sprintf("\n%s: %s", k, bm) + } + + return s +} + +// IsBuiltIn returns true for builtin drivers +func (a *Allocator) IsBuiltIn() bool { + return true +} diff --git a/vendor/github.com/docker/libnetwork/ipam/store.go b/vendor/github.com/docker/libnetwork/ipam/store.go new file mode 100644 index 0000000000..124d585518 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipam/store.go @@ -0,0 +1,136 @@ +package ipam + +import ( + "encoding/json" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// Key provides the Key to be used in KV Store +func (aSpace *addrSpace) Key() []string { + aSpace.Lock() + defer aSpace.Unlock() + return []string{aSpace.id} +} + +// KeyPrefix returns the immediate parent key that can be used for tree walk +func (aSpace *addrSpace) KeyPrefix() []string { + aSpace.Lock() + defer aSpace.Unlock() + return []string{dsConfigKey} +} + +// Value marshals the data to be stored in the KV store +func (aSpace *addrSpace) Value() []byte { + b, err := json.Marshal(aSpace) + if err != nil { + logrus.Warnf("Failed to marshal ipam configured pools: %v", err) + return nil + } + return b +} + +// SetValue unmarshalls the data from the KV store. +func (aSpace *addrSpace) SetValue(value []byte) error { + rc := &addrSpace{subnets: make(map[SubnetKey]*PoolData)} + if err := json.Unmarshal(value, rc); err != nil { + return err + } + aSpace.subnets = rc.subnets + return nil +} + +// Index returns the latest DB Index as seen by this object +func (aSpace *addrSpace) Index() uint64 { + aSpace.Lock() + defer aSpace.Unlock() + return aSpace.dbIndex +} + +// SetIndex method allows the datastore to store the latest DB Index into this object +func (aSpace *addrSpace) SetIndex(index uint64) { + aSpace.Lock() + aSpace.dbIndex = index + aSpace.dbExists = true + aSpace.Unlock() +} + +// Exists method is true if this object has been stored in the DB. 
+func (aSpace *addrSpace) Exists() bool { + aSpace.Lock() + defer aSpace.Unlock() + return aSpace.dbExists +} + +// Skip provides a way for a KV Object to avoid persisting it in the KV Store +func (aSpace *addrSpace) Skip() bool { + return false +} + +func (a *Allocator) getStore(as string) datastore.DataStore { + a.Lock() + defer a.Unlock() + + if aSpace, ok := a.addrSpaces[as]; ok { + return aSpace.ds + } + + return nil +} + +func (a *Allocator) getAddressSpaceFromStore(as string) (*addrSpace, error) { + store := a.getStore(as) + + // IPAM may not have a valid store. In such cases it is just in-memory state. + if store == nil { + return nil, nil + } + + pc := &addrSpace{id: dsConfigKey + "/" + as, ds: store, alloc: a} + if err := store.GetObject(datastore.Key(pc.Key()...), pc); err != nil { + if err == datastore.ErrKeyNotFound { + return nil, nil + } + + return nil, types.InternalErrorf("could not get pools config from store: %v", err) + } + + return pc, nil +} + +func (a *Allocator) writeToStore(aSpace *addrSpace) error { + store := aSpace.store() + + // IPAM may not have a valid store. In such cases it is just in-memory state. + if store == nil { + return nil + } + + err := store.PutObjectAtomic(aSpace) + if err == datastore.ErrKeyModified { + return types.RetryErrorf("failed to perform atomic write (%v). retry might fix the error", err) + } + + return err +} + +func (a *Allocator) deleteFromStore(aSpace *addrSpace) error { + store := aSpace.store() + + // IPAM may not have a valid store. In such cases it is just in-memory state. + if store == nil { + return nil + } + + return store.DeleteObjectAtomic(aSpace) +} + +// DataScope method returns the storage scope of the datastore +func (aSpace *addrSpace) DataScope() string { + aSpace.Lock() + defer aSpace.Unlock() + + return aSpace.scope +} diff --git a/vendor/github.com/docker/libnetwork/ipam/structures.go b/vendor/github.com/docker/libnetwork/ipam/structures.go new file mode 100644 index 0000000000..09a77695dd --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipam/structures.go @@ -0,0 +1,362 @@ +package ipam + +import ( + "encoding/json" + "fmt" + "net" + "strings" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/types" +) + +// SubnetKey is the pointer to the configured pools in each address space +type SubnetKey struct { + AddressSpace string + Subnet string + ChildSubnet string +} + +// PoolData contains the configured pool data +type PoolData struct { + ParentKey SubnetKey + Pool *net.IPNet + Range *AddressRange `json:",omitempty"` + RefCount int +} + +// addrSpace contains the pool configurations for the address space +type addrSpace struct { + subnets map[SubnetKey]*PoolData + dbIndex uint64 + dbExists bool + id string + scope string + ds datastore.DataStore + alloc *Allocator + sync.Mutex +} + +// AddressRange specifies first and last ip ordinal which +// identifies a range in a pool of addresses +type AddressRange struct { + Sub *net.IPNet + Start, End uint64 +} + +// String returns the string form of the AddressRange object +func (r *AddressRange) String() string { + return fmt.Sprintf("Sub: %s, range [%d, %d]", r.Sub, r.Start, r.End) +} + +// MarshalJSON returns the JSON encoding of the Range object +func (r *AddressRange) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "Sub": r.Sub.String(), + "Start": r.Start, + "End": r.End, + } + return json.Marshal(m) +} + +// UnmarshalJSON decodes data into the Range object +func (r 
*AddressRange) UnmarshalJSON(data []byte) error { + m := map[string]interface{}{} + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + if r.Sub, err = types.ParseCIDR(m["Sub"].(string)); err != nil { + return err + } + r.Start = uint64(m["Start"].(float64)) + r.End = uint64(m["End"].(float64)) + return nil +} + +// String returns the string form of the SubnetKey object +func (s *SubnetKey) String() string { + k := fmt.Sprintf("%s/%s", s.AddressSpace, s.Subnet) + if s.ChildSubnet != "" { + k = fmt.Sprintf("%s/%s", k, s.ChildSubnet) + } + return k +} + +// FromString populates the SubnetKey object reading it from string +func (s *SubnetKey) FromString(str string) error { + if str == "" || !strings.Contains(str, "/") { + return types.BadRequestErrorf("invalid string form for subnetkey: %s", str) + } + + p := strings.Split(str, "/") + if len(p) != 3 && len(p) != 5 { + return types.BadRequestErrorf("invalid string form for subnetkey: %s", str) + } + s.AddressSpace = p[0] + s.Subnet = fmt.Sprintf("%s/%s", p[1], p[2]) + if len(p) == 5 { + s.ChildSubnet = fmt.Sprintf("%s/%s", p[3], p[4]) + } + + return nil +} + +// String returns the string form of the PoolData object +func (p *PoolData) String() string { + return fmt.Sprintf("ParentKey: %s, Pool: %s, Range: %s, RefCount: %d", + p.ParentKey.String(), p.Pool.String(), p.Range, p.RefCount) +} + +// MarshalJSON returns the JSON encoding of the PoolData object +func (p *PoolData) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "ParentKey": p.ParentKey, + "RefCount": p.RefCount, + } + if p.Pool != nil { + m["Pool"] = p.Pool.String() + } + if p.Range != nil { + m["Range"] = p.Range + } + return json.Marshal(m) +} + +// UnmarshalJSON decodes data into the PoolData object +func (p *PoolData) UnmarshalJSON(data []byte) error { + var ( + err error + t struct { + ParentKey SubnetKey + Pool string + Range *AddressRange `json:",omitempty"` + RefCount int + } + ) + + if err = json.Unmarshal(data, &t); err != nil { + return err + } + + p.ParentKey = t.ParentKey + p.Range = t.Range + p.RefCount = t.RefCount + if t.Pool != "" { + if p.Pool, err = types.ParseCIDR(t.Pool); err != nil { + return err + } + } + + return nil +} + +// MarshalJSON returns the JSON encoding of the addrSpace object +func (aSpace *addrSpace) MarshalJSON() ([]byte, error) { + aSpace.Lock() + defer aSpace.Unlock() + + m := map[string]interface{}{ + "Scope": string(aSpace.scope), + } + + if aSpace.subnets != nil { + s := map[string]*PoolData{} + for k, v := range aSpace.subnets { + s[k.String()] = v + } + m["Subnets"] = s + } + + return json.Marshal(m) +} + +// UnmarshalJSON decodes data into the addrSpace object +func (aSpace *addrSpace) UnmarshalJSON(data []byte) error { + aSpace.Lock() + defer aSpace.Unlock() + + m := map[string]interface{}{} + err := json.Unmarshal(data, &m) + if err != nil { + return err + } + + aSpace.scope = datastore.LocalScope + s := m["Scope"].(string) + if s == string(datastore.GlobalScope) { + aSpace.scope = datastore.GlobalScope + } + + if v, ok := m["Subnets"]; ok { + sb, _ := json.Marshal(v) + var s map[string]*PoolData + err := json.Unmarshal(sb, &s) + if err != nil { + return err + } + for ks, v := range s { + k := SubnetKey{} + k.FromString(ks) + aSpace.subnets[k] = v + } + } + + return nil +} + +// CopyTo deep copies the pool data to the destination pooldata +func (p *PoolData) CopyTo(dstP *PoolData) error { + dstP.ParentKey = p.ParentKey + dstP.Pool = types.GetIPNetCopy(p.Pool) + + if p.Range != nil { + dstP.Range = 
&AddressRange{} + dstP.Range.Sub = types.GetIPNetCopy(p.Range.Sub) + dstP.Range.Start = p.Range.Start + dstP.Range.End = p.Range.End + } + + dstP.RefCount = p.RefCount + return nil +} + +func (aSpace *addrSpace) CopyTo(o datastore.KVObject) error { + aSpace.Lock() + defer aSpace.Unlock() + + dstAspace := o.(*addrSpace) + + dstAspace.id = aSpace.id + dstAspace.ds = aSpace.ds + dstAspace.alloc = aSpace.alloc + dstAspace.scope = aSpace.scope + dstAspace.dbIndex = aSpace.dbIndex + dstAspace.dbExists = aSpace.dbExists + + dstAspace.subnets = make(map[SubnetKey]*PoolData) + for k, v := range aSpace.subnets { + dstAspace.subnets[k] = &PoolData{} + v.CopyTo(dstAspace.subnets[k]) + } + + return nil +} + +func (aSpace *addrSpace) New() datastore.KVObject { + aSpace.Lock() + defer aSpace.Unlock() + + return &addrSpace{ + id: aSpace.id, + ds: aSpace.ds, + alloc: aSpace.alloc, + scope: aSpace.scope, + } +} + +func (aSpace *addrSpace) updatePoolDBOnAdd(k SubnetKey, nw *net.IPNet, ipr *AddressRange, pdf bool) (func() error, error) { + aSpace.Lock() + defer aSpace.Unlock() + + // Check if already allocated + if p, ok := aSpace.subnets[k]; ok { + if pdf { + return nil, types.InternalMaskableErrorf("predefined pool %s is already reserved", nw) + } + aSpace.incRefCount(p, 1) + return func() error { return nil }, nil + } + + // If master pool, check for overlap + if ipr == nil { + if aSpace.contains(k.AddressSpace, nw) { + return nil, ipamapi.ErrPoolOverlap + } + // This is a new master pool, add it along with corresponding bitmask + aSpace.subnets[k] = &PoolData{Pool: nw, RefCount: 1} + return func() error { return aSpace.alloc.insertBitMask(k, nw) }, nil + } + + // This is a new non-master pool + p := &PoolData{ + ParentKey: SubnetKey{AddressSpace: k.AddressSpace, Subnet: k.Subnet}, + Pool: nw, + Range: ipr, + RefCount: 1, + } + aSpace.subnets[k] = p + + // Look for parent pool + pp, ok := aSpace.subnets[p.ParentKey] + if ok { + aSpace.incRefCount(pp, 1) + return func() error { return nil }, nil + } + + // Parent pool does not exist, add it along with corresponding bitmask + aSpace.subnets[p.ParentKey] = &PoolData{Pool: nw, RefCount: 1} + return func() error { return aSpace.alloc.insertBitMask(p.ParentKey, nw) }, nil +} + +func (aSpace *addrSpace) updatePoolDBOnRemoval(k SubnetKey) (func() error, error) { + aSpace.Lock() + defer aSpace.Unlock() + + p, ok := aSpace.subnets[k] + if !ok { + return nil, ipamapi.ErrBadPool + } + + aSpace.incRefCount(p, -1) + + c := p + for ok { + if c.RefCount == 0 { + delete(aSpace.subnets, k) + if c.Range == nil { + return func() error { + bm, err := aSpace.alloc.retrieveBitmask(k, c.Pool) + if err != nil { + return types.InternalErrorf("could not find bitmask in datastore for pool %s removal: %v", k.String(), err) + } + return bm.Destroy() + }, nil + } + } + k = c.ParentKey + c, ok = aSpace.subnets[k] + } + + return func() error { return nil }, nil +} + +func (aSpace *addrSpace) incRefCount(p *PoolData, delta int) { + c := p + ok := true + for ok { + c.RefCount += delta + c, ok = aSpace.subnets[c.ParentKey] + } +} + +// Checks whether the passed subnet is a superset or subset of any of the subnets in this config db +func (aSpace *addrSpace) contains(space string, nw *net.IPNet) bool { + for k, v := range aSpace.subnets { + if space == k.AddressSpace && k.ChildSubnet == "" { + if nw.Contains(v.Pool.IP) || v.Pool.Contains(nw.IP) { + return true + } + } + } + return false +} + +func (aSpace *addrSpace) store() datastore.DataStore { + aSpace.Lock() + defer aSpace.Unlock() + 
return aSpace.ds +} diff --git a/vendor/github.com/docker/libnetwork/ipam/utils.go b/vendor/github.com/docker/libnetwork/ipam/utils.go new file mode 100644 index 0000000000..5117c55cc7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipam/utils.go @@ -0,0 +1,81 @@ +package ipam + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/types" +) + +type ipVersion int + +const ( + v4 = 4 + v6 = 6 ) — +) + +func getAddressRange(pool string, masterNw *net.IPNet) (*AddressRange, error) { + ip, nw, err := net.ParseCIDR(pool) + if err != nil { + return nil, ipamapi.ErrInvalidSubPool + } + lIP, e := types.GetHostPartIP(nw.IP, masterNw.Mask) + if e != nil { + return nil, fmt.Errorf("failed to compute range's lowest ip address: %v", e) + } + bIP, e := types.GetBroadcastIP(nw.IP, nw.Mask) + if e != nil { + return nil, fmt.Errorf("failed to compute range's broadcast ip address: %v", e) + } + hIP, e := types.GetHostPartIP(bIP, masterNw.Mask) + if e != nil { + return nil, fmt.Errorf("failed to compute range's highest ip address: %v", e) + } + nw.IP = ip + return &AddressRange{nw, ipToUint64(types.GetMinimalIP(lIP)), ipToUint64(types.GetMinimalIP(hIP))}, nil +} + +// generateAddress generates the ip address in the passed subnet, specified by +// the passed host address ordinal +func generateAddress(ordinal uint64, network *net.IPNet) net.IP { + var address [16]byte + + // Get network portion of IP + if getAddressVersion(network.IP) == v4 { + copy(address[:], network.IP.To4()) + } else { + copy(address[:], network.IP) + } + + end := len(network.Mask) + addIntToIP(address[:end], ordinal) + + return net.IP(address[:end]) +} + +func getAddressVersion(ip net.IP) ipVersion { + if ip.To4() == nil { + return v6 + } + return v4 +} + +// addIntToIP adds the ordinal to the current IP array +// 192.168.0.0 + 53 => 192.168.0.53 +func addIntToIP(array []byte, ordinal uint64) { + for i := len(array) - 1; i >= 0; i-- { + array[i] |= (byte)(ordinal & 0xff) + ordinal >>= 8 + } +} + +// ipToUint64 converts an IP address to its respective ordinal value +func ipToUint64(ip []byte) (value uint64) { + cip := types.GetMinimalIP(ip) + for i := 0; i < len(cip); i++ { + j := len(cip) - 1 - i + value += uint64(cip[i]) << uint(j*8) + } + return value +} diff --git a/vendor/github.com/docker/libnetwork/ipamapi/contract.go b/vendor/github.com/docker/libnetwork/ipamapi/contract.go new file mode 100644 index 0000000000..7f967863d8 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipamapi/contract.go @@ -0,0 +1,96 @@ +// Package ipamapi specifies the contract the IPAM service (built-in or remote) needs to satisfy. +package ipamapi + +import ( + "net" + + "github.com/docker/docker/pkg/plugingetter" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/types" +) + +/******************** + * IPAM plugin types + ********************/ + +const ( + // DefaultIPAM is the name of the built-in default ipam driver + DefaultIPAM = "default" + // NullIPAM is the name of the built-in null ipam driver + NullIPAM = "null" + // PluginEndpointType represents the Endpoint Type used by Plugin system + PluginEndpointType = "IpamDriver" + // RequestAddressType represents the Address Type used when requesting an address + RequestAddressType = "RequestAddressType" +) + +// Callback provides a Callback interface for registering an IPAM instance into LibNetwork +type Callback interface { + // GetPluginGetter returns the pluginv2 getter. 
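+ // (The remote ipams package below uses this getter during its Init to + // discover already-active managed IPAM plugins.)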
+ GetPluginGetter() plugingetter.PluginGetter + // RegisterIpamDriver provides a way for Remote drivers to dynamically register with libnetwork + RegisterIpamDriver(name string, driver Ipam) error + // RegisterIpamDriverWithCapabilities provides a way for Remote drivers to dynamically register with libnetwork and specify capabilities + RegisterIpamDriverWithCapabilities(name string, driver Ipam, capability *Capability) error +} + +/************** + * IPAM Errors + **************/ + +// Well-known errors returned by IPAM +var ( + ErrIpamInternalError = types.InternalErrorf("IPAM Internal Error") + ErrInvalidAddressSpace = types.BadRequestErrorf("Invalid Address Space") + ErrInvalidPool = types.BadRequestErrorf("Invalid Address Pool") + ErrInvalidSubPool = types.BadRequestErrorf("Invalid Address SubPool") + ErrInvalidRequest = types.BadRequestErrorf("Invalid Request") + ErrPoolNotFound = types.BadRequestErrorf("Address Pool not found") + ErrOverlapPool = types.ForbiddenErrorf("Address pool overlaps with existing pool on this address space") + ErrNoAvailablePool = types.NoServiceErrorf("No available pool") + ErrNoAvailableIPs = types.NoServiceErrorf("No available addresses on this pool") + ErrNoIPReturned = types.NoServiceErrorf("No address returned") + ErrIPAlreadyAllocated = types.ForbiddenErrorf("Address already in use") + ErrIPOutOfRange = types.BadRequestErrorf("Requested address is out of range") + ErrPoolOverlap = types.ForbiddenErrorf("Pool overlaps with other one on this address space") + ErrBadPool = types.BadRequestErrorf("Address space does not contain specified address pool") +) + +/******************************* + * IPAM Service Interface + *******************************/ + +// Ipam represents the interface the IPAM service plugins must implement +// in order to allow injection/modification of IPAM database. +type Ipam interface { + discoverapi.Discover + + // GetDefaultAddressSpaces returns the default local and global address spaces for this ipam + GetDefaultAddressSpaces() (string, string, error) + // RequestPool returns an address pool along with its unique id. Address space is a mandatory field + // which denotes a set of non-overlapping pools. pool describes the pool of addresses in CIDR notation. + // subpool indicates a smaller range of addresses from the pool, for now it is specified in CIDR notation. + // Both pool and subpool are non-mandatory fields. When they are not specified, the IPAM driver may choose to + // return a self-chosen pool for this request. In such a case the v6 flag needs to be set appropriately so + // that the driver returns a pool of the expected ip version. + RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) + // ReleasePool releases the address pool identified by the passed id + ReleasePool(poolID string) error + // Request address from the specified pool ID. Input options or required IP can be passed. + RequestAddress(string, net.IP, map[string]string) (*net.IPNet, map[string]string, error) + // Release the address from the specified pool ID + ReleaseAddress(string, net.IP) error + + // IsBuiltIn returns true if it is a built-in driver. 
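+ // + // An illustrative call sequence against any implementation of this contract + // (a hedged sketch; the pool values are hypothetical, not part of the contract): + // + //   local, _, _ := d.GetDefaultAddressSpaces() + //   poolID, _, _, _ := d.RequestPool(local, "10.1.0.0/16", "", nil, false) + //   addr, _, _ := d.RequestAddress(poolID, nil, nil) + //   _ = d.ReleaseAddress(poolID, addr.IP) + //   _ = d.ReleasePool(poolID) + //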
+ IsBuiltIn() bool +} + +// Capability represents the requirements and capabilities of the IPAM driver +type Capability struct { + // Whether on address request, libnetwork must + // specify the endpoint MAC address + RequiresMACAddress bool + // Whether on daemon start, libnetwork must replay the pool + // request and the address request for current local networks + RequiresRequestReplay bool +} diff --git a/vendor/github.com/docker/libnetwork/ipamapi/labels.go b/vendor/github.com/docker/libnetwork/ipamapi/labels.go new file mode 100644 index 0000000000..e5c7d1cc7e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipamapi/labels.go @@ -0,0 +1,10 @@ +package ipamapi + +const ( + // Prefix constant marks the reserved label space for libnetwork + Prefix = "com.docker.network" + + // AllocSerialPrefix constant marks the reserved label space for libnetwork ipam + // allocation ordering (serial/first available) + AllocSerialPrefix = Prefix + ".ipam.serial" +) diff --git a/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go new file mode 100644 index 0000000000..0ab861d170 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go @@ -0,0 +1,58 @@ +// +build linux freebsd darwin + +package builtin + +import ( + "errors" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/ipam" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/ipamutils" +) + +var ( + // defaultAddressPool stores the user-configured subnet list + defaultAddressPool []*ipamutils.NetworkToSplit +) + +// Init registers the built-in ipam service with libnetwork +func Init(ic ipamapi.Callback, l, g interface{}) error { + var ( + ok bool + localDs, globalDs datastore.DataStore + ) + + if l != nil { + if localDs, ok = l.(datastore.DataStore); !ok { + return errors.New("incorrect local datastore passed to built-in ipam init") + } + } + + if g != nil { + if globalDs, ok = g.(datastore.DataStore); !ok { + return errors.New("incorrect global datastore passed to built-in ipam init") + } + } + + ipamutils.InitNetworks(GetDefaultIPAddressPool()) + + a, err := ipam.NewAllocator(localDs, globalDs) + if err != nil { + return err + } + + cps := &ipamapi.Capability{RequiresRequestReplay: true} + + return ic.RegisterIpamDriverWithCapabilities(ipamapi.DefaultIPAM, a, cps) +} + +// SetDefaultIPAddressPool stores default address pool. +func SetDefaultIPAddressPool(addressPool []*ipamutils.NetworkToSplit) { + defaultAddressPool = addressPool +} + +// GetDefaultIPAddressPool returns default address pool. 
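+// A minimal usage sketch (the 10.50.0.0/16 range is a hypothetical example): +// the daemon is expected to seed the pool via SetDefaultIPAddressPool before +// Init runs, e.g. +// +//   builtin.SetDefaultIPAddressPool([]*ipamutils.NetworkToSplit{ +//       {Base: "10.50.0.0/16", Size: 24}, +//   }) +//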
+func GetDefaultIPAddressPool() []*ipamutils.NetworkToSplit { + return defaultAddressPool +} diff --git a/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go new file mode 100644 index 0000000000..e994595658 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go @@ -0,0 +1,72 @@ +// +build windows + +package builtin + +import ( + "errors" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/ipam" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/ipamutils" + + windowsipam "github.com/docker/libnetwork/ipams/windowsipam" +) + +var ( + // defaultAddressPool stores the user-configured subnet list + defaultAddressPool []*ipamutils.NetworkToSplit +) + +// InitDockerDefault registers the built-in ipam service with libnetwork +func InitDockerDefault(ic ipamapi.Callback, l, g interface{}) error { + var ( + ok bool + localDs, globalDs datastore.DataStore + ) + + if l != nil { + if localDs, ok = l.(datastore.DataStore); !ok { + return errors.New("incorrect local datastore passed to built-in ipam init") + } + } + + if g != nil { + if globalDs, ok = g.(datastore.DataStore); !ok { + return errors.New("incorrect global datastore passed to built-in ipam init") + } + } + + ipamutils.InitNetworks(nil) + + a, err := ipam.NewAllocator(localDs, globalDs) + if err != nil { + return err + } + + cps := &ipamapi.Capability{RequiresRequestReplay: true} + + return ic.RegisterIpamDriverWithCapabilities(ipamapi.DefaultIPAM, a, cps) +} + +// Init registers the built-in ipam service with libnetwork +func Init(ic ipamapi.Callback, l, g interface{}) error { + initFunc := windowsipam.GetInit(windowsipam.DefaultIPAM) + + err := InitDockerDefault(ic, l, g) + if err != nil { + return err + } + + return initFunc(ic, l, g) +} + +// SetDefaultIPAddressPool stores default address pool. +func SetDefaultIPAddressPool(addressPool []*ipamutils.NetworkToSplit) { + defaultAddressPool = addressPool +} + +// GetDefaultIPAddressPool returns default address pool. +func GetDefaultIPAddressPool() []*ipamutils.NetworkToSplit { + return defaultAddressPool +} diff --git a/vendor/github.com/docker/libnetwork/ipams/null/null.go b/vendor/github.com/docker/libnetwork/ipams/null/null.go new file mode 100644 index 0000000000..339b5308d1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/null/null.go @@ -0,0 +1,75 @@ +// Package null implements the null ipam driver. 
The null ipam driver satisfies the ipamapi contract, +// but does not effectively reserve/allocate any address pool or address +package null + +import ( + "fmt" + "net" + + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/types" +) + +var ( + defaultAS = "null" + defaultPool, _ = types.ParseCIDR("0.0.0.0/0") + defaultPoolID = fmt.Sprintf("%s/%s", defaultAS, defaultPool.String()) +) + +type allocator struct{} + +func (a *allocator) GetDefaultAddressSpaces() (string, string, error) { + return defaultAS, defaultAS, nil +} + +func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + if addressSpace != defaultAS { + return "", nil, nil, types.BadRequestErrorf("unknown address space: %s", addressSpace) + } + if pool != "" { + return "", nil, nil, types.BadRequestErrorf("null ipam driver does not handle specific address pool requests") + } + if subPool != "" { + return "", nil, nil, types.BadRequestErrorf("null ipam driver does not handle specific address subpool requests") + } + if v6 { + return "", nil, nil, types.BadRequestErrorf("null ipam driver does not handle IPv6 address pool requests") + } + return defaultPoolID, defaultPool, nil, nil +} + +func (a *allocator) ReleasePool(poolID string) error { + return nil +} + +func (a *allocator) RequestAddress(poolID string, ip net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { + if poolID != defaultPoolID { + return nil, nil, types.BadRequestErrorf("unknown pool id: %s", poolID) + } + return nil, nil, nil +} + +func (a *allocator) ReleaseAddress(poolID string, ip net.IP) error { + if poolID != defaultPoolID { + return types.BadRequestErrorf("unknown pool id: %s", poolID) + } + return nil +} + +func (a *allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *allocator) IsBuiltIn() bool { + return true +} + +// Init registers the null ipam driver with libnetwork +func Init(ic ipamapi.Callback, l, g interface{}) error { + return ic.RegisterIpamDriver(ipamapi.NullIPAM, &allocator{}) +} diff --git a/vendor/github.com/docker/libnetwork/ipams/remote/api/api.go b/vendor/github.com/docker/libnetwork/ipams/remote/api/api.go new file mode 100644 index 0000000000..543c99bb00 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/remote/api/api.go @@ -0,0 +1,94 @@ +// Package api defines the data structure to be used in the request/response +// messages between libnetwork and the remote ipam plugin +package api + +import "github.com/docker/libnetwork/ipamapi" + +// Response is the basic response structure used in all responses +type Response struct { + Error string +} + +// IsSuccess returns whether the plugin response is successful +func (r *Response) IsSuccess() bool { + return r.Error == "" +} + +// GetError returns the error from the response, if any. 
+func (r *Response) GetError() string { + return r.Error +} + +// GetCapabilityResponse is the response of GetCapability request +type GetCapabilityResponse struct { + Response + RequiresMACAddress bool + RequiresRequestReplay bool +} + +// ToCapability converts the capability response into the internal ipam driver capability structure +func (capRes GetCapabilityResponse) ToCapability() *ipamapi.Capability { + return &ipamapi.Capability{ + RequiresMACAddress: capRes.RequiresMACAddress, + RequiresRequestReplay: capRes.RequiresRequestReplay, + } +} + +// GetAddressSpacesResponse is the response to the ``get default address spaces`` request message +type GetAddressSpacesResponse struct { + Response + LocalDefaultAddressSpace string + GlobalDefaultAddressSpace string +} + +// RequestPoolRequest represents the expected data in a ``request address pool`` request message +type RequestPoolRequest struct { + AddressSpace string + Pool string + SubPool string + Options map[string]string + V6 bool +} + +// RequestPoolResponse represents the response message to a ``request address pool`` request +type RequestPoolResponse struct { + Response + PoolID string + Pool string // CIDR format + Data map[string]string +} + +// ReleasePoolRequest represents the expected data in a ``release address pool`` request message +type ReleasePoolRequest struct { + PoolID string +} + +// ReleasePoolResponse represents the response message to a ``release address pool`` request +type ReleasePoolResponse struct { + Response +} + +// RequestAddressRequest represents the expected data in a ``request address`` request message +type RequestAddressRequest struct { + PoolID string + Address string + Options map[string]string +} + +// RequestAddressResponse represents the expected data in the response message to a ``request address`` request +type RequestAddressResponse struct { + Response + Address string // in CIDR format + Data map[string]string +} + +// ReleaseAddressRequest represents the expected data in a ``release address`` request message +type ReleaseAddressRequest struct { + PoolID string + Address string +} + +// ReleaseAddressResponse represents the response message to a ``release address`` request +type ReleaseAddressResponse struct { + Response +} diff --git a/vendor/github.com/docker/libnetwork/ipams/remote/remote.go b/vendor/github.com/docker/libnetwork/ipams/remote/remote.go new file mode 100644 index 0000000000..9f2f1d5fed --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/remote/remote.go @@ -0,0 +1,155 @@ +package remote + +import ( + "fmt" + "net" + + "github.com/docker/docker/pkg/plugins" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/ipams/remote/api" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +type allocator struct { + endpoint *plugins.Client + name string +} + +// PluginResponse is the interface for the plugin request responses +type PluginResponse interface { + IsSuccess() bool + GetError() string +} + +func newAllocator(name string, client *plugins.Client) ipamapi.Ipam { + a := &allocator{name: name, endpoint: client} + return a +} + +// Init registers a remote ipam when its plugin is activated +func Init(cb ipamapi.Callback, l, g interface{}) error { + + newPluginHandler := func(name string, client *plugins.Client) { + a := newAllocator(name, client) + if cps, err := a.(*allocator).getCapabilities(); err == nil { + if err := cb.RegisterIpamDriverWithCapabilities(name, a, cps); err != nil { 
+ logrus.Errorf("error registering remote ipam driver %s due to %v", name, err) + } + } else { + logrus.Infof("remote ipam driver %s does not support capabilities", name) + logrus.Debug(err) + if err := cb.RegisterIpamDriver(name, a); err != nil { + logrus.Errorf("error registering remote ipam driver %s due to %v", name, err) + } + } + } + + // Unit test code is unaware of a true PluginStore. So we fall back to v1 plugins. + handleFunc := plugins.Handle + if pg := cb.GetPluginGetter(); pg != nil { + handleFunc = pg.Handle + activePlugins := pg.GetAllManagedPluginsByCap(ipamapi.PluginEndpointType) + for _, ap := range activePlugins { + newPluginHandler(ap.Name(), ap.Client()) + } + } + handleFunc(ipamapi.PluginEndpointType, newPluginHandler) + return nil +} + +func (a *allocator) call(methodName string, arg interface{}, retVal PluginResponse) error { + method := ipamapi.PluginEndpointType + "." + methodName + err := a.endpoint.Call(method, arg, retVal) + if err != nil { + return err + } + if !retVal.IsSuccess() { + return fmt.Errorf("remote: %s", retVal.GetError()) + } + return nil +} + +func (a *allocator) getCapabilities() (*ipamapi.Capability, error) { + var res api.GetCapabilityResponse + if err := a.call("GetCapabilities", nil, &res); err != nil { + return nil, err + } + return res.ToCapability(), nil +} + +// GetDefaultAddressSpaces returns the local and global default address spaces +func (a *allocator) GetDefaultAddressSpaces() (string, string, error) { + res := &api.GetAddressSpacesResponse{} + if err := a.call("GetDefaultAddressSpaces", nil, res); err != nil { + return "", "", err + } + return res.LocalDefaultAddressSpace, res.GlobalDefaultAddressSpace, nil +} + +// RequestPool requests an address pool in the specified address space +func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + req := &api.RequestPoolRequest{AddressSpace: addressSpace, Pool: pool, SubPool: subPool, Options: options, V6: v6} + res := &api.RequestPoolResponse{} + if err := a.call("RequestPool", req, res); err != nil { + return "", nil, nil, err + } + retPool, err := types.ParseCIDR(res.Pool) + return res.PoolID, retPool, res.Data, err +} + +// ReleasePool removes an address pool from the specified address space +func (a *allocator) ReleasePool(poolID string) error { + req := &api.ReleasePoolRequest{PoolID: poolID} + res := &api.ReleasePoolResponse{} + return a.call("ReleasePool", req, res) +} + +// RequestAddress requests an address from the address pool +func (a *allocator) RequestAddress(poolID string, address net.IP, options map[string]string) (*net.IPNet, map[string]string, error) { + var ( + prefAddress string + retAddress *net.IPNet + err error + ) + if address != nil { + prefAddress = address.String() + } + req := &api.RequestAddressRequest{PoolID: poolID, Address: prefAddress, Options: options} + res := &api.RequestAddressResponse{} + if err := a.call("RequestAddress", req, res); err != nil { + return nil, nil, err + } + if res.Address != "" { + retAddress, err = types.ParseCIDR(res.Address) + } else { + return nil, nil, ipamapi.ErrNoIPReturned + } + return retAddress, res.Data, err +} + +// ReleaseAddress releases the address from the specified address pool +func (a *allocator) ReleaseAddress(poolID string, address net.IP) error { + var relAddress string + if address != nil { + relAddress = address.String() + } + req := &api.ReleaseAddressRequest{PoolID: poolID, Address: relAddress} + res := 
&api.ReleaseAddressResponse{} + return a.call("ReleaseAddress", req, res) +} + +// DiscoverNew is a notification for a new discovery event, such as a new global datastore +func (a *allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster +func (a *allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *allocator) IsBuiltIn() bool { + return false +} diff --git a/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go b/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go new file mode 100644 index 0000000000..5c7b1f5411 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go @@ -0,0 +1,102 @@ +package windowsipam + +import ( + "net" + + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + localAddressSpace = "LocalDefault" + globalAddressSpace = "GlobalDefault" +) + +// DefaultIPAM defines the default ipam-driver for local-scoped windows networks +const DefaultIPAM = "windows" + +var ( + defaultPool, _ = types.ParseCIDR("0.0.0.0/0") +) + +type allocator struct { +} + +// GetInit registers the built-in ipam service with libnetwork +func GetInit(ipamName string) func(ic ipamapi.Callback, l, g interface{}) error { + return func(ic ipamapi.Callback, l, g interface{}) error { + return ic.RegisterIpamDriver(ipamName, &allocator{}) + } +} + +func (a *allocator) GetDefaultAddressSpaces() (string, string, error) { + return localAddressSpace, globalAddressSpace, nil +} + +// RequestPool returns an address pool along with its unique id. This is a null ipam driver. It allocates the +// subnet user asked and does not validate anything. Doesn't support subpool allocation +func (a *allocator) RequestPool(addressSpace, pool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + logrus.Debugf("RequestPool(%s, %s, %s, %v, %t)", addressSpace, pool, subPool, options, v6) + if subPool != "" || v6 { + return "", nil, nil, types.InternalErrorf("This request is not supported by null ipam driver") + } + + var ipNet *net.IPNet + var err error + + if pool != "" { + _, ipNet, err = net.ParseCIDR(pool) + if err != nil { + return "", nil, nil, err + } + } else { + ipNet = defaultPool + } + + return ipNet.String(), ipNet, nil, nil +} + +// ReleasePool releases the address pool - always succeeds +func (a *allocator) ReleasePool(poolID string) error { + logrus.Debugf("ReleasePool(%s)", poolID) + return nil +} + +// RequestAddress returns an address from the specified pool ID. 
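+// Since the poolID is the pool's own CIDR string (see RequestPool above), a +// preferred address is returned with the pool's mask; e.g. (hypothetical values) +// prefAddress 10.0.0.5 against poolID "10.0.0.0/16" yields 10.0.0.5/16.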
+// If no preferred address was specified, a nil address is returned +func (a *allocator) RequestAddress(poolID string, prefAddress net.IP, opts map[string]string) (*net.IPNet, map[string]string, error) { + logrus.Debugf("RequestAddress(%s, %v, %v)", poolID, prefAddress, opts) + _, ipNet, err := net.ParseCIDR(poolID) + + if err != nil { + return nil, nil, err + } + + if prefAddress != nil { + return &net.IPNet{IP: prefAddress, Mask: ipNet.Mask}, nil, nil + } + + return nil, nil, nil +} + +// ReleaseAddress releases the address - always succeeds +func (a *allocator) ReleaseAddress(poolID string, address net.IP) error { + logrus.Debugf("ReleaseAddress(%s, %v)", poolID, address) + return nil +} + +// DiscoverNew informs the allocator about a new global scope datastore +func (a *allocator) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +// DiscoverDelete is a notification of no interest for the allocator +func (a *allocator) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error { + return nil +} + +func (a *allocator) IsBuiltIn() bool { + return true +} diff --git a/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/docker/libnetwork/ipamutils/utils.go new file mode 100644 index 0000000000..f8eca58e8c --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipamutils/utils.go @@ -0,0 +1,96 @@ +// Package ipamutils provides utility functions for ipam management +package ipamutils + +import ( + "fmt" + "net" + "sync" + + "github.com/sirupsen/logrus" +) + +var ( + // PredefinedBroadNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 + // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGranularNetworks` + PredefinedBroadNetworks []*net.IPNet + // PredefinedGranularNetworks contains a list of 64K IPv4 private networks with host size 8 + // (10.x.x.x/24) which do not overlap with the networks in `PredefinedBroadNetworks` + PredefinedGranularNetworks []*net.IPNet + initNetworksOnce sync.Once + + defaultBroadNetwork = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, + {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, + {"192.168.0.0/16", 20}} + defaultGranularNetwork = []*NetworkToSplit{{"10.0.0.0/8", 24}} +) + +// NetworkToSplit represents a network that has to be split in chunks with mask length Size. +// Each subnet in the set is derived from the Base pool. Base is to be passed +// in CIDR format. 
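+// (splitNetwork below derives 2^(Size-prefix) subnets from each Base.)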
+// Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 +// 10.10.[0-255].0/24 address pools +type NetworkToSplit struct { + Base string `json:"base"` + Size int `json:"size"` +} + +// InitNetworks initializes the broad network pool and the granular network pool +func InitNetworks(defaultAddressPool []*NetworkToSplit) { + initNetworksOnce.Do(func() { + // error ignored; this should never fail + PredefinedGranularNetworks, _ = splitNetworks(defaultGranularNetwork) + if defaultAddressPool == nil { + defaultAddressPool = defaultBroadNetwork + } + var err error + if PredefinedBroadNetworks, err = splitNetworks(defaultAddressPool); err != nil { + logrus.WithError(err).Error("InitAddressPools failed to initialize the default address pool") + } + }) +} + +// splitNetworks takes a slice of networks, splits them accordingly and returns them +func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) { + localPools := make([]*net.IPNet, 0, len(list)) + + for _, p := range list { + _, b, err := net.ParseCIDR(p.Base) + if err != nil { + return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err) + } + ones, _ := b.Mask.Size() + if p.Size <= 0 || p.Size < ones { + return nil, fmt.Errorf("invalid pools size: %d", p.Size) + } + localPools = append(localPools, splitNetwork(p.Size, b)...) + } + return localPools, nil +} + +func splitNetwork(size int, base *net.IPNet) []*net.IPNet { + one, bits := base.Mask.Size() + mask := net.CIDRMask(size, bits) + n := 1 << uint(size-one) + s := uint(bits - size) + list := make([]*net.IPNet, 0, n) + + for i := 0; i < n; i++ { + ip := copyIP(base.IP) + addIntToIP(ip, uint(i<<s)) + list = append(list, &net.IPNet{IP: ip, Mask: mask}) + } + return list +} + +func copyIP(from net.IP) net.IP { + ip := make([]byte, len(from)) + copy(ip, from) + return ip +} + +// addIntToIP adds the ordinal to the IP array, e.g. 192.168.0.0 + 53 => 192.168.0.53 +func addIntToIP(array []byte, ordinal uint) { + for i := len(array) - 1; i >= 0; i-- { + array[i] |= (byte)(ordinal & 0xff) + ordinal >>= 8 + } +} diff --git a/vendor/github.com/docker/libnetwork/iptables/conntrack.go b/vendor/github.com/docker/libnetwork/iptables/conntrack.go new file mode 100644 index 0000000000..08317c33ee --- /dev/null +++ b/vendor/github.com/docker/libnetwork/iptables/conntrack.go @@ -0,0 +1,59 @@ +package iptables + +import ( + "errors" + "net" + "syscall" + + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" +) + +var ( + // ErrConntrackNotConfigurable means that conntrack module is not loaded or does not have the netlink module loaded + ErrConntrackNotConfigurable = errors.New("conntrack is not available") +) + +// IsConntrackProgrammable returns true if the handle supports the NETLINK_NETFILTER family and the base modules are loaded +func IsConntrackProgrammable(nlh *netlink.Handle) bool { + return nlh.SupportsNetlinkFamily(syscall.NETLINK_NETFILTER) +} + +// DeleteConntrackEntries deletes all the conntrack connections on the host for the specified IP +// Returns the number of flows deleted for IPv4 and IPv6, or an error +func DeleteConntrackEntries(nlh *netlink.Handle, ipv4List []net.IP, ipv6List []net.IP) (uint, uint, error) { + if !IsConntrackProgrammable(nlh) { + return 0, 0, ErrConntrackNotConfigurable + } + + var totalIPv4FlowPurged uint + for _, ipAddress := range ipv4List { + flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET, ipAddress) + if err != nil { + logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) + continue + } + totalIPv4FlowPurged += flowPurged + } + + var totalIPv6FlowPurged uint + for _, ipAddress := range ipv6List { + flowPurged, err := purgeConntrackState(nlh, syscall.AF_INET6, ipAddress) + if err != nil { + logrus.Warnf("Failed to delete conntrack state for %s: %v", ipAddress, err) + continue + } + totalIPv6FlowPurged += flowPurged + } + + 
logrus.Debugf("DeleteConntrackEntries purged ipv4:%d, ipv6:%d", totalIPv4FlowPurged, totalIPv6FlowPurged) + return totalIPv4FlowPurged, totalIPv6FlowPurged, nil +} + +func purgeConntrackState(nlh *netlink.Handle, family netlink.InetFamily, ipAddress net.IP) (uint, error) { + filter := &netlink.ConntrackFilter{} + // NOTE: doing the flush using the ipAddress is safe because today there cannot be multiple networks with the same subnet + // so it will not be possible to flush flows that are of other containers + filter.AddIP(netlink.ConntrackNatAnyIP, ipAddress) + return nlh.ConntrackDeleteFilter(netlink.ConntrackTable, family, filter) +} diff --git a/vendor/github.com/docker/libnetwork/iptables/firewalld.go b/vendor/github.com/docker/libnetwork/iptables/firewalld.go new file mode 100644 index 0000000000..c9838d5b65 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/iptables/firewalld.go @@ -0,0 +1,167 @@ +package iptables + +import ( + "fmt" + "strings" + + "github.com/godbus/dbus" + "github.com/sirupsen/logrus" +) + +// IPV defines the table string +type IPV string + +const ( + // Iptables point ipv4 table + Iptables IPV = "ipv4" + // IP6Tables point to ipv6 table + IP6Tables IPV = "ipv6" + // Ebtables point to bridge table + Ebtables IPV = "eb" +) +const ( + dbusInterface = "org.fedoraproject.FirewallD1" + dbusPath = "/org/fedoraproject/FirewallD1" +) + +// Conn is a connection to firewalld dbus endpoint. +type Conn struct { + sysconn *dbus.Conn + sysobj dbus.BusObject + signal chan *dbus.Signal +} + +var ( + connection *Conn + firewalldRunning bool // is Firewalld service running + onReloaded []*func() // callbacks when Firewalld has been reloaded +) + +// FirewalldInit initializes firewalld management code. +func FirewalldInit() error { + var err error + + if connection, err = newConnection(); err != nil { + return fmt.Errorf("Failed to connect to D-Bus system bus: %v", err) + } + firewalldRunning = checkRunning() + if !firewalldRunning { + connection.sysconn.Close() + connection = nil + } + if connection != nil { + go signalHandler() + } + + return nil +} + +// New() establishes a connection to the system bus. +func newConnection() (*Conn, error) { + c := new(Conn) + if err := c.initConnection(); err != nil { + return nil, err + } + + return c, nil +} + +// Innitialize D-Bus connection. +func (c *Conn) initConnection() error { + var err error + + c.sysconn, err = dbus.SystemBus() + if err != nil { + return err + } + + // This never fails, even if the service is not running atm. 
+ c.sysobj = c.sysconn.Object(dbusInterface, dbus.ObjectPath(dbusPath)) + + rule := fmt.Sprintf("type='signal',path='%s',interface='%s',sender='%s',member='Reloaded'", + dbusPath, dbusInterface, dbusInterface) + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) + + rule = fmt.Sprintf("type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged',path='/org/freedesktop/DBus',sender='org.freedesktop.DBus',arg0='%s'", + dbusInterface) + c.sysconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule) + + c.signal = make(chan *dbus.Signal, 10) + c.sysconn.Signal(c.signal) + + return nil +} + +func signalHandler() { + for signal := range connection.signal { + if strings.Contains(signal.Name, "NameOwnerChanged") { + firewalldRunning = checkRunning() + dbusConnectionChanged(signal.Body) + } else if strings.Contains(signal.Name, "Reloaded") { + reloaded() + } + } +} + +func dbusConnectionChanged(args []interface{}) { + name := args[0].(string) + oldOwner := args[1].(string) + newOwner := args[2].(string) + + if name != dbusInterface { + return + } + + if len(newOwner) > 0 { + connectionEstablished() + } else if len(oldOwner) > 0 { + connectionLost() + } +} + +func connectionEstablished() { + reloaded() +} + +func connectionLost() { + // Doesn't do anything for now. Libvirt also doesn't react to this. +} + +// call all callbacks +func reloaded() { + for _, pf := range onReloaded { + (*pf)() + } +} + +// OnReloaded adds a reload callback +func OnReloaded(callback func()) { + for _, pf := range onReloaded { + if pf == &callback { + return + } + } + onReloaded = append(onReloaded, &callback) +} + +// Call some remote method to see whether the service is actually running. +func checkRunning() bool { + var zone string + var err error + + if connection != nil { + err = connection.sysobj.Call(dbusInterface+".getDefaultZone", 0).Store(&zone) + return err == nil + } + return false +} + +// Passthrough method simply passes args through to iptables/ip6tables +func Passthrough(ipv IPV, args ...string) ([]byte, error) { + var output string + logrus.Debugf("Firewalld passthrough: %s, %s", ipv, args) + if err := connection.sysobj.Call(dbusInterface+".direct.passthrough", 0, ipv, args).Store(&output); err != nil { + return nil, err + } + return []byte(output), nil +} diff --git a/vendor/github.com/docker/libnetwork/iptables/iptables.go b/vendor/github.com/docker/libnetwork/iptables/iptables.go new file mode 100644 index 0000000000..50896d935a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/iptables/iptables.go @@ -0,0 +1,575 @@ +package iptables + +import ( + "errors" + "fmt" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/sirupsen/logrus" +) + +// Action signifies the iptables action. +type Action string + +// Policy is the default iptables policy +type Policy string + +// Table refers to Nat, Filter or Mangle. +type Table string + +const ( + // Append appends the rule at the end of the chain. + Append Action = "-A" + // Delete deletes the rule from the chain. + Delete Action = "-D" + // Insert inserts the rule at the top of the chain. + Insert Action = "-I" + // Nat table is used for nat translation rules. + Nat Table = "nat" + // Filter table is used for filter rules. + Filter Table = "filter" + // Mangle table is used for mangling the packet. 
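+ // (Forward below uses the mangle table to program the SCTP checksum-fill rule.)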
+ Mangle Table = "mangle" + // Drop is the default iptables DROP policy + Drop Policy = "DROP" + // Accept is the default iptables ACCEPT policy + Accept Policy = "ACCEPT" +) + +var ( + iptablesPath string + supportsXlock = false + supportsCOpt = false + xLockWaitMsg = "Another app is currently holding the xtables lock" + // used to lock iptables commands if xtables lock is not supported + bestEffortLock sync.Mutex + // ErrIptablesNotFound is returned when the iptables binary is not found. + ErrIptablesNotFound = errors.New("Iptables not found") + initOnce sync.Once +) + +// ChainInfo defines the iptables chain. +type ChainInfo struct { + Name string + Table Table + HairpinMode bool +} + +// ChainError is returned to represent errors during ip table operation. +type ChainError struct { + Chain string + Output []byte +} + +func (e ChainError) Error() string { + return fmt.Sprintf("Error iptables %s: %s", e.Chain, string(e.Output)) +} + +func probe() { + if out, err := exec.Command("modprobe", "-va", "nf_nat").CombinedOutput(); err != nil { + logrus.Warnf("Running modprobe nf_nat failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } + if out, err := exec.Command("modprobe", "-va", "xt_conntrack").CombinedOutput(); err != nil { + logrus.Warnf("Running modprobe xt_conntrack failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } +} + +func initFirewalld() { + if err := FirewalldInit(); err != nil { + logrus.Debugf("Failed to initialize firewalld: %v, using raw iptables instead", err) + } +} + +func detectIptables() { + path, err := exec.LookPath("iptables") + if err != nil { + return + } + iptablesPath = path + supportsXlock = exec.Command(iptablesPath, "--wait", "-L", "-n").Run() == nil + mj, mn, mc, err := GetVersion() + if err != nil { + logrus.Warnf("Failed to read iptables version: %v", err) + return + } + supportsCOpt = supportsCOption(mj, mn, mc) +} + +func initDependencies() { + probe() + initFirewalld() + detectIptables() +} + +func initCheck() error { + initOnce.Do(initDependencies) + + if iptablesPath == "" { + return ErrIptablesNotFound + } + return nil +} + +// NewChain adds a new chain to ip table. +func NewChain(name string, table Table, hairpinMode bool) (*ChainInfo, error) { + c := &ChainInfo{ + Name: name, + Table: table, + HairpinMode: hairpinMode, + } + if string(c.Table) == "" { + c.Table = Filter + } + + // Add chain if it doesn't exist + if _, err := Raw("-t", string(c.Table), "-n", "-L", c.Name); err != nil { + if output, err := Raw("-t", string(c.Table), "-N", c.Name); err != nil { + return nil, err + } else if len(output) != 0 { + return nil, fmt.Errorf("Could not create %s/%s chain: %s", c.Table, c.Name, output) + } + } + return c, nil +} + +// ProgramChain is used to add rules to a chain +func ProgramChain(c *ChainInfo, bridgeName string, hairpinMode, enable bool) error { + if c.Name == "" { + return errors.New("Could not program chain, missing chain name") + } + + switch c.Table { + case Nat: + preroute := []string{ + "-m", "addrtype", + "--dst-type", "LOCAL", + "-j", c.Name} + if !Exists(Nat, "PREROUTING", preroute...) && enable { + if err := c.Prerouting(Append, preroute...); err != nil { + return fmt.Errorf("Failed to inject %s in PREROUTING chain: %s", c.Name, err) + } + } else if Exists(Nat, "PREROUTING", preroute...) 
&& !enable { + if err := c.Prerouting(Delete, preroute...); err != nil { + return fmt.Errorf("Failed to remove %s in PREROUTING chain: %s", c.Name, err) + } + } + output := []string{ + "-m", "addrtype", + "--dst-type", "LOCAL", + "-j", c.Name} + if !hairpinMode { + output = append(output, "!", "--dst", "127.0.0.0/8") + } + if !Exists(Nat, "OUTPUT", output...) && enable { + if err := c.Output(Append, output...); err != nil { + return fmt.Errorf("Failed to inject %s in OUTPUT chain: %s", c.Name, err) + } + } else if Exists(Nat, "OUTPUT", output...) && !enable { + if err := c.Output(Delete, output...); err != nil { + return fmt.Errorf("Failed to inject %s in OUTPUT chain: %s", c.Name, err) + } + } + case Filter: + if bridgeName == "" { + return fmt.Errorf("Could not program chain %s/%s, missing bridge name", + c.Table, c.Name) + } + link := []string{ + "-o", bridgeName, + "-j", c.Name} + if !Exists(Filter, "FORWARD", link...) && enable { + insert := append([]string{string(Insert), "FORWARD"}, link...) + if output, err := Raw(insert...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Could not create linking rule to %s/%s: %s", c.Table, c.Name, output) + } + } else if Exists(Filter, "FORWARD", link...) && !enable { + del := append([]string{string(Delete), "FORWARD"}, link...) + if output, err := Raw(del...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Could not delete linking rule from %s/%s: %s", c.Table, c.Name, output) + } + + } + establish := []string{ + "-o", bridgeName, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT"} + if !Exists(Filter, "FORWARD", establish...) && enable { + insert := append([]string{string(Insert), "FORWARD"}, establish...) + if output, err := Raw(insert...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Could not create establish rule to %s: %s", c.Table, output) + } + } else if Exists(Filter, "FORWARD", establish...) && !enable { + del := append([]string{string(Delete), "FORWARD"}, establish...) + if output, err := Raw(del...); err != nil { + return err + } else if len(output) != 0 { + return fmt.Errorf("Could not delete establish rule from %s: %s", c.Table, output) + } + } + } + return nil +} + +// RemoveExistingChain removes existing chain from the table. +func RemoveExistingChain(name string, table Table) error { + c := &ChainInfo{ + Name: name, + Table: table, + } + if string(c.Table) == "" { + c.Table = Filter + } + return c.Remove() +} + +// Forward adds forwarding rule to 'filter' table and corresponding nat rule to 'nat' table. +func (c *ChainInfo) Forward(action Action, ip net.IP, port int, proto, destAddr string, destPort int, bridgeName string) error { + daddr := ip.String() + if ip.IsUnspecified() { + // iptables interprets "0.0.0.0" as "0.0.0.0/32", whereas we + // want "0.0.0.0/0". "0/0" is correctly interpreted as "any + // value" by both iptables and ip6tables. 
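+ // For illustration (hypothetical values): forwarding host port 8080/tcp to + // 172.17.0.2:80 on bridge docker0 programs a nat rule of the form + //   -p tcp -d 0/0 --dport 8080 -j DNAT --to-destination 172.17.0.2:80 ! -i docker0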
+ daddr = "0/0" + } + + args := []string{ + "-p", proto, + "-d", daddr, + "--dport", strconv.Itoa(port), + "-j", "DNAT", + "--to-destination", net.JoinHostPort(destAddr, strconv.Itoa(destPort))} + if !c.HairpinMode { + args = append(args, "!", "-i", bridgeName) + } + if err := ProgramRule(Nat, c.Name, action, args); err != nil { + return err + } + + args = []string{ + "!", "-i", bridgeName, + "-o", bridgeName, + "-p", proto, + "-d", destAddr, + "--dport", strconv.Itoa(destPort), + "-j", "ACCEPT", + } + if err := ProgramRule(Filter, c.Name, action, args); err != nil { + return err + } + + args = []string{ + "-p", proto, + "-s", destAddr, + "-d", destAddr, + "--dport", strconv.Itoa(destPort), + "-j", "MASQUERADE", + } + + if err := ProgramRule(Nat, "POSTROUTING", action, args); err != nil { + return err + } + + if proto == "sctp" { + // Linux kernel v4.9 and below enables NETIF_F_SCTP_CRC for veth by + // the following commit. + // This introduces a problem when conbined with a physical NIC without + // NETIF_F_SCTP_CRC. As for a workaround, here we add an iptables entry + // to fill the checksum. + // + // https://github.com/torvalds/linux/commit/c80fafbbb59ef9924962f83aac85531039395b18 + args = []string{ + "-p", proto, + "--sport", strconv.Itoa(destPort), + "-j", "CHECKSUM", + "--checksum-fill", + } + if err := ProgramRule(Mangle, "POSTROUTING", action, args); err != nil { + return err + } + } + + return nil +} + +// Link adds reciprocal ACCEPT rule for two supplied IP addresses. +// Traffic is allowed from ip1 to ip2 and vice-versa +func (c *ChainInfo) Link(action Action, ip1, ip2 net.IP, port int, proto string, bridgeName string) error { + // forward + args := []string{ + "-i", bridgeName, "-o", bridgeName, + "-p", proto, + "-s", ip1.String(), + "-d", ip2.String(), + "--dport", strconv.Itoa(port), + "-j", "ACCEPT", + } + if err := ProgramRule(Filter, c.Name, action, args); err != nil { + return err + } + // reverse + args[7], args[9] = args[9], args[7] + args[10] = "--sport" + return ProgramRule(Filter, c.Name, action, args) +} + +// ProgramRule adds the rule specified by args only if the +// rule is not already present in the chain. Reciprocally, +// it removes the rule only if present. +func ProgramRule(table Table, chain string, action Action, args []string) error { + if Exists(table, chain, args...) != (action == Delete) { + return nil + } + return RawCombinedOutput(append([]string{"-t", string(table), string(action), chain}, args...)...) +} + +// Prerouting adds linking rule to nat/PREROUTING chain. +func (c *ChainInfo) Prerouting(action Action, args ...string) error { + a := []string{"-t", string(Nat), string(action), "PREROUTING"} + if len(args) > 0 { + a = append(a, args...) + } + if output, err := Raw(a...); err != nil { + return err + } else if len(output) != 0 { + return ChainError{Chain: "PREROUTING", Output: output} + } + return nil +} + +// Output adds linking rule to an OUTPUT chain. +func (c *ChainInfo) Output(action Action, args ...string) error { + a := []string{"-t", string(c.Table), string(action), "OUTPUT"} + if len(args) > 0 { + a = append(a, args...) + } + if output, err := Raw(a...); err != nil { + return err + } else if len(output) != 0 { + return ChainError{Chain: "OUTPUT", Output: output} + } + return nil +} + +// Remove removes the chain. 
+func (c *ChainInfo) Remove() error { + // Ignore errors - This could mean the chains were never set up + if c.Table == Nat { + c.Prerouting(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "-j", c.Name) + c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "!", "--dst", "127.0.0.0/8", "-j", c.Name) + c.Output(Delete, "-m", "addrtype", "--dst-type", "LOCAL", "-j", c.Name) // Created in versions <= 0.1.6 + + c.Prerouting(Delete) + c.Output(Delete) + } + Raw("-t", string(c.Table), "-F", c.Name) + Raw("-t", string(c.Table), "-X", c.Name) + return nil +} + +// Exists checks if a rule exists +func Exists(table Table, chain string, rule ...string) bool { + return exists(false, table, chain, rule...) +} + +// ExistsNative behaves as Exists with the difference that it +// will always invoke the `iptables` binary. +func ExistsNative(table Table, chain string, rule ...string) bool { + return exists(true, table, chain, rule...) +} + +func exists(native bool, table Table, chain string, rule ...string) bool { + f := Raw + if native { + f = raw + } + + if string(table) == "" { + table = Filter + } + + if err := initCheck(); err != nil { + // The exists() signature does not allow us to return an error, but at least + // we can skip the (likely invalid) exec invocation. + return false + } + + if supportsCOpt { + // if exit status is 0 then return true, the rule exists + _, err := f(append([]string{"-t", string(table), "-C", chain}, rule...)...) + return err == nil + } + + // parse "iptables -S" for the rule (it checks rules in a specific chain + // in a specific table and it is very unreliable) + return existsRaw(table, chain, rule...) +} + +func existsRaw(table Table, chain string, rule ...string) bool { + ruleString := fmt.Sprintf("%s %s\n", chain, strings.Join(rule, " ")) + existingRules, _ := exec.Command(iptablesPath, "-t", string(table), "-S", chain).Output() + + return strings.Contains(string(existingRules), ruleString) +} + +// Maximum duration that an iptables operation can take +// before flagging a warning. +const opWarnTime = 2 * time.Second + +func filterOutput(start time.Time, output []byte, args ...string) []byte { + // Flag operations that have taken a long time to complete + opTime := time.Since(start) + if opTime > opWarnTime { + logrus.Warnf("xtables contention detected while running [%s]: Waited for %.2f seconds and received %q", strings.Join(args, " "), float64(opTime)/float64(time.Second), string(output)) + } + // ignore iptables' message about xtables lock: + // it is a warning, not an error. + if strings.Contains(string(output), xLockWaitMsg) { + output = []byte("") + } + // Put further filters here if desired + return output +} + +// Raw calls 'iptables' system command, passing supplied arguments. +func Raw(args ...string) ([]byte, error) { + if firewalldRunning { + startTime := time.Now() + output, err := Passthrough(Iptables, args...) + if err == nil || !strings.Contains(err.Error(), "was not provided by any .service files") { + return filterOutput(startTime, output, args...), err + } + } + return raw(args...) +} + +func raw(args ...string) ([]byte, error) { + if err := initCheck(); err != nil { + return nil, err + } + if supportsXlock { + args = append([]string{"--wait"}, args...) 
+ } else { + bestEffortLock.Lock() + defer bestEffortLock.Unlock() + } + + logrus.Debugf("%s, %v", iptablesPath, args) + + startTime := time.Now() + output, err := exec.Command(iptablesPath, args...).CombinedOutput() + if err != nil { + return nil, fmt.Errorf("iptables failed: iptables %v: %s (%s)", strings.Join(args, " "), output, err) + } + + return filterOutput(startTime, output, args...), err +} + +// RawCombinedOutput internally calls the Raw function and returns a non-nil +// error if Raw returned a non-nil error or a non-empty output +func RawCombinedOutput(args ...string) error { + if output, err := Raw(args...); err != nil || len(output) != 0 { + return fmt.Errorf("%s (%v)", string(output), err) + } + return nil +} + +// RawCombinedOutputNative behaves as RawCombinedOutput with the difference that it +// will always invoke the `iptables` binary +func RawCombinedOutputNative(args ...string) error { + if output, err := raw(args...); err != nil || len(output) != 0 { + return fmt.Errorf("%s (%v)", string(output), err) + } + return nil +} + +// ExistChain checks if a chain exists +func ExistChain(chain string, table Table) bool { + if _, err := Raw("-t", string(table), "-nL", chain); err == nil { + return true + } + return false +} + +// GetVersion reads the iptables version numbers during initialization +func GetVersion() (major, minor, micro int, err error) { + out, err := exec.Command(iptablesPath, "--version").CombinedOutput() + if err == nil { + major, minor, micro = parseVersionNumbers(string(out)) + } + return +} + +// SetDefaultPolicy sets the passed default policy for the table/chain +func SetDefaultPolicy(table Table, chain string, policy Policy) error { + if err := RawCombinedOutput("-t", string(table), "-P", chain, string(policy)); err != nil { + return fmt.Errorf("setting default policy to %v in %v chain failed: %v", policy, chain, err) + } + return nil +} + +func parseVersionNumbers(input string) (major, minor, micro int) { + re := regexp.MustCompile(`v\d*.\d*.\d*`) + line := re.FindString(input) + fmt.Sscanf(line, "v%d.%d.%d", &major, &minor, &micro) + return +} + +// iptables -C, --check option was added in v.1.4.11 +// http://ftp.netfilter.org/pub/iptables/changes-iptables-1.4.11.txt +func supportsCOption(mj, mn, mc int) bool { + return mj > 1 || (mj == 1 && (mn > 4 || (mn == 4 && mc >= 11))) +} + +// AddReturnRule adds a return rule for the chain in the filter table +func AddReturnRule(chain string) error { + var ( + table = Filter + args = []string{"-j", "RETURN"} + ) + + if Exists(table, chain, args...) { + return nil + } + + err := RawCombinedOutput(append([]string{"-A", chain}, args...)...) + if err != nil { + return fmt.Errorf("unable to add return rule in %s chain: %s", chain, err.Error()) + } + + return nil +} + +// EnsureJumpRule ensures the jump rule is on top +func EnsureJumpRule(fromChain, toChain string) error { + var ( + table = Filter + args = []string{"-j", toChain} + ) + + if Exists(table, fromChain, args...) { + err := RawCombinedOutput(append([]string{"-D", fromChain}, args...)...) + if err != nil { + return fmt.Errorf("unable to remove jump to %s rule in %s chain: %s", toChain, fromChain, err.Error()) + } + } + + err := RawCombinedOutput(append([]string{"-I", fromChain}, args...)...) 
+ if err != nil { + return fmt.Errorf("unable to insert jump to %s rule in %s chain: %s", toChain, fromChain, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/ipvs/constants.go b/vendor/github.com/docker/libnetwork/ipvs/constants.go new file mode 100644 index 0000000000..d36bec0e80 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipvs/constants.go @@ -0,0 +1,147 @@ +// +build linux + +package ipvs + +const ( + genlCtrlID = 0x10 +) + +// GENL control commands +const ( + genlCtrlCmdUnspec uint8 = iota + genlCtrlCmdNewFamily + genlCtrlCmdDelFamily + genlCtrlCmdGetFamily +) + +// GENL family attributes +const ( + genlCtrlAttrUnspec int = iota + genlCtrlAttrFamilyID + genlCtrlAttrFamilyName +) + +// IPVS genl commands +const ( + ipvsCmdUnspec uint8 = iota + ipvsCmdNewService + ipvsCmdSetService + ipvsCmdDelService + ipvsCmdGetService + ipvsCmdNewDest + ipvsCmdSetDest + ipvsCmdDelDest + ipvsCmdGetDest + ipvsCmdNewDaemon + ipvsCmdDelDaemon + ipvsCmdGetDaemon + ipvsCmdSetConfig + ipvsCmdGetConfig + ipvsCmdSetInfo + ipvsCmdGetInfo + ipvsCmdZero + ipvsCmdFlush +) + +// Attributes used in the first level of commands +const ( + ipvsCmdAttrUnspec int = iota + ipvsCmdAttrService + ipvsCmdAttrDest + ipvsCmdAttrDaemon + ipvsCmdAttrTimeoutTCP + ipvsCmdAttrTimeoutTCPFin + ipvsCmdAttrTimeoutUDP +) + +// Attributes used to describe a service. Used inside nested attribute +// ipvsCmdAttrService +const ( + ipvsSvcAttrUnspec int = iota + ipvsSvcAttrAddressFamily + ipvsSvcAttrProtocol + ipvsSvcAttrAddress + ipvsSvcAttrPort + ipvsSvcAttrFWMark + ipvsSvcAttrSchedName + ipvsSvcAttrFlags + ipvsSvcAttrTimeout + ipvsSvcAttrNetmask + ipvsSvcAttrStats + ipvsSvcAttrPEName +) + +// Attributes used to describe a destination (real server). Used +// inside nested attribute ipvsCmdAttrDest. +const ( + ipvsDestAttrUnspec int = iota + ipvsDestAttrAddress + ipvsDestAttrPort + ipvsDestAttrForwardingMethod + ipvsDestAttrWeight + ipvsDestAttrUpperThreshold + ipvsDestAttrLowerThreshold + ipvsDestAttrActiveConnections + ipvsDestAttrInactiveConnections + ipvsDestAttrPersistentConnections + ipvsDestAttrStats + ipvsDestAttrAddressFamily +) + +// IPVS Svc Statistics constants + +const ( + ipvsSvcStatsUnspec int = iota + ipvsSvcStatsConns + ipvsSvcStatsPktsIn + ipvsSvcStatsPktsOut + ipvsSvcStatsBytesIn + ipvsSvcStatsBytesOut + ipvsSvcStatsCPS + ipvsSvcStatsPPSIn + ipvsSvcStatsPPSOut + ipvsSvcStatsBPSIn + ipvsSvcStatsBPSOut +) + +// Destination forwarding methods +const ( + // ConnectionFlagFwdMask indicates the mask in the connection + // flags which is used by forwarding method bits. + ConnectionFlagFwdMask = 0x0007 + + // ConnectionFlagMasq is used for masquerade forwarding method. + ConnectionFlagMasq = 0x0000 + + // ConnectionFlagLocalNode is used for local node forwarding + // method. + ConnectionFlagLocalNode = 0x0001 + + // ConnectionFlagTunnel is used for tunnel mode forwarding + // method. + ConnectionFlagTunnel = 0x0002 + + // ConnectionFlagDirectRoute is used for direct routing + // forwarding method. + ConnectionFlagDirectRoute = 0x0003 +) + +const ( + // RoundRobin distributes jobs equally amongst the available + // real servers. + RoundRobin = "rr" + + // LeastConnection assigns more jobs to real servers with + // fewer active jobs. + LeastConnection = "lc" + + // DestinationHashing assigns jobs to servers through looking + // up a statically assigned hash table by their destination IP + // addresses. 
+ DestinationHashing = "dh" + + // SourceHashing assigns jobs to servers through looking up + // a statically assigned hash table by their source IP + // addresses. + SourceHashing = "sh" +) diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go new file mode 100644 index 0000000000..effbb716eb --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go @@ -0,0 +1,184 @@ +// +build linux + +package ipvs + +import ( + "net" + "syscall" + "time" + + "fmt" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +const ( + netlinkRecvSocketsTimeout = 3 * time.Second + netlinkSendSocketTimeout = 30 * time.Second +) + +// Service defines an IPVS service in its entirety. +type Service struct { + // Virtual service address. + Address net.IP + Protocol uint16 + Port uint16 + FWMark uint32 // Firewall mark of the service. + + // Virtual service options. + SchedName string + Flags uint32 + Timeout uint32 + Netmask uint32 + AddressFamily uint16 + PEName string + Stats SvcStats +} + +// SvcStats defines an IPVS service statistics +type SvcStats struct { + Connections uint32 + PacketsIn uint32 + PacketsOut uint32 + BytesIn uint64 + BytesOut uint64 + CPS uint32 + BPSOut uint32 + PPSIn uint32 + PPSOut uint32 + BPSIn uint32 +} + +// Destination defines an IPVS destination (real server) in its +// entirety. +type Destination struct { + Address net.IP + Port uint16 + Weight int + ConnectionFlags uint32 + AddressFamily uint16 + UpperThreshold uint32 + LowerThreshold uint32 +} + +// Handle provides a namespace specific ipvs handle to program ipvs +// rules. +type Handle struct { + seq uint32 + sock *nl.NetlinkSocket +} + +// New provides a new ipvs handle in the namespace pointed to by the +// passed path. It will return a valid handle or an error in case an +// error occurred while creating the handle. +func New(path string) (*Handle, error) { + setup() + + n := netns.None() + if path != "" { + var err error + n, err = netns.GetFromPath(path) + if err != nil { + return nil, err + } + } + defer n.Close() + + sock, err := nl.GetNetlinkSocketAt(n, netns.None(), syscall.NETLINK_GENERIC) + if err != nil { + return nil, err + } + // Add operation timeout to avoid deadlocks + tv := syscall.NsecToTimeval(netlinkSendSocketTimeout.Nanoseconds()) + if err := sock.SetSendTimeout(&tv); err != nil { + return nil, err + } + tv = syscall.NsecToTimeval(netlinkRecvSocketsTimeout.Nanoseconds()) + if err := sock.SetReceiveTimeout(&tv); err != nil { + return nil, err + } + + return &Handle{sock: sock}, nil +} + +// Close closes the ipvs handle. The handle is invalid after Close +// returns. +func (i *Handle) Close() { + if i.sock != nil { + i.sock.Close() + } +} + +// NewService creates a new ipvs service in the passed handle. +func (i *Handle) NewService(s *Service) error { + return i.doCmd(s, nil, ipvsCmdNewService) +} + +// IsServicePresent queries for the ipvs service in the passed handle. +func (i *Handle) IsServicePresent(s *Service) bool { + return nil == i.doCmd(s, nil, ipvsCmdGetService) +} + +// UpdateService updates an already existing service in the passed +// handle. +func (i *Handle) UpdateService(s *Service) error { + return i.doCmd(s, nil, ipvsCmdSetService) +} + +// DelService deletes an already existing service in the passed +// handle. +func (i *Handle) DelService(s *Service) error { + return i.doCmd(s, nil, ipvsCmdDelService) +} + +// Flush deletes all existing services in the passed +// handle. 
+func (i *Handle) Flush() error {
+	_, err := i.doCmdWithoutAttr(ipvsCmdFlush)
+	return err
+}
+
+// NewDestination creates a new real server in the passed ipvs
+// service which should already be existing in the passed handle.
+func (i *Handle) NewDestination(s *Service, d *Destination) error {
+	return i.doCmd(s, d, ipvsCmdNewDest)
+}
+
+// UpdateDestination updates an already existing real server in the
+// passed ipvs service in the passed handle.
+func (i *Handle) UpdateDestination(s *Service, d *Destination) error {
+	return i.doCmd(s, d, ipvsCmdSetDest)
+}
+
+// DelDestination deletes an already existing real server in the
+// passed ipvs service in the passed handle.
+func (i *Handle) DelDestination(s *Service, d *Destination) error {
+	return i.doCmd(s, d, ipvsCmdDelDest)
+}
+
+// GetServices returns an array of services configured on the Node
+func (i *Handle) GetServices() ([]*Service, error) {
+	return i.doGetServicesCmd(nil)
+}
+
+// GetDestinations returns an array of Destinations configured for this Service
+func (i *Handle) GetDestinations(s *Service) ([]*Destination, error) {
+	return i.doGetDestinationsCmd(s, nil)
+}
+
+// GetService gets details of a specific IPVS service, useful when updating statistics etc.
+func (i *Handle) GetService(s *Service) (*Service, error) {
+
+	res, err := i.doGetServicesCmd(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// We are looking for exactly one service otherwise error out
+	if len(res) != 1 {
+		return nil, fmt.Errorf("Expected only one service obtained=%d", len(res))
+	}
+
+	return res[0], nil
+}
diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
new file mode 100644
index 0000000000..c062a1789d
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
@@ -0,0 +1,556 @@
+// +build linux
+
+package ipvs
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+	"os/exec"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+
+	"github.com/sirupsen/logrus"
+	"github.com/vishvananda/netlink/nl"
+	"github.com/vishvananda/netns"
+)
+
+// For quick reference, the IPVS-related netlink message format is described at the end of this file.
+var (
+	native     = nl.NativeEndian()
+	ipvsFamily int
+	ipvsOnce   sync.Once
+)
+
+type genlMsgHdr struct {
+	cmd      uint8
+	version  uint8
+	reserved uint16
+}
+
+type ipvsFlags struct {
+	flags uint32
+	mask  uint32
+}
+
+func deserializeGenlMsg(b []byte) (hdr *genlMsgHdr) {
+	return (*genlMsgHdr)(unsafe.Pointer(&b[0:unsafe.Sizeof(*hdr)][0]))
+}
+
+func (hdr *genlMsgHdr) Serialize() []byte {
+	return (*(*[unsafe.Sizeof(*hdr)]byte)(unsafe.Pointer(hdr)))[:]
+}
+
+func (hdr *genlMsgHdr) Len() int {
+	return int(unsafe.Sizeof(*hdr))
+}
+
+func (f *ipvsFlags) Serialize() []byte {
+	return (*(*[unsafe.Sizeof(*f)]byte)(unsafe.Pointer(f)))[:]
+}
+
+func (f *ipvsFlags) Len() int {
+	return int(unsafe.Sizeof(*f))
+}
+
+func setup() {
+	ipvsOnce.Do(func() {
+		var err error
+		if out, err := exec.Command("modprobe", "-va", "ip_vs").CombinedOutput(); err != nil {
+			logrus.Warnf("Running modprobe ip_vs failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
+		}
+
+		ipvsFamily, err = getIPVSFamily()
+		if err != nil {
+			logrus.Error("Could not get ipvs family information from the kernel. It is possible that ipvs is not enabled in your kernel.
Native loadbalancing will not work until this is fixed.") + } + }) +} + +func fillService(s *Service) nl.NetlinkRequestData { + cmdAttr := nl.NewRtAttr(ipvsCmdAttrService, nil) + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddressFamily, nl.Uint16Attr(s.AddressFamily)) + if s.FWMark != 0 { + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFWMark, nl.Uint32Attr(s.FWMark)) + } else { + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrProtocol, nl.Uint16Attr(s.Protocol)) + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrAddress, rawIPData(s.Address)) + + // Port needs to be in network byte order. + portBuf := new(bytes.Buffer) + binary.Write(portBuf, binary.BigEndian, s.Port) + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPort, portBuf.Bytes()) + } + + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrSchedName, nl.ZeroTerminated(s.SchedName)) + if s.PEName != "" { + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrPEName, nl.ZeroTerminated(s.PEName)) + } + f := &ipvsFlags{ + flags: s.Flags, + mask: 0xFFFFFFFF, + } + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrFlags, f.Serialize()) + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrTimeout, nl.Uint32Attr(s.Timeout)) + nl.NewRtAttrChild(cmdAttr, ipvsSvcAttrNetmask, nl.Uint32Attr(s.Netmask)) + return cmdAttr +} + +func fillDestinaton(d *Destination) nl.NetlinkRequestData { + cmdAttr := nl.NewRtAttr(ipvsCmdAttrDest, nil) + + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrAddress, rawIPData(d.Address)) + // Port needs to be in network byte order. + portBuf := new(bytes.Buffer) + binary.Write(portBuf, binary.BigEndian, d.Port) + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrPort, portBuf.Bytes()) + + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrForwardingMethod, nl.Uint32Attr(d.ConnectionFlags&ConnectionFlagFwdMask)) + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrWeight, nl.Uint32Attr(uint32(d.Weight))) + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrUpperThreshold, nl.Uint32Attr(d.UpperThreshold)) + nl.NewRtAttrChild(cmdAttr, ipvsDestAttrLowerThreshold, nl.Uint32Attr(d.LowerThreshold)) + + return cmdAttr +} + +func (i *Handle) doCmdwithResponse(s *Service, d *Destination, cmd uint8) ([][]byte, error) { + req := newIPVSRequest(cmd) + req.Seq = atomic.AddUint32(&i.seq, 1) + + if s == nil { + req.Flags |= syscall.NLM_F_DUMP //Flag to dump all messages + req.AddData(nl.NewRtAttr(ipvsCmdAttrService, nil)) //Add a dummy attribute + } else { + req.AddData(fillService(s)) + } + + if d == nil { + if cmd == ipvsCmdGetDest { + req.Flags |= syscall.NLM_F_DUMP + } + + } else { + req.AddData(fillDestinaton(d)) + } + + res, err := execute(i.sock, req, 0) + if err != nil { + return [][]byte{}, err + } + + return res, nil +} + +func (i *Handle) doCmd(s *Service, d *Destination, cmd uint8) error { + _, err := i.doCmdwithResponse(s, d, cmd) + + return err +} + +func getIPVSFamily() (int, error) { + sock, err := nl.GetNetlinkSocketAt(netns.None(), netns.None(), syscall.NETLINK_GENERIC) + if err != nil { + return 0, err + } + defer sock.Close() + + req := newGenlRequest(genlCtrlID, genlCtrlCmdGetFamily) + req.AddData(nl.NewRtAttr(genlCtrlAttrFamilyName, nl.ZeroTerminated("IPVS"))) + + msgs, err := execute(sock, req, 0) + if err != nil { + return 0, err + } + + for _, m := range msgs { + hdr := deserializeGenlMsg(m) + attrs, err := nl.ParseRouteAttr(m[hdr.Len():]) + if err != nil { + return 0, err + } + + for _, attr := range attrs { + switch int(attr.Attr.Type) { + case genlCtrlAttrFamilyID: + return int(native.Uint16(attr.Value[0:2])), nil + } + } + } + + return 0, fmt.Errorf("no family id in the netlink response") +} + +func rawIPData(ip net.IP) []byte { + family := 
nl.GetIPFamily(ip) + if family == nl.FAMILY_V4 { + return ip.To4() + } + return ip +} + +func newIPVSRequest(cmd uint8) *nl.NetlinkRequest { + return newGenlRequest(ipvsFamily, cmd) +} + +func newGenlRequest(familyID int, cmd uint8) *nl.NetlinkRequest { + req := nl.NewNetlinkRequest(familyID, syscall.NLM_F_ACK) + req.AddData(&genlMsgHdr{cmd: cmd, version: 1}) + return req +} + +func execute(s *nl.NetlinkSocket, req *nl.NetlinkRequest, resType uint16) ([][]byte, error) { + if err := s.Send(req); err != nil { + return nil, err + } + + pid, err := s.GetPid() + if err != nil { + return nil, err + } + + var res [][]byte + +done: + for { + msgs, err := s.Receive() + if err != nil { + if s.GetFd() == -1 { + return nil, fmt.Errorf("Socket got closed on receive") + } + if err == syscall.EAGAIN { + // timeout fired + continue + } + return nil, err + } + for _, m := range msgs { + if m.Header.Seq != req.Seq { + continue + } + if m.Header.Pid != pid { + return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid) + } + if m.Header.Type == syscall.NLMSG_DONE { + break done + } + if m.Header.Type == syscall.NLMSG_ERROR { + error := int32(native.Uint32(m.Data[0:4])) + if error == 0 { + break done + } + return nil, syscall.Errno(-error) + } + if resType != 0 && m.Header.Type != resType { + continue + } + res = append(res, m.Data) + if m.Header.Flags&syscall.NLM_F_MULTI == 0 { + break done + } + } + } + return res, nil +} + +func parseIP(ip []byte, family uint16) (net.IP, error) { + + var resIP net.IP + + switch family { + case syscall.AF_INET: + resIP = (net.IP)(ip[:4]) + case syscall.AF_INET6: + resIP = (net.IP)(ip[:16]) + default: + return nil, fmt.Errorf("parseIP Error ip=%v", ip) + + } + return resIP, nil +} + +// parseStats +func assembleStats(msg []byte) (SvcStats, error) { + + var s SvcStats + + attrs, err := nl.ParseRouteAttr(msg) + if err != nil { + return s, err + } + + for _, attr := range attrs { + attrType := int(attr.Attr.Type) + switch attrType { + case ipvsSvcStatsConns: + s.Connections = native.Uint32(attr.Value) + case ipvsSvcStatsPktsIn: + s.PacketsIn = native.Uint32(attr.Value) + case ipvsSvcStatsPktsOut: + s.PacketsOut = native.Uint32(attr.Value) + case ipvsSvcStatsBytesIn: + s.BytesIn = native.Uint64(attr.Value) + case ipvsSvcStatsBytesOut: + s.BytesOut = native.Uint64(attr.Value) + case ipvsSvcStatsCPS: + s.CPS = native.Uint32(attr.Value) + case ipvsSvcStatsPPSIn: + s.PPSIn = native.Uint32(attr.Value) + case ipvsSvcStatsPPSOut: + s.PPSOut = native.Uint32(attr.Value) + case ipvsSvcStatsBPSIn: + s.BPSIn = native.Uint32(attr.Value) + case ipvsSvcStatsBPSOut: + s.BPSOut = native.Uint32(attr.Value) + } + } + return s, nil +} + +// assembleService assembles a services back from a hain of netlink attributes +func assembleService(attrs []syscall.NetlinkRouteAttr) (*Service, error) { + + var s Service + + for _, attr := range attrs { + + attrType := int(attr.Attr.Type) + + switch attrType { + + case ipvsSvcAttrAddressFamily: + s.AddressFamily = native.Uint16(attr.Value) + case ipvsSvcAttrProtocol: + s.Protocol = native.Uint16(attr.Value) + case ipvsSvcAttrAddress: + ip, err := parseIP(attr.Value, s.AddressFamily) + if err != nil { + return nil, err + } + s.Address = ip + case ipvsSvcAttrPort: + s.Port = binary.BigEndian.Uint16(attr.Value) + case ipvsSvcAttrFWMark: + s.FWMark = native.Uint32(attr.Value) + case ipvsSvcAttrSchedName: + s.SchedName = nl.BytesToString(attr.Value) + case ipvsSvcAttrFlags: + s.Flags = native.Uint32(attr.Value) + case ipvsSvcAttrTimeout: + s.Timeout = 
native.Uint32(attr.Value) + case ipvsSvcAttrNetmask: + s.Netmask = native.Uint32(attr.Value) + case ipvsSvcAttrStats: + stats, err := assembleStats(attr.Value) + if err != nil { + return nil, err + } + s.Stats = stats + } + + } + return &s, nil +} + +// parseService given a ipvs netlink response this function will respond with a valid service entry, an error otherwise +func (i *Handle) parseService(msg []byte) (*Service, error) { + + var s *Service + + //Remove General header for this message and parse the NetLink message + hdr := deserializeGenlMsg(msg) + NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) + if err != nil { + return nil, err + } + if len(NetLinkAttrs) == 0 { + return nil, fmt.Errorf("error no valid netlink message found while parsing service record") + } + + //Now Parse and get IPVS related attributes messages packed in this message. + ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value) + if err != nil { + return nil, err + } + + //Assemble all the IPVS related attribute messages and create a service record + s, err = assembleService(ipvsAttrs) + if err != nil { + return nil, err + } + + return s, nil +} + +// doGetServicesCmd a wrapper which could be used commonly for both GetServices() and GetService(*Service) +func (i *Handle) doGetServicesCmd(svc *Service) ([]*Service, error) { + var res []*Service + + msgs, err := i.doCmdwithResponse(svc, nil, ipvsCmdGetService) + if err != nil { + return nil, err + } + + for _, msg := range msgs { + srv, err := i.parseService(msg) + if err != nil { + return nil, err + } + res = append(res, srv) + } + + return res, nil +} + +// doCmdWithoutAttr a simple wrapper of netlink socket execute command +func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) { + req := newIPVSRequest(cmd) + req.Seq = atomic.AddUint32(&i.seq, 1) + return execute(i.sock, req, 0) +} + +func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) { + + var d Destination + + for _, attr := range attrs { + + attrType := int(attr.Attr.Type) + + switch attrType { + case ipvsDestAttrAddress: + ip, err := parseIP(attr.Value, syscall.AF_INET) + if err != nil { + return nil, err + } + d.Address = ip + case ipvsDestAttrPort: + d.Port = binary.BigEndian.Uint16(attr.Value) + case ipvsDestAttrForwardingMethod: + d.ConnectionFlags = native.Uint32(attr.Value) + case ipvsDestAttrWeight: + d.Weight = int(native.Uint16(attr.Value)) + case ipvsDestAttrUpperThreshold: + d.UpperThreshold = native.Uint32(attr.Value) + case ipvsDestAttrLowerThreshold: + d.LowerThreshold = native.Uint32(attr.Value) + case ipvsDestAttrAddressFamily: + d.AddressFamily = native.Uint16(attr.Value) + } + } + return &d, nil +} + +// parseDestination given a ipvs netlink response this function will respond with a valid destination entry, an error otherwise +func (i *Handle) parseDestination(msg []byte) (*Destination, error) { + var dst *Destination + + //Remove General header for this message + hdr := deserializeGenlMsg(msg) + NetLinkAttrs, err := nl.ParseRouteAttr(msg[hdr.Len():]) + if err != nil { + return nil, err + } + if len(NetLinkAttrs) == 0 { + return nil, fmt.Errorf("error no valid netlink message found while parsing destination record") + } + + //Now Parse and get IPVS related attributes messages packed in this message. 
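+	// (Parsing is two-level: ParseRouteAttr above unpacked the outer netlink
+	// attributes, and the first attribute's Value is itself a packed list of
+	// IPVS attributes, which the ParseRouteAttr call below unpacks so that
+	// assembleDestination can map each attribute type onto a Destination field.)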
+ ipvsAttrs, err := nl.ParseRouteAttr(NetLinkAttrs[0].Value) + if err != nil { + return nil, err + } + + //Assemble netlink attributes and create a Destination record + dst, err = assembleDestination(ipvsAttrs) + if err != nil { + return nil, err + } + + return dst, nil +} + +// doGetDestinationsCmd a wrapper function to be used by GetDestinations and GetDestination(d) apis +func (i *Handle) doGetDestinationsCmd(s *Service, d *Destination) ([]*Destination, error) { + + var res []*Destination + + msgs, err := i.doCmdwithResponse(s, d, ipvsCmdGetDest) + if err != nil { + return nil, err + } + + for _, msg := range msgs { + dest, err := i.parseDestination(msg) + if err != nil { + return res, err + } + res = append(res, dest) + } + return res, nil +} + +// IPVS related netlink message format explained + +/* EACH NETLINK MSG is of the below format, this is what we will receive from execute() api. + If we have multiple netlink objects to process like GetServices() etc., execute() will + supply an array of this below object + + NETLINK MSG +|-----------------------------------| + 0 1 2 3 +|--------|--------|--------|--------| - +| CMD ID | VER | RESERVED | |==> General Message Header represented by genlMsgHdr +|-----------------------------------| - +| ATTR LEN | ATTR TYPE | | +|-----------------------------------| | +| | | +| VALUE | | +| []byte Array of IPVS MSG | |==> Attribute Message represented by syscall.NetlinkRouteAttr +| PADDED BY 4 BYTES | | +| | | +|-----------------------------------| - + + + Once We strip genlMsgHdr from above NETLINK MSG, we should parse the VALUE. + VALUE will have an array of netlink attributes (syscall.NetlinkRouteAttr) such that each attribute will + represent a "Service" or "Destination" object's field. If we assemble these attributes we can construct + Service or Destination. 
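+
+   As a concrete (hypothetical) walk-through: a GetService reply for a TCP
+   service on 10.0.0.1:80 carries one outer attribute of type
+   ipvsCmdAttrService, whose VALUE packs nested attributes such as
+   ipvsSvcAttrAddressFamily (AF_INET), ipvsSvcAttrProtocol (IPPROTO_TCP),
+   ipvsSvcAttrAddress (10.0.0.1) and ipvsSvcAttrPort (80, in network byte
+   order), each padded to a 4-byte boundary.
+
+   A minimal sketch of how a caller drives this package (illustrative names
+   and addresses, assumed for the example, using only the APIs defined above):
+
+	handle, err := ipvs.New("") // empty path selects the current namespace
+	if err != nil {
+		return err
+	}
+	defer handle.Close()
+	err = handle.NewService(&ipvs.Service{
+		AddressFamily: syscall.AF_INET,
+		Protocol:      syscall.IPPROTO_TCP,
+		Address:       net.ParseIP("10.0.0.1"),
+		Port:          80,
+		SchedName:     ipvs.RoundRobin,
+	})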
+ + IPVS MSG +|-----------------------------------| + 0 1 2 3 +|--------|--------|--------|--------| +| ATTR LEN | ATTR TYPE | +|-----------------------------------| +| | +| | +| []byte IPVS ATTRIBUTE BY 4 BYTES | +| | +| | +|-----------------------------------| + NEXT ATTRIBUTE +|-----------------------------------| +| ATTR LEN | ATTR TYPE | +|-----------------------------------| +| | +| | +| []byte IPVS ATTRIBUTE BY 4 BYTES | +| | +| | +|-----------------------------------| + NEXT ATTRIBUTE +|-----------------------------------| +| ATTR LEN | ATTR TYPE | +|-----------------------------------| +| | +| | +| []byte IPVS ATTRIBUTE BY 4 BYTES | +| | +| | +|-----------------------------------| + +*/ diff --git a/vendor/github.com/docker/libnetwork/netlabel/labels.go b/vendor/github.com/docker/libnetwork/netlabel/labels.go new file mode 100644 index 0000000000..1594556ec7 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/netlabel/labels.go @@ -0,0 +1,129 @@ +package netlabel + +import ( + "strings" +) + +const ( + // Prefix constant marks the reserved label space for libnetwork + Prefix = "com.docker.network" + + // DriverPrefix constant marks the reserved label space for libnetwork drivers + DriverPrefix = Prefix + ".driver" + + // DriverPrivatePrefix constant marks the reserved label space + // for internal libnetwork drivers + DriverPrivatePrefix = DriverPrefix + ".private" + + // GenericData constant that helps to identify an option as a Generic constant + GenericData = Prefix + ".generic" + + // PortMap constant represents Port Mapping + PortMap = Prefix + ".portmap" + + // MacAddress constant represents Mac Address config of a Container + MacAddress = Prefix + ".endpoint.macaddress" + + // ExposedPorts constant represents the container's Exposed Ports + ExposedPorts = Prefix + ".endpoint.exposedports" + + // DNSServers A list of DNS servers associated with the endpoint + DNSServers = Prefix + ".endpoint.dnsservers" + + //EnableIPv6 constant represents enabling IPV6 at network level + EnableIPv6 = Prefix + ".enable_ipv6" + + // DriverMTU constant represents the MTU size for the network driver + DriverMTU = DriverPrefix + ".mtu" + + // OverlayBindInterface constant represents overlay driver bind interface + OverlayBindInterface = DriverPrefix + ".overlay.bind_interface" + + // OverlayNeighborIP constant represents overlay driver neighbor IP + OverlayNeighborIP = DriverPrefix + ".overlay.neighbor_ip" + + // OverlayVxlanIDList constant represents a list of VXLAN Ids as csv + OverlayVxlanIDList = DriverPrefix + ".overlay.vxlanid_list" + + // Gateway represents the gateway for the network + Gateway = Prefix + ".gateway" + + // Internal constant represents that the network is internal which disables default gateway service + Internal = Prefix + ".internal" + + // ContainerIfacePrefix can be used to override the interface prefix used inside the container + ContainerIfacePrefix = Prefix + ".container_iface_prefix" +) + +var ( + // GlobalKVProvider constant represents the KV provider backend + GlobalKVProvider = MakeKVProvider("global") + + // GlobalKVProviderURL constant represents the KV provider URL + GlobalKVProviderURL = MakeKVProviderURL("global") + + // GlobalKVProviderConfig constant represents the KV provider Config + GlobalKVProviderConfig = MakeKVProviderConfig("global") + + // GlobalKVClient constants represents the global kv store client + GlobalKVClient = MakeKVClient("global") + + // LocalKVProvider constant represents the KV provider backend + LocalKVProvider = 
MakeKVProvider("local") + + // LocalKVProviderURL constant represents the KV provider URL + LocalKVProviderURL = MakeKVProviderURL("local") + + // LocalKVProviderConfig constant represents the KV provider Config + LocalKVProviderConfig = MakeKVProviderConfig("local") + + // LocalKVClient constants represents the local kv store client + LocalKVClient = MakeKVClient("local") +) + +// MakeKVProvider returns the kvprovider label for the scope +func MakeKVProvider(scope string) string { + return DriverPrivatePrefix + scope + "kv_provider" +} + +// MakeKVProviderURL returns the kvprovider url label for the scope +func MakeKVProviderURL(scope string) string { + return DriverPrivatePrefix + scope + "kv_provider_url" +} + +// MakeKVProviderConfig returns the kvprovider config label for the scope +func MakeKVProviderConfig(scope string) string { + return DriverPrivatePrefix + scope + "kv_provider_config" +} + +// MakeKVClient returns the kv client label for the scope +func MakeKVClient(scope string) string { + return DriverPrivatePrefix + scope + "kv_client" +} + +// Key extracts the key portion of the label +func Key(label string) (key string) { + if kv := strings.SplitN(label, "=", 2); len(kv) > 0 { + key = kv[0] + } + return +} + +// Value extracts the value portion of the label +func Value(label string) (value string) { + if kv := strings.SplitN(label, "=", 2); len(kv) > 1 { + value = kv[1] + } + return +} + +// KeyValue decomposes the label in the (key,value) pair +func KeyValue(label string) (key string, value string) { + if kv := strings.SplitN(label, "=", 2); len(kv) > 0 { + key = kv[0] + if len(kv) > 1 { + value = kv[1] + } + } + return +} diff --git a/vendor/github.com/docker/libnetwork/netutils/utils.go b/vendor/github.com/docker/libnetwork/netutils/utils.go new file mode 100644 index 0000000000..7de98f6b07 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/netutils/utils.go @@ -0,0 +1,194 @@ +// Network utility functions. 
+ +package netutils + +import ( + "crypto/rand" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "strings" + + "github.com/docker/libnetwork/types" +) + +var ( + // ErrNetworkOverlapsWithNameservers preformatted error + ErrNetworkOverlapsWithNameservers = errors.New("requested network overlaps with nameserver") + // ErrNetworkOverlaps preformatted error + ErrNetworkOverlaps = errors.New("requested network overlaps with existing network") + // ErrNoDefaultRoute preformatted error + ErrNoDefaultRoute = errors.New("no default route") +) + +// CheckNameserverOverlaps checks whether the passed network overlaps with any of the nameservers +func CheckNameserverOverlaps(nameservers []string, toCheck *net.IPNet) error { + if len(nameservers) > 0 { + for _, ns := range nameservers { + _, nsNetwork, err := net.ParseCIDR(ns) + if err != nil { + return err + } + if NetworkOverlaps(toCheck, nsNetwork) { + return ErrNetworkOverlapsWithNameservers + } + } + } + return nil +} + +// NetworkOverlaps detects overlap between one IPNet and another +func NetworkOverlaps(netX *net.IPNet, netY *net.IPNet) bool { + return netX.Contains(netY.IP) || netY.Contains(netX.IP) +} + +// NetworkRange calculates the first and last IP addresses in an IPNet +func NetworkRange(network *net.IPNet) (net.IP, net.IP) { + if network == nil { + return nil, nil + } + + firstIP := network.IP.Mask(network.Mask) + lastIP := types.GetIPCopy(firstIP) + for i := 0; i < len(firstIP); i++ { + lastIP[i] = firstIP[i] | ^network.Mask[i] + } + + if network.IP.To4() != nil { + firstIP = firstIP.To4() + lastIP = lastIP.To4() + } + + return firstIP, lastIP +} + +// GetIfaceAddr returns the first IPv4 address and slice of IPv6 addresses for the specified network interface +func GetIfaceAddr(name string) (net.Addr, []net.Addr, error) { + iface, err := net.InterfaceByName(name) + if err != nil { + return nil, nil, err + } + addrs, err := iface.Addrs() + if err != nil { + return nil, nil, err + } + var addrs4 []net.Addr + var addrs6 []net.Addr + for _, addr := range addrs { + ip := (addr.(*net.IPNet)).IP + if ip4 := ip.To4(); ip4 != nil { + addrs4 = append(addrs4, addr) + } else if ip6 := ip.To16(); len(ip6) == net.IPv6len { + addrs6 = append(addrs6, addr) + } + } + switch { + case len(addrs4) == 0: + return nil, nil, fmt.Errorf("Interface %v has no IPv4 addresses", name) + case len(addrs4) > 1: + fmt.Printf("Interface %v has more than 1 IPv4 address. Defaulting to using %v\n", + name, (addrs4[0].(*net.IPNet)).IP) + } + return addrs4[0], addrs6, nil +} + +func genMAC(ip net.IP) net.HardwareAddr { + hw := make(net.HardwareAddr, 6) + // The first byte of the MAC address has to comply with these rules: + // 1. Unicast: Set the least-significant bit to 0. + // 2. Address is locally administered: Set the second-least-significant bit (U/L) to 1. + hw[0] = 0x02 + // The first 24 bits of the MAC represent the Organizationally Unique Identifier (OUI). + // Since this address is locally administered, we can do whatever we want as long as + // it doesn't conflict with other addresses. + hw[1] = 0x42 + // Fill the remaining 4 bytes based on the input + if ip == nil { + rand.Read(hw[2:]) + } else { + copy(hw[2:], ip.To4()) + } + return hw +} + +// GenerateRandomMAC returns a new 6-byte(48-bit) hardware address (MAC) +func GenerateRandomMAC() net.HardwareAddr { + return genMAC(nil) +} + +// GenerateMACFromIP returns a locally administered MAC address where the 4 least +// significant bytes are derived from the IPv4 address. 
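+// For example, assuming ip is 10.0.1.5, the generated address is
+// 02:42:0a:00:01:05.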
+func GenerateMACFromIP(ip net.IP) net.HardwareAddr {
+	return genMAC(ip)
+}
+
+// GenerateRandomName returns a new name joined with a prefix. The specified
+// size is used to truncate the randomly generated value
+func GenerateRandomName(prefix string, size int) (string, error) {
+	id := make([]byte, 32)
+	if _, err := io.ReadFull(rand.Reader, id); err != nil {
+		return "", err
+	}
+	return prefix + hex.EncodeToString(id)[:size], nil
+}
+
+// ReverseIP accepts a V4 or V6 IP string in the canonical form and returns a reversed IP in
+// the dotted decimal form. This is used to set up the IP to service name mapping in the optimal
+// way for the DNS PTR queries.
+func ReverseIP(IP string) string {
+	var reverseIP []string
+
+	if net.ParseIP(IP).To4() != nil {
+		reverseIP = strings.Split(IP, ".")
+		l := len(reverseIP)
+		for i, j := 0, l-1; i < l/2; i, j = i+1, j-1 {
+			reverseIP[i], reverseIP[j] = reverseIP[j], reverseIP[i]
+		}
+	} else {
+		reverseIP = strings.Split(IP, ":")
+
+		// Reversed IPv6 is represented in dotted decimal instead of the typical
+		// colon hex notation
+		for key := range reverseIP {
+			if len(reverseIP[key]) == 0 { // expand the compressed 0s
+				reverseIP[key] = strings.Repeat("0000", 8-strings.Count(IP, ":"))
+			} else if len(reverseIP[key]) < 4 { // 0-padding needed
+				reverseIP[key] = strings.Repeat("0", 4-len(reverseIP[key])) + reverseIP[key]
+			}
+		}
+
+		reverseIP = strings.Split(strings.Join(reverseIP, ""), "")
+
+		l := len(reverseIP)
+		for i, j := 0, l-1; i < l/2; i, j = i+1, j-1 {
+			reverseIP[i], reverseIP[j] = reverseIP[j], reverseIP[i]
+		}
+	}
+
+	return strings.Join(reverseIP, ".")
+}
+
+// ParseAlias parses and validates the specified string as an alias format (name:alias)
+func ParseAlias(val string) (string, string, error) {
+	if val == "" {
+		return "", "", errors.New("empty string specified for alias")
+	}
+	arr := strings.Split(val, ":")
+	if len(arr) > 2 {
+		return "", "", fmt.Errorf("bad format for alias: %s", val)
+	}
+	if len(arr) == 1 {
+		return val, val, nil
+	}
+	return arr[0], arr[1], nil
+}
+
+// ValidateAlias validates that the specified string has a valid alias format (containerName:alias).
+func ValidateAlias(val string) (string, error) {
+	if _, _, err := ParseAlias(val); err != nil {
+		return val, err
+	}
+	return val, nil
+}
diff --git a/vendor/github.com/docker/libnetwork/netutils/utils_freebsd.go b/vendor/github.com/docker/libnetwork/netutils/utils_freebsd.go
new file mode 100644
index 0000000000..02bcd32aa8
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/netutils/utils_freebsd.go
@@ -0,0 +1,23 @@
+package netutils
+
+import (
+	"net"
+
+	"github.com/docker/libnetwork/types"
+)
+
+// ElectInterfaceAddresses looks for an interface on the OS with the specified name
+// and returns all its IPv4 and IPv6 addresses in CIDR notation.
+// If a failure in retrieving the addresses or no IPv4 address is found, an error is returned.
+// If the interface does not exist, it chooses from a predefined
+// list the first IPv4 address which does not conflict with other
+// interfaces on the system.
+func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) {
+	return nil, nil, types.NotImplementedErrorf("not supported on freebsd")
+}
+
+// FindAvailableNetwork returns a network from the passed list which does not
+// overlap with existing interfaces in the system
+func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) {
+	return nil, types.NotImplementedErrorf("not supported on freebsd")
+}
diff --git a/vendor/github.com/docker/libnetwork/netutils/utils_linux.go b/vendor/github.com/docker/libnetwork/netutils/utils_linux.go
new file mode 100644
index 0000000000..870cc12855
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/netutils/utils_linux.go
@@ -0,0 +1,126 @@
+// +build linux
+// Network utility functions.
+
+package netutils
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/docker/libnetwork/ipamutils"
+	"github.com/docker/libnetwork/ns"
+	"github.com/docker/libnetwork/osl"
+	"github.com/docker/libnetwork/resolvconf"
+	"github.com/docker/libnetwork/types"
+	"github.com/vishvananda/netlink"
+)
+
+var (
+	networkGetRoutesFct func(netlink.Link, int) ([]netlink.Route, error)
+)
+
+// CheckRouteOverlaps checks whether the passed network overlaps with any existing routes
+func CheckRouteOverlaps(toCheck *net.IPNet) error {
+	if networkGetRoutesFct == nil {
+		networkGetRoutesFct = ns.NlHandle().RouteList
+	}
+	networks, err := networkGetRoutesFct(nil, netlink.FAMILY_V4)
+	if err != nil {
+		return err
+	}
+	for _, network := range networks {
+		if network.Dst != nil && NetworkOverlaps(toCheck, network.Dst) {
+			return ErrNetworkOverlaps
+		}
+	}
+	return nil
+}
+
+// GenerateIfaceName returns an interface name using the passed in
+// prefix and the length of random bytes. The API ensures that there
+// is no existing interface with that name.
+func GenerateIfaceName(nlh *netlink.Handle, prefix string, len int) (string, error) {
+	linkByName := netlink.LinkByName
+	if nlh != nil {
+		linkByName = nlh.LinkByName
+	}
+	for i := 0; i < 3; i++ {
+		name, err := GenerateRandomName(prefix, len)
+		if err != nil {
+			continue
+		}
+		_, err = linkByName(name)
+		if err != nil {
+			if strings.Contains(err.Error(), "not found") {
+				return name, nil
+			}
+			return "", err
+		}
+	}
+	return "", types.InternalErrorf("could not generate interface name")
+}
+
+// ElectInterfaceAddresses looks for an interface on the OS with the
+// specified name and returns all its IPv4 and IPv6 addresses in CIDR notation.
+// If a failure in retrieving the addresses or no IPv4 address is found, an error is returned.
+// If the interface does not exist, it chooses from a predefined
+// list the first IPv4 address which does not conflict with other
+// interfaces on the system.
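+// (Hypothetical illustration: if eth0 carries 172.17.0.5/16, electing
+// addresses for "eth0" returns that network; if eth0 does not exist, the
+// first predefined pool passing the overlap checks is returned instead.)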
+func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) {
+	var (
+		v4Nets []*net.IPNet
+		v6Nets []*net.IPNet
+	)
+
+	defer osl.InitOSContext()()
+
+	link, _ := ns.NlHandle().LinkByName(name)
+	if link != nil {
+		v4addr, err := ns.NlHandle().AddrList(link, netlink.FAMILY_V4)
+		if err != nil {
+			return nil, nil, err
+		}
+		v6addr, err := ns.NlHandle().AddrList(link, netlink.FAMILY_V6)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, nlAddr := range v4addr {
+			v4Nets = append(v4Nets, nlAddr.IPNet)
+		}
+		for _, nlAddr := range v6addr {
+			v6Nets = append(v6Nets, nlAddr.IPNet)
+		}
+	}
+
+	if link == nil || len(v4Nets) == 0 {
+		// Choose from predefined broad networks
+		v4Net, err := FindAvailableNetwork(ipamutils.PredefinedBroadNetworks)
+		if err != nil {
+			return nil, nil, err
+		}
+		v4Nets = append(v4Nets, v4Net)
+	}
+
+	return v4Nets, v6Nets, nil
+}
+
+// FindAvailableNetwork returns a network from the passed list which does not
+// overlap with existing interfaces in the system
+func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) {
+	// We don't check for an error here, because we don't really care if we
+	// can't read /etc/resolv.conf. So instead we skip the append if resolvConf
+	// is nil. It either doesn't exist, or we can't read it for some reason.
+	var nameservers []string
+	if rc, err := resolvconf.Get(); err == nil {
+		nameservers = resolvconf.GetNameserversAsCIDR(rc.Content)
+	}
+	for _, nw := range list {
+		if err := CheckNameserverOverlaps(nameservers, nw); err == nil {
+			if err := CheckRouteOverlaps(nw); err == nil {
+				return nw, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("no available network")
+}
diff --git a/vendor/github.com/docker/libnetwork/netutils/utils_windows.go b/vendor/github.com/docker/libnetwork/netutils/utils_windows.go
new file mode 100644
index 0000000000..73af44ec71
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/netutils/utils_windows.go
@@ -0,0 +1,25 @@
+package netutils
+
+import (
+	"net"
+
+	"github.com/docker/libnetwork/types"
+)
+
+// ElectInterfaceAddresses looks for an interface on the OS with the specified name
+// and returns all its IPv4 and IPv6 addresses in CIDR notation.
+// If a failure in retrieving the addresses or no IPv4 address is found, an error is returned.
+// If the interface does not exist, it chooses from a predefined
+// list the first IPv4 address which does not conflict with other
+// interfaces on the system.
+func ElectInterfaceAddresses(name string) ([]*net.IPNet, []*net.IPNet, error) { + return nil, nil, types.NotImplementedErrorf("not supported on windows") +} + +// FindAvailableNetwork returns a network from the passed list which does not +// overlap with existing interfaces in the system + +// TODO : Use appropriate windows APIs to identify non-overlapping subnets +func FindAvailableNetwork(list []*net.IPNet) (*net.IPNet, error) { + return nil, nil +} diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go new file mode 100644 index 0000000000..e283d65815 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/network.go @@ -0,0 +1,2133 @@ +package libnetwork + +import ( + "encoding/json" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/docker/docker/pkg/stringid" + "github.com/docker/libnetwork/common" + "github.com/docker/libnetwork/config" + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/driverapi" + "github.com/docker/libnetwork/etchosts" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/netutils" + "github.com/docker/libnetwork/networkdb" + "github.com/docker/libnetwork/options" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// A Network represents a logical connectivity zone that containers may +// join using the Link method. A Network is managed by a specific driver. +type Network interface { + // A user chosen name for this network. + Name() string + + // A system generated id for this network. + ID() string + + // The type of network, which corresponds to its managing driver. + Type() string + + // Create a new endpoint to this network symbolically identified by the + // specified unique name. The options parameter carries driver specific options. + CreateEndpoint(name string, options ...EndpointOption) (Endpoint, error) + + // Delete the network. + Delete() error + + // Endpoints returns the list of Endpoint(s) in this network. + Endpoints() []Endpoint + + // WalkEndpoints uses the provided function to walk the Endpoints + WalkEndpoints(walker EndpointWalker) + + // EndpointByName returns the Endpoint which has the passed name. If not found, the error ErrNoSuchEndpoint is returned. + EndpointByName(name string) (Endpoint, error) + + // EndpointByID returns the Endpoint which has the passed id. If not found, the error ErrNoSuchEndpoint is returned. + EndpointByID(id string) (Endpoint, error) + + // Return certain operational data belonging to this network + Info() NetworkInfo +} + +// NetworkInfo returns some configuration and operational information about the network +type NetworkInfo interface { + IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) + IpamInfo() ([]*IpamInfo, []*IpamInfo) + DriverOptions() map[string]string + Scope() string + IPv6Enabled() bool + Internal() bool + Attachable() bool + Ingress() bool + ConfigFrom() string + ConfigOnly() bool + Labels() map[string]string + Dynamic() bool + Created() time.Time + // Peers returns a slice of PeerInfo structures which has the information about the peer + // nodes participating in the same overlay network. This is currently the per-network + // gossip cluster. For non-dynamic overlay networks and bridge networks it returns an + // empty slice + Peers() []networkdb.PeerInfo + //Services returns a map of services keyed by the service name with the details + //of all the tasks that belong to the service. 
Applicable only in swarm mode. + Services() map[string]ServiceInfo +} + +// EndpointWalker is a client provided function which will be used to walk the Endpoints. +// When the function returns true, the walk will stop. +type EndpointWalker func(ep Endpoint) bool + +// ipInfo is the reverse mapping from IP to service name to serve the PTR query. +// extResolver is set if an externl server resolves a service name to this IP. +// Its an indication to defer PTR queries also to that external server. +type ipInfo struct { + name string + serviceID string + extResolver bool +} + +// svcMapEntry is the body of the element into the svcMap +// The ip is a string because the SetMatrix does not accept non hashable values +type svcMapEntry struct { + ip string + serviceID string +} + +type svcInfo struct { + svcMap common.SetMatrix + svcIPv6Map common.SetMatrix + ipMap common.SetMatrix + service map[string][]servicePorts +} + +// backing container or host's info +type serviceTarget struct { + name string + ip net.IP + port uint16 +} + +type servicePorts struct { + portName string + proto string + target []serviceTarget +} + +type networkDBTable struct { + name string + objType driverapi.ObjectType +} + +// IpamConf contains all the ipam related configurations for a network +type IpamConf struct { + // The master address pool for containers and network interfaces + PreferredPool string + // A subset of the master pool. If specified, + // this becomes the container pool + SubPool string + // Preferred Network Gateway address (optional) + Gateway string + // Auxiliary addresses for network driver. Must be within the master pool. + // libnetwork will reserve them if they fall into the container pool + AuxAddresses map[string]string +} + +// Validate checks whether the configuration is valid +func (c *IpamConf) Validate() error { + if c.Gateway != "" && nil == net.ParseIP(c.Gateway) { + return types.BadRequestErrorf("invalid gateway address %s in Ipam configuration", c.Gateway) + } + return nil +} + +// IpamInfo contains all the ipam related operational info for a network +type IpamInfo struct { + PoolID string + Meta map[string]string + driverapi.IPAMData +} + +// MarshalJSON encodes IpamInfo into json message +func (i *IpamInfo) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "PoolID": i.PoolID, + } + v, err := json.Marshal(&i.IPAMData) + if err != nil { + return nil, err + } + m["IPAMData"] = string(v) + + if i.Meta != nil { + m["Meta"] = i.Meta + } + return json.Marshal(m) +} + +// UnmarshalJSON decodes json message into PoolData +func (i *IpamInfo) UnmarshalJSON(data []byte) error { + var ( + m map[string]interface{} + err error + ) + if err = json.Unmarshal(data, &m); err != nil { + return err + } + i.PoolID = m["PoolID"].(string) + if v, ok := m["Meta"]; ok { + b, _ := json.Marshal(v) + if err = json.Unmarshal(b, &i.Meta); err != nil { + return err + } + } + if v, ok := m["IPAMData"]; ok { + if err = json.Unmarshal([]byte(v.(string)), &i.IPAMData); err != nil { + return err + } + } + return nil +} + +type network struct { + ctrlr *controller + name string + networkType string + id string + created time.Time + scope string // network data scope + labels map[string]string + ipamType string + ipamOptions map[string]string + addrSpace string + ipamV4Config []*IpamConf + ipamV6Config []*IpamConf + ipamV4Info []*IpamInfo + ipamV6Info []*IpamInfo + enableIPv6 bool + postIPv6 bool + epCnt *endpointCnt + generic options.Generic + dbIndex uint64 + dbExists bool + persist bool + stopWatchCh 
chan struct{} + drvOnce *sync.Once + resolverOnce sync.Once + resolver []Resolver + internal bool + attachable bool + inDelete bool + ingress bool + driverTables []networkDBTable + dynamic bool + configOnly bool + configFrom string + loadBalancerIP net.IP + sync.Mutex +} + +func (n *network) Name() string { + n.Lock() + defer n.Unlock() + + return n.name +} + +func (n *network) ID() string { + n.Lock() + defer n.Unlock() + + return n.id +} + +func (n *network) Created() time.Time { + n.Lock() + defer n.Unlock() + + return n.created +} + +func (n *network) Type() string { + n.Lock() + defer n.Unlock() + + return n.networkType +} + +func (n *network) Key() []string { + n.Lock() + defer n.Unlock() + return []string{datastore.NetworkKeyPrefix, n.id} +} + +func (n *network) KeyPrefix() []string { + return []string{datastore.NetworkKeyPrefix} +} + +func (n *network) Value() []byte { + n.Lock() + defer n.Unlock() + b, err := json.Marshal(n) + if err != nil { + return nil + } + return b +} + +func (n *network) SetValue(value []byte) error { + return json.Unmarshal(value, n) +} + +func (n *network) Index() uint64 { + n.Lock() + defer n.Unlock() + return n.dbIndex +} + +func (n *network) SetIndex(index uint64) { + n.Lock() + n.dbIndex = index + n.dbExists = true + n.Unlock() +} + +func (n *network) Exists() bool { + n.Lock() + defer n.Unlock() + return n.dbExists +} + +func (n *network) Skip() bool { + n.Lock() + defer n.Unlock() + return !n.persist +} + +func (n *network) New() datastore.KVObject { + n.Lock() + defer n.Unlock() + + return &network{ + ctrlr: n.ctrlr, + drvOnce: &sync.Once{}, + scope: n.scope, + } +} + +// CopyTo deep copies to the destination IpamConfig +func (c *IpamConf) CopyTo(dstC *IpamConf) error { + dstC.PreferredPool = c.PreferredPool + dstC.SubPool = c.SubPool + dstC.Gateway = c.Gateway + if c.AuxAddresses != nil { + dstC.AuxAddresses = make(map[string]string, len(c.AuxAddresses)) + for k, v := range c.AuxAddresses { + dstC.AuxAddresses[k] = v + } + } + return nil +} + +// CopyTo deep copies to the destination IpamInfo +func (i *IpamInfo) CopyTo(dstI *IpamInfo) error { + dstI.PoolID = i.PoolID + if i.Meta != nil { + dstI.Meta = make(map[string]string) + for k, v := range i.Meta { + dstI.Meta[k] = v + } + } + + dstI.AddressSpace = i.AddressSpace + dstI.Pool = types.GetIPNetCopy(i.Pool) + dstI.Gateway = types.GetIPNetCopy(i.Gateway) + + if i.AuxAddresses != nil { + dstI.AuxAddresses = make(map[string]*net.IPNet) + for k, v := range i.AuxAddresses { + dstI.AuxAddresses[k] = types.GetIPNetCopy(v) + } + } + + return nil +} + +func (n *network) validateConfiguration() error { + if n.configOnly { + // Only supports network specific configurations. + // Network operator configurations are not supported. + if n.ingress || n.internal || n.attachable || n.scope != "" { + return types.ForbiddenErrorf("configuration network can only contain network " + + "specific fields. 
Network operator fields like " + + "[ ingress | internal | attachable | scope ] are not supported.") + } + } + if n.configFrom != "" { + if n.configOnly { + return types.ForbiddenErrorf("a configuration network cannot depend on another configuration network") + } + if n.ipamType != "" && + n.ipamType != defaultIpamForNetworkType(n.networkType) || + n.enableIPv6 || + len(n.labels) > 0 || len(n.ipamOptions) > 0 || + len(n.ipamV4Config) > 0 || len(n.ipamV6Config) > 0 { + return types.ForbiddenErrorf("user specified configurations are not supported if the network depends on a configuration network") + } + if len(n.generic) > 0 { + if data, ok := n.generic[netlabel.GenericData]; ok { + var ( + driverOptions map[string]string + opts interface{} + ) + switch data.(type) { + case map[string]interface{}: + opts = data.(map[string]interface{}) + case map[string]string: + opts = data.(map[string]string) + } + ba, err := json.Marshal(opts) + if err != nil { + return fmt.Errorf("failed to validate network configuration: %v", err) + } + if err := json.Unmarshal(ba, &driverOptions); err != nil { + return fmt.Errorf("failed to validate network configuration: %v", err) + } + if len(driverOptions) > 0 { + return types.ForbiddenErrorf("network driver options are not supported if the network depends on a configuration network") + } + } + } + } + return nil +} + +// Applies network specific configurations +func (n *network) applyConfigurationTo(to *network) error { + to.enableIPv6 = n.enableIPv6 + if len(n.labels) > 0 { + to.labels = make(map[string]string, len(n.labels)) + for k, v := range n.labels { + if _, ok := to.labels[k]; !ok { + to.labels[k] = v + } + } + } + if len(n.ipamType) != 0 { + to.ipamType = n.ipamType + } + if len(n.ipamOptions) > 0 { + to.ipamOptions = make(map[string]string, len(n.ipamOptions)) + for k, v := range n.ipamOptions { + if _, ok := to.ipamOptions[k]; !ok { + to.ipamOptions[k] = v + } + } + } + if len(n.ipamV4Config) > 0 { + to.ipamV4Config = make([]*IpamConf, 0, len(n.ipamV4Config)) + to.ipamV4Config = append(to.ipamV4Config, n.ipamV4Config...) + } + if len(n.ipamV6Config) > 0 { + to.ipamV6Config = make([]*IpamConf, 0, len(n.ipamV6Config)) + to.ipamV6Config = append(to.ipamV6Config, n.ipamV6Config...) 
+ } + if len(n.generic) > 0 { + to.generic = options.Generic{} + for k, v := range n.generic { + to.generic[k] = v + } + } + return nil +} + +func (n *network) CopyTo(o datastore.KVObject) error { + n.Lock() + defer n.Unlock() + + dstN := o.(*network) + dstN.name = n.name + dstN.id = n.id + dstN.created = n.created + dstN.networkType = n.networkType + dstN.scope = n.scope + dstN.dynamic = n.dynamic + dstN.ipamType = n.ipamType + dstN.enableIPv6 = n.enableIPv6 + dstN.persist = n.persist + dstN.postIPv6 = n.postIPv6 + dstN.dbIndex = n.dbIndex + dstN.dbExists = n.dbExists + dstN.drvOnce = n.drvOnce + dstN.internal = n.internal + dstN.attachable = n.attachable + dstN.inDelete = n.inDelete + dstN.ingress = n.ingress + dstN.configOnly = n.configOnly + dstN.configFrom = n.configFrom + dstN.loadBalancerIP = n.loadBalancerIP + + // copy labels + if dstN.labels == nil { + dstN.labels = make(map[string]string, len(n.labels)) + } + for k, v := range n.labels { + dstN.labels[k] = v + } + + if n.ipamOptions != nil { + dstN.ipamOptions = make(map[string]string, len(n.ipamOptions)) + for k, v := range n.ipamOptions { + dstN.ipamOptions[k] = v + } + } + + for _, v4conf := range n.ipamV4Config { + dstV4Conf := &IpamConf{} + v4conf.CopyTo(dstV4Conf) + dstN.ipamV4Config = append(dstN.ipamV4Config, dstV4Conf) + } + + for _, v4info := range n.ipamV4Info { + dstV4Info := &IpamInfo{} + v4info.CopyTo(dstV4Info) + dstN.ipamV4Info = append(dstN.ipamV4Info, dstV4Info) + } + + for _, v6conf := range n.ipamV6Config { + dstV6Conf := &IpamConf{} + v6conf.CopyTo(dstV6Conf) + dstN.ipamV6Config = append(dstN.ipamV6Config, dstV6Conf) + } + + for _, v6info := range n.ipamV6Info { + dstV6Info := &IpamInfo{} + v6info.CopyTo(dstV6Info) + dstN.ipamV6Info = append(dstN.ipamV6Info, dstV6Info) + } + + dstN.generic = options.Generic{} + for k, v := range n.generic { + dstN.generic[k] = v + } + + return nil +} + +func (n *network) DataScope() string { + s := n.Scope() + // All swarm scope networks have local datascope + if s == datastore.SwarmScope { + s = datastore.LocalScope + } + return s +} + +func (n *network) getEpCnt() *endpointCnt { + n.Lock() + defer n.Unlock() + + return n.epCnt +} + +// TODO : Can be made much more generic with the help of reflection (but has some golang limitations) +func (n *network) MarshalJSON() ([]byte, error) { + netMap := make(map[string]interface{}) + netMap["name"] = n.name + netMap["id"] = n.id + netMap["created"] = n.created + netMap["networkType"] = n.networkType + netMap["scope"] = n.scope + netMap["labels"] = n.labels + netMap["ipamType"] = n.ipamType + netMap["ipamOptions"] = n.ipamOptions + netMap["addrSpace"] = n.addrSpace + netMap["enableIPv6"] = n.enableIPv6 + if n.generic != nil { + netMap["generic"] = n.generic + } + netMap["persist"] = n.persist + netMap["postIPv6"] = n.postIPv6 + if len(n.ipamV4Config) > 0 { + ics, err := json.Marshal(n.ipamV4Config) + if err != nil { + return nil, err + } + netMap["ipamV4Config"] = string(ics) + } + if len(n.ipamV4Info) > 0 { + iis, err := json.Marshal(n.ipamV4Info) + if err != nil { + return nil, err + } + netMap["ipamV4Info"] = string(iis) + } + if len(n.ipamV6Config) > 0 { + ics, err := json.Marshal(n.ipamV6Config) + if err != nil { + return nil, err + } + netMap["ipamV6Config"] = string(ics) + } + if len(n.ipamV6Info) > 0 { + iis, err := json.Marshal(n.ipamV6Info) + if err != nil { + return nil, err + } + netMap["ipamV6Info"] = string(iis) + } + netMap["internal"] = n.internal + netMap["attachable"] = n.attachable + netMap["inDelete"] = 
n.inDelete + netMap["ingress"] = n.ingress + netMap["configOnly"] = n.configOnly + netMap["configFrom"] = n.configFrom + netMap["loadBalancerIP"] = n.loadBalancerIP + return json.Marshal(netMap) +} + +// TODO : Can be made much more generic with the help of reflection (but has some golang limitations) +func (n *network) UnmarshalJSON(b []byte) (err error) { + var netMap map[string]interface{} + if err := json.Unmarshal(b, &netMap); err != nil { + return err + } + n.name = netMap["name"].(string) + n.id = netMap["id"].(string) + // "created" is not available in older versions + if v, ok := netMap["created"]; ok { + // n.created is time.Time but marshalled as string + if err = n.created.UnmarshalText([]byte(v.(string))); err != nil { + logrus.Warnf("failed to unmarshal creation time %v: %v", v, err) + n.created = time.Time{} + } + } + n.networkType = netMap["networkType"].(string) + n.enableIPv6 = netMap["enableIPv6"].(bool) + + // if we weren't unmarshaling to netMap we could simply set n.labels + // unfortunately, we can't because map[string]interface{} != map[string]string + if labels, ok := netMap["labels"].(map[string]interface{}); ok { + n.labels = make(map[string]string, len(labels)) + for label, value := range labels { + n.labels[label] = value.(string) + } + } + + if v, ok := netMap["ipamOptions"]; ok { + if iOpts, ok := v.(map[string]interface{}); ok { + n.ipamOptions = make(map[string]string, len(iOpts)) + for k, v := range iOpts { + n.ipamOptions[k] = v.(string) + } + } + } + + if v, ok := netMap["generic"]; ok { + n.generic = v.(map[string]interface{}) + // Restore opts in their map[string]string form + if v, ok := n.generic[netlabel.GenericData]; ok { + var lmap map[string]string + ba, err := json.Marshal(v) + if err != nil { + return err + } + if err := json.Unmarshal(ba, &lmap); err != nil { + return err + } + n.generic[netlabel.GenericData] = lmap + } + } + if v, ok := netMap["persist"]; ok { + n.persist = v.(bool) + } + if v, ok := netMap["postIPv6"]; ok { + n.postIPv6 = v.(bool) + } + if v, ok := netMap["ipamType"]; ok { + n.ipamType = v.(string) + } else { + n.ipamType = ipamapi.DefaultIPAM + } + if v, ok := netMap["addrSpace"]; ok { + n.addrSpace = v.(string) + } + if v, ok := netMap["ipamV4Config"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Config); err != nil { + return err + } + } + if v, ok := netMap["ipamV4Info"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &n.ipamV4Info); err != nil { + return err + } + } + if v, ok := netMap["ipamV6Config"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Config); err != nil { + return err + } + } + if v, ok := netMap["ipamV6Info"]; ok { + if err := json.Unmarshal([]byte(v.(string)), &n.ipamV6Info); err != nil { + return err + } + } + if v, ok := netMap["internal"]; ok { + n.internal = v.(bool) + } + if v, ok := netMap["attachable"]; ok { + n.attachable = v.(bool) + } + if s, ok := netMap["scope"]; ok { + n.scope = s.(string) + } + if v, ok := netMap["inDelete"]; ok { + n.inDelete = v.(bool) + } + if v, ok := netMap["ingress"]; ok { + n.ingress = v.(bool) + } + if v, ok := netMap["configOnly"]; ok { + n.configOnly = v.(bool) + } + if v, ok := netMap["configFrom"]; ok { + n.configFrom = v.(string) + } + if v, ok := netMap["loadBalancerIP"]; ok { + n.loadBalancerIP = net.ParseIP(v.(string)) + } + // Reconcile old networks with the recently added `--ipv6` flag + if !n.enableIPv6 { + n.enableIPv6 = len(n.ipamV6Info) > 0 + } + return nil +} + +// NetworkOption is an option setter function type 
used to pass various options to +// NewNetwork method. The various setter functions of type NetworkOption are +// provided by libnetwork, they look like NetworkOptionXXXX(...) +type NetworkOption func(n *network) + +// NetworkOptionGeneric function returns an option setter for a Generic option defined +// in a Dictionary of Key-Value pair +func NetworkOptionGeneric(generic map[string]interface{}) NetworkOption { + return func(n *network) { + if n.generic == nil { + n.generic = make(map[string]interface{}) + } + if val, ok := generic[netlabel.EnableIPv6]; ok { + n.enableIPv6 = val.(bool) + } + if val, ok := generic[netlabel.Internal]; ok { + n.internal = val.(bool) + } + for k, v := range generic { + n.generic[k] = v + } + } +} + +// NetworkOptionIngress returns an option setter to indicate if a network is +// an ingress network. +func NetworkOptionIngress(ingress bool) NetworkOption { + return func(n *network) { + n.ingress = ingress + } +} + +// NetworkOptionPersist returns an option setter to set persistence policy for a network +func NetworkOptionPersist(persist bool) NetworkOption { + return func(n *network) { + n.persist = persist + } +} + +// NetworkOptionEnableIPv6 returns an option setter to explicitly configure IPv6 +func NetworkOptionEnableIPv6(enableIPv6 bool) NetworkOption { + return func(n *network) { + if n.generic == nil { + n.generic = make(map[string]interface{}) + } + n.enableIPv6 = enableIPv6 + n.generic[netlabel.EnableIPv6] = enableIPv6 + } +} + +// NetworkOptionInternalNetwork returns an option setter to config the network +// to be internal which disables default gateway service +func NetworkOptionInternalNetwork() NetworkOption { + return func(n *network) { + if n.generic == nil { + n.generic = make(map[string]interface{}) + } + n.internal = true + n.generic[netlabel.Internal] = true + } +} + +// NetworkOptionAttachable returns an option setter to set attachable for a network +func NetworkOptionAttachable(attachable bool) NetworkOption { + return func(n *network) { + n.attachable = attachable + } +} + +// NetworkOptionScope returns an option setter to overwrite the network's scope. +// By default the network's scope is set to the network driver's datascope. 
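+// For example, NetworkOptionScope(datastore.SwarmScope) would force swarm
+// scope on the network regardless of the driver's own data scope.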
+func NetworkOptionScope(scope string) NetworkOption { + return func(n *network) { + n.scope = scope + } +} + +// NetworkOptionIpam function returns an option setter for the ipam configuration for this network +func NetworkOptionIpam(ipamDriver string, addrSpace string, ipV4 []*IpamConf, ipV6 []*IpamConf, opts map[string]string) NetworkOption { + return func(n *network) { + if ipamDriver != "" { + n.ipamType = ipamDriver + if ipamDriver == ipamapi.DefaultIPAM { + n.ipamType = defaultIpamForNetworkType(n.Type()) + } + } + n.ipamOptions = opts + n.addrSpace = addrSpace + n.ipamV4Config = ipV4 + n.ipamV6Config = ipV6 + } +} + +// NetworkOptionLBEndpoint function returns an option setter for the configuration of the load balancer endpoint for this network +func NetworkOptionLBEndpoint(ip net.IP) NetworkOption { + return func(n *network) { + n.loadBalancerIP = ip + } +} + +// NetworkOptionDriverOpts function returns an option setter for any driver parameter described by a map +func NetworkOptionDriverOpts(opts map[string]string) NetworkOption { + return func(n *network) { + if n.generic == nil { + n.generic = make(map[string]interface{}) + } + if opts == nil { + opts = make(map[string]string) + } + // Store the options + n.generic[netlabel.GenericData] = opts + } +} + +// NetworkOptionLabels function returns an option setter for labels specific to a network +func NetworkOptionLabels(labels map[string]string) NetworkOption { + return func(n *network) { + n.labels = labels + } +} + +// NetworkOptionDynamic function returns an option setter for dynamic option for a network +func NetworkOptionDynamic() NetworkOption { + return func(n *network) { + n.dynamic = true + } +} + +// NetworkOptionDeferIPv6Alloc instructs the network to defer the IPv6 address allocation until after the endpoint has been created. +// It is provided to support the specific docker daemon flags where the user can deterministically assign an IPv6 address +// to a container as a combination of fixed-cidr-v6 + mac-address +// TODO: Remove this option setter once we support endpoint ipam options +func NetworkOptionDeferIPv6Alloc(enable bool) NetworkOption { + return func(n *network) { + n.postIPv6 = enable + } +} + +// NetworkOptionConfigOnly tells the controller this network is +// a configuration-only network. It serves as a configuration +// for other networks.
+func NetworkOptionConfigOnly() NetworkOption { + return func(n *network) { + n.configOnly = true + } +} + +// NetworkOptionConfigFrom tells controller to pick the +// network configuration from a configuration only network +func NetworkOptionConfigFrom(name string) NetworkOption { + return func(n *network) { + n.configFrom = name + } +} + +func (n *network) processOptions(options ...NetworkOption) { + for _, opt := range options { + if opt != nil { + opt(n) + } + } +} + +func (n *network) resolveDriver(name string, load bool) (driverapi.Driver, *driverapi.Capability, error) { + c := n.getController() + + // Check if a driver for the specified network type is available + d, cap := c.drvRegistry.Driver(name) + if d == nil { + if load { + err := c.loadDriver(name) + if err != nil { + return nil, nil, err + } + + d, cap = c.drvRegistry.Driver(name) + if d == nil { + return nil, nil, fmt.Errorf("could not resolve driver %s in registry", name) + } + } else { + // don't fail if driver loading is not required + return nil, nil, nil + } + } + + return d, cap, nil +} + +func (n *network) driverScope() string { + _, cap, err := n.resolveDriver(n.networkType, true) + if err != nil { + // If driver could not be resolved simply return an empty string + return "" + } + + return cap.DataScope +} + +func (n *network) driverIsMultihost() bool { + _, cap, err := n.resolveDriver(n.networkType, true) + if err != nil { + return false + } + return cap.ConnectivityScope == datastore.GlobalScope +} + +func (n *network) driver(load bool) (driverapi.Driver, error) { + d, cap, err := n.resolveDriver(n.networkType, load) + if err != nil { + return nil, err + } + + n.Lock() + // If load is not required, driver, cap and err may all be nil + if n.scope == "" && cap != nil { + n.scope = cap.DataScope + } + if n.dynamic { + // If the network is dynamic, then it is swarm + // scoped regardless of the backing driver. + n.scope = datastore.SwarmScope + } + n.Unlock() + return d, nil +} + +func (n *network) Delete() error { + return n.delete(false) +} + +func (n *network) delete(force bool) error { + n.Lock() + c := n.ctrlr + name := n.name + id := n.id + n.Unlock() + + c.networkLocker.Lock(id) + defer c.networkLocker.Unlock(id) + + n, err := c.getNetworkFromStore(id) + if err != nil { + return &UnknownNetworkError{name: name, id: id} + } + + if len(n.loadBalancerIP) != 0 { + endpoints := n.Endpoints() + if force || (len(endpoints) == 1 && !n.ingress) { + n.deleteLoadBalancerSandbox() + } + //Reload the network from the store to update the epcnt. 
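+ //(the endpoint count in the store changes once the load balancer endpoint above is removed)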
+ n, err = c.getNetworkFromStore(id) + if err != nil { + return &UnknownNetworkError{name: name, id: id} + } + } + + if !force && n.getEpCnt().EndpointCnt() != 0 { + if n.configOnly { + return types.ForbiddenErrorf("configuration network %q is in use", n.Name()) + } + return &ActiveEndpointsError{name: n.name, id: n.id} + } + + // Mark the network for deletion + n.inDelete = true + if err = c.updateToStore(n); err != nil { + return fmt.Errorf("error marking network %s (%s) for deletion: %v", n.Name(), n.ID(), err) + } + + if n.ConfigFrom() != "" { + if t, err := c.getConfigNetwork(n.ConfigFrom()); err == nil { + if err := t.getEpCnt().DecEndpointCnt(); err != nil { + logrus.Warnf("Failed to update reference count for configuration network %q on removal of network %q: %v", + t.Name(), n.Name(), err) + } + } else { + logrus.Warnf("Could not find configuration network %q during removal of network %q", n.configFrom, n.Name()) + } + } + + if n.configOnly { + goto removeFromStore + } + + if err = n.deleteNetwork(); err != nil { + if !force { + return err + } + logrus.Debugf("driver failed to delete stale network %s (%s): %v", n.Name(), n.ID(), err) + } + + n.ipamRelease() + if err = c.updateToStore(n); err != nil { + logrus.Warnf("Failed to update store after ipam release for network %s (%s): %v", n.Name(), n.ID(), err) + } + + // We are about to delete the network. Leave the gossip + // cluster for the network to stop all incoming network + // specific gossip updates before cleaning up all the service + // bindings for the network. But clean up service bindings + // before deleting the network from the store since service + // bindings cleanup requires the network in the store. + n.cancelDriverWatches() + if err = n.leaveCluster(); err != nil { + logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err) + } + + // Cleanup the service discovery for this network + c.cleanupServiceDiscovery(n.ID()) + + // Cleanup the load balancer + c.cleanupServiceBindings(n.ID()) + +removeFromStore: + // deleteFromStore performs an atomic delete operation and the + // network.epCnt will help prevent any possible + // race between endpoint join and network delete + if err = c.deleteFromStore(n.getEpCnt()); err != nil { + if !force { + return fmt.Errorf("error deleting network endpoint count from store: %v", err) + } + logrus.Debugf("Error deleting endpoint count from store for stale network %s (%s) for deletion: %v", n.Name(), n.ID(), err) + } + + if err = c.deleteFromStore(n); err != nil { + return fmt.Errorf("error deleting network from store: %v", err) + } + + return nil +} + +func (n *network) deleteNetwork() error { + d, err := n.driver(true) + if err != nil { + return fmt.Errorf("failed deleting network: %v", err) + } + + if err := d.DeleteNetwork(n.ID()); err != nil { + // Forbidden Errors should be honored + if _, ok := err.(types.ForbiddenError); ok { + return err + } + + if _, ok := err.(types.MaskableError); !ok { + logrus.Warnf("driver error deleting network %s : %v", n.name, err) + } + } + + for _, resolver := range n.resolver { + resolver.Stop() + } + return nil +} + +func (n *network) addEndpoint(ep *endpoint) error { + d, err := n.driver(true) + if err != nil { + return fmt.Errorf("failed to add endpoint: %v", err) + } + + err = d.CreateEndpoint(n.id, ep.id, ep.Interface(), ep.generic) + if err != nil { + return types.InternalErrorf("failed to create endpoint %s on network %s: %v", + ep.Name(), n.Name(), err) + } + + return nil +} + +func (n *network) CreateEndpoint(name
string, options ...EndpointOption) (Endpoint, error) { + var err error + if !config.IsValidName(name) { + return nil, ErrInvalidName(name) + } + + if n.ConfigOnly() { + return nil, types.ForbiddenErrorf("cannot create endpoint on configuration-only network") + } + + if _, err = n.EndpointByName(name); err == nil { + return nil, types.ForbiddenErrorf("endpoint with name %s already exists in network %s", name, n.Name()) + } + + n.ctrlr.networkLocker.Lock(n.id) + defer n.ctrlr.networkLocker.Unlock(n.id) + + return n.createEndpoint(name, options...) +} + +func (n *network) createEndpoint(name string, options ...EndpointOption) (Endpoint, error) { + var err error + + ep := &endpoint{name: name, generic: make(map[string]interface{}), iface: &endpointInterface{}} + ep.id = stringid.GenerateRandomID() + + // Initialize ep.network with a possibly stale copy of n. We need this to get network from + // store. But once we get it from store we will have the most up-to-date copy possible. + ep.network = n + ep.locator = n.getController().clusterHostID() + ep.network, err = ep.getNetworkFromStore() + if err != nil { + return nil, fmt.Errorf("failed to get network during CreateEndpoint: %v", err) + } + n = ep.network + + ep.processOptions(options...) + + for _, llIPNet := range ep.Iface().LinkLocalAddresses() { + if !llIPNet.IP.IsLinkLocalUnicast() { + return nil, types.BadRequestErrorf("invalid link local IP address: %v", llIPNet.IP) + } + } + + if opt, ok := ep.generic[netlabel.MacAddress]; ok { + if mac, ok := opt.(net.HardwareAddr); ok { + ep.iface.mac = mac + } + } + + ipam, cap, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + return nil, err + } + + if cap.RequiresMACAddress { + if ep.iface.mac == nil { + ep.iface.mac = netutils.GenerateRandomMAC() + } + if ep.ipamOptions == nil { + ep.ipamOptions = make(map[string]string) + } + ep.ipamOptions[netlabel.MacAddress] = ep.iface.mac.String() + } + + if err = ep.assignAddress(ipam, true, n.enableIPv6 && !n.postIPv6); err != nil { + return nil, err + } + defer func() { + if err != nil { + ep.releaseAddress() + } + }() + // Move updateToStore before calling addEndpoint so that we clean up veth interfaces in case + // dockerd gets killed between the addEndpoint and updateToStore calls + if err = n.getController().updateToStore(ep); err != nil { + return nil, err + } + defer func() { + if err != nil { + if e := n.getController().deleteFromStore(ep); e != nil { + logrus.Warnf("error rolling back endpoint %s from store: %v", name, e) + } + } + }() + + if err = n.addEndpoint(ep); err != nil { + return nil, err + } + defer func() { + if err != nil { + if e := ep.deleteEndpoint(false); e != nil { + logrus.Warnf("cleaning up endpoint %s failed: %v", name, e) + } + } + }() + + if err = ep.assignAddress(ipam, false, n.enableIPv6 && n.postIPv6); err != nil { + return nil, err + } + + // Watch for service records + n.getController().watchSvcRecord(ep) + defer func() { + if err != nil { + n.getController().unWatchSvcRecord(ep) + } + }() + + // Increment endpoint count to indicate completion of endpoint addition + if err = n.getEpCnt().IncEndpointCnt(); err != nil { + return nil, err + } + + return ep, nil +} + +func (n *network) Endpoints() []Endpoint { + var list []Endpoint + + endpoints, err := n.getEndpointsFromStore() + if err != nil { + logrus.Error(err) + } + + for _, ep := range endpoints { + list = append(list, ep) + } + + return list +} + +func (n *network) WalkEndpoints(walker EndpointWalker) { + for _, e := range n.Endpoints() { + if
walker(e) { + return + } + } +} + +func (n *network) EndpointByName(name string) (Endpoint, error) { + if name == "" { + return nil, ErrInvalidName(name) + } + var e Endpoint + + s := func(current Endpoint) bool { + if current.Name() == name { + e = current + return true + } + return false + } + + n.WalkEndpoints(s) + + if e == nil { + return nil, ErrNoSuchEndpoint(name) + } + + return e, nil +} + +func (n *network) EndpointByID(id string) (Endpoint, error) { + if id == "" { + return nil, ErrInvalidID(id) + } + + ep, err := n.getEndpointFromStore(id) + if err != nil { + return nil, ErrNoSuchEndpoint(id) + } + + return ep, nil +} + +func (n *network) updateSvcRecord(ep *endpoint, localEps []*endpoint, isAdd bool) { + var ipv6 net.IP + epName := ep.Name() + if iface := ep.Iface(); iface.Address() != nil { + myAliases := ep.MyAliases() + if iface.AddressIPv6() != nil { + ipv6 = iface.AddressIPv6().IP + } + + serviceID := ep.svcID + if serviceID == "" { + serviceID = ep.ID() + } + if isAdd { + // If anonymous endpoint has an alias use the first alias + // for ip->name mapping. Not having the reverse mapping + // breaks some apps + if ep.isAnonymous() { + if len(myAliases) > 0 { + n.addSvcRecords(ep.ID(), myAliases[0], serviceID, iface.Address().IP, ipv6, true, "updateSvcRecord") + } + } else { + n.addSvcRecords(ep.ID(), epName, serviceID, iface.Address().IP, ipv6, true, "updateSvcRecord") + } + for _, alias := range myAliases { + n.addSvcRecords(ep.ID(), alias, serviceID, iface.Address().IP, ipv6, false, "updateSvcRecord") + } + } else { + if ep.isAnonymous() { + if len(myAliases) > 0 { + n.deleteSvcRecords(ep.ID(), myAliases[0], serviceID, iface.Address().IP, ipv6, true, "updateSvcRecord") + } + } else { + n.deleteSvcRecords(ep.ID(), epName, serviceID, iface.Address().IP, ipv6, true, "updateSvcRecord") + } + for _, alias := range myAliases { + n.deleteSvcRecords(ep.ID(), alias, serviceID, iface.Address().IP, ipv6, false, "updateSvcRecord") + } + } + } +} + +func addIPToName(ipMap common.SetMatrix, name, serviceID string, ip net.IP) { + reverseIP := netutils.ReverseIP(ip.String()) + ipMap.Insert(reverseIP, ipInfo{ + name: name, + serviceID: serviceID, + }) +} + +func delIPToName(ipMap common.SetMatrix, name, serviceID string, ip net.IP) { + reverseIP := netutils.ReverseIP(ip.String()) + ipMap.Remove(reverseIP, ipInfo{ + name: name, + serviceID: serviceID, + }) +} + +func addNameToIP(svcMap common.SetMatrix, name, serviceID string, epIP net.IP) { + svcMap.Insert(name, svcMapEntry{ + ip: epIP.String(), + serviceID: serviceID, + }) +} + +func delNameToIP(svcMap common.SetMatrix, name, serviceID string, epIP net.IP) { + svcMap.Remove(name, svcMapEntry{ + ip: epIP.String(), + serviceID: serviceID, + }) +} + +func (n *network) addSvcRecords(eID, name, serviceID string, epIP, epIPv6 net.IP, ipMapUpdate bool, method string) { + // Do not add service names for ingress network as this is a + // routing only network + if n.ingress { + return + } + + logrus.Debugf("%s (%s).addSvcRecords(%s, %s, %s, %t) %s sid:%s", eID, n.ID()[0:7], name, epIP, epIPv6, ipMapUpdate, method, serviceID) + + c := n.getController() + c.Lock() + defer c.Unlock() + + sr, ok := c.svcRecords[n.ID()] + if !ok { + sr = svcInfo{ + svcMap: common.NewSetMatrix(), + svcIPv6Map: common.NewSetMatrix(), + ipMap: common.NewSetMatrix(), + } + c.svcRecords[n.ID()] = sr + } + + if ipMapUpdate { + addIPToName(sr.ipMap, name, serviceID, epIP) + if epIPv6 != nil { + addIPToName(sr.ipMap, name, serviceID, epIPv6) + } + } + + addNameToIP(sr.svcMap, 
name, serviceID, epIP) + if epIPv6 != nil { + addNameToIP(sr.svcIPv6Map, name, serviceID, epIPv6) + } +} + +func (n *network) deleteSvcRecords(eID, name, serviceID string, epIP net.IP, epIPv6 net.IP, ipMapUpdate bool, method string) { + // Do not delete service names from ingress network as this is a + // routing only network + if n.ingress { + return + } + + logrus.Debugf("%s (%s).deleteSvcRecords(%s, %s, %s, %t) %s sid:%s ", eID, n.ID()[0:7], name, epIP, epIPv6, ipMapUpdate, method, serviceID) + + c := n.getController() + c.Lock() + defer c.Unlock() + + sr, ok := c.svcRecords[n.ID()] + if !ok { + return + } + + if ipMapUpdate { + delIPToName(sr.ipMap, name, serviceID, epIP) + + if epIPv6 != nil { + delIPToName(sr.ipMap, name, serviceID, epIPv6) + } + } + + delNameToIP(sr.svcMap, name, serviceID, epIP) + + if epIPv6 != nil { + delNameToIP(sr.svcIPv6Map, name, serviceID, epIPv6) + } +} + +func (n *network) getSvcRecords(ep *endpoint) []etchosts.Record { + n.Lock() + defer n.Unlock() + + if ep == nil { + return nil + } + + var recs []etchosts.Record + + epName := ep.Name() + + n.ctrlr.Lock() + defer n.ctrlr.Unlock() + sr, ok := n.ctrlr.svcRecords[n.id] + if !ok || sr.svcMap == nil { + return nil + } + + svcMapKeys := sr.svcMap.Keys() + // Loop on service names on this network + for _, k := range svcMapKeys { + if strings.Split(k, ".")[0] == epName { + continue + } + // Get all the IPs associated to this service + mapEntryList, ok := sr.svcMap.Get(k) + if !ok { + // The key got deleted + continue + } + if len(mapEntryList) == 0 { + logrus.Warnf("Found empty list of IP addresses for service %s on network %s (%s)", k, n.name, n.id) + continue + } + + recs = append(recs, etchosts.Record{ + Hosts: k, + IP: mapEntryList[0].(svcMapEntry).ip, + }) + } + + return recs +} + +func (n *network) getController() *controller { + n.Lock() + defer n.Unlock() + return n.ctrlr +} + +func (n *network) ipamAllocate() error { + if n.hasSpecialDriver() { + return nil + } + + ipam, _, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + return err + } + + if n.addrSpace == "" { + if n.addrSpace, err = n.deriveAddressSpace(); err != nil { + return err + } + } + + err = n.ipamAllocateVersion(4, ipam) + if err != nil { + return err + } + + defer func() { + if err != nil { + n.ipamReleaseVersion(4, ipam) + } + }() + + if !n.enableIPv6 { + return nil + } + + err = n.ipamAllocateVersion(6, ipam) + return err +} + +func (n *network) requestPoolHelper(ipam ipamapi.Ipam, addressSpace, preferredPool, subPool string, options map[string]string, v6 bool) (string, *net.IPNet, map[string]string, error) { + for { + poolID, pool, meta, err := ipam.RequestPool(addressSpace, preferredPool, subPool, options, v6) + if err != nil { + return "", nil, nil, err + } + + // If the network belongs to global scope or the pool was + // explicitly chosen or it is invalid, do not perform the overlap check. + if n.Scope() == datastore.GlobalScope || preferredPool != "" || !types.IsIPNetValid(pool) { + return poolID, pool, meta, nil + } + + // Check for overlap and if none found, we have found the right pool. + if _, err := netutils.FindAvailableNetwork([]*net.IPNet{pool}); err == nil { + return poolID, pool, meta, nil + } + + // Pool obtained in this iteration is + // overlapping. Hold onto the pool and don't release + // it yet, because we don't want ipam to give us back + // the same pool over again. 
But make sure we still do + // a deferred release when we have either obtained a + // non-overlapping pool or ran out of pre-defined + // pools. + defer func() { + if err := ipam.ReleasePool(poolID); err != nil { + logrus.Warnf("Failed to release overlapping pool %s while returning from pool request helper for network %s", pool, n.Name()) + } + }() + + // If this is a preferred pool request and the network + // is local scope and there is an overlap, we fail the + // network creation right here. The pool will be + // released in the defer. + if preferredPool != "" { + return "", nil, nil, fmt.Errorf("requested subnet %s overlaps in the host", preferredPool) + } + } +} + +func (n *network) ipamAllocateVersion(ipVer int, ipam ipamapi.Ipam) error { + var ( + cfgList *[]*IpamConf + infoList *[]*IpamInfo + err error + ) + + switch ipVer { + case 4: + cfgList = &n.ipamV4Config + infoList = &n.ipamV4Info + case 6: + cfgList = &n.ipamV6Config + infoList = &n.ipamV6Info + default: + return types.InternalErrorf("incorrect ip version passed to ipam allocate: %d", ipVer) + } + + if len(*cfgList) == 0 { + *cfgList = []*IpamConf{{}} + } + + *infoList = make([]*IpamInfo, len(*cfgList)) + + logrus.Debugf("Allocating IPv%d pools for network %s (%s)", ipVer, n.Name(), n.ID()) + + for i, cfg := range *cfgList { + if err = cfg.Validate(); err != nil { + return err + } + d := &IpamInfo{} + (*infoList)[i] = d + + d.AddressSpace = n.addrSpace + d.PoolID, d.Pool, d.Meta, err = n.requestPoolHelper(ipam, n.addrSpace, cfg.PreferredPool, cfg.SubPool, n.ipamOptions, ipVer == 6) + if err != nil { + return err + } + + defer func() { + if err != nil { + if err := ipam.ReleasePool(d.PoolID); err != nil { + logrus.Warnf("Failed to release address pool %s after failure to create network %s (%s)", d.PoolID, n.Name(), n.ID()) + } + } + }() + + if gws, ok := d.Meta[netlabel.Gateway]; ok { + if d.Gateway, err = types.ParseCIDR(gws); err != nil { + return types.BadRequestErrorf("failed to parse gateway address (%v) returned by ipam driver: %v", gws, err) + } + } + + // If user requested a specific gateway, libnetwork will allocate it + // irrespective of whether ipam driver returned a gateway already. + // If none of the above is true, libnetwork will allocate one. 
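+ // For example, cfg.Gateway == "10.0.0.1" pins that exact address below, while with an empty + // cfg.Gateway net.ParseIP returns nil and the ipam driver is free to pick any available address.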
+ if cfg.Gateway != "" || d.Gateway == nil { + var gatewayOpts = map[string]string{ + ipamapi.RequestAddressType: netlabel.Gateway, + } + if d.Gateway, _, err = ipam.RequestAddress(d.PoolID, net.ParseIP(cfg.Gateway), gatewayOpts); err != nil { + return types.InternalErrorf("failed to allocate gateway (%v): %v", cfg.Gateway, err) + } + } + + // Auxiliary addresses must be part of the master address pool. + // If they fall into the container addressable pool, libnetwork will reserve them + if cfg.AuxAddresses != nil { + var ip net.IP + d.IPAMData.AuxAddresses = make(map[string]*net.IPNet, len(cfg.AuxAddresses)) + for k, v := range cfg.AuxAddresses { + if ip = net.ParseIP(v); ip == nil { + return types.BadRequestErrorf("non-parsable secondary ip address (%s:%s) passed for network %s", k, v, n.Name()) + } + if !d.Pool.Contains(ip) { + return types.ForbiddenErrorf("auxiliary address: (%s:%s) must belong to the master pool: %s", k, v, d.Pool) + } + // Attempt reservation in the container addressable pool, silence the error if the address does not belong to that pool + if d.IPAMData.AuxAddresses[k], _, err = ipam.RequestAddress(d.PoolID, ip, nil); err != nil && err != ipamapi.ErrIPOutOfRange { + return types.InternalErrorf("failed to allocate secondary ip address (%s:%s): %v", k, v, err) + } + } + } + } + + return nil +} + +func (n *network) ipamRelease() { + if n.hasSpecialDriver() { + return + } + ipam, _, err := n.getController().getIPAMDriver(n.ipamType) + if err != nil { + logrus.Warnf("Failed to retrieve ipam driver to release address pool(s) on delete of network %s (%s): %v", n.Name(), n.ID(), err) + return + } + n.ipamReleaseVersion(4, ipam) + n.ipamReleaseVersion(6, ipam) +} + +func (n *network) ipamReleaseVersion(ipVer int, ipam ipamapi.Ipam) { + var infoList *[]*IpamInfo + + switch ipVer { + case 4: + infoList = &n.ipamV4Info + case 6: + infoList = &n.ipamV6Info + default: + logrus.Warnf("incorrect ip version passed to ipam release: %d", ipVer) + return + } + + if len(*infoList) == 0 { + return + } + + logrus.Debugf("releasing IPv%d pools from network %s (%s)", ipVer, n.Name(), n.ID()) + + for _, d := range *infoList { + if d.Gateway != nil { + if err := ipam.ReleaseAddress(d.PoolID, d.Gateway.IP); err != nil { + logrus.Warnf("Failed to release gateway ip address %s on delete of network %s (%s): %v", d.Gateway.IP, n.Name(), n.ID(), err) + } + } + if d.IPAMData.AuxAddresses != nil { + for k, nw := range d.IPAMData.AuxAddresses { + if d.Pool.Contains(nw.IP) { + if err := ipam.ReleaseAddress(d.PoolID, nw.IP); err != nil && err != ipamapi.ErrIPOutOfRange { + logrus.Warnf("Failed to release secondary ip address %s (%v) on delete of network %s (%s): %v", k, nw.IP, n.Name(), n.ID(), err) + } + } + } + } + if err := ipam.ReleasePool(d.PoolID); err != nil { + logrus.Warnf("Failed to release address pool %s on delete of network %s (%s): %v", d.PoolID, n.Name(), n.ID(), err) + } + } + + *infoList = nil +} + +func (n *network) getIPInfo(ipVer int) []*IpamInfo { + var info []*IpamInfo + switch ipVer { + case 4: + info = n.ipamV4Info + case 6: + info = n.ipamV6Info + default: + return nil + } + l := make([]*IpamInfo, 0, len(info)) + n.Lock() + l = append(l, info...)
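+ // the copy is taken under the lock so callers can use the returned slice without holding it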
+ n.Unlock() + return l +} + +func (n *network) getIPData(ipVer int) []driverapi.IPAMData { + var info []*IpamInfo + switch ipVer { + case 4: + info = n.ipamV4Info + case 6: + info = n.ipamV6Info + default: + return nil + } + l := make([]driverapi.IPAMData, 0, len(info)) + n.Lock() + for _, d := range info { + l = append(l, d.IPAMData) + } + n.Unlock() + return l +} + +func (n *network) deriveAddressSpace() (string, error) { + local, global, err := n.getController().drvRegistry.IPAMDefaultAddressSpaces(n.ipamType) + if err != nil { + return "", types.NotFoundErrorf("failed to get default address space: %v", err) + } + if n.DataScope() == datastore.GlobalScope { + return global, nil + } + return local, nil +} + +func (n *network) Info() NetworkInfo { + return n +} + +func (n *network) Peers() []networkdb.PeerInfo { + if !n.Dynamic() { + return []networkdb.PeerInfo{} + } + + agent := n.getController().getAgent() + if agent == nil { + return []networkdb.PeerInfo{} + } + + return agent.networkDB.Peers(n.ID()) +} + +func (n *network) DriverOptions() map[string]string { + n.Lock() + defer n.Unlock() + if n.generic != nil { + if m, ok := n.generic[netlabel.GenericData]; ok { + return m.(map[string]string) + } + } + return map[string]string{} +} + +func (n *network) Scope() string { + n.Lock() + defer n.Unlock() + return n.scope +} + +func (n *network) IpamConfig() (string, map[string]string, []*IpamConf, []*IpamConf) { + n.Lock() + defer n.Unlock() + + v4L := make([]*IpamConf, len(n.ipamV4Config)) + v6L := make([]*IpamConf, len(n.ipamV6Config)) + + for i, c := range n.ipamV4Config { + cc := &IpamConf{} + c.CopyTo(cc) + v4L[i] = cc + } + + for i, c := range n.ipamV6Config { + cc := &IpamConf{} + c.CopyTo(cc) + v6L[i] = cc + } + + return n.ipamType, n.ipamOptions, v4L, v6L +} + +func (n *network) IpamInfo() ([]*IpamInfo, []*IpamInfo) { + n.Lock() + defer n.Unlock() + + v4Info := make([]*IpamInfo, len(n.ipamV4Info)) + v6Info := make([]*IpamInfo, len(n.ipamV6Info)) + + for i, info := range n.ipamV4Info { + ic := &IpamInfo{} + info.CopyTo(ic) + v4Info[i] = ic + } + + for i, info := range n.ipamV6Info { + ic := &IpamInfo{} + info.CopyTo(ic) + v6Info[i] = ic + } + + return v4Info, v6Info +} + +func (n *network) Internal() bool { + n.Lock() + defer n.Unlock() + + return n.internal +} + +func (n *network) Attachable() bool { + n.Lock() + defer n.Unlock() + + return n.attachable +} + +func (n *network) Ingress() bool { + n.Lock() + defer n.Unlock() + + return n.ingress +} + +func (n *network) Dynamic() bool { + n.Lock() + defer n.Unlock() + + return n.dynamic +} + +func (n *network) IPv6Enabled() bool { + n.Lock() + defer n.Unlock() + + return n.enableIPv6 +} + +func (n *network) ConfigFrom() string { + n.Lock() + defer n.Unlock() + + return n.configFrom +} + +func (n *network) ConfigOnly() bool { + n.Lock() + defer n.Unlock() + + return n.configOnly +} + +func (n *network) Labels() map[string]string { + n.Lock() + defer n.Unlock() + + var lbls = make(map[string]string, len(n.labels)) + for k, v := range n.labels { + lbls[k] = v + } + + return lbls +} + +func (n *network) TableEventRegister(tableName string, objType driverapi.ObjectType) error { + if !driverapi.IsValidType(objType) { + return fmt.Errorf("invalid object type %v in registering table, %s", objType, tableName) + } + + t := networkDBTable{ + name: tableName, + objType: objType, + } + n.Lock() + defer n.Unlock() + n.driverTables = append(n.driverTables, t) + return nil +} + +// Special drivers are ones which do not need to perform any network 
plumbing +func (n *network) hasSpecialDriver() bool { + return n.Type() == "host" || n.Type() == "null" +} + +func (n *network) ResolveName(req string, ipType int) ([]net.IP, bool) { + var ipv6Miss bool + + c := n.getController() + c.Lock() + defer c.Unlock() + sr, ok := c.svcRecords[n.ID()] + + if !ok { + return nil, false + } + + req = strings.TrimSuffix(req, ".") + ipSet, ok := sr.svcMap.Get(req) + + if ipType == types.IPv6 { + // If the name resolved to a v4 address then it's a valid name in + // the docker network domain. If the network is not v6 enabled + // set ipv6Miss to filter the DNS query from going to external + // resolvers. + if ok && !n.enableIPv6 { + ipv6Miss = true + } + ipSet, ok = sr.svcIPv6Map.Get(req) + } + + if ok && len(ipSet) > 0 { + // this map is to avoid IP duplicates; these can happen during a transition period where 2 services are using the same IP + noDup := make(map[string]bool) + var ipLocal []net.IP + for _, ip := range ipSet { + if _, dup := noDup[ip.(svcMapEntry).ip]; !dup { + noDup[ip.(svcMapEntry).ip] = true + ipLocal = append(ipLocal, net.ParseIP(ip.(svcMapEntry).ip)) + } + } + return ipLocal, ok + } + + return nil, ipv6Miss +} + +func (n *network) HandleQueryResp(name string, ip net.IP) { + c := n.getController() + c.Lock() + defer c.Unlock() + sr, ok := c.svcRecords[n.ID()] + + if !ok { + return + } + + ipStr := netutils.ReverseIP(ip.String()) + // If an object with extResolver == true is already in the set this call will fail, + // but that just means it has already been inserted before + if ok, _ := sr.ipMap.Contains(ipStr, ipInfo{name: name}); ok { + sr.ipMap.Remove(ipStr, ipInfo{name: name}) + sr.ipMap.Insert(ipStr, ipInfo{name: name, extResolver: true}) + } +} + +func (n *network) ResolveIP(ip string) string { + c := n.getController() + c.Lock() + defer c.Unlock() + sr, ok := c.svcRecords[n.ID()] + + if !ok { + return "" + } + + nwName := n.Name() + + elemSet, ok := sr.ipMap.Get(ip) + if !ok || len(elemSet) == 0 { + return "" + } + // NOTE it is possible to have more than one element in the Set; this will happen + // because of the interleaving of different events from different sources (local container create vs + // network db notifications). + // In such cases the resolution will be based on the first element of the set, and can vary + // during the system stabilization + elem, ok := elemSet[0].(ipInfo) + if !ok { + setStr, b := sr.ipMap.String(ip) + logrus.Errorf("expected set of ipInfo type for key %s set:%t %s", ip, b, setStr) + return "" + } + + if elem.extResolver { + return "" + } + + return elem.name + "." + nwName +} + +func (n *network) ResolveService(name string) ([]*net.SRV, []net.IP) { + c := n.getController() + + srv := []*net.SRV{} + ip := []net.IP{} + + logrus.Debugf("Service name to resolve: %v", name) + + // There are DNS implementations that allow SRV queries for names not in + // the format defined by RFC 2782.
Hence specific validation checks are + not done + parts := strings.Split(name, ".") + if len(parts) < 3 { + return nil, nil + } + + portName := parts[0] + proto := parts[1] + svcName := strings.Join(parts[2:], ".") + + c.Lock() + defer c.Unlock() + sr, ok := c.svcRecords[n.ID()] + + if !ok { + return nil, nil + } + + svcs, ok := sr.service[svcName] + if !ok { + return nil, nil + } + + for _, svc := range svcs { + if svc.portName != portName { + continue + } + if svc.proto != proto { + continue + } + for _, t := range svc.target { + srv = append(srv, + &net.SRV{ + Target: t.name, + Port: t.port, + }) + + ip = append(ip, t.ip) + } + } + + return srv, ip +} + +func (n *network) ExecFunc(f func()) error { + return types.NotImplementedErrorf("ExecFunc not supported by network") +} + +func (n *network) NdotsSet() bool { + return false +} + +// config-only network is looked up by name +func (c *controller) getConfigNetwork(name string) (*network, error) { + var n Network + + s := func(current Network) bool { + if current.Info().ConfigOnly() && current.Name() == name { + n = current + return true + } + return false + } + + c.WalkNetworks(s) + + if n == nil { + return nil, types.NotFoundErrorf("configuration network %q not found", name) + } + + return n.(*network), nil +} + +func (n *network) createLoadBalancerSandbox() error { + sandboxName := n.name + "-sbox" + sbOptions := []SandboxOption{} + if n.ingress { + sbOptions = append(sbOptions, OptionIngress()) + } + sb, err := n.ctrlr.NewSandbox(sandboxName, sbOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := n.ctrlr.SandboxDestroy(sandboxName); e != nil { + logrus.Warnf("could not delete sandbox %s on failure (%v): %v", sandboxName, err, e) + } + } + }() + + endpointName := n.name + "-endpoint" + epOptions := []EndpointOption{ + CreateOptionIpam(n.loadBalancerIP, nil, nil, nil), + CreateOptionLoadBalancer(), + } + ep, err := n.createEndpoint(endpointName, epOptions...) + if err != nil { + return err + } + defer func() { + if err != nil { + if e := ep.Delete(true); e != nil { + logrus.Warnf("could not delete endpoint %s on failure (%v): %v", endpointName, err, e) + } + } + }() + + if err := ep.Join(sb, nil); err != nil { + return err + } + return sb.EnableService() +} + +func (n *network) deleteLoadBalancerSandbox() { + n.Lock() + c := n.ctrlr + name := n.name + n.Unlock() + + endpointName := name + "-endpoint" + sandboxName := name + "-sbox" + + endpoint, err := n.EndpointByName(endpointName) + if err != nil { + logrus.Warnf("Failed to find load balancer endpoint %s on network %s: %v", endpointName, name, err) + } else { + + info := endpoint.Info() + if info != nil { + sb := info.Sandbox() + if sb != nil { + if err := sb.DisableService(); err != nil { + logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err) + //Ignore error and attempt to delete the load balancer endpoint + } + } + } + + if err := endpoint.Delete(true); err != nil { + logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoint.Name(), endpoint.ID(), sandboxName, err) + //Ignore error and attempt to delete the sandbox.
+ } + } + + if err := c.SandboxDestroy(sandboxName); err != nil { + logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err) + } +} diff --git a/vendor/github.com/docker/libnetwork/network_unix.go b/vendor/github.com/docker/libnetwork/network_unix.go new file mode 100644 index 0000000000..585261ece0 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/network_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package libnetwork + +import "github.com/docker/libnetwork/ipamapi" + +// Stub implementations for DNS related functions + +func (n *network) startResolver() { +} + +func defaultIpamForNetworkType(networkType string) string { + return ipamapi.DefaultIPAM +} diff --git a/vendor/github.com/docker/libnetwork/network_windows.go b/vendor/github.com/docker/libnetwork/network_windows.go new file mode 100644 index 0000000000..e7819e1c3e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/network_windows.go @@ -0,0 +1,75 @@ +// +build windows + +package libnetwork + +import ( + "runtime" + "time" + + "github.com/Microsoft/hcsshim" + "github.com/docker/libnetwork/drivers/windows" + "github.com/docker/libnetwork/ipamapi" + "github.com/docker/libnetwork/ipams/windowsipam" + "github.com/sirupsen/logrus" +) + +func executeInCompartment(compartmentID uint32, x func()) { + runtime.LockOSThread() + + if err := hcsshim.SetCurrentThreadCompartmentId(compartmentID); err != nil { + logrus.Error(err) + } + defer func() { + hcsshim.SetCurrentThreadCompartmentId(0) + runtime.UnlockOSThread() + }() + + x() +} + +func (n *network) startResolver() { + if n.networkType == "ics" { + return + } + n.resolverOnce.Do(func() { + logrus.Debugf("Launching DNS server for network %q", n.Name()) + options := n.Info().DriverOptions() + hnsid := options[windows.HNSID] + + if hnsid == "" { + return + } + + hnsresponse, err := hcsshim.HNSNetworkRequest("GET", hnsid, "") + if err != nil { + logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) + return + } + + for _, subnet := range hnsresponse.Subnets { + if subnet.GatewayAddress != "" { + for i := 0; i < 3; i++ { + resolver := NewResolver(subnet.GatewayAddress, false, "", n) + logrus.Debugf("Binding a resolver on network %s gateway %s", n.Name(), subnet.GatewayAddress) + executeInCompartment(hnsresponse.DNSServerCompartment, resolver.SetupFunc(53)) + + if err = resolver.Start(); err != nil { + logrus.Errorf("Resolver Setup/Start failed for container %s, %q", n.Name(), err) + time.Sleep(1 * time.Second) + } else { + logrus.Debugf("Resolver bound successfully for network %s", n.Name()) + n.resolver = append(n.resolver, resolver) + break + } + } + } + } + }) +} + +func defaultIpamForNetworkType(networkType string) string { + if windows.IsBuiltinLocalDriver(networkType) { + return windowsipam.DefaultIPAM + } + return ipamapi.DefaultIPAM +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/broadcast.go b/vendor/github.com/docker/libnetwork/networkdb/broadcast.go new file mode 100644 index 0000000000..174023b22b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/broadcast.go @@ -0,0 +1,174 @@ +package networkdb + +import ( + "errors" + "time" + + "github.com/hashicorp/memberlist" + "github.com/hashicorp/serf/serf" +) + +const broadcastTimeout = 5 * time.Second + +type networkEventMessage struct { + id string + node string + msg []byte +} + +func (m *networkEventMessage) Invalidates(other memberlist.Broadcast) bool { + otherm := other.(*networkEventMessage) + return m.id == otherm.id && m.node == otherm.node +} + +func 
(m *networkEventMessage) Message() []byte { + return m.msg +} + +func (m *networkEventMessage) Finished() { +} + +func (nDB *NetworkDB) sendNetworkEvent(nid string, event NetworkEvent_Type, ltime serf.LamportTime) error { + nEvent := NetworkEvent{ + Type: event, + LTime: ltime, + NodeName: nDB.config.NodeID, + NetworkID: nid, + } + + raw, err := encodeMessage(MessageTypeNetworkEvent, &nEvent) + if err != nil { + return err + } + + nDB.networkBroadcasts.QueueBroadcast(&networkEventMessage{ + msg: raw, + id: nid, + node: nDB.config.NodeID, + }) + return nil +} + +type nodeEventMessage struct { + msg []byte + notify chan<- struct{} +} + +func (m *nodeEventMessage) Invalidates(other memberlist.Broadcast) bool { + return false +} + +func (m *nodeEventMessage) Message() []byte { + return m.msg +} + +func (m *nodeEventMessage) Finished() { + if m.notify != nil { + close(m.notify) + } +} + +func (nDB *NetworkDB) sendNodeEvent(event NodeEvent_Type) error { + nEvent := NodeEvent{ + Type: event, + LTime: nDB.networkClock.Increment(), + NodeName: nDB.config.NodeID, + } + + raw, err := encodeMessage(MessageTypeNodeEvent, &nEvent) + if err != nil { + return err + } + + notifyCh := make(chan struct{}) + nDB.nodeBroadcasts.QueueBroadcast(&nodeEventMessage{ + msg: raw, + notify: notifyCh, + }) + + nDB.RLock() + noPeers := len(nDB.nodes) <= 1 + nDB.RUnlock() + + // Message enqueued, do not wait for a send if no peer is present + if noPeers { + return nil + } + + // Wait for the broadcast + select { + case <-notifyCh: + case <-time.After(broadcastTimeout): + return errors.New("timed out broadcasting node event") + } + + return nil +} + +type tableEventMessage struct { + id string + tname string + key string + msg []byte + node string +} + +func (m *tableEventMessage) Invalidates(other memberlist.Broadcast) bool { + otherm := other.(*tableEventMessage) + return m.tname == otherm.tname && m.id == otherm.id && m.key == otherm.key +} + +func (m *tableEventMessage) Message() []byte { + return m.msg +} + +func (m *tableEventMessage) Finished() { +} + +func (nDB *NetworkDB) sendTableEvent(event TableEvent_Type, nid string, tname string, key string, entry *entry) error { + tEvent := TableEvent{ + Type: event, + LTime: entry.ltime, + NodeName: nDB.config.NodeID, + NetworkID: nid, + TableName: tname, + Key: key, + Value: entry.value, + // The duration in second is a float that below would be truncated + ResidualReapTime: int32(entry.reapTime.Seconds()), + } + + raw, err := encodeMessage(MessageTypeTableEvent, &tEvent) + if err != nil { + return err + } + + var broadcastQ *memberlist.TransmitLimitedQueue + nDB.RLock() + thisNodeNetworks, ok := nDB.networks[nDB.config.NodeID] + if ok { + // The network may have been removed + network, networkOk := thisNodeNetworks[nid] + if !networkOk { + nDB.RUnlock() + return nil + } + + broadcastQ = network.tableBroadcasts + } + nDB.RUnlock() + + // The network may have been removed + if broadcastQ == nil { + return nil + } + + broadcastQ.QueueBroadcast(&tableEventMessage{ + msg: raw, + id: nid, + tname: tname, + key: key, + node: nDB.config.NodeID, + }) + return nil +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/cluster.go b/vendor/github.com/docker/libnetwork/networkdb/cluster.go new file mode 100644 index 0000000000..bd48fb9f18 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/cluster.go @@ -0,0 +1,731 @@ +package networkdb + +import ( + "bytes" + "context" + "crypto/rand" + "encoding/hex" + "fmt" + "log" + "math/big" + rnd "math/rand" + "net" + 
"strings" + "time" + + "github.com/hashicorp/memberlist" + "github.com/sirupsen/logrus" +) + +const ( + reapPeriod = 5 * time.Second + rejoinClusterDuration = 10 * time.Second + rejoinInterval = 60 * time.Second + retryInterval = 1 * time.Second + nodeReapInterval = 24 * time.Hour + nodeReapPeriod = 2 * time.Hour +) + +type logWriter struct{} + +func (l *logWriter) Write(p []byte) (int, error) { + str := string(p) + str = strings.TrimSuffix(str, "\n") + + switch { + case strings.HasPrefix(str, "[WARN] "): + str = strings.TrimPrefix(str, "[WARN] ") + logrus.Warn(str) + case strings.HasPrefix(str, "[DEBUG] "): + str = strings.TrimPrefix(str, "[DEBUG] ") + logrus.Debug(str) + case strings.HasPrefix(str, "[INFO] "): + str = strings.TrimPrefix(str, "[INFO] ") + logrus.Info(str) + case strings.HasPrefix(str, "[ERR] "): + str = strings.TrimPrefix(str, "[ERR] ") + logrus.Warn(str) + } + + return len(p), nil +} + +// SetKey adds a new key to the key ring +func (nDB *NetworkDB) SetKey(key []byte) { + logrus.Debugf("Adding key %s", hex.EncodeToString(key)[0:5]) + nDB.Lock() + defer nDB.Unlock() + for _, dbKey := range nDB.config.Keys { + if bytes.Equal(key, dbKey) { + return + } + } + nDB.config.Keys = append(nDB.config.Keys, key) + if nDB.keyring != nil { + nDB.keyring.AddKey(key) + } +} + +// SetPrimaryKey sets the given key as the primary key. This should have +// been added apriori through SetKey +func (nDB *NetworkDB) SetPrimaryKey(key []byte) { + logrus.Debugf("Primary Key %s", hex.EncodeToString(key)[0:5]) + nDB.RLock() + defer nDB.RUnlock() + for _, dbKey := range nDB.config.Keys { + if bytes.Equal(key, dbKey) { + if nDB.keyring != nil { + nDB.keyring.UseKey(dbKey) + } + break + } + } +} + +// RemoveKey removes a key from the key ring. The key being removed +// can't be the primary key +func (nDB *NetworkDB) RemoveKey(key []byte) { + logrus.Debugf("Remove Key %s", hex.EncodeToString(key)[0:5]) + nDB.Lock() + defer nDB.Unlock() + for i, dbKey := range nDB.config.Keys { + if bytes.Equal(key, dbKey) { + nDB.config.Keys = append(nDB.config.Keys[:i], nDB.config.Keys[i+1:]...) 
+ if nDB.keyring != nil { + nDB.keyring.RemoveKey(dbKey) + } + break + } + } +} + +func (nDB *NetworkDB) clusterInit() error { + nDB.lastStatsTimestamp = time.Now() + nDB.lastHealthTimestamp = nDB.lastStatsTimestamp + + config := memberlist.DefaultLANConfig() + config.Name = nDB.config.NodeID + config.BindAddr = nDB.config.BindAddr + config.AdvertiseAddr = nDB.config.AdvertiseAddr + config.UDPBufferSize = nDB.config.PacketBufferSize + + if nDB.config.BindPort != 0 { + config.BindPort = nDB.config.BindPort + } + + config.ProtocolVersion = memberlist.ProtocolVersion2Compatible + config.Delegate = &delegate{nDB: nDB} + config.Events = &eventDelegate{nDB: nDB} + // custom logger that does not add time or date, so they are not + // duplicated by logrus + config.Logger = log.New(&logWriter{}, "", 0) + + var err error + if len(nDB.config.Keys) > 0 { + for i, key := range nDB.config.Keys { + logrus.Debugf("Encryption key %d: %s", i+1, hex.EncodeToString(key)[0:5]) + } + nDB.keyring, err = memberlist.NewKeyring(nDB.config.Keys, nDB.config.Keys[0]) + if err != nil { + return err + } + config.Keyring = nDB.keyring + } + + nDB.networkBroadcasts = &memberlist.TransmitLimitedQueue{ + NumNodes: func() int { + nDB.RLock() + num := len(nDB.nodes) + nDB.RUnlock() + return num + }, + RetransmitMult: config.RetransmitMult, + } + + nDB.nodeBroadcasts = &memberlist.TransmitLimitedQueue{ + NumNodes: func() int { + nDB.RLock() + num := len(nDB.nodes) + nDB.RUnlock() + return num + }, + RetransmitMult: config.RetransmitMult, + } + + mlist, err := memberlist.Create(config) + if err != nil { + return fmt.Errorf("failed to create memberlist: %v", err) + } + + nDB.ctx, nDB.cancelCtx = context.WithCancel(context.Background()) + nDB.memberlist = mlist + + for _, trigger := range []struct { + interval time.Duration + fn func() + }{ + {reapPeriod, nDB.reapState}, + {config.GossipInterval, nDB.gossip}, + {config.PushPullInterval, nDB.bulkSyncTables}, + {retryInterval, nDB.reconnectNode}, + {nodeReapPeriod, nDB.reapDeadNode}, + {rejoinInterval, nDB.rejoinClusterBootStrap}, + } { + t := time.NewTicker(trigger.interval) + go nDB.triggerFunc(trigger.interval, t.C, trigger.fn) + nDB.tickers = append(nDB.tickers, t) + } + + return nil +} + +func (nDB *NetworkDB) retryJoin(ctx context.Context, members []string) { + t := time.NewTicker(retryInterval) + defer t.Stop() + + for { + select { + case <-t.C: + if _, err := nDB.memberlist.Join(members); err != nil { + logrus.Errorf("Failed to join memberlist %s on retry: %v", members, err) + continue + } + if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil { + logrus.Errorf("failed to send node join on retry: %v", err) + continue + } + return + case <-ctx.Done(): + return + } + } + +} + +func (nDB *NetworkDB) clusterJoin(members []string) error { + mlist := nDB.memberlist + + if _, err := mlist.Join(members); err != nil { + // In case of failure, we no longer need to explicitly call retryJoin. 
+ // rejoinClusterBootStrap, which runs every minute, will call retryJoin for 10 seconds + return fmt.Errorf("could not join node to memberlist: %v", err) + } + + if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil { + return fmt.Errorf("failed to send node join: %v", err) + } + + return nil +} + +func (nDB *NetworkDB) clusterLeave() error { + mlist := nDB.memberlist + + if err := nDB.sendNodeEvent(NodeEventTypeLeave); err != nil { + logrus.Errorf("failed to send node leave: %v", err) + } + + if err := mlist.Leave(time.Second); err != nil { + return err + } + + // cancel the context + nDB.cancelCtx() + + for _, t := range nDB.tickers { + t.Stop() + } + + return mlist.Shutdown() +} + +func (nDB *NetworkDB) triggerFunc(stagger time.Duration, C <-chan time.Time, f func()) { + // Use a random stagger to avoid synchronizing + randStagger := time.Duration(uint64(rnd.Int63()) % uint64(stagger)) + select { + case <-time.After(randStagger): + case <-nDB.ctx.Done(): + return + } + for { + select { + case <-C: + f() + case <-nDB.ctx.Done(): + return + } + } +} + +func (nDB *NetworkDB) reapDeadNode() { + nDB.Lock() + defer nDB.Unlock() + for _, nodeMap := range []map[string]*node{ + nDB.failedNodes, + nDB.leftNodes, + } { + for id, n := range nodeMap { + if n.reapTime > nodeReapPeriod { + n.reapTime -= nodeReapPeriod + continue + } + logrus.Debugf("Garbage collect node %v", n.Name) + delete(nodeMap, id) + } + } +} + +// rejoinClusterBootStrap is called periodically to check if all bootStrap nodes are active in the cluster; +// if not, it calls cluster join to merge the 2 separate clusters that form when all managers +// are stopped/started at the same time +func (nDB *NetworkDB) rejoinClusterBootStrap() { + nDB.RLock() + if len(nDB.bootStrapIP) == 0 { + nDB.RUnlock() + return + } + + bootStrapIPs := make([]string, 0, len(nDB.bootStrapIP)) + for _, bootIP := range nDB.bootStrapIP { + for _, node := range nDB.nodes { + if node.Addr.Equal(bootIP) { + // One of the bootstrap nodes is part of the cluster, return + nDB.RUnlock() + return + } + } + bootStrapIPs = append(bootStrapIPs, bootIP.String()) + } + nDB.RUnlock() + // None of the bootStrap nodes are in the cluster, call memberlist join + logrus.Debugf("rejoinClusterBootStrap, calling cluster join with bootStrap %v", bootStrapIPs) + ctx, cancel := context.WithTimeout(nDB.ctx, rejoinClusterDuration) + defer cancel() + nDB.retryJoin(ctx, bootStrapIPs) +} + +func (nDB *NetworkDB) reconnectNode() { + nDB.RLock() + if len(nDB.failedNodes) == 0 { + nDB.RUnlock() + return + } + + nodes := make([]*node, 0, len(nDB.failedNodes)) + for _, n := range nDB.failedNodes { + nodes = append(nodes, n) + } + nDB.RUnlock() + + node := nodes[randomOffset(len(nodes))] + addr := net.UDPAddr{IP: node.Addr, Port: int(node.Port)} + + if _, err := nDB.memberlist.Join([]string{addr.String()}); err != nil { + return + } + + if err := nDB.sendNodeEvent(NodeEventTypeJoin); err != nil { + return + } + + logrus.Debugf("Initiating bulk sync with node %s after reconnect", node.Name) + nDB.bulkSync([]string{node.Name}, true) +} + +// For timing the entry deletion in the reaper, APIs that don't use a monotonic clock +// source (time.Now, Sub etc.) should be avoided. Hence we use reapTime in every +// entry, which is set initially to reapInterval and decremented by reapPeriod every time +// the reaper runs. NOTE nDB.reapTableEntries updates the reapTime with a read lock. This +// is safe as long as no other concurrent path touches the reapTime field.
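+// (For instance, assuming a reapEntryInterval of 30 minutes and the 5 second reapPeriod above, +// an entry lives through roughly 360 reaper cycles before it is deleted.)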
+func (nDB *NetworkDB) reapState() { + // reapTableEntries leverages the presence of the network, so garbage collect entries first + nDB.reapTableEntries() + nDB.reapNetworks() +} + +func (nDB *NetworkDB) reapNetworks() { + nDB.Lock() + for _, nn := range nDB.networks { + for id, n := range nn { + if n.leaving { + if n.reapTime <= 0 { + delete(nn, id) + continue + } + n.reapTime -= reapPeriod + } + } + } + nDB.Unlock() +} + +func (nDB *NetworkDB) reapTableEntries() { + var nodeNetworks []string + // This is best effort; any change in the list of networks will be picked up in the next cycle + nDB.RLock() + for nid := range nDB.networks[nDB.config.NodeID] { + nodeNetworks = append(nodeNetworks, nid) + } + nDB.RUnlock() + + cycleStart := time.Now() + // In order to avoid blocking the database for a long time, apply the garbage collection logic by network. + // The lock is taken at the beginning of the cycle and the deletion is inline + for _, nid := range nodeNetworks { + nDB.Lock() + nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool { + // timeCompensation compensates in case the lock took some time to be released + timeCompensation := time.Since(cycleStart) + entry, ok := v.(*entry) + if !ok || !entry.deleting { + return false + } + + // In this check we are adding an extra 1 second to guarantee that when the number is truncated to int32 to fit the packet + // for the tableEvent the number is always strictly > 1 and never 0 + if entry.reapTime > reapPeriod+timeCompensation+time.Second { + entry.reapTime -= reapPeriod + timeCompensation + return false + } + + params := strings.Split(path[1:], "/") + nid := params[0] + tname := params[1] + key := params[2] + + okTable, okNetwork := nDB.deleteEntry(nid, tname, key) + if !okTable { + logrus.Errorf("Table tree delete failed, entry with key:%s does not exist in the table:%s network:%s", key, tname, nid) + } + if !okNetwork { + logrus.Errorf("Network tree delete failed, entry with key:%s does not exist in the network:%s table:%s", key, nid, tname) + } + + return false + }) + nDB.Unlock() + } +} + +func (nDB *NetworkDB) gossip() { + networkNodes := make(map[string][]string) + nDB.RLock() + thisNodeNetworks := nDB.networks[nDB.config.NodeID] + for nid := range thisNodeNetworks { + networkNodes[nid] = nDB.networkNodes[nid] + } + printStats := time.Since(nDB.lastStatsTimestamp) >= nDB.config.StatsPrintPeriod + printHealth := time.Since(nDB.lastHealthTimestamp) >= nDB.config.HealthPrintPeriod + nDB.RUnlock() + + if printHealth { + healthScore := nDB.memberlist.GetHealthScore() + if healthScore != 0 { + logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore) + } + nDB.lastHealthTimestamp = time.Now() + } + + for nid, nodes := range networkNodes { + mNodes := nDB.mRandomNodes(3, nodes) + bytesAvail := nDB.config.PacketBufferSize - compoundHeaderOverhead + + nDB.RLock() + network, ok := thisNodeNetworks[nid] + nDB.RUnlock() + if !ok || network == nil { + // It is normal for the network to be removed + // between the time we collect the network + // attachments of this node and the time we + // process them here.
+ continue + } + + broadcastQ := network.tableBroadcasts + + if broadcastQ == nil { + logrus.Errorf("Invalid broadcastQ encountered while gossiping for network %s", nid) + continue + } + + msgs := broadcastQ.GetBroadcasts(compoundOverhead, bytesAvail) + // Collect stats and print the queue info, note this code is here also to have a view of the queues empty + network.qMessagesSent += len(msgs) + if printStats { + logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d", + nDB.config.Hostname, nDB.config.NodeID, + nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(), + network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second))) + network.qMessagesSent = 0 + } + + if len(msgs) == 0 { + continue + } + + // Create a compound message + compound := makeCompoundMessage(msgs) + + for _, node := range mNodes { + nDB.RLock() + mnode := nDB.nodes[node] + nDB.RUnlock() + + if mnode == nil { + break + } + + // Send the compound message + if err := nDB.memberlist.SendBestEffort(&mnode.Node, compound); err != nil { + logrus.Errorf("Failed to send gossip to %s: %s", mnode.Addr, err) + } + } + } + // Reset the stats + if printStats { + nDB.lastStatsTimestamp = time.Now() + } +} + +func (nDB *NetworkDB) bulkSyncTables() { + var networks []string + nDB.RLock() + for nid, network := range nDB.networks[nDB.config.NodeID] { + if network.leaving { + continue + } + networks = append(networks, nid) + } + nDB.RUnlock() + + for { + if len(networks) == 0 { + break + } + + nid := networks[0] + networks = networks[1:] + + nDB.RLock() + nodes := nDB.networkNodes[nid] + nDB.RUnlock() + + // No peer nodes on this network. Move on. + if len(nodes) == 0 { + continue + } + + completed, err := nDB.bulkSync(nodes, false) + if err != nil { + logrus.Errorf("periodic bulk sync failure for network %s: %v", nid, err) + continue + } + + // Remove all the networks for which we have + // successfully completed bulk sync in this iteration. + updatedNetworks := make([]string, 0, len(networks)) + for _, nid := range networks { + var found bool + for _, completedNid := range completed { + if nid == completedNid { + found = true + break + } + } + + if !found { + updatedNetworks = append(updatedNetworks, nid) + } + } + + networks = updatedNetworks + } +} + +func (nDB *NetworkDB) bulkSync(nodes []string, all bool) ([]string, error) { + if !all { + // Get 2 random nodes. 2nd node will be tried if the bulk sync to + // 1st node fails. + nodes = nDB.mRandomNodes(2, nodes) + } + + if len(nodes) == 0 { + return nil, nil + } + + var err error + var networks []string + for _, node := range nodes { + if node == nDB.config.NodeID { + continue + } + logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node) + networks = nDB.findCommonNetworks(node) + err = nDB.bulkSyncNode(networks, node, true) + // if its periodic bulksync stop after the first successful sync + if !all && err == nil { + break + } + if err != nil { + err = fmt.Errorf("bulk sync to node %s failed: %v", node, err) + logrus.Warn(err.Error()) + } + } + + if err != nil { + return nil, err + } + + return networks, nil +} + +// Bulk sync all the table entries belonging to a set of networks to a +// single peer node. 
It can be unsolicited or can be in response to an +// unsolicited bulk sync +func (nDB *NetworkDB) bulkSyncNode(networks []string, node string, unsolicited bool) error { + var msgs [][]byte + + var unsolMsg string + if unsolicited { + unsolMsg = "unsolicited" + } + + logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s", + nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node) + + nDB.RLock() + mnode := nDB.nodes[node] + if mnode == nil { + nDB.RUnlock() + return nil + } + + for _, nid := range networks { + nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool { + entry, ok := v.(*entry) + if !ok { + return false + } + + eType := TableEventTypeCreate + if entry.deleting { + eType = TableEventTypeDelete + } + + params := strings.Split(path[1:], "/") + tEvent := TableEvent{ + Type: eType, + LTime: entry.ltime, + NodeName: entry.node, + NetworkID: nid, + TableName: params[1], + Key: params[2], + Value: entry.value, + // The duration in second is a float that below would be truncated + ResidualReapTime: int32(entry.reapTime.Seconds()), + } + + msg, err := encodeMessage(MessageTypeTableEvent, &tEvent) + if err != nil { + logrus.Errorf("Encode failure during bulk sync: %#v", tEvent) + return false + } + + msgs = append(msgs, msg) + return false + }) + } + nDB.RUnlock() + + // Create a compound message + compound := makeCompoundMessage(msgs) + + bsm := BulkSyncMessage{ + LTime: nDB.tableClock.Time(), + Unsolicited: unsolicited, + NodeName: nDB.config.NodeID, + Networks: networks, + Payload: compound, + } + + buf, err := encodeMessage(MessageTypeBulkSync, &bsm) + if err != nil { + return fmt.Errorf("failed to encode bulk sync message: %v", err) + } + + nDB.Lock() + ch := make(chan struct{}) + nDB.bulkSyncAckTbl[node] = ch + nDB.Unlock() + + err = nDB.memberlist.SendReliable(&mnode.Node, buf) + if err != nil { + nDB.Lock() + delete(nDB.bulkSyncAckTbl, node) + nDB.Unlock() + + return fmt.Errorf("failed to send a TCP message during bulk sync: %v", err) + } + + // Wait on a response only if it is unsolicited. + if unsolicited { + startTime := time.Now() + t := time.NewTimer(30 * time.Second) + select { + case <-t.C: + logrus.Errorf("Bulk sync to node %s timed out", node) + case <-ch: + logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime)) + } + t.Stop() + } + + return nil +} + +// Returns a random offset between 0 and n +func randomOffset(n int) int { + if n == 0 { + return 0 + } + + val, err := rand.Int(rand.Reader, big.NewInt(int64(n))) + if err != nil { + logrus.Errorf("Failed to get a random offset: %v", err) + return 0 + } + + return int(val.Int64()) +} + +// mRandomNodes is used to select up to m random nodes. It is possible +// that less than m nodes are returned. 
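+// (The local node is always skipped, so a single-node cluster yields an empty result.)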
+func (nDB *NetworkDB) mRandomNodes(m int, nodes []string) []string { + n := len(nodes) + mNodes := make([]string, 0, m) +OUTER: + // Probe up to 3*n times, with large n this is not necessary + // since k << n, but with small n we want search to be + // exhaustive + for i := 0; i < 3*n && len(mNodes) < m; i++ { + // Get random node + idx := randomOffset(n) + node := nodes[idx] + + if node == nDB.config.NodeID { + continue + } + + // Check if we have this node already + for j := 0; j < len(mNodes); j++ { + if node == mNodes[j] { + continue OUTER + } + } + + // Append the node + mNodes = append(mNodes, node) + } + + return mNodes +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go new file mode 100644 index 0000000000..9a379fe7cd --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go @@ -0,0 +1,476 @@ +package networkdb + +import ( + "net" + "time" + + "github.com/gogo/protobuf/proto" + "github.com/sirupsen/logrus" +) + +type delegate struct { + nDB *NetworkDB +} + +func (d *delegate) NodeMeta(limit int) []byte { + return []byte{} +} + +func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool { + // Update our local clock if the received messages has newer + // time. + nDB.networkClock.Witness(nEvent.LTime) + + nDB.Lock() + defer nDB.Unlock() + + // check if the node exists + n, _, _ := nDB.findNode(nEvent.NodeName) + if n == nil { + return false + } + + // check if the event is fresh + if n.ltime >= nEvent.LTime { + return false + } + + // If we are here means that the event is fresher and the node is known. Update the laport time + n.ltime = nEvent.LTime + + // If the node is not known from memberlist we cannot process save any state of it else if it actually + // dies we won't receive any notification and we will remain stuck with it + if _, ok := nDB.nodes[nEvent.NodeName]; !ok { + logrus.Error("node: %s is unknown to memberlist", nEvent.NodeName) + return false + } + + switch nEvent.Type { + case NodeEventTypeJoin: + moved, err := nDB.changeNodeState(n.Name, nodeActiveState) + if err != nil { + logrus.WithError(err).Error("unable to find the node to move") + return false + } + if moved { + logrus.Infof("%v(%v): Node join event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) + } + return moved + case NodeEventTypeLeave: + moved, err := nDB.changeNodeState(n.Name, nodeLeftState) + if err != nil { + logrus.WithError(err).Error("unable to find the node to move") + return false + } + if moved { + logrus.Infof("%v(%v): Node leave event for %s/%s", nDB.config.Hostname, nDB.config.NodeID, n.Name, n.Addr) + } + return moved + } + + return false +} + +func (nDB *NetworkDB) handleNetworkEvent(nEvent *NetworkEvent) bool { + // Update our local clock if the received messages has newer + // time. + nDB.networkClock.Witness(nEvent.LTime) + + nDB.Lock() + defer nDB.Unlock() + + if nEvent.NodeName == nDB.config.NodeID { + return false + } + + nodeNetworks, ok := nDB.networks[nEvent.NodeName] + if !ok { + // We haven't heard about this node at all. Ignore the leave + if nEvent.Type == NetworkEventTypeLeave { + return false + } + + nodeNetworks = make(map[string]*network) + nDB.networks[nEvent.NodeName] = nodeNetworks + } + + if n, ok := nodeNetworks[nEvent.NetworkID]; ok { + // We have the latest state. Ignore the event + // since it is stale. 
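+		// A short worked example (values are illustrative only): if the
+		// stored attachment has ltime 12 and the incoming event carries
+		// LTime 10 or 12, the check below drops the event as stale;
+		// only a strictly newer clock value, e.g. 13, is applied.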
+ if n.ltime >= nEvent.LTime { + return false + } + + n.ltime = nEvent.LTime + n.leaving = nEvent.Type == NetworkEventTypeLeave + if n.leaving { + n.reapTime = nDB.config.reapNetworkInterval + + // The remote node is leaving the network, but not the gossip cluster. + // Mark all its entries in deleted state, this will guarantee that + // if some node bulk sync with us, the deleted state of + // these entries will be propagated. + nDB.deleteNodeNetworkEntries(nEvent.NetworkID, nEvent.NodeName) + } + + if nEvent.Type == NetworkEventTypeLeave { + nDB.deleteNetworkNode(nEvent.NetworkID, nEvent.NodeName) + } else { + nDB.addNetworkNode(nEvent.NetworkID, nEvent.NodeName) + } + + return true + } + + if nEvent.Type == NetworkEventTypeLeave { + return false + } + + // If the node is not known from memberlist we cannot process save any state of it else if it actually + // dies we won't receive any notification and we will remain stuck with it + if _, ok := nDB.nodes[nEvent.NodeName]; !ok { + return false + } + + // This remote network join is being seen the first time. + nodeNetworks[nEvent.NetworkID] = &network{ + id: nEvent.NetworkID, + ltime: nEvent.LTime, + } + + nDB.addNetworkNode(nEvent.NetworkID, nEvent.NodeName) + return true +} + +func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool { + // Update our local clock if the received messages has newer time. + nDB.tableClock.Witness(tEvent.LTime) + + // Ignore the table events for networks that are in the process of going away + nDB.RLock() + networks := nDB.networks[nDB.config.NodeID] + network, ok := networks[tEvent.NetworkID] + // Check if the owner of the event is still part of the network + nodes := nDB.networkNodes[tEvent.NetworkID] + var nodePresent bool + for _, node := range nodes { + if node == tEvent.NodeName { + nodePresent = true + break + } + } + nDB.RUnlock() + + if !ok || network.leaving || !nodePresent { + // I'm out of the network OR the event owner is not anymore part of the network so do not propagate + return false + } + + nDB.Lock() + e, err := nDB.getEntry(tEvent.TableName, tEvent.NetworkID, tEvent.Key) + if err == nil { + // We have the latest state. Ignore the event + // since it is stale. + if e.ltime >= tEvent.LTime { + nDB.Unlock() + return false + } + } + + e = &entry{ + ltime: tEvent.LTime, + node: tEvent.NodeName, + value: tEvent.Value, + deleting: tEvent.Type == TableEventTypeDelete, + reapTime: time.Duration(tEvent.ResidualReapTime) * time.Second, + } + + // All the entries marked for deletion should have a reapTime set greater than 0 + // This case can happen if the cluster is running different versions of the engine where the old version does not have the + // field. If that is not the case, this can be a BUG + if e.deleting && e.reapTime == 0 { + logrus.Warnf("%v(%v) handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", + nDB.config.Hostname, nDB.config.NodeID, tEvent) + e.reapTime = nDB.config.reapEntryInterval + } + nDB.createOrUpdateEntry(tEvent.NetworkID, tEvent.TableName, tEvent.Key, e) + nDB.Unlock() + + if err != nil && tEvent.Type == TableEventTypeDelete { + // If it is a delete event and we did not have a state for it, don't propagate to the application + // If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around + // most likely the cluster is already aware of it, if not who will sync with this node will catch the state too. 
+ // This also avoids that deletion of entries close to their garbage collection ends up circuling around forever + return e.reapTime > nDB.config.reapEntryInterval/6 + } + + var op opType + switch tEvent.Type { + case TableEventTypeCreate: + op = opCreate + case TableEventTypeUpdate: + op = opUpdate + case TableEventTypeDelete: + op = opDelete + } + + nDB.broadcaster.Write(makeEvent(op, tEvent.TableName, tEvent.NetworkID, tEvent.Key, tEvent.Value)) + return true +} + +func (nDB *NetworkDB) handleCompound(buf []byte, isBulkSync bool) { + // Decode the parts + parts, err := decodeCompoundMessage(buf) + if err != nil { + logrus.Errorf("Failed to decode compound request: %v", err) + return + } + + // Handle each message + for _, part := range parts { + nDB.handleMessage(part, isBulkSync) + } +} + +func (nDB *NetworkDB) handleTableMessage(buf []byte, isBulkSync bool) { + var tEvent TableEvent + if err := proto.Unmarshal(buf, &tEvent); err != nil { + logrus.Errorf("Error decoding table event message: %v", err) + return + } + + // Ignore messages that this node generated. + if tEvent.NodeName == nDB.config.NodeID { + return + } + + if rebroadcast := nDB.handleTableEvent(&tEvent); rebroadcast { + var err error + buf, err = encodeRawMessage(MessageTypeTableEvent, buf) + if err != nil { + logrus.Errorf("Error marshalling gossip message for network event rebroadcast: %v", err) + return + } + + nDB.RLock() + n, ok := nDB.networks[nDB.config.NodeID][tEvent.NetworkID] + nDB.RUnlock() + + // if the network is not there anymore, OR we are leaving the network OR the broadcast queue is not present + if !ok || n.leaving || n.tableBroadcasts == nil { + return + } + + n.tableBroadcasts.QueueBroadcast(&tableEventMessage{ + msg: buf, + id: tEvent.NetworkID, + tname: tEvent.TableName, + key: tEvent.Key, + node: tEvent.NodeName, + }) + } +} + +func (nDB *NetworkDB) handleNodeMessage(buf []byte) { + var nEvent NodeEvent + if err := proto.Unmarshal(buf, &nEvent); err != nil { + logrus.Errorf("Error decoding node event message: %v", err) + return + } + + if rebroadcast := nDB.handleNodeEvent(&nEvent); rebroadcast { + var err error + buf, err = encodeRawMessage(MessageTypeNodeEvent, buf) + if err != nil { + logrus.Errorf("Error marshalling gossip message for node event rebroadcast: %v", err) + return + } + + nDB.nodeBroadcasts.QueueBroadcast(&nodeEventMessage{ + msg: buf, + }) + } +} + +func (nDB *NetworkDB) handleNetworkMessage(buf []byte) { + var nEvent NetworkEvent + if err := proto.Unmarshal(buf, &nEvent); err != nil { + logrus.Errorf("Error decoding network event message: %v", err) + return + } + + if rebroadcast := nDB.handleNetworkEvent(&nEvent); rebroadcast { + var err error + buf, err = encodeRawMessage(MessageTypeNetworkEvent, buf) + if err != nil { + logrus.Errorf("Error marshalling gossip message for network event rebroadcast: %v", err) + return + } + + nDB.networkBroadcasts.QueueBroadcast(&networkEventMessage{ + msg: buf, + id: nEvent.NetworkID, + node: nEvent.NodeName, + }) + } +} + +func (nDB *NetworkDB) handleBulkSync(buf []byte) { + var bsm BulkSyncMessage + if err := proto.Unmarshal(buf, &bsm); err != nil { + logrus.Errorf("Error decoding bulk sync message: %v", err) + return + } + + if bsm.LTime > 0 { + nDB.tableClock.Witness(bsm.LTime) + } + + nDB.handleMessage(bsm.Payload, true) + + // Don't respond to a bulk sync which was not unsolicited + if !bsm.Unsolicited { + nDB.Lock() + ch, ok := nDB.bulkSyncAckTbl[bsm.NodeName] + if ok { + close(ch) + delete(nDB.bulkSyncAckTbl, bsm.NodeName) + } + 
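+	// Closing ch above completes the handshake begun in bulkSyncNode: the
+	// node that initiated an unsolicited bulk sync registered ch in
+	// bulkSyncAckTbl and is blocked on it (bounded by a 30-second timer);
+	// the peer's solicited reply carries Unsolicited == false, lands in
+	// this branch, and close(ch) wakes the waiting initiator. Roughly
+	// (a simplified sketch of the two ends, error handling omitted):
+	//
+	//	// initiator (bulkSyncNode)          // responder's reply arrives here
+	//	ch := make(chan struct{})
+	//	nDB.bulkSyncAckTbl[peer] = ch
+	//	SendReliable(peer, bulkSyncMsg) -->  close(nDB.bulkSyncAckTbl[bsm.NodeName])
+	//	select { case <-ch: case <-timer.C: }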
nDB.Unlock() + + return + } + + var nodeAddr net.IP + nDB.RLock() + if node, ok := nDB.nodes[bsm.NodeName]; ok { + nodeAddr = node.Addr + } + nDB.RUnlock() + + if err := nDB.bulkSyncNode(bsm.Networks, bsm.NodeName, false); err != nil { + logrus.Errorf("Error in responding to bulk sync from node %s: %v", nodeAddr, err) + } +} + +func (nDB *NetworkDB) handleMessage(buf []byte, isBulkSync bool) { + mType, data, err := decodeMessage(buf) + if err != nil { + logrus.Errorf("Error decoding gossip message to get message type: %v", err) + return + } + + switch mType { + case MessageTypeNodeEvent: + nDB.handleNodeMessage(data) + case MessageTypeNetworkEvent: + nDB.handleNetworkMessage(data) + case MessageTypeTableEvent: + nDB.handleTableMessage(data, isBulkSync) + case MessageTypeBulkSync: + nDB.handleBulkSync(data) + case MessageTypeCompound: + nDB.handleCompound(data, isBulkSync) + default: + logrus.Errorf("%v(%v): unknown message type %d", nDB.config.Hostname, nDB.config.NodeID, mType) + } +} + +func (d *delegate) NotifyMsg(buf []byte) { + if len(buf) == 0 { + return + } + + d.nDB.handleMessage(buf, false) +} + +func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte { + msgs := d.nDB.networkBroadcasts.GetBroadcasts(overhead, limit) + msgs = append(msgs, d.nDB.nodeBroadcasts.GetBroadcasts(overhead, limit)...) + return msgs +} + +func (d *delegate) LocalState(join bool) []byte { + if join { + // Update all the local node/network state to a new time to + // force update on the node we are trying to rejoin, just in + // case that node has these in leaving state still. This is + // facilitate fast convergence after recovering from a gossip + // failure. + d.nDB.updateLocalNetworkTime() + } + + d.nDB.RLock() + defer d.nDB.RUnlock() + + pp := NetworkPushPull{ + LTime: d.nDB.networkClock.Time(), + NodeName: d.nDB.config.NodeID, + } + + for name, nn := range d.nDB.networks { + for _, n := range nn { + pp.Networks = append(pp.Networks, &NetworkEntry{ + LTime: n.ltime, + NetworkID: n.id, + NodeName: name, + Leaving: n.leaving, + }) + } + } + + buf, err := encodeMessage(MessageTypePushPull, &pp) + if err != nil { + logrus.Errorf("Failed to encode local network state: %v", err) + return nil + } + + return buf +} + +func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) { + if len(buf) == 0 { + logrus.Error("zero byte remote network state received") + return + } + + var gMsg GossipMessage + err := proto.Unmarshal(buf, &gMsg) + if err != nil { + logrus.Errorf("Error unmarshalling push pull message: %v", err) + return + } + + if gMsg.Type != MessageTypePushPull { + logrus.Errorf("Invalid message type %v received from remote", buf[0]) + } + + pp := NetworkPushPull{} + if err := proto.Unmarshal(gMsg.Data, &pp); err != nil { + logrus.Errorf("Failed to decode remote network state: %v", err) + return + } + + nodeEvent := &NodeEvent{ + LTime: pp.LTime, + NodeName: pp.NodeName, + Type: NodeEventTypeJoin, + } + d.nDB.handleNodeEvent(nodeEvent) + + for _, n := range pp.Networks { + nEvent := &NetworkEvent{ + LTime: n.LTime, + NodeName: n.NodeName, + NetworkID: n.NetworkID, + Type: NetworkEventTypeJoin, + } + + if n.Leaving { + nEvent.Type = NetworkEventTypeLeave + } + + d.nDB.handleNetworkEvent(nEvent) + } + +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go new file mode 100644 index 0000000000..78ebe0fd9e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/event_delegate.go @@ -0,0 +1,72 @@ 
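+// Note: the delegate in delegate.go and the eventDelegate below implement
+// memberlist's Delegate and EventDelegate interfaces respectively. A minimal
+// sketch of how such delegates are handed to memberlist (the real wiring
+// lives in this package's cluster setup code, which is not part of this
+// hunk; only the public memberlist API is used):
+//
+//	config := memberlist.DefaultLANConfig()
+//	config.Delegate = &delegate{nDB: nDB}    // gossip payloads + push/pull state
+//	config.Events = &eventDelegate{nDB: nDB} // join/leave/update notifications
+//	mlist, err := memberlist.Create(config)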
+package networkdb + +import ( + "encoding/json" + "net" + + "github.com/hashicorp/memberlist" + "github.com/sirupsen/logrus" +) + +type eventDelegate struct { + nDB *NetworkDB +} + +func (e *eventDelegate) broadcastNodeEvent(addr net.IP, op opType) { + value, err := json.Marshal(&NodeAddr{addr}) + if err == nil { + e.nDB.broadcaster.Write(makeEvent(op, NodeTable, "", "", value)) + } else { + logrus.Errorf("Error marshalling node broadcast event %s", addr.String()) + } +} + +func (e *eventDelegate) NotifyJoin(mn *memberlist.Node) { + logrus.Infof("Node %s/%s, joined gossip cluster", mn.Name, mn.Addr) + e.broadcastNodeEvent(mn.Addr, opCreate) + e.nDB.Lock() + defer e.nDB.Unlock() + + // In case the node is rejoining after a failure or leave, + // just add the node back to active + if moved, _ := e.nDB.changeNodeState(mn.Name, nodeActiveState); moved { + return + } + + // Every node has a unique ID + // Check on the base of the IP address if the new node that joined is actually a new incarnation of a previous + // failed or shutdown one + e.nDB.purgeReincarnation(mn) + + e.nDB.nodes[mn.Name] = &node{Node: *mn} + logrus.Infof("Node %s/%s, added to nodes list", mn.Name, mn.Addr) +} + +func (e *eventDelegate) NotifyLeave(mn *memberlist.Node) { + logrus.Infof("Node %s/%s, left gossip cluster", mn.Name, mn.Addr) + e.broadcastNodeEvent(mn.Addr, opDelete) + + e.nDB.Lock() + defer e.nDB.Unlock() + + n, currState, _ := e.nDB.findNode(mn.Name) + if n == nil { + logrus.Errorf("Node %s/%s not found in the node lists", mn.Name, mn.Addr) + return + } + // if the node was active means that did not send the leave cluster message, so it's probable that + // failed. Else would be already in the left list so nothing else has to be done + if currState == nodeActiveState { + moved, err := e.nDB.changeNodeState(mn.Name, nodeFailedState) + if err != nil { + logrus.WithError(err).Errorf("impossible condition, node %s/%s not present in the list", mn.Name, mn.Addr) + return + } + if moved { + logrus.Infof("Node %s/%s, added to failed nodes list", mn.Name, mn.Addr) + } + } +} + +func (e *eventDelegate) NotifyUpdate(n *memberlist.Node) { +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/message.go b/vendor/github.com/docker/libnetwork/networkdb/message.go new file mode 100644 index 0000000000..81a6d832a6 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/message.go @@ -0,0 +1,98 @@ +package networkdb + +import "github.com/gogo/protobuf/proto" + +const ( + // Compound message header overhead 1 byte(message type) + 4 + // bytes (num messages) + compoundHeaderOverhead = 5 + + // Overhead for each embedded message in a compound message 4 + // bytes (len of embedded message) + compoundOverhead = 4 +) + +func encodeRawMessage(t MessageType, raw []byte) ([]byte, error) { + gMsg := GossipMessage{ + Type: t, + Data: raw, + } + + buf, err := proto.Marshal(&gMsg) + if err != nil { + return nil, err + } + + return buf, nil +} + +func encodeMessage(t MessageType, msg interface{}) ([]byte, error) { + buf, err := proto.Marshal(msg.(proto.Message)) + if err != nil { + return nil, err + } + + buf, err = encodeRawMessage(t, buf) + if err != nil { + return nil, err + } + + return buf, nil +} + +func decodeMessage(buf []byte) (MessageType, []byte, error) { + var gMsg GossipMessage + + err := proto.Unmarshal(buf, &gMsg) + if err != nil { + return MessageTypeInvalid, nil, err + } + + return gMsg.Type, gMsg.Data, nil +} + +// makeCompoundMessage takes a list of messages and generates +// a single compound message 
containing all of them +func makeCompoundMessage(msgs [][]byte) []byte { + cMsg := CompoundMessage{} + + cMsg.Messages = make([]*CompoundMessage_SimpleMessage, 0, len(msgs)) + for _, m := range msgs { + cMsg.Messages = append(cMsg.Messages, &CompoundMessage_SimpleMessage{ + Payload: m, + }) + } + + buf, err := proto.Marshal(&cMsg) + if err != nil { + return nil + } + + gMsg := GossipMessage{ + Type: MessageTypeCompound, + Data: buf, + } + + buf, err = proto.Marshal(&gMsg) + if err != nil { + return nil + } + + return buf +} + +// decodeCompoundMessage splits a compound message and returns +// the slices of individual messages. Returns any potential error. +func decodeCompoundMessage(buf []byte) ([][]byte, error) { + var cMsg CompoundMessage + if err := proto.Unmarshal(buf, &cMsg); err != nil { + return nil, err + } + + parts := make([][]byte, 0, len(cMsg.Messages)) + for _, m := range cMsg.Messages { + parts = append(parts, m.Payload) + } + + return parts, nil +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go new file mode 100644 index 0000000000..c433913a46 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go @@ -0,0 +1,762 @@ +package networkdb + +//go:generate protoc -I.:../vendor/github.com/gogo/protobuf --gogo_out=import_path=github.com/docker/libnetwork/networkdb,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto:. networkdb.proto + +import ( + "context" + "fmt" + "net" + "os" + "strings" + "sync" + "time" + + "github.com/armon/go-radix" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/go-events" + "github.com/docker/libnetwork/types" + "github.com/hashicorp/memberlist" + "github.com/hashicorp/serf/serf" + "github.com/sirupsen/logrus" +) + +const ( + byTable int = 1 + iota + byNetwork +) + +// NetworkDB instance drives the networkdb cluster and acts the broker +// for cluster-scoped and network-scoped gossip and watches. +type NetworkDB struct { + // The clocks MUST be the first things + // in this struct due to Golang issue #599. + + // Global lamport clock for node network attach events. + networkClock serf.LamportClock + + // Global lamport clock for table events. + tableClock serf.LamportClock + + sync.RWMutex + + // NetworkDB configuration. + config *Config + + // All the tree index (byTable, byNetwork) that we maintain + // the db. + indexes map[int]*radix.Tree + + // Memberlist we use to drive the cluster. + memberlist *memberlist.Memberlist + + // List of all peer nodes in the cluster not-limited to any + // network. + nodes map[string]*node + + // List of all peer nodes which have failed + failedNodes map[string]*node + + // List of all peer nodes which have left + leftNodes map[string]*node + + // A multi-dimensional map of network/node attachmemts. The + // first key is a node name and the second key is a network ID + // for the network that node is participating in. + networks map[string]map[string]*network + + // A map of nodes which are participating in a given + // network. The key is a network ID. + networkNodes map[string][]string + + // A table of ack channels for every node from which we are + // waiting for an ack. + bulkSyncAckTbl map[string]chan struct{} + + // Broadcast queue for network event gossip. + networkBroadcasts *memberlist.TransmitLimitedQueue + + // Broadcast queue for node event gossip. 
+ nodeBroadcasts *memberlist.TransmitLimitedQueue + + // A central context to stop all go routines running on + // behalf of the NetworkDB instance. + ctx context.Context + cancelCtx context.CancelFunc + + // A central broadcaster for all local watchers watching table + // events. + broadcaster *events.Broadcaster + + // List of all tickers which needed to be stopped when + // cleaning up. + tickers []*time.Ticker + + // Reference to the memberlist's keyring to add & remove keys + keyring *memberlist.Keyring + + // bootStrapIP is the list of IPs that can be used to bootstrap + // the gossip. + bootStrapIP []net.IP + + // lastStatsTimestamp is the last timestamp when the stats got printed + lastStatsTimestamp time.Time + + // lastHealthTimestamp is the last timestamp when the health score got printed + lastHealthTimestamp time.Time +} + +// PeerInfo represents the peer (gossip cluster) nodes of a network +type PeerInfo struct { + Name string + IP string +} + +// PeerClusterInfo represents the peer (gossip cluster) nodes +type PeerClusterInfo struct { + PeerInfo +} + +type node struct { + memberlist.Node + ltime serf.LamportTime + // Number of hours left before the reaper removes the node + reapTime time.Duration +} + +// network describes the node/network attachment. +type network struct { + // Network ID + id string + + // Lamport time for the latest state of the entry. + ltime serf.LamportTime + + // Node leave is in progress. + leaving bool + + // Number of seconds still left before a deleted network entry gets + // removed from networkDB + reapTime time.Duration + + // The broadcast queue for table event gossip. This is only + // initialized for this node's network attachment entries. + tableBroadcasts *memberlist.TransmitLimitedQueue + + // Number of gossip messages sent related to this network during the last stats collection period + qMessagesSent int + + // Number of entries on the network. This value is the sum of all the entries of all the tables of a specific network. + // Its use is for statistics purposes. It keep tracks of database size and is printed per network every StatsPrintPeriod + // interval + entriesNumber int +} + +// Config represents the configuration of the networdb instance and +// can be passed by the caller. +type Config struct { + // NodeID is the node unique identifier of the node when is part of the cluster + NodeID string + + // Hostname is the node hostname. + Hostname string + + // BindAddr is the IP on which networkdb listens. It can be + // 0.0.0.0 to listen on all addresses on the host. + BindAddr string + + // AdvertiseAddr is the node's IP address that we advertise for + // cluster communication. + AdvertiseAddr string + + // BindPort is the local node's port to which we bind to for + // cluster communication. + BindPort int + + // Keys to be added to the Keyring of the memberlist. Key at index + // 0 is the primary key + Keys [][]byte + + // PacketBufferSize is the maximum number of bytes that memberlist will + // put in a packet (this will be for UDP packets by default with a NetTransport). + // A safe value for this is typically 1400 bytes (which is the default). However, + // depending on your network's MTU (Maximum Transmission Unit) you may + // be able to increase this to get more content into each gossip packet. 
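+	// As an illustration only, a caller tuning for a jumbo-frame
+	// (9000-byte MTU) network could raise it:
+	//
+	//	cfg := networkdb.DefaultConfig()
+	//	cfg.PacketBufferSize = 8500 // leave headroom for UDP/IP headers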
+ PacketBufferSize int + + // reapEntryInterval duration of a deleted entry before being garbage collected + reapEntryInterval time.Duration + + // reapNetworkInterval duration of a delted network before being garbage collected + // NOTE this MUST always be higher than reapEntryInterval + reapNetworkInterval time.Duration + + // StatsPrintPeriod the period to use to print queue stats + // Default is 5min + StatsPrintPeriod time.Duration + + // HealthPrintPeriod the period to use to print the health score + // Default is 1min + HealthPrintPeriod time.Duration +} + +// entry defines a table entry +type entry struct { + // node from which this entry was learned. + node string + + // Lamport time for the most recent update to the entry + ltime serf.LamportTime + + // Opaque value store in the entry + value []byte + + // Deleting the entry is in progress. All entries linger in + // the cluster for certain amount of time after deletion. + deleting bool + + // Number of seconds still left before a deleted table entry gets + // removed from networkDB + reapTime time.Duration +} + +// DefaultConfig returns a NetworkDB config with default values +func DefaultConfig() *Config { + hostname, _ := os.Hostname() + return &Config{ + NodeID: stringid.TruncateID(stringid.GenerateRandomID()), + Hostname: hostname, + BindAddr: "0.0.0.0", + PacketBufferSize: 1400, + StatsPrintPeriod: 5 * time.Minute, + HealthPrintPeriod: 1 * time.Minute, + reapEntryInterval: 30 * time.Minute, + } +} + +// New creates a new instance of NetworkDB using the Config passed by +// the caller. +func New(c *Config) (*NetworkDB, error) { + // The garbage collection logic for entries leverage the presence of the network. + // For this reason the expiration time of the network is put slightly higher than the entry expiration so that + // there is at least 5 extra cycle to make sure that all the entries are properly deleted before deleting the network. + c.reapNetworkInterval = c.reapEntryInterval + 5*reapPeriod + + nDB := &NetworkDB{ + config: c, + indexes: make(map[int]*radix.Tree), + networks: make(map[string]map[string]*network), + nodes: make(map[string]*node), + failedNodes: make(map[string]*node), + leftNodes: make(map[string]*node), + networkNodes: make(map[string][]string), + bulkSyncAckTbl: make(map[string]chan struct{}), + broadcaster: events.NewBroadcaster(), + } + + nDB.indexes[byTable] = radix.New() + nDB.indexes[byNetwork] = radix.New() + + logrus.Infof("New memberlist node - Node:%v will use memberlist nodeID:%v with config:%+v", c.Hostname, c.NodeID, c) + if err := nDB.clusterInit(); err != nil { + return nil, err + } + + return nDB, nil +} + +// Join joins this NetworkDB instance with a list of peer NetworkDB +// instances passed by the caller in the form of addr:port +func (nDB *NetworkDB) Join(members []string) error { + nDB.Lock() + nDB.bootStrapIP = make([]net.IP, 0, len(members)) + for _, m := range members { + nDB.bootStrapIP = append(nDB.bootStrapIP, net.ParseIP(m)) + } + nDB.Unlock() + return nDB.clusterJoin(members) +} + +// Close destroys this NetworkDB instance by leave the cluster, +// stopping timers, canceling goroutines etc. +func (nDB *NetworkDB) Close() { + if err := nDB.clusterLeave(); err != nil { + logrus.Errorf("%v(%v) Could not close DB: %v", nDB.config.Hostname, nDB.config.NodeID, err) + } + + //Avoid (*Broadcaster).run goroutine leak + nDB.broadcaster.Close() +} + +// ClusterPeers returns all the gossip cluster peers. 
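+// For example (hypothetical caller code):
+//
+//	for _, p := range nDB.ClusterPeers() {
+//		fmt.Printf("peer %s at %s\n", p.Name, p.IP)
+//	}
+//
+// Contrast with Peers(nid) below, which is scoped to the sub-cluster of a
+// single network rather than to the whole gossip cluster.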
+func (nDB *NetworkDB) ClusterPeers() []PeerInfo { + nDB.RLock() + defer nDB.RUnlock() + peers := make([]PeerInfo, 0, len(nDB.nodes)) + for _, node := range nDB.nodes { + peers = append(peers, PeerInfo{ + Name: node.Name, + IP: node.Node.Addr.String(), + }) + } + return peers +} + +// Peers returns the gossip peers for a given network. +func (nDB *NetworkDB) Peers(nid string) []PeerInfo { + nDB.RLock() + defer nDB.RUnlock() + peers := make([]PeerInfo, 0, len(nDB.networkNodes[nid])) + for _, nodeName := range nDB.networkNodes[nid] { + if node, ok := nDB.nodes[nodeName]; ok { + peers = append(peers, PeerInfo{ + Name: node.Name, + IP: node.Addr.String(), + }) + } else { + // Added for testing purposes, this condition should never happen else mean that the network list + // is out of sync with the node list + peers = append(peers, PeerInfo{Name: nodeName, IP: "unknown"}) + } + } + return peers +} + +// GetEntry retrieves the value of a table entry in a given (network, +// table, key) tuple +func (nDB *NetworkDB) GetEntry(tname, nid, key string) ([]byte, error) { + nDB.RLock() + defer nDB.RUnlock() + entry, err := nDB.getEntry(tname, nid, key) + if err != nil { + return nil, err + } + if entry != nil && entry.deleting { + return nil, types.NotFoundErrorf("entry in table %s network id %s and key %s deleted and pending garbage collection", tname, nid, key) + } + + return entry.value, nil +} + +func (nDB *NetworkDB) getEntry(tname, nid, key string) (*entry, error) { + e, ok := nDB.indexes[byTable].Get(fmt.Sprintf("/%s/%s/%s", tname, nid, key)) + if !ok { + return nil, types.NotFoundErrorf("could not get entry in table %s with network id %s and key %s", tname, nid, key) + } + + return e.(*entry), nil +} + +// CreateEntry creates a table entry in NetworkDB for given (network, +// table, key) tuple and if the NetworkDB is part of the cluster +// propagates this event to the cluster. It is an error to create an +// entry for the same tuple for which there is already an existing +// entry unless the current entry is deleting state. +func (nDB *NetworkDB) CreateEntry(tname, nid, key string, value []byte) error { + nDB.Lock() + oldEntry, err := nDB.getEntry(tname, nid, key) + if err == nil || (oldEntry != nil && !oldEntry.deleting) { + nDB.Unlock() + return fmt.Errorf("cannot create entry in table %s with network id %s and key %s, already exists", tname, nid, key) + } + + entry := &entry{ + ltime: nDB.tableClock.Increment(), + node: nDB.config.NodeID, + value: value, + } + + nDB.createOrUpdateEntry(nid, tname, key, entry) + nDB.Unlock() + + if err := nDB.sendTableEvent(TableEventTypeCreate, nid, tname, key, entry); err != nil { + return fmt.Errorf("cannot send create event for table %s, %v", tname, err) + } + + return nil +} + +// UpdateEntry updates a table entry in NetworkDB for given (network, +// table, key) tuple and if the NetworkDB is part of the cluster +// propagates this event to the cluster. It is an error to update a +// non-existent entry. 
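+// A typical lifecycle against one tuple looks like this (table, network and
+// key names are invented for illustration; errors elided):
+//
+//	_ = nDB.CreateEntry("endpoint_table", "net1", "ep1", []byte("v1"))
+//	_ = nDB.UpdateEntry("endpoint_table", "net1", "ep1", []byte("v2"))
+//	_ = nDB.DeleteEntry("endpoint_table", "net1", "ep1")
+//
+// Each call increments the table Lamport clock and gossips a matching
+// TableEvent, so peers converge on the same final state.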
+func (nDB *NetworkDB) UpdateEntry(tname, nid, key string, value []byte) error { + nDB.Lock() + if _, err := nDB.getEntry(tname, nid, key); err != nil { + nDB.Unlock() + return fmt.Errorf("cannot update entry as the entry in table %s with network id %s and key %s does not exist", tname, nid, key) + } + + entry := &entry{ + ltime: nDB.tableClock.Increment(), + node: nDB.config.NodeID, + value: value, + } + + nDB.createOrUpdateEntry(nid, tname, key, entry) + nDB.Unlock() + + if err := nDB.sendTableEvent(TableEventTypeUpdate, nid, tname, key, entry); err != nil { + return fmt.Errorf("cannot send table update event: %v", err) + } + + return nil +} + +// TableElem elem +type TableElem struct { + Value []byte + owner string +} + +// GetTableByNetwork walks the networkdb by the give table and network id and +// returns a map of keys and values +func (nDB *NetworkDB) GetTableByNetwork(tname, nid string) map[string]*TableElem { + entries := make(map[string]*TableElem) + nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s/%s", tname, nid), func(k string, v interface{}) bool { + entry := v.(*entry) + if entry.deleting { + return false + } + key := k[strings.LastIndex(k, "/")+1:] + entries[key] = &TableElem{Value: entry.value, owner: entry.node} + return false + }) + return entries +} + +// DeleteEntry deletes a table entry in NetworkDB for given (network, +// table, key) tuple and if the NetworkDB is part of the cluster +// propagates this event to the cluster. +func (nDB *NetworkDB) DeleteEntry(tname, nid, key string) error { + nDB.Lock() + oldEntry, err := nDB.getEntry(tname, nid, key) + if err != nil || oldEntry == nil || oldEntry.deleting { + nDB.Unlock() + return fmt.Errorf("cannot delete entry %s with network id %s and key %s "+ + "does not exist or is already being deleted", tname, nid, key) + } + + entry := &entry{ + ltime: nDB.tableClock.Increment(), + node: nDB.config.NodeID, + value: oldEntry.value, + deleting: true, + reapTime: nDB.config.reapEntryInterval, + } + + nDB.createOrUpdateEntry(nid, tname, key, entry) + nDB.Unlock() + + if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil { + return fmt.Errorf("cannot send table delete event: %v", err) + } + + return nil +} + +func (nDB *NetworkDB) deleteNodeFromNetworks(deletedNode string) { + for nid, nodes := range nDB.networkNodes { + updatedNodes := make([]string, 0, len(nodes)) + for _, node := range nodes { + if node == deletedNode { + continue + } + + updatedNodes = append(updatedNodes, node) + } + + nDB.networkNodes[nid] = updatedNodes + } + + delete(nDB.networks, deletedNode) +} + +// deleteNodeNetworkEntries is called in 2 conditions with 2 different outcomes: +// 1) when a notification is coming of a node leaving the network +// - Walk all the network entries and mark the leaving node's entries for deletion +// These will be garbage collected when the reap timer will expire +// 2) when the local node is leaving the network +// - Walk all the network entries: +// A) if the entry is owned by the local node +// then we will mark it for deletion. This will ensure that if a node did not +// yet received the notification that the local node is leaving, will be aware +// of the entries to be deleted. +// B) if the entry is owned by a remote node, then we can safely delete it. 
This +// ensures that if we join back this network as we receive the CREATE event for +// entries owned by remote nodes, we will accept them and we notify the application +func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) { + // Indicates if the delete is triggered for the local node + isNodeLocal := node == nDB.config.NodeID + + nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), + func(path string, v interface{}) bool { + oldEntry := v.(*entry) + params := strings.Split(path[1:], "/") + nid := params[0] + tname := params[1] + key := params[2] + + // If the entry is owned by a remote node and this node is not leaving the network + if oldEntry.node != node && !isNodeLocal { + // Don't do anything because the event is triggered for a node that does not own this entry + return false + } + + // If this entry is already marked for deletion and this node is not leaving the network + if oldEntry.deleting && !isNodeLocal { + // Don't do anything this entry will be already garbage collected using the old reapTime + return false + } + + entry := &entry{ + ltime: oldEntry.ltime, + node: oldEntry.node, + value: oldEntry.value, + deleting: true, + reapTime: nDB.config.reapEntryInterval, + } + + // we arrived at this point in 2 cases: + // 1) this entry is owned by the node that is leaving the network + // 2) the local node is leaving the network + if oldEntry.node == node { + if isNodeLocal { + // TODO fcrisciani: this can be removed if there is no way to leave the network + // without doing a delete of all the objects + entry.ltime++ + } + + if !oldEntry.deleting { + nDB.createOrUpdateEntry(nid, tname, key, entry) + } + } else { + // the local node is leaving the network, all the entries of remote nodes can be safely removed + nDB.deleteEntry(nid, tname, key) + } + + // Notify to the upper layer only entries not already marked for deletion + if !oldEntry.deleting { + nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value)) + } + return false + }) +} + +func (nDB *NetworkDB) deleteNodeTableEntries(node string) { + nDB.indexes[byTable].Walk(func(path string, v interface{}) bool { + oldEntry := v.(*entry) + if oldEntry.node != node { + return false + } + + params := strings.Split(path[1:], "/") + tname := params[0] + nid := params[1] + key := params[2] + + nDB.deleteEntry(nid, tname, key) + + if !oldEntry.deleting { + nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value)) + } + return false + }) +} + +// WalkTable walks a single table in NetworkDB and invokes the passed +// function for each entry in the table passing the network, key, +// value. The walk stops if the passed function returns a true. +func (nDB *NetworkDB) WalkTable(tname string, fn func(string, string, []byte, bool) bool) error { + nDB.RLock() + values := make(map[string]interface{}) + nDB.indexes[byTable].WalkPrefix(fmt.Sprintf("/%s", tname), func(path string, v interface{}) bool { + values[path] = v + return false + }) + nDB.RUnlock() + + for k, v := range values { + params := strings.Split(k[1:], "/") + nid := params[1] + key := params[2] + if fn(nid, key, v.(*entry).value, v.(*entry).deleting) { + return nil + } + } + + return nil +} + +// JoinNetwork joins this node to a given network and propagates this +// event across the cluster. This triggers this node joining the +// sub-cluster of this network and participates in the network-scoped +// gossip and bulk sync for this network. 
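+// A hypothetical end-to-end attachment sequence (the bootstrap address is
+// an example value):
+//
+//	nDB, _ := networkdb.New(networkdb.DefaultConfig())
+//	_ = nDB.Join([]string{"192.0.2.10:7946"}) // seed peer, addr:port
+//	_ = nDB.JoinNetwork("net1")               // start network-scoped gossip
+//	// ... create/update/delete table entries ...
+//	_ = nDB.LeaveNetwork("net1")              // entries linger until reaped
+//	nDB.Close()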
+func (nDB *NetworkDB) JoinNetwork(nid string) error { + ltime := nDB.networkClock.Increment() + + nDB.Lock() + nodeNetworks, ok := nDB.networks[nDB.config.NodeID] + if !ok { + nodeNetworks = make(map[string]*network) + nDB.networks[nDB.config.NodeID] = nodeNetworks + } + n, ok := nodeNetworks[nid] + var entries int + if ok { + entries = n.entriesNumber + } + nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries} + nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{ + NumNodes: func() int { + //TODO fcrisciani this can be optimized maybe avoiding the lock? + // this call is done each GetBroadcasts call to evaluate the number of + // replicas for the message + nDB.RLock() + defer nDB.RUnlock() + return len(nDB.networkNodes[nid]) + }, + RetransmitMult: 4, + } + nDB.addNetworkNode(nid, nDB.config.NodeID) + networkNodes := nDB.networkNodes[nid] + nDB.Unlock() + + if err := nDB.sendNetworkEvent(nid, NetworkEventTypeJoin, ltime); err != nil { + return fmt.Errorf("failed to send leave network event for %s: %v", nid, err) + } + + logrus.Debugf("%v(%v): joined network %s", nDB.config.Hostname, nDB.config.NodeID, nid) + if _, err := nDB.bulkSync(networkNodes, true); err != nil { + logrus.Errorf("Error bulk syncing while joining network %s: %v", nid, err) + } + + return nil +} + +// LeaveNetwork leaves this node from a given network and propagates +// this event across the cluster. This triggers this node leaving the +// sub-cluster of this network and as a result will no longer +// participate in the network-scoped gossip and bulk sync for this +// network. Also remove all the table entries for this network from +// networkdb +func (nDB *NetworkDB) LeaveNetwork(nid string) error { + ltime := nDB.networkClock.Increment() + if err := nDB.sendNetworkEvent(nid, NetworkEventTypeLeave, ltime); err != nil { + return fmt.Errorf("failed to send leave network event for %s: %v", nid, err) + } + + nDB.Lock() + defer nDB.Unlock() + + // Remove myself from the list of the nodes participating to the network + nDB.deleteNetworkNode(nid, nDB.config.NodeID) + + // Update all the local entries marking them for deletion and delete all the remote entries + nDB.deleteNodeNetworkEntries(nid, nDB.config.NodeID) + + nodeNetworks, ok := nDB.networks[nDB.config.NodeID] + if !ok { + return fmt.Errorf("could not find self node for network %s while trying to leave", nid) + } + + n, ok := nodeNetworks[nid] + if !ok { + return fmt.Errorf("could not find network %s while trying to leave", nid) + } + + logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid) + n.ltime = ltime + n.reapTime = nDB.config.reapNetworkInterval + n.leaving = true + return nil +} + +// addNetworkNode adds the node to the list of nodes which participate +// in the passed network only if it is not already present. Caller +// should hold the NetworkDB lock while calling this +func (nDB *NetworkDB) addNetworkNode(nid string, nodeName string) { + nodes := nDB.networkNodes[nid] + for _, node := range nodes { + if node == nodeName { + return + } + } + + nDB.networkNodes[nid] = append(nDB.networkNodes[nid], nodeName) +} + +// Deletes the node from the list of nodes which participate in the +// passed network. 
Caller should hold the NetworkDB lock while calling +// this +func (nDB *NetworkDB) deleteNetworkNode(nid string, nodeName string) { + nodes, ok := nDB.networkNodes[nid] + if !ok || len(nodes) == 0 { + return + } + newNodes := make([]string, 0, len(nodes)-1) + for _, name := range nodes { + if name == nodeName { + continue + } + newNodes = append(newNodes, name) + } + nDB.networkNodes[nid] = newNodes +} + +// findCommonnetworks find the networks that both this node and the +// passed node have joined. +func (nDB *NetworkDB) findCommonNetworks(nodeName string) []string { + nDB.RLock() + defer nDB.RUnlock() + + var networks []string + for nid := range nDB.networks[nDB.config.NodeID] { + if n, ok := nDB.networks[nodeName][nid]; ok { + if !n.leaving { + networks = append(networks, nid) + } + } + } + + return networks +} + +func (nDB *NetworkDB) updateLocalNetworkTime() { + nDB.Lock() + defer nDB.Unlock() + + ltime := nDB.networkClock.Increment() + for _, n := range nDB.networks[nDB.config.NodeID] { + n.ltime = ltime + } +} + +// createOrUpdateEntry this function handles the creation or update of entries into the local +// tree store. It is also used to keep in sync the entries number of the network (all tables are aggregated) +func (nDB *NetworkDB) createOrUpdateEntry(nid, tname, key string, entry interface{}) (bool, bool) { + _, okTable := nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry) + _, okNetwork := nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry) + if !okNetwork { + // Add only if it is an insert not an update + n, ok := nDB.networks[nDB.config.NodeID][nid] + if ok { + n.entriesNumber++ + } + } + return okTable, okNetwork +} + +// deleteEntry this function handles the deletion of entries into the local tree store. +// It is also used to keep in sync the entries number of the network (all tables are aggregated) +func (nDB *NetworkDB) deleteEntry(nid, tname, key string) (bool, bool) { + _, okTable := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)) + _, okNetwork := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)) + if okNetwork { + // Remove only if the delete is successful + n, ok := nDB.networks[nDB.config.NodeID][nid] + if ok { + n.entriesNumber-- + } + } + return okTable, okNetwork +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go new file mode 100644 index 0000000000..7087a57ca0 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go @@ -0,0 +1,2733 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: networkdb.proto + +/* + Package networkdb is a generated protocol buffer package. + + It is generated from these files: + networkdb.proto + + It has these top-level messages: + GossipMessage + NodeEvent + NetworkEvent + NetworkEntry + NetworkPushPull + TableEvent + BulkSyncMessage + CompoundMessage +*/ +package networkdb + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import github_com_hashicorp_serf_serf "github.com/hashicorp/serf/serf" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// MessageType enum defines all the core message types that networkdb +// uses to communicate to peers. +type MessageType int32 + +const ( + MessageTypeInvalid MessageType = 0 + // NetworEvent message type is used to communicate network + // attachments on the node. + MessageTypeNetworkEvent MessageType = 1 + // TableEvent message type is used to communicate any table + // CRUD event that happened on the node. + MessageTypeTableEvent MessageType = 2 + // PushPull message type is used to syncup all network + // attachments on a peer node either during startup of this + // node or with a random peer node periodically thereafter. + MessageTypePushPull MessageType = 3 + // BulkSync message is used to bulksync the whole networkdb + // state with a peer node during startup of this node or with + // a random peer node periodically thereafter. + MessageTypeBulkSync MessageType = 4 + // Compound message type is used to form a compound message + // which is a pack of many message of above types, packed into + // a single compound message. + MessageTypeCompound MessageType = 5 + // NodeEvent message type is used to communicare node + // join/leave events in the cluster + MessageTypeNodeEvent MessageType = 6 +) + +var MessageType_name = map[int32]string{ + 0: "INVALID", + 1: "NETWORK_EVENT", + 2: "TABLE_EVENT", + 3: "PUSH_PULL", + 4: "BULK_SYNC", + 5: "COMPOUND", + 6: "NODE_EVENT", +} +var MessageType_value = map[string]int32{ + "INVALID": 0, + "NETWORK_EVENT": 1, + "TABLE_EVENT": 2, + "PUSH_PULL": 3, + "BULK_SYNC": 4, + "COMPOUND": 5, + "NODE_EVENT": 6, +} + +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{0} } + +type NodeEvent_Type int32 + +const ( + NodeEventTypeInvalid NodeEvent_Type = 0 + // Join event is generated when this node joins the cluster. + NodeEventTypeJoin NodeEvent_Type = 1 + // Leave event is generated when this node leaves the cluster. + NodeEventTypeLeave NodeEvent_Type = 2 +) + +var NodeEvent_Type_name = map[int32]string{ + 0: "INVALID", + 1: "JOIN", + 2: "LEAVE", +} +var NodeEvent_Type_value = map[string]int32{ + "INVALID": 0, + "JOIN": 1, + "LEAVE": 2, +} + +func (x NodeEvent_Type) String() string { + return proto.EnumName(NodeEvent_Type_name, int32(x)) +} +func (NodeEvent_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{1, 0} } + +type NetworkEvent_Type int32 + +const ( + NetworkEventTypeInvalid NetworkEvent_Type = 0 + // Join event is generated when this node joins a network. + NetworkEventTypeJoin NetworkEvent_Type = 1 + // Leave event is generated when this node leaves a network. 
+ NetworkEventTypeLeave NetworkEvent_Type = 2 +) + +var NetworkEvent_Type_name = map[int32]string{ + 0: "INVALID", + 1: "JOIN", + 2: "LEAVE", +} +var NetworkEvent_Type_value = map[string]int32{ + "INVALID": 0, + "JOIN": 1, + "LEAVE": 2, +} + +func (x NetworkEvent_Type) String() string { + return proto.EnumName(NetworkEvent_Type_name, int32(x)) +} +func (NetworkEvent_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{2, 0} } + +type TableEvent_Type int32 + +const ( + TableEventTypeInvalid TableEvent_Type = 0 + // Create signifies that this table entry was just + // created. + TableEventTypeCreate TableEvent_Type = 1 + // Update signifies that this table entry was just + // updated. + TableEventTypeUpdate TableEvent_Type = 2 + // Delete signifies that this table entry was just + // updated. + TableEventTypeDelete TableEvent_Type = 3 +) + +var TableEvent_Type_name = map[int32]string{ + 0: "INVALID", + 1: "CREATE", + 2: "UPDATE", + 3: "DELETE", +} +var TableEvent_Type_value = map[string]int32{ + "INVALID": 0, + "CREATE": 1, + "UPDATE": 2, + "DELETE": 3, +} + +func (x TableEvent_Type) String() string { + return proto.EnumName(TableEvent_Type_name, int32(x)) +} +func (TableEvent_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{5, 0} } + +// GossipMessage is a basic message header used by all messages types. +type GossipMessage struct { + Type MessageType `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.MessageType" json:"type,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *GossipMessage) Reset() { *m = GossipMessage{} } +func (*GossipMessage) ProtoMessage() {} +func (*GossipMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{0} } + +func (m *GossipMessage) GetType() MessageType { + if m != nil { + return m.Type + } + return MessageTypeInvalid +} + +func (m *GossipMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// NodeEvent message payload definition. +type NodeEvent struct { + Type NodeEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.NodeEvent_Type" json:"type,omitempty"` + // Lamport time using a network lamport clock indicating the + // time this event was generated on the node where it was + // generated. + LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + // Source node name. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` +} + +func (m *NodeEvent) Reset() { *m = NodeEvent{} } +func (*NodeEvent) ProtoMessage() {} +func (*NodeEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{1} } + +func (m *NodeEvent) GetType() NodeEvent_Type { + if m != nil { + return m.Type + } + return NodeEventTypeInvalid +} + +func (m *NodeEvent) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +// NetworkEvent message payload definition. +type NetworkEvent struct { + Type NetworkEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.NetworkEvent_Type" json:"type,omitempty"` + // Lamport time using a network lamport clock indicating the + // time this event was generated on the node where it was + // generated. 
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + // Source node name. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // ID of the network for which the event is generated. + NetworkID string `protobuf:"bytes,4,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *NetworkEvent) Reset() { *m = NetworkEvent{} } +func (*NetworkEvent) ProtoMessage() {} +func (*NetworkEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{2} } + +func (m *NetworkEvent) GetType() NetworkEvent_Type { + if m != nil { + return m.Type + } + return NetworkEventTypeInvalid +} + +func (m *NetworkEvent) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *NetworkEvent) GetNetworkID() string { + if m != nil { + return m.NetworkID + } + return "" +} + +// NetworkEntry for push pull of networks. +type NetworkEntry struct { + // ID of the network + NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Latest lamport time of the network attachment when this + // network event was recorded. + LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + // Source node name where this network attachment happened. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // Indicates if a leave from this network is in progress. + Leaving bool `protobuf:"varint,4,opt,name=leaving,proto3" json:"leaving,omitempty"` +} + +func (m *NetworkEntry) Reset() { *m = NetworkEntry{} } +func (*NetworkEntry) ProtoMessage() {} +func (*NetworkEntry) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{3} } + +func (m *NetworkEntry) GetNetworkID() string { + if m != nil { + return m.NetworkID + } + return "" +} + +func (m *NetworkEntry) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *NetworkEntry) GetLeaving() bool { + if m != nil { + return m.Leaving + } + return false +} + +// NetworkPushpull message payload definition. +type NetworkPushPull struct { + // Lamport time when this push pull was initiated. + LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,1,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + Networks []*NetworkEntry `protobuf:"bytes,2,rep,name=networks" json:"networks,omitempty"` + // Name of the node sending this push pull payload. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` +} + +func (m *NetworkPushPull) Reset() { *m = NetworkPushPull{} } +func (*NetworkPushPull) ProtoMessage() {} +func (*NetworkPushPull) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{4} } + +func (m *NetworkPushPull) GetNetworks() []*NetworkEntry { + if m != nil { + return m.Networks + } + return nil +} + +func (m *NetworkPushPull) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +// TableEvent message payload definition. +type TableEvent struct { + Type TableEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.TableEvent_Type" json:"type,omitempty"` + // Lamport time when this event was generated. 
+ LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,2,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + // Node name where this event originated. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // ID of the network to which this table entry belongs. + NetworkID string `protobuf:"bytes,4,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // Name of the table to which this table entry belongs. + TableName string `protobuf:"bytes,5,opt,name=table_name,json=tableName,proto3" json:"table_name,omitempty"` + // Entry key. + Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + // Entry value. + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + // Residual reap time for the entry before getting deleted in seconds + ResidualReapTime int32 `protobuf:"varint,8,opt,name=residual_reap_time,json=residualReapTime,proto3" json:"residual_reap_time,omitempty"` +} + +func (m *TableEvent) Reset() { *m = TableEvent{} } +func (*TableEvent) ProtoMessage() {} +func (*TableEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{5} } + +func (m *TableEvent) GetType() TableEvent_Type { + if m != nil { + return m.Type + } + return TableEventTypeInvalid +} + +func (m *TableEvent) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *TableEvent) GetNetworkID() string { + if m != nil { + return m.NetworkID + } + return "" +} + +func (m *TableEvent) GetTableName() string { + if m != nil { + return m.TableName + } + return "" +} + +func (m *TableEvent) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *TableEvent) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *TableEvent) GetResidualReapTime() int32 { + if m != nil { + return m.ResidualReapTime + } + return 0 +} + +// BulkSync message payload definition. +type BulkSyncMessage struct { + // Lamport time when this bulk sync was initiated. + LTime github_com_hashicorp_serf_serf.LamportTime `protobuf:"varint,1,opt,name=l_time,json=lTime,proto3,customtype=github.com/hashicorp/serf/serf.LamportTime" json:"l_time"` + // Indicates if this bulksync is a response to a bulk sync + // request from a peer node. + Unsolicited bool `protobuf:"varint,2,opt,name=unsolicited,proto3" json:"unsolicited,omitempty"` + // Name of the node which is producing this bulk sync message. + NodeName string `protobuf:"bytes,3,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + // List of network names whose table entries are getting + // bulksynced as part of the bulksync. 
+ Networks []string `protobuf:"bytes,4,rep,name=networks" json:"networks,omitempty"` + // Bulksync payload + Payload []byte `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (m *BulkSyncMessage) Reset() { *m = BulkSyncMessage{} } +func (*BulkSyncMessage) ProtoMessage() {} +func (*BulkSyncMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{6} } + +func (m *BulkSyncMessage) GetUnsolicited() bool { + if m != nil { + return m.Unsolicited + } + return false +} + +func (m *BulkSyncMessage) GetNodeName() string { + if m != nil { + return m.NodeName + } + return "" +} + +func (m *BulkSyncMessage) GetNetworks() []string { + if m != nil { + return m.Networks + } + return nil +} + +func (m *BulkSyncMessage) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +// Compound message payload definition. +type CompoundMessage struct { + // A list of simple messages. + Messages []*CompoundMessage_SimpleMessage `protobuf:"bytes,1,rep,name=messages" json:"messages,omitempty"` +} + +func (m *CompoundMessage) Reset() { *m = CompoundMessage{} } +func (*CompoundMessage) ProtoMessage() {} +func (*CompoundMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{7} } + +func (m *CompoundMessage) GetMessages() []*CompoundMessage_SimpleMessage { + if m != nil { + return m.Messages + } + return nil +} + +type CompoundMessage_SimpleMessage struct { + // Bytestring payload of a message constructed using + // other message type definitions. + Payload []byte `protobuf:"bytes,1,opt,name=Payload,proto3" json:"Payload,omitempty"` +} + +func (m *CompoundMessage_SimpleMessage) Reset() { *m = CompoundMessage_SimpleMessage{} } +func (*CompoundMessage_SimpleMessage) ProtoMessage() {} +func (*CompoundMessage_SimpleMessage) Descriptor() ([]byte, []int) { + return fileDescriptorNetworkdb, []int{7, 0} +} + +func (m *CompoundMessage_SimpleMessage) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func init() { + proto.RegisterType((*GossipMessage)(nil), "networkdb.GossipMessage") + proto.RegisterType((*NodeEvent)(nil), "networkdb.NodeEvent") + proto.RegisterType((*NetworkEvent)(nil), "networkdb.NetworkEvent") + proto.RegisterType((*NetworkEntry)(nil), "networkdb.NetworkEntry") + proto.RegisterType((*NetworkPushPull)(nil), "networkdb.NetworkPushPull") + proto.RegisterType((*TableEvent)(nil), "networkdb.TableEvent") + proto.RegisterType((*BulkSyncMessage)(nil), "networkdb.BulkSyncMessage") + proto.RegisterType((*CompoundMessage)(nil), "networkdb.CompoundMessage") + proto.RegisterType((*CompoundMessage_SimpleMessage)(nil), "networkdb.CompoundMessage.SimpleMessage") + proto.RegisterEnum("networkdb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("networkdb.NodeEvent_Type", NodeEvent_Type_name, NodeEvent_Type_value) + proto.RegisterEnum("networkdb.NetworkEvent_Type", NetworkEvent_Type_name, NetworkEvent_Type_value) + proto.RegisterEnum("networkdb.TableEvent_Type", TableEvent_Type_name, TableEvent_Type_value) +} +func (this *GossipMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&networkdb.GossipMessage{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NodeEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, 
"&networkdb.NodeEvent{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&networkdb.NetworkEvent{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkEntry) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&networkdb.NetworkEntry{") + s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "Leaving: "+fmt.Sprintf("%#v", this.Leaving)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *NetworkPushPull) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&networkdb.NetworkPushPull{") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + if this.Networks != nil { + s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n") + } + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *TableEvent) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 12) + s = append(s, "&networkdb.TableEvent{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "NetworkID: "+fmt.Sprintf("%#v", this.NetworkID)+",\n") + s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n") + s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "ResidualReapTime: "+fmt.Sprintf("%#v", this.ResidualReapTime)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BulkSyncMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&networkdb.BulkSyncMessage{") + s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n") + s = append(s, "Unsolicited: "+fmt.Sprintf("%#v", this.Unsolicited)+",\n") + s = append(s, "NodeName: "+fmt.Sprintf("%#v", this.NodeName)+",\n") + s = append(s, "Networks: "+fmt.Sprintf("%#v", this.Networks)+",\n") + s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CompoundMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&networkdb.CompoundMessage{") + if this.Messages != nil { + s = append(s, "Messages: "+fmt.Sprintf("%#v", this.Messages)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *CompoundMessage_SimpleMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&networkdb.CompoundMessage_SimpleMessage{") + s = append(s, "Payload: 
"+fmt.Sprintf("%#v", this.Payload)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringNetworkdb(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *GossipMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GossipMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func (m *NodeEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeEvent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type)) + } + if m.LTime != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + return i, nil +} + +func (m *NetworkEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkEvent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type)) + } + if m.LTime != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *NetworkEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if m.LTime != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + if m.Leaving { + dAtA[i] = 0x20 + i++ + if m.Leaving { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *NetworkPushPull) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPushPull) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + 
var l int + _ = l + if m.LTime != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x12 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + return i, nil +} + +func (m *TableEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableEvent) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type)) + } + if m.LTime != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if len(m.TableName) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.TableName))) + i += copy(dAtA[i:], m.TableName) + } + if len(m.Key) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + if m.ResidualReapTime != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.ResidualReapTime)) + } + return i, nil +} + +func (m *BulkSyncMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BulkSyncMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LTime != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime)) + } + if m.Unsolicited { + dAtA[i] = 0x10 + i++ + if m.Unsolicited { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.NodeName) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + } + if len(m.Networks) > 0 { + for _, s := range m.Networks { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Payload) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Payload))) + i += copy(dAtA[i:], m.Payload) + } + return i, nil +} + +func (m *CompoundMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompoundMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0xa + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + 
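// Every hand-rolled MarshalTo above follows the same wire layout, so the encoding of any of
// these messages can be read off its tag bytes. A minimal round-trip sketch, assuming the
// generated package is importable via its vendored path and using an illustrative numeric
// MessageType value (2) rather than a named enum constant:
package main

import (
	"fmt"

	"github.com/docker/libnetwork/networkdb"
)

func main() {
	in := networkdb.GossipMessage{
		Type: networkdb.MessageType(2), // field 1, wire type 0 -> tag byte 0x08, then varint 0x02
		Data: []byte("hello"),          // field 2, wire type 2 -> tag byte 0x12, varint length 0x05, raw bytes
	}

	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", raw) // 08 02 12 05 68 65 6c 6c 6f

	var out networkdb.GossipMessage
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Type, string(out.Data)) // the original field values round-trip
}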
+func (m *CompoundMessage_SimpleMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CompoundMessage_SimpleMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Payload))) + i += copy(dAtA[i:], m.Payload) + } + return i, nil +} + +func encodeFixed64Networkdb(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Networkdb(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintNetworkdb(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *GossipMessage) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovNetworkdb(uint64(m.Type)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func (m *NodeEvent) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovNetworkdb(uint64(m.Type)) + } + if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func (m *NetworkEvent) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovNetworkdb(uint64(m.Type)) + } + if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func (m *NetworkEntry) Size() (n int) { + var l int + _ = l + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + if m.Leaving { + n += 2 + } + return n +} + +func (m *NetworkPushPull) Size() (n int) { + var l int + _ = l + if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovNetworkdb(uint64(l)) + } + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func (m *TableEvent) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovNetworkdb(uint64(m.Type)) + } + if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + l = len(m.TableName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + if m.ResidualReapTime != 0 { + n += 1 + sovNetworkdb(uint64(m.ResidualReapTime)) + } + return n +} + +func (m *BulkSyncMessage) Size() (n int) { + var l int + _ = l + 
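// Size must return exactly the byte count MarshalTo will write (one tag byte per present field plus sovNetworkdb bytes for each varint), since Marshal pre-allocates its output buffer from this value.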
if m.LTime != 0 { + n += 1 + sovNetworkdb(uint64(m.LTime)) + } + if m.Unsolicited { + n += 2 + } + l = len(m.NodeName) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + if len(m.Networks) > 0 { + for _, s := range m.Networks { + l = len(s) + n += 1 + l + sovNetworkdb(uint64(l)) + } + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func (m *CompoundMessage) Size() (n int) { + var l int + _ = l + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovNetworkdb(uint64(l)) + } + } + return n +} + +func (m *CompoundMessage_SimpleMessage) Size() (n int) { + var l int + _ = l + l = len(m.Payload) + if l > 0 { + n += 1 + l + sovNetworkdb(uint64(l)) + } + return n +} + +func sovNetworkdb(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozNetworkdb(x uint64) (n int) { + return sovNetworkdb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GossipMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GossipMessage{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *NodeEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkEntry{`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `Leaving:` + fmt.Sprintf("%v", this.Leaving) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPushPull) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPushPull{`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkEntry", "NetworkEntry", 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *TableEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `TableName:` + fmt.Sprintf("%v", this.TableName) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ResidualReapTime:` + fmt.Sprintf("%v", this.ResidualReapTime) + `,`, + `}`, + }, "") + return s +} +func (this *BulkSyncMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BulkSyncMessage{`, + `LTime:` + fmt.Sprintf("%v", this.LTime) + `,`, + `Unsolicited:` + fmt.Sprintf("%v", this.Unsolicited) + `,`, + `NodeName:` + fmt.Sprintf("%v", 
this.NodeName) + `,`, + `Networks:` + fmt.Sprintf("%v", this.Networks) + `,`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `}`, + }, "") + return s +} +func (this *CompoundMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CompoundMessage{`, + `Messages:` + strings.Replace(fmt.Sprintf("%v", this.Messages), "CompoundMessage_SimpleMessage", "CompoundMessage_SimpleMessage", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CompoundMessage_SimpleMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CompoundMessage_SimpleMessage{`, + `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, + `}`, + }, "") + return s +} +func valueToStringNetworkdb(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GossipMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GossipMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GossipMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
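// The append above copies the payload out of dAtA, so the decoded message never aliases the caller's buffer; the nil check below normalizes a zero-length field into a non-nil empty slice.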
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (NodeEvent_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (NetworkEvent_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leaving", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leaving = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPushPull) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPushPull: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPushPull: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkEntry{}) + if err := 
m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (TableEvent_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
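// A byte with the continuation (0x80) bit clear is the final byte of the varint.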
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TableName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TableName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResidualReapTime", wireType) + } + m.ResidualReapTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResidualReapTime |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BulkSyncMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BulkSyncMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BulkSyncMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LTime", wireType) + } + m.LTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unsolicited", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Unsolicited = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompoundMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CompoundMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CompoundMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, &CompoundMessage_SimpleMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CompoundMessage_SimpleMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SimpleMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SimpleMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", 
wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthNetworkdb + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipNetworkdb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthNetworkdb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipNetworkdb(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthNetworkdb + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowNetworkdb + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipNetworkdb(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthNetworkdb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowNetworkdb = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("networkdb.proto", fileDescriptorNetworkdb) } + +var fileDescriptorNetworkdb = []byte{ + // 953 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcd, 0x6e, 0xe3, 0x54, + 0x14, 0xc7, 0x7b, 0xf3, 0xd5, 0xe4, 0x34, 0xa5, 0xe6, 0x4e, 0x67, 0xc6, 0xe3, 0x81, 0xc4, 0x98, + 0x99, 0x2a, 0x53, 0x41, 0x8a, 0x3a, 0x4f, 0xd0, 0x24, 0x16, 0x64, 0x26, 0xe3, 0x44, 0x6e, 0x52, + 0xc4, 0x2a, 0xba, 0xad, 0x2f, 0xa9, 0x55, 0xc7, 0xb6, 0x6c, 0x27, 0x28, 0x2b, 0x10, 0xab, 0x51, + 0x16, 0xbc, 0x41, 0x56, 0xc3, 0x9a, 0x07, 0x40, 0x2c, 0x59, 0xcc, 0x82, 0x05, 0xec, 0x10, 0x8b, + 0x88, 0xe6, 0x09, 0x78, 0x04, 0xe4, 0x6b, 0x3b, 0xb9, 0x49, 
0xab, 0x91, 0x10, 0x23, 0xc1, 0x26, + 0xb9, 0x1f, 0xbf, 0x1c, 0x9f, 0xf3, 0xf7, 0xff, 0xdc, 0x1b, 0xd8, 0xb3, 0x69, 0xf0, 0x95, 0xe3, + 0x5d, 0x19, 0xe7, 0x55, 0xd7, 0x73, 0x02, 0x07, 0x17, 0x96, 0x0b, 0xd2, 0xfe, 0xc0, 0x19, 0x38, + 0x6c, 0xf5, 0x28, 0x1c, 0x45, 0x80, 0xd2, 0x86, 0xdd, 0x4f, 0x1d, 0xdf, 0x37, 0xdd, 0x17, 0xd4, + 0xf7, 0xc9, 0x80, 0xe2, 0x43, 0xc8, 0x04, 0x13, 0x97, 0x8a, 0x48, 0x46, 0x95, 0x77, 0x8e, 0xef, + 0x55, 0x57, 0x11, 0x63, 0xa2, 0x3b, 0x71, 0xa9, 0xce, 0x18, 0x8c, 0x21, 0x63, 0x90, 0x80, 0x88, + 0x29, 0x19, 0x55, 0x8a, 0x3a, 0x1b, 0x2b, 0xaf, 0x52, 0x50, 0xd0, 0x1c, 0x83, 0xaa, 0x63, 0x6a, + 0x07, 0xf8, 0xe3, 0xb5, 0x68, 0x0f, 0xb8, 0x68, 0x4b, 0xa6, 0xca, 0x05, 0x6c, 0x42, 0xce, 0xea, + 0x07, 0xe6, 0x90, 0xb2, 0x90, 0x99, 0xda, 0xf1, 0xeb, 0x79, 0x79, 0xeb, 0x8f, 0x79, 0xf9, 0x70, + 0x60, 0x06, 0x97, 0xa3, 0xf3, 0xea, 0x85, 0x33, 0x3c, 0xba, 0x24, 0xfe, 0xa5, 0x79, 0xe1, 0x78, + 0xee, 0x91, 0x4f, 0xbd, 0x2f, 0xd9, 0x47, 0xb5, 0x45, 0x86, 0xae, 0xe3, 0x05, 0x5d, 0x73, 0x48, + 0xf5, 0xac, 0x15, 0x7e, 0xe1, 0x87, 0x50, 0xb0, 0x1d, 0x83, 0xf6, 0x6d, 0x32, 0xa4, 0x62, 0x5a, + 0x46, 0x95, 0x82, 0x9e, 0x0f, 0x17, 0x34, 0x32, 0xa4, 0xca, 0xd7, 0x90, 0x09, 0x9f, 0x8a, 0x1f, + 0xc3, 0x76, 0x53, 0x3b, 0x3b, 0x69, 0x35, 0x1b, 0xc2, 0x96, 0x24, 0x4e, 0x67, 0xf2, 0xfe, 0x32, + 0xad, 0x70, 0xbf, 0x69, 0x8f, 0x89, 0x65, 0x1a, 0xb8, 0x0c, 0x99, 0x67, 0xed, 0xa6, 0x26, 0x20, + 0xe9, 0xee, 0x74, 0x26, 0xbf, 0xbb, 0xc6, 0x3c, 0x73, 0x4c, 0x1b, 0x7f, 0x00, 0xd9, 0x96, 0x7a, + 0x72, 0xa6, 0x0a, 0x29, 0xe9, 0xde, 0x74, 0x26, 0xe3, 0x35, 0xa2, 0x45, 0xc9, 0x98, 0x4a, 0xc5, + 0x97, 0xaf, 0x4a, 0x5b, 0x3f, 0x7e, 0x5f, 0x62, 0x0f, 0x56, 0xae, 0x53, 0x50, 0xd4, 0x22, 0x2d, + 0x22, 0xa1, 0x3e, 0x59, 0x13, 0xea, 0x3d, 0x5e, 0x28, 0x0e, 0xfb, 0x0f, 0xb4, 0xc2, 0x1f, 0x01, + 0xc4, 0xc9, 0xf4, 0x4d, 0x43, 0xcc, 0x84, 0xbb, 0xb5, 0xdd, 0xc5, 0xbc, 0x5c, 0x88, 0x13, 0x6b, + 0x36, 0xf4, 0xc4, 0x65, 0x4d, 0x43, 0x79, 0x89, 0x62, 0x69, 0x2b, 0xbc, 0xb4, 0x0f, 0xa7, 0x33, + 0xf9, 0x3e, 0x5f, 0x08, 0xaf, 0xae, 0xb2, 0x54, 0x37, 0x7a, 0x03, 0x1b, 0x18, 0x13, 0xf8, 0xd1, + 0x4a, 0xe0, 0x07, 0xd3, 0x99, 0x7c, 0x77, 0x13, 0xba, 0x4d, 0xe3, 0x5f, 0xd0, 0x4a, 0x63, 0x3b, + 0xf0, 0x26, 0x1b, 0x95, 0xa0, 0x37, 0x57, 0xf2, 0x36, 0xf5, 0x7d, 0x72, 0x43, 0xdf, 0x5a, 0x71, + 0x31, 0x2f, 0xe7, 0xb5, 0x58, 0x63, 0x4e, 0x6d, 0x11, 0xb6, 0x2d, 0x4a, 0xc6, 0xa6, 0x3d, 0x60, + 0x52, 0xe7, 0xf5, 0x64, 0xaa, 0xfc, 0x84, 0x60, 0x2f, 0x4e, 0xb4, 0x33, 0xf2, 0x2f, 0x3b, 0x23, + 0xcb, 0xe2, 0x72, 0x44, 0xff, 0x36, 0xc7, 0xa7, 0x90, 0x8f, 0x6b, 0xf7, 0xc5, 0x94, 0x9c, 0xae, + 0xec, 0x1c, 0xdf, 0xbf, 0xc5, 0x84, 0xa1, 0x8e, 0xfa, 0x12, 0xfc, 0x07, 0x85, 0x29, 0xdf, 0x65, + 0x00, 0xba, 0xe4, 0xdc, 0x8a, 0x0f, 0x86, 0xea, 0x9a, 0xdf, 0x25, 0xee, 0x51, 0x2b, 0xe8, 0x7f, + 0xef, 0x76, 0xfc, 0x3e, 0x40, 0x10, 0xa6, 0x1b, 0xc5, 0xca, 0xb2, 0x58, 0x05, 0xb6, 0xc2, 0x82, + 0x09, 0x90, 0xbe, 0xa2, 0x13, 0x31, 0xc7, 0xd6, 0xc3, 0x21, 0xde, 0x87, 0xec, 0x98, 0x58, 0x23, + 0x2a, 0x6e, 0xb3, 0x23, 0x33, 0x9a, 0xe0, 0x1a, 0x60, 0x8f, 0xfa, 0xa6, 0x31, 0x22, 0x56, 0xdf, + 0xa3, 0xc4, 0x8d, 0x0a, 0xcd, 0xcb, 0xa8, 0x92, 0xad, 0xed, 0x2f, 0xe6, 0x65, 0x41, 0x8f, 0x77, + 0x75, 0x4a, 0x5c, 0x56, 0x8a, 0xe0, 0x6d, 0xac, 0x28, 0x3f, 0x24, 0x8d, 0x77, 0xc0, 0x37, 0x1e, + 0x6b, 0x96, 0x95, 0xa2, 0x7c, 0xdb, 0x3d, 0x82, 0x5c, 0x5d, 0x57, 0x4f, 0xba, 0x6a, 0xd2, 0x78, + 0xeb, 0x58, 0xdd, 0xa3, 0x24, 0xa0, 0x21, 0xd5, 0xeb, 0x34, 0x42, 0x2a, 0x75, 0x1b, 0xd5, 0x73, + 0x8d, 0x98, 0x6a, 0xa8, 0x2d, 0xb5, 0xab, 0x0a, 0xe9, 0xdb, 0xa8, 0x06, 0xb5, 0x68, 
0xb0, 0xd9, + 0x9e, 0xbf, 0x21, 0xd8, 0xab, 0x8d, 0xac, 0xab, 0xd3, 0x89, 0x7d, 0x91, 0x5c, 0x3e, 0x6f, 0xd1, + 0xcf, 0x32, 0xec, 0x8c, 0x6c, 0xdf, 0xb1, 0xcc, 0x0b, 0x33, 0xa0, 0x06, 0x73, 0x4d, 0x5e, 0xe7, + 0x97, 0xde, 0xec, 0x03, 0x89, 0x6b, 0x87, 0x8c, 0x9c, 0x66, 0x7b, 0x89, 0xeb, 0x45, 0xd8, 0x76, + 0xc9, 0xc4, 0x72, 0x88, 0xc1, 0x5e, 0x79, 0x51, 0x4f, 0xa6, 0xca, 0xb7, 0x08, 0xf6, 0xea, 0xce, + 0xd0, 0x75, 0x46, 0xb6, 0x91, 0xd4, 0xd4, 0x80, 0xfc, 0x30, 0x1a, 0xfa, 0x22, 0x62, 0x8d, 0x55, + 0xe1, 0xdc, 0xbe, 0x41, 0x57, 0x4f, 0xcd, 0xa1, 0x6b, 0xd1, 0x78, 0xa6, 0x2f, 0x7f, 0x29, 0x3d, + 0x81, 0xdd, 0xb5, 0xad, 0x30, 0x89, 0x4e, 0x9c, 0x04, 0x8a, 0x92, 0x88, 0xa7, 0x87, 0x3f, 0xa7, + 0x60, 0x87, 0xbb, 0xab, 0xf1, 0x87, 0xbc, 0x21, 0xd8, 0xf5, 0xc4, 0xed, 0x26, 0x6e, 0xa8, 0xc2, + 0xae, 0xa6, 0x76, 0x3f, 0x6f, 0xeb, 0xcf, 0xfb, 0xea, 0x99, 0xaa, 0x75, 0x05, 0x14, 0x1d, 0xda, + 0x1c, 0xba, 0x76, 0x5f, 0x1d, 0xc2, 0x4e, 0xf7, 0xa4, 0xd6, 0x52, 0x63, 0x3a, 0x3e, 0x96, 0x39, + 0x9a, 0xeb, 0xf5, 0x03, 0x28, 0x74, 0x7a, 0xa7, 0x9f, 0xf5, 0x3b, 0xbd, 0x56, 0x4b, 0x48, 0x4b, + 0xf7, 0xa7, 0x33, 0xf9, 0x0e, 0x47, 0x2e, 0x4f, 0xb3, 0x03, 0x28, 0xd4, 0x7a, 0xad, 0xe7, 0xfd, + 0xd3, 0x2f, 0xb4, 0xba, 0x90, 0xb9, 0xc1, 0x25, 0x66, 0xc1, 0x8f, 0x21, 0x5f, 0x6f, 0xbf, 0xe8, + 0xb4, 0x7b, 0x5a, 0x43, 0xc8, 0xde, 0xc0, 0x12, 0x45, 0x71, 0x05, 0x40, 0x6b, 0x37, 0x92, 0x0c, + 0x73, 0x91, 0x31, 0xf9, 0x7a, 0x92, 0x4b, 0x5a, 0xba, 0x13, 0x1b, 0x93, 0x97, 0xad, 0x26, 0xfe, + 0x7e, 0x5d, 0xda, 0xfa, 0xeb, 0xba, 0x84, 0xbe, 0x59, 0x94, 0xd0, 0xeb, 0x45, 0x09, 0xfd, 0xba, + 0x28, 0xa1, 0x3f, 0x17, 0x25, 0x74, 0x9e, 0x63, 0x7f, 0x9d, 0x9e, 0xfe, 0x1d, 0x00, 0x00, 0xff, + 0xff, 0x92, 0x82, 0xdb, 0x1a, 0x6e, 0x09, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnostic.go b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnostic.go new file mode 100644 index 0000000000..ffeb98d607 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/networkdbdiagnostic.go @@ -0,0 +1,413 @@ +package networkdb + +import ( + "encoding/base64" + "fmt" + "net/http" + "strings" + + "github.com/docker/libnetwork/common" + "github.com/docker/libnetwork/diagnostic" + "github.com/sirupsen/logrus" +) + +const ( + missingParameter = "missing parameter" + dbNotAvailable = "database not available" +) + +// NetDbPaths2Func TODO +var NetDbPaths2Func = map[string]diagnostic.HTTPHandlerFunc{ + "/join": dbJoin, + "/networkpeers": dbPeers, + "/clusterpeers": dbClusterPeers, + "/joinnetwork": dbJoinNetwork, + "/leavenetwork": dbLeaveNetwork, + "/createentry": dbCreateEntry, + "/updateentry": dbUpdateEntry, + "/deleteentry": dbDeleteEntry, + "/getentry": dbGetEntry, + "/gettable": dbGetTable, +} + +func dbJoin(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("join cluster") + + if len(r.Form["members"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?members=ip1,ip2,...", r.URL.Path)) + log.Error("join cluster failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + nDB, ok := ctx.(*NetworkDB) + if ok { + err := nDB.Join(strings.Split(r.Form["members"][0], ",")) + if err != nil { + rsp := diagnostic.FailCommand(fmt.Errorf("%s error in the DB join %s", r.URL.Path, err)) 
+ log.WithError(err).Error("join cluster failed") + diagnostic.HTTPReply(w, rsp, json) + return + } + + log.Info("join cluster done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("network peers") + + if len(r.Form["nid"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=test", r.URL.Path)) + log.Error("network peers failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + nDB, ok := ctx.(*NetworkDB) + if ok { + peers := nDB.Peers(r.Form["nid"][0]) + rsp := &diagnostic.TableObj{Length: len(peers)} + for i, peerInfo := range peers { + if peerInfo.IP == "unknown" { + rsp.Elements = append(rsp.Elements, &diagnostic.PeerEntryObj{Index: i, Name: "orphan-" + peerInfo.Name, IP: peerInfo.IP}) + } else { + rsp.Elements = append(rsp.Elements, &diagnostic.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP}) + } + } + log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("network peers done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbClusterPeers(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("cluster peers") + + nDB, ok := ctx.(*NetworkDB) + if ok { + peers := nDB.ClusterPeers() + rsp := &diagnostic.TableObj{Length: len(peers)} + for i, peerInfo := range peers { + rsp.Elements = append(rsp.Elements, &diagnostic.PeerEntryObj{Index: i, Name: peerInfo.Name, IP: peerInfo.IP}) + } + log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("cluster peers done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbCreateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + unsafe, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("create entry") + + if len(r.Form["tname"]) < 1 || + len(r.Form["nid"]) < 1 || + len(r.Form["key"]) < 1 || + len(r.Form["value"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path)) + log.Error("create entry failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + tname := r.Form["tname"][0] + nid := r.Form["nid"][0] + key := r.Form["key"][0] + value := r.Form["value"][0] + decodedValue := []byte(value) + if !unsafe { + var err error + decodedValue, err = base64.StdEncoding.DecodeString(value) + if err != nil { + log.WithError(err).Error("create entry failed") + diagnostic.HTTPReply(w, 
diagnostic.FailCommand(err), json) + return + } + } + + nDB, ok := ctx.(*NetworkDB) + if ok { + if err := nDB.CreateEntry(tname, nid, key, decodedValue); err != nil { + rsp := diagnostic.FailCommand(err) + diagnostic.HTTPReply(w, rsp, json) + log.WithError(err).Error("create entry failed") + return + } + log.Info("create entry done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbUpdateEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + unsafe, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("update entry") + + if len(r.Form["tname"]) < 1 || + len(r.Form["nid"]) < 1 || + len(r.Form["key"]) < 1 || + len(r.Form["value"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k&value=v", r.URL.Path)) + log.Error("update entry failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + tname := r.Form["tname"][0] + nid := r.Form["nid"][0] + key := r.Form["key"][0] + value := r.Form["value"][0] + decodedValue := []byte(value) + if !unsafe { + var err error + decodedValue, err = base64.StdEncoding.DecodeString(value) + if err != nil { + log.WithError(err).Error("update entry failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + } + + nDB, ok := ctx.(*NetworkDB) + if ok { + if err := nDB.UpdateEntry(tname, nid, key, decodedValue); err != nil { + log.WithError(err).Error("update entry failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + log.Info("update entry done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbDeleteEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("delete entry") + + if len(r.Form["tname"]) < 1 || + len(r.Form["nid"]) < 1 || + len(r.Form["key"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path)) + log.Error("delete entry failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + tname := r.Form["tname"][0] + nid := r.Form["nid"][0] + key := r.Form["key"][0] + + nDB, ok := ctx.(*NetworkDB) + if ok { + err := nDB.DeleteEntry(tname, nid, key) + if err != nil { + log.WithError(err).Error("delete entry failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + log.Info("delete entry done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbGetEntry(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + unsafe, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": 
common.CallerName(0), "url": r.URL.String()}) + log.Info("get entry") + + if len(r.Form["tname"]) < 1 || + len(r.Form["nid"]) < 1 || + len(r.Form["key"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id&key=k", r.URL.Path)) + log.Error("get entry failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + tname := r.Form["tname"][0] + nid := r.Form["nid"][0] + key := r.Form["key"][0] + + nDB, ok := ctx.(*NetworkDB) + if ok { + value, err := nDB.GetEntry(tname, nid, key) + if err != nil { + log.WithError(err).Error("get entry failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + + var encodedValue string + if unsafe { + encodedValue = string(value) + } else { + encodedValue = base64.StdEncoding.EncodeToString(value) + } + + rsp := &diagnostic.TableEntryObj{Key: key, Value: encodedValue} + log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get entry done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbJoinNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("join network") + + if len(r.Form["nid"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path)) + log.Error("join network failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + nid := r.Form["nid"][0] + + nDB, ok := ctx.(*NetworkDB) + if ok { + if err := nDB.JoinNetwork(nid); err != nil { + log.WithError(err).Error("join network failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + log.Info("join network done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbLeaveNetwork(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + _, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), "url": r.URL.String()}) + log.Info("leave network") + + if len(r.Form["nid"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?nid=network_id", r.URL.Path)) + log.Error("leave network failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + nid := r.Form["nid"][0] + + nDB, ok := ctx.(*NetworkDB) + if ok { + if err := nDB.LeaveNetwork(nid); err != nil { + log.WithError(err).Error("leave network failed") + diagnostic.HTTPReply(w, diagnostic.FailCommand(err), json) + return + } + log.Info("leave network done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(nil), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} + +func dbGetTable(ctx interface{}, w http.ResponseWriter, r *http.Request) { + r.ParseForm() + diagnostic.DebugHTTPForm(r) + unsafe, json := diagnostic.ParseHTTPFormOptions(r) + + // audit logs + log := logrus.WithFields(logrus.Fields{"component": "diagnostic", "remoteIP": r.RemoteAddr, "method": common.CallerName(0), 
"url": r.URL.String()}) + log.Info("get table") + + if len(r.Form["tname"]) < 1 || + len(r.Form["nid"]) < 1 { + rsp := diagnostic.WrongCommand(missingParameter, fmt.Sprintf("%s?tname=table_name&nid=network_id", r.URL.Path)) + log.Error("get table failed, wrong input") + diagnostic.HTTPReply(w, rsp, json) + return + } + + tname := r.Form["tname"][0] + nid := r.Form["nid"][0] + + nDB, ok := ctx.(*NetworkDB) + if ok { + table := nDB.GetTableByNetwork(tname, nid) + rsp := &diagnostic.TableObj{Length: len(table)} + var i = 0 + for k, v := range table { + var encodedValue string + if unsafe { + encodedValue = string(v.Value) + } else { + encodedValue = base64.StdEncoding.EncodeToString(v.Value) + } + rsp.Elements = append(rsp.Elements, + &diagnostic.TableEntryObj{ + Index: i, + Key: k, + Value: encodedValue, + Owner: v.owner, + }) + i++ + } + log.WithField("response", fmt.Sprintf("%+v", rsp)).Info("get table done") + diagnostic.HTTPReply(w, diagnostic.CommandSucceed(rsp), json) + return + } + diagnostic.HTTPReply(w, diagnostic.FailCommand(fmt.Errorf("%s", dbNotAvailable)), json) +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go new file mode 100644 index 0000000000..f5a7498522 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/nodemgmt.go @@ -0,0 +1,120 @@ +package networkdb + +import ( + "fmt" + + "github.com/hashicorp/memberlist" + "github.com/sirupsen/logrus" +) + +type nodeState int + +const ( + nodeNotFound nodeState = -1 + nodeActiveState nodeState = 0 + nodeLeftState nodeState = 1 + nodeFailedState nodeState = 2 +) + +var nodeStateName = map[nodeState]string{ + -1: "NodeNotFound", + 0: "NodeActive", + 1: "NodeLeft", + 2: "NodeFailed", +} + +// findNode search the node into the 3 node lists and returns the node pointer and the list +// where it got found +func (nDB *NetworkDB) findNode(nodeName string) (*node, nodeState, map[string]*node) { + for i, nodes := range []map[string]*node{ + nDB.nodes, + nDB.leftNodes, + nDB.failedNodes, + } { + if n, ok := nodes[nodeName]; ok { + return n, nodeState(i), nodes + } + } + return nil, nodeNotFound, nil +} + +// changeNodeState changes the state of the node specified, returns true if the node was moved, +// false if there was no need to change the node state. Error will be returned if the node does not +// exists +func (nDB *NetworkDB) changeNodeState(nodeName string, newState nodeState) (bool, error) { + n, currState, m := nDB.findNode(nodeName) + if n == nil { + return false, fmt.Errorf("node %s not found", nodeName) + } + + switch newState { + case nodeActiveState: + if currState == nodeActiveState { + return false, nil + } + + delete(m, nodeName) + // reset the node reap time + n.reapTime = 0 + nDB.nodes[nodeName] = n + case nodeLeftState: + if currState == nodeLeftState { + return false, nil + } + + delete(m, nodeName) + nDB.leftNodes[nodeName] = n + case nodeFailedState: + if currState == nodeFailedState { + return false, nil + } + + delete(m, nodeName) + nDB.failedNodes[nodeName] = n + } + + logrus.Infof("Node %s change state %s --> %s", nodeName, nodeStateName[currState], nodeStateName[newState]) + + if newState == nodeLeftState || newState == nodeFailedState { + // set the node reap time, if not already set + // It is possible that a node passes from failed to left and the reaptime was already set so keep that value + if n.reapTime == 0 { + n.reapTime = nodeReapInterval + } + // The node leave or fails, delete all the entries created by it. 
+ // If the node was temporary down, deleting the entries will guarantee that the CREATE events will be accepted + // If the node instead left because was going down, then it makes sense to just delete all its state + nDB.deleteNodeFromNetworks(n.Name) + nDB.deleteNodeTableEntries(n.Name) + } + + return true, nil +} + +func (nDB *NetworkDB) purgeReincarnation(mn *memberlist.Node) bool { + for name, node := range nDB.nodes { + if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { + logrus.Infof("Node %s/%s, is the new incarnation of the active node %s/%s", mn.Name, mn.Addr, name, node.Addr) + nDB.changeNodeState(name, nodeLeftState) + return true + } + } + + for name, node := range nDB.failedNodes { + if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { + logrus.Infof("Node %s/%s, is the new incarnation of the failed node %s/%s", mn.Name, mn.Addr, name, node.Addr) + nDB.changeNodeState(name, nodeLeftState) + return true + } + } + + for name, node := range nDB.leftNodes { + if node.Addr.Equal(mn.Addr) && node.Port == mn.Port && mn.Name != name { + logrus.Infof("Node %s/%s, is the new incarnation of the shutdown node %s/%s", mn.Name, mn.Addr, name, node.Addr) + nDB.changeNodeState(name, nodeLeftState) + return true + } + } + + return false +} diff --git a/vendor/github.com/docker/libnetwork/networkdb/watch.go b/vendor/github.com/docker/libnetwork/networkdb/watch.go new file mode 100644 index 0000000000..2ef30422a8 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/networkdb/watch.go @@ -0,0 +1,110 @@ +package networkdb + +import ( + "net" + + "github.com/docker/go-events" +) + +type opType uint8 + +const ( + opCreate opType = 1 + iota + opUpdate + opDelete +) + +type event struct { + Table string + NetworkID string + Key string + Value []byte +} + +// NodeTable represents table event for node join and leave +const NodeTable = "NodeTable" + +// NodeAddr represents the value carried for node event in NodeTable +type NodeAddr struct { + Addr net.IP +} + +// CreateEvent generates a table entry create event to the watchers +type CreateEvent event + +// UpdateEvent generates a table entry update event to the watchers +type UpdateEvent event + +// DeleteEvent generates a table entry delete event to the watchers +type DeleteEvent event + +// Watch creates a watcher with filters for a particular table or +// network or key or any combination of the tuple. If any of the +// filter is an empty string it acts as a wildcard for that +// field. Watch returns a channel of events, where the events will be +// sent. 
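+//
+// An illustrative use (the table name here is hypothetical, not part of the
+// upstream documentation):
+//
+//	ch, cancel := nDB.Watch("endpoint_table", "", "")
+//	defer cancel()
+//	ev := <-ch.C
+//	if create, ok := ev.(CreateEvent); ok {
+//		fmt.Printf("created key %s in network %s\n", create.Key, create.NetworkID)
+//	}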
+func (nDB *NetworkDB) Watch(tname, nid, key string) (*events.Channel, func()) { + var matcher events.Matcher + + if tname != "" || nid != "" || key != "" { + matcher = events.MatcherFunc(func(ev events.Event) bool { + var evt event + switch ev := ev.(type) { + case CreateEvent: + evt = event(ev) + case UpdateEvent: + evt = event(ev) + case DeleteEvent: + evt = event(ev) + } + + if tname != "" && evt.Table != tname { + return false + } + + if nid != "" && evt.NetworkID != nid { + return false + } + + if key != "" && evt.Key != key { + return false + } + + return true + }) + } + + ch := events.NewChannel(0) + sink := events.Sink(events.NewQueue(ch)) + + if matcher != nil { + sink = events.NewFilter(sink, matcher) + } + + nDB.broadcaster.Add(sink) + return ch, func() { + nDB.broadcaster.Remove(sink) + ch.Close() + sink.Close() + } +} + +func makeEvent(op opType, tname, nid, key string, value []byte) events.Event { + ev := event{ + Table: tname, + NetworkID: nid, + Key: key, + Value: value, + } + + switch op { + case opCreate: + return CreateEvent(ev) + case opUpdate: + return UpdateEvent(ev) + case opDelete: + return DeleteEvent(ev) + } + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/ns/init_linux.go b/vendor/github.com/docker/libnetwork/ns/init_linux.go new file mode 100644 index 0000000000..567a6242ac --- /dev/null +++ b/vendor/github.com/docker/libnetwork/ns/init_linux.go @@ -0,0 +1,140 @@ +package ns + +import ( + "fmt" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" + + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +var ( + initNs netns.NsHandle + initNl *netlink.Handle + initOnce sync.Once + // NetlinkSocketsTimeout represents the default timeout duration for the sockets + NetlinkSocketsTimeout = 3 * time.Second +) + +// Init initializes a new network namespace +func Init() { + var err error + initNs, err = netns.Get() + if err != nil { + logrus.Errorf("could not get initial namespace: %v", err) + } + initNl, err = netlink.NewHandle(getSupportedNlFamilies()...) 
+ if err != nil { + logrus.Errorf("could not create netlink handle on initial namespace: %v", err) + } + err = initNl.SetSocketTimeout(NetlinkSocketsTimeout) + if err != nil { + logrus.Warnf("Failed to set the timeout on the default netlink handle sockets: %v", err) + } +} + +// SetNamespace sets the initial namespace handler +func SetNamespace() error { + initOnce.Do(Init) + if err := netns.Set(initNs); err != nil { + linkInfo, linkErr := getLink() + if linkErr != nil { + linkInfo = linkErr.Error() + } + return fmt.Errorf("failed to set to initial namespace, %v, initns fd %d: %v", linkInfo, initNs, err) + } + return nil +} + +// ParseHandlerInt transforms the namespace handler into an integer +func ParseHandlerInt() int { + return int(getHandler()) +} + +// GetHandler returns the namespace handler +func getHandler() netns.NsHandle { + initOnce.Do(Init) + return initNs +} + +func getLink() (string, error) { + return os.Readlink(fmt.Sprintf("/proc/%d/task/%d/ns/net", os.Getpid(), syscall.Gettid())) +} + +// NlHandle returns the netlink handler +func NlHandle() *netlink.Handle { + initOnce.Do(Init) + return initNl +} + +func getSupportedNlFamilies() []int { + fams := []int{syscall.NETLINK_ROUTE} + // NETLINK_XFRM test + if err := loadXfrmModules(); err != nil { + if checkXfrmSocket() != nil { + logrus.Warnf("Could not load necessary modules for IPSEC rules: %v", err) + } else { + fams = append(fams, syscall.NETLINK_XFRM) + } + } else { + fams = append(fams, syscall.NETLINK_XFRM) + } + // NETLINK_NETFILTER test + if err := loadNfConntrackModules(); err != nil { + if checkNfSocket() != nil { + logrus.Warnf("Could not load necessary modules for Conntrack: %v", err) + } else { + fams = append(fams, syscall.NETLINK_NETFILTER) + } + } else { + fams = append(fams, syscall.NETLINK_NETFILTER) + } + + return fams +} + +func loadXfrmModules() error { + if out, err := exec.Command("modprobe", "-va", "xfrm_user").CombinedOutput(); err != nil { + return fmt.Errorf("Running modprobe xfrm_user failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } + if out, err := exec.Command("modprobe", "-va", "xfrm_algo").CombinedOutput(); err != nil { + return fmt.Errorf("Running modprobe xfrm_algo failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } + return nil +} + +// API check on required xfrm modules (xfrm_user, xfrm_algo) +func checkXfrmSocket() error { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_XFRM) + if err != nil { + return err + } + syscall.Close(fd) + return nil +} + +func loadNfConntrackModules() error { + if out, err := exec.Command("modprobe", "-va", "nf_conntrack").CombinedOutput(); err != nil { + return fmt.Errorf("Running modprobe nf_conntrack failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } + if out, err := exec.Command("modprobe", "-va", "nf_conntrack_netlink").CombinedOutput(); err != nil { + return fmt.Errorf("Running modprobe nf_conntrack_netlink failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err) + } + return nil +} + +// API check on required nf_conntrack* modules (nf_conntrack, nf_conntrack_netlink) +func checkNfSocket() error { + fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, syscall.NETLINK_NETFILTER) + if err != nil { + return err + } + syscall.Close(fd) + return nil +} diff --git a/vendor/github.com/docker/libnetwork/options/options.go b/vendor/github.com/docker/libnetwork/options/options.go new file mode 100644 index 
 0000000000..06d8ae5902
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/options/options.go
@@ -0,0 +1,88 @@
+// Package options provides a way to pass unstructured sets of options to a
+// component expecting a strongly-typed configuration structure.
+package options
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// NoSuchFieldError is the error returned when the generic parameters hold a
+// value for a field absent from the destination structure.
+type NoSuchFieldError struct {
+	Field string
+	Type  string
+}
+
+func (e NoSuchFieldError) Error() string {
+	return fmt.Sprintf("no field %q in type %q", e.Field, e.Type)
+}
+
+// CannotSetFieldError is the error returned when the generic parameters hold a
+// value for a field that cannot be set in the destination structure.
+type CannotSetFieldError struct {
+	Field string
+	Type  string
+}
+
+func (e CannotSetFieldError) Error() string {
+	return fmt.Sprintf("cannot set field %q of type %q", e.Field, e.Type)
+}
+
+// TypeMismatchError is the error returned when the type of the generic value
+// for a field mismatches the type of the destination structure.
+type TypeMismatchError struct {
+	Field      string
+	ExpectType string
+	ActualType string
+}
+
+func (e TypeMismatchError) Error() string {
+	return fmt.Sprintf("type mismatch, field %s require type %v, actual type %v", e.Field, e.ExpectType, e.ActualType)
+}
+
+// Generic is a basic type to store arbitrary settings.
+type Generic map[string]interface{}
+
+// NewGeneric returns a new Generic instance.
+func NewGeneric() Generic {
+	return make(Generic)
+}
+
+// GenerateFromModel takes the generic options, and tries to build a new
+// instance of the model's type by matching keys from the generic options to
+// fields in the model.
+//
+// The return value is of the same type as the model (including a potential
+// pointer qualifier).
+func GenerateFromModel(options Generic, model interface{}) (interface{}, error) {
+	modType := reflect.TypeOf(model)
+
+	// If the model is of pointer type, we need to dereference for New.
+	resType := reflect.TypeOf(model)
+	if modType.Kind() == reflect.Ptr {
+		resType = resType.Elem()
+	}
+
+	// Populate the result structure with the generic layout content.
+	res := reflect.New(resType)
+	for name, value := range options {
+		field := res.Elem().FieldByName(name)
+		if !field.IsValid() {
+			return nil, NoSuchFieldError{name, resType.String()}
+		}
+		if !field.CanSet() {
+			return nil, CannotSetFieldError{name, resType.String()}
+		}
+		if reflect.TypeOf(value) != field.Type() {
+			return nil, TypeMismatchError{name, field.Type().String(), reflect.TypeOf(value).String()}
+		}
+		field.Set(reflect.ValueOf(value))
+	}
+
+	// If the model is not of pointer type, return content of the result.
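+	// As an illustration (hypothetical type, not part of this package): given
+	//	type Config struct{ MTU int }
+	// GenerateFromModel(Generic{"MTU": 1500}, &Config{}) returns a *Config with
+	// MTU set, while passing Config{} as the model returns a plain Config value.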
+ if modType.Kind() == reflect.Ptr { + return res.Interface(), nil + } + return res.Elem().Interface(), nil +} diff --git a/vendor/github.com/docker/libnetwork/osl/interface_freebsd.go b/vendor/github.com/docker/libnetwork/osl/interface_freebsd.go new file mode 100644 index 0000000000..9c0141fd9b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/interface_freebsd.go @@ -0,0 +1,4 @@ +package osl + +// IfaceOption is a function option type to set interface options +type IfaceOption func() diff --git a/vendor/github.com/docker/libnetwork/osl/interface_linux.go b/vendor/github.com/docker/libnetwork/osl/interface_linux.go new file mode 100644 index 0000000000..0ecda09f6e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/interface_linux.go @@ -0,0 +1,450 @@ +package osl + +import ( + "fmt" + "net" + "regexp" + "sync" + "syscall" + "time" + + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +// IfaceOption is a function option type to set interface options +type IfaceOption func(i *nwIface) + +type nwIface struct { + srcName string + dstName string + master string + dstMaster string + mac net.HardwareAddr + address *net.IPNet + addressIPv6 *net.IPNet + llAddrs []*net.IPNet + routes []*net.IPNet + bridge bool + ns *networkNamespace + sync.Mutex +} + +func (i *nwIface) SrcName() string { + i.Lock() + defer i.Unlock() + + return i.srcName +} + +func (i *nwIface) DstName() string { + i.Lock() + defer i.Unlock() + + return i.dstName +} + +func (i *nwIface) DstMaster() string { + i.Lock() + defer i.Unlock() + + return i.dstMaster +} + +func (i *nwIface) Bridge() bool { + i.Lock() + defer i.Unlock() + + return i.bridge +} + +func (i *nwIface) Master() string { + i.Lock() + defer i.Unlock() + + return i.master +} + +func (i *nwIface) MacAddress() net.HardwareAddr { + i.Lock() + defer i.Unlock() + + return types.GetMacCopy(i.mac) +} + +func (i *nwIface) Address() *net.IPNet { + i.Lock() + defer i.Unlock() + + return types.GetIPNetCopy(i.address) +} + +func (i *nwIface) AddressIPv6() *net.IPNet { + i.Lock() + defer i.Unlock() + + return types.GetIPNetCopy(i.addressIPv6) +} + +func (i *nwIface) LinkLocalAddresses() []*net.IPNet { + i.Lock() + defer i.Unlock() + + return i.llAddrs +} + +func (i *nwIface) Routes() []*net.IPNet { + i.Lock() + defer i.Unlock() + + routes := make([]*net.IPNet, len(i.routes)) + for index, route := range i.routes { + r := types.GetIPNetCopy(route) + routes[index] = r + } + + return routes +} + +func (n *networkNamespace) Interfaces() []Interface { + n.Lock() + defer n.Unlock() + + ifaces := make([]Interface, len(n.iFaces)) + + for i, iface := range n.iFaces { + ifaces[i] = iface + } + + return ifaces +} + +func (i *nwIface) Remove() error { + i.Lock() + n := i.ns + i.Unlock() + + n.Lock() + isDefault := n.isDefault + nlh := n.nlHandle + n.Unlock() + + // Find the network interface identified by the DstName attribute. + iface, err := nlh.LinkByName(i.DstName()) + if err != nil { + return err + } + + // Down the interface before configuring + if err := nlh.LinkSetDown(iface); err != nil { + return err + } + + err = nlh.LinkSetName(iface, i.SrcName()) + if err != nil { + logrus.Debugf("LinkSetName failed for interface %s: %v", i.SrcName(), err) + return err + } + + // if it is a bridge just delete it. 
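+	// (the bridge was created inside this namespace by AddInterface, so there
+	// is no caller namespace to move it back to)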
+ if i.Bridge() { + if err := nlh.LinkDel(iface); err != nil { + return fmt.Errorf("failed deleting bridge %q: %v", i.SrcName(), err) + } + } else if !isDefault { + // Move the network interface to caller namespace. + if err := nlh.LinkSetNsFd(iface, ns.ParseHandlerInt()); err != nil { + logrus.Debugf("LinkSetNsPid failed for interface %s: %v", i.SrcName(), err) + return err + } + } + + n.Lock() + for index, intf := range n.iFaces { + if intf == i { + n.iFaces = append(n.iFaces[:index], n.iFaces[index+1:]...) + break + } + } + n.Unlock() + + n.checkLoV6() + + return nil +} + +// Returns the sandbox's side veth interface statistics +func (i *nwIface) Statistics() (*types.InterfaceStatistics, error) { + i.Lock() + n := i.ns + i.Unlock() + + l, err := n.nlHandle.LinkByName(i.DstName()) + if err != nil { + return nil, fmt.Errorf("failed to retrieve the statistics for %s in netns %s: %v", i.DstName(), n.path, err) + } + + stats := l.Attrs().Statistics + if stats == nil { + return nil, fmt.Errorf("no statistics were returned") + } + + return &types.InterfaceStatistics{ + RxBytes: uint64(stats.RxBytes), + TxBytes: uint64(stats.TxBytes), + RxPackets: uint64(stats.RxPackets), + TxPackets: uint64(stats.TxPackets), + RxDropped: uint64(stats.RxDropped), + TxDropped: uint64(stats.TxDropped), + }, nil +} + +func (n *networkNamespace) findDst(srcName string, isBridge bool) string { + n.Lock() + defer n.Unlock() + + for _, i := range n.iFaces { + // The master should match the srcname of the interface and the + // master interface should be of type bridge, if searching for a bridge type + if i.SrcName() == srcName && (!isBridge || i.Bridge()) { + return i.DstName() + } + } + + return "" +} + +func (n *networkNamespace) AddInterface(srcName, dstPrefix string, options ...IfaceOption) error { + i := &nwIface{srcName: srcName, dstName: dstPrefix, ns: n} + i.processInterfaceOptions(options...) + + if i.master != "" { + i.dstMaster = n.findDst(i.master, true) + if i.dstMaster == "" { + return fmt.Errorf("could not find an appropriate master %q for %q", + i.master, i.srcName) + } + } + + n.Lock() + if n.isDefault { + i.dstName = i.srcName + } else { + i.dstName = fmt.Sprintf("%s%d", dstPrefix, n.nextIfIndex[dstPrefix]) + n.nextIfIndex[dstPrefix]++ + } + + path := n.path + isDefault := n.isDefault + nlh := n.nlHandle + nlhHost := ns.NlHandle() + n.Unlock() + + // If it is a bridge interface we have to create the bridge inside + // the namespace so don't try to lookup the interface using srcName + if i.bridge { + link := &netlink.Bridge{ + LinkAttrs: netlink.LinkAttrs{ + Name: i.srcName, + }, + } + if err := nlh.LinkAdd(link); err != nil { + return fmt.Errorf("failed to create bridge %q: %v", i.srcName, err) + } + } else { + // Find the network interface identified by the SrcName attribute. + iface, err := nlhHost.LinkByName(i.srcName) + if err != nil { + return fmt.Errorf("failed to get link by name %q: %v", i.srcName, err) + } + + // Move the network interface to the destination + // namespace only if the namespace is not a default + // type + if !isDefault { + newNs, err := netns.GetFromPath(path) + if err != nil { + return fmt.Errorf("failed get network namespace %q: %v", path, err) + } + defer newNs.Close() + if err := nlhHost.LinkSetNsFd(iface, int(newNs)); err != nil { + return fmt.Errorf("failed to set namespace on link %q: %v", i.srcName, err) + } + } + } + + // Find the network interface identified by the SrcName attribute. 
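+	// Note: this lookup deliberately goes through nlh, the handle bound to the
+	// target namespace, unlike the earlier lookup that used the host handle.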
+ iface, err := nlh.LinkByName(i.srcName) + if err != nil { + return fmt.Errorf("failed to get link by name %q: %v", i.srcName, err) + } + + // Down the interface before configuring + if err := nlh.LinkSetDown(iface); err != nil { + return fmt.Errorf("failed to set link down: %v", err) + } + + // Configure the interface now this is moved in the proper namespace. + if err := configureInterface(nlh, iface, i); err != nil { + return err + } + + // Up the interface. + cnt := 0 + for err = nlh.LinkSetUp(iface); err != nil && cnt < 3; cnt++ { + logrus.Debugf("retrying link setup because of: %v", err) + time.Sleep(10 * time.Millisecond) + err = nlh.LinkSetUp(iface) + } + if err != nil { + return fmt.Errorf("failed to set link up: %v", err) + } + + // Set the routes on the interface. This can only be done when the interface is up. + if err := setInterfaceRoutes(nlh, iface, i); err != nil { + return fmt.Errorf("error setting interface %q routes to %q: %v", iface.Attrs().Name, i.Routes(), err) + } + + n.Lock() + n.iFaces = append(n.iFaces, i) + n.Unlock() + + n.checkLoV6() + + return nil +} + +func configureInterface(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + ifaceName := iface.Attrs().Name + ifaceConfigurators := []struct { + Fn func(*netlink.Handle, netlink.Link, *nwIface) error + ErrMessage string + }{ + {setInterfaceName, fmt.Sprintf("error renaming interface %q to %q", ifaceName, i.DstName())}, + {setInterfaceMAC, fmt.Sprintf("error setting interface %q MAC to %q", ifaceName, i.MacAddress())}, + {setInterfaceIP, fmt.Sprintf("error setting interface %q IP to %v", ifaceName, i.Address())}, + {setInterfaceIPv6, fmt.Sprintf("error setting interface %q IPv6 to %v", ifaceName, i.AddressIPv6())}, + {setInterfaceMaster, fmt.Sprintf("error setting interface %q master to %q", ifaceName, i.DstMaster())}, + {setInterfaceLinkLocalIPs, fmt.Sprintf("error setting interface %q link local IPs to %v", ifaceName, i.LinkLocalAddresses())}, + } + + for _, config := range ifaceConfigurators { + if err := config.Fn(nlh, iface, i); err != nil { + return fmt.Errorf("%s: %v", config.ErrMessage, err) + } + } + return nil +} + +func setInterfaceMaster(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + if i.DstMaster() == "" { + return nil + } + + return nlh.LinkSetMaster(iface, &netlink.Bridge{ + LinkAttrs: netlink.LinkAttrs{Name: i.DstMaster()}}) +} + +func setInterfaceMAC(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + if i.MacAddress() == nil { + return nil + } + return nlh.LinkSetHardwareAddr(iface, i.MacAddress()) +} + +func setInterfaceIP(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + if i.Address() == nil { + return nil + } + if err := checkRouteConflict(nlh, i.Address(), netlink.FAMILY_V4); err != nil { + return err + } + ipAddr := &netlink.Addr{IPNet: i.Address(), Label: ""} + return nlh.AddrAdd(iface, ipAddr) +} + +func setInterfaceIPv6(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + if i.AddressIPv6() == nil { + return nil + } + if err := checkRouteConflict(nlh, i.AddressIPv6(), netlink.FAMILY_V6); err != nil { + return err + } + if err := setIPv6(i.ns.path, i.DstName(), true); err != nil { + return fmt.Errorf("failed to enable ipv6: %v", err) + } + ipAddr := &netlink.Addr{IPNet: i.AddressIPv6(), Label: "", Flags: syscall.IFA_F_NODAD} + return nlh.AddrAdd(iface, ipAddr) +} + +func setInterfaceLinkLocalIPs(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + for _, llIP := range i.LinkLocalAddresses() { + ipAddr := 
&netlink.Addr{IPNet: llIP} + if err := nlh.AddrAdd(iface, ipAddr); err != nil { + return err + } + } + return nil +} + +func setInterfaceName(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + return nlh.LinkSetName(iface, i.DstName()) +} + +func setInterfaceRoutes(nlh *netlink.Handle, iface netlink.Link, i *nwIface) error { + for _, route := range i.Routes() { + err := nlh.RouteAdd(&netlink.Route{ + Scope: netlink.SCOPE_LINK, + LinkIndex: iface.Attrs().Index, + Dst: route, + }) + if err != nil { + return err + } + } + return nil +} + +// In older kernels (like the one in Centos 6.6 distro) sysctl does not have netns support. Therefore +// we cannot gather the statistics from /sys/class/net//statistics/ files. Per-netns stats +// are naturally found in /proc/net/dev in kernels which support netns (ifconfig relies on that). +const ( + netStatsFile = "/proc/net/dev" + base = "[ ]*%s:([ ]+[0-9]+){16}" +) + +func scanInterfaceStats(data, ifName string, i *types.InterfaceStatistics) error { + var ( + bktStr string + bkt uint64 + ) + + regex := fmt.Sprintf(base, ifName) + re := regexp.MustCompile(regex) + line := re.FindString(data) + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d", + &bktStr, &i.RxBytes, &i.RxPackets, &i.RxErrors, &i.RxDropped, &bkt, &bkt, &bkt, + &bkt, &i.TxBytes, &i.TxPackets, &i.TxErrors, &i.TxDropped, &bkt, &bkt, &bkt, &bkt) + + return err +} + +func checkRouteConflict(nlh *netlink.Handle, address *net.IPNet, family int) error { + routes, err := nlh.RouteList(nil, family) + if err != nil { + return err + } + for _, route := range routes { + if route.Dst != nil { + if route.Dst.Contains(address.IP) || address.Contains(route.Dst.IP) { + return fmt.Errorf("cannot program address %v in sandbox interface because it conflicts with existing route %s", + address, route) + } + } + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/osl/interface_windows.go b/vendor/github.com/docker/libnetwork/osl/interface_windows.go new file mode 100644 index 0000000000..9c0141fd9b --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/interface_windows.go @@ -0,0 +1,4 @@ +package osl + +// IfaceOption is a function option type to set interface options +type IfaceOption func() diff --git a/vendor/github.com/docker/libnetwork/osl/namespace_linux.go b/vendor/github.com/docker/libnetwork/osl/namespace_linux.go new file mode 100644 index 0000000000..6389215e1a --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/namespace_linux.go @@ -0,0 +1,628 @@ +package osl + +import ( + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libnetwork/ns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + "github.com/vishvananda/netns" +) + +const defaultPrefix = "/var/run/docker" + +func init() { + reexec.Register("set-ipv6", reexecSetIPv6) +} + +var ( + once sync.Once + garbagePathMap = make(map[string]bool) + gpmLock sync.Mutex + gpmWg sync.WaitGroup + gpmCleanupPeriod = 60 * time.Second + gpmChan = make(chan chan struct{}) + prefix = defaultPrefix +) + +// The networkNamespace type is the linux implementation of the Sandbox +// interface. It represents a linux network namespace, and moves an interface +// into it when called on method AddInterface or sets the gateway etc. 
+type networkNamespace struct { + path string + iFaces []*nwIface + gw net.IP + gwv6 net.IP + staticRoutes []*types.StaticRoute + neighbors []*neigh + nextIfIndex map[string]int + isDefault bool + nlHandle *netlink.Handle + loV6Enabled bool + sync.Mutex +} + +// SetBasePath sets the base url prefix for the ns path +func SetBasePath(path string) { + prefix = path +} + +func init() { + reexec.Register("netns-create", reexecCreateNamespace) +} + +func basePath() string { + return filepath.Join(prefix, "netns") +} + +func createBasePath() { + err := os.MkdirAll(basePath(), 0755) + if err != nil { + panic("Could not create net namespace path directory") + } + + // Start the garbage collection go routine + go removeUnusedPaths() +} + +func removeUnusedPaths() { + gpmLock.Lock() + period := gpmCleanupPeriod + gpmLock.Unlock() + + ticker := time.NewTicker(period) + for { + var ( + gc chan struct{} + gcOk bool + ) + + select { + case <-ticker.C: + case gc, gcOk = <-gpmChan: + } + + gpmLock.Lock() + pathList := make([]string, 0, len(garbagePathMap)) + for path := range garbagePathMap { + pathList = append(pathList, path) + } + garbagePathMap = make(map[string]bool) + gpmWg.Add(1) + gpmLock.Unlock() + + for _, path := range pathList { + os.Remove(path) + } + + gpmWg.Done() + if gcOk { + close(gc) + } + } +} + +func addToGarbagePaths(path string) { + gpmLock.Lock() + garbagePathMap[path] = true + gpmLock.Unlock() +} + +func removeFromGarbagePaths(path string) { + gpmLock.Lock() + delete(garbagePathMap, path) + gpmLock.Unlock() +} + +// GC triggers garbage collection of namespace path right away +// and waits for it. +func GC() { + gpmLock.Lock() + if len(garbagePathMap) == 0 { + // No need for GC if map is empty + gpmLock.Unlock() + return + } + gpmLock.Unlock() + + // if content exists in the garbage paths + // we can trigger GC to run, providing a + // channel to be notified on completion + waitGC := make(chan struct{}) + gpmChan <- waitGC + // wait for GC completion + <-waitGC +} + +// GenerateKey generates a sandbox key based on the passed +// container id. 
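+//
+// For instance (illustrative): with the default prefix, a full 64-character
+// container ID maps to "/var/run/docker/netns/<first 12 characters of the ID>".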
+func GenerateKey(containerID string) string {
+	maxLen := 12
+	// Read sandbox key from host for overlay
+	if strings.HasPrefix(containerID, "-") {
+		var (
+			index    int
+			indexStr string
+			tmpkey   string
+		)
+		dir, err := ioutil.ReadDir(basePath())
+		if err != nil {
+			return ""
+		}
+
+		for _, v := range dir {
+			id := v.Name()
+			if strings.HasSuffix(id, containerID[:maxLen-1]) {
+				indexStr = strings.TrimSuffix(id, containerID[:maxLen-1])
+				tmpindex, err := strconv.Atoi(indexStr)
+				if err != nil {
+					return ""
+				}
+				if tmpindex > index {
+					index = tmpindex
+					tmpkey = id
+				}
+
+			}
+		}
+		containerID = tmpkey
+		if containerID == "" {
+			return ""
+		}
+	}
+
+	if len(containerID) < maxLen {
+		maxLen = len(containerID)
+	}
+
+	return basePath() + "/" + containerID[:maxLen]
+}
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) {
+	if !isRestore {
+		err := createNetworkNamespace(key, osCreate)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		once.Do(createBasePath)
+	}
+
+	n := &networkNamespace{path: key, isDefault: !osCreate, nextIfIndex: make(map[string]int)}
+
+	sboxNs, err := netns.GetFromPath(n.path)
+	if err != nil {
+		return nil, fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+	}
+	defer sboxNs.Close()
+
+	n.nlHandle, err = netlink.NewHandleAt(sboxNs, syscall.NETLINK_ROUTE)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create a netlink handle: %v", err)
+	}
+
+	err = n.nlHandle.SetSocketTimeout(ns.NetlinkSocketsTimeout)
+	if err != nil {
+		logrus.Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err)
+	}
+	// In live-restore mode, IPv6 entries would be cleaned up by the code below.
+	// We should retain the IPv6 configuration in live-restore mode when the
+	// Docker daemon comes back; in the other cases it should work as is.
+	// As a starting point, disable IPv6 on all interfaces
+	if !isRestore && !n.isDefault {
+		err = setIPv6(n.path, "all", false)
+		if err != nil {
+			logrus.Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err)
+		}
+	}
+
+	if err = n.loopbackUp(); err != nil {
+		n.nlHandle.Delete()
+		return nil, err
+	}
+
+	return n, nil
+}
+
+func (n *networkNamespace) InterfaceOptions() IfaceOptionSetter {
+	return n
+}
+
+func (n *networkNamespace) NeighborOptions() NeighborOptionSetter {
+	return n
+}
+
+func mountNetworkNamespace(basePath string, lnPath string) error {
+	return syscall.Mount(basePath, lnPath, "bind", syscall.MS_BIND, "")
+}
+
+// GetSandboxForExternalKey returns sandbox object for the supplied path
+func GetSandboxForExternalKey(basePath string, key string) (Sandbox, error) {
+	if err := createNamespaceFile(key); err != nil {
+		return nil, err
+	}
+
+	if err := mountNetworkNamespace(basePath, key); err != nil {
+		return nil, err
+	}
+	n := &networkNamespace{path: key, nextIfIndex: make(map[string]int)}
+
+	sboxNs, err := netns.GetFromPath(n.path)
+	if err != nil {
+		return nil, fmt.Errorf("failed get network namespace %q: %v", n.path, err)
+	}
+	defer sboxNs.Close()
+
+	n.nlHandle, err = netlink.NewHandleAt(sboxNs, syscall.NETLINK_ROUTE)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create a netlink handle: %v", err)
+	}
+
+	err = n.nlHandle.SetSocketTimeout(ns.NetlinkSocketsTimeout)
+	if err != nil {
+		logrus.Warnf("Failed to set the timeout on the sandbox netlink handle sockets: %v", err)
+	}
+
+	// As starting point, disable IPv6 on all interfaces
+	err = setIPv6(n.path, "all", false)
+	if err != nil {
+		logrus.Warnf("Failed to disable IPv6 on all interfaces on network namespace %q: %v", n.path, err)
+	}
+
+	if err = n.loopbackUp(); err != nil {
+		n.nlHandle.Delete()
+		return nil, err
+	}
+
+	return n, nil
+}
+
+func reexecCreateNamespace() {
+	if len(os.Args) < 2 {
+		logrus.Fatal("no namespace path provided")
+	}
+	if err := mountNetworkNamespace("/proc/self/ns/net", os.Args[1]); err != nil {
+		logrus.Fatal(err)
+	}
+}
+
+func createNetworkNamespace(path string, osCreate bool) error {
+	if err := createNamespaceFile(path); err != nil {
+		return err
+	}
+
+	cmd := &exec.Cmd{
+		Path:   reexec.Self(),
+		Args:   append([]string{"netns-create"}, path),
+		Stdout: os.Stdout,
+		Stderr: os.Stderr,
+	}
+	if osCreate {
+		cmd.SysProcAttr = &syscall.SysProcAttr{}
+		cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWNET
+	}
+	if err := cmd.Run(); err != nil {
+		return fmt.Errorf("namespace creation reexec command failed: %v", err)
+	}
+
+	return nil
+}
+
+func unmountNamespaceFile(path string) {
+	if _, err := os.Stat(path); err == nil {
+		syscall.Unmount(path, syscall.MNT_DETACH)
+	}
+}
+
+func createNamespaceFile(path string) (err error) {
+	var f *os.File
+
+	once.Do(createBasePath)
+	// Remove it from garbage collection list if present
+	removeFromGarbagePaths(path)
+
+	// If the path is there unmount it first
+	unmountNamespaceFile(path)
+
+	// wait for garbage collection to complete if it is in progress
+	// before trying to create the file.
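+	// (removeUnusedPaths increments gpmWg while it deletes its snapshot of the
+	// garbage paths, so Wait returns once any in-flight removals have finished)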
+	gpmWg.Wait()
+
+	if f, err = os.Create(path); err == nil {
+		f.Close()
+	}
+
+	return err
+}
+
+func (n *networkNamespace) loopbackUp() error {
+	iface, err := n.nlHandle.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return n.nlHandle.LinkSetUp(iface)
+}
+
+func (n *networkNamespace) AddLoopbackAliasIP(ip *net.IPNet) error {
+	iface, err := n.nlHandle.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return n.nlHandle.AddrAdd(iface, &netlink.Addr{IPNet: ip})
+}
+
+func (n *networkNamespace) RemoveLoopbackAliasIP(ip *net.IPNet) error {
+	iface, err := n.nlHandle.LinkByName("lo")
+	if err != nil {
+		return err
+	}
+	return n.nlHandle.AddrDel(iface, &netlink.Addr{IPNet: ip})
+}
+
+func (n *networkNamespace) InvokeFunc(f func()) error {
+	return nsInvoke(n.nsPath(), func(nsFD int) error { return nil }, func(callerFD int) error {
+		f()
+		return nil
+	})
+}
+
+// InitOSContext initializes OS context while configuring network resources
+func InitOSContext() func() {
+	runtime.LockOSThread()
+	if err := ns.SetNamespace(); err != nil {
+		logrus.Error(err)
+	}
+	return runtime.UnlockOSThread
+}
+
+func nsInvoke(path string, prefunc func(nsFD int) error, postfunc func(callerFD int) error) error {
+	defer InitOSContext()()
+
+	newNs, err := netns.GetFromPath(path)
+	if err != nil {
+		return fmt.Errorf("failed get network namespace %q: %v", path, err)
+	}
+	defer newNs.Close()
+
+	// Invoked before the namespace switch happens but after the namespace file
+	// handle is obtained.
+	if err := prefunc(int(newNs)); err != nil {
+		return fmt.Errorf("failed in prefunc: %v", err)
+	}
+
+	if err = netns.Set(newNs); err != nil {
+		return err
+	}
+	defer ns.SetNamespace()
+
+	// Invoked after the namespace switch.
+	return postfunc(ns.ParseHandlerInt())
+}
+
+func (n *networkNamespace) nsPath() string {
+	n.Lock()
+	defer n.Unlock()
+
+	return n.path
+}
+
+func (n *networkNamespace) Info() Info {
+	return n
+}
+
+func (n *networkNamespace) Key() string {
+	return n.path
+}
+
+func (n *networkNamespace) Destroy() error {
+	if n.nlHandle != nil {
+		n.nlHandle.Delete()
+	}
+	// Assuming no running process is executing in this network namespace,
+	// unmounting is sufficient to destroy it.
+	if err := syscall.Unmount(n.path, syscall.MNT_DETACH); err != nil {
+		return err
+	}
+
+	// Stash it into the garbage collection list
+	addToGarbagePaths(n.path)
+	return nil
+}
+
+// Restore restores the network namespace
+func (n *networkNamespace) Restore(ifsopt map[string][]IfaceOption, routes []*types.StaticRoute, gw net.IP, gw6 net.IP) error {
+	// restore interfaces
+	for name, opts := range ifsopt {
+		if !strings.Contains(name, "+") {
+			return fmt.Errorf("wrong iface name in restore osl sandbox interface: %s", name)
+		}
+		seps := strings.Split(name, "+")
+		srcName := seps[0]
+		dstPrefix := seps[1]
+		i := &nwIface{srcName: srcName, dstName: dstPrefix, ns: n}
+		i.processInterfaceOptions(opts...)
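+		// re-resolve the master bridge for this interface, mirroring the
+		// lookup done by AddInterface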
+ if i.master != "" { + i.dstMaster = n.findDst(i.master, true) + if i.dstMaster == "" { + return fmt.Errorf("could not find an appropriate master %q for %q", + i.master, i.srcName) + } + } + if n.isDefault { + i.dstName = i.srcName + } else { + links, err := n.nlHandle.LinkList() + if err != nil { + return fmt.Errorf("failed to retrieve list of links in network namespace %q during restore", n.path) + } + // due to the docker network connect/disconnect, so the dstName should + // restore from the namespace + for _, link := range links { + addrs, err := n.nlHandle.AddrList(link, netlink.FAMILY_V4) + if err != nil { + return err + } + ifaceName := link.Attrs().Name + if strings.HasPrefix(ifaceName, "vxlan") { + if i.dstName == "vxlan" { + i.dstName = ifaceName + break + } + } + // find the interface name by ip + if i.address != nil { + for _, addr := range addrs { + if addr.IPNet.String() == i.address.String() { + i.dstName = ifaceName + break + } + continue + } + if i.dstName == ifaceName { + break + } + } + // This is to find the interface name of the pair in overlay sandbox + if strings.HasPrefix(ifaceName, "veth") { + if i.master != "" && i.dstName == "veth" { + i.dstName = ifaceName + } + } + } + + var index int + indexStr := strings.TrimPrefix(i.dstName, dstPrefix) + if indexStr != "" { + index, err = strconv.Atoi(indexStr) + if err != nil { + return err + } + } + index++ + n.Lock() + if index > n.nextIfIndex[dstPrefix] { + n.nextIfIndex[dstPrefix] = index + } + n.iFaces = append(n.iFaces, i) + n.Unlock() + } + } + + // restore routes + for _, r := range routes { + n.Lock() + n.staticRoutes = append(n.staticRoutes, r) + n.Unlock() + } + + // restore gateway + if len(gw) > 0 { + n.Lock() + n.gw = gw + n.Unlock() + } + + if len(gw6) > 0 { + n.Lock() + n.gwv6 = gw6 + n.Unlock() + } + + return nil +} + +// Checks whether IPv6 needs to be enabled/disabled on the loopback interface +func (n *networkNamespace) checkLoV6() { + var ( + enable = false + action = "disable" + ) + + n.Lock() + for _, iface := range n.iFaces { + if iface.AddressIPv6() != nil { + enable = true + action = "enable" + break + } + } + n.Unlock() + + if n.loV6Enabled == enable { + return + } + + if err := setIPv6(n.path, "lo", enable); err != nil { + logrus.Warnf("Failed to %s IPv6 on loopback interface on network namespace %q: %v", action, n.path, err) + } + + n.loV6Enabled = enable +} + +func reexecSetIPv6() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if len(os.Args) < 3 { + logrus.Errorf("invalid number of arguments for %s", os.Args[0]) + os.Exit(1) + } + + ns, err := netns.GetFromPath(os.Args[1]) + if err != nil { + logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err) + os.Exit(2) + } + defer ns.Close() + + if err = netns.Set(ns); err != nil { + logrus.Errorf("setting into container netns %q failed: %v", os.Args[1], err) + os.Exit(3) + } + + var ( + action = "disable" + value = byte('1') + path = fmt.Sprintf("/proc/sys/net/ipv6/conf/%s/disable_ipv6", os.Args[2]) + ) + + if os.Args[3] == "true" { + action = "enable" + value = byte('0') + } + + if err = ioutil.WriteFile(path, []byte{value, '\n'}, 0644); err != nil { + logrus.Errorf("failed to %s IPv6 forwarding for container's interface %s: %v", action, os.Args[2], err) + os.Exit(4) + } + + os.Exit(0) +} + +func setIPv6(path, iface string, enable bool) error { + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{"set-ipv6"}, path, iface, strconv.FormatBool(enable)), + Stdout: os.Stdout, + Stderr: os.Stderr, + } + if err 
:= cmd.Run(); err != nil { + return fmt.Errorf("reexec to set IPv6 failed: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/osl/namespace_unsupported.go b/vendor/github.com/docker/libnetwork/osl/namespace_unsupported.go new file mode 100644 index 0000000000..74372e2492 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/namespace_unsupported.go @@ -0,0 +1,17 @@ +// +build !linux,!windows,!freebsd + +package osl + +// GC triggers garbage collection of namespace path right away +// and waits for it. +func GC() { +} + +// GetSandboxForExternalKey returns sandbox object for the supplied path +func GetSandboxForExternalKey(path string, key string) (Sandbox, error) { + return nil, nil +} + +// SetBasePath sets the base url prefix for the ns path +func SetBasePath(path string) { +} diff --git a/vendor/github.com/docker/libnetwork/osl/namespace_windows.go b/vendor/github.com/docker/libnetwork/osl/namespace_windows.go new file mode 100644 index 0000000000..49503c00ff --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/namespace_windows.go @@ -0,0 +1,38 @@ +package osl + +import "testing" + +// GenerateKey generates a sandbox key based on the passed +// container id. +func GenerateKey(containerID string) string { + return containerID +} + +// NewSandbox provides a new sandbox instance created in an os specific way +// provided a key which uniquely identifies the sandbox +func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) { + return nil, nil +} + +func GetSandboxForExternalKey(path string, key string) (Sandbox, error) { + return nil, nil +} + +// GC triggers garbage collection of namespace path right away +// and waits for it. +func GC() { +} + +// InitOSContext initializes OS context while configuring network resources +func InitOSContext() func() { + return func() {} +} + +// SetupTestOSContext sets up a separate test OS context in which tests will be executed. 
+func SetupTestOSContext(t *testing.T) func() {
+	return func() {}
+}
+
+// SetBasePath sets the base url prefix for the ns path
+func SetBasePath(path string) {
+}
diff --git a/vendor/github.com/docker/libnetwork/osl/neigh_freebsd.go b/vendor/github.com/docker/libnetwork/osl/neigh_freebsd.go
new file mode 100644
index 0000000000..280f006396
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/osl/neigh_freebsd.go
@@ -0,0 +1,4 @@
+package osl
+
+// NeighOption is a function option type to set neighbor options
+type NeighOption func()
diff --git a/vendor/github.com/docker/libnetwork/osl/neigh_linux.go b/vendor/github.com/docker/libnetwork/osl/neigh_linux.go
new file mode 100644
index 0000000000..6bf1c16dc5
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/osl/neigh_linux.go
@@ -0,0 +1,194 @@
+package osl
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+
+	"github.com/sirupsen/logrus"
+	"github.com/vishvananda/netlink"
+)
+
+// NeighborSearchError indicates that a neighbor lookup failed; present
+// reports whether the entry already exists in the db
+type NeighborSearchError struct {
+	ip      net.IP
+	mac     net.HardwareAddr
+	present bool
+}
+
+func (n NeighborSearchError) Error() string {
+	return fmt.Sprintf("Search neighbor failed for IP %v, mac %v, present in db:%t", n.ip, n.mac, n.present)
+}
+
+// NeighOption is a function option type to set interface options
+type NeighOption func(nh *neigh)
+
+type neigh struct {
+	dstIP    net.IP
+	dstMac   net.HardwareAddr
+	linkName string
+	linkDst  string
+	family   int
+}
+
+func (n *networkNamespace) findNeighbor(dstIP net.IP, dstMac net.HardwareAddr) *neigh {
+	n.Lock()
+	defer n.Unlock()
+
+	for _, nh := range n.neighbors {
+		if nh.dstIP.Equal(dstIP) && bytes.Equal(nh.dstMac, dstMac) {
+			return nh
+		}
+	}
+
+	return nil
+}
+
+func (n *networkNamespace) DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr, osDelete bool) error {
+	var (
+		iface netlink.Link
+		err   error
+	)
+
+	nh := n.findNeighbor(dstIP, dstMac)
+	if nh == nil {
+		return NeighborSearchError{dstIP, dstMac, false}
+	}
+
+	if osDelete {
+		n.Lock()
+		nlh := n.nlHandle
+		n.Unlock()
+
+		if nh.linkDst != "" {
+			iface, err = nlh.LinkByName(nh.linkDst)
+			if err != nil {
+				return fmt.Errorf("could not find interface with destination name %s: %v",
+					nh.linkDst, err)
+			}
+		}
+
+		nlnh := &netlink.Neigh{
+			IP:     dstIP,
+			State:  netlink.NUD_PERMANENT,
+			Family: nh.family,
+		}
+
+		if nlnh.Family > 0 {
+			nlnh.HardwareAddr = dstMac
+			nlnh.Flags = netlink.NTF_SELF
+		}
+
+		if nh.linkDst != "" {
+			nlnh.LinkIndex = iface.Attrs().Index
+		}
+
+		// If the kernel deletion fails for the neighbor entry, still remove it
+		// from the namespace cache. Otherwise, if the neighbor moves back to the
+		// same host again, the kernel update can fail.
+		if err := nlh.NeighDel(nlnh); err != nil {
+			logrus.Warnf("Deleting neighbor IP %s, mac %s failed, %v", dstIP, dstMac, err)
+		}
+
+		// Delete the dynamic entry in the bridge
+		if nlnh.Family > 0 {
+			nlnh := &netlink.Neigh{
+				IP:     dstIP,
+				Family: nh.family,
+			}
+
+			nlnh.HardwareAddr = dstMac
+			nlnh.Flags = netlink.NTF_MASTER
+			if nh.linkDst != "" {
+				nlnh.LinkIndex = iface.Attrs().Index
+			}
+			nlh.NeighDel(nlnh)
+		}
+	}
+
+	n.Lock()
+	for i, nh := range n.neighbors {
+		if nh.dstIP.Equal(dstIP) && bytes.Equal(nh.dstMac, dstMac) {
+			n.neighbors = append(n.neighbors[:i], n.neighbors[i+1:]...)
+ break + } + } + n.Unlock() + logrus.Debugf("Neighbor entry deleted for IP %v, mac %v osDelete:%t", dstIP, dstMac, osDelete) + + return nil +} + +func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, force bool, options ...NeighOption) error { + var ( + iface netlink.Link + err error + neighborAlreadyPresent bool + ) + + // If the namespace already has the neighbor entry but the AddNeighbor is called + // because of a miss notification (force flag) program the kernel anyway. + nh := n.findNeighbor(dstIP, dstMac) + if nh != nil { + neighborAlreadyPresent = true + logrus.Warnf("Neighbor entry already present for IP %v, mac %v neighbor:%+v forceUpdate:%t", dstIP, dstMac, nh, force) + if !force { + return NeighborSearchError{dstIP, dstMac, true} + } + } + + nh = &neigh{ + dstIP: dstIP, + dstMac: dstMac, + } + + nh.processNeighOptions(options...) + + if nh.linkName != "" { + nh.linkDst = n.findDst(nh.linkName, false) + if nh.linkDst == "" { + return fmt.Errorf("could not find the interface with name %s", nh.linkName) + } + } + + n.Lock() + nlh := n.nlHandle + n.Unlock() + + if nh.linkDst != "" { + iface, err = nlh.LinkByName(nh.linkDst) + if err != nil { + return fmt.Errorf("could not find interface with destination name %s: %v", nh.linkDst, err) + } + } + + nlnh := &netlink.Neigh{ + IP: dstIP, + HardwareAddr: dstMac, + State: netlink.NUD_PERMANENT, + Family: nh.family, + } + + if nlnh.Family > 0 { + nlnh.Flags = netlink.NTF_SELF + } + + if nh.linkDst != "" { + nlnh.LinkIndex = iface.Attrs().Index + } + + if err := nlh.NeighSet(nlnh); err != nil { + return fmt.Errorf("could not add neighbor entry:%+v error:%v", nlnh, err) + } + + if neighborAlreadyPresent { + return nil + } + + n.Lock() + n.neighbors = append(n.neighbors, nh) + n.Unlock() + logrus.Debugf("Neighbor entry added for IP:%v, mac:%v on ifc:%s", dstIP, dstMac, nh.linkName) + + return nil +} diff --git a/vendor/github.com/docker/libnetwork/osl/neigh_windows.go b/vendor/github.com/docker/libnetwork/osl/neigh_windows.go new file mode 100644 index 0000000000..280f006396 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/neigh_windows.go @@ -0,0 +1,4 @@ +package osl + +// NeighOption is a function option type to set neighbor options +type NeighOption func() diff --git a/vendor/github.com/docker/libnetwork/osl/options_linux.go b/vendor/github.com/docker/libnetwork/osl/options_linux.go new file mode 100644 index 0000000000..818669647f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/options_linux.go @@ -0,0 +1,73 @@ +package osl + +import "net" + +func (nh *neigh) processNeighOptions(options ...NeighOption) { + for _, opt := range options { + if opt != nil { + opt(nh) + } + } +} + +func (n *networkNamespace) LinkName(name string) NeighOption { + return func(nh *neigh) { + nh.linkName = name + } +} + +func (n *networkNamespace) Family(family int) NeighOption { + return func(nh *neigh) { + nh.family = family + } +} + +func (i *nwIface) processInterfaceOptions(options ...IfaceOption) { + for _, opt := range options { + if opt != nil { + opt(i) + } + } +} + +func (n *networkNamespace) Bridge(isBridge bool) IfaceOption { + return func(i *nwIface) { + i.bridge = isBridge + } +} + +func (n *networkNamespace) Master(name string) IfaceOption { + return func(i *nwIface) { + i.master = name + } +} + +func (n *networkNamespace) MacAddress(mac net.HardwareAddr) IfaceOption { + return func(i *nwIface) { + i.mac = mac + } +} + +func (n *networkNamespace) Address(addr *net.IPNet) IfaceOption { + return func(i 
*nwIface) {
+		i.address = addr
+	}
+}
+
+func (n *networkNamespace) AddressIPv6(addr *net.IPNet) IfaceOption {
+	return func(i *nwIface) {
+		i.addressIPv6 = addr
+	}
+}
+
+func (n *networkNamespace) LinkLocalAddresses(list []*net.IPNet) IfaceOption {
+	return func(i *nwIface) {
+		i.llAddrs = list
+	}
+}
+
+func (n *networkNamespace) Routes(routes []*net.IPNet) IfaceOption {
+	return func(i *nwIface) {
+		i.routes = routes
+	}
+}
diff --git a/vendor/github.com/docker/libnetwork/osl/route_linux.go b/vendor/github.com/docker/libnetwork/osl/route_linux.go
new file mode 100644
index 0000000000..a9ff191b37
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/osl/route_linux.go
@@ -0,0 +1,203 @@
+package osl
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/docker/libnetwork/types"
+	"github.com/vishvananda/netlink"
+)
+
+func (n *networkNamespace) Gateway() net.IP {
+	n.Lock()
+	defer n.Unlock()
+
+	return n.gw
+}
+
+func (n *networkNamespace) GatewayIPv6() net.IP {
+	n.Lock()
+	defer n.Unlock()
+
+	return n.gwv6
+}
+
+func (n *networkNamespace) StaticRoutes() []*types.StaticRoute {
+	n.Lock()
+	defer n.Unlock()
+
+	routes := make([]*types.StaticRoute, len(n.staticRoutes))
+	for i, route := range n.staticRoutes {
+		r := route.GetCopy()
+		routes[i] = r
+	}
+
+	return routes
+}
+
+func (n *networkNamespace) setGateway(gw net.IP) {
+	n.Lock()
+	n.gw = gw
+	n.Unlock()
+}
+
+func (n *networkNamespace) setGatewayIPv6(gwv6 net.IP) {
+	n.Lock()
+	n.gwv6 = gwv6
+	n.Unlock()
+}
+
+func (n *networkNamespace) SetGateway(gw net.IP) error {
+	// Silently return if the gateway is empty
+	if len(gw) == 0 {
+		return nil
+	}
+
+	err := n.programGateway(gw, true)
+	if err == nil {
+		n.setGateway(gw)
+	}
+
+	return err
+}
+
+func (n *networkNamespace) UnsetGateway() error {
+	gw := n.Gateway()
+
+	// Silently return if the gateway is empty
+	if len(gw) == 0 {
+		return nil
+	}
+
+	err := n.programGateway(gw, false)
+	if err == nil {
+		n.setGateway(net.IP{})
+	}
+
+	return err
+}
+
+func (n *networkNamespace) programGateway(gw net.IP, isAdd bool) error {
+	gwRoutes, err := n.nlHandle.RouteGet(gw)
+	if err != nil {
+		return fmt.Errorf("route for the gateway %s could not be found: %v", gw, err)
+	}
+
+	var linkIndex int
+	for _, gwRoute := range gwRoutes {
+		if gwRoute.Gw == nil {
+			linkIndex = gwRoute.LinkIndex
+			break
+		}
+	}
+
+	if linkIndex == 0 {
+		return fmt.Errorf("Direct route for the gateway %s could not be found", gw)
+	}
+
+	if isAdd {
+		return n.nlHandle.RouteAdd(&netlink.Route{
+			Scope:     netlink.SCOPE_UNIVERSE,
+			LinkIndex: linkIndex,
+			Gw:        gw,
+		})
+	}
+
+	return n.nlHandle.RouteDel(&netlink.Route{
+		Scope:     netlink.SCOPE_UNIVERSE,
+		LinkIndex: linkIndex,
+		Gw:        gw,
+	})
+}
+
+// Program a route into the namespace routing table.
+func (n *networkNamespace) programRoute(path string, dest *net.IPNet, nh net.IP) error {
+	gwRoutes, err := n.nlHandle.RouteGet(nh)
+	if err != nil {
+		return fmt.Errorf("route for the next hop %s could not be found: %v", nh, err)
+	}
+
+	return n.nlHandle.RouteAdd(&netlink.Route{
+		Scope:     netlink.SCOPE_UNIVERSE,
+		LinkIndex: gwRoutes[0].LinkIndex,
+		Gw:        nh,
+		Dst:       dest,
+	})
+}
+
+// Delete a route from the namespace routing table.
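+// The next hop is resolved with RouteGet to recover the link index,
+// mirroring programRoute above, and the matching SCOPE_UNIVERSE route for
+// dest is then removed with RouteDel.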
+func (n *networkNamespace) removeRoute(path string, dest *net.IPNet, nh net.IP) error { + gwRoutes, err := n.nlHandle.RouteGet(nh) + if err != nil { + return fmt.Errorf("route for the next hop could not be found: %v", err) + } + + return n.nlHandle.RouteDel(&netlink.Route{ + Scope: netlink.SCOPE_UNIVERSE, + LinkIndex: gwRoutes[0].LinkIndex, + Gw: nh, + Dst: dest, + }) +} + +func (n *networkNamespace) SetGatewayIPv6(gwv6 net.IP) error { + // Silently return if the gateway is empty + if len(gwv6) == 0 { + return nil + } + + err := n.programGateway(gwv6, true) + if err == nil { + n.setGatewayIPv6(gwv6) + } + + return err +} + +func (n *networkNamespace) UnsetGatewayIPv6() error { + gwv6 := n.GatewayIPv6() + + // Silently return if the gateway is empty + if len(gwv6) == 0 { + return nil + } + + err := n.programGateway(gwv6, false) + if err == nil { + n.Lock() + n.gwv6 = net.IP{} + n.Unlock() + } + + return err +} + +func (n *networkNamespace) AddStaticRoute(r *types.StaticRoute) error { + err := n.programRoute(n.nsPath(), r.Destination, r.NextHop) + if err == nil { + n.Lock() + n.staticRoutes = append(n.staticRoutes, r) + n.Unlock() + } + return err +} + +func (n *networkNamespace) RemoveStaticRoute(r *types.StaticRoute) error { + + err := n.removeRoute(n.nsPath(), r.Destination, r.NextHop) + if err == nil { + n.Lock() + lastIndex := len(n.staticRoutes) - 1 + for i, v := range n.staticRoutes { + if v == r { + // Overwrite the route we're removing with the last element + n.staticRoutes[i] = n.staticRoutes[lastIndex] + // Shorten the slice to trim the extra element + n.staticRoutes = n.staticRoutes[:lastIndex] + break + } + } + n.Unlock() + } + return err +} diff --git a/vendor/github.com/docker/libnetwork/osl/sandbox.go b/vendor/github.com/docker/libnetwork/osl/sandbox.go new file mode 100644 index 0000000000..6ffc46775c --- /dev/null +++ b/vendor/github.com/docker/libnetwork/osl/sandbox.go @@ -0,0 +1,171 @@ +// Package osl describes structures and interfaces which abstract os entities +package osl + +import ( + "net" + + "github.com/docker/libnetwork/types" +) + +// Sandbox represents a network sandbox, identified by a specific key. It +// holds a list of Interfaces, routes etc, and more can be added dynamically. +type Sandbox interface { + // The path where the network namespace is mounted. + Key() string + + // Add an existing Interface to this sandbox. The operation will rename + // from the Interface SrcName to DstName as it moves, and reconfigure the + // interface according to the specified settings. The caller is expected + // to only provide a prefix for DstName. The AddInterface api will auto-generate + // an appropriate suffix for the DstName to disambiguate. + AddInterface(SrcName string, DstPrefix string, options ...IfaceOption) error + + // Set default IPv4 gateway for the sandbox + SetGateway(gw net.IP) error + + // Set default IPv6 gateway for the sandbox + SetGatewayIPv6(gw net.IP) error + + // Unset the previously set default IPv4 gateway in the sandbox + UnsetGateway() error + + // Unset the previously set default IPv6 gateway in the sandbox + UnsetGatewayIPv6() error + + // AddLoopbackAliasIP adds the passed IP address to the sandbox loopback interface + AddLoopbackAliasIP(ip *net.IPNet) error + + // RemoveLoopbackAliasIP removes the passed IP address from the sandbox loopback interface + RemoveLoopbackAliasIP(ip *net.IPNet) error + + // Add a static route to the sandbox. + AddStaticRoute(*types.StaticRoute) error + + // Remove a static route from the sandbox. 
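+	// On Linux the route is matched by pointer identity, so callers are
+	// expected to pass the same *StaticRoute value that was previously given
+	// to AddStaticRoute.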
+	RemoveStaticRoute(*types.StaticRoute) error
+
+	// AddNeighbor adds a neighbor entry into the sandbox.
+	AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, force bool, option ...NeighOption) error
+
+	// DeleteNeighbor deletes neighbor entry from the sandbox.
+	DeleteNeighbor(dstIP net.IP, dstMac net.HardwareAddr, osDelete bool) error
+
+	// Returns an interface with methods to set neighbor options.
+	NeighborOptions() NeighborOptionSetter
+
+	// Returns an interface with methods to set interface options.
+	InterfaceOptions() IfaceOptionSetter
+
+	// InvokeFunc invokes the supplied function in the sandbox's context.
+	InvokeFunc(func()) error
+
+	// Returns an interface with methods to get sandbox state.
+	Info() Info
+
+	// Destroy the sandbox
+	Destroy() error
+
+	// Restore the sandbox
+	Restore(ifsopt map[string][]IfaceOption, routes []*types.StaticRoute, gw net.IP, gw6 net.IP) error
+}
+
+// NeighborOptionSetter interface defines the option setter methods for neighbor options
+type NeighborOptionSetter interface {
+	// LinkName returns an option setter to set the srcName of the link that should
+	// be used in the neighbor entry
+	LinkName(string) NeighOption
+
+	// Family returns an option setter to set the address family for the neighbor
+	// entry, e.g. AF_BRIDGE
+	Family(int) NeighOption
+}
+
+// IfaceOptionSetter interface defines the option setter methods for interface options.
+type IfaceOptionSetter interface {
+	// Bridge returns an option setter to set if the interface is a bridge.
+	Bridge(bool) IfaceOption
+
+	// MacAddress returns an option setter to set the MAC address.
+	MacAddress(net.HardwareAddr) IfaceOption
+
+	// Address returns an option setter to set IPv4 address.
+	Address(*net.IPNet) IfaceOption
+
+	// AddressIPv6 returns an option setter to set IPv6 address.
+	AddressIPv6(*net.IPNet) IfaceOption
+
+	// LinkLocalAddresses returns an option setter to set the link-local IP addresses.
+	LinkLocalAddresses([]*net.IPNet) IfaceOption
+
+	// Master returns an option setter to set the master interface if any for this
+	// interface. The master interface name should refer to the srcname of a
+	// previously added interface of type bridge.
+	Master(string) IfaceOption
+
+	// Routes returns an option setter to set interface routes.
+	Routes([]*net.IPNet) IfaceOption
+}
+
+// Info represents all possible information that
+// the driver wants to place in the sandbox which includes
+// interfaces, routes and gateway
+type Info interface {
+	// The collection of Interface previously added with the AddInterface
+	// method. Note that this doesn't include network interfaces added in any
+	// other way (such as the default loopback interface which is automatically
+	// created on creation of a sandbox).
+	Interfaces() []Interface
+
+	// IPv4 gateway for the sandbox.
+	Gateway() net.IP
+
+	// IPv6 gateway for the sandbox.
+	GatewayIPv6() net.IP
+
+	// Additional static routes for the sandbox. (Note that directly
+	// connected routes are stored on the particular interface they refer to.)
+	StaticRoutes() []*types.StaticRoute
+
+	// TODO: Add ip tables etc.
+}
+
+// Interface represents the settings and identity of a network device. It is
+// used as a return type for Network.Link, and it is common practice for the
+// caller to use this information when moving interface SrcName from host
+// namespace to DstName in a different net namespace with the appropriate
+// network settings.
+type Interface interface {
+	// The name of the interface in the origin network namespace.
+	SrcName() string
+
+	// The name that will be assigned to the interface once it moves inside a
+	// network namespace. When the caller passes in a DstName, it is only
+	// expected to pass a prefix. The name will be modified with an
+	// appropriately auto-generated suffix.
+	DstName() string
+
+	// IPv4 address for the interface.
+	Address() *net.IPNet
+
+	// IPv6 address for the interface.
+	AddressIPv6() *net.IPNet
+
+	// LinkLocalAddresses returns the link-local IP addresses assigned to the interface.
+	LinkLocalAddresses() []*net.IPNet
+
+	// IP routes for the interface.
+	Routes() []*net.IPNet
+
+	// Bridge returns true if the interface is a bridge
+	Bridge() bool
+
+	// Master returns the srcname of the master interface for this interface.
+	Master() string
+
+	// Remove an interface from the sandbox by renaming to original name
+	// and moving it out of the sandbox.
+	Remove() error
+
+	// Statistics returns the statistics for this interface
+	Statistics() (*types.InterfaceStatistics, error)
+}
diff --git a/vendor/github.com/docker/libnetwork/osl/sandbox_freebsd.go b/vendor/github.com/docker/libnetwork/osl/sandbox_freebsd.go
new file mode 100644
index 0000000000..e5bc6278ee
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/osl/sandbox_freebsd.go
@@ -0,0 +1,44 @@
+package osl
+
+import "testing"
+
+// GenerateKey generates a sandbox key based on the passed
+// container id.
+func GenerateKey(containerID string) string {
+	maxLen := 12
+	if len(containerID) < maxLen {
+		maxLen = len(containerID)
+	}
+
+	return containerID[:maxLen]
+}
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) {
+	return nil, nil
+}
+
+// GetSandboxForExternalKey returns sandbox object for the supplied path
+func GetSandboxForExternalKey(path string, key string) (Sandbox, error) {
+	return nil, nil
+}
+
+// GC triggers garbage collection of namespace path right away
+// and waits for it.
+func GC() {
+}
+
+// InitOSContext initializes OS context while configuring network resources
+func InitOSContext() func() {
+	return func() {}
+}
+
+// SetupTestOSContext sets up a separate test OS context in which tests will be executed.
+func SetupTestOSContext(t *testing.T) func() {
+	return func() {}
+}
+
+// SetBasePath sets the base url prefix for the ns path
+func SetBasePath(path string) {
+}
diff --git a/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go b/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
new file mode 100644
index 0000000000..51a656c806
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/osl/sandbox_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux,!windows,!freebsd
+
+package osl
+
+import "errors"
+
+var (
+	// ErrNotImplemented is for platforms which don't implement sandbox
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+// NewSandbox provides a new sandbox instance created in an os specific way
+// provided a key which uniquely identifies the sandbox
+func NewSandbox(key string, osCreate, isRestore bool) (Sandbox, error) {
+	return nil, ErrNotImplemented
+}
+
+// GenerateKey generates a sandbox key based on the passed
+// container id.
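+// On platforms without sandbox support this is a stub and always returns an
+// empty key (contrast with the FreeBSD variant above, which truncates the
+// container id to at most 12 characters).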
+func GenerateKey(containerID string) string { + return "" +} diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator.go new file mode 100644 index 0000000000..9798d23eb1 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator.go @@ -0,0 +1,249 @@ +package portallocator + +import ( + "errors" + "fmt" + "net" + "sync" +) + +const ( + // DefaultPortRangeStart indicates the first port in port range + DefaultPortRangeStart = 49153 + // DefaultPortRangeEnd indicates the last port in port range + DefaultPortRangeEnd = 65535 +) + +type ipMapping map[string]protoMap + +var ( + // ErrAllPortsAllocated is returned when no more ports are available + ErrAllPortsAllocated = errors.New("all ports are allocated") + // ErrUnknownProtocol is returned when an unknown protocol was specified + ErrUnknownProtocol = errors.New("unknown protocol") + defaultIP = net.ParseIP("0.0.0.0") + once sync.Once + instance *PortAllocator + createInstance = func() { instance = newInstance() } +) + +// ErrPortAlreadyAllocated is the returned error information when a requested port is already being used +type ErrPortAlreadyAllocated struct { + ip string + port int +} + +func newErrPortAlreadyAllocated(ip string, port int) ErrPortAlreadyAllocated { + return ErrPortAlreadyAllocated{ + ip: ip, + port: port, + } +} + +// IP returns the address to which the used port is associated +func (e ErrPortAlreadyAllocated) IP() string { + return e.ip +} + +// Port returns the value of the already used port +func (e ErrPortAlreadyAllocated) Port() int { + return e.port +} + +// IPPort returns the address and the port in the form ip:port +func (e ErrPortAlreadyAllocated) IPPort() string { + return fmt.Sprintf("%s:%d", e.ip, e.port) +} + +// Error is the implementation of error.Error interface +func (e ErrPortAlreadyAllocated) Error() string { + return fmt.Sprintf("Bind for %s:%d failed: port is already allocated", e.ip, e.port) +} + +type ( + // PortAllocator manages the transport ports database + PortAllocator struct { + mutex sync.Mutex + ipMap ipMapping + Begin int + End int + } + portRange struct { + begin int + end int + last int + } + portMap struct { + p map[int]struct{} + defaultRange string + portRanges map[string]*portRange + } + protoMap map[string]*portMap +) + +// Get returns the default instance of PortAllocator +func Get() *PortAllocator { + // Port Allocator is a singleton + // Note: Long term solution will be each PortAllocator will have access to + // the OS so that it can have up to date view of the OS port allocation. + // When this happens singleton behavior will be removed. Clients do not + // need to worry about this, they will not see a change in behavior. + once.Do(createInstance) + return instance +} + +func newInstance() *PortAllocator { + start, end, err := getDynamicPortRange() + if err != nil { + start, end = DefaultPortRangeStart, DefaultPortRangeEnd + } + return &PortAllocator{ + ipMap: ipMapping{}, + Begin: start, + End: end, + } +} + +// RequestPort requests new port from global ports pool for specified ip and proto. +// If port is 0 it returns first free port. Otherwise it checks port availability +// in proto's pool and returns that port or error if port is already busy. 
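+//
+// A hypothetical usage sketch:
+//
+//	pa := portallocator.Get()
+//	port, err := pa.RequestPort(nil, "tcp", 0) // nil IP is treated as 0.0.0.0
+//	if err == nil {
+//		defer pa.ReleasePort(nil, "tcp", port)
+//	}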
+func (p *PortAllocator) RequestPort(ip net.IP, proto string, port int) (int, error) { + return p.RequestPortInRange(ip, proto, port, port) +} + +// RequestPortInRange requests new port from global ports pool for specified ip and proto. +// If portStart and portEnd are 0 it returns the first free port in the default ephemeral range. +// If portStart != portEnd it returns the first free port in the requested range. +// Otherwise (portStart == portEnd) it checks port availability in the requested proto's port-pool +// and returns that port or error if port is already busy. +func (p *PortAllocator) RequestPortInRange(ip net.IP, proto string, portStart, portEnd int) (int, error) { + p.mutex.Lock() + defer p.mutex.Unlock() + + if proto != "tcp" && proto != "udp" && proto != "sctp" { + return 0, ErrUnknownProtocol + } + + if ip == nil { + ip = defaultIP + } + ipstr := ip.String() + protomap, ok := p.ipMap[ipstr] + if !ok { + protomap = protoMap{ + "tcp": p.newPortMap(), + "udp": p.newPortMap(), + "sctp": p.newPortMap(), + } + + p.ipMap[ipstr] = protomap + } + mapping := protomap[proto] + if portStart > 0 && portStart == portEnd { + if _, ok := mapping.p[portStart]; !ok { + mapping.p[portStart] = struct{}{} + return portStart, nil + } + return 0, newErrPortAlreadyAllocated(ipstr, portStart) + } + + port, err := mapping.findPort(portStart, portEnd) + if err != nil { + return 0, err + } + return port, nil +} + +// ReleasePort releases port from global ports pool for specified ip and proto. +func (p *PortAllocator) ReleasePort(ip net.IP, proto string, port int) error { + p.mutex.Lock() + defer p.mutex.Unlock() + + if ip == nil { + ip = defaultIP + } + protomap, ok := p.ipMap[ip.String()] + if !ok { + return nil + } + delete(protomap[proto].p, port) + return nil +} + +func (p *PortAllocator) newPortMap() *portMap { + defaultKey := getRangeKey(p.Begin, p.End) + pm := &portMap{ + p: map[int]struct{}{}, + defaultRange: defaultKey, + portRanges: map[string]*portRange{ + defaultKey: newPortRange(p.Begin, p.End), + }, + } + return pm +} + +// ReleaseAll releases all ports for all ips. +func (p *PortAllocator) ReleaseAll() error { + p.mutex.Lock() + p.ipMap = ipMapping{} + p.mutex.Unlock() + return nil +} + +func getRangeKey(portStart, portEnd int) string { + return fmt.Sprintf("%d-%d", portStart, portEnd) +} + +func newPortRange(portStart, portEnd int) *portRange { + return &portRange{ + begin: portStart, + end: portEnd, + last: portEnd, + } +} + +func (pm *portMap) getPortRange(portStart, portEnd int) (*portRange, error) { + var key string + if portStart == 0 && portEnd == 0 { + key = pm.defaultRange + } else { + key = getRangeKey(portStart, portEnd) + if portStart == portEnd || + portStart == 0 || portEnd == 0 || + portEnd < portStart { + return nil, fmt.Errorf("invalid port range: %s", key) + } + } + + // Return existing port range, if already known. + if pr, exists := pm.portRanges[key]; exists { + return pr, nil + } + + // Otherwise create a new port range. 
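+	// Ranges are cached under their "start-end" key, so later requests over
+	// the same range share one allocation cursor (portRange.last) and resume
+	// scanning from the last port handed out.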
+	pr := newPortRange(portStart, portEnd)
+	pm.portRanges[key] = pr
+	return pr, nil
+}
+
+func (pm *portMap) findPort(portStart, portEnd int) (int, error) {
+	pr, err := pm.getPortRange(portStart, portEnd)
+	if err != nil {
+		return 0, err
+	}
+	port := pr.last
+
+	for i := 0; i <= pr.end-pr.begin; i++ {
+		port++
+		if port > pr.end {
+			port = pr.begin
+		}
+
+		if _, ok := pm.p[port]; !ok {
+			pm.p[port] = struct{}{}
+			pr.last = port
+			return port, nil
+		}
+	}
+	return 0, ErrAllPortsAllocated
+}
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go
new file mode 100644
index 0000000000..97d7fbb49d
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go
@@ -0,0 +1,42 @@
+package portallocator
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+)
+
+func getDynamicPortRange() (start int, end int, err error) {
+	portRangeKernelSysctl := []string{"net.inet.ip.portrange.hifirst", "net.inet.ip.portrange.hilast"}
+	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd)
+	portRangeLowCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[0])
+	var portRangeLowOut bytes.Buffer
+	portRangeLowCmd.Stdout = &portRangeLowOut
+	cmdErr := portRangeLowCmd.Run()
+	if cmdErr != nil {
+		return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hifirst failed - %s: %v", portRangeFallback, cmdErr)
+	}
+	n, err := fmt.Sscanf(portRangeLowOut.String(), "%d", &start)
+	if n != 1 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
+		}
+		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range start from %s - %s: %v", portRangeLowOut.String(), portRangeFallback, err)
+	}
+
+	portRangeHighCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[1])
+	var portRangeHighOut bytes.Buffer
+	portRangeHighCmd.Stdout = &portRangeHighOut
+	cmdErr = portRangeHighCmd.Run()
+	if cmdErr != nil {
+		return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hilast failed - %s: %v", portRangeFallback, cmdErr)
+	}
+	n, err = fmt.Sscanf(portRangeHighOut.String(), "%d", &end)
+	if n != 1 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
+		}
+		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range end from %s - %s: %v", portRangeHighOut.String(), portRangeFallback, err)
+	}
+	return start, end, nil
+}
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator_linux.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator_linux.go
new file mode 100644
index 0000000000..687f6dabb7
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator_linux.go
@@ -0,0 +1,27 @@
+package portallocator
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+)
+
+func getDynamicPortRange() (start int, end int, err error) {
+	const portRangeKernelParam = "/proc/sys/net/ipv4/ip_local_port_range"
+	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd)
+	file, err := os.Open(portRangeKernelParam)
+	if err != nil {
+		return 0, 0, fmt.Errorf("port allocator - %s due to error: %v", portRangeFallback, err)
+	}
+
+	defer file.Close()
+
+	n, err := fmt.Fscanf(bufio.NewReader(file), "%d\t%d", &start, &end)
+	if n != 2 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)",
n) + } + return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range from %s - %s: %v", portRangeKernelParam, portRangeFallback, err) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/libnetwork/portmapper/mapper.go b/vendor/github.com/docker/libnetwork/portmapper/mapper.go new file mode 100644 index 0000000000..7fa37b1fb6 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/portmapper/mapper.go @@ -0,0 +1,294 @@ +package portmapper + +import ( + "errors" + "fmt" + "net" + "sync" + + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/portallocator" + "github.com/ishidawataru/sctp" + "github.com/sirupsen/logrus" +) + +type mapping struct { + proto string + userlandProxy userlandProxy + host net.Addr + container net.Addr +} + +var newProxy = newProxyCommand + +var ( + // ErrUnknownBackendAddressType refers to an unknown container or unsupported address type + ErrUnknownBackendAddressType = errors.New("unknown container address type not supported") + // ErrPortMappedForIP refers to a port already mapped to an ip address + ErrPortMappedForIP = errors.New("port is already mapped to ip") + // ErrPortNotMapped refers to an unmapped port + ErrPortNotMapped = errors.New("port is not mapped") + // ErrSCTPAddrNoIP refers to a SCTP address without IP address. + ErrSCTPAddrNoIP = errors.New("sctp address does not contain any IP address") +) + +// PortMapper manages the network address translation +type PortMapper struct { + chain *iptables.ChainInfo + bridgeName string + + // udp:ip:port + currentMappings map[string]*mapping + lock sync.Mutex + + proxyPath string + + Allocator *portallocator.PortAllocator +} + +// New returns a new instance of PortMapper +func New(proxyPath string) *PortMapper { + return NewWithPortAllocator(portallocator.Get(), proxyPath) +} + +// NewWithPortAllocator returns a new instance of PortMapper which will use the specified PortAllocator +func NewWithPortAllocator(allocator *portallocator.PortAllocator, proxyPath string) *PortMapper { + return &PortMapper{ + currentMappings: make(map[string]*mapping), + Allocator: allocator, + proxyPath: proxyPath, + } +} + +// SetIptablesChain sets the specified chain into portmapper +func (pm *PortMapper) SetIptablesChain(c *iptables.ChainInfo, bridgeName string) { + pm.chain = c + pm.bridgeName = bridgeName +} + +// Map maps the specified container transport address to the host's network address and transport port +func (pm *PortMapper) Map(container net.Addr, hostIP net.IP, hostPort int, useProxy bool) (host net.Addr, err error) { + return pm.MapRange(container, hostIP, hostPort, hostPort, useProxy) +} + +// MapRange maps the specified container transport address to the host's network address and transport port range +func (pm *PortMapper) MapRange(container net.Addr, hostIP net.IP, hostPortStart, hostPortEnd int, useProxy bool) (host net.Addr, err error) { + pm.lock.Lock() + defer pm.lock.Unlock() + + var ( + m *mapping + proto string + allocatedHostPort int + ) + + switch container.(type) { + case *net.TCPAddr: + proto = "tcp" + if allocatedHostPort, err = pm.Allocator.RequestPortInRange(hostIP, proto, hostPortStart, hostPortEnd); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.TCPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + if useProxy { + m.userlandProxy, err = newProxy(proto, hostIP, allocatedHostPort, container.(*net.TCPAddr).IP, container.(*net.TCPAddr).Port, pm.proxyPath) + if err != nil { + return 
nil, err + } + } else { + m.userlandProxy, err = newDummyProxy(proto, hostIP, allocatedHostPort) + if err != nil { + return nil, err + } + } + case *net.UDPAddr: + proto = "udp" + if allocatedHostPort, err = pm.Allocator.RequestPortInRange(hostIP, proto, hostPortStart, hostPortEnd); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &net.UDPAddr{IP: hostIP, Port: allocatedHostPort}, + container: container, + } + + if useProxy { + m.userlandProxy, err = newProxy(proto, hostIP, allocatedHostPort, container.(*net.UDPAddr).IP, container.(*net.UDPAddr).Port, pm.proxyPath) + if err != nil { + return nil, err + } + } else { + m.userlandProxy, err = newDummyProxy(proto, hostIP, allocatedHostPort) + if err != nil { + return nil, err + } + } + case *sctp.SCTPAddr: + proto = "sctp" + if allocatedHostPort, err = pm.Allocator.RequestPortInRange(hostIP, proto, hostPortStart, hostPortEnd); err != nil { + return nil, err + } + + m = &mapping{ + proto: proto, + host: &sctp.SCTPAddr{IP: []net.IP{hostIP}, Port: allocatedHostPort}, + container: container, + } + + if useProxy { + sctpAddr := container.(*sctp.SCTPAddr) + if len(sctpAddr.IP) == 0 { + return nil, ErrSCTPAddrNoIP + } + m.userlandProxy, err = newProxy(proto, hostIP, allocatedHostPort, sctpAddr.IP[0], sctpAddr.Port, pm.proxyPath) + if err != nil { + return nil, err + } + } else { + m.userlandProxy, err = newDummyProxy(proto, hostIP, allocatedHostPort) + if err != nil { + return nil, err + } + } + default: + return nil, ErrUnknownBackendAddressType + } + + // release the allocated port on any further error during return. + defer func() { + if err != nil { + pm.Allocator.ReleasePort(hostIP, proto, allocatedHostPort) + } + }() + + key := getKey(m.host) + if _, exists := pm.currentMappings[key]; exists { + return nil, ErrPortMappedForIP + } + + containerIP, containerPort := getIPAndPort(m.container) + if hostIP.To4() != nil { + if err := pm.forward(iptables.Append, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort); err != nil { + return nil, err + } + } + + cleanup := func() error { + // need to undo the iptables rules before we return + m.userlandProxy.Stop() + if hostIP.To4() != nil { + pm.forward(iptables.Delete, m.proto, hostIP, allocatedHostPort, containerIP.String(), containerPort) + if err := pm.Allocator.ReleasePort(hostIP, m.proto, allocatedHostPort); err != nil { + return err + } + } + + return nil + } + + if err := m.userlandProxy.Start(); err != nil { + if err := cleanup(); err != nil { + return nil, fmt.Errorf("Error during port allocation cleanup: %v", err) + } + return nil, err + } + + pm.currentMappings[key] = m + return m.host, nil +} + +// Unmap removes stored mapping for the specified host transport address +func (pm *PortMapper) Unmap(host net.Addr) error { + pm.lock.Lock() + defer pm.lock.Unlock() + + key := getKey(host) + data, exists := pm.currentMappings[key] + if !exists { + return ErrPortNotMapped + } + + if data.userlandProxy != nil { + data.userlandProxy.Stop() + } + + delete(pm.currentMappings, key) + + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := pm.forward(iptables.Delete, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + logrus.Errorf("Error on iptables delete: %s", err) + } + + switch a := host.(type) { + case *net.TCPAddr: + return pm.Allocator.ReleasePort(a.IP, "tcp", a.Port) + case *net.UDPAddr: + return pm.Allocator.ReleasePort(a.IP, "udp", a.Port) + case 
*sctp.SCTPAddr: + if len(a.IP) == 0 { + return ErrSCTPAddrNoIP + } + return pm.Allocator.ReleasePort(a.IP[0], "sctp", a.Port) + } + return ErrUnknownBackendAddressType +} + +//ReMapAll will re-apply all port mappings +func (pm *PortMapper) ReMapAll() { + pm.lock.Lock() + defer pm.lock.Unlock() + logrus.Debugln("Re-applying all port mappings.") + for _, data := range pm.currentMappings { + containerIP, containerPort := getIPAndPort(data.container) + hostIP, hostPort := getIPAndPort(data.host) + if err := pm.forward(iptables.Append, data.proto, hostIP, hostPort, containerIP.String(), containerPort); err != nil { + logrus.Errorf("Error on iptables add: %s", err) + } + } +} + +func getKey(a net.Addr) string { + switch t := a.(type) { + case *net.TCPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "tcp") + case *net.UDPAddr: + return fmt.Sprintf("%s:%d/%s", t.IP.String(), t.Port, "udp") + case *sctp.SCTPAddr: + if len(t.IP) == 0 { + logrus.Error(ErrSCTPAddrNoIP) + return "" + } + return fmt.Sprintf("%s:%d/%s", t.IP[0].String(), t.Port, "sctp") + } + return "" +} + +func getIPAndPort(a net.Addr) (net.IP, int) { + switch t := a.(type) { + case *net.TCPAddr: + return t.IP, t.Port + case *net.UDPAddr: + return t.IP, t.Port + case *sctp.SCTPAddr: + if len(t.IP) == 0 { + logrus.Error(ErrSCTPAddrNoIP) + return nil, 0 + } + return t.IP[0], t.Port + } + return nil, 0 +} + +func (pm *PortMapper) forward(action iptables.Action, proto string, sourceIP net.IP, sourcePort int, containerIP string, containerPort int) error { + if pm.chain == nil { + return nil + } + return pm.chain.Forward(action, sourceIP, sourcePort, proto, containerIP, containerPort, pm.bridgeName) +} diff --git a/vendor/github.com/docker/libnetwork/portmapper/mock_proxy.go b/vendor/github.com/docker/libnetwork/portmapper/mock_proxy.go new file mode 100644 index 0000000000..ceb7b02926 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/portmapper/mock_proxy.go @@ -0,0 +1,18 @@ +package portmapper + +import "net" + +func newMockProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int, userlandProxyPath string) (userlandProxy, error) { + return &mockProxyCommand{}, nil +} + +type mockProxyCommand struct { +} + +func (p *mockProxyCommand) Start() error { + return nil +} + +func (p *mockProxyCommand) Stop() error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/portmapper/proxy.go b/vendor/github.com/docker/libnetwork/portmapper/proxy.go new file mode 100644 index 0000000000..1183c33a7e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/portmapper/proxy.go @@ -0,0 +1,131 @@ +package portmapper + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "time" + + "github.com/ishidawataru/sctp" +) + +var userlandProxyCommandName = "docker-proxy" + +type userlandProxy interface { + Start() error + Stop() error +} + +// proxyCommand wraps an exec.Cmd to run the userland TCP and UDP +// proxies as separate processes. 
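+// Start (below) passes the write end of a pipe to the child as an extra
+// file; the proxy is expected to write "0\n" once it is ready, and anything
+// else read from the pipe is surfaced as the startup error.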
+type proxyCommand struct {
+	cmd *exec.Cmd
+}
+
+func (p *proxyCommand) Start() error {
+	r, w, err := os.Pipe()
+	if err != nil {
+		return fmt.Errorf("proxy unable to open os.Pipe %s", err)
+	}
+	defer r.Close()
+	p.cmd.ExtraFiles = []*os.File{w}
+	if err := p.cmd.Start(); err != nil {
+		return err
+	}
+	w.Close()
+
+	errchan := make(chan error, 1)
+	go func() {
+		buf := make([]byte, 2)
+		r.Read(buf)
+
+		if string(buf) != "0\n" {
+			errStr, err := ioutil.ReadAll(r)
+			if err != nil {
+				errchan <- fmt.Errorf("Error reading exit status from userland proxy: %v", err)
+				return
+			}
+
+			errchan <- fmt.Errorf("Error starting userland proxy: %s", errStr)
+			return
+		}
+		errchan <- nil
+	}()
+
+	select {
+	case err := <-errchan:
+		return err
+	case <-time.After(16 * time.Second):
+		return fmt.Errorf("Timed out waiting for the userland proxy to start")
+	}
+}
+
+func (p *proxyCommand) Stop() error {
+	if p.cmd.Process != nil {
+		if err := p.cmd.Process.Signal(os.Interrupt); err != nil {
+			return err
+		}
+		return p.cmd.Wait()
+	}
+	return nil
+}
+
+// dummyProxy just listens on some port. It is needed to prevent accidental
+// port allocations on the bound port, because without the userland proxy we
+// are using iptables rules rather than net.Listen
+type dummyProxy struct {
+	listener io.Closer
+	addr     net.Addr
+}
+
+func newDummyProxy(proto string, hostIP net.IP, hostPort int) (userlandProxy, error) {
+	switch proto {
+	case "tcp":
+		addr := &net.TCPAddr{IP: hostIP, Port: hostPort}
+		return &dummyProxy{addr: addr}, nil
+	case "udp":
+		addr := &net.UDPAddr{IP: hostIP, Port: hostPort}
+		return &dummyProxy{addr: addr}, nil
+	case "sctp":
+		addr := &sctp.SCTPAddr{IP: []net.IP{hostIP}, Port: hostPort}
+		return &dummyProxy{addr: addr}, nil
+	default:
+		return nil, fmt.Errorf("Unknown addr type: %s", proto)
+	}
+}
+
+func (p *dummyProxy) Start() error {
+	switch addr := p.addr.(type) {
+	case *net.TCPAddr:
+		l, err := net.ListenTCP("tcp", addr)
+		if err != nil {
+			return err
+		}
+		p.listener = l
+	case *net.UDPAddr:
+		l, err := net.ListenUDP("udp", addr)
+		if err != nil {
+			return err
+		}
+		p.listener = l
+	case *sctp.SCTPAddr:
+		l, err := sctp.ListenSCTP("sctp", addr)
+		if err != nil {
+			return err
+		}
+		p.listener = l
+	default:
+		return fmt.Errorf("Unknown addr type: %T", p.addr)
+	}
+	return nil
+}
+
+func (p *dummyProxy) Stop() error {
+	if p.listener != nil {
+		return p.listener.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/libnetwork/portmapper/proxy_linux.go b/vendor/github.com/docker/libnetwork/portmapper/proxy_linux.go
new file mode 100644
index 0000000000..947cd0ba4b
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portmapper/proxy_linux.go
@@ -0,0 +1,38 @@
+package portmapper
+
+import (
+	"net"
+	"os/exec"
+	"strconv"
+	"syscall"
+)
+
+func newProxyCommand(proto string, hostIP net.IP, hostPort int, containerIP net.IP, containerPort int, proxyPath string) (userlandProxy, error) {
+	path := proxyPath
+	if proxyPath == "" {
+		cmd, err := exec.LookPath(userlandProxyCommandName)
+		if err != nil {
+			return nil, err
+		}
+		path = cmd
+	}
+
+	args := []string{
+		path,
+		"-proto", proto,
+		"-host-ip", hostIP.String(),
+		"-host-port", strconv.Itoa(hostPort),
+		"-container-ip", containerIP.String(),
+		"-container-port", strconv.Itoa(containerPort),
+	}
+
+	return &proxyCommand{
+		cmd: &exec.Cmd{
+			Path: path,
+			Args: args,
+			SysProcAttr: &syscall.SysProcAttr{
+				Pdeathsig: syscall.SIGTERM, // send a SIGTERM to the proxy if the daemon process dies
+			},
+		},
+	}, nil
+}
diff --git
a/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go new file mode 100644 index 0000000000..e348bc57f5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go @@ -0,0 +1,26 @@ +package dns + +import ( + "regexp" +) + +// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range. +const IPLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)` + +// IPv4Localhost is a regex pattern for IPv4 localhost address range. +const IPv4Localhost = `(127\.([0-9]{1,3}\.){2}[0-9]{1,3})` + +var localhostIPRegexp = regexp.MustCompile(IPLocalhost) +var localhostIPv4Regexp = regexp.MustCompile(IPv4Localhost) + +// IsLocalhost returns true if ip matches the localhost IP regular expression. +// Used for determining if nameserver settings are being passed which are +// localhost addresses +func IsLocalhost(ip string) bool { + return localhostIPRegexp.MatchString(ip) +} + +// IsIPv4Localhost returns true if ip matches the IPv4 localhost regular expression. +func IsIPv4Localhost(ip string) bool { + return localhostIPv4Regexp.MatchString(ip) +} diff --git a/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go new file mode 100644 index 0000000000..5cb251b131 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolvconf/resolvconf.go @@ -0,0 +1,254 @@ +// Package resolvconf provides utility code to query and update DNS configuration in /etc/resolv.conf +package resolvconf + +import ( + "bytes" + "io/ioutil" + "regexp" + "strings" + "sync" + + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +var ( + // Note: the default IPv4 & IPv6 resolvers are set to Google's Public DNS + defaultIPv4Dns = []string{"nameserver 8.8.8.8", "nameserver 8.8.4.4"} + defaultIPv6Dns = []string{"nameserver 2001:4860:4860::8888", "nameserver 2001:4860:4860::8844"} + ipv4NumBlock = `(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)` + ipv4Address = `(` + ipv4NumBlock + `\.){3}` + ipv4NumBlock + // This is not an IPv6 address verifier as it will accept a super-set of IPv6, and also + // will *not match* IPv4-Embedded IPv6 Addresses (RFC6052), but that and other variants + // -- e.g. other link-local types -- either won't work in containers or are unnecessary. 
+ // For readability and sufficiency for Docker purposes this seemed more reasonable than a + // 1000+ character regexp with exact and complete IPv6 validation + ipv6Address = `([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{0,4})(%\w+)?` + + localhostNSRegexp = regexp.MustCompile(`(?m)^nameserver\s+` + dns.IPLocalhost + `\s*\n*`) + nsIPv6Regexp = regexp.MustCompile(`(?m)^nameserver\s+` + ipv6Address + `\s*\n*`) + nsRegexp = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `)|(` + ipv6Address + `))\s*$`) + nsIPv6Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv6Address + `))\s*$`) + nsIPv4Regexpmatch = regexp.MustCompile(`^\s*nameserver\s*((` + ipv4Address + `))\s*$`) + searchRegexp = regexp.MustCompile(`^\s*search\s*(([^\s]+\s*)*)$`) + optionsRegexp = regexp.MustCompile(`^\s*options\s*(([^\s]+\s*)*)$`) +) + +var lastModified struct { + sync.Mutex + sha256 string + contents []byte +} + +// File contains the resolv.conf content and its hash +type File struct { + Content []byte + Hash string +} + +// Get returns the contents of /etc/resolv.conf and its hash +func Get() (*File, error) { + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + hash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + return &File{Content: resolv, Hash: hash}, nil +} + +// GetSpecific returns the contents of the user specified resolv.conf file and its hash +func GetSpecific(path string) (*File, error) { + resolv, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + hash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + return &File{Content: resolv, Hash: hash}, nil +} + +// GetIfChanged retrieves the host /etc/resolv.conf file, checks against the last hash +// and, if modified since last check, returns the bytes and new hash. +// This feature is used by the resolv.conf updater for containers +func GetIfChanged() (*File, error) { + lastModified.Lock() + defer lastModified.Unlock() + + resolv, err := ioutil.ReadFile("/etc/resolv.conf") + if err != nil { + return nil, err + } + newHash, err := ioutils.HashData(bytes.NewReader(resolv)) + if err != nil { + return nil, err + } + if lastModified.sha256 != newHash { + lastModified.sha256 = newHash + lastModified.contents = resolv + return &File{Content: resolv, Hash: newHash}, nil + } + // nothing changed, so return no data + return nil, nil +} + +// GetLastModified retrieves the last used contents and hash of the host resolv.conf. +// Used by containers updating on restart +func GetLastModified() *File { + lastModified.Lock() + defer lastModified.Unlock() + + return &File{Content: lastModified.contents, Hash: lastModified.sha256} +} + +// FilterResolvDNS cleans up the config in resolvConf. It has two main jobs: +// 1. It looks for localhost (127.*|::1) entries in the provided +// resolv.conf, removing local nameserver entries, and, if the resulting +// cleaned config has no defined nameservers left, adds default DNS entries +// 2. 
Given the caller provides the enable/disable state of IPv6, the filter +// code will remove all IPv6 nameservers if it is not enabled for containers +// +func FilterResolvDNS(resolvConf []byte, ipv6Enabled bool) (*File, error) { + cleanedResolvConf := localhostNSRegexp.ReplaceAll(resolvConf, []byte{}) + // if IPv6 is not enabled, also clean out any IPv6 address nameserver + if !ipv6Enabled { + cleanedResolvConf = nsIPv6Regexp.ReplaceAll(cleanedResolvConf, []byte{}) + } + // if the resulting resolvConf has no more nameservers defined, add appropriate + // default DNS servers for IPv4 and (optionally) IPv6 + if len(GetNameservers(cleanedResolvConf, types.IP)) == 0 { + logrus.Infof("No non-localhost DNS nameservers are left in resolv.conf. Using default external servers: %v", defaultIPv4Dns) + dns := defaultIPv4Dns + if ipv6Enabled { + logrus.Infof("IPv6 enabled; Adding default IPv6 external servers: %v", defaultIPv6Dns) + dns = append(dns, defaultIPv6Dns...) + } + cleanedResolvConf = append(cleanedResolvConf, []byte("\n"+strings.Join(dns, "\n"))...) + } + hash, err := ioutils.HashData(bytes.NewReader(cleanedResolvConf)) + if err != nil { + return nil, err + } + return &File{Content: cleanedResolvConf, Hash: hash}, nil +} + +// getLines parses input into lines and strips away comments. +func getLines(input []byte, commentMarker []byte) [][]byte { + lines := bytes.Split(input, []byte("\n")) + var output [][]byte + for _, currentLine := range lines { + var commentIndex = bytes.Index(currentLine, commentMarker) + if commentIndex == -1 { + output = append(output, currentLine) + } else { + output = append(output, currentLine[:commentIndex]) + } + } + return output +} + +// GetNameservers returns nameservers (if any) listed in /etc/resolv.conf +func GetNameservers(resolvConf []byte, kind int) []string { + nameservers := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + var ns [][]byte + if kind == types.IP { + ns = nsRegexp.FindSubmatch(line) + } else if kind == types.IPv4 { + ns = nsIPv4Regexpmatch.FindSubmatch(line) + } else if kind == types.IPv6 { + ns = nsIPv6Regexpmatch.FindSubmatch(line) + } + if len(ns) > 0 { + nameservers = append(nameservers, string(ns[1])) + } + } + return nameservers +} + +// GetNameserversAsCIDR returns nameservers (if any) listed in +// /etc/resolv.conf as CIDR blocks (e.g., "1.2.3.4/32") +// This function's output is intended for net.ParseCIDR +func GetNameserversAsCIDR(resolvConf []byte) []string { + nameservers := []string{} + for _, nameserver := range GetNameservers(resolvConf, types.IP) { + var address string + // If IPv6, strip zone if present + if strings.Contains(nameserver, ":") { + address = strings.Split(nameserver, "%")[0] + "/128" + } else { + address = nameserver + "/32" + } + nameservers = append(nameservers, address) + } + return nameservers +} + +// GetSearchDomains returns search domains (if any) listed in /etc/resolv.conf +// If more than one search line is encountered, only the contents of the last +// one is returned. +func GetSearchDomains(resolvConf []byte) []string { + domains := []string{} + for _, line := range getLines(resolvConf, []byte("#")) { + match := searchRegexp.FindSubmatch(line) + if match == nil { + continue + } + domains = strings.Fields(string(match[1])) + } + return domains +} + +// GetOptions returns options (if any) listed in /etc/resolv.conf +// If more than one options line is encountered, only the contents of the last +// one is returned. 
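+//
+// A hypothetical sketch:
+//
+//	opts := GetOptions([]byte("options ndots:1 timeout:2\n"))
+//	// opts == []string{"ndots:1", "timeout:2"}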
+func GetOptions(resolvConf []byte) []string {
+	options := []string{}
+	for _, line := range getLines(resolvConf, []byte("#")) {
+		match := optionsRegexp.FindSubmatch(line)
+		if match == nil {
+			continue
+		}
+		options = strings.Fields(string(match[1]))
+	}
+	return options
+}
+
+// Build writes a configuration file to path containing a "nameserver" entry
+// for every element in dns, a "search" entry for every element in
+// dnsSearch, and an "options" entry for every element in dnsOptions.
+func Build(path string, dns, dnsSearch, dnsOptions []string) (*File, error) {
+	content := bytes.NewBuffer(nil)
+	if len(dnsSearch) > 0 {
+		if searchString := strings.Join(dnsSearch, " "); strings.Trim(searchString, " ") != "." {
+			if _, err := content.WriteString("search " + searchString + "\n"); err != nil {
+				return nil, err
+			}
+		}
+	}
+	for _, dns := range dns {
+		if _, err := content.WriteString("nameserver " + dns + "\n"); err != nil {
+			return nil, err
+		}
+	}
+	if len(dnsOptions) > 0 {
+		if optsString := strings.Join(dnsOptions, " "); strings.Trim(optsString, " ") != "" {
+			if _, err := content.WriteString("options " + optsString + "\n"); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	hash, err := ioutils.HashData(bytes.NewReader(content.Bytes()))
+	if err != nil {
+		return nil, err
+	}
+
+	return &File{Content: content.Bytes(), Hash: hash}, ioutil.WriteFile(path, content.Bytes(), 0644)
+}
diff --git a/vendor/github.com/docker/libnetwork/resolver.go b/vendor/github.com/docker/libnetwork/resolver.go
new file mode 100644
index 0000000000..e420905350
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/resolver.go
@@ -0,0 +1,555 @@
+package libnetwork
+
+import (
+	"fmt"
+	"math/rand"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/libnetwork/types"
+	"github.com/miekg/dns"
+	"github.com/sirupsen/logrus"
+)
+
+// Resolver represents the embedded DNS server in Docker. It operates
+// by listening on the container's loopback interface for DNS queries.
+type Resolver interface {
+	// Start starts the name server for the container
+	Start() error
+	// Stop stops the name server for the container. A stopped resolver
+	// can be reused after running the SetupFunc again.
+	Stop()
+	// SetupFunc provides the setup function that should be run
+	// in the container's network namespace.
+	SetupFunc(int) func()
+	// NameServer returns the IP of the DNS resolver for the
+	// containers.
+	NameServer() string
+	// SetExtServers configures the external nameservers the resolver
+	// should use to forward queries
+	SetExtServers([]extDNSEntry)
+	// ResolverOptions returns resolv.conf options that should be set
+	ResolverOptions() []string
+}
+
+// DNSBackend represents a backend DNS resolver used for DNS name
+// resolution. All the queries to the resolver are forwarded to the
+// backend resolver.
+type DNSBackend interface {
+	// ResolveName resolves a service name to an IPv4 or IPv6 address by searching
+	// the networks the sandbox is connected to. For IPv6 queries, second return
+	// value will be true if the name exists in docker domain but doesn't have an
+	// IPv6 address. Such queries shouldn't be forwarded to external nameservers.
+	ResolveName(name string, iplen int) ([]net.IP, bool)
+	// ResolveIP returns the service name for the passed in IP. IP is in reverse dotted
+	// notation; the format used for DNS PTR records
+	ResolveIP(name string) string
+	// ResolveService returns all the backend details about the containers or hosts
+	// backing a service.
Its purpose is to satisfy an SRV query + ResolveService(name string) ([]*net.SRV, []net.IP) + // ExecFunc allows a function to be executed in the context of the backend + // on behalf of the resolver. + ExecFunc(f func()) error + //NdotsSet queries the backends ndots dns option settings + NdotsSet() bool + // HandleQueryResp passes the name & IP from a response to the backend. backend + // can use it to maintain any required state about the resolution + HandleQueryResp(name string, ip net.IP) +} + +const ( + dnsPort = "53" + ptrIPv4domain = ".in-addr.arpa." + ptrIPv6domain = ".ip6.arpa." + respTTL = 600 + maxExtDNS = 3 //max number of external servers to try + extIOTimeout = 4 * time.Second + defaultRespSize = 512 + maxConcurrent = 100 + logInterval = 2 * time.Second +) + +type extDNSEntry struct { + IPStr string + HostLoopback bool +} + +// resolver implements the Resolver interface +type resolver struct { + backend DNSBackend + extDNSList [maxExtDNS]extDNSEntry + server *dns.Server + conn *net.UDPConn + tcpServer *dns.Server + tcpListen *net.TCPListener + err error + count int32 + tStamp time.Time + queryLock sync.Mutex + listenAddress string + proxyDNS bool + resolverKey string + startCh chan struct{} +} + +func init() { + rand.Seed(time.Now().Unix()) +} + +// NewResolver creates a new instance of the Resolver +func NewResolver(address string, proxyDNS bool, resolverKey string, backend DNSBackend) Resolver { + return &resolver{ + backend: backend, + proxyDNS: proxyDNS, + listenAddress: address, + resolverKey: resolverKey, + err: fmt.Errorf("setup not done yet"), + startCh: make(chan struct{}, 1), + } +} + +func (r *resolver) SetupFunc(port int) func() { + return (func() { + var err error + + // DNS operates primarily on UDP + addr := &net.UDPAddr{ + IP: net.ParseIP(r.listenAddress), + Port: port, + } + + r.conn, err = net.ListenUDP("udp", addr) + if err != nil { + r.err = fmt.Errorf("error in opening name server socket %v", err) + return + } + + // Listen on a TCP as well + tcpaddr := &net.TCPAddr{ + IP: net.ParseIP(r.listenAddress), + Port: port, + } + + r.tcpListen, err = net.ListenTCP("tcp", tcpaddr) + if err != nil { + r.err = fmt.Errorf("error in opening name TCP server socket %v", err) + return + } + r.err = nil + }) +} + +func (r *resolver) Start() error { + r.startCh <- struct{}{} + defer func() { <-r.startCh }() + + // make sure the resolver has been setup before starting + if r.err != nil { + return r.err + } + + if err := r.setupIPTable(); err != nil { + return fmt.Errorf("setting up IP table rules failed: %v", err) + } + + s := &dns.Server{Handler: r, PacketConn: r.conn} + r.server = s + go func() { + s.ActivateAndServe() + }() + + tcpServer := &dns.Server{Handler: r, Listener: r.tcpListen} + r.tcpServer = tcpServer + go func() { + tcpServer.ActivateAndServe() + }() + return nil +} + +func (r *resolver) Stop() { + r.startCh <- struct{}{} + defer func() { <-r.startCh }() + + if r.server != nil { + r.server.Shutdown() + } + if r.tcpServer != nil { + r.tcpServer.Shutdown() + } + r.conn = nil + r.tcpServer = nil + r.err = fmt.Errorf("setup not done yet") + r.tStamp = time.Time{} + r.count = 0 + r.queryLock = sync.Mutex{} +} + +func (r *resolver) SetExtServers(extDNS []extDNSEntry) { + l := len(extDNS) + if l > maxExtDNS { + l = maxExtDNS + } + for i := 0; i < l; i++ { + r.extDNSList[i] = extDNS[i] + } +} + +func (r *resolver) NameServer() string { + return r.listenAddress +} + +func (r *resolver) ResolverOptions() []string { + return []string{"ndots:0"} +} + +func 
setCommonFlags(msg *dns.Msg) {
+	msg.RecursionAvailable = true
+}
+
+func shuffleAddr(addr []net.IP) []net.IP {
+	for i := len(addr) - 1; i > 0; i-- {
+		r := rand.Intn(i + 1)
+		addr[i], addr[r] = addr[r], addr[i]
+	}
+	return addr
+}
+
+func createRespMsg(query *dns.Msg) *dns.Msg {
+	resp := new(dns.Msg)
+	resp.SetReply(query)
+	setCommonFlags(resp)
+
+	return resp
+}
+
+func (r *resolver) handleMXQuery(name string, query *dns.Msg) (*dns.Msg, error) {
+	addrv4, _ := r.backend.ResolveName(name, types.IPv4)
+	addrv6, _ := r.backend.ResolveName(name, types.IPv6)
+
+	if addrv4 == nil && addrv6 == nil {
+		return nil, nil
+	}
+
+	// We were able to resolve the name. Respond with an empty list with
+	// RcodeSuccess/NOERROR so that email clients can treat it as "implicit MX"
+	// [RFC 5321 Section-5.1] and issue a Type A/AAAA query for the name.
+	resp := createRespMsg(query)
+	return resp, nil
+}
+
+func (r *resolver) handleIPQuery(name string, query *dns.Msg, ipType int) (*dns.Msg, error) {
+	var addr []net.IP
+	var ipv6Miss bool
+	addr, ipv6Miss = r.backend.ResolveName(name, ipType)
+
+	if addr == nil && ipv6Miss {
+		// Send a reply without any Answer sections
+		logrus.Debugf("[resolver] lookup name %s present without IPv6 address", name)
+		resp := createRespMsg(query)
+		return resp, nil
+	}
+	if addr == nil {
+		return nil, nil
+	}
+
+	logrus.Debugf("[resolver] lookup for %s: IP %v", name, addr)
+
+	resp := createRespMsg(query)
+	if len(addr) > 1 {
+		addr = shuffleAddr(addr)
+	}
+	if ipType == types.IPv4 {
+		for _, ip := range addr {
+			rr := new(dns.A)
+			rr.Hdr = dns.RR_Header{Name: name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: respTTL}
+			rr.A = ip
+			resp.Answer = append(resp.Answer, rr)
+		}
+	} else {
+		for _, ip := range addr {
+			rr := new(dns.AAAA)
+			rr.Hdr = dns.RR_Header{Name: name, Rrtype: dns.TypeAAAA, Class: dns.ClassINET, Ttl: respTTL}
+			rr.AAAA = ip
+			resp.Answer = append(resp.Answer, rr)
+		}
+	}
+	return resp, nil
+}
+
+func (r *resolver) handlePTRQuery(ptr string, query *dns.Msg) (*dns.Msg, error) {
+	parts := []string{}
+
+	if strings.HasSuffix(ptr, ptrIPv4domain) {
+		parts = strings.Split(ptr, ptrIPv4domain)
+	} else if strings.HasSuffix(ptr, ptrIPv6domain) {
+		parts = strings.Split(ptr, ptrIPv6domain)
+	} else {
+		return nil, fmt.Errorf("invalid PTR query, %v", ptr)
+	}
+
+	host := r.backend.ResolveIP(parts[0])
+
+	if len(host) == 0 {
+		return nil, nil
+	}
+
+	logrus.Debugf("[resolver] lookup for IP %s: name %s", parts[0], host)
+	fqdn := dns.Fqdn(host)
+
+	resp := new(dns.Msg)
+	resp.SetReply(query)
+	setCommonFlags(resp)
+
+	rr := new(dns.PTR)
+	rr.Hdr = dns.RR_Header{Name: ptr, Rrtype: dns.TypePTR, Class: dns.ClassINET, Ttl: respTTL}
+	rr.Ptr = fqdn
+	resp.Answer = append(resp.Answer, rr)
+	return resp, nil
+}
+
+func (r *resolver) handleSRVQuery(svc string, query *dns.Msg) (*dns.Msg, error) {
+	srv, ip := r.backend.ResolveService(svc)
+
+	if len(srv) == 0 {
+		return nil, nil
+	}
+	if len(srv) != len(ip) {
+		return nil, fmt.Errorf("invalid reply for SRV query %s", svc)
+	}
+
+	resp := createRespMsg(query)
+
+	for i, r := range srv {
+		rr := new(dns.SRV)
+		rr.Hdr = dns.RR_Header{Name: svc, Rrtype: dns.TypeSRV, Class: dns.ClassINET, Ttl: respTTL}
+		rr.Port = r.Port
+		rr.Target = r.Target
+		resp.Answer = append(resp.Answer, rr)
+
+		rr1 := new(dns.A)
+		rr1.Hdr = dns.RR_Header{Name: r.Target, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: respTTL}
+		rr1.A = ip[i]
+		resp.Extra = append(resp.Extra, rr1)
+	}
+	return resp, nil
+}
+
+func truncateResp(resp *dns.Msg, maxSize
int, isTCP bool) { + if !isTCP { + resp.Truncated = true + } + + srv := resp.Question[0].Qtype == dns.TypeSRV + // trim the Answer RRs one by one till the whole message fits + // within the reply size + for resp.Len() > maxSize { + resp.Answer = resp.Answer[:len(resp.Answer)-1] + + if srv && len(resp.Extra) > 0 { + resp.Extra = resp.Extra[:len(resp.Extra)-1] + } + } +} + +func (r *resolver) ServeDNS(w dns.ResponseWriter, query *dns.Msg) { + var ( + extConn net.Conn + resp *dns.Msg + err error + ) + + if query == nil || len(query.Question) == 0 { + return + } + name := query.Question[0].Name + + switch query.Question[0].Qtype { + case dns.TypeA: + resp, err = r.handleIPQuery(name, query, types.IPv4) + case dns.TypeAAAA: + resp, err = r.handleIPQuery(name, query, types.IPv6) + case dns.TypeMX: + resp, err = r.handleMXQuery(name, query) + case dns.TypePTR: + resp, err = r.handlePTRQuery(name, query) + case dns.TypeSRV: + resp, err = r.handleSRVQuery(name, query) + } + + if err != nil { + logrus.Error(err) + return + } + + if resp == nil { + // If the backend doesn't support proxying dns request + // fail the response + if !r.proxyDNS { + resp = new(dns.Msg) + resp.SetRcode(query, dns.RcodeServerFailure) + w.WriteMsg(resp) + return + } + + // If the user sets ndots > 0 explicitly and the query is + // in the root domain don't forward it out. We will return + // failure and let the client retry with the search domain + // attached + switch query.Question[0].Qtype { + case dns.TypeA: + fallthrough + case dns.TypeAAAA: + if r.backend.NdotsSet() && !strings.Contains(strings.TrimSuffix(name, "."), ".") { + resp = createRespMsg(query) + } + } + } + + proto := w.LocalAddr().Network() + maxSize := 0 + if proto == "tcp" { + maxSize = dns.MaxMsgSize - 1 + } else if proto == "udp" { + optRR := query.IsEdns0() + if optRR != nil { + maxSize = int(optRR.UDPSize()) + } + if maxSize < defaultRespSize { + maxSize = defaultRespSize + } + } + + if resp != nil { + if resp.Len() > maxSize { + truncateResp(resp, maxSize, proto == "tcp") + } + } else { + for i := 0; i < maxExtDNS; i++ { + extDNS := &r.extDNSList[i] + if extDNS.IPStr == "" { + break + } + extConnect := func() { + addr := fmt.Sprintf("%s:%d", extDNS.IPStr, 53) + extConn, err = net.DialTimeout(proto, addr, extIOTimeout) + } + + if extDNS.HostLoopback { + extConnect() + } else { + execErr := r.backend.ExecFunc(extConnect) + if execErr != nil { + logrus.Warn(execErr) + continue + } + } + if err != nil { + logrus.Warnf("[resolver] connect failed: %s", err) + continue + } + queryType := dns.TypeToString[query.Question[0].Qtype] + logrus.Debugf("[resolver] query %s (%s) from %s, forwarding to %s:%s", name, queryType, + extConn.LocalAddr().String(), proto, extDNS.IPStr) + + // Timeout has to be set for every IO operation. + extConn.SetDeadline(time.Now().Add(extIOTimeout)) + co := &dns.Conn{ + Conn: extConn, + UDPSize: uint16(maxSize), + } + defer co.Close() + + // limits the number of outstanding concurrent queries. 
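The guard that follows relies on forwardQueryStart/forwardQueryEnd, defined near the end of this file: together they form a mutex-guarded counter acting as a counting semaphore over forwarded queries. A minimal standalone sketch of the same pattern, under illustrative names (queryLimiter is not part of the vendored API):

```go
package main

import (
	"fmt"
	"sync"
)

// queryLimiter mirrors the forwardQueryStart/forwardQueryEnd pattern: a
// plain counter guarded by a mutex, refusing work beyond a fixed ceiling.
type queryLimiter struct {
	mu    sync.Mutex
	count int
	max   int
}

func (l *queryLimiter) start() bool {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.count == l.max {
		return false // too many outstanding queries; caller skips this server
	}
	l.count++
	return true
}

func (l *queryLimiter) end() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.count == 0 {
		fmt.Println("invalid concurrent query count") // mirrors the defensive log below
		return
	}
	l.count--
}

func main() {
	l := &queryLimiter{max: 2}
	fmt.Println(l.start(), l.start(), l.start()) // true true false
	l.end()
	fmt.Println(l.start()) // true again once a query finishes
}
```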
+ if !r.forwardQueryStart() { + old := r.tStamp + r.tStamp = time.Now() + if r.tStamp.Sub(old) > logInterval { + logrus.Errorf("[resolver] more than %v concurrent queries from %s", maxConcurrent, extConn.LocalAddr().String()) + } + continue + } + + err = co.WriteMsg(query) + if err != nil { + r.forwardQueryEnd() + logrus.Debugf("[resolver] send to DNS server failed, %s", err) + continue + } + + resp, err = co.ReadMsg() + // Truncated DNS replies should be sent to the client so that the + // client can retry over TCP + if err != nil && err != dns.ErrTruncated { + r.forwardQueryEnd() + logrus.Debugf("[resolver] read from DNS server failed, %s", err) + continue + } + r.forwardQueryEnd() + if resp != nil { + if resp.Rcode == dns.RcodeServerFailure { + // for Server Failure response, continue to the next external DNS server + logrus.Debugf("[resolver] external DNS %s:%s responded with ServFail for %q", proto, extDNS.IPStr, name) + continue + } + answers := 0 + for _, rr := range resp.Answer { + h := rr.Header() + switch h.Rrtype { + case dns.TypeA: + answers++ + ip := rr.(*dns.A).A + logrus.Debugf("[resolver] received A record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr) + r.backend.HandleQueryResp(h.Name, ip) + case dns.TypeAAAA: + answers++ + ip := rr.(*dns.AAAA).AAAA + logrus.Debugf("[resolver] received AAAA record %q for %q from %s:%s", ip, h.Name, proto, extDNS.IPStr) + r.backend.HandleQueryResp(h.Name, ip) + } + } + if resp.Answer == nil || answers == 0 { + logrus.Debugf("[resolver] external DNS %s:%s did not return any %s records for %q", proto, extDNS.IPStr, queryType, name) + } + resp.Compress = true + } else { + logrus.Debugf("[resolver] external DNS %s:%s returned empty response for %q", proto, extDNS.IPStr, name) + } + break + } + if resp == nil { + return + } + } + + if err = w.WriteMsg(resp); err != nil { + logrus.Errorf("[resolver] error writing resolver resp, %s", err) + } +} + +func (r *resolver) forwardQueryStart() bool { + r.queryLock.Lock() + defer r.queryLock.Unlock() + + if r.count == maxConcurrent { + return false + } + r.count++ + + return true +} + +func (r *resolver) forwardQueryEnd() { + r.queryLock.Lock() + defer r.queryLock.Unlock() + + if r.count == 0 { + logrus.Error("[resolver] invalid concurrent query count") + } else { + r.count-- + } +} diff --git a/vendor/github.com/docker/libnetwork/resolver_unix.go b/vendor/github.com/docker/libnetwork/resolver_unix.go new file mode 100644 index 0000000000..b35009ee74 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolver_unix.go @@ -0,0 +1,101 @@ +// +build !windows + +package libnetwork + +import ( + "fmt" + "net" + "os" + "os/exec" + "runtime" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libnetwork/iptables" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netns" +) + +func init() { + reexec.Register("setup-resolver", reexecSetupResolver) +} + +const ( + // outputChain is used for docker's embedded DNS + outputChain = "DOCKER_OUTPUT" + // postroutingchain is used for docker's embedded DNS + postroutingchain = "DOCKER_POSTROUTING" +) + +func reexecSetupResolver() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if len(os.Args) < 4 { + logrus.Error("invalid number of arguments") + os.Exit(1) + } + + resolverIP, ipPort, _ := net.SplitHostPort(os.Args[2]) + _, tcpPort, _ := net.SplitHostPort(os.Args[3]) + rules := [][]string{ + {"-t", "nat", "-I", outputChain, "-d", resolverIP, "-p", "udp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", os.Args[2]}, + {"-t", "nat", "-I",
postroutingchain, "-s", resolverIP, "-p", "udp", "--sport", ipPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, + {"-t", "nat", "-I", outputChain, "-d", resolverIP, "-p", "tcp", "--dport", dnsPort, "-j", "DNAT", "--to-destination", os.Args[3]}, + {"-t", "nat", "-I", postroutingchain, "-s", resolverIP, "-p", "tcp", "--sport", tcpPort, "-j", "SNAT", "--to-source", ":" + dnsPort}, + } + + f, err := os.OpenFile(os.Args[1], os.O_RDONLY, 0) + if err != nil { + logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err) + os.Exit(2) + } + defer f.Close() + + nsFD := f.Fd() + if err = netns.Set(netns.NsHandle(nsFD)); err != nil { + logrus.Errorf("setting into container net ns %v failed, %v", os.Args[1], err) + os.Exit(3) + } + + // insert outputChain and postroutingchain + err = iptables.RawCombinedOutputNative("-t", "nat", "-C", "OUTPUT", "-d", resolverIP, "-j", outputChain) + if err == nil { + iptables.RawCombinedOutputNative("-t", "nat", "-F", outputChain) + } else { + iptables.RawCombinedOutputNative("-t", "nat", "-N", outputChain) + iptables.RawCombinedOutputNative("-t", "nat", "-I", "OUTPUT", "-d", resolverIP, "-j", outputChain) + } + + err = iptables.RawCombinedOutputNative("-t", "nat", "-C", "POSTROUTING", "-d", resolverIP, "-j", postroutingchain) + if err == nil { + iptables.RawCombinedOutputNative("-t", "nat", "-F", postroutingchain) + } else { + iptables.RawCombinedOutputNative("-t", "nat", "-N", postroutingchain) + iptables.RawCombinedOutputNative("-t", "nat", "-I", "POSTROUTING", "-d", resolverIP, "-j", postroutingchain) + } + + for _, rule := range rules { + if iptables.RawCombinedOutputNative(rule...) != nil { + logrus.Errorf("setting up rule failed, %v", rule) + } + } +} + +func (r *resolver) setupIPTable() error { + if r.err != nil { + return r.err + } + laddr := r.conn.LocalAddr().String() + ltcpaddr := r.tcpListen.Addr().String() + + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{"setup-resolver"}, r.resolverKey, laddr, ltcpaddr), + Stdout: os.Stdout, + Stderr: os.Stderr, + } + if err := cmd.Run(); err != nil { + return fmt.Errorf("reexec failed: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/libnetwork/resolver_windows.go b/vendor/github.com/docker/libnetwork/resolver_windows.go new file mode 100644 index 0000000000..aa33b1a2ec --- /dev/null +++ b/vendor/github.com/docker/libnetwork/resolver_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package libnetwork + +func (r *resolver) setupIPTable() error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/sandbox.go b/vendor/github.com/docker/libnetwork/sandbox.go new file mode 100644 index 0000000000..d71f36cf1e --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox.go @@ -0,0 +1,1246 @@ +package libnetwork + +import ( + "container/heap" + "encoding/json" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/docker/libnetwork/etchosts" + "github.com/docker/libnetwork/netlabel" + "github.com/docker/libnetwork/osl" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +// Sandbox provides the control over the network container entity. It is a one to one mapping with the container. 
+type Sandbox interface { + // ID returns the ID of the sandbox + ID() string + // Key returns the sandbox's key + Key() string + // ContainerID returns the container id associated to this sandbox + ContainerID() string + // Labels returns the sandbox's labels + Labels() map[string]interface{} + // Statistics retrieves the interfaces' statistics for the sandbox + Statistics() (map[string]*types.InterfaceStatistics, error) + // Refresh leaves all the endpoints, resets and re-applies the options, + // re-joins all the endpoints without destroying the osl sandbox + Refresh(options ...SandboxOption) error + // SetKey updates the Sandbox Key + SetKey(key string) error + // Rename changes the name of all attached Endpoints + Rename(name string) error + // Delete destroys this container after detaching it from all connected endpoints. + Delete() error + // Endpoints returns all the endpoints connected to the sandbox + Endpoints() []Endpoint + // ResolveService returns all the backend details about the containers or hosts + // backing a service. Its purpose is to satisfy an SRV query + ResolveService(name string) ([]*net.SRV, []net.IP) + // EnableService makes a managed container's service available by adding the + // endpoint to the service load balancer and service discovery + EnableService() error + // DisableService removes a managed container's endpoints from the load balancer + // and service discovery + DisableService() error +} + +// SandboxOption is an option setter function type used to pass various options to +// NewNetContainer method. The various setter functions of type SandboxOption are +// provided by libnetwork, they look like ContainerOptionXXXX(...) +type SandboxOption func(sb *sandbox) + +func (sb *sandbox) processOptions(options ...SandboxOption) { + for _, opt := range options { + if opt != nil { + opt(sb) + } + } +} + +type epHeap []*endpoint + +type sandbox struct { + id string + containerID string + config containerConfig + extDNS []extDNSEntry + osSbox osl.Sandbox + controller *controller + resolver Resolver + resolverOnce sync.Once + refCnt int + endpoints epHeap + epPriority map[string]int + populatedEndpoints map[string]struct{} + joinLeaveDone chan struct{} + dbIndex uint64 + dbExists bool + isStub bool + inDelete bool + ingress bool + ndotsSet bool + sync.Mutex + // This mutex is used to serialize service related operation for an endpoint + // The lock is here because the endpoint is saved into the store so is not unique + Service sync.Mutex +} + +// These are the container configs used to customize container /etc/hosts file. +type hostsPathConfig struct { + hostName string + domainName string + hostsPath string + originHostsPath string + extraHosts []extraHost + parentUpdates []parentUpdate +} + +type parentUpdate struct { + cid string + name string + ip string +} + +type extraHost struct { + name string + IP string +} + +// These are the container configs used to customize container /etc/resolv.conf file. 
+type resolvConfPathConfig struct { + resolvConfPath string + originResolvConfPath string + resolvConfHashFile string + dnsList []string + dnsSearchList []string + dnsOptionsList []string +} + +type containerConfig struct { + hostsPathConfig + resolvConfPathConfig + generic map[string]interface{} + useDefaultSandBox bool + useExternalKey bool + prio int // the higher the value, the higher the priority + exposedPorts []types.TransportPort +} + +const ( + resolverIPSandbox = "127.0.0.11" +) + +func (sb *sandbox) ID() string { + return sb.id +} + +func (sb *sandbox) ContainerID() string { + return sb.containerID +} + +func (sb *sandbox) Key() string { + if sb.config.useDefaultSandBox { + return osl.GenerateKey("default") + } + return osl.GenerateKey(sb.id) +} + +func (sb *sandbox) Labels() map[string]interface{} { + sb.Lock() + defer sb.Unlock() + opts := make(map[string]interface{}, len(sb.config.generic)) + for k, v := range sb.config.generic { + opts[k] = v + } + return opts +} + +func (sb *sandbox) Statistics() (map[string]*types.InterfaceStatistics, error) { + m := make(map[string]*types.InterfaceStatistics) + + sb.Lock() + osb := sb.osSbox + sb.Unlock() + if osb == nil { + return m, nil + } + + var err error + for _, i := range osb.Info().Interfaces() { + if m[i.DstName()], err = i.Statistics(); err != nil { + return m, err + } + } + + return m, nil +} + +func (sb *sandbox) Delete() error { + return sb.delete(false) +} + +func (sb *sandbox) delete(force bool) error { + sb.Lock() + if sb.inDelete { + sb.Unlock() + return types.ForbiddenErrorf("another sandbox delete in progress") + } + // Set the inDelete flag. This will ensure that we don't + // update the store until we have completed all the endpoint + // leaves and deletes. And when endpoint leaves and deletes + // are completed, we can finally delete the sandbox object + // altogether from the data store. If the daemon exits + // ungracefully in the middle of a sandbox delete, this way we + // will have all the references to the endpoints in the + // sandbox so that we can clean them up when we restart + sb.inDelete = true + sb.Unlock() + + c := sb.controller + + // Detach from all endpoints + retain := false + for _, ep := range sb.getConnectedEndpoints() { + // gw network endpoint detach and removal are automatic + if ep.endpointInGWNetwork() && !force { + continue + } + // Retain the sandbox if we can't obtain the network from store. + if _, err := c.getNetworkFromStore(ep.getNetwork().ID()); err != nil { + if c.isDistributedControl() { + retain = true + } + logrus.Warnf("Failed getting network for ep %s during sandbox %s delete: %v", ep.ID(), sb.ID(), err) + continue + } + + if !force { + if err := ep.Leave(sb); err != nil { + logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + } + } + + if err := ep.Delete(force); err != nil { + logrus.Warnf("Failed deleting endpoint %s: %v\n", ep.ID(), err) + } + } + + if retain { + sb.Lock() + sb.inDelete = false + sb.Unlock() + return fmt.Errorf("could not clean up all the endpoints in container %s / sandbox %s", sb.containerID, sb.id) + } + // Container is going away. Path cache in etchosts is most + // likely not required any more. Drop it.
+ etchosts.Drop(sb.config.hostsPath) + + if sb.resolver != nil { + sb.resolver.Stop() + } + + if sb.osSbox != nil && !sb.config.useDefaultSandBox { + sb.osSbox.Destroy() + } + + if err := sb.storeDelete(); err != nil { + logrus.Warnf("Failed to delete sandbox %s from store: %v", sb.ID(), err) + } + + c.Lock() + if sb.ingress { + c.ingressSandbox = nil + } + delete(c.sandboxes, sb.ID()) + c.Unlock() + + return nil +} + +func (sb *sandbox) Rename(name string) error { + var err error + + for _, ep := range sb.getConnectedEndpoints() { + if ep.endpointInGWNetwork() { + continue + } + + oldName := ep.Name() + lEp := ep + if err = ep.rename(name); err != nil { + break + } + + defer func() { + if err != nil { + lEp.rename(oldName) + } + }() + } + + return err +} + +func (sb *sandbox) Refresh(options ...SandboxOption) error { + // Store connected endpoints + epList := sb.getConnectedEndpoints() + + // Detach from all endpoints + for _, ep := range epList { + if err := ep.Leave(sb); err != nil { + logrus.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err) + } + } + + // Re-apply options + sb.config = containerConfig{} + sb.processOptions(options...) + + // Setup discovery files + if err := sb.setupResolutionFiles(); err != nil { + return err + } + + // Re-connect to all endpoints + for _, ep := range epList { + if err := ep.Join(sb); err != nil { + logrus.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err) + } + } + + return nil +} + +func (sb *sandbox) MarshalJSON() ([]byte, error) { + sb.Lock() + defer sb.Unlock() + + // We are just interested in the container ID. This can be expanded to include all of containerInfo if there is a need + return json.Marshal(sb.id) +} + +func (sb *sandbox) UnmarshalJSON(b []byte) (err error) { + sb.Lock() + defer sb.Unlock() + + var id string + if err := json.Unmarshal(b, &id); err != nil { + return err + } + sb.id = id + return nil +} + +func (sb *sandbox) Endpoints() []Endpoint { + sb.Lock() + defer sb.Unlock() + + endpoints := make([]Endpoint, len(sb.endpoints)) + for i, ep := range sb.endpoints { + endpoints[i] = ep + } + return endpoints +} + +func (sb *sandbox) getConnectedEndpoints() []*endpoint { + sb.Lock() + defer sb.Unlock() + + eps := make([]*endpoint, len(sb.endpoints)) + for i, ep := range sb.endpoints { + eps[i] = ep + } + + return eps +} + +func (sb *sandbox) removeEndpoint(ep *endpoint) { + sb.Lock() + defer sb.Unlock() + + for i, e := range sb.endpoints { + if e == ep { + heap.Remove(&sb.endpoints, i) + return + } + } +} + +func (sb *sandbox) getEndpoint(id string) *endpoint { + sb.Lock() + defer sb.Unlock() + + for _, ep := range sb.endpoints { + if ep.id == id { + return ep + } + } + + return nil +} + +func (sb *sandbox) updateGateway(ep *endpoint) error { + sb.Lock() + osSbox := sb.osSbox + sb.Unlock() + if osSbox == nil { + return nil + } + osSbox.UnsetGateway() + osSbox.UnsetGatewayIPv6() + + if ep == nil { + return nil + } + + ep.Lock() + joinInfo := ep.joinInfo + ep.Unlock() + + if err := osSbox.SetGateway(joinInfo.gw); err != nil { + return fmt.Errorf("failed to set gateway while updating gateway: %v", err) + } + + if err := osSbox.SetGatewayIPv6(joinInfo.gw6); err != nil { + return fmt.Errorf("failed to set IPv6 gateway while updating gateway: %v", err) + } + + return nil +} + +func (sb *sandbox) HandleQueryResp(name string, ip net.IP) { + for _, ep := range sb.getConnectedEndpoints() { + n := ep.getNetwork() + n.HandleQueryResp(name, ip) + } +} + +func (sb *sandbox) ResolveIP(ip 
string) string { + var svc string + logrus.Debugf("IP To resolve %v", ip) + + for _, ep := range sb.getConnectedEndpoints() { + n := ep.getNetwork() + svc = n.ResolveIP(ip) + if len(svc) != 0 { + return svc + } + } + + return svc +} + +func (sb *sandbox) ExecFunc(f func()) error { + sb.Lock() + osSbox := sb.osSbox + sb.Unlock() + if osSbox != nil { + return osSbox.InvokeFunc(f) + } + return fmt.Errorf("osl sandbox unavailable in ExecFunc for %v", sb.ContainerID()) +} + +func (sb *sandbox) ResolveService(name string) ([]*net.SRV, []net.IP) { + srv := []*net.SRV{} + ip := []net.IP{} + + logrus.Debugf("Service name To resolve: %v", name) + + // There are DNS implementations that allow SRV queries for names not in + // the format defined by RFC 2782. Hence specific validation checks are + // not done + parts := strings.Split(name, ".") + if len(parts) < 3 { + return nil, nil + } + + for _, ep := range sb.getConnectedEndpoints() { + n := ep.getNetwork() + + srv, ip = n.ResolveService(name) + if len(srv) > 0 { + break + } + } + return srv, ip +} + +func getDynamicNwEndpoints(epList []*endpoint) []*endpoint { + eps := []*endpoint{} + for _, ep := range epList { + n := ep.getNetwork() + if n.dynamic && !n.ingress { + eps = append(eps, ep) + } + } + return eps +} + +func getIngressNwEndpoint(epList []*endpoint) *endpoint { + for _, ep := range epList { + n := ep.getNetwork() + if n.ingress { + return ep + } + } + return nil +} + +func getLocalNwEndpoints(epList []*endpoint) []*endpoint { + eps := []*endpoint{} + for _, ep := range epList { + n := ep.getNetwork() + if !n.dynamic && !n.ingress { + eps = append(eps, ep) + } + } + return eps +} + +func (sb *sandbox) ResolveName(name string, ipType int) ([]net.IP, bool) { + // Embedded server owns the docker network domain. Resolution should work + // for both container_name and container_name.network_name. + // We allow '.' in service name and network name. For a name a.b.c.d the + // following have to be tried: + // {a.b.c.d in the networks the container is connected to} + // {a.b.c in network d}, + // {a.b in network c.d}, + // {a in network b.c.d}, + + logrus.Debugf("Name To resolve: %v", name) + name = strings.TrimSuffix(name, ".") + reqName := []string{name} + networkName := []string{""} + + if strings.Contains(name, ".") { + var i int + dup := name + for { + if i = strings.LastIndex(dup, "."); i == -1 { + break + } + networkName = append(networkName, name[i+1:]) + reqName = append(reqName, name[:i]) + + dup = dup[:i] + } + } + + epList := sb.getConnectedEndpoints() + + // In swarm mode services with exposed ports are connected to user overlay + // network, ingress network and docker_gwbridge network. Name resolution + // should prioritize returning the VIP/IPs on user overlay network. + newList := []*endpoint{} + if !sb.controller.isDistributedControl() { + newList = append(newList, getDynamicNwEndpoints(epList)...) + ingressEP := getIngressNwEndpoint(epList) + if ingressEP != nil { + newList = append(newList, ingressEP) + } + newList = append(newList, getLocalNwEndpoints(epList)...) + epList = newList + }
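The loop that follows then walks the (name, network) candidate pairs that the label-splitting near the top of ResolveName produced. A self-contained sketch of that splitting, for a query like "a.b.c.d":

```go
package main

import (
	"fmt"
	"strings"
)

// splitNameCandidates mirrors the splitting loop in ResolveName above: for a
// query "a.b.c.d" it yields the (name, network) pairs tried in order.
func splitNameCandidates(name string) (reqName, networkName []string) {
	name = strings.TrimSuffix(name, ".")
	reqName = []string{name}
	networkName = []string{""}
	dup := name
	for {
		i := strings.LastIndex(dup, ".")
		if i == -1 {
			break
		}
		networkName = append(networkName, name[i+1:])
		reqName = append(reqName, name[:i])
		dup = dup[:i]
	}
	return reqName, networkName
}

func main() {
	names, nets := splitNameCandidates("a.b.c.d")
	for i := range names {
		fmt.Printf("try %q in network %q\n", names[i], nets[i])
	}
	// try "a.b.c.d" in network ""
	// try "a.b.c" in network "d"
	// try "a.b" in network "c.d"
	// try "a" in network "b.c.d"
}
```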
+ for i := 0; i < len(reqName); i++ { + + // First check for local container alias + ip, ipv6Miss := sb.resolveName(reqName[i], networkName[i], epList, true, ipType) + if ip != nil { + return ip, false + } + if ipv6Miss { + return ip, ipv6Miss + } + + // Resolve the actual container name + ip, ipv6Miss = sb.resolveName(reqName[i], networkName[i], epList, false, ipType) + if ip != nil { + return ip, false + } + if ipv6Miss { + return ip, ipv6Miss + } + } + return nil, false +} + +func (sb *sandbox) resolveName(req string, networkName string, epList []*endpoint, alias bool, ipType int) ([]net.IP, bool) { + var ipv6Miss bool + + for _, ep := range epList { + name := req + n := ep.getNetwork() + + if networkName != "" && networkName != n.Name() { + continue + } + + if alias { + if ep.aliases == nil { + continue + } + + var ok bool + ep.Lock() + name, ok = ep.aliases[req] + ep.Unlock() + if !ok { + continue + } + } else { + // If it is a regular lookup and if the requested name is an alias + // don't perform a svc lookup for this endpoint. + ep.Lock() + if _, ok := ep.aliases[req]; ok { + ep.Unlock() + continue + } + ep.Unlock() + } + + ip, miss := n.ResolveName(name, ipType) + + if ip != nil { + return ip, false + } + + if miss { + ipv6Miss = miss + } + } + return nil, ipv6Miss +} + +func (sb *sandbox) SetKey(basePath string) error { + start := time.Now() + defer func() { + logrus.Debugf("sandbox set key processing took %s for container %s", time.Since(start), sb.ContainerID()) + }() + + if basePath == "" { + return types.BadRequestErrorf("invalid sandbox key") + } + + sb.Lock() + if sb.inDelete { + sb.Unlock() + return types.ForbiddenErrorf("failed to SetKey: sandbox %q delete in progress", sb.id) + } + oldosSbox := sb.osSbox + sb.Unlock() + + if oldosSbox != nil { + // If we already have an OS sandbox, release the network resources from that + // and destroy the OS sandbox. We are moving into a new home further down. Note that none + // of the network resources get destroyed during the move. + sb.releaseOSSbox() + } + + osSbox, err := osl.GetSandboxForExternalKey(basePath, sb.Key()) + if err != nil { + return err + } + + sb.Lock() + sb.osSbox = osSbox + sb.Unlock() + + // If the resolver was set up before, stop it and set it up in the + // new osl sandbox.
+ if oldosSbox != nil && sb.resolver != nil { + sb.resolver.Stop() + + if err := sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err == nil { + if err := sb.resolver.Start(); err != nil { + logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) + } + } else { + logrus.Errorf("Resolver Setup Function failed for container %s, %q", sb.ContainerID(), err) + } + } + + for _, ep := range sb.getConnectedEndpoints() { + if err = sb.populateNetworkResources(ep); err != nil { + return err + } + } + return nil +} + +func (sb *sandbox) EnableService() (err error) { + logrus.Debugf("EnableService %s START", sb.containerID) + defer func() { + if err != nil { + sb.DisableService() + } + }() + for _, ep := range sb.getConnectedEndpoints() { + if !ep.isServiceEnabled() { + if err := ep.addServiceInfoToCluster(sb); err != nil { + return fmt.Errorf("could not update state for endpoint %s into cluster: %v", ep.Name(), err) + } + ep.enableService() + } + } + logrus.Debugf("EnableService %s DONE", sb.containerID) + return nil +} + +func (sb *sandbox) DisableService() (err error) { + logrus.Debugf("DisableService %s START", sb.containerID) + failedEps := []string{} + defer func() { + if len(failedEps) > 0 { + err = fmt.Errorf("failed to disable service on sandbox:%s, for endpoints %s", sb.ID(), strings.Join(failedEps, ",")) + } + }() + for _, ep := range sb.getConnectedEndpoints() { + if ep.isServiceEnabled() { + if err := ep.deleteServiceInfoFromCluster(sb, false, "DisableService"); err != nil { + failedEps = append(failedEps, ep.Name()) + logrus.Warnf("failed update state for endpoint %s into cluster: %v", ep.Name(), err) + } + ep.disableService() + } + } + logrus.Debugf("DisableService %s DONE", sb.containerID) + return nil +} + +func releaseOSSboxResources(osSbox osl.Sandbox, ep *endpoint) { + for _, i := range osSbox.Info().Interfaces() { + // Only remove the interfaces owned by this endpoint from the sandbox. + if ep.hasInterface(i.SrcName()) { + if err := i.Remove(); err != nil { + logrus.Debugf("Remove interface %s failed: %v", i.SrcName(), err) + } + } + } + + ep.Lock() + joinInfo := ep.joinInfo + vip := ep.virtualIP + ep.Unlock() + + if len(vip) != 0 { + if err := osSbox.RemoveLoopbackAliasIP(&net.IPNet{IP: vip, Mask: net.CIDRMask(32, 32)}); err != nil { + logrus.Warnf("Remove virtual IP %v failed: %v", vip, err) + } + } + + if joinInfo == nil { + return + } + + // Remove non-interface routes. 
+ for _, r := range joinInfo.StaticRoutes { + if err := osSbox.RemoveStaticRoute(r); err != nil { + logrus.Debugf("Remove route failed: %v", err) + } + } +} + +func (sb *sandbox) releaseOSSbox() { + sb.Lock() + osSbox := sb.osSbox + sb.osSbox = nil + sb.Unlock() + + if osSbox == nil { + return + } + + for _, ep := range sb.getConnectedEndpoints() { + releaseOSSboxResources(osSbox, ep) + } + + osSbox.Destroy() +} + +func (sb *sandbox) restoreOslSandbox() error { + var routes []*types.StaticRoute + + // restore osl sandbox + Ifaces := make(map[string][]osl.IfaceOption) + for _, ep := range sb.endpoints { + var ifaceOptions []osl.IfaceOption + ep.Lock() + joinInfo := ep.joinInfo + i := ep.iface + ep.Unlock() + + if i == nil { + logrus.Errorf("error restoring endpoint %s for container %s", ep.Name(), sb.ContainerID()) + continue + } + + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().Address(i.addr), sb.osSbox.InterfaceOptions().Routes(i.routes)) + if i.addrv6 != nil && i.addrv6.IP.To16() != nil { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().AddressIPv6(i.addrv6)) + } + if i.mac != nil { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().MacAddress(i.mac)) + } + if len(i.llAddrs) != 0 { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().LinkLocalAddresses(i.llAddrs)) + } + Ifaces[fmt.Sprintf("%s+%s", i.srcName, i.dstPrefix)] = ifaceOptions + if joinInfo != nil { + routes = append(routes, joinInfo.StaticRoutes...) + } + if ep.needResolver() { + sb.startResolver(true) + } + } + + gwep := sb.getGatewayEndpoint() + if gwep == nil { + return nil + } + + // restore osl sandbox + err := sb.osSbox.Restore(Ifaces, routes, gwep.joinInfo.gw, gwep.joinInfo.gw6) + return err +} + +func (sb *sandbox) populateNetworkResources(ep *endpoint) error { + sb.Lock() + if sb.osSbox == nil { + sb.Unlock() + return nil + } + inDelete := sb.inDelete + sb.Unlock() + + ep.Lock() + joinInfo := ep.joinInfo + i := ep.iface + ep.Unlock() + + if ep.needResolver() { + sb.startResolver(false) + } + + if i != nil && i.srcName != "" { + var ifaceOptions []osl.IfaceOption + + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().Address(i.addr), sb.osSbox.InterfaceOptions().Routes(i.routes)) + if i.addrv6 != nil && i.addrv6.IP.To16() != nil { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().AddressIPv6(i.addrv6)) + } + if len(i.llAddrs) != 0 { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().LinkLocalAddresses(i.llAddrs)) + } + if i.mac != nil { + ifaceOptions = append(ifaceOptions, sb.osSbox.InterfaceOptions().MacAddress(i.mac)) + } + + if err := sb.osSbox.AddInterface(i.srcName, i.dstPrefix, ifaceOptions...); err != nil { + return fmt.Errorf("failed to add interface %s to sandbox: %v", i.srcName, err) + } + } + + if len(ep.virtualIP) != 0 { + err := sb.osSbox.AddLoopbackAliasIP(&net.IPNet{IP: ep.virtualIP, Mask: net.CIDRMask(32, 32)}) + if err != nil { + return fmt.Errorf("failed to add virtual IP %v: %v", ep.virtualIP, err) + } + } + + if joinInfo != nil { + // Set up non-interface routes. + for _, r := range joinInfo.StaticRoutes { + if err := sb.osSbox.AddStaticRoute(r); err != nil { + return fmt.Errorf("failed to add static route %s: %v", r.Destination.String(), err) + } + } + } + + if ep == sb.getGatewayEndpoint() { + if err := sb.updateGateway(ep); err != nil { + return err + } + } + + // Make sure to add the endpoint to the populated endpoint set + // before populating loadbalancers. 
+ sb.Lock() + sb.populatedEndpoints[ep.ID()] = struct{}{} + sb.Unlock() + + // Populate load balancer only after updating all the other + // information including gateway and other routes so that + // loadbalancers are populated all the network state is in + // place in the sandbox. + sb.populateLoadbalancers(ep) + + // Only update the store if we did not come here as part of + // sandbox delete. If we came here as part of delete then do + // not bother updating the store. The sandbox object will be + // deleted anyway + if !inDelete { + return sb.storeUpdate() + } + + return nil +} + +func (sb *sandbox) clearNetworkResources(origEp *endpoint) error { + ep := sb.getEndpoint(origEp.id) + if ep == nil { + return fmt.Errorf("could not find the sandbox endpoint data for endpoint %s", + origEp.id) + } + + sb.Lock() + osSbox := sb.osSbox + inDelete := sb.inDelete + sb.Unlock() + if osSbox != nil { + releaseOSSboxResources(osSbox, ep) + } + + sb.Lock() + delete(sb.populatedEndpoints, ep.ID()) + + if len(sb.endpoints) == 0 { + // sb.endpoints should never be empty and this is unexpected error condition + // We log an error message to note this down for debugging purposes. + logrus.Errorf("No endpoints in sandbox while trying to remove endpoint %s", ep.Name()) + sb.Unlock() + return nil + } + + var ( + gwepBefore, gwepAfter *endpoint + index = -1 + ) + for i, e := range sb.endpoints { + if e == ep { + index = i + } + if len(e.Gateway()) > 0 && gwepBefore == nil { + gwepBefore = e + } + if index != -1 && gwepBefore != nil { + break + } + } + + if index == -1 { + logrus.Warnf("Endpoint %s has already been deleted", ep.Name()) + sb.Unlock() + return nil + } + + heap.Remove(&sb.endpoints, index) + for _, e := range sb.endpoints { + if len(e.Gateway()) > 0 { + gwepAfter = e + break + } + } + delete(sb.epPriority, ep.ID()) + sb.Unlock() + + if gwepAfter != nil && gwepBefore != gwepAfter { + sb.updateGateway(gwepAfter) + } + + // Only update the store if we did not come here as part of + // sandbox delete. If we came here as part of delete then do + // not bother updating the store. The sandbox object will be + // deleted anyway + if !inDelete { + return sb.storeUpdate() + } + + return nil +} + +func (sb *sandbox) isEndpointPopulated(ep *endpoint) bool { + sb.Lock() + _, ok := sb.populatedEndpoints[ep.ID()] + sb.Unlock() + return ok +} + +// joinLeaveStart waits to ensure there are no joins or leaves in progress and +// marks this join/leave in progress without race +func (sb *sandbox) joinLeaveStart() { + sb.Lock() + defer sb.Unlock() + + for sb.joinLeaveDone != nil { + joinLeaveDone := sb.joinLeaveDone + sb.Unlock() + + <-joinLeaveDone + + sb.Lock() + } + + sb.joinLeaveDone = make(chan struct{}) +} + +// joinLeaveEnd marks the end of this join/leave operation and +// signals the same without race to other join and leave waiters +func (sb *sandbox) joinLeaveEnd() { + sb.Lock() + defer sb.Unlock() + + if sb.joinLeaveDone != nil { + close(sb.joinLeaveDone) + sb.joinLeaveDone = nil + } +} + +func (sb *sandbox) hasPortConfigs() bool { + opts := sb.Labels() + _, hasExpPorts := opts[netlabel.ExposedPorts] + _, hasPortMaps := opts[netlabel.PortMap] + return hasExpPorts || hasPortMaps +} + +// OptionHostname function returns an option setter for hostname option to +// be passed to NewSandbox method. 
+func OptionHostname(name string) SandboxOption { + return func(sb *sandbox) { + sb.config.hostName = name + } +} + +// OptionDomainname function returns an option setter for domainname option to +// be passed to NewSandbox method. +func OptionDomainname(name string) SandboxOption { + return func(sb *sandbox) { + sb.config.domainName = name + } +} + +// OptionHostsPath function returns an option setter for hostspath option to +// be passed to NewSandbox method. +func OptionHostsPath(path string) SandboxOption { + return func(sb *sandbox) { + sb.config.hostsPath = path + } +} + +// OptionOriginHostsPath function returns an option setter for origin hosts file path +// to be passed to NewSandbox method. +func OptionOriginHostsPath(path string) SandboxOption { + return func(sb *sandbox) { + sb.config.originHostsPath = path + } +} + +// OptionExtraHost function returns an option setter for extra /etc/hosts options +// which is a name and IP as strings. +func OptionExtraHost(name string, IP string) SandboxOption { + return func(sb *sandbox) { + sb.config.extraHosts = append(sb.config.extraHosts, extraHost{name: name, IP: IP}) + } +} + +// OptionParentUpdate function returns an option setter for parent container +// which needs to update the IP address for the linked container. +func OptionParentUpdate(cid string, name, ip string) SandboxOption { + return func(sb *sandbox) { + sb.config.parentUpdates = append(sb.config.parentUpdates, parentUpdate{cid: cid, name: name, ip: ip}) + } +} + +// OptionResolvConfPath function returns an option setter for resolvconfpath option to +// be passed to net container methods. +func OptionResolvConfPath(path string) SandboxOption { + return func(sb *sandbox) { + sb.config.resolvConfPath = path + } +} + +// OptionOriginResolvConfPath function returns an option setter to set the path to the +// origin resolv.conf file to be passed to net container methods. +func OptionOriginResolvConfPath(path string) SandboxOption { + return func(sb *sandbox) { + sb.config.originResolvConfPath = path + } +} + +// OptionDNS function returns an option setter for dns entry option to +// be passed to container Create method. +func OptionDNS(dns string) SandboxOption { + return func(sb *sandbox) { + sb.config.dnsList = append(sb.config.dnsList, dns) + } +} + +// OptionDNSSearch function returns an option setter for dns search entry option to +// be passed to container Create method. +func OptionDNSSearch(search string) SandboxOption { + return func(sb *sandbox) { + sb.config.dnsSearchList = append(sb.config.dnsSearchList, search) + } +} + +// OptionDNSOptions function returns an option setter for dns options entry option to +// be passed to container Create method. +func OptionDNSOptions(options string) SandboxOption { + return func(sb *sandbox) { + sb.config.dnsOptionsList = append(sb.config.dnsOptionsList, options) + } +} + +// OptionUseDefaultSandbox function returns an option setter for using default sandbox to +// be passed to container Create method. +func OptionUseDefaultSandbox() SandboxOption { + return func(sb *sandbox) { + sb.config.useDefaultSandBox = true + } +} + +// OptionUseExternalKey function returns an option setter for using provided namespace +// instead of creating one. 
+func OptionUseExternalKey() SandboxOption { + return func(sb *sandbox) { + sb.config.useExternalKey = true + } +} + +// OptionGeneric function returns an option setter for Generic configuration +// that is not managed by libNetwork but can be used by the Drivers during the call to +// net container creation method. Container Labels are a good example. +func OptionGeneric(generic map[string]interface{}) SandboxOption { + return func(sb *sandbox) { + if sb.config.generic == nil { + sb.config.generic = make(map[string]interface{}, len(generic)) + } + for k, v := range generic { + sb.config.generic[k] = v + } + } +} + +// OptionExposedPorts function returns an option setter for the container exposed +// ports option to be passed to container Create method. +func OptionExposedPorts(exposedPorts []types.TransportPort) SandboxOption { + return func(sb *sandbox) { + if sb.config.generic == nil { + sb.config.generic = make(map[string]interface{}) + } + // Defensive copy + eps := make([]types.TransportPort, len(exposedPorts)) + copy(eps, exposedPorts) + // Store endpoint label and in generic because driver needs it + sb.config.exposedPorts = eps + sb.config.generic[netlabel.ExposedPorts] = eps + } +} + +// OptionPortMapping function returns an option setter for the mapping +// ports option to be passed to container Create method. +func OptionPortMapping(portBindings []types.PortBinding) SandboxOption { + return func(sb *sandbox) { + if sb.config.generic == nil { + sb.config.generic = make(map[string]interface{}) + } + // Store a copy of the bindings as generic data to pass to the driver + pbs := make([]types.PortBinding, len(portBindings)) + copy(pbs, portBindings) + sb.config.generic[netlabel.PortMap] = pbs + } +} + +// OptionIngress function returns an option setter for marking a +// sandbox as the controller's ingress sandbox. 
+func OptionIngress() SandboxOption { + return func(sb *sandbox) { + sb.ingress = true + } +} + +func (eh epHeap) Len() int { return len(eh) } + +func (eh epHeap) Less(i, j int) bool { + var ( + cip, cjp int + ok bool + ) + + ci, _ := eh[i].getSandbox() + cj, _ := eh[j].getSandbox() + + epi := eh[i] + epj := eh[j] + + if epi.endpointInGWNetwork() { + return false + } + + if epj.endpointInGWNetwork() { + return true + } + + if epi.getNetwork().Internal() { + return false + } + + if epj.getNetwork().Internal() { + return true + } + + if epi.joinInfo != nil && epj.joinInfo != nil { + if (epi.joinInfo.gw != nil && epi.joinInfo.gw6 != nil) && + (epj.joinInfo.gw == nil || epj.joinInfo.gw6 == nil) { + return true + } + if (epj.joinInfo.gw != nil && epj.joinInfo.gw6 != nil) && + (epi.joinInfo.gw == nil || epi.joinInfo.gw6 == nil) { + return false + } + } + + if ci != nil { + cip, ok = ci.epPriority[eh[i].ID()] + if !ok { + cip = 0 + } + } + + if cj != nil { + cjp, ok = cj.epPriority[eh[j].ID()] + if !ok { + cjp = 0 + } + } + + if cip == cjp { + return eh[i].network.Name() < eh[j].network.Name() + } + + return cip > cjp +} + +func (eh epHeap) Swap(i, j int) { eh[i], eh[j] = eh[j], eh[i] } + +func (eh *epHeap) Push(x interface{}) { + *eh = append(*eh, x.(*endpoint)) +} + +func (eh *epHeap) Pop() interface{} { + old := *eh + n := len(old) + x := old[n-1] + *eh = old[0 : n-1] + return x +} + +func (sb *sandbox) NdotsSet() bool { + return sb.ndotsSet +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go b/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go new file mode 100644 index 0000000000..c4da9272e5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go @@ -0,0 +1,420 @@ +// +build !windows + +package libnetwork + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "github.com/docker/libnetwork/etchosts" + "github.com/docker/libnetwork/resolvconf" + "github.com/docker/libnetwork/resolvconf/dns" + "github.com/docker/libnetwork/types" + "github.com/sirupsen/logrus" +) + +const ( + defaultPrefix = "/var/lib/docker/network/files" + dirPerm = 0755 + filePerm = 0644 +) + +func (sb *sandbox) startResolver(restore bool) { + sb.resolverOnce.Do(func() { + var err error + sb.resolver = NewResolver(resolverIPSandbox, true, sb.Key(), sb) + defer func() { + if err != nil { + sb.resolver = nil + } + }() + + // In the case of live restore container is already running with + // right resolv.conf contents created before. Just update the + // external DNS servers from the restored sandbox for embedded + // server to use. 
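The branch that follows implements the distinction the comment above draws between a fresh start and a live-restore. A rough, self-contained sketch of that control flow, with function values standing in for the real methods (these are not the vendored signatures):

```go
package main

import (
	"errors"
	"fmt"
)

// startFlow sketches startResolver's two paths: a fresh start rewrites the
// container's resolv.conf around the embedded server first, while a
// live-restore keeps the existing file and only re-feeds the saved external
// DNS servers before starting the resolver in the sandbox's namespace.
func startFlow(restore bool, rebuildDNS, setExtServers, start func() error) error {
	if !restore {
		if err := rebuildDNS(); err != nil {
			return fmt.Errorf("updating resolv.conf failed: %v", err)
		}
	}
	if err := setExtServers(); err != nil {
		return err
	}
	return start() // runs inside the sandbox's network namespace
}

func main() {
	ok := func() error { return nil }
	fail := func() error { return errors.New("boom") }

	fmt.Println(startFlow(false, ok, ok, ok))  // fresh start: <nil>
	fmt.Println(startFlow(true, fail, ok, ok)) // live-restore skips rebuildDNS: <nil>
}
```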
+ if !restore { + err = sb.rebuildDNS() + if err != nil { + logrus.Errorf("Updating resolv.conf failed for container %s, %q", sb.ContainerID(), err) + return + } + } + sb.resolver.SetExtServers(sb.extDNS) + + if err = sb.osSbox.InvokeFunc(sb.resolver.SetupFunc(0)); err != nil { + logrus.Errorf("Resolver Setup function failed for container %s, %q", sb.ContainerID(), err) + return + } + + if err = sb.resolver.Start(); err != nil { + logrus.Errorf("Resolver Start failed for container %s, %q", sb.ContainerID(), err) + } + }) +} + +func (sb *sandbox) setupResolutionFiles() error { + if err := sb.buildHostsFile(); err != nil { + return err + } + + if err := sb.updateParentHosts(); err != nil { + return err + } + + return sb.setupDNS() +} + +func (sb *sandbox) buildHostsFile() error { + if sb.config.hostsPath == "" { + sb.config.hostsPath = defaultPrefix + "/" + sb.id + "/hosts" + } + + dir, _ := filepath.Split(sb.config.hostsPath) + if err := createBasePath(dir); err != nil { + return err + } + + // This is for the host mode networking + if sb.config.originHostsPath != "" { + if err := copyFile(sb.config.originHostsPath, sb.config.hostsPath); err != nil && !os.IsNotExist(err) { + return types.InternalErrorf("could not copy source hosts file %s to %s: %v", sb.config.originHostsPath, sb.config.hostsPath, err) + } + return nil + } + + extraContent := make([]etchosts.Record, 0, len(sb.config.extraHosts)) + for _, extraHost := range sb.config.extraHosts { + extraContent = append(extraContent, etchosts.Record{Hosts: extraHost.name, IP: extraHost.IP}) + } + + return etchosts.Build(sb.config.hostsPath, "", sb.config.hostName, sb.config.domainName, extraContent) +} + +func (sb *sandbox) updateHostsFile(ifaceIP string) error { + if ifaceIP == "" { + return nil + } + + if sb.config.originHostsPath != "" { + return nil + } + + // User might have provided a FQDN in hostname or split it across hostname + // and domainname. We want the FQDN and the bare hostname. 
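Concretely, the next few lines derive both forms and join them into a single hosts record. A runnable sketch of the same derivation (deriveHostsEntry is a hypothetical helper, not part of the vendored code):

```go
package main

import (
	"fmt"
	"strings"
)

// deriveHostsEntry reproduces the derivation done below in updateHostsFile:
// combine hostName and domainName into an FQDN, then emit both the FQDN and
// the bare hostname so /etc/hosts resolves either form.
func deriveHostsEntry(hostName, domainName string) string {
	fqdn := hostName
	if domainName != "" {
		fqdn = fmt.Sprintf("%s.%s", hostName, domainName)
	}
	if parts := strings.SplitN(fqdn, ".", 2); len(parts) == 2 {
		return fmt.Sprintf("%s %s", fqdn, parts[0])
	}
	return fqdn
}

func main() {
	fmt.Println(deriveHostsEntry("foo", "example.com")) // "foo.example.com foo"
	fmt.Println(deriveHostsEntry("bar.local", ""))      // "bar.local bar"
}
```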
+ fqdn := sb.config.hostName + mhost := sb.config.hostName + if sb.config.domainName != "" { + fqdn = fmt.Sprintf("%s.%s", fqdn, sb.config.domainName) + } + + parts := strings.SplitN(fqdn, ".", 2) + if len(parts) == 2 { + mhost = fmt.Sprintf("%s %s", fqdn, parts[0]) + } + + extraContent := []etchosts.Record{{Hosts: mhost, IP: ifaceIP}} + + sb.addHostsEntries(extraContent) + return nil +} + +func (sb *sandbox) addHostsEntries(recs []etchosts.Record) { + if err := etchosts.Add(sb.config.hostsPath, recs); err != nil { + logrus.Warnf("Failed adding service host entries to the running container: %v", err) + } +} + +func (sb *sandbox) deleteHostsEntries(recs []etchosts.Record) { + if err := etchosts.Delete(sb.config.hostsPath, recs); err != nil { + logrus.Warnf("Failed deleting service host entries to the running container: %v", err) + } +} + +func (sb *sandbox) updateParentHosts() error { + var pSb Sandbox + + for _, update := range sb.config.parentUpdates { + sb.controller.WalkSandboxes(SandboxContainerWalker(&pSb, update.cid)) + if pSb == nil { + continue + } + if err := etchosts.Update(pSb.(*sandbox).config.hostsPath, update.ip, update.name); err != nil { + return err + } + } + + return nil +} + +func (sb *sandbox) restorePath() { + if sb.config.resolvConfPath == "" { + sb.config.resolvConfPath = defaultPrefix + "/" + sb.id + "/resolv.conf" + } + sb.config.resolvConfHashFile = sb.config.resolvConfPath + ".hash" + if sb.config.hostsPath == "" { + sb.config.hostsPath = defaultPrefix + "/" + sb.id + "/hosts" + } +} + +func (sb *sandbox) setExternalResolvers(content []byte, addrType int, checkLoopback bool) { + servers := resolvconf.GetNameservers(content, addrType) + for _, ip := range servers { + hostLoopback := false + if checkLoopback { + hostLoopback = dns.IsIPv4Localhost(ip) + } + sb.extDNS = append(sb.extDNS, extDNSEntry{ + IPStr: ip, + HostLoopback: hostLoopback, + }) + } +} + +func (sb *sandbox) setupDNS() error { + var newRC *resolvconf.File + + if sb.config.resolvConfPath == "" { + sb.config.resolvConfPath = defaultPrefix + "/" + sb.id + "/resolv.conf" + } + + sb.config.resolvConfHashFile = sb.config.resolvConfPath + ".hash" + + dir, _ := filepath.Split(sb.config.resolvConfPath) + if err := createBasePath(dir); err != nil { + return err + } + + // This is for the host mode networking + if sb.config.originResolvConfPath != "" { + if err := copyFile(sb.config.originResolvConfPath, sb.config.resolvConfPath); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err) + } + logrus.Infof("%s does not exist, we create an empty resolv.conf for container", sb.config.originResolvConfPath) + if err := createFile(sb.config.resolvConfPath); err != nil { + return err + } + } + return nil + } + + currRC, err := resolvconf.Get() + if err != nil { + if !os.IsNotExist(err) { + return err + } + // it's ok to continue if /etc/resolv.conf doesn't exist, default resolvers (Google's Public DNS) + // will be used + currRC = &resolvconf.File{} + logrus.Infof("/etc/resolv.conf does not exist") + } + + if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 { + var ( + err error + dnsList = resolvconf.GetNameservers(currRC.Content, types.IP) + dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) + dnsOptionsList = resolvconf.GetOptions(currRC.Content) + ) + if len(sb.config.dnsList) > 0 { + dnsList = sb.config.dnsList + } + if 
len(sb.config.dnsSearchList) > 0 { + dnsSearchList = sb.config.dnsSearchList + } + if len(sb.config.dnsOptionsList) > 0 { + dnsOptionsList = sb.config.dnsOptionsList + } + newRC, err = resolvconf.Build(sb.config.resolvConfPath, dnsList, dnsSearchList, dnsOptionsList) + if err != nil { + return err + } + // After building the resolv.conf from the user config save the + // external resolvers in the sandbox. Note that --dns 127.0.0.x + // config refers to the loopback in the container namespace + sb.setExternalResolvers(newRC.Content, types.IPv4, false) + } else { + // If the host resolv.conf file has 127.0.0.x, the container should + // use the host resolver for queries. This is supported by the + // docker embedded DNS server. Hence save the external resolvers + // before filtering it out. + sb.setExternalResolvers(currRC.Content, types.IPv4, true) + + // Replace any localhost/127.* (at this point we have no info about ipv6, pass it as true) + if newRC, err = resolvconf.FilterResolvDNS(currRC.Content, true); err != nil { + return err + } + // No contention on container resolv.conf file at sandbox creation + if err := ioutil.WriteFile(sb.config.resolvConfPath, newRC.Content, filePerm); err != nil { + return types.InternalErrorf("failed to write unaltered resolv.conf file content when setting up dns for sandbox %s: %v", sb.ID(), err) + } + } + + // Write hash + if err := ioutil.WriteFile(sb.config.resolvConfHashFile, []byte(newRC.Hash), filePerm); err != nil { + return types.InternalErrorf("failed to write resolv.conf hash file when setting up dns for sandbox %s: %v", sb.ID(), err) + } + + return nil +} + +func (sb *sandbox) updateDNS(ipv6Enabled bool) error { + var ( + currHash string + hashFile = sb.config.resolvConfHashFile + ) + + // This is for the host mode networking + if sb.config.originResolvConfPath != "" { + return nil + } + + if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 { + return nil + } + + currRC, err := resolvconf.GetSpecific(sb.config.resolvConfPath) + if err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + h, err := ioutil.ReadFile(hashFile) + if err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + currHash = string(h) + } + } + + if currHash != "" && currHash != currRC.Hash { + // Seems the user has changed the container resolv.conf since the last time + // we checked, so return without doing anything. + //logrus.Infof("Skipping update of resolv.conf file with ipv6Enabled: %t because file was touched by user", ipv6Enabled) + return nil + } + + // replace any localhost/127.* and remove IPv6 nameservers if IPv6 disabled. + newRC, err := resolvconf.FilterResolvDNS(currRC.Content, ipv6Enabled) + if err != nil { + return err + } + err = ioutil.WriteFile(sb.config.resolvConfPath, newRC.Content, 0644) + if err != nil { + return err + } + + // write the new hash in a temp file and rename it to make the update atomic + dir := path.Dir(sb.config.resolvConfPath) + tmpHashFile, err := ioutil.TempFile(dir, "hash") + if err != nil { + return err + } + if err = tmpHashFile.Chmod(filePerm); err != nil { + tmpHashFile.Close() + return err + } + _, err = tmpHashFile.Write([]byte(newRC.Hash)) + if err1 := tmpHashFile.Close(); err == nil { + err = err1 + } + if err != nil { + return err + } + return os.Rename(tmpHashFile.Name(), hashFile) +} + +// Embedded DNS server has to be enabled for this sandbox.
Rebuild the container's +// resolv.conf by doing the following +// - Add only the embedded server's IP to container's resolv.conf +// - If the embedded server needs any resolv.conf options add it to the current list +func (sb *sandbox) rebuildDNS() error { + currRC, err := resolvconf.GetSpecific(sb.config.resolvConfPath) + if err != nil { + return err + } + + if len(sb.extDNS) == 0 { + sb.setExternalResolvers(currRC.Content, types.IPv4, false) + } + var ( + dnsList = []string{sb.resolver.NameServer()} + dnsOptionsList = resolvconf.GetOptions(currRC.Content) + dnsSearchList = resolvconf.GetSearchDomains(currRC.Content) + ) + + // external v6 DNS servers has to be listed in resolv.conf + dnsList = append(dnsList, resolvconf.GetNameservers(currRC.Content, types.IPv6)...) + + // If the user config and embedded DNS server both have ndots option set, + // remember the user's config so that unqualified names not in the docker + // domain can be dropped. + resOptions := sb.resolver.ResolverOptions() + +dnsOpt: + for _, resOpt := range resOptions { + if strings.Contains(resOpt, "ndots") { + for _, option := range dnsOptionsList { + if strings.Contains(option, "ndots") { + parts := strings.Split(option, ":") + if len(parts) != 2 { + return fmt.Errorf("invalid ndots option %v", option) + } + if num, err := strconv.Atoi(parts[1]); err != nil { + return fmt.Errorf("invalid number for ndots option %v", option) + } else if num > 0 { + // if the user sets ndots, use the user setting + sb.ndotsSet = true + break dnsOpt + } + } + } + } + } + + if !sb.ndotsSet { + // if the user did not set the ndots, set it to 0 to prioritize the service name resolution + // Ref: https://linux.die.net/man/5/resolv.conf + dnsOptionsList = append(dnsOptionsList, resOptions...) + } + + _, err = resolvconf.Build(sb.config.resolvConfPath, dnsList, dnsSearchList, dnsOptionsList) + return err +} + +func createBasePath(dir string) error { + return os.MkdirAll(dir, dirPerm) +} + +func createFile(path string) error { + var f *os.File + + dir, _ := filepath.Split(path) + err := createBasePath(dir) + if err != nil { + return err + } + + f, err = os.Create(path) + if err == nil { + f.Close() + } + + return err +} + +func copyFile(src, dst string) error { + sBytes, err := ioutil.ReadFile(src) + if err != nil { + return err + } + return ioutil.WriteFile(dst, sBytes, filePerm) +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_dns_windows.go b/vendor/github.com/docker/libnetwork/sandbox_dns_windows.go new file mode 100644 index 0000000000..e1ca73edef --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_dns_windows.go @@ -0,0 +1,35 @@ +// +build windows + +package libnetwork + +import ( + "github.com/docker/libnetwork/etchosts" +) + +// Stub implementations for DNS related functions + +func (sb *sandbox) startResolver(bool) { +} + +func (sb *sandbox) setupResolutionFiles() error { + return nil +} + +func (sb *sandbox) restorePath() { +} + +func (sb *sandbox) updateHostsFile(ifaceIP string) error { + return nil +} + +func (sb *sandbox) addHostsEntries(recs []etchosts.Record) { + +} + +func (sb *sandbox) deleteHostsEntries(recs []etchosts.Record) { + +} + +func (sb *sandbox) updateDNS(ipv6Enabled bool) error { + return nil +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey.go new file mode 100644 index 0000000000..3c362f30d6 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey.go @@ -0,0 +1,12 @@ +package libnetwork + 
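The file beginning here only registers the libnetwork-setkey reexec entry point; the Unix implementation further below carries the two halves of the exchange: the daemon listens on a per-controller unix socket (startExternalKeyListener), and SetExternalKey dials it, sends setKeyData as JSON, and expects the literal string "success" back. A self-contained sketch of that handshake; the socket path and type names here are illustrative stand-ins:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
)

// keyMsg mirrors setKeyData: the container whose sandbox should adopt the
// network-namespace path carried in Key.
type keyMsg struct {
	ContainerID string
	Key         string
}

func main() {
	const sock = "/tmp/libnetwork-demo.sock" // daemon uses /run/docker/libnetwork/<controller-id>.sock
	os.Remove(sock)
	l, err := net.Listen("unix", sock)
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// Daemon side: read one JSON message, apply it, answer "success".
	go func() {
		conn, _ := l.Accept()
		defer conn.Close()
		buf := make([]byte, 1280)
		n, _ := conn.Read(buf)
		var m keyMsg
		if err := json.Unmarshal(buf[:n], &m); err == nil {
			fmt.Printf("would call sandbox.SetKey(%q) for %s\n", m.Key, m.ContainerID)
		}
		conn.Write([]byte("success"))
	}()

	// Client side: what SetExternalKey does after the runc prestart hook fires.
	c, err := net.Dial("unix", sock)
	if err != nil {
		panic(err)
	}
	defer c.Close()
	b, _ := json.Marshal(keyMsg{ContainerID: "cid123", Key: "/proc/4242/ns/net"})
	c.Write(b)
	reply := make([]byte, 32)
	n, _ := c.Read(reply)
	fmt.Println("daemon replied:", string(reply[:n]))
}
```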
+import "github.com/docker/docker/pkg/reexec" + +type setKeyData struct { + ContainerID string + Key string +} + +func init() { + reexec.Register("libnetwork-setkey", processSetKeyReexec) +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go new file mode 100644 index 0000000000..f4c4276848 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey_unix.go @@ -0,0 +1,176 @@ +// +build linux freebsd + +package libnetwork + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "os" + + "github.com/docker/libnetwork/types" + "github.com/opencontainers/runc/libcontainer/configs" + "github.com/sirupsen/logrus" +) + +const udsBase = "/run/docker/libnetwork/" +const success = "success" + +// processSetKeyReexec is a private function that must be called only on an reexec path +// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } +// It also expects configs.HookState as a json string in +// Refer to https://github.com/opencontainers/runc/pull/160/ for more information +func processSetKeyReexec() { + var err error + + // Return a failure to the calling process via ExitCode + defer func() { + if err != nil { + logrus.Fatalf("%v", err) + } + }() + + // expecting 3 args {[0]="libnetwork-setkey", [1]=, [2]= } + if len(os.Args) < 3 { + err = fmt.Errorf("Re-exec expects 3 args, received : %d", len(os.Args)) + return + } + containerID := os.Args[1] + + // We expect configs.HookState as a json string in + stateBuf, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return + } + var state configs.HookState + if err = json.Unmarshal(stateBuf, &state); err != nil { + return + } + + controllerID := os.Args[2] + + err = SetExternalKey(controllerID, containerID, fmt.Sprintf("/proc/%d/ns/net", state.Pid)) +} + +// SetExternalKey provides a convenient way to set an External key to a sandbox +func SetExternalKey(controllerID string, containerID string, key string) error { + keyData := setKeyData{ + ContainerID: containerID, + Key: key} + + c, err := net.Dial("unix", udsBase+controllerID+".sock") + if err != nil { + return err + } + defer c.Close() + + if err = sendKey(c, keyData); err != nil { + return fmt.Errorf("sendKey failed with : %v", err) + } + return processReturn(c) +} + +func sendKey(c net.Conn, data setKeyData) error { + var err error + defer func() { + if err != nil { + c.Close() + } + }() + + var b []byte + if b, err = json.Marshal(data); err != nil { + return err + } + + _, err = c.Write(b) + return err +} + +func processReturn(r io.Reader) error { + buf := make([]byte, 1024) + n, err := r.Read(buf[:]) + if err != nil { + return fmt.Errorf("failed to read buf in processReturn : %v", err) + } + if string(buf[0:n]) != success { + return fmt.Errorf(string(buf[0:n])) + } + return nil +} + +func (c *controller) startExternalKeyListener() error { + if err := os.MkdirAll(udsBase, 0600); err != nil { + return err + } + uds := udsBase + c.id + ".sock" + l, err := net.Listen("unix", uds) + if err != nil { + return err + } + if err := os.Chmod(uds, 0600); err != nil { + l.Close() + return err + } + c.Lock() + c.extKeyListener = l + c.Unlock() + + go c.acceptClientConnections(uds, l) + return nil +} + +func (c *controller) acceptClientConnections(sock string, l net.Listener) { + for { + conn, err := l.Accept() + if err != nil { + if _, err1 := os.Stat(sock); os.IsNotExist(err1) { + logrus.Debugf("Unix socket %s doesn't exist. 
cannot accept client connections", sock) + return + } + logrus.Errorf("Error accepting connection %v", err) + continue + } + go func() { + defer conn.Close() + + err := c.processExternalKey(conn) + ret := success + if err != nil { + ret = err.Error() + } + + _, err = conn.Write([]byte(ret)) + if err != nil { + logrus.Errorf("Error returning to the client %v", err) + } + }() + } +} + +func (c *controller) processExternalKey(conn net.Conn) error { + buf := make([]byte, 1280) + nr, err := conn.Read(buf) + if err != nil { + return err + } + var s setKeyData + if err = json.Unmarshal(buf[0:nr], &s); err != nil { + return err + } + + var sandbox Sandbox + search := SandboxContainerWalker(&sandbox, s.ContainerID) + c.WalkSandboxes(search) + if sandbox == nil { + return types.BadRequestErrorf("no sandbox present for %s", s.ContainerID) + } + + return sandbox.SetKey(s.Key) +} + +func (c *controller) stopExternalKeyListener() { + c.extKeyListener.Close() +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go b/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go new file mode 100644 index 0000000000..340cd1735f --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_externalkey_windows.go @@ -0,0 +1,45 @@ +// +build windows + +package libnetwork + +import ( + "io" + "net" + + "github.com/docker/libnetwork/types" +) + +// processSetKeyReexec is a private function that must be called only on an reexec path +// It expects 3 args { [0] = "libnetwork-setkey", [1] = , [2] = } +// It also expects configs.HookState as a json string in +// Refer to https://github.com/opencontainers/runc/pull/160/ for more information +func processSetKeyReexec() { +} + +// SetExternalKey provides a convenient way to set an External key to a sandbox +func SetExternalKey(controllerID string, containerID string, key string) error { + return types.NotImplementedErrorf("SetExternalKey isn't supported on non linux systems") +} + +func sendKey(c net.Conn, data setKeyData) error { + return types.NotImplementedErrorf("sendKey isn't supported on non linux systems") +} + +func processReturn(r io.Reader) error { + return types.NotImplementedErrorf("processReturn isn't supported on non linux systems") +} + +// no-op on non linux systems +func (c *controller) startExternalKeyListener() error { + return nil +} + +func (c *controller) acceptClientConnections(sock string, l net.Listener) { +} + +func (c *controller) processExternalKey(conn net.Conn) error { + return types.NotImplementedErrorf("processExternalKey isn't supported on non linux systems") +} + +func (c *controller) stopExternalKeyListener() { +} diff --git a/vendor/github.com/docker/libnetwork/sandbox_store.go b/vendor/github.com/docker/libnetwork/sandbox_store.go new file mode 100644 index 0000000000..a083644598 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/sandbox_store.go @@ -0,0 +1,305 @@ +package libnetwork + +import ( + "container/heap" + "encoding/json" + "sync" + + "github.com/docker/libnetwork/datastore" + "github.com/docker/libnetwork/osl" + "github.com/sirupsen/logrus" +) + +const ( + sandboxPrefix = "sandbox" +) + +type epState struct { + Eid string + Nid string +} + +type sbState struct { + ID string + Cid string + c *controller + dbIndex uint64 + dbExists bool + Eps []epState + EpPriority map[string]int + // external servers have to be persisted so that on restart of a live-restore + // enabled daemon we get the external servers for the running containers. 
+ // We have two versions of ExtDNS to support upgrade & downgrade of the daemon + // between >=1.14 and <1.14 versions. + ExtDNS []string + ExtDNS2 []extDNSEntry +} + +func (sbs *sbState) Key() []string { + return []string{sandboxPrefix, sbs.ID} +} + +func (sbs *sbState) KeyPrefix() []string { + return []string{sandboxPrefix} +} + +func (sbs *sbState) Value() []byte { + b, err := json.Marshal(sbs) + if err != nil { + return nil + } + return b +} + +func (sbs *sbState) SetValue(value []byte) error { + return json.Unmarshal(value, sbs) +} + +func (sbs *sbState) Index() uint64 { + sbi, err := sbs.c.SandboxByID(sbs.ID) + if err != nil { + return sbs.dbIndex + } + + sb := sbi.(*sandbox) + maxIndex := sb.dbIndex + if sbs.dbIndex > maxIndex { + maxIndex = sbs.dbIndex + } + + return maxIndex +} + +func (sbs *sbState) SetIndex(index uint64) { + sbs.dbIndex = index + sbs.dbExists = true + + sbi, err := sbs.c.SandboxByID(sbs.ID) + if err != nil { + return + } + + sb := sbi.(*sandbox) + sb.dbIndex = index + sb.dbExists = true +} + +func (sbs *sbState) Exists() bool { + if sbs.dbExists { + return sbs.dbExists + } + + sbi, err := sbs.c.SandboxByID(sbs.ID) + if err != nil { + return false + } + + sb := sbi.(*sandbox) + return sb.dbExists +} + +func (sbs *sbState) Skip() bool { + return false +} + +func (sbs *sbState) New() datastore.KVObject { + return &sbState{c: sbs.c} +} + +func (sbs *sbState) CopyTo(o datastore.KVObject) error { + dstSbs := o.(*sbState) + dstSbs.c = sbs.c + dstSbs.ID = sbs.ID + dstSbs.Cid = sbs.Cid + dstSbs.dbIndex = sbs.dbIndex + dstSbs.dbExists = sbs.dbExists + dstSbs.EpPriority = sbs.EpPriority + + dstSbs.Eps = append(dstSbs.Eps, sbs.Eps...) + + if len(sbs.ExtDNS2) > 0 { + for _, dns := range sbs.ExtDNS2 { + dstSbs.ExtDNS2 = append(dstSbs.ExtDNS2, dns) + dstSbs.ExtDNS = append(dstSbs.ExtDNS, dns.IPStr) + } + return nil + } + for _, dns := range sbs.ExtDNS { + dstSbs.ExtDNS = append(dstSbs.ExtDNS, dns) + dstSbs.ExtDNS2 = append(dstSbs.ExtDNS2, extDNSEntry{IPStr: dns}) + } + + return nil +} + +func (sbs *sbState) DataScope() string { + return datastore.LocalScope +} + +func (sb *sandbox) storeUpdate() error { + sbs := &sbState{ + c: sb.controller, + ID: sb.id, + Cid: sb.containerID, + EpPriority: sb.epPriority, + ExtDNS2: sb.extDNS, + } + + for _, ext := range sb.extDNS { + sbs.ExtDNS = append(sbs.ExtDNS, ext.IPStr) + } + +retry: + sbs.Eps = nil + for _, ep := range sb.getConnectedEndpoints() { + // If the endpoint is not persisted then do not add it to + // the sandbox checkpoint + if ep.Skip() { + continue + } + + eps := epState{ + Nid: ep.getNetwork().ID(), + Eid: ep.ID(), + } + + sbs.Eps = append(sbs.Eps, eps) + } + + err := sb.controller.updateToStore(sbs) + if err == datastore.ErrKeyModified { + // When we get ErrKeyModified it is sufficient to just + // go back and retry. 
No need to get the object from + // the store because we always regenerate the store + // state from in memory sandbox state + goto retry + } + + return err +} + +func (sb *sandbox) storeDelete() error { + sbs := &sbState{ + c: sb.controller, + ID: sb.id, + Cid: sb.containerID, + dbIndex: sb.dbIndex, + dbExists: sb.dbExists, + } + + return sb.controller.deleteFromStore(sbs) +} + +func (c *controller) sandboxCleanup(activeSandboxes map[string]interface{}) { + store := c.getStore(datastore.LocalScope) + if store == nil { + logrus.Error("Could not find local scope store while trying to cleanup sandboxes") + return + } + + kvol, err := store.List(datastore.Key(sandboxPrefix), &sbState{c: c}) + if err != nil && err != datastore.ErrKeyNotFound { + logrus.Errorf("failed to get sandboxes for scope %s: %v", store.Scope(), err) + return + } + + // It's normal for no sandboxes to be found. Just bail out. + if err == datastore.ErrKeyNotFound { + return + } + + for _, kvo := range kvol { + sbs := kvo.(*sbState) + + sb := &sandbox{ + id: sbs.ID, + controller: sbs.c, + containerID: sbs.Cid, + endpoints: epHeap{}, + populatedEndpoints: map[string]struct{}{}, + dbIndex: sbs.dbIndex, + isStub: true, + dbExists: true, + } + // If we are restoring from a older version extDNSEntry won't have the + // HostLoopback field + if len(sbs.ExtDNS2) > 0 { + sb.extDNS = sbs.ExtDNS2 + } else { + for _, dns := range sbs.ExtDNS { + sb.extDNS = append(sb.extDNS, extDNSEntry{IPStr: dns}) + } + } + + msg := " for cleanup" + create := true + isRestore := false + if val, ok := activeSandboxes[sb.ID()]; ok { + msg = "" + sb.isStub = false + isRestore = true + opts := val.([]SandboxOption) + sb.processOptions(opts...) + sb.restorePath() + create = !sb.config.useDefaultSandBox + heap.Init(&sb.endpoints) + } + sb.osSbox, err = osl.NewSandbox(sb.Key(), create, isRestore) + if err != nil { + logrus.Errorf("failed to create osl sandbox while trying to restore sandbox %s%s: %v", sb.ID()[0:7], msg, err) + continue + } + + c.Lock() + c.sandboxes[sb.id] = sb + c.Unlock() + + for _, eps := range sbs.Eps { + n, err := c.getNetworkFromStore(eps.Nid) + var ep *endpoint + if err != nil { + logrus.Errorf("getNetworkFromStore for nid %s failed while trying to build sandbox for cleanup: %v", eps.Nid, err) + n = &network{id: eps.Nid, ctrlr: c, drvOnce: &sync.Once{}, persist: true} + ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} + } else { + ep, err = n.getEndpointFromStore(eps.Eid) + if err != nil { + logrus.Errorf("getEndpointFromStore for eid %s failed while trying to build sandbox for cleanup: %v", eps.Eid, err) + ep = &endpoint{id: eps.Eid, network: n, sandboxID: sbs.ID} + } + } + if _, ok := activeSandboxes[sb.ID()]; ok && err != nil { + logrus.Errorf("failed to restore endpoint %s in %s for container %s due to %v", eps.Eid, eps.Nid, sb.ContainerID(), err) + continue + } + heap.Push(&sb.endpoints, ep) + } + + if _, ok := activeSandboxes[sb.ID()]; !ok { + logrus.Infof("Removing stale sandbox %s (%s)", sb.id, sb.containerID) + if err := sb.delete(true); err != nil { + logrus.Errorf("Failed to delete sandbox %s while trying to cleanup: %v", sb.id, err) + } + continue + } + + // reconstruct osl sandbox field + if !sb.config.useDefaultSandBox { + if err := sb.restoreOslSandbox(); err != nil { + logrus.Errorf("failed to populate fields for osl sandbox %s", sb.ID()) + continue + } + } else { + c.sboxOnce.Do(func() { + c.defOsSbox = sb.osSbox + }) + } + + for _, ep := range sb.endpoints { + // Watch for service records + if !c.isAgent() 
{ + c.watchSvcRecord(ep) + } + } + } +} diff --git a/vendor/github.com/docker/libnetwork/service.go b/vendor/github.com/docker/libnetwork/service.go new file mode 100644 index 0000000000..02bcdb1884 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/service.go @@ -0,0 +1,98 @@ +package libnetwork + +import ( + "fmt" + "net" + "sync" + + "github.com/docker/libnetwork/common" +) + +var ( + // A global monotonic counter to assign firewall marks to + // services. + fwMarkCtr uint32 = 256 + fwMarkCtrMu sync.Mutex +) + +type portConfigs []*PortConfig + +func (p portConfigs) String() string { + if len(p) == 0 { + return "" + } + + pc := p[0] + str := fmt.Sprintf("%d:%d/%s", pc.PublishedPort, pc.TargetPort, PortConfig_Protocol_name[int32(pc.Protocol)]) + for _, pc := range p[1:] { + str = str + fmt.Sprintf(",%d:%d/%s", pc.PublishedPort, pc.TargetPort, PortConfig_Protocol_name[int32(pc.Protocol)]) + } + + return str +} + +type serviceKey struct { + id string + ports string +} + +type service struct { + name string // Service Name + id string // Service ID + + // Map of loadbalancers for the service one-per attached + // network. It is keyed with network ID. + loadBalancers map[string]*loadBalancer + + // List of ingress ports exposed by the service + ingressPorts portConfigs + + // Service aliases + aliases []string + + // This maps tracks for each IP address the list of endpoints ID + // associated with it. At stable state the endpoint ID expected is 1 + // but during transition and service change it is possible to have + // temporary more than 1 + ipToEndpoint common.SetMatrix + + deleted bool + + sync.Mutex +} + +// assignIPToEndpoint inserts the mapping between the IP and the endpoint identifier +// returns true if the mapping was not present, false otherwise +// returns also the number of endpoints associated to the IP +func (s *service) assignIPToEndpoint(ip, eID string) (bool, int) { + return s.ipToEndpoint.Insert(ip, eID) +} + +// removeIPToEndpoint removes the mapping between the IP and the endpoint identifier +// returns true if the mapping was deleted, false otherwise +// returns also the number of endpoints associated to the IP +func (s *service) removeIPToEndpoint(ip, eID string) (bool, int) { + return s.ipToEndpoint.Remove(ip, eID) +} + +func (s *service) printIPToEndpoint(ip string) (string, bool) { + return s.ipToEndpoint.String(ip) +} + +type lbBackend struct { + ip net.IP + disabled bool +} + +type loadBalancer struct { + vip net.IP + fwMark uint32 + + // Map of backend IPs backing this loadbalancer on this + // network. It is keyed with endpoint ID. + backEnds map[string]*lbBackend + + // Back pointer to service to which the loadbalancer belongs. 
+ service *service + sync.Mutex +} diff --git a/vendor/github.com/docker/libnetwork/service_common.go b/vendor/github.com/docker/libnetwork/service_common.go new file mode 100644 index 0000000000..8be2c38528 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/service_common.go @@ -0,0 +1,395 @@ +// +build linux windows + +package libnetwork + +import ( + "net" + + "github.com/docker/libnetwork/common" + "github.com/sirupsen/logrus" +) + +const maxSetStringLen = 350 + +func (c *controller) addEndpointNameResolution(svcName, svcID, nID, eID, containerName string, vip net.IP, serviceAliases, taskAliases []string, ip net.IP, addService bool, method string) error { + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + + logrus.Debugf("addEndpointNameResolution %s %s add_service:%t sAliases:%v tAliases:%v", eID, svcName, addService, serviceAliases, taskAliases) + + // Add container resolution mappings + c.addContainerNameResolution(nID, eID, containerName, taskAliases, ip, method) + + serviceID := svcID + if serviceID == "" { + // This is the case of a normal container not part of a service + serviceID = eID + } + + // Add endpoint IP to special "tasks.svc_name" so that the applications have access to DNS RR. + n.(*network).addSvcRecords(eID, "tasks."+svcName, serviceID, ip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).addSvcRecords(eID, "tasks."+alias, serviceID, ip, nil, false, method) + } + + // Add service name to vip in DNS, if vip is valid. Otherwise resort to DNS RR + if len(vip) == 0 { + n.(*network).addSvcRecords(eID, svcName, serviceID, ip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).addSvcRecords(eID, alias, serviceID, ip, nil, false, method) + } + } + + if addService && len(vip) != 0 { + n.(*network).addSvcRecords(eID, svcName, serviceID, vip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).addSvcRecords(eID, alias, serviceID, vip, nil, false, method) + } + } + + return nil +} + +func (c *controller) addContainerNameResolution(nID, eID, containerName string, taskAliases []string, ip net.IP, method string) error { + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + logrus.Debugf("addContainerNameResolution %s %s", eID, containerName) + + // Add resolution for container name + n.(*network).addSvcRecords(eID, containerName, eID, ip, nil, true, method) + + // Add resolution for taskaliases + for _, alias := range taskAliases { + n.(*network).addSvcRecords(eID, alias, eID, ip, nil, true, method) + } + + return nil +} + +func (c *controller) deleteEndpointNameResolution(svcName, svcID, nID, eID, containerName string, vip net.IP, serviceAliases, taskAliases []string, ip net.IP, rmService, multipleEntries bool, method string) error { + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + + logrus.Debugf("deleteEndpointNameResolution %s %s rm_service:%t suppress:%t sAliases:%v tAliases:%v", eID, svcName, rmService, multipleEntries, serviceAliases, taskAliases) + + // Delete container resolution mappings + c.delContainerNameResolution(nID, eID, containerName, taskAliases, ip, method) + + serviceID := svcID + if serviceID == "" { + // This is the case of a normal container not part of a service + serviceID = eID + } + + // Delete the special "tasks.svc_name" backend record. 
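To make the record layout maintained by these helpers concrete: every task always gets a "tasks.<service>" round-robin entry, while the bare service name resolves to the VIP when one exists and degrades to round-robin over task IPs otherwise. A hedged sketch of that split with a stand-in record table; the real code goes through (*network).addSvcRecords, also handles aliases, and gates the VIP record on addService:

package main

import (
	"fmt"
	"net"
)

// records is a stand-in for the network's service-record tables.
type records map[string][]net.IP

// addServiceRecords mirrors the VIP-vs-DNS-RR decision in
// addEndpointNameResolution: tasks.<name> always accumulates task IPs,
// while <name> maps to the VIP when set and to task IPs otherwise.
func (r records) addServiceRecords(svcName string, vip, taskIP net.IP) {
	r["tasks."+svcName] = append(r["tasks."+svcName], taskIP)
	if len(vip) != 0 {
		r[svcName] = []net.IP{vip}
		return
	}
	r[svcName] = append(r[svcName], taskIP)
}

func main() {
	r := records{}
	r.addServiceRecords("web", net.ParseIP("10.0.0.2"), net.ParseIP("10.0.0.3"))
	fmt.Println(r) // map[tasks.web:[10.0.0.3] web:[10.0.0.2]]
}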
+ if !multipleEntries { + n.(*network).deleteSvcRecords(eID, "tasks."+svcName, serviceID, ip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).deleteSvcRecords(eID, "tasks."+alias, serviceID, ip, nil, false, method) + } + } + + // If we are doing DNS RR delete the endpoint IP from DNS record right away. + if !multipleEntries && len(vip) == 0 { + n.(*network).deleteSvcRecords(eID, svcName, serviceID, ip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).deleteSvcRecords(eID, alias, serviceID, ip, nil, false, method) + } + } + + // Remove the DNS record for VIP only if we are removing the service + if rmService && len(vip) != 0 && !multipleEntries { + n.(*network).deleteSvcRecords(eID, svcName, serviceID, vip, nil, false, method) + for _, alias := range serviceAliases { + n.(*network).deleteSvcRecords(eID, alias, serviceID, vip, nil, false, method) + } + } + + return nil +} + +func (c *controller) delContainerNameResolution(nID, eID, containerName string, taskAliases []string, ip net.IP, method string) error { + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + logrus.Debugf("delContainerNameResolution %s %s", eID, containerName) + + // Delete resolution for container name + n.(*network).deleteSvcRecords(eID, containerName, eID, ip, nil, true, method) + + // Delete resolution for taskaliases + for _, alias := range taskAliases { + n.(*network).deleteSvcRecords(eID, alias, eID, ip, nil, true, method) + } + + return nil +} + +func newService(name string, id string, ingressPorts []*PortConfig, serviceAliases []string) *service { + return &service{ + name: name, + id: id, + ingressPorts: ingressPorts, + loadBalancers: make(map[string]*loadBalancer), + aliases: serviceAliases, + ipToEndpoint: common.NewSetMatrix(), + } +} + +func (c *controller) getLBIndex(sid, nid string, ingressPorts []*PortConfig) int { + skey := serviceKey{ + id: sid, + ports: portConfigs(ingressPorts).String(), + } + c.Lock() + s, ok := c.serviceBindings[skey] + c.Unlock() + + if !ok { + return 0 + } + + s.Lock() + lb := s.loadBalancers[nid] + s.Unlock() + + return int(lb.fwMark) +} + +// cleanupServiceDiscovery when the network is being deleted, erase all the associated service discovery records +func (c *controller) cleanupServiceDiscovery(cleanupNID string) { + c.Lock() + defer c.Unlock() + if cleanupNID == "" { + logrus.Debugf("cleanupServiceDiscovery for all networks") + c.svcRecords = make(map[string]svcInfo) + return + } + logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID) + delete(c.svcRecords, cleanupNID) +} + +func (c *controller) cleanupServiceBindings(cleanupNID string) { + var cleanupFuncs []func() + + logrus.Debugf("cleanupServiceBindings for %s", cleanupNID) + c.Lock() + services := make([]*service, 0, len(c.serviceBindings)) + for _, s := range c.serviceBindings { + services = append(services, s) + } + c.Unlock() + + for _, s := range services { + s.Lock() + // Skip the serviceBindings that got deleted + if s.deleted { + s.Unlock() + continue + } + for nid, lb := range s.loadBalancers { + if cleanupNID != "" && nid != cleanupNID { + continue + } + for eid, be := range lb.backEnds { + cleanupFuncs = append(cleanupFuncs, makeServiceCleanupFunc(c, s, nid, eid, lb.vip, be.ip)) + } + } + s.Unlock() + } + + for _, f := range cleanupFuncs { + f() + } + +} + +func makeServiceCleanupFunc(c *controller, s *service, nID, eID string, vip net.IP, ip net.IP) func() { + // ContainerName and taskAliases are not available here, this is still 
fine because the Service discovery + // cleanup already happened before. The only thing that rmServiceBinding is still doing here a part from the Load + // Balancer bookeeping, is to keep consistent the mapping of endpoint to IP. + return func() { + if err := c.rmServiceBinding(s.name, s.id, nID, eID, "", vip, s.ingressPorts, s.aliases, []string{}, ip, "cleanupServiceBindings", false, true); err != nil { + logrus.Errorf("Failed to remove service bindings for service %s network %s endpoint %s while cleanup: %v", s.id, nID, eID, err) + } + } +} + +func (c *controller) addServiceBinding(svcName, svcID, nID, eID, containerName string, vip net.IP, ingressPorts []*PortConfig, serviceAliases, taskAliases []string, ip net.IP, method string) error { + var addService bool + + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + + skey := serviceKey{ + id: svcID, + ports: portConfigs(ingressPorts).String(), + } + + var s *service + for { + c.Lock() + var ok bool + s, ok = c.serviceBindings[skey] + if !ok { + // Create a new service if we are seeing this service + // for the first time. + s = newService(svcName, svcID, ingressPorts, serviceAliases) + c.serviceBindings[skey] = s + } + c.Unlock() + s.Lock() + if !s.deleted { + // ok the object is good to be used + break + } + s.Unlock() + } + logrus.Debugf("addServiceBinding from %s START for %s %s p:%p nid:%s skey:%v", method, svcName, eID, s, nID, skey) + defer s.Unlock() + + lb, ok := s.loadBalancers[nID] + if !ok { + // Create a new load balancer if we are seeing this + // network attachment on the service for the first + // time. + fwMarkCtrMu.Lock() + + lb = &loadBalancer{ + vip: vip, + fwMark: fwMarkCtr, + backEnds: make(map[string]*lbBackend), + service: s, + } + + fwMarkCtr++ + fwMarkCtrMu.Unlock() + + s.loadBalancers[nID] = lb + addService = true + } + + lb.backEnds[eID] = &lbBackend{ip, false} + + ok, entries := s.assignIPToEndpoint(ip.String(), eID) + if !ok || entries > 1 { + setStr, b := s.printIPToEndpoint(ip.String()) + if len(setStr) > maxSetStringLen { + setStr = setStr[:maxSetStringLen] + } + logrus.Warnf("addServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) + } + + // Add loadbalancer service and backend in all sandboxes in + // the network only if vip is valid. 
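The fwMark stamped into each new loadBalancer in addServiceBinding above comes from the package-level fwMarkCtr counter declared in service.go, taken under fwMarkCtrMu and only ever incremented, so every load balancer gets a unique mark. A minimal sketch of that allocation pattern in isolation (the helper name nextFWMark is illustrative):

package main

import (
	"fmt"
	"sync"
)

// Marks start at 256 and grow monotonically, matching the initial value of
// fwMarkCtr in the vendored code.
var (
	fwMarkCtr   uint32 = 256
	fwMarkCtrMu sync.Mutex
)

// nextFWMark hands out the next unused firewall mark under the mutex.
func nextFWMark() uint32 {
	fwMarkCtrMu.Lock()
	defer fwMarkCtrMu.Unlock()
	mark := fwMarkCtr
	fwMarkCtr++
	return mark
}

func main() {
	fmt.Println(nextFWMark(), nextFWMark()) // 256 257
}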
+ if len(vip) != 0 { + n.(*network).addLBBackend(ip, vip, lb, ingressPorts) + } + + // Add the appropriate name resolutions + c.addEndpointNameResolution(svcName, svcID, nID, eID, containerName, vip, serviceAliases, taskAliases, ip, addService, "addServiceBinding") + + logrus.Debugf("addServiceBinding from %s END for %s %s", method, svcName, eID) + + return nil +} + +func (c *controller) rmServiceBinding(svcName, svcID, nID, eID, containerName string, vip net.IP, ingressPorts []*PortConfig, serviceAliases []string, taskAliases []string, ip net.IP, method string, deleteSvcRecords bool, fullRemove bool) error { + + var rmService bool + + n, err := c.NetworkByID(nID) + if err != nil { + return err + } + + skey := serviceKey{ + id: svcID, + ports: portConfigs(ingressPorts).String(), + } + + c.Lock() + s, ok := c.serviceBindings[skey] + c.Unlock() + if !ok { + logrus.Warnf("rmServiceBinding %s %s %s aborted c.serviceBindings[skey] !ok", method, svcName, eID) + return nil + } + + s.Lock() + defer s.Unlock() + logrus.Debugf("rmServiceBinding from %s START for %s %s p:%p nid:%s sKey:%v deleteSvc:%t", method, svcName, eID, s, nID, skey, deleteSvcRecords) + lb, ok := s.loadBalancers[nID] + if !ok { + logrus.Warnf("rmServiceBinding %s %s %s aborted s.loadBalancers[nid] !ok", method, svcName, eID) + return nil + } + + be, ok := lb.backEnds[eID] + if !ok { + logrus.Warnf("rmServiceBinding %s %s %s aborted lb.backEnds[eid] && lb.disabled[eid] !ok", method, svcName, eID) + return nil + } + + if fullRemove { + // delete regardless + delete(lb.backEnds, eID) + } else { + be.disabled = true + } + + if len(lb.backEnds) == 0 { + // All the backends for this service have been + // removed. Time to remove the load balancer and also + // remove the service entry in IPVS. + rmService = true + + delete(s.loadBalancers, nID) + logrus.Debugf("rmServiceBinding %s delete %s, p:%p in loadbalancers len:%d", eID, nID, lb, len(s.loadBalancers)) + } + + ok, entries := s.removeIPToEndpoint(ip.String(), eID) + if !ok || entries > 0 { + setStr, b := s.printIPToEndpoint(ip.String()) + if len(setStr) > maxSetStringLen { + setStr = setStr[:maxSetStringLen] + } + logrus.Warnf("rmServiceBinding %s possible transient state ok:%t entries:%d set:%t %s", eID, ok, entries, b, setStr) + } + + // Remove loadbalancer service(if needed) and backend in all + // sandboxes in the network only if the vip is valid. + if len(vip) != 0 && entries == 0 { + n.(*network).rmLBBackend(ip, vip, lb, ingressPorts, rmService, fullRemove) + } + + // Delete the name resolutions + if deleteSvcRecords { + c.deleteEndpointNameResolution(svcName, svcID, nID, eID, containerName, vip, serviceAliases, taskAliases, ip, rmService, entries > 0, "rmServiceBinding") + } + + if len(s.loadBalancers) == 0 { + // All loadbalancers for the service removed. Time to + // remove the service itself. 
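The removal block that follows pairs with the for-loop at the top of addServiceBinding: a caller that fetched the service just before it left the map observes deleted under the service lock and retries, picking up a fresh object. A compressed sketch of both sides of that protocol, under the assumption of heavily simplified types:

package main

import "sync"

type svc struct {
	sync.Mutex
	deleted bool
}

type ctrl struct {
	sync.Mutex
	bindings map[string]*svc
}

// get returns a live service, creating one if needed; it loops because a
// service fetched from the map may be concurrently marked deleted.
func (c *ctrl) get(key string) *svc {
	for {
		c.Lock()
		s, ok := c.bindings[key]
		if !ok {
			s = &svc{}
			c.bindings[key] = s
		}
		c.Unlock()

		s.Lock()
		if !s.deleted {
			return s // caller holds s.Mutex, as in addServiceBinding
		}
		s.Unlock()
	}
}

// remove marks the service deleted before unlinking it from the map, so a
// racing get never keeps using a dying object. Caller holds s.Mutex.
func (c *ctrl) remove(key string, s *svc) {
	c.Lock()
	s.deleted = true
	delete(c.bindings, key)
	c.Unlock()
}

func main() {
	c := &ctrl{bindings: map[string]*svc{}}
	s := c.get("web")
	c.remove("web", s)
	s.Unlock()
}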
+ c.Lock() + + // Mark the object as deleted so that the add won't use it wrongly + s.deleted = true + // NOTE The delete from the serviceBindings map has to be the last operation else we are allowing a race between this service + // that is getting deleted and a new service that will be created if the entry is not anymore there + delete(c.serviceBindings, skey) + c.Unlock() + } + + logrus.Debugf("rmServiceBinding from %s END for %s %s", method, svcName, eID) + return nil +} diff --git a/vendor/github.com/docker/libnetwork/service_linux.go b/vendor/github.com/docker/libnetwork/service_linux.go new file mode 100644 index 0000000000..ef0590df78 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/service_linux.go @@ -0,0 +1,796 @@ +package libnetwork + +import ( + "fmt" + "io" + "io/ioutil" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/docker/docker/pkg/reexec" + "github.com/docker/libnetwork/iptables" + "github.com/docker/libnetwork/ipvs" + "github.com/docker/libnetwork/ns" + "github.com/gogo/protobuf/proto" + "github.com/ishidawataru/sctp" + "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +func init() { + reexec.Register("fwmarker", fwMarker) + reexec.Register("redirecter", redirecter) +} + +// Get all loadbalancers on this network that is currently discovered +// on this node. +func (n *network) connectedLoadbalancers() []*loadBalancer { + c := n.getController() + + c.Lock() + serviceBindings := make([]*service, 0, len(c.serviceBindings)) + for _, s := range c.serviceBindings { + serviceBindings = append(serviceBindings, s) + } + c.Unlock() + + var lbs []*loadBalancer + for _, s := range serviceBindings { + s.Lock() + // Skip the serviceBindings that got deleted + if s.deleted { + s.Unlock() + continue + } + if lb, ok := s.loadBalancers[n.ID()]; ok { + lbs = append(lbs, lb) + } + s.Unlock() + } + + return lbs +} + +// Populate all loadbalancers on the network that the passed endpoint +// belongs to, into this sandbox. +func (sb *sandbox) populateLoadbalancers(ep *endpoint) { + var gwIP net.IP + + // This is an interface less endpoint. Nothing to do. + if ep.Iface() == nil { + return + } + + n := ep.getNetwork() + eIP := ep.Iface().Address() + + if n.ingress { + if err := addRedirectRules(sb.Key(), eIP, ep.ingressPorts); err != nil { + logrus.Errorf("Failed to add redirect rules for ep %s (%s): %v", ep.Name(), ep.ID()[0:7], err) + } + } + + if sb.ingress { + // For the ingress sandbox if this is not gateway + // endpoint do nothing. + if ep != sb.getGatewayEndpoint() { + return + } + + // This is the gateway endpoint. Now get the ingress + // network and plumb the loadbalancers. + gwIP = ep.Iface().Address().IP + for _, ep := range sb.getConnectedEndpoints() { + if !ep.endpointInGWNetwork() { + n = ep.getNetwork() + eIP = ep.Iface().Address() + } + } + } + + for _, lb := range n.connectedLoadbalancers() { + // Skip if vip is not valid. + if len(lb.vip) == 0 { + continue + } + + lb.service.Lock() + for _, be := range lb.backEnds { + if !be.disabled { + sb.addLBBackend(be.ip, lb.vip, lb.fwMark, lb.service.ingressPorts, eIP, gwIP, n.ingress) + } + } + lb.service.Unlock() + } +} + +// Add loadbalancer backend to all sandboxes which has a connection to +// this network. If needed add the service as well. 
+func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) { + n.WalkEndpoints(func(e Endpoint) bool { + ep := e.(*endpoint) + if sb, ok := ep.getSandbox(); ok { + if !sb.isEndpointPopulated(ep) { + return false + } + + var gwIP net.IP + if ep := sb.getGatewayEndpoint(); ep != nil { + gwIP = ep.Iface().Address().IP + } + + sb.addLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, n.ingress) + } + + return false + }) +} + +// Remove loadbalancer backend from all sandboxes which has a +// connection to this network. If needed remove the service entry as +// well, as specified by the rmService bool. +func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool, fullRemove bool) { + n.WalkEndpoints(func(e Endpoint) bool { + ep := e.(*endpoint) + if sb, ok := ep.getSandbox(); ok { + if !sb.isEndpointPopulated(ep) { + return false + } + + var gwIP net.IP + if ep := sb.getGatewayEndpoint(); ep != nil { + gwIP = ep.Iface().Address().IP + } + + sb.rmLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService, fullRemove, n.ingress) + } + + return false + }) +} + +// Add loadbalancer backend into one connected sandbox. +func (sb *sandbox) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, gwIP net.IP, isIngressNetwork bool) { + if sb.osSbox == nil { + return + } + + if isIngressNetwork && !sb.ingress { + return + } + + i, err := ipvs.New(sb.Key()) + if err != nil { + logrus.Errorf("Failed to create an ipvs handle for sbox %s (%s,%s) for lb addition: %v", sb.ID()[0:7], sb.ContainerID()[0:7], sb.Key(), err) + return + } + defer i.Close() + + s := &ipvs.Service{ + AddressFamily: nl.FAMILY_V4, + FWMark: fwMark, + SchedName: ipvs.RoundRobin, + } + + if !i.IsServicePresent(s) { + var filteredPorts []*PortConfig + if sb.ingress { + filteredPorts = filterPortConfigs(ingressPorts, false) + if err := programIngress(gwIP, filteredPorts, false); err != nil { + logrus.Errorf("Failed to add ingress: %v", err) + return + } + } + + logrus.Debugf("Creating service for vip %s fwMark %d ingressPorts %#v in sbox %s (%s)", vip, fwMark, ingressPorts, sb.ID()[0:7], sb.ContainerID()[0:7]) + if err := invokeFWMarker(sb.Key(), vip, fwMark, ingressPorts, eIP, false); err != nil { + logrus.Errorf("Failed to add firewall mark rule in sbox %s (%s): %v", sb.ID()[0:7], sb.ContainerID()[0:7], err) + return + } + + if err := i.NewService(s); err != nil && err != syscall.EEXIST { + logrus.Errorf("Failed to create a new service for vip %s fwmark %d in sbox %s (%s): %v", vip, fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err) + return + } + } + + d := &ipvs.Destination{ + AddressFamily: nl.FAMILY_V4, + Address: ip, + Weight: 1, + } + + // Remove the sched name before using the service to add + // destination. + s.SchedName = "" + if err := i.NewDestination(s, d); err != nil && err != syscall.EEXIST { + logrus.Errorf("Failed to create real server %s for vip %s fwmark %d in sbox %s (%s): %v", ip, vip, fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err) + } +} + +// Remove loadbalancer backend from one connected sandbox. 
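For context on the ipvs handle used by the sandbox-level addLBBackend above (and rmLBBackend below): the virtual service is keyed by firewall mark alone, and each task is attached as a weighted destination. A sketch against the same github.com/docker/libnetwork/ipvs API; it only runs as root on Linux with IPVS support, and the mark and backend address here are made up:

// +build linux

package main

import (
	"log"
	"net"

	"github.com/docker/libnetwork/ipvs"
	"github.com/vishvananda/netlink/nl"
)

func main() {
	// An empty path means the current network namespace; the sandbox code
	// passes sb.Key() to program IPVS inside the container's namespace.
	h, err := ipvs.New("")
	if err != nil {
		log.Fatal(err)
	}
	defer h.Close()

	// One virtual service per firewall mark: packets are matched by the
	// mark set in the mangle table, not by address, hence no Address here.
	s := &ipvs.Service{
		AddressFamily: nl.FAMILY_V4,
		FWMark:        256,
		SchedName:     ipvs.RoundRobin,
	}
	if err := h.NewService(s); err != nil {
		log.Fatal(err)
	}

	// Each backing task becomes a destination with weight 1; rmLBBackend
	// later drops the weight to 0 instead of deleting when the backend is
	// merely disabled, so existing flows can drain.
	s.SchedName = "" // cleared before adding destinations, as in addLBBackend
	d := &ipvs.Destination{
		AddressFamily: nl.FAMILY_V4,
		Address:       net.ParseIP("10.0.0.3"),
		Weight:        1,
	}
	if err := h.NewDestination(s, d); err != nil {
		log.Fatal(err)
	}
}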
+func (sb *sandbox) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, gwIP net.IP, rmService bool, fullRemove bool, isIngressNetwork bool) { + if sb.osSbox == nil { + return + } + + if isIngressNetwork && !sb.ingress { + return + } + + i, err := ipvs.New(sb.Key()) + if err != nil { + logrus.Errorf("Failed to create an ipvs handle for sbox %s (%s,%s) for lb removal: %v", sb.ID()[0:7], sb.ContainerID()[0:7], sb.Key(), err) + return + } + defer i.Close() + + s := &ipvs.Service{ + AddressFamily: nl.FAMILY_V4, + FWMark: fwMark, + } + + d := &ipvs.Destination{ + AddressFamily: nl.FAMILY_V4, + Address: ip, + Weight: 1, + } + + if fullRemove { + if err := i.DelDestination(s, d); err != nil && err != syscall.ENOENT { + logrus.Errorf("Failed to delete real server %s for vip %s fwmark %d in sbox %s (%s): %v", ip, vip, fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err) + } + } else { + d.Weight = 0 + if err := i.UpdateDestination(s, d); err != nil && err != syscall.ENOENT { + logrus.Errorf("Failed to set LB weight of real server %s to 0 for vip %s fwmark %d in sbox %s (%s): %v", ip, vip, fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err) + } + } + + if rmService { + s.SchedName = ipvs.RoundRobin + if err := i.DelService(s); err != nil && err != syscall.ENOENT { + logrus.Errorf("Failed to delete service for vip %s fwmark %d in sbox %s (%s): %v", vip, fwMark, sb.ID()[0:7], sb.ContainerID()[0:7], err) + } + + var filteredPorts []*PortConfig + if sb.ingress { + filteredPorts = filterPortConfigs(ingressPorts, true) + if err := programIngress(gwIP, filteredPorts, true); err != nil { + logrus.Errorf("Failed to delete ingress: %v", err) + } + } + + if err := invokeFWMarker(sb.Key(), vip, fwMark, ingressPorts, eIP, true); err != nil { + logrus.Errorf("Failed to delete firewall mark rule in sbox %s (%s): %v", sb.ID()[0:7], sb.ContainerID()[0:7], err) + } + } +} + +const ingressChain = "DOCKER-INGRESS" + +var ( + ingressOnce sync.Once + ingressProxyMu sync.Mutex + ingressProxyTbl = make(map[string]io.Closer) + portConfigMu sync.Mutex + portConfigTbl = make(map[PortConfig]int) +) + +func filterPortConfigs(ingressPorts []*PortConfig, isDelete bool) []*PortConfig { + portConfigMu.Lock() + iPorts := make([]*PortConfig, 0, len(ingressPorts)) + for _, pc := range ingressPorts { + if isDelete { + if cnt, ok := portConfigTbl[*pc]; ok { + // This is the last reference to this + // port config. Delete the port config + // and add it to filtered list to be + // plumbed. + if cnt == 1 { + delete(portConfigTbl, *pc) + iPorts = append(iPorts, pc) + continue + } + + portConfigTbl[*pc] = cnt - 1 + } + + continue + } + + if cnt, ok := portConfigTbl[*pc]; ok { + portConfigTbl[*pc] = cnt + 1 + continue + } + + // We are adding it for the first time. Add it to the + // filter list to be plumbed. + portConfigTbl[*pc] = 1 + iPorts = append(iPorts, pc) + } + portConfigMu.Unlock() + + return iPorts +} + +func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) error { + addDelOpt := "-I" + if isDelete { + addDelOpt = "-D" + } + + chainExists := iptables.ExistChain(ingressChain, iptables.Nat) + filterChainExists := iptables.ExistChain(ingressChain, iptables.Filter) + + ingressOnce.Do(func() { + // Flush nat table and filter table ingress chain rules during init if it + // exists. It might contain stale rules from previous life. 
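filterPortConfigs earlier in this file is a plain reference count over PortConfig values: iptables rules for a published port are plumbed only on the first add and torn down only on the last delete, so several services can share a published port safely. A small standalone sketch of the same bookkeeping with an illustrative key type:

package main

import (
	"fmt"
	"sync"
)

type portKey struct {
	proto string
	port  uint32
}

var (
	mu  sync.Mutex
	cnt = map[portKey]int{}
)

// track returns true when the caller should actually program (or tear down)
// the rules: the 0->1 transition for adds, the 1->0 transition for deletes.
func track(k portKey, isDelete bool) bool {
	mu.Lock()
	defer mu.Unlock()
	if isDelete {
		if cnt[k] <= 1 {
			delete(cnt, k)
			return true
		}
		cnt[k]--
		return false
	}
	cnt[k]++
	return cnt[k] == 1
}

func main() {
	k := portKey{"tcp", 8080}
	fmt.Println(track(k, false)) // true: first service publishing 8080
	fmt.Println(track(k, false)) // false: already plumbed
	fmt.Println(track(k, true))  // false: one reference remains
	fmt.Println(track(k, true))  // true: last reference, remove rules
}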
+ if chainExists { + if err := iptables.RawCombinedOutput("-t", "nat", "-F", ingressChain); err != nil { + logrus.Errorf("Could not flush nat table ingress chain rules during init: %v", err) + } + } + if filterChainExists { + if err := iptables.RawCombinedOutput("-F", ingressChain); err != nil { + logrus.Errorf("Could not flush filter table ingress chain rules during init: %v", err) + } + } + }) + + if !isDelete { + if !chainExists { + if err := iptables.RawCombinedOutput("-t", "nat", "-N", ingressChain); err != nil { + return fmt.Errorf("failed to create ingress chain: %v", err) + } + } + if !filterChainExists { + if err := iptables.RawCombinedOutput("-N", ingressChain); err != nil { + return fmt.Errorf("failed to create filter table ingress chain: %v", err) + } + } + + if !iptables.Exists(iptables.Nat, ingressChain, "-j", "RETURN") { + if err := iptables.RawCombinedOutput("-t", "nat", "-A", ingressChain, "-j", "RETURN"); err != nil { + return fmt.Errorf("failed to add return rule in nat table ingress chain: %v", err) + } + } + + if !iptables.Exists(iptables.Filter, ingressChain, "-j", "RETURN") { + if err := iptables.RawCombinedOutput("-A", ingressChain, "-j", "RETURN"); err != nil { + return fmt.Errorf("failed to add return rule to filter table ingress chain: %v", err) + } + } + + for _, chain := range []string{"OUTPUT", "PREROUTING"} { + if !iptables.Exists(iptables.Nat, chain, "-m", "addrtype", "--dst-type", "LOCAL", "-j", ingressChain) { + if err := iptables.RawCombinedOutput("-t", "nat", "-I", chain, "-m", "addrtype", "--dst-type", "LOCAL", "-j", ingressChain); err != nil { + return fmt.Errorf("failed to add jump rule in %s to ingress chain: %v", chain, err) + } + } + } + + if !iptables.Exists(iptables.Filter, "FORWARD", "-j", ingressChain) { + if err := iptables.RawCombinedOutput("-I", "FORWARD", "-j", ingressChain); err != nil { + return fmt.Errorf("failed to add jump rule to %s in filter table forward chain: %v", ingressChain, err) + } + arrangeUserFilterRule() + } + + oifName, err := findOIFName(gwIP) + if err != nil { + return fmt.Errorf("failed to find gateway bridge interface name for %s: %v", gwIP, err) + } + + path := filepath.Join("/proc/sys/net/ipv4/conf", oifName, "route_localnet") + if err := ioutil.WriteFile(path, []byte{'1', '\n'}, 0644); err != nil { + return fmt.Errorf("could not write to %s: %v", path, err) + } + + ruleArgs := strings.Fields(fmt.Sprintf("-m addrtype --src-type LOCAL -o %s -j MASQUERADE", oifName)) + if !iptables.Exists(iptables.Nat, "POSTROUTING", ruleArgs...) { + if err := iptables.RawCombinedOutput(append([]string{"-t", "nat", "-I", "POSTROUTING"}, ruleArgs...)...); err != nil { + return fmt.Errorf("failed to add ingress localhost POSTROUTING rule for %s: %v", oifName, err) + } + } + } + + for _, iPort := range ingressPorts { + if iptables.ExistChain(ingressChain, iptables.Nat) { + rule := strings.Fields(fmt.Sprintf("-t nat %s %s -p %s --dport %d -j DNAT --to-destination %s:%d", + addDelOpt, ingressChain, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, gwIP, iPort.PublishedPort)) + if err := iptables.RawCombinedOutput(rule...); err != nil { + errStr := fmt.Sprintf("setting up rule failed, %v: %v", rule, err) + if !isDelete { + return fmt.Errorf("%s", errStr) + } + + logrus.Infof("%s", errStr) + } + } + + // Filter table rules to allow a published service to be accessible in the local node from.. 
+	// 1) service tasks attached to other networks
+	// 2) unmanaged containers on bridge networks
+	rule := strings.Fields(fmt.Sprintf("%s %s -m state -p %s --sport %d --state ESTABLISHED,RELATED -j ACCEPT",
+		addDelOpt, ingressChain, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort))
+	if err := iptables.RawCombinedOutput(rule...); err != nil {
+		errStr := fmt.Sprintf("setting up rule failed, %v: %v", rule, err)
+		if !isDelete {
+			return fmt.Errorf("%s", errStr)
+		}
+		logrus.Warnf("%s", errStr)
+	}
+
+	rule = strings.Fields(fmt.Sprintf("%s %s -p %s --dport %d -j ACCEPT",
+		addDelOpt, ingressChain, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort))
+	if err := iptables.RawCombinedOutput(rule...); err != nil {
+		errStr := fmt.Sprintf("setting up rule failed, %v: %v", rule, err)
+		if !isDelete {
+			return fmt.Errorf("%s", errStr)
+		}
+
+		logrus.Warnf("%s", errStr)
+	}
+
+	if err := plumbProxy(iPort, isDelete); err != nil {
+		logrus.Warnf("failed to create proxy for port %d: %v", iPort.PublishedPort, err)
+	}
+	}
+
+	return nil
+}
+
+// In the filter table FORWARD chain the first rule should be a jump to
+// DOCKER-USER so the user is able to filter packets first.
+// The second rule should be a jump to the ingress chain.
+// This chain has the rules to allow access to the published ports for swarm tasks
+// from local bridge networks and docker_gwbridge (i.e. tasks on other swarm networks)
+func arrangeIngressFilterRule() {
+	if iptables.ExistChain(ingressChain, iptables.Filter) {
+		if iptables.Exists(iptables.Filter, "FORWARD", "-j", ingressChain) {
+			if err := iptables.RawCombinedOutput("-D", "FORWARD", "-j", ingressChain); err != nil {
+				logrus.Warnf("failed to delete jump rule to ingressChain in filter table: %v", err)
+			}
+		}
+		if err := iptables.RawCombinedOutput("-I", "FORWARD", "-j", ingressChain); err != nil {
+			logrus.Warnf("failed to add jump rule to ingressChain in filter table: %v", err)
+		}
+	}
+}
+
+func findOIFName(ip net.IP) (string, error) {
+	nlh := ns.NlHandle()
+
+	routes, err := nlh.RouteGet(ip)
+	if err != nil {
+		return "", err
+	}
+
+	if len(routes) == 0 {
+		return "", fmt.Errorf("no route to %s", ip)
+	}
+
+	// Pick the first route (typically there is only one). We
+	// don't support multipath.
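A standalone analogue of this outbound-interface lookup, using the package-level helpers of github.com/vishvananda/netlink instead of libnetwork's cached namespace handle (Linux-only; the probe address is arbitrary):

// +build linux

package main

import (
	"fmt"
	"log"
	"net"

	"github.com/vishvananda/netlink"
)

// oifName resolves which interface the kernel would route ip through,
// taking the first route as findOIFName does (no multipath support).
func oifName(ip net.IP) (string, error) {
	routes, err := netlink.RouteGet(ip)
	if err != nil {
		return "", err
	}
	if len(routes) == 0 {
		return "", fmt.Errorf("no route to %s", ip)
	}
	link, err := netlink.LinkByIndex(routes[0].LinkIndex)
	if err != nil {
		return "", err
	}
	return link.Attrs().Name, nil
}

func main() {
	name, err := oifName(net.ParseIP("172.18.0.1"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(name)
}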
+ link, err := nlh.LinkByIndex(routes[0].LinkIndex) + if err != nil { + return "", err + } + + return link.Attrs().Name, nil +} + +func plumbProxy(iPort *PortConfig, isDelete bool) error { + var ( + err error + l io.Closer + ) + + portSpec := fmt.Sprintf("%d/%s", iPort.PublishedPort, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)])) + if isDelete { + ingressProxyMu.Lock() + if listener, ok := ingressProxyTbl[portSpec]; ok { + if listener != nil { + listener.Close() + } + } + ingressProxyMu.Unlock() + + return nil + } + + switch iPort.Protocol { + case ProtocolTCP: + l, err = net.ListenTCP("tcp", &net.TCPAddr{Port: int(iPort.PublishedPort)}) + case ProtocolUDP: + l, err = net.ListenUDP("udp", &net.UDPAddr{Port: int(iPort.PublishedPort)}) + case ProtocolSCTP: + l, err = sctp.ListenSCTP("sctp", &sctp.SCTPAddr{Port: int(iPort.PublishedPort)}) + default: + err = fmt.Errorf("unknown protocol %v", iPort.Protocol) + } + + if err != nil { + return err + } + + ingressProxyMu.Lock() + ingressProxyTbl[portSpec] = l + ingressProxyMu.Unlock() + + return nil +} + +func writePortsToFile(ports []*PortConfig) (string, error) { + f, err := ioutil.TempFile("", "port_configs") + if err != nil { + return "", err + } + defer f.Close() + + buf, _ := proto.Marshal(&EndpointRecord{ + IngressPorts: ports, + }) + + n, err := f.Write(buf) + if err != nil { + return "", err + } + + if n < len(buf) { + return "", io.ErrShortWrite + } + + return f.Name(), nil +} + +func readPortsFromFile(fileName string) ([]*PortConfig, error) { + buf, err := ioutil.ReadFile(fileName) + if err != nil { + return nil, err + } + + var epRec EndpointRecord + err = proto.Unmarshal(buf, &epRec) + if err != nil { + return nil, err + } + + return epRec.IngressPorts, nil +} + +// Invoke fwmarker reexec routine to mark vip destined packets with +// the passed firewall mark. +func invokeFWMarker(path string, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, eIP *net.IPNet, isDelete bool) error { + var ingressPortsFile string + + if len(ingressPorts) != 0 { + var err error + ingressPortsFile, err = writePortsToFile(ingressPorts) + if err != nil { + return err + } + + defer os.Remove(ingressPortsFile) + } + + addDelOpt := "-A" + if isDelete { + addDelOpt = "-D" + } + + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{"fwmarker"}, path, vip.String(), fmt.Sprintf("%d", fwMark), addDelOpt, ingressPortsFile, eIP.String()), + Stdout: os.Stdout, + Stderr: os.Stderr, + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("reexec failed: %v", err) + } + + return nil +} + +// Firewall marker reexec function. 
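fwMarker runs in a re-executed copy of the daemon: invokeFWMarker starts reexec.Self() with argv[0] set to "fwmarker", and the init() registration at the top of this file routes that name here. The piece not shown in this vendored file is the reexec.Init() call the embedding binary must make first thing in main; a minimal sketch of the whole pattern, with an illustrative handler name:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Same shape as reexec.Register("fwmarker", fwMarker) above; the
	// handler name "hello-child" is made up for this sketch.
	reexec.Register("hello-child", func() {
		fmt.Println("running re-exec'd as", os.Args[0])
		os.Exit(0)
	})
}

func main() {
	// reexec.Init runs a registered handler and returns true when the
	// process was started under one of the registered argv[0] names.
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("hello-child") // re-runs this same binary
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}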
+func fwMarker() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if len(os.Args) < 7 { + logrus.Error("invalid number of arguments..") + os.Exit(1) + } + + var ingressPorts []*PortConfig + if os.Args[5] != "" { + var err error + ingressPorts, err = readPortsFromFile(os.Args[5]) + if err != nil { + logrus.Errorf("Failed reading ingress ports file: %v", err) + os.Exit(6) + } + } + + vip := os.Args[2] + fwMark, err := strconv.ParseUint(os.Args[3], 10, 32) + if err != nil { + logrus.Errorf("bad fwmark value(%s) passed: %v", os.Args[3], err) + os.Exit(2) + } + addDelOpt := os.Args[4] + + rules := [][]string{} + for _, iPort := range ingressPorts { + rule := strings.Fields(fmt.Sprintf("-t mangle %s PREROUTING -p %s --dport %d -j MARK --set-mark %d", + addDelOpt, strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, fwMark)) + rules = append(rules, rule) + } + + ns, err := netns.GetFromPath(os.Args[1]) + if err != nil { + logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err) + os.Exit(3) + } + defer ns.Close() + + if err := netns.Set(ns); err != nil { + logrus.Errorf("setting into container net ns %v failed, %v", os.Args[1], err) + os.Exit(4) + } + + if addDelOpt == "-A" { + eIP, subnet, err := net.ParseCIDR(os.Args[6]) + if err != nil { + logrus.Errorf("Failed to parse endpoint IP %s: %v", os.Args[6], err) + os.Exit(9) + } + + ruleParams := strings.Fields(fmt.Sprintf("-m ipvs --ipvs -d %s -j SNAT --to-source %s", subnet, eIP)) + if !iptables.Exists("nat", "POSTROUTING", ruleParams...) { + rule := append(strings.Fields("-t nat -A POSTROUTING"), ruleParams...) + rules = append(rules, rule) + + err := ioutil.WriteFile("/proc/sys/net/ipv4/vs/conntrack", []byte{'1', '\n'}, 0644) + if err != nil { + logrus.Errorf("Failed to write to /proc/sys/net/ipv4/vs/conntrack: %v", err) + os.Exit(8) + } + } + } + + rule := strings.Fields(fmt.Sprintf("-t mangle %s OUTPUT -d %s/32 -j MARK --set-mark %d", addDelOpt, vip, fwMark)) + rules = append(rules, rule) + + rule = strings.Fields(fmt.Sprintf("-t nat %s OUTPUT -p icmp --icmp echo-request -d %s -j DNAT --to 127.0.0.1", addDelOpt, vip)) + rules = append(rules, rule) + + for _, rule := range rules { + if err := iptables.RawCombinedOutputNative(rule...); err != nil { + logrus.Errorf("setting up rule failed, %v: %v", rule, err) + os.Exit(5) + } + } +} + +func addRedirectRules(path string, eIP *net.IPNet, ingressPorts []*PortConfig) error { + var ingressPortsFile string + + if len(ingressPorts) != 0 { + var err error + ingressPortsFile, err = writePortsToFile(ingressPorts) + if err != nil { + return err + } + defer os.Remove(ingressPortsFile) + } + + cmd := &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{"redirecter"}, path, eIP.String(), ingressPortsFile), + Stdout: os.Stdout, + Stderr: os.Stderr, + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("reexec failed: %v", err) + } + + return nil +} + +// Redirecter reexec function. 
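The ingress port list crosses the re-exec boundary through a temp file (writePortsToFile and readPortsFromFile above, which serialize an EndpointRecord with gogo/protobuf). A sketch of the same handoff pattern, with a plain JSON payload standing in for the protobuf message:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

// portConfig stands in for the generated PortConfig/EndpointRecord types.
type portConfig struct {
	Protocol      string
	PublishedPort uint32
	TargetPort    uint32
}

// writePorts drops the port list into a temp file and returns its name,
// which the parent then passes to the re-exec'd child as an argument.
func writePorts(ports []portConfig) (string, error) {
	f, err := ioutil.TempFile("", "port_configs")
	if err != nil {
		return "", err
	}
	defer f.Close()
	if err := json.NewEncoder(f).Encode(ports); err != nil {
		return "", err
	}
	return f.Name(), nil
}

func readPorts(name string) ([]portConfig, error) {
	buf, err := ioutil.ReadFile(name)
	if err != nil {
		return nil, err
	}
	var ports []portConfig
	err = json.Unmarshal(buf, &ports)
	return ports, err
}

func main() {
	name, err := writePorts([]portConfig{{"tcp", 8080, 80}})
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(name) // the caller removes it once the child exits
	ports, err := readPorts(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ports)
}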
+func redirecter() { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + if len(os.Args) < 4 { + logrus.Error("invalid number of arguments..") + os.Exit(1) + } + + var ingressPorts []*PortConfig + if os.Args[3] != "" { + var err error + ingressPorts, err = readPortsFromFile(os.Args[3]) + if err != nil { + logrus.Errorf("Failed reading ingress ports file: %v", err) + os.Exit(2) + } + } + + eIP, _, err := net.ParseCIDR(os.Args[2]) + if err != nil { + logrus.Errorf("Failed to parse endpoint IP %s: %v", os.Args[2], err) + os.Exit(3) + } + + rules := [][]string{} + for _, iPort := range ingressPorts { + rule := strings.Fields(fmt.Sprintf("-t nat -A PREROUTING -d %s -p %s --dport %d -j REDIRECT --to-port %d", + eIP.String(), strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.PublishedPort, iPort.TargetPort)) + rules = append(rules, rule) + // Allow only incoming connections to exposed ports + iRule := strings.Fields(fmt.Sprintf("-I INPUT -d %s -p %s --dport %d -m conntrack --ctstate NEW,ESTABLISHED -j ACCEPT", + eIP.String(), strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.TargetPort)) + rules = append(rules, iRule) + // Allow only outgoing connections from exposed ports + oRule := strings.Fields(fmt.Sprintf("-I OUTPUT -s %s -p %s --sport %d -m conntrack --ctstate ESTABLISHED -j ACCEPT", + eIP.String(), strings.ToLower(PortConfig_Protocol_name[int32(iPort.Protocol)]), iPort.TargetPort)) + rules = append(rules, oRule) + } + + ns, err := netns.GetFromPath(os.Args[1]) + if err != nil { + logrus.Errorf("failed get network namespace %q: %v", os.Args[1], err) + os.Exit(4) + } + defer ns.Close() + + if err := netns.Set(ns); err != nil { + logrus.Errorf("setting into container net ns %v failed, %v", os.Args[1], err) + os.Exit(5) + } + + for _, rule := range rules { + if err := iptables.RawCombinedOutputNative(rule...); err != nil { + logrus.Errorf("setting up rule failed, %v: %v", rule, err) + os.Exit(6) + } + } + + if len(ingressPorts) == 0 { + return + } + + // Ensure blocking rules for anything else in/to ingress network + for _, rule := range [][]string{ + {"-d", eIP.String(), "-p", "sctp", "-j", "DROP"}, + {"-d", eIP.String(), "-p", "udp", "-j", "DROP"}, + {"-d", eIP.String(), "-p", "tcp", "-j", "DROP"}, + } { + if !iptables.ExistsNative(iptables.Filter, "INPUT", rule...) { + if err := iptables.RawCombinedOutputNative(append([]string{"-A", "INPUT"}, rule...)...); err != nil { + logrus.Errorf("setting up rule failed, %v: %v", rule, err) + os.Exit(7) + } + } + rule[0] = "-s" + if !iptables.ExistsNative(iptables.Filter, "OUTPUT", rule...) 
{ + if err := iptables.RawCombinedOutputNative(append([]string{"-A", "OUTPUT"}, rule...)...); err != nil { + logrus.Errorf("setting up rule failed, %v: %v", rule, err) + os.Exit(8) + } + } + } +} diff --git a/vendor/github.com/docker/libnetwork/service_unsupported.go b/vendor/github.com/docker/libnetwork/service_unsupported.go new file mode 100644 index 0000000000..37b9828191 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/service_unsupported.go @@ -0,0 +1,25 @@ +// +build !linux,!windows + +package libnetwork + +import ( + "fmt" + "net" +) + +func (c *controller) cleanupServiceBindings(nid string) { +} + +func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error { + return fmt.Errorf("not supported") +} + +func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, ingressPorts []*PortConfig, aliases []string, ip net.IP) error { + return fmt.Errorf("not supported") +} + +func (sb *sandbox) populateLoadbalancers(ep *endpoint) { +} + +func arrangeIngressFilterRule() { +} diff --git a/vendor/github.com/docker/libnetwork/service_windows.go b/vendor/github.com/docker/libnetwork/service_windows.go new file mode 100644 index 0000000000..a9ce244cfd --- /dev/null +++ b/vendor/github.com/docker/libnetwork/service_windows.go @@ -0,0 +1,163 @@ +package libnetwork + +import ( + "net" + + "github.com/Microsoft/hcsshim" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +type policyLists struct { + ilb *hcsshim.PolicyList + elb *hcsshim.PolicyList +} + +var lbPolicylistMap map[*loadBalancer]*policyLists + +func init() { + lbPolicylistMap = make(map[*loadBalancer]*policyLists) +} + +func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) { + + if system.GetOSVersion().Build > 16236 { + lb.Lock() + defer lb.Unlock() + //find the load balancer IP for the network. + var sourceVIP string + for _, e := range n.Endpoints() { + epInfo := e.Info() + if epInfo == nil { + continue + } + if epInfo.LoadBalancer() { + sourceVIP = epInfo.Iface().Address().IP.String() + break + } + } + + if sourceVIP == "" { + logrus.Errorf("Failed to find load balancer IP for network %s", n.Name()) + return + } + + var endpoints []hcsshim.HNSEndpoint + + for eid, be := range lb.backEnds { + if be.disabled { + continue + } + //Call HNS to get back ID (GUID) corresponding to the endpoint. 
+ hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid) + if err != nil { + logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err) + return + } + + endpoints = append(endpoints, *hnsEndpoint) + } + + if policies, ok := lbPolicylistMap[lb]; ok { + + if policies.ilb != nil { + policies.ilb.Delete() + policies.ilb = nil + } + + if policies.elb != nil { + policies.elb.Delete() + policies.elb = nil + } + delete(lbPolicylistMap, lb) + } + + ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0) + if err != nil { + logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v", + lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err) + return + } + + lbPolicylistMap[lb] = &policyLists{ + ilb: ilbPolicy, + } + + publishedPorts := make(map[uint32]uint32) + + for i, port := range ingressPorts { + protocol := uint16(6) + + // Skip already published port + if publishedPorts[port.PublishedPort] == port.TargetPort { + continue + } + + if port.Protocol == ProtocolUDP { + protocol = 17 + } + + // check if already has udp matching to add wild card publishing + for j := i + 1; j < len(ingressPorts); j++ { + if ingressPorts[j].TargetPort == port.TargetPort && + ingressPorts[j].PublishedPort == port.PublishedPort { + protocol = 0 + } + } + + publishedPorts[port.PublishedPort] = port.TargetPort + + lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort)) + if err != nil { + logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v", + lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err) + return + } + } + } +} + +func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool, fullRemove bool) { + if system.GetOSVersion().Build > 16236 { + if numEnabledBackends(lb) > 0 { + //Reprogram HNS (actually VFP) with the existing backends. + n.addLBBackend(ip, vip, lb, ingressPorts) + } else { + lb.Lock() + defer lb.Unlock() + logrus.Debugf("No more backends for service %s (ip:%s). 
Removing all policies", lb.service.name, lb.vip.String()) + + if policyLists, ok := lbPolicylistMap[lb]; ok { + if policyLists.ilb != nil { + policyLists.ilb.Delete() + policyLists.ilb = nil + } + + if policyLists.elb != nil { + policyLists.elb.Delete() + policyLists.elb = nil + } + delete(lbPolicylistMap, lb) + + } else { + logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String()) + } + } + } +} + +func numEnabledBackends(lb *loadBalancer) int { + nEnabled := 0 + for _, be := range lb.backEnds { + if !be.disabled { + nEnabled++ + } + } + return nEnabled +} + +func (sb *sandbox) populateLoadbalancers(ep *endpoint) { +} + +func arrangeIngressFilterRule() { +} diff --git a/vendor/github.com/docker/libnetwork/store.go b/vendor/github.com/docker/libnetwork/store.go new file mode 100644 index 0000000000..95943f6f45 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/store.go @@ -0,0 +1,496 @@ +package libnetwork + +import ( + "fmt" + "strings" + + "github.com/docker/libkv/store/boltdb" + "github.com/docker/libkv/store/consul" + "github.com/docker/libkv/store/etcd" + "github.com/docker/libkv/store/zookeeper" + "github.com/docker/libnetwork/datastore" + "github.com/sirupsen/logrus" +) + +func registerKVStores() { + consul.Register() + zookeeper.Register() + etcd.Register() + boltdb.Register() +} + +func (c *controller) initScopedStore(scope string, scfg *datastore.ScopeCfg) error { + store, err := datastore.NewDataStore(scope, scfg) + if err != nil { + return err + } + c.Lock() + c.stores = append(c.stores, store) + c.Unlock() + + return nil +} + +func (c *controller) initStores() error { + registerKVStores() + + c.Lock() + if c.cfg == nil { + c.Unlock() + return nil + } + scopeConfigs := c.cfg.Scopes + c.stores = nil + c.Unlock() + + for scope, scfg := range scopeConfigs { + if err := c.initScopedStore(scope, scfg); err != nil { + return err + } + } + + c.startWatch() + return nil +} + +func (c *controller) closeStores() { + for _, store := range c.getStores() { + store.Close() + } +} + +func (c *controller) getStore(scope string) datastore.DataStore { + c.Lock() + defer c.Unlock() + + for _, store := range c.stores { + if store.Scope() == scope { + return store + } + } + + return nil +} + +func (c *controller) getStores() []datastore.DataStore { + c.Lock() + defer c.Unlock() + + return c.stores +} + +func (c *controller) getNetworkFromStore(nid string) (*network, error) { + for _, store := range c.getStores() { + n := &network{id: nid, ctrlr: c} + err := store.GetObject(datastore.Key(n.Key()...), n) + // Continue searching in the next store if the key is not found in this store + if err != nil { + if err != datastore.ErrKeyNotFound { + logrus.Debugf("could not find network %s: %v", nid, err) + } + continue + } + + ec := &endpointCnt{n: n} + err = store.GetObject(datastore.Key(ec.Key()...), ec) + if err != nil && !n.inDelete { + return nil, fmt.Errorf("could not find endpoint count for network %s: %v", n.Name(), err) + } + + n.epCnt = ec + if n.scope == "" { + n.scope = store.Scope() + } + return n, nil + } + + return nil, fmt.Errorf("network %s not found", nid) +} + +func (c *controller) getNetworksForScope(scope string) ([]*network, error) { + var nl []*network + + store := c.getStore(scope) + if store == nil { + return nil, nil + } + + kvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix), + &network{ctrlr: c}) + if err != nil && err != datastore.ErrKeyNotFound { + return nil, fmt.Errorf("failed to get networks for scope %s: %v", + scope, 
err) + } + + for _, kvo := range kvol { + n := kvo.(*network) + n.ctrlr = c + + ec := &endpointCnt{n: n} + err = store.GetObject(datastore.Key(ec.Key()...), ec) + if err != nil && !n.inDelete { + logrus.Warnf("Could not find endpoint count key %s for network %s while listing: %v", datastore.Key(ec.Key()...), n.Name(), err) + continue + } + + n.epCnt = ec + if n.scope == "" { + n.scope = scope + } + nl = append(nl, n) + } + + return nl, nil +} + +func (c *controller) getNetworksFromStore() ([]*network, error) { + var nl []*network + + for _, store := range c.getStores() { + kvol, err := store.List(datastore.Key(datastore.NetworkKeyPrefix), + &network{ctrlr: c}) + // Continue searching in the next store if no keys found in this store + if err != nil { + if err != datastore.ErrKeyNotFound { + logrus.Debugf("failed to get networks for scope %s: %v", store.Scope(), err) + } + continue + } + + kvep, err := store.Map(datastore.Key(epCntKeyPrefix), &endpointCnt{}) + if err != nil { + if err != datastore.ErrKeyNotFound { + logrus.Warnf("failed to get endpoint_count map for scope %s: %v", store.Scope(), err) + } + } + + for _, kvo := range kvol { + n := kvo.(*network) + n.Lock() + n.ctrlr = c + ec := &endpointCnt{n: n} + // Trim the leading & trailing "/" to make it consistent across all stores + if val, ok := kvep[strings.Trim(datastore.Key(ec.Key()...), "/")]; ok { + ec = val.(*endpointCnt) + ec.n = n + n.epCnt = ec + } + if n.scope == "" { + n.scope = store.Scope() + } + n.Unlock() + nl = append(nl, n) + } + } + + return nl, nil +} + +func (n *network) getEndpointFromStore(eid string) (*endpoint, error) { + var errors []string + for _, store := range n.ctrlr.getStores() { + ep := &endpoint{id: eid, network: n} + err := store.GetObject(datastore.Key(ep.Key()...), ep) + // Continue searching in the next store if the key is not found in this store + if err != nil { + if err != datastore.ErrKeyNotFound { + errors = append(errors, fmt.Sprintf("{%s:%v}, ", store.Scope(), err)) + logrus.Debugf("could not find endpoint %s in %s: %v", eid, store.Scope(), err) + } + continue + } + return ep, nil + } + return nil, fmt.Errorf("could not find endpoint %s: %v", eid, errors) +} + +func (n *network) getEndpointsFromStore() ([]*endpoint, error) { + var epl []*endpoint + + tmp := endpoint{network: n} + for _, store := range n.getController().getStores() { + kvol, err := store.List(datastore.Key(tmp.KeyPrefix()...), &endpoint{network: n}) + // Continue searching in the next store if no keys found in this store + if err != nil { + if err != datastore.ErrKeyNotFound { + logrus.Debugf("failed to get endpoints for network %s scope %s: %v", + n.Name(), store.Scope(), err) + } + continue + } + + for _, kvo := range kvol { + ep := kvo.(*endpoint) + epl = append(epl, ep) + } + } + + return epl, nil +} + +func (c *controller) updateToStore(kvObject datastore.KVObject) error { + cs := c.getStore(kvObject.DataScope()) + if cs == nil { + return ErrDataStoreNotInitialized(kvObject.DataScope()) + } + + if err := cs.PutObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + return err + } + return fmt.Errorf("failed to update store for object type %T: %v", kvObject, err) + } + + return nil +} + +func (c *controller) deleteFromStore(kvObject datastore.KVObject) error { + cs := c.getStore(kvObject.DataScope()) + if cs == nil { + return ErrDataStoreNotInitialized(kvObject.DataScope()) + } + +retry: + if err := cs.DeleteObjectAtomic(kvObject); err != nil { + if err == datastore.ErrKeyModified { + if err := 
cs.GetObject(datastore.Key(kvObject.Key()...), kvObject); err != nil { + return fmt.Errorf("could not update the object to the latest state when trying to delete: %v", err) + } + logrus.Warnf("Error (%v) deleting object %v, retrying...", err, kvObject.Key()) + goto retry + } + return err + } + + return nil +} + +type netWatch struct { + localEps map[string]*endpoint + remoteEps map[string]*endpoint + stopCh chan struct{} +} + +func (c *controller) getLocalEps(nw *netWatch) []*endpoint { + c.Lock() + defer c.Unlock() + + var epl []*endpoint + for _, ep := range nw.localEps { + epl = append(epl, ep) + } + + return epl +} + +func (c *controller) watchSvcRecord(ep *endpoint) { + c.watchCh <- ep +} + +func (c *controller) unWatchSvcRecord(ep *endpoint) { + c.unWatchCh <- ep +} + +func (c *controller) networkWatchLoop(nw *netWatch, ep *endpoint, ecCh <-chan datastore.KVObject) { + for { + select { + case <-nw.stopCh: + return + case o := <-ecCh: + ec := o.(*endpointCnt) + + epl, err := ec.n.getEndpointsFromStore() + if err != nil { + break + } + + c.Lock() + var addEp []*endpoint + + delEpMap := make(map[string]*endpoint) + renameEpMap := make(map[string]bool) + for k, v := range nw.remoteEps { + delEpMap[k] = v + } + + for _, lEp := range epl { + if _, ok := nw.localEps[lEp.ID()]; ok { + continue + } + + if ep, ok := nw.remoteEps[lEp.ID()]; ok { + // On a container rename the EP ID will remain + // the same but the name will change. Service + // records should reflect the change. + // Keep the old EP entry in delEpMap and add + // the EP from the store (which has the new name) + // into the new list + if lEp.name == ep.name { + delete(delEpMap, lEp.ID()) + continue + } + renameEpMap[lEp.ID()] = true + } + nw.remoteEps[lEp.ID()] = lEp + addEp = append(addEp, lEp) + } + + // EPs whose names are to be deleted from the svc records + // should also be removed from nw's remote EP list, except + // the ones that are getting renamed. 
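+ // Renamed EPs are deliberately skipped by the deletion below: their + // remoteEps entry was already refreshed with the new name in the loop + // above, and the svc-record passes that follow remove the old name and + // re-add the new one.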
+ for _, lEp := range delEpMap { + if !renameEpMap[lEp.ID()] { + delete(nw.remoteEps, lEp.ID()) + } + } + c.Unlock() + + for _, lEp := range delEpMap { + ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), false) + + } + for _, lEp := range addEp { + ep.getNetwork().updateSvcRecord(lEp, c.getLocalEps(nw), true) + } + } + } +} + +func (c *controller) processEndpointCreate(nmap map[string]*netWatch, ep *endpoint) { + n := ep.getNetwork() + if !c.isDistributedControl() && n.Scope() == datastore.SwarmScope && n.driverIsMultihost() { + return + } + + c.Lock() + nw, ok := nmap[n.ID()] + c.Unlock() + + if ok { + // Update the svc db for the local endpoint join right away + n.updateSvcRecord(ep, c.getLocalEps(nw), true) + + c.Lock() + nw.localEps[ep.ID()] = ep + + // If we had learned that from the kv store remove it + // from remote ep list now that we know that this is + // indeed a local endpoint + delete(nw.remoteEps, ep.ID()) + c.Unlock() + return + } + + nw = &netWatch{ + localEps: make(map[string]*endpoint), + remoteEps: make(map[string]*endpoint), + } + + // Update the svc db for the local endpoint join right away + // Do this before adding this ep to localEps so that we don't + // try to update this ep's container's svc records + n.updateSvcRecord(ep, c.getLocalEps(nw), true) + + c.Lock() + nw.localEps[ep.ID()] = ep + nmap[n.ID()] = nw + nw.stopCh = make(chan struct{}) + c.Unlock() + + store := c.getStore(n.DataScope()) + if store == nil { + return + } + + if !store.Watchable() { + return + } + + ch, err := store.Watch(n.getEpCnt(), nw.stopCh) + if err != nil { + logrus.Warnf("Error creating watch for network: %v", err) + return + } + + go c.networkWatchLoop(nw, ep, ch) +} + +func (c *controller) processEndpointDelete(nmap map[string]*netWatch, ep *endpoint) { + n := ep.getNetwork() + if !c.isDistributedControl() && n.Scope() == datastore.SwarmScope && n.driverIsMultihost() { + return + } + + c.Lock() + nw, ok := nmap[n.ID()] + + if ok { + delete(nw.localEps, ep.ID()) + c.Unlock() + + // Update the svc db about local endpoint leave right away + // Do this after we remove this ep from localEps so that we + // don't try to remove this svc record from this ep's container. + n.updateSvcRecord(ep, c.getLocalEps(nw), false) + + c.Lock() + if len(nw.localEps) == 0 { + close(nw.stopCh) + + // This is the last container going away for the network. 
Destroy + // this network's svc db entry + delete(c.svcRecords, n.ID()) + + delete(nmap, n.ID()) + } + } + c.Unlock() +} + +func (c *controller) watchLoop() { + for { + select { + case ep := <-c.watchCh: + c.processEndpointCreate(c.nmap, ep) + case ep := <-c.unWatchCh: + c.processEndpointDelete(c.nmap, ep) + } + } +} + +func (c *controller) startWatch() { + if c.watchCh != nil { + return + } + c.watchCh = make(chan *endpoint) + c.unWatchCh = make(chan *endpoint) + c.nmap = make(map[string]*netWatch) + + go c.watchLoop() +} + +func (c *controller) networkCleanup() { + networks, err := c.getNetworksFromStore() + if err != nil { + logrus.Warnf("Could not retrieve networks from store(s) during network cleanup: %v", err) + return + } + + for _, n := range networks { + if n.inDelete { + logrus.Infof("Removing stale network %s (%s)", n.Name(), n.ID()) + if err := n.delete(true); err != nil { + logrus.Debugf("Error while removing stale network: %v", err) + } + } + } +} + +var populateSpecial NetworkWalker = func(nw Network) bool { + if n := nw.(*network); n.hasSpecialDriver() && !n.ConfigOnly() { + if err := n.getController().addNetwork(n); err != nil { + logrus.Warnf("Failed to populate network %q with driver %q", nw.Name(), nw.Type()) + } + } + return false +} diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go new file mode 100644 index 0000000000..cb18f054c5 --- /dev/null +++ b/vendor/github.com/docker/libnetwork/types/types.go @@ -0,0 +1,648 @@ +// Package types contains types that are common across libnetwork project +package types + +import ( + "bytes" + "fmt" + "net" + "strconv" + "strings" + + "github.com/ishidawataru/sctp" +) + +// constants for the IP address type +const ( + IP = iota // IPv4 and IPv6 + IPv4 + IPv6 +) + +// EncryptionKey is the libnetwork representation of the key distributed by the lead +// manager. 
+type EncryptionKey struct { + Subsystem string + Algorithm int32 + Key []byte + LamportTime uint64 +} + +// UUID represents a globally unique ID of various resources like network and endpoint +type UUID string + +// QosPolicy represents a quality of service policy on an endpoint +type QosPolicy struct { + MaxEgressBandwidth uint64 +} + +// TransportPort represents a local Layer 4 endpoint +type TransportPort struct { + Proto Protocol + Port uint16 +} + +// Equal checks if this instance of TransportPort is equal to the passed one +func (t *TransportPort) Equal(o *TransportPort) bool { + if t == o { + return true + } + + if o == nil { + return false + } + + if t.Proto != o.Proto || t.Port != o.Port { + return false + } + + return true +} + +// GetCopy returns a copy of this TransportPort structure instance +func (t *TransportPort) GetCopy() TransportPort { + return TransportPort{Proto: t.Proto, Port: t.Port} +} + +// String returns the TransportPort structure in string form +func (t *TransportPort) String() string { + return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) +} + +// FromString reads the TransportPort structure from string +func (t *TransportPort) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) == 2 { + t.Proto = ParseProtocol(ps[0]) + if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil { + t.Port = uint16(p) + return nil + } + } + return BadRequestErrorf("invalid format for transport port: %s", s) +} + +// PortBinding represents a port binding between the container and the host +type PortBinding struct { + Proto Protocol + IP net.IP + Port uint16 + HostIP net.IP + HostPort uint16 + HostPortEnd uint16 +} + +// HostAddr returns the host side transport address +func (p PortBinding) HostAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case TCP: + return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case SCTP: + return &sctp.SCTPAddr{IP: []net.IP{p.HostIP}, Port: int(p.HostPort)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// ContainerAddr returns the container side transport address +func (p PortBinding) ContainerAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil + case TCP: + return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil + case SCTP: + return &sctp.SCTPAddr{IP: []net.IP{p.IP}, Port: int(p.Port)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// GetCopy returns a copy of this PortBinding structure instance +func (p *PortBinding) GetCopy() PortBinding { + return PortBinding{ + Proto: p.Proto, + IP: GetIPCopy(p.IP), + Port: p.Port, + HostIP: GetIPCopy(p.HostIP), + HostPort: p.HostPort, + HostPortEnd: p.HostPortEnd, + } +} + +// String returns the PortBinding structure in string form +func (p *PortBinding) String() string { + ret := fmt.Sprintf("%s/", p.Proto) + if p.IP != nil { + ret += p.IP.String() + } + ret = fmt.Sprintf("%s:%d/", ret, p.Port) + if p.HostIP != nil { + ret += p.HostIP.String() + } + ret = fmt.Sprintf("%s:%d", ret, p.HostPort) + return ret +} + +// FromString reads the PortBinding structure from string +func (p *PortBinding) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) != 3 { + return BadRequestErrorf("invalid format for port binding: %s", s) + } + + p.Proto = ParseProtocol(ps[0]) + + var err error + if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { + return 
BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) + } + + if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { + return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) + } + + return nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + pp := strings.Split(s, ":") + if len(pp) != 2 { + return nil, 0, BadRequestErrorf("invalid format: %s", s) + } + + var ip net.IP + if pp[0] != "" { + if ip = net.ParseIP(pp[0]); ip == nil { + return nil, 0, BadRequestErrorf("invalid ip: %s", pp[0]) + } + } + + port, err := strconv.ParseUint(pp[1], 10, 16) + if err != nil { + return nil, 0, BadRequestErrorf("invalid port: %s", pp[1]) + } + + return ip, uint16(port), nil +} + +// Equal checks if this instance of PortBinding is equal to the passed one +func (p *PortBinding) Equal(o *PortBinding) bool { + if p == o { + return true + } + + if o == nil { + return false + } + + if p.Proto != o.Proto || p.Port != o.Port || + p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { + return false + } + + if p.IP != nil { + if !p.IP.Equal(o.IP) { + return false + } + } else { + if o.IP != nil { + return false + } + } + + if p.HostIP != nil { + if !p.HostIP.Equal(o.HostIP) { + return false + } + } else { + if o.HostIP != nil { + return false + } + } + + return true +} + +// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. +type ErrInvalidProtocolBinding string + +func (ipb ErrInvalidProtocolBinding) Error() string { + return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) +} + +const ( + // ICMP is for the ICMP ip protocol + ICMP = 1 + // TCP is for the TCP ip protocol + TCP = 6 + // UDP is for the UDP ip protocol + UDP = 17 + // SCTP is for the SCTP ip protocol + SCTP = 132 +) + +// Protocol represents an IP protocol number +type Protocol uint8 + +func (p Protocol) String() string { + switch p { + case ICMP: + return "icmp" + case TCP: + return "tcp" + case UDP: + return "udp" + case SCTP: + return "sctp" + default: + return fmt.Sprintf("%d", p) + } +} + +// ParseProtocol returns the respective Protocol type for the passed string +func ParseProtocol(s string) Protocol { + switch strings.ToLower(s) { + case "icmp": + return ICMP + case "udp": + return UDP + case "tcp": + return TCP + case "sctp": + return SCTP + default: + return 0 + } +} + +// GetMacCopy returns a copy of the passed MAC address +func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { + if from == nil { + return nil + } + to := make(net.HardwareAddr, len(from)) + copy(to, from) + return to +} + +// GetIPCopy returns a copy of the passed IP address +func GetIPCopy(from net.IP) net.IP { + if from == nil { + return nil + } + to := make(net.IP, len(from)) + copy(to, from) + return to +} + +// GetIPNetCopy returns a copy of the passed IP Network +func GetIPNetCopy(from *net.IPNet) *net.IPNet { + if from == nil { + return nil + } + bm := make(net.IPMask, len(from.Mask)) + copy(bm, from.Mask) + return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} +} + +// GetIPNetCanonical returns the canonical form for the passed network +func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + c := GetIPNetCopy(nw) + c.IP = c.IP.Mask(nw.Mask) + return c +} + +// CompareIPNet returns true if the two IP Networks are equal +func CompareIPNet(a, b *net.IPNet) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) +} + 
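A quick usage sketch of the helpers above (illustrative only, not part of the vendored patch; it assumes this package is importable at its vendor path github.com/docker/libnetwork/types): the String/FromString pairs round-trip, and CompareIPNet compares IP and mask byte-for-byte, so networks are usually canonicalized first.

package main

import (
	"fmt"

	"github.com/docker/libnetwork/types"
)

func main() {
	// Round-trip a TransportPort through its "proto/port" string form.
	tp := types.TransportPort{Proto: types.ParseProtocol("tcp"), Port: 80}
	var back types.TransportPort
	if err := back.FromString(tp.String()); err != nil { // tp.String() == "tcp/80"
		panic(err)
	}
	fmt.Println(back.Equal(&tp)) // true

	// CompareIPNet is an exact comparison, so mask off the host bits
	// with GetIPNetCanonical before comparing.
	a, _ := types.ParseCIDR("10.1.2.3/16")
	b, _ := types.ParseCIDR("10.1.9.9/16")
	fmt.Println(types.CompareIPNet(types.GetIPNetCanonical(a), types.GetIPNetCanonical(b))) // true
}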
+// GetMinimalIP returns the address in its shortest form +func GetMinimalIP(ip net.IP) net.IP { + if ip != nil && ip.To4() != nil { + return ip.To4() + } + return ip +} + +// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation +func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + if len(nw.IP) == 16 && nw.IP.To4() != nil { + m := nw.Mask + if len(m) == 16 { + m = m[12:16] + } + return &net.IPNet{IP: nw.IP.To4(), Mask: m} + } + return nw +} + +// IsIPNetValid returns true if the ipnet is a valid network/mask +// combination. Otherwise returns false. +func IsIPNetValid(nw *net.IPNet) bool { + return nw.String() != "0.0.0.0/0" +} + +var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// compareIPMask checks if the passed ip and mask are semantically compatible. +// It returns the byte indexes for the address and mask so that the caller can +// do bitwise operations without modifying the address representation. +func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { + // Find the effective start of the address and mask + if len(ip) == net.IPv6len && ip.To4() != nil { + is = 12 + } + if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { + ms = 12 + } + // Check if address and mask are semantically compatible + if len(ip[is:]) != len(mask[ms:]) { + err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) + } + return +} + +// GetHostPartIP returns the host portion of the ip address identified by the mask. +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective start of the address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) + } + + // Compute host portion + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] &= ^mask[ms+i] + } + + return out, nil +} + +// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective start of the address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) + } + + // Compute broadcast address + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] |= ^mask[ms+i] + } + + return out, nil +} + +// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation +func ParseCIDR(cidr string) (n *net.IPNet, e error) { + var i net.IP + if i, n, e = net.ParseCIDR(cidr); e == nil { + n.IP = i + } + return +} + +const ( + // NEXTHOP indicates a StaticRoute with an IP next hop. + NEXTHOP = iota + + // CONNECTED indicates a StaticRoute with an interface for directly connected peers. + CONNECTED +) + +// StaticRoute is a statically-provisioned IP route. +type StaticRoute struct { + Destination *net.IPNet + + RouteType int // NEXTHOP or CONNECTED + + // NextHop will be resolved by the kernel (i.e. as a loose hop). 
+ NextHop net.IP +} + +// GetCopy returns a copy of this StaticRoute structure +func (r *StaticRoute) GetCopy() *StaticRoute { + d := GetIPNetCopy(r.Destination) + nh := GetIPCopy(r.NextHop) + return &StaticRoute{Destination: d, + RouteType: r.RouteType, + NextHop: nh, + } +} + +// InterfaceStatistics represents the interface's statistics +type InterfaceStatistics struct { + RxBytes uint64 + RxPackets uint64 + RxErrors uint64 + RxDropped uint64 + TxBytes uint64 + TxPackets uint64 + TxErrors uint64 + TxDropped uint64 +} + +func (is *InterfaceStatistics) String() string { + return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", + is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) +} + +/****************************** + * Well-known Error Interfaces + ******************************/ + +// MaskableError is an interface for errors which can be ignored by the caller +type MaskableError interface { + // Maskable makes implementer into MaskableError type + Maskable() +} + +// RetryError is an interface for errors which might get resolved through retry +type RetryError interface { + // Retry makes implementer into RetryError type + Retry() +} + +// BadRequestError is an interface for errors originated by a bad request +type BadRequestError interface { + // BadRequest makes implementer into BadRequestError type + BadRequest() +} + +// NotFoundError is an interface for errors raised because a needed resource is not available +type NotFoundError interface { + // NotFound makes implementer into NotFoundError type + NotFound() +} + +// ForbiddenError is an interface for errors which denote a valid request that cannot be honored +type ForbiddenError interface { + // Forbidden makes implementer into ForbiddenError type + Forbidden() +} + +// NoServiceError is an interface for errors returned when the required service is not available +type NoServiceError interface { + // NoService makes implementer into NoServiceError type + NoService() +} + +// TimeoutError is an interface for errors raised because of timeout +type TimeoutError interface { + // Timeout makes implementer into TimeoutError type + Timeout() +} + +// NotImplementedError is an interface for errors raised because requested functionality is not yet implemented +type NotImplementedError interface { + // NotImplemented makes implementer into NotImplementedError type + NotImplemented() +} + +// InternalError is an interface for errors raised because of an internal error +type InternalError interface { + // Internal makes implementer into InternalError type + Internal() +} + +/****************************** + * Well-known Error Formatters + ******************************/ + +// BadRequestErrorf creates an instance of BadRequestError +func BadRequestErrorf(format string, params ...interface{}) error { + return badRequest(fmt.Sprintf(format, params...)) +} + +// NotFoundErrorf creates an instance of NotFoundError +func NotFoundErrorf(format string, params ...interface{}) error { + return notFound(fmt.Sprintf(format, params...)) +} + +// ForbiddenErrorf creates an instance of ForbiddenError +func ForbiddenErrorf(format string, params ...interface{}) error { + return forbidden(fmt.Sprintf(format, params...)) +} + +// NoServiceErrorf creates an instance of NoServiceError +func NoServiceErrorf(format string, params ...interface{}) error { + return noService(fmt.Sprintf(format, params...)) +} + +// NotImplementedErrorf 
creates an instance of NotImplementedError +func NotImplementedErrorf(format string, params ...interface{}) error { + return notImpl(fmt.Sprintf(format, params...)) +} + +// TimeoutErrorf creates an instance of TimeoutError +func TimeoutErrorf(format string, params ...interface{}) error { + return timeout(fmt.Sprintf(format, params...)) +} + +// InternalErrorf creates an instance of InternalError +func InternalErrorf(format string, params ...interface{}) error { + return internal(fmt.Sprintf(format, params...)) +} + +// InternalMaskableErrorf creates an instance of InternalError and MaskableError +func InternalMaskableErrorf(format string, params ...interface{}) error { + return maskInternal(fmt.Sprintf(format, params...)) +} + +// RetryErrorf creates an instance of RetryError +func RetryErrorf(format string, params ...interface{}) error { + return retry(fmt.Sprintf(format, params...)) +} + +/*********************** + * Internal Error Types + ***********************/ +type badRequest string + +func (br badRequest) Error() string { + return string(br) +} +func (br badRequest) BadRequest() {} + +type maskBadRequest string + +type notFound string + +func (nf notFound) Error() string { + return string(nf) +} +func (nf notFound) NotFound() {} + +type forbidden string + +func (frb forbidden) Error() string { + return string(frb) +} +func (frb forbidden) Forbidden() {} + +type noService string + +func (ns noService) Error() string { + return string(ns) +} +func (ns noService) NoService() {} + +type maskNoService string + +type timeout string + +func (to timeout) Error() string { + return string(to) +} +func (to timeout) Timeout() {} + +type notImpl string + +func (ni notImpl) Error() string { + return string(ni) +} +func (ni notImpl) NotImplemented() {} + +type internal string + +func (nt internal) Error() string { + return string(nt) +} +func (nt internal) Internal() {} + +type maskInternal string + +func (mnt maskInternal) Error() string { + return string(mnt) +} +func (mnt maskInternal) Internal() {} +func (mnt maskInternal) Maskable() {} + +type retry string + +func (r retry) Error() string { + return string(r) +} +func (r retry) Retry() {} diff --git a/vendor/github.com/docker/libtrust/LICENSE b/vendor/github.com/docker/libtrust/LICENSE deleted file mode 100644 index 27448585ad..0000000000 --- a/vendor/github.com/docker/libtrust/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/docker/libtrust/certificates.go b/vendor/github.com/docker/libtrust/certificates.go deleted file mode 100644 index 3dcca33cb1..0000000000 --- a/vendor/github.com/docker/libtrust/certificates.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "fmt" - "io/ioutil" - "math/big" - "net" - "time" -) - -type certTemplateInfo struct { - commonName string - domains []string - ipAddresses []net.IP - isCA bool - clientAuth bool - serverAuth bool -} - -func generateCertTemplate(info *certTemplateInfo) *x509.Certificate { - // Generate a certificate template which is valid from the past week to - // 10 years from now. The usage of the certificate depends on the - // specified fields in the given certTempInfo object. 
- var ( - keyUsage x509.KeyUsage - extKeyUsage []x509.ExtKeyUsage - ) - - if info.isCA { - keyUsage = x509.KeyUsageCertSign - } - - if info.clientAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageClientAuth) - } - - if info.serverAuth { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - return &x509.Certificate{ - SerialNumber: big.NewInt(0), - Subject: pkix.Name{ - CommonName: info.commonName, - }, - NotBefore: time.Now().Add(-time.Hour * 24 * 7), - NotAfter: time.Now().Add(time.Hour * 24 * 365 * 10), - DNSNames: info.domains, - IPAddresses: info.ipAddresses, - IsCA: info.isCA, - KeyUsage: keyUsage, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: info.isCA, - } -} - -func generateCert(pub PublicKey, priv PrivateKey, subInfo, issInfo *certTemplateInfo) (cert *x509.Certificate, err error) { - pubCertTemplate := generateCertTemplate(subInfo) - privCertTemplate := generateCertTemplate(issInfo) - - certDER, err := x509.CreateCertificate( - rand.Reader, pubCertTemplate, privCertTemplate, - pub.CryptoPublicKey(), priv.CryptoPrivateKey(), - ) - if err != nil { - return nil, fmt.Errorf("failed to create certificate: %s", err) - } - - cert, err = x509.ParseCertificate(certDER) - if err != nil { - return nil, fmt.Errorf("failed to parse certificate: %s", err) - } - - return -} - -// GenerateSelfSignedServerCert creates a self-signed certificate for the -// given key which is to be used for TLS servers with the given domains and -// IP addresses. -func GenerateSelfSignedServerCert(key PrivateKey, domains []string, ipAddresses []net.IP) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - domains: domains, - ipAddresses: ipAddresses, - serverAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateSelfSignedClientCert creates a self-signed certificate for the -// given key which is to be used for TLS clients. -func GenerateSelfSignedClientCert(key PrivateKey) (*x509.Certificate, error) { - info := &certTemplateInfo{ - commonName: key.KeyID(), - clientAuth: true, - } - - return generateCert(key.PublicKey(), key, info, info) -} - -// GenerateCACert creates a certificate which can be used as a trusted -// certificate authority. -func GenerateCACert(signer PrivateKey, trustedKey PublicKey) (*x509.Certificate, error) { - subjectInfo := &certTemplateInfo{ - commonName: trustedKey.KeyID(), - isCA: true, - } - issuerInfo := &certTemplateInfo{ - commonName: signer.KeyID(), - } - - return generateCert(trustedKey, signer, subjectInfo, issuerInfo) -} - -// GenerateCACertPool creates a certificate authority pool to be used for a -// TLS configuration. Any self-signed certificates issued by the specified -// trusted keys will be verified during a TLS handshake -func GenerateCACertPool(signer PrivateKey, trustedKeys []PublicKey) (*x509.CertPool, error) { - certPool := x509.NewCertPool() - - for _, trustedKey := range trustedKeys { - cert, err := GenerateCACert(signer, trustedKey) - if err != nil { - return nil, fmt.Errorf("failed to generate CA certificate: %s", err) - } - - certPool.AddCert(cert) - } - - return certPool, nil -} - -// LoadCertificateBundle loads certificates from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". 
-func LoadCertificateBundle(filename string) ([]*x509.Certificate, error) { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - certificates := []*x509.Certificate{} - var block *pem.Block - block, b = pem.Decode(b) - for ; block != nil; block, b = pem.Decode(b) { - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, err - } - certificates = append(certificates, cert) - } else { - return nil, fmt.Errorf("invalid pem block type: %s", block.Type) - } - } - - return certificates, nil -} - -// LoadCertificatePool loads a CA pool from the given file. The file should be pem encoded -// containing one or more certificates. The expected pem type is "CERTIFICATE". -func LoadCertificatePool(filename string) (*x509.CertPool, error) { - certs, err := LoadCertificateBundle(filename) - if err != nil { - return nil, err - } - pool := x509.NewCertPool() - for _, cert := range certs { - pool.AddCert(cert) - } - return pool, nil -} diff --git a/vendor/github.com/docker/libtrust/doc.go b/vendor/github.com/docker/libtrust/doc.go deleted file mode 100644 index ec5d2159c1..0000000000 --- a/vendor/github.com/docker/libtrust/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package libtrust provides an interface for managing authentication and -authorization using public key cryptography. Authentication is handled -using the identity attached to the public key and verified through TLS -x509 certificates, a key challenge, or signature. Authorization and -access control is managed through a trust graph distributed between -both remote trust servers and locally cached and managed data. -*/ -package libtrust diff --git a/vendor/github.com/docker/libtrust/ec_key.go b/vendor/github.com/docker/libtrust/ec_key.go deleted file mode 100644 index 00bbe4b3ca..0000000000 --- a/vendor/github.com/docker/libtrust/ec_key.go +++ /dev/null @@ -1,428 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * EC DSA PUBLIC KEY - */ - -// ecPublicKey implements a libtrust.PublicKey using elliptic curve digital -// signature algorithms. -type ecPublicKey struct { - *ecdsa.PublicKey - curveName string - signatureAlgorithm *signatureAlgorithm - extended map[string]interface{} -} - -func fromECPublicKey(cryptoPublicKey *ecdsa.PublicKey) (*ecPublicKey, error) { - curve := cryptoPublicKey.Curve - - switch { - case curve == elliptic.P256(): - return &ecPublicKey{cryptoPublicKey, "P-256", es256, map[string]interface{}{}}, nil - case curve == elliptic.P384(): - return &ecPublicKey{cryptoPublicKey, "P-384", es384, map[string]interface{}{}}, nil - case curve == elliptic.P521(): - return &ecPublicKey{cryptoPublicKey, "P-521", es512, map[string]interface{}{}}, nil - default: - return nil, errors.New("unsupported elliptic curve") - } -} - -// KeyType returns the key type for elliptic curve keys, i.e., "EC". -func (k *ecPublicKey) KeyType() string { - return "EC" -} - -// CurveName returns the elliptic curve identifier. -// Possible values are "P-256", "P-384", and "P-521". -func (k *ecPublicKey) CurveName() string { - return k.curveName -} - -// KeyID returns a distinct identifier which is unique to this Public Key. 
-func (k *ecPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *ecPublicKey) String() string { - return fmt.Sprintf("EC Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this -// PublicKey. The alg parameter should identify the digital signature -// algorithm which was used to produce the signature and should be supported -// by this public key. Returns a nil error if the signature is valid. -func (k *ecPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // For EC keys there is only one supported signature algorithm depending - // on the curve parameters. - if k.signatureAlgorithm.HeaderParam() != alg { - return fmt.Errorf("unable to verify signature: EC Public Key with curve %q does not support signature algorithm %q", k.curveName, alg) - } - - // signature is the concatenation of (r, s), base64Url encoded. - sigLength := len(signature) - expectedOctetLength := 2 * ((k.Params().BitSize + 7) >> 3) - if sigLength != expectedOctetLength { - return fmt.Errorf("signature length is %d octets long, should be %d", sigLength, expectedOctetLength) - } - - rBytes, sBytes := signature[:sigLength/2], signature[sigLength/2:] - r := new(big.Int).SetBytes(rBytes) - s := new(big.Int).SetBytes(sBytes) - - hasher := k.signatureAlgorithm.HashID().New() - _, err := io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - if !ecdsa.Verify(k.PublicKey, hash, r, s) { - return errors.New("invalid signature") - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *ecPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["crv"] = k.CurveName() - - xBytes := k.X.Bytes() - yBytes := k.Y.Bytes() - octetLength := (k.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output so that x, y are each - // *octetLength* bytes long. - xBuf := make([]byte, octetLength-len(xBytes), octetLength) - yBuf := make([]byte, octetLength-len(yBytes), octetLength) - xBuf = append(xBuf, xBytes...) - yBuf = append(yBuf, yBytes...) - - jwk["x"] = joseBase64UrlEncode(xBuf) - jwk["y"] = joseBase64UrlEncode(yBuf) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *ecPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. 
- return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *ecPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *ecPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func ecPublicKeyFromMap(jwk map[string]interface{}) (*ecPublicKey, error) { - // JWK key type (kty) has already been determined to be "EC". - // Need to extract 'crv', 'x', 'y', and 'kid' and check for - // consistency. - - // Get the curve identifier value. - crv, err := stringFromMap(jwk, "crv") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key curve identifier: %s", err) - } - - var ( - curve elliptic.Curve - sigAlg *signatureAlgorithm - ) - - switch { - case crv == "P-256": - curve = elliptic.P256() - sigAlg = es256 - case crv == "P-384": - curve = elliptic.P384() - sigAlg = es384 - case crv == "P-521": - curve = elliptic.P521() - sigAlg = es512 - default: - return nil, fmt.Errorf("JWK EC Public Key curve identifier not supported: %q\n", crv) - } - - // Get the X and Y coordinates for the public key point. - xB64Url, err := stringFromMap(jwk, "x") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - x, err := parseECCoordinate(xB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key x-coordinate: %s", err) - } - - yB64Url, err := stringFromMap(jwk, "y") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - y, err := parseECCoordinate(yB64Url, curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key y-coordinate: %s", err) - } - - key := &ecPublicKey{ - PublicKey: &ecdsa.PublicKey{Curve: curve, X: x, Y: y}, - curveName: crv, signatureAlgorithm: sigAlg, - } - - // Key ID is optional too, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK EC Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK EC Public Key ID does not match: %s", kid) - } - } - - key.extended = jwk - - return key, nil -} - -/* - * EC DSA PRIVATE KEY - */ - -// ecPrivateKey implements a JWK Private Key using elliptic curve digital signature -// algorithms. -type ecPrivateKey struct { - ecPublicKey - *ecdsa.PrivateKey -} - -func fromECPrivateKey(cryptoPrivateKey *ecdsa.PrivateKey) (*ecPrivateKey, error) { - publicKey, err := fromECPublicKey(&cryptoPrivateKey.PublicKey) - if err != nil { - return nil, err - } - - return &ecPrivateKey{*publicKey, cryptoPrivateKey}, nil -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *ecPrivateKey) PublicKey() PublicKey { - return &k.ecPublicKey -} - -func (k *ecPrivateKey) String() string { - return fmt.Sprintf("EC Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the elliptic curve private key. If the specified hashing algorithm is -// supported by this key, that hash function is used to generate the signature -// otherwise the the default hashing algorithm for this key is used. Returns -// the signature and the name of the JWK signature algorithm used, e.g., -// "ES256", "ES384", "ES512". -func (k *ecPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. 
- // The given hashId is only a suggestion, and since EC keys only support - // on signature/hash algorithm given the curve name, we disregard it for - // the elliptic curve JWK signature implementation. - hasher := k.signatureAlgorithm.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - r, s, err := ecdsa.Sign(rand.Reader, k.PrivateKey, hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - rBytes, sBytes := r.Bytes(), s.Bytes() - octetLength := (k.ecPublicKey.Params().BitSize + 7) >> 3 - // MUST include leading zeros in the output - rBuf := make([]byte, octetLength-len(rBytes), octetLength) - sBuf := make([]byte, octetLength-len(sBytes), octetLength) - - rBuf = append(rBuf, rBytes...) - sBuf = append(sBuf, sBytes...) - - signature = append(rBuf, sBuf...) - alg = k.signatureAlgorithm.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *ecPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *ecPrivateKey) toMap() map[string]interface{} { - jwk := k.ecPublicKey.toMap() - - dBytes := k.D.Bytes() - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := k.ecPublicKey.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - // Create a buffer with the necessary zero-padding. - dBuf := make([]byte, octetLength-len(dBytes), octetLength) - dBuf = append(dBuf, dBytes...) - - jwk["d"] = joseBase64UrlEncode(dBuf) - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// elliptic curve keys. -func (k *ecPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKIX format. -func (k *ecPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalECPrivateKey(k.PrivateKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize EC PrivateKey to DER-encoded PKIX format: %s", err) - } - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("EC PRIVATE KEY", derBytes, k.extended) -} - -func ecPrivateKeyFromMap(jwk map[string]interface{}) (*ecPrivateKey, error) { - dB64Url, err := stringFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key: %s", err) - } - - // JWK key type (kty) has already been determined to be "EC". - // Need to extract the public key information, then extract the private - // key value 'd'. 
- publicKey, err := ecPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - d, err := parseECPrivateParam(dB64Url, publicKey.Curve) - if err != nil { - return nil, fmt.Errorf("JWK EC Private Key d-param: %s", err) - } - - key := &ecPrivateKey{ - ecPublicKey: *publicKey, - PrivateKey: &ecdsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: d, - }, - } - - return key, nil -} - -/* - * Key Generation Functions. - */ - -func generateECPrivateKey(curve elliptic.Curve) (k *ecPrivateKey, err error) { - k = new(ecPrivateKey) - k.PrivateKey, err = ecdsa.GenerateKey(curve, rand.Reader) - if err != nil { - return nil, err - } - - k.ecPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateECP256PrivateKey generates a key pair using elliptic curve P-256. -func GenerateECP256PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P256()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-256 key: %s", err) - } - - k.curveName = "P-256" - k.signatureAlgorithm = es256 - - return k, nil -} - -// GenerateECP384PrivateKey generates a key pair using elliptic curve P-384. -func GenerateECP384PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P384()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-384 key: %s", err) - } - - k.curveName = "P-384" - k.signatureAlgorithm = es384 - - return k, nil -} - -// GenerateECP521PrivateKey generates aß key pair using elliptic curve P-521. -func GenerateECP521PrivateKey() (PrivateKey, error) { - k, err := generateECPrivateKey(elliptic.P521()) - if err != nil { - return nil, fmt.Errorf("error generating EC P-521 key: %s", err) - } - - k.curveName = "P-521" - k.signatureAlgorithm = es512 - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/filter.go b/vendor/github.com/docker/libtrust/filter.go deleted file mode 100644 index 5b2b4fca6f..0000000000 --- a/vendor/github.com/docker/libtrust/filter.go +++ /dev/null @@ -1,50 +0,0 @@ -package libtrust - -import ( - "path/filepath" -) - -// FilterByHosts filters the list of PublicKeys to only those which contain a -// 'hosts' pattern which matches the given host. If *includeEmpty* is true, -// then keys which do not specify any hosts are also returned. 
-func FilterByHosts(keys []PublicKey, host string, includeEmpty bool) ([]PublicKey, error) { - filtered := make([]PublicKey, 0, len(keys)) - - for _, pubKey := range keys { - var hosts []string - switch v := pubKey.GetExtendedField("hosts").(type) { - case []string: - hosts = v - case []interface{}: - for _, value := range v { - h, ok := value.(string) - if !ok { - continue - } - hosts = append(hosts, h) - } - } - - if len(hosts) == 0 { - if includeEmpty { - filtered = append(filtered, pubKey) - } - continue - } - - // Check if any hosts match pattern - for _, hostPattern := range hosts { - match, err := filepath.Match(hostPattern, host) - if err != nil { - return nil, err - } - - if match { - filtered = append(filtered, pubKey) - continue - } - } - } - - return filtered, nil -} diff --git a/vendor/github.com/docker/libtrust/hash.go b/vendor/github.com/docker/libtrust/hash.go deleted file mode 100644 index a2df787dd9..0000000000 --- a/vendor/github.com/docker/libtrust/hash.go +++ /dev/null @@ -1,56 +0,0 @@ -package libtrust - -import ( - "crypto" - _ "crypto/sha256" // Registrer SHA224 and SHA256 - _ "crypto/sha512" // Registrer SHA384 and SHA512 - "fmt" -) - -type signatureAlgorithm struct { - algHeaderParam string - hashID crypto.Hash -} - -func (h *signatureAlgorithm) HeaderParam() string { - return h.algHeaderParam -} - -func (h *signatureAlgorithm) HashID() crypto.Hash { - return h.hashID -} - -var ( - rs256 = &signatureAlgorithm{"RS256", crypto.SHA256} - rs384 = &signatureAlgorithm{"RS384", crypto.SHA384} - rs512 = &signatureAlgorithm{"RS512", crypto.SHA512} - es256 = &signatureAlgorithm{"ES256", crypto.SHA256} - es384 = &signatureAlgorithm{"ES384", crypto.SHA384} - es512 = &signatureAlgorithm{"ES512", crypto.SHA512} -) - -func rsaSignatureAlgorithmByName(alg string) (*signatureAlgorithm, error) { - switch { - case alg == "RS256": - return rs256, nil - case alg == "RS384": - return rs384, nil - case alg == "RS512": - return rs512, nil - default: - return nil, fmt.Errorf("RSA Digital Signature Algorithm %q not supported", alg) - } -} - -func rsaPKCS1v15SignatureAlgorithmForHashID(hashID crypto.Hash) *signatureAlgorithm { - switch { - case hashID == crypto.SHA512: - return rs512 - case hashID == crypto.SHA384: - return rs384 - case hashID == crypto.SHA256: - fallthrough - default: - return rs256 - } -} diff --git a/vendor/github.com/docker/libtrust/jsonsign.go b/vendor/github.com/docker/libtrust/jsonsign.go deleted file mode 100644 index cb2ca9a769..0000000000 --- a/vendor/github.com/docker/libtrust/jsonsign.go +++ /dev/null @@ -1,657 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/x509" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "sort" - "time" - "unicode" -) - -var ( - // ErrInvalidSignContent is used when the content to be signed is invalid. - ErrInvalidSignContent = errors.New("invalid sign content") - - // ErrInvalidJSONContent is used when invalid json is encountered. - ErrInvalidJSONContent = errors.New("invalid json content") - - // ErrMissingSignatureKey is used when the specified signature key - // does not exist in the JSON content. 
- ErrMissingSignatureKey = errors.New("missing signature key") -) - -type jsHeader struct { - JWK PublicKey `json:"jwk,omitempty"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c,omitempty"` -} - -type jsSignature struct { - Header jsHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected,omitempty"` -} - -type jsSignaturesSorted []jsSignature - -func (jsbkid jsSignaturesSorted) Swap(i, j int) { jsbkid[i], jsbkid[j] = jsbkid[j], jsbkid[i] } -func (jsbkid jsSignaturesSorted) Len() int { return len(jsbkid) } - -func (jsbkid jsSignaturesSorted) Less(i, j int) bool { - ki, kj := jsbkid[i].Header.JWK.KeyID(), jsbkid[j].Header.JWK.KeyID() - si, sj := jsbkid[i].Signature, jsbkid[j].Signature - - if ki == kj { - return si < sj - } - - return ki < kj -} - -type signKey struct { - PrivateKey - Chain []*x509.Certificate -} - -// JSONSignature represents a signature of a json object. -type JSONSignature struct { - payload string - signatures []jsSignature - indent string - formatLength int - formatTail []byte -} - -func newJSONSignature() *JSONSignature { - return &JSONSignature{ - signatures: make([]jsSignature, 0, 1), - } -} - -// Payload returns the encoded payload of the signature. This -// payload should not be signed directly -func (js *JSONSignature) Payload() ([]byte, error) { - return joseBase64UrlDecode(js.payload) -} - -func (js *JSONSignature) protectedHeader() (string, error) { - protected := map[string]interface{}{ - "formatLength": js.formatLength, - "formatTail": joseBase64UrlEncode(js.formatTail), - "time": time.Now().UTC().Format(time.RFC3339), - } - protectedBytes, err := json.Marshal(protected) - if err != nil { - return "", err - } - - return joseBase64UrlEncode(protectedBytes), nil -} - -func (js *JSONSignature) signBytes(protectedHeader string) ([]byte, error) { - buf := make([]byte, len(js.payload)+len(protectedHeader)+1) - copy(buf, protectedHeader) - buf[len(protectedHeader)] = '.' - copy(buf[len(protectedHeader)+1:], js.payload) - return buf, nil -} - -// Sign adds a signature using the given private key. -func (js *JSONSignature) Sign(key PrivateKey) error { - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - js.signatures = append(js.signatures, jsSignature{ - Header: jsHeader{ - JWK: key.PublicKey(), - Algorithm: algorithm, - }, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// SignWithChain adds a signature using the given private key -// and setting the x509 chain. The public key of the first element -// in the chain must be the public key corresponding with the sign key. 
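// A minimal signing sketch for the JSONSignature type above, assuming the
// github.com/docker/libtrust import path; the content document is
// illustrative. NewJSONSignature and JWS are defined further down in this
// same file.
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	content := []byte("{\n   \"name\": \"example\",\n   \"tag\": \"latest\"\n}")

	js, err := libtrust.NewJSONSignature(content)
	if err != nil {
		log.Fatal(err)
	}

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	// Sign appends a jsSignature whose protected header covers the payload
	// plus the original formatting (formatLength/formatTail).
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	jws, err := js.JWS() // JSON-serialized JWS, signatures sorted by key ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(jws))
}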
-func (js *JSONSignature) SignWithChain(key PrivateKey, chain []*x509.Certificate) error { - // Ensure key.Chain[0] is public key for key - //key.Chain.PublicKey - //key.PublicKey().CryptoPublicKey() - - // Verify chain - protected, err := js.protectedHeader() - if err != nil { - return err - } - signBytes, err := js.signBytes(protected) - if err != nil { - return err - } - sigBytes, algorithm, err := key.Sign(bytes.NewReader(signBytes), crypto.SHA256) - if err != nil { - return err - } - - header := jsHeader{ - Chain: make([]string, len(chain)), - Algorithm: algorithm, - } - - for i, cert := range chain { - header.Chain[i] = base64.StdEncoding.EncodeToString(cert.Raw) - } - - js.signatures = append(js.signatures, jsSignature{ - Header: header, - Signature: joseBase64UrlEncode(sigBytes), - Protected: protected, - }) - - return nil -} - -// Verify verifies all the signatures and returns the list of -// public keys used to sign. Any x509 chains are not checked. -func (js *JSONSignature) Verify() ([]PublicKey, error) { - keys := make([]PublicKey, len(js.signatures)) - for i, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - } else if signature.Header.JWK != nil { - publicKey = signature.Header.JWK - } else { - return nil, errors.New("missing public key") - } - - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - - keys[i] = publicKey - } - return keys, nil -} - -// VerifyChains verifies all the signatures and the chains associated -// with each signature and returns the list of verified chains. -// Signatures without an x509 chain are not checked. -func (js *JSONSignature) VerifyChains(ca *x509.CertPool) ([][]*x509.Certificate, error) { - chains := make([][]*x509.Certificate, 0, len(js.signatures)) - for _, signature := range js.signatures { - signBytes, err := js.signBytes(signature.Protected) - if err != nil { - return nil, err - } - var publicKey PublicKey - if len(signature.Header.Chain) > 0 { - certBytes, err := base64.StdEncoding.DecodeString(signature.Header.Chain[0]) - if err != nil { - return nil, err - } - cert, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - publicKey, err = FromCryptoPublicKey(cert.PublicKey) - if err != nil { - return nil, err - } - intermediates := x509.NewCertPool() - if len(signature.Header.Chain) > 1 { - intermediateChain := signature.Header.Chain[1:] - for i := range intermediateChain { - certBytes, err := base64.StdEncoding.DecodeString(intermediateChain[i]) - if err != nil { - return nil, err - } - intermediate, err := x509.ParseCertificate(certBytes) - if err != nil { - return nil, err - } - intermediates.AddCert(intermediate) - } - } - - verifyOptions := x509.VerifyOptions{ - Intermediates: intermediates, - Roots: ca, - } - - verifiedChains, err := cert.Verify(verifyOptions) - if err != nil { - return nil, err - } - chains = append(chains, verifiedChains...) 
- - sigBytes, err := joseBase64UrlDecode(signature.Signature) - if err != nil { - return nil, err - } - - err = publicKey.Verify(bytes.NewReader(signBytes), signature.Header.Algorithm, sigBytes) - if err != nil { - return nil, err - } - } - - } - return chains, nil -} - -// JWS returns JSON serialized JWS according to -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-7.2 -func (js *JSONSignature) JWS() ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("missing signature") - } - - sort.Sort(jsSignaturesSorted(js.signatures)) - - jsonMap := map[string]interface{}{ - "payload": js.payload, - "signatures": js.signatures, - } - - return json.MarshalIndent(jsonMap, "", " ") -} - -func notSpace(r rune) bool { - return !unicode.IsSpace(r) -} - -func detectJSONIndent(jsonContent []byte) (indent string) { - if len(jsonContent) > 2 && jsonContent[0] == '{' && jsonContent[1] == '\n' { - quoteIndex := bytes.IndexRune(jsonContent[1:], '"') - if quoteIndex > 0 { - indent = string(jsonContent[2 : quoteIndex+1]) - } - } - return -} - -type jsParsedHeader struct { - JWK json.RawMessage `json:"jwk"` - Algorithm string `json:"alg"` - Chain []string `json:"x5c"` -} - -type jsParsedSignature struct { - Header jsParsedHeader `json:"header"` - Signature string `json:"signature"` - Protected string `json:"protected"` -} - -// ParseJWS parses a JWS serialized JSON object into a JSON Signature. -func ParseJWS(content []byte) (*JSONSignature, error) { - type jsParsed struct { - Payload string `json:"payload"` - Signatures []jsParsedSignature `json:"signatures"` - } - parsed := &jsParsed{} - err := json.Unmarshal(content, parsed) - if err != nil { - return nil, err - } - if len(parsed.Signatures) == 0 { - return nil, errors.New("missing signatures") - } - payload, err := joseBase64UrlDecode(parsed.Payload) - if err != nil { - return nil, err - } - - js, err := NewJSONSignature(payload) - if err != nil { - return nil, err - } - js.signatures = make([]jsSignature, len(parsed.Signatures)) - for i, signature := range parsed.Signatures { - header := jsHeader{ - Algorithm: signature.Header.Algorithm, - } - if signature.Header.Chain != nil { - header.Chain = signature.Header.Chain - } - if signature.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signature.Header.JWK)) - if err != nil { - return nil, err - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signature.Signature, - Protected: signature.Protected, - } - } - - return js, nil -} - -// NewJSONSignature returns a new unsigned JWS from a json byte array. -// JSONSignature will need to be signed before serializing or storing. -// Optionally, one or more signatures can be provided as byte buffers, -// containing serialized JWS signatures, to assemble a fully signed JWS -// package. It is the caller's responsibility to ensure uniqueness of the -// provided signatures.
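// A round-trip sketch for ParseJWS above, under the same import-path
// assumption: parse a serialized JWS (for example, the output of JWS()) and
// check its signatures. The file name is illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	raw, err := ioutil.ReadFile("signed.jws")
	if err != nil {
		log.Fatal(err)
	}

	js, err := libtrust.ParseJWS(raw)
	if err != nil {
		log.Fatal(err)
	}

	// Verify checks every signature and returns the signing keys; x509
	// chains are deliberately not validated here (VerifyChains does that).
	keys, err := js.Verify()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println("signed by:", k.KeyID())
	}
}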
-func NewJSONSignature(content []byte, signatures ...[]byte) (*JSONSignature, error) { - var dataMap map[string]interface{} - err := json.Unmarshal(content, &dataMap) - if err != nil { - return nil, err - } - - js := newJSONSignature() - js.indent = detectJSONIndent(content) - - js.payload = joseBase64UrlEncode(content) - - // Find trailing } and whitespace, put in protected header - closeIndex := bytes.LastIndexFunc(content, notSpace) - if content[closeIndex] != '}' { - return nil, ErrInvalidJSONContent - } - lastRuneIndex := bytes.LastIndexFunc(content[:closeIndex], notSpace) - if content[lastRuneIndex] == ',' { - return nil, ErrInvalidJSONContent - } - js.formatLength = lastRuneIndex + 1 - js.formatTail = content[js.formatLength:] - - if len(signatures) > 0 { - for _, signature := range signatures { - var parsedJSig jsParsedSignature - - if err := json.Unmarshal(signature, &parsedJSig); err != nil { - return nil, err - } - - // TODO(stevvooe): A lot of the code below is repeated in - // ParseJWS. It will require more refactoring to fix that. - jsig := jsSignature{ - Header: jsHeader{ - Algorithm: parsedJSig.Header.Algorithm, - }, - Signature: parsedJSig.Signature, - Protected: parsedJSig.Protected, - } - - if parsedJSig.Header.Chain != nil { - jsig.Header.Chain = parsedJSig.Header.Chain - } - - if parsedJSig.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(parsedJSig.Header.JWK)) - if err != nil { - return nil, err - } - jsig.Header.JWK = publicKey - } - - js.signatures = append(js.signatures, jsig) - } - } - - return js, nil -} - -// NewJSONSignatureFromMap returns a new unsigned JSONSignature from a map or -// struct. JWS will need to be signed before serializing or storing. -func NewJSONSignatureFromMap(content interface{}) (*JSONSignature, error) { - switch content.(type) { - case map[string]interface{}: - case struct{}: - default: - return nil, errors.New("invalid data type") - } - - js := newJSONSignature() - js.indent = " " - - payload, err := json.MarshalIndent(content, "", js.indent) - if err != nil { - return nil, err - } - js.payload = joseBase64UrlEncode(payload) - - // Remove '\n}' from formatted section, put in protected header - js.formatLength = len(payload) - 2 - js.formatTail = payload[js.formatLength:] - - return js, nil -} - -func readIntFromMap(key string, m map[string]interface{}) (int, bool) { - value, ok := m[key] - if !ok { - return 0, false - } - switch v := value.(type) { - case int: - return v, true - case float64: - return int(v), true - default: - return 0, false - } -} - -func readStringFromMap(key string, m map[string]interface{}) (v string, ok bool) { - value, ok := m[key] - if !ok { - return "", false - } - v, ok = value.(string) - return -} - -// ParsePrettySignature parses a formatted signature into a -// JSON signature. If the signatures are missing the format information -// an error is returned. The formatted signature must be created by -// the PrettySignature method.
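// A sketch for NewJSONSignatureFromMap above, under the same import-path
// assumption. Note that the type switch only admits map[string]interface{}
// (or the empty struct type), so arbitrary named struct types are rejected
// with "invalid data type".
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	doc := map[string]interface{}{
		"schemaVersion": 1,
		"name":          "library/example",
	}

	js, err := libtrust.NewJSONSignatureFromMap(doc)
	if err != nil {
		log.Fatal(err)
	}

	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}
	if err := js.Sign(key); err != nil {
		log.Fatal(err)
	}

	jws, err := js.JWS()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(jws))
}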
-func ParsePrettySignature(content []byte, signatureKey string) (*JSONSignature, error) { - var contentMap map[string]json.RawMessage - err := json.Unmarshal(content, &contentMap) - if err != nil { - return nil, fmt.Errorf("error unmarshalling content: %s", err) - } - sigMessage, ok := contentMap[signatureKey] - if !ok { - return nil, ErrMissingSignatureKey - } - - var signatureBlocks []jsParsedSignature - err = json.Unmarshal([]byte(sigMessage), &signatureBlocks) - if err != nil { - return nil, fmt.Errorf("error unmarshalling signatures: %s", err) - } - - js := newJSONSignature() - js.signatures = make([]jsSignature, len(signatureBlocks)) - - for i, signatureBlock := range signatureBlocks { - protectedBytes, err := joseBase64UrlDecode(signatureBlock.Protected) - if err != nil { - return nil, fmt.Errorf("base64 decode error: %s", err) - } - var protectedHeader map[string]interface{} - err = json.Unmarshal(protectedBytes, &protectedHeader) - if err != nil { - return nil, fmt.Errorf("error unmarshalling protected header: %s", err) - } - - formatLength, ok := readIntFromMap("formatLength", protectedHeader) - if !ok { - return nil, errors.New("missing formatted length") - } - encodedTail, ok := readStringFromMap("formatTail", protectedHeader) - if !ok { - return nil, errors.New("missing formatted tail") - } - formatTail, err := joseBase64UrlDecode(encodedTail) - if err != nil { - return nil, fmt.Errorf("base64 decode error on tail: %s", err) - } - if js.formatLength == 0 { - js.formatLength = formatLength - } else if js.formatLength != formatLength { - return nil, errors.New("conflicting format length") - } - if len(js.formatTail) == 0 { - js.formatTail = formatTail - } else if bytes.Compare(js.formatTail, formatTail) != 0 { - return nil, errors.New("conflicting format tail") - } - - header := jsHeader{ - Algorithm: signatureBlock.Header.Algorithm, - Chain: signatureBlock.Header.Chain, - } - if signatureBlock.Header.JWK != nil { - publicKey, err := UnmarshalPublicKeyJWK([]byte(signatureBlock.Header.JWK)) - if err != nil { - return nil, fmt.Errorf("error unmarshalling public key: %s", err) - } - header.JWK = publicKey - } - js.signatures[i] = jsSignature{ - Header: header, - Signature: signatureBlock.Signature, - Protected: signatureBlock.Protected, - } - } - if js.formatLength > len(content) { - return nil, errors.New("invalid format length") - } - formatted := make([]byte, js.formatLength+len(js.formatTail)) - copy(formatted, content[:js.formatLength]) - copy(formatted[js.formatLength:], js.formatTail) - js.indent = detectJSONIndent(formatted) - js.payload = joseBase64UrlEncode(formatted) - - return js, nil -} - -// PrettySignature formats a json signature into an easy to read -// single json serialized object. 
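// A verification sketch for ParsePrettySignature above, under the same
// import-path assumption; Docker schema1 image manifests, for example, embed
// their signatures under a "signatures" key in the same document. The file
// name is illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	manifest, err := ioutil.ReadFile("manifest.json")
	if err != nil {
		log.Fatal(err)
	}

	// The protected headers carry formatLength/formatTail, letting the
	// original payload bytes be reconstructed exactly before verification.
	js, err := libtrust.ParsePrettySignature(manifest, "signatures")
	if err != nil {
		log.Fatal(err)
	}

	keys, err := js.Verify()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println("valid signature from:", k.KeyID())
	}
}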
-func (js *JSONSignature) PrettySignature(signatureKey string) ([]byte, error) { - if len(js.signatures) == 0 { - return nil, errors.New("no signatures") - } - payload, err := joseBase64UrlDecode(js.payload) - if err != nil { - return nil, err - } - payload = payload[:js.formatLength] - - sort.Sort(jsSignaturesSorted(js.signatures)) - - var marshalled []byte - var marshallErr error - if js.indent != "" { - marshalled, marshallErr = json.MarshalIndent(js.signatures, js.indent, js.indent) - } else { - marshalled, marshallErr = json.Marshal(js.signatures) - } - if marshallErr != nil { - return nil, marshallErr - } - - buf := bytes.NewBuffer(make([]byte, 0, len(payload)+len(marshalled)+34)) - buf.Write(payload) - buf.WriteByte(',') - if js.indent != "" { - buf.WriteByte('\n') - buf.WriteString(js.indent) - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\": ") - buf.Write(marshalled) - buf.WriteByte('\n') - } else { - buf.WriteByte('"') - buf.WriteString(signatureKey) - buf.WriteString("\":") - buf.Write(marshalled) - } - buf.WriteByte('}') - - return buf.Bytes(), nil -} - -// Signatures provides the signatures on this JWS as opaque blobs, sorted by -// keyID. These blobs can be stored and reassembled with payloads. Internally, -// they are simply marshaled json web signatures but implementations should -// not rely on this. -func (js *JSONSignature) Signatures() ([][]byte, error) { - sort.Sort(jsSignaturesSorted(js.signatures)) - - var sb [][]byte - for _, jsig := range js.signatures { - p, err := json.Marshal(jsig) - if err != nil { - return nil, err - } - - sb = append(sb, p) - } - - return sb, nil -} - -// Merge combines the signatures from one or more other signatures into the -// method receiver. If the payloads differ for any argument, an error will be -// returned and the receiver will not be modified. -func (js *JSONSignature) Merge(others ...*JSONSignature) error { - merged := js.signatures - for _, other := range others { - if js.payload != other.payload { - return fmt.Errorf("payloads differ from merge target") - } - merged = append(merged, other.signatures...) - } - - js.signatures = merged - return nil -} diff --git a/vendor/github.com/docker/libtrust/key.go b/vendor/github.com/docker/libtrust/key.go deleted file mode 100644 index 73642db2a8..0000000000 --- a/vendor/github.com/docker/libtrust/key.go +++ /dev/null @@ -1,253 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" -) - -// PublicKey is a generic interface for a Public Key. -type PublicKey interface { - // KeyType returns the key type for this key. For elliptic curve keys, - // this value should be "EC". For RSA keys, this value should be "RSA". - KeyType() string - // KeyID returns a distinct identifier which is unique to this Public Key. - // The format generated by this library is a base32 encoding of a 240 bit - // hash of the public key data divided into 12 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - KeyID() string - // Verify verifies the signature of the data in the io.Reader using this - // Public Key. The alg parameter should identify the digital signature - // algorithm which was used to produce the signature and should be - // supported by this public key. Returns a nil error if the signature - // is valid.
- Verify(data io.Reader, alg string, signature []byte) error - // CryptoPublicKey returns the internal object which can be used as a - // crypto.PublicKey for use with other standard library operations. The type - // is either *rsa.PublicKey or *ecdsa.PublicKey - CryptoPublicKey() crypto.PublicKey - // These public keys can be serialized to the standard JSON encoding for - // JSON Web Keys. See section 6 of the IETF draft RFC for JOSE JSON Web - // Algorithms. - MarshalJSON() ([]byte, error) - // These keys can also be serialized to the standard PEM encoding. - PEMBlock() (*pem.Block, error) - // The string representation of a key is its key type and ID. - String() string - AddExtendedField(string, interface{}) - GetExtendedField(string) interface{} -} - -// PrivateKey is a generic interface for a Private Key. -type PrivateKey interface { - // A PrivateKey contains all fields and methods of a PublicKey of the - // same type. The MarshalJSON method also outputs the private key as a - // JSON Web Key, and the PEMBlock method outputs the private key as a - // PEM block. - PublicKey - // PublicKey returns the PublicKey associated with this PrivateKey. - PublicKey() PublicKey - // Sign signs the data read from the io.Reader using a signature algorithm - // supported by the private key. If the specified hashing algorithm is - // supported by this key, that hash function is used to generate the - // signature, otherwise the default hashing algorithm for this key is - // used. Returns the signature and identifier of the algorithm used. - Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) - // CryptoPrivateKey returns the internal object which can be used as a - // crypto.PrivateKey for use with other standard library operations. The - // type is either *rsa.PrivateKey or *ecdsa.PrivateKey - CryptoPrivateKey() crypto.PrivateKey -} - -// FromCryptoPublicKey returns a libtrust PublicKey representation of the given -// *ecdsa.PublicKey or *rsa.PublicKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPublicKey(cryptoPublicKey crypto.PublicKey) (PublicKey, error) { - switch cryptoPublicKey := cryptoPublicKey.(type) { - case *ecdsa.PublicKey: - return fromECPublicKey(cryptoPublicKey) - case *rsa.PublicKey: - return fromRSAPublicKey(cryptoPublicKey), nil - default: - return nil, fmt.Errorf("public key type %T is not supported", cryptoPublicKey) - } -} - -// FromCryptoPrivateKey returns a libtrust PrivateKey representation of the given -// *ecdsa.PrivateKey or *rsa.PrivateKey. Returns a non-nil error when the given -// key is of an unsupported type. -func FromCryptoPrivateKey(cryptoPrivateKey crypto.PrivateKey) (PrivateKey, error) { - switch cryptoPrivateKey := cryptoPrivateKey.(type) { - case *ecdsa.PrivateKey: - return fromECPrivateKey(cryptoPrivateKey) - case *rsa.PrivateKey: - return fromRSAPrivateKey(cryptoPrivateKey), nil - default: - return nil, fmt.Errorf("private key type %T is not supported", cryptoPrivateKey) - } -} - -// UnmarshalPublicKeyPEM parses the PEM encoded data and returns a libtrust -// PublicKey or an error if there is a problem with the encoding.
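// A sketch of bridging a standard-library key into the PublicKey/PrivateKey
// interfaces above via FromCryptoPrivateKey; import path assumed as before.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	ecKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	key, err := libtrust.FromCryptoPrivateKey(ecKey)
	if err != nil {
		log.Fatal(err)
	}

	// The wrapped key exposes the JWK identity described above: a truncated
	// SHA-256 of the DER-encoded public key, printed in base32 groups.
	fmt.Println(key.KeyType(), key.KeyID())

	jwk, err := key.PublicKey().MarshalJSON() // JWK JSON serialization
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(jwk))
}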
-func UnmarshalPublicKeyPEM(data []byte) (PublicKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - return pubKeyFromPEMBlock(pemBlock) -} - -// UnmarshalPublicKeyPEMBundle parses the PEM encoded data as a bundle of -// PEM blocks appended one after the other and returns a slice of PublicKey -// objects that it finds. -func UnmarshalPublicKeyPEMBundle(data []byte) ([]PublicKey, error) { - pubKeys := []PublicKey{} - - for { - var pemBlock *pem.Block - pemBlock, data = pem.Decode(data) - if pemBlock == nil { - break - } else if pemBlock.Type != "PUBLIC KEY" { - return nil, fmt.Errorf("unable to get PublicKey from PEM type: %s", pemBlock.Type) - } - - pubKey, err := pubKeyFromPEMBlock(pemBlock) - if err != nil { - return nil, err - } - - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyPEM parses the PEM encoded data and returns a libtrust -// PrivateKey or an error if there is a problem with the encoding. -func UnmarshalPrivateKeyPEM(data []byte) (PrivateKey, error) { - pemBlock, _ := pem.Decode(data) - if pemBlock == nil { - return nil, errors.New("unable to find PEM encoded data") - } - - var key PrivateKey - - switch { - case pemBlock.Type == "RSA PRIVATE KEY": - rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode RSA Private Key PEM data: %s", err) - } - key = fromRSAPrivateKey(rsaPrivateKey) - case pemBlock.Type == "EC PRIVATE KEY": - ecPrivateKey, err := x509.ParseECPrivateKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode EC Private Key PEM data: %s", err) - } - key, err = fromECPrivateKey(ecPrivateKey) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unable to get PrivateKey from PEM type: %s", pemBlock.Type) - } - - addPEMHeadersToKey(pemBlock, key.PublicKey()) - - return key, nil -} - -// UnmarshalPublicKeyJWK unmarshals the given JSON Web Key into a generic -// Public Key to be used with libtrust. -func UnmarshalPublicKeyJWK(data []byte) (PublicKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Public Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Public Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC public key. - return ecPublicKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA public key. - return rsaPublicKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Public Key type not supported: %q\n", kty, - ) - } -} - -// UnmarshalPublicKeyJWKSet parses the JSON encoded data as a JSON Web Key Set -// and returns a slice of Public Key objects. 
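// A decoding sketch for the PEM helpers above, under the same import-path
// assumption; the file path is illustrative.
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	pemBytes, err := ioutil.ReadFile("trusted-key.pem")
	if err != nil {
		log.Fatal(err)
	}

	// Expects a single "PUBLIC KEY" block; UnmarshalPublicKeyPEMBundle
	// handles files holding several concatenated blocks.
	pubKey, err := libtrust.UnmarshalPublicKeyPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}

	// PEM headers (e.g. a comma-separated "hosts" header) are surfaced as
	// extended fields on the returned key.
	fmt.Println(pubKey.KeyID(), pubKey.GetExtendedField("hosts"))
}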
-func UnmarshalPublicKeyJWKSet(data []byte) ([]PublicKey, error) { - rawKeys, err := loadJSONKeySetRaw(data) - if err != nil { - return nil, err - } - - pubKeys := make([]PublicKey, 0, len(rawKeys)) - - for _, rawKey := range rawKeys { - pubKey, err := UnmarshalPublicKeyJWK(rawKey) - if err != nil { - return nil, err - } - pubKeys = append(pubKeys, pubKey) - } - - return pubKeys, nil -} - -// UnmarshalPrivateKeyJWK unmarshals the given JSON Web Key into a generic -// Private Key to be used with libtrust. -func UnmarshalPrivateKeyJWK(data []byte) (PrivateKey, error) { - jwk := make(map[string]interface{}) - - err := json.Unmarshal(data, &jwk) - if err != nil { - return nil, fmt.Errorf( - "decoding JWK Private Key JSON data: %s\n", err, - ) - } - - // Get the Key Type value. - kty, err := stringFromMap(jwk, "kty") - if err != nil { - return nil, fmt.Errorf("JWK Private Key type: %s", err) - } - - switch { - case kty == "EC": - // Call out to unmarshal EC private key. - return ecPrivateKeyFromMap(jwk) - case kty == "RSA": - // Call out to unmarshal RSA private key. - return rsaPrivateKeyFromMap(jwk) - default: - return nil, fmt.Errorf( - "JWK Private Key type not supported: %q\n", kty, - ) - } -} diff --git a/vendor/github.com/docker/libtrust/key_files.go b/vendor/github.com/docker/libtrust/key_files.go deleted file mode 100644 index c526de5455..0000000000 --- a/vendor/github.com/docker/libtrust/key_files.go +++ /dev/null @@ -1,255 +0,0 @@ -package libtrust - -import ( - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io/ioutil" - "os" - "strings" -) - -var ( - // ErrKeyFileDoesNotExist indicates that the private key file does not exist. - ErrKeyFileDoesNotExist = errors.New("key file does not exist") -) - -func readKeyFileBytes(filename string) ([]byte, error) { - data, err := ioutil.ReadFile(filename) - if err != nil { - if os.IsNotExist(err) { - err = ErrKeyFileDoesNotExist - } else { - err = fmt.Errorf("unable to read key file %s: %s", filename, err) - } - - return nil, err - } - - return data, nil -} - -/* - Loading and Saving of Public and Private Keys in either PEM or JWK format. -*/ - -// LoadKeyFile opens the given filename and attempts to read a Private Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). -func LoadKeyFile(filename string) (PrivateKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PrivateKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPrivateKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key JWK: %s", err) - } - } else { - key, err = UnmarshalPrivateKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode private key PEM: %s", err) - } - } - - return key, nil -} - -// LoadPublicKeyFile opens the given filename and attempts to read a Public Key -// encoded in either PEM or JWK format (if .json or .jwk file extension). 
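// A sketch of the extension-driven loaders above: ".json"/".jwk" files are
// parsed as JSON Web Keys, anything else as PEM. Paths are illustrative;
// import path assumed as before.
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	// Parsed as a JWK, because of the .json suffix.
	jwkKey, err := libtrust.LoadKeyFile("key.json")
	if err != nil {
		log.Fatal(err)
	}

	// Parsed as PEM, because of any other suffix.
	pemKey, err := libtrust.LoadKeyFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(jwkKey.KeyID(), pemKey.KeyID())
}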
-func LoadPublicKeyFile(filename string) (PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil { - return nil, err - } - - var key PublicKey - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - key, err = UnmarshalPublicKeyJWK(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key JWK: %s", err) - } - } else { - key, err = UnmarshalPublicKeyPEM(contents) - if err != nil { - return nil, fmt.Errorf("unable to decode public key PEM: %s", err) - } - } - - return key, nil -} - -// SaveKey saves the given key to a file using the provided filename. -// This process will overwrite any existing file at the provided location. -func SaveKey(filename string, key PrivateKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode private key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("unable to write private key file %s: %s", filename, err) - } - - return nil -} - -// SavePublicKey saves the given public key to the file. -func SavePublicKey(filename string, key PublicKey) error { - var encodedKey []byte - var err error - - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - // Encode in JSON Web Key format. - encodedKey, err = json.MarshalIndent(key, "", " ") - if err != nil { - return fmt.Errorf("unable to encode public key JWK: %s", err) - } - } else { - // Encode in PEM format. - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode public key PEM: %s", err) - } - encodedKey = pem.EncodeToMemory(pemBlock) - } - - err = ioutil.WriteFile(filename, encodedKey, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write public key file %s: %s", filename, err) - } - - return nil -} - -// Public Key Set files - -type jwkSet struct { - Keys []json.RawMessage `json:"keys"` -} - -// LoadKeySetFile loads a key set -func LoadKeySetFile(filename string) ([]PublicKey, error) { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return loadJSONKeySetFile(filename) - } - - // Must be a PEM format file - return loadPEMKeySetFile(filename) -} - -func loadJSONKeySetRaw(data []byte) ([]json.RawMessage, error) { - if len(data) == 0 { - // This is okay, just return an empty slice. 
- return []json.RawMessage{}, nil - } - - keySet := jwkSet{} - - err := json.Unmarshal(data, &keySet) - if err != nil { - return nil, fmt.Errorf("unable to decode JSON Web Key Set: %s", err) - } - - return keySet.Keys, nil -} - -func loadJSONKeySetFile(filename string) ([]PublicKey, error) { - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyJWKSet(contents) -} - -func loadPEMKeySetFile(filename string) ([]PublicKey, error) { - data, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return nil, err - } - - return UnmarshalPublicKeyPEMBundle(data) -} - -// AddKeySetFile adds a key to a key set -func AddKeySetFile(filename string, key PublicKey) error { - if strings.HasSuffix(filename, ".json") || strings.HasSuffix(filename, ".jwk") { - return addKeySetJSONFile(filename, key) - } - - // Must be a PEM format file - return addKeySetPEMFile(filename, key) -} - -func addKeySetJSONFile(filename string, key PublicKey) error { - encodedKey, err := json.Marshal(key) - if err != nil { - return fmt.Errorf("unable to encode trusted client key: %s", err) - } - - contents, err := readKeyFileBytes(filename) - if err != nil && err != ErrKeyFileDoesNotExist { - return err - } - - rawEntries, err := loadJSONKeySetRaw(contents) - if err != nil { - return err - } - - rawEntries = append(rawEntries, json.RawMessage(encodedKey)) - entriesWrapper := jwkSet{Keys: rawEntries} - - encodedEntries, err := json.MarshalIndent(entriesWrapper, "", " ") - if err != nil { - return fmt.Errorf("unable to encode trusted client keys: %s", err) - } - - err = ioutil.WriteFile(filename, encodedEntries, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to write trusted client keys file %s: %s", filename, err) - } - - return nil -} - -func addKeySetPEMFile(filename string, key PublicKey) error { - // Encode to PEM, open file for appending, write PEM. - file, err := os.OpenFile(filename, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.FileMode(0644)) - if err != nil { - return fmt.Errorf("unable to open trusted client keys file %s: %s", filename, err) - } - defer file.Close() - - pemBlock, err := key.PEMBlock() - if err != nil { - return fmt.Errorf("unable to encode trusted key: %s", err) - } - - _, err = file.Write(pem.EncodeToMemory(pemBlock)) - if err != nil { - return fmt.Errorf("unable to write trusted keys file: %s", err) - } - - return nil -} diff --git a/vendor/github.com/docker/libtrust/key_manager.go b/vendor/github.com/docker/libtrust/key_manager.go deleted file mode 100644 index 9a98ae3574..0000000000 --- a/vendor/github.com/docker/libtrust/key_manager.go +++ /dev/null @@ -1,175 +0,0 @@ -package libtrust - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net" - "os" - "path" - "sync" -) - -// ClientKeyManager manages client keys on the filesystem -type ClientKeyManager struct { - key PrivateKey - clientFile string - clientDir string - - clientLock sync.RWMutex - clients []PublicKey - - configLock sync.Mutex - configs []*tls.Config -} - -// NewClientKeyManager loads a new manager from a set of key files, -// managed by the given private key.
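// A sketch of maintaining an authorized-keys file with the helpers above;
// the file name is illustrative and the import path assumed as before.
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// A .json/.jwk suffix appends into a {"keys": [...]} JWK set; any other
	// suffix appends a PEM block to the file instead.
	if err := libtrust.AddKeySetFile("authorized-keys.json", key.PublicKey()); err != nil {
		log.Fatal(err)
	}

	keys, err := libtrust.LoadKeySetFile("authorized-keys.json")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("trusted keys:", len(keys))
}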
-func NewClientKeyManager(trustKey PrivateKey, clientFile, clientDir string) (*ClientKeyManager, error) { - m := &ClientKeyManager{ - key: trustKey, - clientFile: clientFile, - clientDir: clientDir, - } - if err := m.loadKeys(); err != nil { - return nil, err - } - // TODO Start watching file and directory - - return m, nil -} - -func (c *ClientKeyManager) loadKeys() (err error) { - // Load authorized keys file - var clients []PublicKey - if c.clientFile != "" { - clients, err = LoadKeySetFile(c.clientFile) - if err != nil { - return fmt.Errorf("unable to load authorized keys: %s", err) - } - } - - // Add clients from authorized keys directory - files, err := ioutil.ReadDir(c.clientDir) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("unable to open authorized keys directory: %s", err) - } - for _, f := range files { - if !f.IsDir() { - publicKey, err := LoadPublicKeyFile(path.Join(c.clientDir, f.Name())) - if err != nil { - return fmt.Errorf("unable to load authorized key file: %s", err) - } - clients = append(clients, publicKey) - } - } - - c.clientLock.Lock() - c.clients = clients - c.clientLock.Unlock() - - return nil -} - -// RegisterTLSConfig registers a tls configuration to manager -// such that any changes to the keys may be reflected in -// the tls client CA pool -func (c *ClientKeyManager) RegisterTLSConfig(tlsConfig *tls.Config) error { - c.clientLock.RLock() - certPool, err := GenerateCACertPool(c.key, c.clients) - if err != nil { - return fmt.Errorf("CA pool generation error: %s", err) - } - c.clientLock.RUnlock() - - tlsConfig.ClientCAs = certPool - - c.configLock.Lock() - c.configs = append(c.configs, tlsConfig) - c.configLock.Unlock() - - return nil -} - -// NewIdentityAuthTLSConfig creates a tls.Config for the server to use for -// libtrust identity authentication for the domain specified -func NewIdentityAuthTLSConfig(trustKey PrivateKey, clients *ClientKeyManager, addr string, domain string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - if err := clients.RegisterTLSConfig(tlsConfig); err != nil { - return nil, err - } - - // Generate cert - ips, domains, err := parseAddr(addr) - if err != nil { - return nil, err - } - // add domain that it expects clients to use - domains = append(domains, domain) - x509Cert, err := GenerateSelfSignedServerCert(trustKey, domains, ips) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - return tlsConfig, nil -} - -// NewCertAuthTLSConfig creates a tls.Config for the server to use for -// certificate authentication -func NewCertAuthTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - cert, err := tls.LoadX509KeyPair(certPath, keyPath) - if err != nil { - return nil, fmt.Errorf("Couldn't load X509 key pair (%s, %s): %s. Key encrypted?", certPath, keyPath, err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - - // Verify client certificates against a CA? 
- if caPath != "" { - certPool := x509.NewCertPool() - file, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, fmt.Errorf("Couldn't read CA certificate: %s", err) - } - certPool.AppendCertsFromPEM(file) - - tlsConfig.ClientAuth = tls.RequireAndVerifyClientCert - tlsConfig.ClientCAs = certPool - } - - return tlsConfig, nil -} - -func newTLSConfig() *tls.Config { - return &tls.Config{ - NextProtos: []string{"http/1.1"}, - // Avoid fallback on insecure SSL protocols - MinVersion: tls.VersionTLS10, - } -} - -// parseAddr parses an address into an array of IPs and domains -func parseAddr(addr string) ([]net.IP, []string, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, nil, err - } - var domains []string - var ips []net.IP - ip := net.ParseIP(host) - if ip != nil { - ips = []net.IP{ip} - } else { - domains = []string{host} - } - return ips, domains, nil -} diff --git a/vendor/github.com/docker/libtrust/rsa_key.go b/vendor/github.com/docker/libtrust/rsa_key.go deleted file mode 100644 index dac4cacf20..0000000000 --- a/vendor/github.com/docker/libtrust/rsa_key.go +++ /dev/null @@ -1,427 +0,0 @@ -package libtrust - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/json" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" -) - -/* - * RSA DSA PUBLIC KEY - */ - -// rsaPublicKey implements a JWK Public Key using RSA digital signature algorithms. -type rsaPublicKey struct { - *rsa.PublicKey - extended map[string]interface{} -} - -func fromRSAPublicKey(cryptoPublicKey *rsa.PublicKey) *rsaPublicKey { - return &rsaPublicKey{cryptoPublicKey, map[string]interface{}{}} -} - -// KeyType returns the JWK key type for RSA keys, i.e., "RSA". -func (k *rsaPublicKey) KeyType() string { - return "RSA" -} - -// KeyID returns a distinct identifier which is unique to this Public Key. -func (k *rsaPublicKey) KeyID() string { - return keyIDFromCryptoKey(k) -} - -func (k *rsaPublicKey) String() string { - return fmt.Sprintf("RSA Public Key <%s>", k.KeyID()) -} - -// Verify verifyies the signature of the data in the io.Reader using this Public Key. -// The alg parameter should be the name of the JWA digital signature algorithm -// which was used to produce the signature and should be supported by this -// public key. Returns a nil error if the signature is valid. -func (k *rsaPublicKey) Verify(data io.Reader, alg string, signature []byte) error { - // Verify the signature of the given date, return non-nil error if valid. - sigAlg, err := rsaSignatureAlgorithmByName(alg) - if err != nil { - return fmt.Errorf("unable to verify Signature: %s", err) - } - - hasher := sigAlg.HashID().New() - _, err = io.Copy(hasher, data) - if err != nil { - return fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - err = rsa.VerifyPKCS1v15(k.PublicKey, sigAlg.HashID(), hash, signature) - if err != nil { - return fmt.Errorf("invalid %s signature: %s", sigAlg.HeaderParam(), err) - } - - return nil -} - -// CryptoPublicKey returns the internal object which can be used as a -// crypto.PublicKey for use with other standard library operations. 
The type -// is either *rsa.PublicKey or *ecdsa.PublicKey -func (k *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return k.PublicKey -} - -func (k *rsaPublicKey) toMap() map[string]interface{} { - jwk := make(map[string]interface{}) - for k, v := range k.extended { - jwk[k] = v - } - jwk["kty"] = k.KeyType() - jwk["kid"] = k.KeyID() - jwk["n"] = joseBase64UrlEncode(k.N.Bytes()) - jwk["e"] = joseBase64UrlEncode(serializeRSAPublicExponentParam(k.E)) - - return jwk -} - -// MarshalJSON serializes this Public Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPublicKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Public Key to DER-encoded PKIX format. -func (k *rsaPublicKey) PEMBlock() (*pem.Block, error) { - derBytes, err := x509.MarshalPKIXPublicKey(k.PublicKey) - if err != nil { - return nil, fmt.Errorf("unable to serialize RSA PublicKey to DER-encoded PKIX format: %s", err) - } - k.extended["kid"] = k.KeyID() // For display purposes. - return createPemBlock("PUBLIC KEY", derBytes, k.extended) -} - -func (k *rsaPublicKey) AddExtendedField(field string, value interface{}) { - k.extended[field] = value -} - -func (k *rsaPublicKey) GetExtendedField(field string) interface{} { - v, ok := k.extended[field] - if !ok { - return nil - } - return v -} - -func rsaPublicKeyFromMap(jwk map[string]interface{}) (*rsaPublicKey, error) { - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract 'n', 'e', and 'kid' and check for - // consistency. - - // Get the modulus parameter N. - nB64Url, err := stringFromMap(jwk, "n") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - n, err := parseRSAModulusParam(nB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key modulus: %s", err) - } - - // Get the public exponent E. - eB64Url, err := stringFromMap(jwk, "e") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - e, err := parseRSAPublicExponentParam(eB64Url) - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key exponent: %s", err) - } - - key := &rsaPublicKey{ - PublicKey: &rsa.PublicKey{N: n, E: e}, - } - - // Key ID is optional, but if it exists, it should match the key. - _, ok := jwk["kid"] - if ok { - kid, err := stringFromMap(jwk, "kid") - if err != nil { - return nil, fmt.Errorf("JWK RSA Public Key ID: %s", err) - } - if kid != key.KeyID() { - return nil, fmt.Errorf("JWK RSA Public Key ID does not match: %s", kid) - } - } - - if _, ok := jwk["d"]; ok { - return nil, fmt.Errorf("JWK RSA Public Key cannot contain private exponent") - } - - key.extended = jwk - - return key, nil -} - -/* - * RSA DSA PRIVATE KEY - */ - -// rsaPrivateKey implements a JWK Private Key using RSA digital signature algorithms. -type rsaPrivateKey struct { - rsaPublicKey - *rsa.PrivateKey -} - -func fromRSAPrivateKey(cryptoPrivateKey *rsa.PrivateKey) *rsaPrivateKey { - return &rsaPrivateKey{ - *fromRSAPublicKey(&cryptoPrivateKey.PublicKey), - cryptoPrivateKey, - } -} - -// PublicKey returns the Public Key data associated with this Private Key. -func (k *rsaPrivateKey) PublicKey() PublicKey { - return &k.rsaPublicKey -} - -func (k *rsaPrivateKey) String() string { - return fmt.Sprintf("RSA Private Key <%s>", k.KeyID()) -} - -// Sign signs the data read from the io.Reader using a signature algorithm supported -// by the RSA private key. 
If the specified hashing algorithm is supported by -// this key, that hash function is used to generate the signature, otherwise -// the default hashing algorithm for this key is used. Returns the signature -// and the name of the JWK signature algorithm used, e.g., "RS256", "RS384", -// "RS512". -func (k *rsaPrivateKey) Sign(data io.Reader, hashID crypto.Hash) (signature []byte, alg string, err error) { - // Generate a signature of the data using the internal alg. - sigAlg := rsaPKCS1v15SignatureAlgorithmForHashID(hashID) - hasher := sigAlg.HashID().New() - - _, err = io.Copy(hasher, data) - if err != nil { - return nil, "", fmt.Errorf("error reading data to sign: %s", err) - } - hash := hasher.Sum(nil) - - signature, err = rsa.SignPKCS1v15(rand.Reader, k.PrivateKey, sigAlg.HashID(), hash) - if err != nil { - return nil, "", fmt.Errorf("error producing signature: %s", err) - } - - alg = sigAlg.HeaderParam() - - return -} - -// CryptoPrivateKey returns the internal object which can be used as a -// crypto.PrivateKey for use with other standard library operations. The type -// is either *rsa.PrivateKey or *ecdsa.PrivateKey -func (k *rsaPrivateKey) CryptoPrivateKey() crypto.PrivateKey { - return k.PrivateKey -} - -func (k *rsaPrivateKey) toMap() map[string]interface{} { - k.Precompute() // Make sure the precomputed values are stored. - jwk := k.rsaPublicKey.toMap() - - jwk["d"] = joseBase64UrlEncode(k.D.Bytes()) - jwk["p"] = joseBase64UrlEncode(k.Primes[0].Bytes()) - jwk["q"] = joseBase64UrlEncode(k.Primes[1].Bytes()) - jwk["dp"] = joseBase64UrlEncode(k.Precomputed.Dp.Bytes()) - jwk["dq"] = joseBase64UrlEncode(k.Precomputed.Dq.Bytes()) - jwk["qi"] = joseBase64UrlEncode(k.Precomputed.Qinv.Bytes()) - - otherPrimes := k.Primes[2:] - - if len(otherPrimes) > 0 { - otherPrimesInfo := make([]interface{}, len(otherPrimes)) - for i, r := range otherPrimes { - otherPrimeInfo := make(map[string]string, 3) - otherPrimeInfo["r"] = joseBase64UrlEncode(r.Bytes()) - crtVal := k.Precomputed.CRTValues[i] - otherPrimeInfo["d"] = joseBase64UrlEncode(crtVal.Exp.Bytes()) - otherPrimeInfo["t"] = joseBase64UrlEncode(crtVal.Coeff.Bytes()) - otherPrimesInfo[i] = otherPrimeInfo - } - jwk["oth"] = otherPrimesInfo - } - - return jwk -} - -// MarshalJSON serializes this Private Key using the JWK JSON serialization format for -// RSA keys. -func (k *rsaPrivateKey) MarshalJSON() (data []byte, err error) { - return json.Marshal(k.toMap()) -} - -// PEMBlock serializes this Private Key to DER-encoded PKCS1 format. -func (k *rsaPrivateKey) PEMBlock() (*pem.Block, error) { - derBytes := x509.MarshalPKCS1PrivateKey(k.PrivateKey) - k.extended["keyID"] = k.KeyID() // For display purposes. - return createPemBlock("RSA PRIVATE KEY", derBytes, k.extended) -} - -func rsaPrivateKeyFromMap(jwk map[string]interface{}) (*rsaPrivateKey, error) { - // The JWA spec for RSA Private Keys (draft rfc section 5.3.2) states that - // only the private key exponent 'd' is REQUIRED, the others are just for - // signature/decryption optimizations and SHOULD be included when the JWK - // is produced. We MAY choose to accept a JWK which only includes 'd', but - // we're going to go ahead and not choose to accept it without the extra - // fields. Only the 'oth' field will be optional (for multi-prime keys).
- privateExponent, err := parseRSAPrivateKeyParamFromMap(jwk, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key exponent: %s", err) - } - firstPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "p") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - secondPrimeFactor, err := parseRSAPrivateKeyParamFromMap(jwk, "q") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - firstFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dp") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - secondFactorCRT, err := parseRSAPrivateKeyParamFromMap(jwk, "dq") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - crtCoeff, err := parseRSAPrivateKeyParamFromMap(jwk, "qi") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - var oth interface{} - if _, ok := jwk["oth"]; ok { - oth = jwk["oth"] - delete(jwk, "oth") - } - - // JWK key type (kty) has already been determined to be "RSA". - // Need to extract the public key information, then extract the private - // key values. - publicKey, err := rsaPublicKeyFromMap(jwk) - if err != nil { - return nil, err - } - - privateKey := &rsa.PrivateKey{ - PublicKey: *publicKey.PublicKey, - D: privateExponent, - Primes: []*big.Int{firstPrimeFactor, secondPrimeFactor}, - Precomputed: rsa.PrecomputedValues{ - Dp: firstFactorCRT, - Dq: secondFactorCRT, - Qinv: crtCoeff, - }, - } - - if oth != nil { - // Should be an array of more JSON objects. - otherPrimesInfo, ok := oth.([]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be an array") - } - numOtherPrimeFactors := len(otherPrimesInfo) - if numOtherPrimeFactors == 0 { - return nil, errors.New("JWK RSA Private Key: Invalid other primes info: must be absent or non-empty") - } - otherPrimeFactors := make([]*big.Int, numOtherPrimeFactors) - productOfPrimes := new(big.Int).Mul(firstPrimeFactor, secondPrimeFactor) - crtValues := make([]rsa.CRTValue, numOtherPrimeFactors) - - for i, val := range otherPrimesInfo { - otherPrimeinfo, ok := val.(map[string]interface{}) - if !ok { - return nil, errors.New("JWK RSA Private Key: Invalid other prime info: must be a JSON object") - } - - otherPrimeFactor, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "r") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key prime factor: %s", err) - } - otherFactorCRT, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "d") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT exponent: %s", err) - } - otherCrtCoeff, err := parseRSAPrivateKeyParamFromMap(otherPrimeinfo, "t") - if err != nil { - return nil, fmt.Errorf("JWK RSA Private Key CRT coefficient: %s", err) - } - - crtValue := crtValues[i] - crtValue.Exp = otherFactorCRT - crtValue.Coeff = otherCrtCoeff - crtValue.R = productOfPrimes - otherPrimeFactors[i] = otherPrimeFactor - productOfPrimes = new(big.Int).Mul(productOfPrimes, otherPrimeFactor) - } - - privateKey.Primes = append(privateKey.Primes, otherPrimeFactors...) - privateKey.Precomputed.CRTValues = crtValues - } - - key := &rsaPrivateKey{ - rsaPublicKey: *publicKey, - PrivateKey: privateKey, - } - - return key, nil -} - -/* - * Key Generation Functions.
- */ - -func generateRSAPrivateKey(bits int) (k *rsaPrivateKey, err error) { - k = new(rsaPrivateKey) - k.PrivateKey, err = rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - - k.rsaPublicKey.PublicKey = &k.PrivateKey.PublicKey - k.extended = make(map[string]interface{}) - - return -} - -// GenerateRSA2048PrivateKey generates a key pair using 2048-bit RSA. -func GenerateRSA2048PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(2048) - if err != nil { - return nil, fmt.Errorf("error generating RSA 2048-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA3072PrivateKey generates a key pair using 3072-bit RSA. -func GenerateRSA3072PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(3072) - if err != nil { - return nil, fmt.Errorf("error generating RSA 3072-bit key: %s", err) - } - - return k, nil -} - -// GenerateRSA4096PrivateKey generates a key pair using 4096-bit RSA. -func GenerateRSA4096PrivateKey() (PrivateKey, error) { - k, err := generateRSAPrivateKey(4096) - if err != nil { - return nil, fmt.Errorf("error generating RSA 4096-bit key: %s", err) - } - - return k, nil -} diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go deleted file mode 100644 index a5a101d3f1..0000000000 --- a/vendor/github.com/docker/libtrust/util.go +++ /dev/null @@ -1,363 +0,0 @@ -package libtrust - -import ( - "bytes" - "crypto" - "crypto/elliptic" - "crypto/tls" - "crypto/x509" - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/pem" - "errors" - "fmt" - "math/big" - "net/url" - "os" - "path/filepath" - "strings" - "time" -) - -// LoadOrCreateTrustKey will load a PrivateKey from the specified path -func LoadOrCreateTrustKey(trustKeyPath string) (PrivateKey, error) { - if err := os.MkdirAll(filepath.Dir(trustKeyPath), 0700); err != nil { - return nil, err - } - - trustKey, err := LoadKeyFile(trustKeyPath) - if err == ErrKeyFileDoesNotExist { - trustKey, err = GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("error generating key: %s", err) - } - - if err := SaveKey(trustKeyPath, trustKey); err != nil { - return nil, fmt.Errorf("error saving key file: %s", err) - } - - dir, file := filepath.Split(trustKeyPath) - if err := SavePublicKey(filepath.Join(dir, "public-"+file), trustKey.PublicKey()); err != nil { - return nil, fmt.Errorf("error saving public key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("error loading key file: %s", err) - } - return trustKey, nil -} - -// NewIdentityAuthTLSClientConfig returns a tls.Config configured to use identity -// based authentication from the specified dockerUrl, the rootConfigPath and -// the server name to which it is connecting. -// If trustUnknownHosts is true it will automatically add the host to the -// known-hosts.json in rootConfigPath. 
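// A sketch of the trust-key bootstrap above: LoadOrCreateTrustKey loads the
// key at the given path, or generates an EC P-256 key (plus a "public-"
// side file) on first use. The path is illustrative; import path assumed as
// before.
package main

import (
	"fmt"
	"log"

	"github.com/docker/libtrust"
)

func main() {
	trustKey, err := libtrust.LoadOrCreateTrustKey("/etc/docker/key.json")
	if err != nil {
		log.Fatal(err)
	}
	// Once the key file exists, every run prints the same ID.
	fmt.Println("trust key:", trustKey.KeyID())
}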
-func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, rootConfigPath string, serverName string) (*tls.Config, error) { - tlsConfig := newTLSConfig() - - trustKeyPath := filepath.Join(rootConfigPath, "key.json") - knownHostsPath := filepath.Join(rootConfigPath, "known-hosts.json") - - u, err := url.Parse(dockerUrl) - if err != nil { - return nil, fmt.Errorf("unable to parse machine url") - } - - if u.Scheme == "unix" { - return nil, nil - } - - addr := u.Host - proto := "tcp" - - trustKey, err := LoadOrCreateTrustKey(trustKeyPath) - if err != nil { - return nil, fmt.Errorf("unable to load trust key: %s", err) - } - - knownHosts, err := LoadKeySetFile(knownHostsPath) - if err != nil { - return nil, fmt.Errorf("could not load trusted hosts file: %s", err) - } - - allowedHosts, err := FilterByHosts(knownHosts, addr, false) - if err != nil { - return nil, fmt.Errorf("error filtering hosts: %s", err) - } - - certPool, err := GenerateCACertPool(trustKey, allowedHosts) - if err != nil { - return nil, fmt.Errorf("Could not create CA pool: %s", err) - } - - tlsConfig.ServerName = serverName - tlsConfig.RootCAs = certPool - - x509Cert, err := GenerateSelfSignedClientCert(trustKey) - if err != nil { - return nil, fmt.Errorf("certificate generation error: %s", err) - } - - tlsConfig.Certificates = []tls.Certificate{{ - Certificate: [][]byte{x509Cert.Raw}, - PrivateKey: trustKey.CryptoPrivateKey(), - Leaf: x509Cert, - }} - - tlsConfig.InsecureSkipVerify = true - - testConn, err := tls.Dial(proto, addr, tlsConfig) - if err != nil { - return nil, fmt.Errorf("tls Handshake error: %s", err) - } - - opts := x509.VerifyOptions{ - Roots: tlsConfig.RootCAs, - CurrentTime: time.Now(), - DNSName: tlsConfig.ServerName, - Intermediates: x509.NewCertPool(), - } - - certs := testConn.ConnectionState().PeerCertificates - for i, cert := range certs { - if i == 0 { - continue - } - opts.Intermediates.AddCert(cert) - } - - if _, err := certs[0].Verify(opts); err != nil { - if _, ok := err.(x509.UnknownAuthorityError); ok { - if trustUnknownHosts { - pubKey, err := FromCryptoPublicKey(certs[0].PublicKey) - if err != nil { - return nil, fmt.Errorf("error extracting public key from cert: %s", err) - } - - pubKey.AddExtendedField("hosts", []string{addr}) - - if err := AddKeySetFile(knownHostsPath, pubKey); err != nil { - return nil, fmt.Errorf("error adding machine to known hosts: %s", err) - } - } else { - return nil, fmt.Errorf("unable to connect. unknown host: %s", addr) - } - } - } - - testConn.Close() - tlsConfig.InsecureSkipVerify = false - - return tlsConfig, nil -} - -// joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters omitted in accordance -// with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlEncode(b []byte) string { - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. 
-// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - s = strings.Replace(s, "\n", "", -1) - s = strings.Replace(s, " ", "", -1) - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func keyIDEncode(b []byte) string { - s := strings.TrimRight(base32.StdEncoding.EncodeToString(b), "=") - var buf bytes.Buffer - var i int - for i = 0; i < len(s)/4-1; i++ { - start := i * 4 - end := start + 4 - buf.WriteString(s[start:end] + ":") - } - buf.WriteString(s[i*4:]) - return buf.String() -} - -func keyIDFromCryptoKey(pubKey PublicKey) string { - // Generate and return a 'libtrust' fingerprint of the public key. - // For an RSA key this should be: - // SHA256(DER encoded ASN1) - // Then truncated to 240 bits and encoded into 12 base32 groups like so: - // ABCD:EFGH:IJKL:MNOP:QRST:UVWX:YZ23:4567:ABCD:EFGH:IJKL:MNOP - derBytes, err := x509.MarshalPKIXPublicKey(pubKey.CryptoPublicKey()) - if err != nil { - return "" - } - hasher := crypto.SHA256.New() - hasher.Write(derBytes) - return keyIDEncode(hasher.Sum(nil)[:30]) -} - -func stringFromMap(m map[string]interface{}, key string) (string, error) { - val, ok := m[key] - if !ok { - return "", fmt.Errorf("%q value not specified", key) - } - - str, ok := val.(string) - if !ok { - return "", fmt.Errorf("%q value must be a string", key) - } - delete(m, key) - - return str, nil -} - -func parseECCoordinate(cB64Url string, curve elliptic.Curve) (*big.Int, error) { - curveByteLen := (curve.Params().BitSize + 7) >> 3 - - cBytes, err := joseBase64UrlDecode(cB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - cByteLength := len(cBytes) - if cByteLength != curveByteLen { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", cByteLength, curveByteLen) - } - return new(big.Int).SetBytes(cBytes), nil -} - -func parseECPrivateParam(dB64Url string, curve elliptic.Curve) (*big.Int, error) { - dBytes, err := joseBase64UrlDecode(dB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - // The length of this octet string MUST be ceiling(log-base-2(n)/8) - // octets (where n is the order of the curve). This is because the private - // key d must be in the interval [1, n-1] so the bitlength of d should be - // no larger than the bitlength of n-1. The easiest way to find the octet - // length is to take bitlength(n-1), add 7 to force a carry, and shift this - // bit sequence right by 3, which is essentially dividing by 8 and adding - // 1 if there is any remainder. Thus, the private key value d should be - // output to (bitlength(n-1)+7)>>3 octets. - n := curve.Params().N - octetLength := (new(big.Int).Sub(n, big.NewInt(1)).BitLen() + 7) >> 3 - dByteLength := len(dBytes) - - if dByteLength != octetLength { - return nil, fmt.Errorf("invalid number of octets: got %d, should be %d", dByteLength, octetLength) - } - - return new(big.Int).SetBytes(dBytes), nil -} - -func parseRSAModulusParam(nB64Url string) (*big.Int, error) { - nBytes, err := joseBase64UrlDecode(nB64Url) - if err != nil { - return nil, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(nBytes), nil -} - -func serializeRSAPublicExponentParam(e int) []byte { - // We MUST use the minimum number of octets to represent E. 
- // E is supposed to be 65537 for performance and security reasons - // and is what golang's rsa package generates, but it might be - // different if imported from some other generator. - buf := make([]byte, 4) - binary.BigEndian.PutUint32(buf, uint32(e)) - var i int - for i = 0; i < 8; i++ { - if buf[i] != 0 { - break - } - } - return buf[i:] -} - -func parseRSAPublicExponentParam(eB64Url string) (int, error) { - eBytes, err := joseBase64UrlDecode(eB64Url) - if err != nil { - return 0, fmt.Errorf("invalid base64 URL encoding: %s", err) - } - // Only the minimum number of bytes were used to represent E, but - // binary.BigEndian.Uint32 expects at least 4 bytes, so we need - // to add zero padding if necassary. - byteLen := len(eBytes) - buf := make([]byte, 4-byteLen, 4) - eBytes = append(buf, eBytes...) - - return int(binary.BigEndian.Uint32(eBytes)), nil -} - -func parseRSAPrivateKeyParamFromMap(m map[string]interface{}, key string) (*big.Int, error) { - b64Url, err := stringFromMap(m, key) - if err != nil { - return nil, err - } - - paramBytes, err := joseBase64UrlDecode(b64Url) - if err != nil { - return nil, fmt.Errorf("invaled base64 URL encoding: %s", err) - } - - return new(big.Int).SetBytes(paramBytes), nil -} - -func createPemBlock(name string, derBytes []byte, headers map[string]interface{}) (*pem.Block, error) { - pemBlock := &pem.Block{Type: name, Bytes: derBytes, Headers: map[string]string{}} - for k, v := range headers { - switch val := v.(type) { - case string: - pemBlock.Headers[k] = val - case []string: - if k == "hosts" { - pemBlock.Headers[k] = strings.Join(val, ",") - } else { - // Return error, non-encodable type - } - default: - // Return error, non-encodable type - } - } - - return pemBlock, nil -} - -func pubKeyFromPEMBlock(pemBlock *pem.Block) (PublicKey, error) { - cryptoPublicKey, err := x509.ParsePKIXPublicKey(pemBlock.Bytes) - if err != nil { - return nil, fmt.Errorf("unable to decode Public Key PEM data: %s", err) - } - - pubKey, err := FromCryptoPublicKey(cryptoPublicKey) - if err != nil { - return nil, err - } - - addPEMHeadersToKey(pemBlock, pubKey) - - return pubKey, nil -} - -func addPEMHeadersToKey(pemBlock *pem.Block, pubKey PublicKey) { - for key, value := range pemBlock.Headers { - var safeVal interface{} - if key == "hosts" { - safeVal = strings.Split(value, ",") - } else { - safeVal = value - } - pubKey.AddExtendedField(key, safeVal) - } -} diff --git a/vendor/github.com/docker/swarmkit/LICENSE b/vendor/github.com/docker/swarmkit/LICENSE new file mode 100644 index 0000000000..e2db6ed114 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/docker/swarmkit/agent/exec/controller.go b/vendor/github.com/docker/swarmkit/agent/exec/controller.go new file mode 100644 index 0000000000..c9e9343fd7 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/agent/exec/controller.go @@ -0,0 +1,362 @@ +package exec + +import ( + "fmt" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/protobuf/ptypes" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +// Controller controls execution of a task. +type Controller interface { + // Update the task definition seen by the controller. Will return + // ErrTaskUpdateFailed if the provided task definition changes fields that + // cannot be changed. + // + // Will be ignored if the task has exited. + Update(ctx context.Context, t *api.Task) error + + // Prepare the task for execution. This should ensure that all resources + // are created such that a call to Start should execute immediately. + Prepare(ctx context.Context) error + + // Start the target and return when it has started successfully. + Start(ctx context.Context) error + + // Wait blocks until the target has exited. + Wait(ctx context.Context) error + + // Shutdown requests to exit the target gracefully. + Shutdown(ctx context.Context) error + + // Terminate the target. + Terminate(ctx context.Context) error + + // Remove all resources allocated by the controller. + Remove(ctx context.Context) error + + // Close closes any ephemeral resources associated with the controller instance. + Close() error +} + +// ControllerLogs defines a component that makes logs accessible. +// +// Can usually be accessed on a controller instance via type assertion. +type ControllerLogs interface { + // Logs will write to the publisher until the context is cancelled or an + // error occurs. + Logs(ctx context.Context, publisher LogPublisher, options api.LogSubscriptionOptions) error +} + +// LogPublisher defines the protocol for receiving a log message. +type LogPublisher interface { + Publish(ctx context.Context, message api.LogMessage) error +} + +// LogPublisherFunc implements LogPublisher with just a function. +type LogPublisherFunc func(ctx context.Context, message api.LogMessage) error + +// Publish calls the wrapped function. +func (fn LogPublisherFunc) Publish(ctx context.Context, message api.LogMessage) error { + return fn(ctx, message) +} + +// LogPublisherProvider defines the protocol for receiving a log publisher. +type LogPublisherProvider interface { + Publisher(ctx context.Context, subscriptionID string) (LogPublisher, func(), error) +} + +// ContainerStatuser reports status of a container. +// +// This can be implemented by controllers or error types. +type ContainerStatuser interface { + // ContainerStatus returns the status of the target container, if + // available. When the container is not available, the status will be nil. + ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) +} + +// PortStatuser reports status of ports which are allocated by the executor. +type PortStatuser interface { + // PortStatus returns the status on a list of PortConfigs + // which are managed at the host level by the controller. + PortStatus(ctx context.Context) (*api.PortStatus, error) +} + +// Resolve attempts to get a controller from the executor and reports the +// correct status depending on the task's current state, according to the result.
+// +// Unlike Do, if an error is returned, the status should still be reported. The +// error merely reports the failure at getting the controller. +func Resolve(ctx context.Context, task *api.Task, executor Executor) (Controller, *api.TaskStatus, error) { + status := task.Status.Copy() + + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + }() + + ctlr, err := executor.Controller(task) + + // depending on the task's state, a failed controller resolution has varying + // impact. The following expresses that impact. + if err != nil { + status.Message = "resolving controller failed" + status.Err = err.Error() + // before the task has been started, we consider it a rejection. + // if the task is running, consider it failed. + // otherwise, keep the existing state. + if task.Status.State < api.TaskStateStarting { + status.State = api.TaskStateRejected + } else if task.Status.State <= api.TaskStateRunning { + status.State = api.TaskStateFailed + } + } else if task.Status.State < api.TaskStateAccepted { + // we always want to proceed to accepted when we resolve the controller + status.Message = "accepted" + status.State = api.TaskStateAccepted + status.Err = "" + } + + return ctlr, status, err +} + +// Do progresses the task state by performing a single operation on the +// controller. The returned TaskStatus should be marked as the new state of +// the task. + +// The returned status should be reported and placed back onto the task +// before the next call. The operation can be cancelled by passing a +// cancellable context. +// +// Errors from the task controller will be reported on the returned status. Any +// errors coming from this function should not be reported as related to the +// individual task. +// +// If ErrTaskNoop is returned, it means a second call to Do will result in no +// change. If ErrTaskDead is returned, calls to Do will no longer result in any +// action. +func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, error) { + status := task.Status.Copy() + + // stay in the current state. + noop := func(errs ...error) (*api.TaskStatus, error) { + return status, ErrTaskNoop + } + + retry := func() (*api.TaskStatus, error) { + // while we retry on all errors, this allows us to explicitly declare + // retry cases. + return status, ErrTaskRetry + } + + // transition moves the task to the next state. + transition := func(state api.TaskState, msg string) (*api.TaskStatus, error) { + current := status.State + status.State = state + status.Message = msg + status.Err = "" + + if current > state { + panic("invalid state transition") + } + return status, nil + } + + // containerStatus and exitCode keep track of whether or not we've set them + // in this particular method. Eventually, we assemble them as part of a defer. + var ( + containerStatus *api.ContainerStatus + portStatus *api.PortStatus + exitCode int + ) + + // fatal is called when execution of the task has failed. In this case, we + // proceed to a terminal error state and set the appropriate fields. + // + // Common checks for the nature of an error should be included here. If the + // error is determined not to be fatal for the task (for example, a + // temporary error or a context cancellation), the operation is retried + // instead of failing the task. + fatal := func(err error) (*api.TaskStatus, error) { + if err == nil { + panic("err must not be nil when fatal") + } + + if cs, ok := err.(ContainerStatuser); ok { + var err error + containerStatus, err = cs.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("error resolving container status on fatal") + } + } + + // make sure we've set the *correct* exit code + if ec, ok := err.(ExitCoder); ok { + exitCode = ec.ExitCode() + } + + if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled { + return retry() + } + + status.Err = err.Error() // still reported on temporary + if IsTemporary(err) { + return retry() + } + + // only at this point do we consider the error fatal to the task. + log.G(ctx).WithError(err).Error("fatal task error") + + // NOTE(stevvooe): The following switch dictates the terminal failure + // state based on the state in which the failure was encountered. + switch { + case status.State < api.TaskStateStarting: + status.State = api.TaskStateRejected + case status.State >= api.TaskStateStarting: + status.State = api.TaskStateFailed + } + + return status, nil + } + + // below, we have several callbacks that are run after the state transition + // is completed. + defer func() { + logStateChange(ctx, task.DesiredState, task.Status.State, status.State) + + if !equality.TaskStatusesEqualStable(status, &task.Status) { + status.Timestamp = ptypes.MustTimestampProto(time.Now()) + } + }() + + // extract the container status from the container, if supported. + defer func() { + // only do this if in an active state + if status.State < api.TaskStateStarting { + return + } + + if containerStatus == nil { + // collect this, if we haven't + cctlr, ok := ctlr.(ContainerStatuser) + if !ok { + return + } + + var err error + containerStatus, err = cctlr.ContainerStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container status unavailable") + } + + // at this point, things have gone fairly wrong. Remain positive + // and let's get something out the door. + if containerStatus == nil { + containerStatus = new(api.ContainerStatus) + containerStatusTask := task.Status.GetContainer() + if containerStatusTask != nil { + *containerStatus = *containerStatusTask // copy it over. + } + } + } + + // at this point, we *must* have a containerStatus. + if exitCode != 0 { + containerStatus.ExitCode = int32(exitCode) + } + + status.RuntimeStatus = &api.TaskStatus_Container{ + Container: containerStatus, + } + + if portStatus == nil { + pctlr, ok := ctlr.(PortStatuser) + if !ok { + return + } + + var err error + portStatus, err = pctlr.PortStatus(ctx) + if err != nil && !contextDoneError(err) { + log.G(ctx).WithError(err).Error("container port status unavailable") + } + } + + status.PortStatus = portStatus + }() + + // this branch bounds the largest state achievable in the agent as SHUTDOWN, which + // is exactly the correct behavior for the agent. + if task.DesiredState >= api.TaskStateShutdown { + if status.State >= api.TaskStateCompleted { + return noop() + } + + if err := ctlr.Shutdown(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateShutdown, "shutdown") + } + + if status.State > task.DesiredState { + return noop() // way beyond desired state, pause + } + + // the following states may proceed past desired state.
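+	// (Preparing may complete to Ready, Starting to Running, and Running to
+	// Completed even when the desired state is lower, so that an operation
+	// already in flight settles at a stable state before pausing.)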
+ switch status.State { + case api.TaskStatePreparing: + if err := ctlr.Prepare(ctx); err != nil && err != ErrTaskPrepared { + return fatal(err) + } + + return transition(api.TaskStateReady, "prepared") + case api.TaskStateStarting: + if err := ctlr.Start(ctx); err != nil && err != ErrTaskStarted { + return fatal(err) + } + + return transition(api.TaskStateRunning, "started") + case api.TaskStateRunning: + if err := ctlr.Wait(ctx); err != nil { + return fatal(err) + } + + return transition(api.TaskStateCompleted, "finished") + } + + // The following represent "pause" states. We can only proceed when the + // desired state is beyond our current state. + if status.State >= task.DesiredState { + return noop() + } + + switch status.State { + case api.TaskStateNew, api.TaskStatePending, api.TaskStateAssigned: + return transition(api.TaskStateAccepted, "accepted") + case api.TaskStateAccepted: + return transition(api.TaskStatePreparing, "preparing") + case api.TaskStateReady: + return transition(api.TaskStateStarting, "starting") + default: // terminal states + return noop() + } +} + +func logStateChange(ctx context.Context, desired, previous, next api.TaskState) { + if previous != next { + fields := logrus.Fields{ + "state.transition": fmt.Sprintf("%v->%v", previous, next), + "state.desired": desired, + } + log.G(ctx).WithFields(fields).Debug("state changed") + } +} + +func contextDoneError(err error) bool { + cause := errors.Cause(err) + return cause == context.Canceled || cause == context.DeadlineExceeded +} diff --git a/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go b/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go new file mode 100644 index 0000000000..076955ff80 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/agent/exec/controller_stub.go @@ -0,0 +1,75 @@ +package exec + +import ( + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" + "runtime" + "strings" +) + +// StubController implements the Controller interface, +// but allows you to specify behaviors for each of its methods. 
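+//
+// A minimal usage sketch (the StartFn override shown here is a hypothetical
+// example, not part of this package):
+//
+//	sc := NewStubController()
+//	sc.StartFn = func(ctx context.Context) error { return nil }
+//	err := sc.Start(ctx) // runs StartFn and increments sc.calls["Start"]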
+type StubController struct { + Controller + UpdateFn func(ctx context.Context, t *api.Task) error + PrepareFn func(ctx context.Context) error + StartFn func(ctx context.Context) error + WaitFn func(ctx context.Context) error + ShutdownFn func(ctx context.Context) error + TerminateFn func(ctx context.Context) error + RemoveFn func(ctx context.Context) error + CloseFn func() error + calls map[string]int + cstatus *api.ContainerStatus +} + +// NewStubController returns an initialized StubController +func NewStubController() *StubController { + return &StubController{ + calls: make(map[string]int), + } +} + +// called increments the call count for the calling method: +// if method A calls called(), the sc.calls["A"] value is incremented. +func (sc *StubController) called() { + pc, _, _, ok := runtime.Caller(1) + if !ok { + panic("Failed to find caller of function") + } + // longName looks like 'github.com/docker/swarmkit/agent/exec.(*StubController).Prepare:1' + longName := runtime.FuncForPC(pc).Name() + parts := strings.Split(longName, ".") + tail := strings.Split(parts[len(parts)-1], ":") + sc.calls[tail[0]]++ +} + +// Update is part of the Controller interface +func (sc *StubController) Update(ctx context.Context, t *api.Task) error { + sc.called() + return sc.UpdateFn(ctx, t) +} + +// Prepare is part of the Controller interface +func (sc *StubController) Prepare(ctx context.Context) error { sc.called(); return sc.PrepareFn(ctx) } + +// Start is part of the Controller interface +func (sc *StubController) Start(ctx context.Context) error { sc.called(); return sc.StartFn(ctx) } + +// Wait is part of the Controller interface +func (sc *StubController) Wait(ctx context.Context) error { sc.called(); return sc.WaitFn(ctx) } + +// Shutdown is part of the Controller interface +func (sc *StubController) Shutdown(ctx context.Context) error { sc.called(); return sc.ShutdownFn(ctx) } + +// Terminate is part of the Controller interface +func (sc *StubController) Terminate(ctx context.Context) error { + sc.called() + return sc.TerminateFn(ctx) +} + +// Remove is part of the Controller interface +func (sc *StubController) Remove(ctx context.Context) error { sc.called(); return sc.RemoveFn(ctx) } + +// Close is part of the Controller interface +func (sc *StubController) Close() error { sc.called(); return sc.CloseFn() } diff --git a/vendor/github.com/docker/swarmkit/agent/exec/errors.go b/vendor/github.com/docker/swarmkit/agent/exec/errors.go new file mode 100644 index 0000000000..af57e6b322 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/agent/exec/errors.go @@ -0,0 +1,82 @@ +package exec + +import "github.com/pkg/errors" + +var ( + // ErrRuntimeUnsupported is encountered when a task requires a runtime + // unsupported by the executor. + ErrRuntimeUnsupported = errors.New("exec: unsupported runtime") + + // ErrTaskPrepared is returned if the task is already prepared. + ErrTaskPrepared = errors.New("exec: task already prepared") + + // ErrTaskStarted can be returned from any operation that cannot be + // performed because the task has already been started. This does not imply + // that the task is running but rather that it is no longer valid to call + // Start. + ErrTaskStarted = errors.New("exec: task already started") + + // ErrTaskUpdateRejected is returned if a task update is rejected by a controller. + ErrTaskUpdateRejected = errors.New("exec: task update rejected") + + // ErrControllerClosed is returned when a task controller has been closed.
+ ErrControllerClosed = errors.New("exec: controller closed") + + // ErrTaskRetry is returned by Do when an operation failed but should be + // retried. The status should still be reported in this case. + ErrTaskRetry = errors.New("exec: task retry") + + // ErrTaskNoop is returned when a subsequent call to Do will not result in + // advancing the task. Callers should avoid calling Do until the task has been updated. + ErrTaskNoop = errors.New("exec: task noop") +) + +// ExitCoder is implemented by errors that have an exit code. +type ExitCoder interface { + // ExitCode returns the exit code. + ExitCode() int +} + +// Temporary indicates whether or not the error condition is temporary. +// +// If this is encountered in the controller, the failing operation will be +// retried when this returns true. Otherwise, the operation is considered +// fatal. +type Temporary interface { + Temporary() bool +} + +// MakeTemporary makes the error temporary. +func MakeTemporary(err error) error { + if IsTemporary(err) { + return err + } + + return temporary{err} +} + +type temporary struct { + error +} + +func (t temporary) Cause() error { return t.error } +func (t temporary) Temporary() bool { return true } + +// IsTemporary returns true if the error or a recursive cause returns true for +// temporary. +func IsTemporary(err error) bool { + for err != nil { + if tmp, ok := err.(Temporary); ok && tmp.Temporary() { + return true + } + + cause := errors.Cause(err) + if cause == err { + break + } + + err = cause + } + + return false +} diff --git a/vendor/github.com/docker/swarmkit/agent/exec/executor.go b/vendor/github.com/docker/swarmkit/agent/exec/executor.go new file mode 100644 index 0000000000..8c3fd03506 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/agent/exec/executor.go @@ -0,0 +1,81 @@ +package exec + +import ( + "github.com/docker/swarmkit/api" + "golang.org/x/net/context" +) + +// Executor provides controllers for tasks. +type Executor interface { + // Describe returns the underlying node description. + Describe(ctx context.Context) (*api.NodeDescription, error) + + // Configure uses the node object state to propagate node + // state to the underlying executor. + Configure(ctx context.Context, node *api.Node) error + + // Controller provides a controller for the given task. + Controller(t *api.Task) (Controller, error) + + // SetNetworkBootstrapKeys passes the symmetric keys from the + // manager to the executor. + SetNetworkBootstrapKeys([]*api.EncryptionKey) error +} + +// SecretsProvider is implemented by objects that can store secrets, typically +// an executor. +type SecretsProvider interface { + Secrets() SecretsManager +} + +// ConfigsProvider is implemented by objects that can store configs, +// typically an executor. +type ConfigsProvider interface { + Configs() ConfigsManager +} + +// DependencyManager is a meta-object that can keep track of typed objects +// such as secrets and configs. +type DependencyManager interface { + SecretsProvider + ConfigsProvider +} + +// DependencyGetter is a meta-object that can provide access to typed objects +// such as secrets and configs. +type DependencyGetter interface { + Secrets() SecretGetter + Configs() ConfigGetter +} + +// SecretGetter contains secret data necessary for the Controller. +type SecretGetter interface { + // Get returns the secret with a specific secret ID, if available. + // When the secret is not available, the return will be nil.
+ Get(secretID string) (*api.Secret, error) +} + +// SecretsManager is the interface for secret storage and updates. +type SecretsManager interface { + SecretGetter + + Add(secrets ...api.Secret) // add one or more secrets + Remove(secrets []string) // remove the secrets by ID + Reset() // remove all secrets +} + +// ConfigGetter contains config data necessary for the Controller. +type ConfigGetter interface { + // Get returns the config with a specific config ID, if available. + // When the config is not available, the return will be nil. + Get(configID string) (*api.Config, error) +} + +// ConfigsManager is the interface for config storage and updates. +type ConfigsManager interface { + ConfigGetter + + Add(configs ...api.Config) // add one or more configs + Remove(configs []string) // remove the configs by ID + Reset() // remove all configs +} diff --git a/vendor/github.com/docker/swarmkit/api/ca.pb.go b/vendor/github.com/docker/swarmkit/api/ca.pb.go new file mode 100644 index 0000000000..2704fc376b --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/ca.pb.go @@ -0,0 +1,2342 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/ca.proto +// DO NOT EDIT! + +/* + Package api is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/api/ca.proto + github.com/docker/swarmkit/api/control.proto + github.com/docker/swarmkit/api/dispatcher.proto + github.com/docker/swarmkit/api/health.proto + github.com/docker/swarmkit/api/logbroker.proto + github.com/docker/swarmkit/api/objects.proto + github.com/docker/swarmkit/api/raft.proto + github.com/docker/swarmkit/api/resource.proto + github.com/docker/swarmkit/api/snapshot.proto + github.com/docker/swarmkit/api/specs.proto + github.com/docker/swarmkit/api/types.proto + github.com/docker/swarmkit/api/watch.proto + + It has these top-level messages: + NodeCertificateStatusRequest + NodeCertificateStatusResponse + IssueNodeCertificateRequest + IssueNodeCertificateResponse + GetRootCACertificateRequest + GetRootCACertificateResponse + GetUnlockKeyRequest + GetUnlockKeyResponse + GetNodeRequest + GetNodeResponse + ListNodesRequest + ListNodesResponse + UpdateNodeRequest + UpdateNodeResponse + RemoveNodeRequest + RemoveNodeResponse + GetTaskRequest + GetTaskResponse + RemoveTaskRequest + RemoveTaskResponse + ListTasksRequest + ListTasksResponse + CreateServiceRequest + CreateServiceResponse + GetServiceRequest + GetServiceResponse + UpdateServiceRequest + UpdateServiceResponse + RemoveServiceRequest + RemoveServiceResponse + ListServicesRequest + ListServicesResponse + CreateNetworkRequest + CreateNetworkResponse + GetNetworkRequest + GetNetworkResponse + RemoveNetworkRequest + RemoveNetworkResponse + ListNetworksRequest + ListNetworksResponse + GetClusterRequest + GetClusterResponse + ListClustersRequest + ListClustersResponse + KeyRotation + UpdateClusterRequest + UpdateClusterResponse + GetSecretRequest + GetSecretResponse + UpdateSecretRequest + UpdateSecretResponse + ListSecretsRequest + ListSecretsResponse + CreateSecretRequest + CreateSecretResponse + RemoveSecretRequest + RemoveSecretResponse + GetConfigRequest + GetConfigResponse + UpdateConfigRequest + UpdateConfigResponse + ListConfigsRequest + ListConfigsResponse + CreateConfigRequest + CreateConfigResponse + RemoveConfigRequest + RemoveConfigResponse + SessionRequest + SessionMessage + HeartbeatRequest + HeartbeatResponse + UpdateTaskStatusRequest + UpdateTaskStatusResponse + TasksRequest + TasksMessage +
AssignmentsRequest + Assignment + AssignmentChange + AssignmentsMessage + HealthCheckRequest + HealthCheckResponse + LogSubscriptionOptions + LogSelector + LogContext + LogAttr + LogMessage + SubscribeLogsRequest + SubscribeLogsMessage + ListenSubscriptionsRequest + SubscriptionMessage + PublishLogsMessage + PublishLogsResponse + Meta + Node + Service + Endpoint + Task + NetworkAttachment + Network + Cluster + Secret + Config + Resource + Extension + RaftMember + JoinRequest + JoinResponse + LeaveRequest + LeaveResponse + ProcessRaftMessageRequest + ProcessRaftMessageResponse + StreamRaftMessageRequest + StreamRaftMessageResponse + ResolveAddressRequest + ResolveAddressResponse + InternalRaftRequest + StoreAction + AttachNetworkRequest + AttachNetworkResponse + DetachNetworkRequest + DetachNetworkResponse + StoreSnapshot + ClusterSnapshot + Snapshot + NodeSpec + ServiceSpec + ReplicatedService + GlobalService + TaskSpec + ResourceReference + GenericRuntimeSpec + NetworkAttachmentSpec + ContainerSpec + EndpointSpec + NetworkSpec + ClusterSpec + SecretSpec + ConfigSpec + Version + IndexEntry + Annotations + NamedGenericResource + DiscreteGenericResource + GenericResource + Resources + ResourceRequirements + Platform + PluginDescription + EngineDescription + NodeDescription + NodeTLSInfo + RaftMemberStatus + NodeStatus + Image + Mount + RestartPolicy + UpdateConfig + UpdateStatus + ContainerStatus + PortStatus + TaskStatus + NetworkAttachmentConfig + IPAMConfig + PortConfig + Driver + IPAMOptions + Peer + WeightedPeer + IssuanceStatus + AcceptancePolicy + ExternalCA + CAConfig + OrchestrationConfig + TaskDefaults + DispatcherConfig + RaftConfig + EncryptionConfig + SpreadOver + PlacementPreference + Placement + JoinTokens + RootCA + Certificate + EncryptionKey + ManagerStatus + FileTarget + SecretReference + ConfigReference + BlacklistedCertificate + HealthConfig + MaybeEncryptedRecord + RootRotation + Privileges + Object + SelectBySlot + SelectByCustom + SelectBy + WatchRequest + WatchMessage +*/ +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type NodeCertificateStatusRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} } +func (*NodeCertificateStatusRequest) ProtoMessage() {} +func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} } + +type NodeCertificateStatusResponse struct { + Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` +} + +func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} } +func (*NodeCertificateStatusResponse) ProtoMessage() {} +func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} } + +type IssueNodeCertificateRequest struct { + // DEPRECATED: Role is now selected based on which secret is matched. + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // CSR is the certificate signing request. + CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + // Availability allows a user to control the current scheduling status of a node + Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} } +func (*IssueNodeCertificateRequest) ProtoMessage() {} +func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} } + +type IssueNodeCertificateResponse struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"` +} + +func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} } +func (*IssueNodeCertificateResponse) ProtoMessage() {} +func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} } + +type GetRootCACertificateRequest struct { +} + +func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} } +func (*GetRootCACertificateRequest) ProtoMessage() {} +func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} } + +type GetRootCACertificateResponse struct { + Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` +} + +func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} } +func (*GetRootCACertificateResponse) ProtoMessage() {} +func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{5} } + +type GetUnlockKeyRequest struct { +} + +func (m *GetUnlockKeyRequest) Reset() { *m = GetUnlockKeyRequest{} } +func (*GetUnlockKeyRequest) ProtoMessage() {} +func (*GetUnlockKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{6} } + +type 
GetUnlockKeyResponse struct { + UnlockKey []byte `protobuf:"bytes,1,opt,name=unlock_key,json=unlockKey,proto3" json:"unlock_key,omitempty"` + Version Version `protobuf:"bytes,2,opt,name=version" json:"version"` +} + +func (m *GetUnlockKeyResponse) Reset() { *m = GetUnlockKeyResponse{} } +func (*GetUnlockKeyResponse) ProtoMessage() {} +func (*GetUnlockKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{7} } + +func init() { + proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest") + proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse") + proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest") + proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse") + proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest") + proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse") + proto.RegisterType((*GetUnlockKeyRequest)(nil), "docker.swarmkit.v1.GetUnlockKeyRequest") + proto.RegisterType((*GetUnlockKeyResponse)(nil), "docker.swarmkit.v1.GetUnlockKeyResponse") +} + +type authenticatedWrapperCAServer struct { + local CAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer { + return &authenticatedWrapperCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + return p.local.GetRootCACertificate(ctx, r) +} + +func (p *authenticatedWrapperCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) +} + +type authenticatedWrapperNodeCAServer struct { + local NodeCAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer { + return &authenticatedWrapperNodeCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + return p.local.IssueNodeCertificate(ctx, r) +} + +func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + return p.local.NodeCertificateStatus(ctx, r) +} + +func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest { + if m == nil { + return nil + } + o := &NodeCertificateStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusRequest) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusRequest) + *m = *o +} + +func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse { + if m == nil { + return nil + } + o := &NodeCertificateStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusResponse) + *m = *o + if o.Status != nil { + m.Status = &IssuanceStatus{} + 
github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) + } + if o.Certificate != nil { + m.Certificate = &Certificate{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Certificate, o.Certificate) + } +} + +func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest { + if m == nil { + return nil + } + o := &IssueNodeCertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateRequest) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateRequest) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } +} + +func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse { + if m == nil { + return nil + } + o := &IssueNodeCertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateResponse) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateResponse) + *m = *o +} + +func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest { + if m == nil { + return nil + } + o := &GetRootCACertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateRequest) CopyFrom(src interface{}) {} +func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse { + if m == nil { + return nil + } + o := &GetRootCACertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateResponse) CopyFrom(src interface{}) { + + o := src.(*GetRootCACertificateResponse) + *m = *o + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *GetUnlockKeyRequest) Copy() *GetUnlockKeyRequest { + if m == nil { + return nil + } + o := &GetUnlockKeyRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyRequest) CopyFrom(src interface{}) {} +func (m *GetUnlockKeyResponse) Copy() *GetUnlockKeyResponse { + if m == nil { + return nil + } + o := &GetUnlockKeyResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { + + o := src.(*GetUnlockKeyResponse) + *m = *o + if o.UnlockKey != nil { + m.UnlockKey = make([]byte, len(o.UnlockKey)) + copy(m.UnlockKey, o.UnlockKey) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CA service + +type CAClient interface { + GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) +} + +type cAClient struct { + cc *grpc.ClientConn +} + +func NewCAClient(cc *grpc.ClientConn) CAClient { + return &cAClient{cc} +} + +func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) { + out := new(GetRootCACertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cAClient) GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) { + out := new(GetUnlockKeyResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetUnlockKey", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for CA service + +type CAServer interface { + GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + GetUnlockKey(context.Context, *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) +} + +func RegisterCAServer(s *grpc.Server, srv CAServer) { + s.RegisterService(&_CA_serviceDesc, srv) +} + +func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRootCACertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetRootCACertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CA_GetUnlockKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUnlockKeyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CAServer).GetUnlockKey(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.CA/GetUnlockKey", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CAServer).GetUnlockKey(ctx, req.(*GetUnlockKeyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _CA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.CA", + HandlerType: (*CAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetRootCACertificate", + Handler: _CA_GetRootCACertificate_Handler, + }, + { + MethodName: "GetUnlockKey", + Handler: _CA_GetUnlockKey_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +// Client API for NodeCA service + +type NodeCAClient interface { + IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) +} + +type nodeCAClient struct { + cc *grpc.ClientConn +} + +func NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient { + return &nodeCAClient{cc} +} + +func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) { + out := new(IssueNodeCertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) { + out := new(NodeCertificateStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for NodeCA service + +type NodeCAServer interface { + IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) +} + +func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) { + s.RegisterService(&_NodeCA_serviceDesc, srv) +} + +func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IssueNodeCertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeCertificateStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NodeCA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.NodeCA", + HandlerType: (*NodeCAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IssueNodeCertificate", + Handler: _NodeCA_IssueNodeCertificate_Handler, + }, + { + MethodName: "NodeCertificateStatus", + Handler: _NodeCA_NodeCertificateStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +func (m *NodeCertificateStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *NodeCertificateStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 
nil { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Certificate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Certificate.Size())) + n2, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *IssueNodeCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + if len(m.Token) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *IssueNodeCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeMembership != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.NodeMembership)) + } + return i, nil +} + +func (m *GetRootCACertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetRootCACertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Certificate) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + return i, nil +} + +func (m *GetUnlockKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetUnlockKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UnlockKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.UnlockKey))) + i += copy(dAtA[i:], m.UnlockKey) + } + dAtA[i] = 0x12 + i++ + i = 
encodeVarintCa(dAtA, i, uint64(m.Version.Size())) + n3, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func encodeFixed64Ca(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Ca(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintCa(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyCAServer struct { + local CAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return 
p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetRootCACertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetUnlockKey(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetUnlockKey(modCtx, r) + } + return resp, err +} + +type raftProxyNodeCAServer struct { + local NodeCAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyNodeCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * 
rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + } + return resp, err +} + +func (m *NodeCertificateStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *NodeCertificateStatusResponse) Size() (n int) { + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovCa(uint64(l)) + } + if m.Certificate != nil { + l = m.Certificate.Size() + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *IssueNodeCertificateRequest) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovCa(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.Availability != 0 { + n += 1 + sovCa(uint64(m.Availability)) + } + return n +} + +func (m *IssueNodeCertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.NodeMembership != 0 { + n += 1 + sovCa(uint64(m.NodeMembership)) + } + return n 
+} + +func (m *GetRootCACertificateRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetRootCACertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *GetUnlockKeyRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetUnlockKeyResponse) Size() (n int) { + var l int + _ = l + l = len(m.UnlockKey) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = m.Version.Size() + n += 1 + l + sovCa(uint64(l)) + return n +} + +func sovCa(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozCa(x uint64) (n int) { + return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeCertificateStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCertificateStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`, + `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateRequest{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateResponse{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateRequest{`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateResponse{`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyRequest{`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyResponse{`, + `UnlockKey:` + fmt.Sprintf("%v", this.UnlockKey) + `,`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringCa(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeCertificateStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCertificateStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &IssuanceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Certificate == nil { + m.Certificate = &Certificate{} + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } 
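+ // Unknown field numbers land in this default branch: skipCa walks past
+ // one whole field (tag plus payload), so messages written by a newer
+ // schema revision are skipped rather than rejected as corrupt.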
+ if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) 
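+ // Appending into m.CSR[:0] copies the CSR bytes out of dAtA (reusing any
+ // existing backing array), so the decoded message never aliases the
+ // caller's input buffer once Unmarshal returns.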
+ if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType) + } + m.NodeMembership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
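+ // The request declares no fields, so the loop below only validates the
+ // wire format and skips any fields it encounters.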
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
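+ // A present-but-empty field decodes to a non-nil empty slice (see the
+ // check just below), preserving the wire-level distinction between an
+ // empty certificate and an absent one.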
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKey = append(m.UnlockKey[:0], dAtA[iNdEx:postIndex]...) 
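+ // Tags follow the usual (fieldNumber<<3)|wireType rule: UnlockKey is
+ // field 1 with length-delimited wire type 2, hence the 0xa written by
+ // MarshalTo above. Version (field 2) is a non-pointer message, so it is
+ // unmarshaled in place below without a nil check.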
+ if m.UnlockKey == nil { + m.UnlockKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCa(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthCa + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCa(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCa = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/ca.proto", fileDescriptorCa) } + +var fileDescriptorCa = []byte{ + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xee, 0xba, 0xfd, 0xd3, 0xbf, 0xd3, 0xd0, 0xa2, 0xa5, 0x95, 0x4c, 0x9a, 0x3a, 0x95, 0x39, + 0xb4, 0x20, 0x61, 0xb7, 0x01, 0x09, 0x09, 0x2e, 0x24, 0x41, 0xaa, 0x2a, 0x54, 0x84, 0xb6, 0x82, + 0x6b, 0xe5, 0x38, 0xdb, 0x74, 0x15, 0xc7, 0x6b, 0xbc, 0xeb, 0x42, 0x6e, 0x48, 0x20, 0xde, 0x00, + 0xc1, 0x89, 0x47, 0xe0, 0x39, 0x2a, 0x4e, 0x48, 0x5c, 0x38, 0x55, 0xd4, 0x0f, 0xc0, 0x33, 0x20, + 
0xaf, 0x6d, 0x9a, 0xb4, 0x4e, 0x5a, 0x4e, 0xf1, 0xce, 0x7c, 0xdf, 0x37, 0x33, 0xdf, 0x4e, 0x16, + 0xd6, 0xbb, 0x4c, 0x1e, 0x46, 0x6d, 0xcb, 0xe5, 0x7d, 0xbb, 0xc3, 0xdd, 0x1e, 0x0d, 0x6d, 0xf1, + 0xda, 0x09, 0xfb, 0x3d, 0x26, 0x6d, 0x27, 0x60, 0xb6, 0xeb, 0x58, 0x41, 0xc8, 0x25, 0xc7, 0x38, + 0xcd, 0x5a, 0x79, 0xd6, 0x3a, 0xda, 0xaa, 0xdc, 0xb9, 0x84, 0x2c, 0x07, 0x01, 0x15, 0x29, 0xff, + 0x52, 0xac, 0x08, 0xa8, 0x9b, 0x63, 0x97, 0xba, 0xbc, 0xcb, 0xd5, 0xa7, 0x9d, 0x7c, 0x65, 0xd1, + 0x07, 0x13, 0x14, 0x14, 0xa2, 0x1d, 0x1d, 0xd8, 0x81, 0x17, 0x75, 0x99, 0x9f, 0xfd, 0xa4, 0x44, + 0xb3, 0x05, 0xd5, 0x67, 0xbc, 0x43, 0x5b, 0x34, 0x94, 0xec, 0x80, 0xb9, 0x8e, 0xa4, 0x7b, 0xd2, + 0x91, 0x91, 0x20, 0xf4, 0x55, 0x44, 0x85, 0xc4, 0xb7, 0x60, 0xd6, 0xe7, 0x1d, 0xba, 0xcf, 0x3a, + 0x3a, 0x5a, 0x43, 0x1b, 0x73, 0x4d, 0x88, 0x4f, 0x6a, 0xa5, 0x84, 0xb2, 0xf3, 0x84, 0x94, 0x92, + 0xd4, 0x4e, 0xc7, 0xfc, 0x82, 0x60, 0x75, 0x8c, 0x8a, 0x08, 0xb8, 0x2f, 0x28, 0x7e, 0x08, 0x25, + 0xa1, 0x22, 0x4a, 0x65, 0xbe, 0x6e, 0x5a, 0x17, 0x2d, 0xb3, 0x76, 0x84, 0x88, 0x1c, 0xdf, 0xcd, + 0xb9, 0x19, 0x03, 0x37, 0x60, 0xde, 0x3d, 0x13, 0xd6, 0x35, 0x25, 0x50, 0x2b, 0x12, 0x18, 0xaa, + 0x4f, 0x86, 0x39, 0xe6, 0x0f, 0x04, 0x2b, 0x89, 0x3a, 0x3d, 0xd7, 0x65, 0x3e, 0xe5, 0x7d, 0x98, + 0x09, 0xb9, 0x47, 0x55, 0x73, 0x0b, 0xf5, 0x6a, 0x91, 0x76, 0xc2, 0x24, 0xdc, 0xa3, 0x4d, 0x4d, + 0x47, 0x44, 0xa1, 0xf1, 0x4d, 0x98, 0x76, 0x45, 0xa8, 0x1a, 0x2a, 0x37, 0x67, 0xe3, 0x93, 0xda, + 0x74, 0x6b, 0x8f, 0x90, 0x24, 0x86, 0x97, 0xe0, 0x3f, 0xc9, 0x7b, 0xd4, 0xd7, 0xa7, 0x13, 0xd3, + 0x48, 0x7a, 0xc0, 0xbb, 0x50, 0x76, 0x8e, 0x1c, 0xe6, 0x39, 0x6d, 0xe6, 0x31, 0x39, 0xd0, 0x67, + 0x54, 0xb9, 0xdb, 0xe3, 0xca, 0xed, 0x05, 0xd4, 0xb5, 0x1a, 0x43, 0x04, 0x32, 0x42, 0x37, 0x3f, + 0x22, 0xa8, 0x16, 0x4f, 0x95, 0xb9, 0x7e, 0x95, 0xcb, 0xc3, 0xcf, 0x61, 0x51, 0x81, 0xfa, 0xb4, + 0xdf, 0xa6, 0xa1, 0x38, 0x64, 0x81, 0x9a, 0x68, 0xa1, 0xbe, 0x3e, 0xb1, 0xaf, 0xdd, 0xbf, 0x70, + 0xb2, 0x90, 0xf0, 0xcf, 0xce, 0xe6, 0x2a, 0xac, 0x6c, 0x53, 0x49, 0x38, 0x97, 0xad, 0xc6, 0x45, + 0xb3, 0xcd, 0xc7, 0x50, 0x2d, 0x4e, 0x67, 0x5d, 0xaf, 0x8d, 0xde, 0x77, 0xd2, 0x79, 0x79, 0xf4, + 0x3a, 0x97, 0xe1, 0xc6, 0x36, 0x95, 0x2f, 0x7c, 0x8f, 0xbb, 0xbd, 0xa7, 0x74, 0x90, 0x0b, 0x87, + 0xb0, 0x34, 0x1a, 0xce, 0x04, 0x57, 0x01, 0x22, 0x15, 0xdc, 0xef, 0xd1, 0x41, 0xa6, 0x37, 0x17, + 0xe5, 0x30, 0xfc, 0x08, 0x66, 0x8f, 0x68, 0x28, 0x18, 0xf7, 0xb3, 0xdd, 0x5a, 0x29, 0x1a, 0xfc, + 0x65, 0x0a, 0x69, 0xce, 0x1c, 0x9f, 0xd4, 0xa6, 0x48, 0xce, 0xa8, 0xbf, 0xd7, 0x40, 0x6b, 0x35, + 0xf0, 0x3b, 0xa4, 0x6a, 0x5f, 0x18, 0x0a, 0xdb, 0x45, 0x5a, 0x13, 0xdc, 0xa9, 0x6c, 0x5e, 0x9d, + 0x90, 0x8e, 0x67, 0xfe, 0xff, 0xed, 0xeb, 0xef, 0xcf, 0x9a, 0x76, 0x1d, 0xe1, 0x37, 0x50, 0x1e, + 0x36, 0x00, 0xaf, 0x8f, 0xd1, 0x3a, 0xef, 0x5c, 0x65, 0xe3, 0x72, 0x60, 0x56, 0x6c, 0x59, 0x15, + 0x5b, 0x84, 0x6b, 0x0a, 0x79, 0xb7, 0xef, 0xf8, 0x4e, 0x97, 0x86, 0xf5, 0x4f, 0x1a, 0xa8, 0xbd, + 0xca, 0xac, 0x28, 0xda, 0xca, 0x62, 0x2b, 0x26, 0xfc, 0x2b, 0x8b, 0xad, 0x98, 0xb4, 0xf0, 0x43, + 0x56, 0x7c, 0x40, 0xb0, 0x5c, 0xf8, 0x24, 0xe1, 0xcd, 0x71, 0x6b, 0x3d, 0xee, 0x0d, 0xac, 0x6c, + 0xfd, 0x03, 0xe3, 0x7c, 0x23, 0x4d, 0xfd, 0xf8, 0xd4, 0x98, 0xfa, 0x79, 0x6a, 0x4c, 0xbd, 0x8d, + 0x0d, 0x74, 0x1c, 0x1b, 0xe8, 0x7b, 0x6c, 0xa0, 0x5f, 0xb1, 0x81, 0xda, 0x25, 0xf5, 0x02, 0xdf, + 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xda, 0xca, 0xba, 0x67, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/control.pb.go b/vendor/github.com/docker/swarmkit/api/control.pb.go new file mode 
100644 index 0000000000..6fcb0b6442 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/control.pb.go @@ -0,0 +1,16095 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/control.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UpdateServiceRequest_Rollback int32 + +const ( + // This is not a rollback. The spec field of the request will + // be honored. + UpdateServiceRequest_NONE UpdateServiceRequest_Rollback = 0 + // Roll back the service - get spec from the service's + // previous_spec. + UpdateServiceRequest_PREVIOUS UpdateServiceRequest_Rollback = 1 +) + +var UpdateServiceRequest_Rollback_name = map[int32]string{ + 0: "NONE", + 1: "PREVIOUS", +} +var UpdateServiceRequest_Rollback_value = map[string]int32{ + "NONE": 0, + "PREVIOUS": 1, +} + +func (x UpdateServiceRequest_Rollback) String() string { + return proto.EnumName(UpdateServiceRequest_Rollback_name, int32(x)) +} +func (UpdateServiceRequest_Rollback) EnumDescriptor() ([]byte, []int) { + return fileDescriptorControl, []int{18, 0} +} + +type GetNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } +func (*GetNodeRequest) ProtoMessage() {} +func (*GetNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type GetNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } +func (*GetNodeResponse) ProtoMessage() {} +func (*GetNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +type ListNodesRequest struct { + Filters *ListNodesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNodesRequest) Reset() { *m = ListNodesRequest{} } +func (*ListNodesRequest) ProtoMessage() {} +func (*ListNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +type ListNodesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Memberships []NodeSpec_Membership `protobuf:"varint,4,rep,name=memberships,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"memberships,omitempty"` + Roles []NodeRole 
`protobuf:"varint,5,rep,name=roles,enum=docker.swarmkit.v1.NodeRole" json:"roles,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,6,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNodesRequest_Filters) Reset() { *m = ListNodesRequest_Filters{} } +func (*ListNodesRequest_Filters) ProtoMessage() {} +func (*ListNodesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{2, 0} +} + +type ListNodesResponse struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` +} + +func (m *ListNodesResponse) Reset() { *m = ListNodesResponse{} } +func (*ListNodesResponse) ProtoMessage() {} +func (*ListNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +// UpdateNodeRequest requests an update to the specified node. This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +type UpdateNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeVersion *Version `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"` + Spec *NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateNodeRequest) Reset() { *m = UpdateNodeRequest{} } +func (*UpdateNodeRequest) ProtoMessage() {} +func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +type UpdateNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *UpdateNodeResponse) Reset() { *m = UpdateNodeResponse{} } +func (*UpdateNodeResponse) ProtoMessage() {} +func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +// RemoveNodeRequest requests to delete the specified node from store. 
+type RemoveNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` +} + +func (m *RemoveNodeRequest) Reset() { *m = RemoveNodeRequest{} } +func (*RemoveNodeRequest) ProtoMessage() {} +func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +type RemoveNodeResponse struct { +} + +func (m *RemoveNodeResponse) Reset() { *m = RemoveNodeResponse{} } +func (*RemoveNodeResponse) ProtoMessage() {} +func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +type GetTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +type GetTaskResponse struct { + Task *Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +type RemoveTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } +func (*RemoveTaskRequest) ProtoMessage() {} +func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +type RemoveTaskResponse struct { +} + +func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} } +func (*RemoveTaskResponse) ProtoMessage() {} +func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +type ListTasksRequest struct { + Filters *ListTasksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } +func (*ListTasksRequest) ProtoMessage() {} +func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +type ListTasksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ServiceIDs []string `protobuf:"bytes,4,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,5,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + DesiredStates []TaskState `protobuf:"varint,6,rep,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,7,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,9,rep,name=runtimes" json:"runtimes,omitempty"` + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. 
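+ // (Tag-numbering aside: UpToDate is field 8 even though it is declared
+ // after Runtimes, field 9; declaration order is independent of protobuf
+ // tag order.)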
+ UpToDate bool `protobuf:"varint,8,opt,name=up_to_date,json=upToDate,proto3" json:"up_to_date,omitempty"` +} + +func (m *ListTasksRequest_Filters) Reset() { *m = ListTasksRequest_Filters{} } +func (*ListTasksRequest_Filters) ProtoMessage() {} +func (*ListTasksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{12, 0} +} + +type ListTasksResponse struct { + Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } +func (*ListTasksResponse) ProtoMessage() {} +func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +type CreateServiceRequest struct { + Spec *ServiceSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +type CreateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *CreateServiceResponse) Reset() { *m = CreateServiceResponse{} } +func (*CreateServiceResponse) ProtoMessage() {} +func (*CreateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +type GetServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + InsertDefaults bool `protobuf:"varint,2,opt,name=insert_defaults,json=insertDefaults,proto3" json:"insert_defaults,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{16} } + +type GetServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *GetServiceResponse) Reset() { *m = GetServiceResponse{} } +func (*GetServiceResponse) ProtoMessage() {} +func (*GetServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{17} } + +type UpdateServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + ServiceVersion *Version `protobuf:"bytes,2,opt,name=service_version,json=serviceVersion" json:"service_version,omitempty"` + Spec *ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. 
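+ // A pure rollback therefore needs only the target and version, e.g.
+ //
+ //	req := &UpdateServiceRequest{
+ //		ServiceID:      id,
+ //		ServiceVersion: &version,
+ //		Rollback:       UpdateServiceRequest_PREVIOUS,
+ //	}
+ //
+ // where id and version identify the service revision being rolled back.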
+ Rollback UpdateServiceRequest_Rollback `protobuf:"varint,4,opt,name=rollback,proto3,enum=docker.swarmkit.v1.UpdateServiceRequest_Rollback" json:"rollback,omitempty"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{18} } + +type UpdateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *UpdateServiceResponse) Reset() { *m = UpdateServiceResponse{} } +func (*UpdateServiceResponse) ProtoMessage() {} +func (*UpdateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{19} } + +type RemoveServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` +} + +func (m *RemoveServiceRequest) Reset() { *m = RemoveServiceRequest{} } +func (*RemoveServiceRequest) ProtoMessage() {} +func (*RemoveServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{20} } + +type RemoveServiceResponse struct { +} + +func (m *RemoveServiceResponse) Reset() { *m = RemoveServiceResponse{} } +func (*RemoveServiceResponse) ProtoMessage() {} +func (*RemoveServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{21} } + +type ListServicesRequest struct { + Filters *ListServicesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{22} } + +type ListServicesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,5,rep,name=runtimes" json:"runtimes,omitempty"` +} + +func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} } +func (*ListServicesRequest_Filters) ProtoMessage() {} +func (*ListServicesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{22, 0} +} + +type ListServicesResponse struct { + Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{23} } + +type CreateNetworkRequest struct { + Spec *NetworkSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (*CreateNetworkRequest) ProtoMessage() {} +func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{24} } + +type CreateNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *CreateNetworkResponse) Reset() 
{ *m = CreateNetworkResponse{} } +func (*CreateNetworkResponse) ProtoMessage() {} +func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{25} } + +type GetNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} } +func (*GetNetworkRequest) ProtoMessage() {} +func (*GetNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{26} } + +type GetNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *GetNetworkResponse) Reset() { *m = GetNetworkResponse{} } +func (*GetNetworkResponse) ProtoMessage() {} +func (*GetNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{27} } + +type RemoveNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *RemoveNetworkRequest) Reset() { *m = RemoveNetworkRequest{} } +func (*RemoveNetworkRequest) ProtoMessage() {} +func (*RemoveNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{28} } + +type RemoveNetworkResponse struct { +} + +func (m *RemoveNetworkResponse) Reset() { *m = RemoveNetworkResponse{} } +func (*RemoveNetworkResponse) ProtoMessage() {} +func (*RemoveNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{29} } + +type ListNetworksRequest struct { + Filters *ListNetworksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} } +func (*ListNetworksRequest) ProtoMessage() {} +func (*ListNetworksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{30} } + +type ListNetworksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNetworksRequest_Filters) Reset() { *m = ListNetworksRequest_Filters{} } +func (*ListNetworksRequest_Filters) ProtoMessage() {} +func (*ListNetworksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{30, 0} +} + +type ListNetworksResponse struct { + Networks []*Network `protobuf:"bytes,1,rep,name=networks" json:"networks,omitempty"` +} + +func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} } +func (*ListNetworksResponse) ProtoMessage() {} +func (*ListNetworksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{31} } + +type GetClusterRequest struct { + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() 
([]byte, []int) { return fileDescriptorControl, []int{32} } + +type GetClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } +func (*GetClusterResponse) ProtoMessage() {} +func (*GetClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{33} } + +type ListClustersRequest struct { + Filters *ListClustersRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{34} } + +type ListClustersRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListClustersRequest_Filters) Reset() { *m = ListClustersRequest_Filters{} } +func (*ListClustersRequest_Filters) ProtoMessage() {} +func (*ListClustersRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{34, 0} +} + +type ListClustersResponse struct { + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{35} } + +// KeyRotation tells UpdateCluster what items to rotate +type KeyRotation struct { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + WorkerJoinToken bool `protobuf:"varint,1,opt,name=worker_join_token,json=workerJoinToken,proto3" json:"worker_join_token,omitempty"` + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. + ManagerJoinToken bool `protobuf:"varint,2,opt,name=manager_join_token,json=managerJoinToken,proto3" json:"manager_join_token,omitempty"` + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + ManagerUnlockKey bool `protobuf:"varint,3,opt,name=manager_unlock_key,json=managerUnlockKey,proto3" json:"manager_unlock_key,omitempty"` +} + +func (m *KeyRotation) Reset() { *m = KeyRotation{} } +func (*KeyRotation) ProtoMessage() {} +func (*KeyRotation) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{36} } + +type UpdateClusterRequest struct { + // ClusterID is the cluster ID to update. + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ClusterVersion is the version of the cluster being updated. + ClusterVersion *Version `protobuf:"bytes,2,opt,name=cluster_version,json=clusterVersion" json:"cluster_version,omitempty"` + // Spec is the new spec to apply to the cluster. 
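+ // Rotating join tokens or the unlock key (Rotation, below) does not
+ // require changing the spec; an unchanged spec may be resubmitted
+ // alongside the rotation flags.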
+ Spec *ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rotation contains flags for join token and unlock key rotation + Rotation KeyRotation `protobuf:"bytes,4,opt,name=rotation" json:"rotation"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{37} } + +type UpdateClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *UpdateClusterResponse) Reset() { *m = UpdateClusterResponse{} } +func (*UpdateClusterResponse) ProtoMessage() {} +func (*UpdateClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{38} } + +// GetSecretRequest is the request to get a `Secret` object given a secret id. +type GetSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } +func (*GetSecretRequest) ProtoMessage() {} +func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{39} } + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +type GetSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } +func (*GetSecretResponse) ProtoMessage() {} +func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{40} } + +type UpdateSecretRequest struct { + // SecretID is the secret ID to update. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretVersion is the version of the secret being updated. + SecretVersion *Version `protobuf:"bytes,2,opt,name=secret_version,json=secretVersion" json:"secret_version,omitempty"` + // Spec is the new spec to apply to the Secret. + // Only some fields are allowed to be updated. + Spec *SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateSecretRequest) Reset() { *m = UpdateSecretRequest{} } +func (*UpdateSecretRequest) ProtoMessage() {} +func (*UpdateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} } + +type UpdateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *UpdateSecretResponse) Reset() { *m = UpdateSecretResponse{} } +func (*UpdateSecretResponse) ProtoMessage() {} +func (*UpdateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} } + +// ListSecretsRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels.
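+//
+// For illustration, a hypothetical caller (assuming `client` is a
+// ControlClient and `ctx` is a context.Context; neither is defined in this
+// file) might list by name prefix and label:
+//
+//	resp, err := client.ListSecrets(ctx, &ListSecretsRequest{
+//		Filters: &ListSecretsRequest_Filters{
+//			NamePrefixes: []string{"prod-"},
+//			Labels:       map[string]string{"team": "infra"},
+//		},
+//	})
+//
+// Per ListSecretsResponse below, each Secret in resp has Spec.Data set to nil.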
+type ListSecretsRequest struct { + Filters *ListSecretsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListSecretsRequest) Reset() { *m = ListSecretsRequest{} } +func (*ListSecretsRequest) ProtoMessage() {} +func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} } + +type ListSecretsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListSecretsRequest_Filters) Reset() { *m = ListSecretsRequest_Filters{} } +func (*ListSecretsRequest_Filters) ProtoMessage() {} +func (*ListSecretsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{43, 0} +} + +// ListSecretsResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretsRequest`. The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +type ListSecretsResponse struct { + Secrets []*Secret `protobuf:"bytes,1,rep,name=secrets" json:"secrets,omitempty"` +} + +func (m *ListSecretsResponse) Reset() { *m = ListSecretsResponse{} } +func (*ListSecretsResponse) ProtoMessage() {} +func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} } + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +type CreateSecretRequest struct { + Spec *SecretSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateSecretRequest) Reset() { *m = CreateSecretRequest{} } +func (*CreateSecretRequest) ProtoMessage() {} +func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} } + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. +type CreateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *CreateSecretResponse) Reset() { *m = CreateSecretResponse{} } +func (*CreateSecretResponse) ProtoMessage() {} +func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} } + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +type RemoveSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *RemoveSecretRequest) Reset() { *m = RemoveSecretRequest{} } +func (*RemoveSecretRequest) ProtoMessage() {} +func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{47} } + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret.
+type RemoveSecretResponse struct { +} + +func (m *RemoveSecretResponse) Reset() { *m = RemoveSecretResponse{} } +func (*RemoveSecretResponse) ProtoMessage() {} +func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{48} } + +// GetConfigRequest is the request to get a `Config` object given a config id. +type GetConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{49} } + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +type GetConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{50} } + +type UpdateConfigRequest struct { + // ConfigID is the config ID to update. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigVersion is the version of the config being updated. + ConfigVersion *Version `protobuf:"bytes,2,opt,name=config_version,json=configVersion" json:"config_version,omitempty"` + // Spec is the new spec to apply to the Config. + // Only some fields are allowed to be updated. + Spec *ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{51} } + +type UpdateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{52} } + +// ListConfigsRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels.
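+//
+// The Filters message below has the same shape as ListSecretsRequest_Filters
+// above (Names, IDPrefixes, Labels, NamePrefixes), so filtered config listing
+// follows the same pattern as the secrets sketch earlier in this file.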
+type ListConfigsRequest struct { + Filters *ListConfigsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListConfigsRequest) Reset() { *m = ListConfigsRequest{} } +func (*ListConfigsRequest) ProtoMessage() {} +func (*ListConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{53} } + +type ListConfigsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListConfigsRequest_Filters) Reset() { *m = ListConfigsRequest_Filters{} } +func (*ListConfigsRequest_Filters) ProtoMessage() {} +func (*ListConfigsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{53, 0} +} + +// ListConfigsResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigsRequest`. +type ListConfigsResponse struct { + Configs []*Config `protobuf:"bytes,1,rep,name=configs" json:"configs,omitempty"` +} + +func (m *ListConfigsResponse) Reset() { *m = ListConfigsResponse{} } +func (*ListConfigsResponse) ProtoMessage() {} +func (*ListConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{54} } + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +type CreateConfigRequest struct { + Spec *ConfigSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateConfigRequest) Reset() { *m = CreateConfigRequest{} } +func (*CreateConfigRequest) ProtoMessage() {} +func (*CreateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{55} } + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. +type CreateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *CreateConfigResponse) Reset() { *m = CreateConfigResponse{} } +func (*CreateConfigResponse) ProtoMessage() {} +func (*CreateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{56} } + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +type RemoveConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *RemoveConfigRequest) Reset() { *m = RemoveConfigRequest{} } +func (*RemoveConfigRequest) ProtoMessage() {} +func (*RemoveConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{57} } + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config.
+type RemoveConfigResponse struct { +} + +func (m *RemoveConfigResponse) Reset() { *m = RemoveConfigResponse{} } +func (*RemoveConfigResponse) ProtoMessage() {} +func (*RemoveConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{58} } + +func init() { + proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "docker.swarmkit.v1.GetNodeResponse") + proto.RegisterType((*ListNodesRequest)(nil), "docker.swarmkit.v1.ListNodesRequest") + proto.RegisterType((*ListNodesRequest_Filters)(nil), "docker.swarmkit.v1.ListNodesRequest.Filters") + proto.RegisterType((*ListNodesResponse)(nil), "docker.swarmkit.v1.ListNodesResponse") + proto.RegisterType((*UpdateNodeRequest)(nil), "docker.swarmkit.v1.UpdateNodeRequest") + proto.RegisterType((*UpdateNodeResponse)(nil), "docker.swarmkit.v1.UpdateNodeResponse") + proto.RegisterType((*RemoveNodeRequest)(nil), "docker.swarmkit.v1.RemoveNodeRequest") + proto.RegisterType((*RemoveNodeResponse)(nil), "docker.swarmkit.v1.RemoveNodeResponse") + proto.RegisterType((*GetTaskRequest)(nil), "docker.swarmkit.v1.GetTaskRequest") + proto.RegisterType((*GetTaskResponse)(nil), "docker.swarmkit.v1.GetTaskResponse") + proto.RegisterType((*RemoveTaskRequest)(nil), "docker.swarmkit.v1.RemoveTaskRequest") + proto.RegisterType((*RemoveTaskResponse)(nil), "docker.swarmkit.v1.RemoveTaskResponse") + proto.RegisterType((*ListTasksRequest)(nil), "docker.swarmkit.v1.ListTasksRequest") + proto.RegisterType((*ListTasksRequest_Filters)(nil), "docker.swarmkit.v1.ListTasksRequest.Filters") + proto.RegisterType((*ListTasksResponse)(nil), "docker.swarmkit.v1.ListTasksResponse") + proto.RegisterType((*CreateServiceRequest)(nil), "docker.swarmkit.v1.CreateServiceRequest") + proto.RegisterType((*CreateServiceResponse)(nil), "docker.swarmkit.v1.CreateServiceResponse") + proto.RegisterType((*GetServiceRequest)(nil), "docker.swarmkit.v1.GetServiceRequest") + proto.RegisterType((*GetServiceResponse)(nil), "docker.swarmkit.v1.GetServiceResponse") + proto.RegisterType((*UpdateServiceRequest)(nil), "docker.swarmkit.v1.UpdateServiceRequest") + proto.RegisterType((*UpdateServiceResponse)(nil), "docker.swarmkit.v1.UpdateServiceResponse") + proto.RegisterType((*RemoveServiceRequest)(nil), "docker.swarmkit.v1.RemoveServiceRequest") + proto.RegisterType((*RemoveServiceResponse)(nil), "docker.swarmkit.v1.RemoveServiceResponse") + proto.RegisterType((*ListServicesRequest)(nil), "docker.swarmkit.v1.ListServicesRequest") + proto.RegisterType((*ListServicesRequest_Filters)(nil), "docker.swarmkit.v1.ListServicesRequest.Filters") + proto.RegisterType((*ListServicesResponse)(nil), "docker.swarmkit.v1.ListServicesResponse") + proto.RegisterType((*CreateNetworkRequest)(nil), "docker.swarmkit.v1.CreateNetworkRequest") + proto.RegisterType((*CreateNetworkResponse)(nil), "docker.swarmkit.v1.CreateNetworkResponse") + proto.RegisterType((*GetNetworkRequest)(nil), "docker.swarmkit.v1.GetNetworkRequest") + proto.RegisterType((*GetNetworkResponse)(nil), "docker.swarmkit.v1.GetNetworkResponse") + proto.RegisterType((*RemoveNetworkRequest)(nil), "docker.swarmkit.v1.RemoveNetworkRequest") + proto.RegisterType((*RemoveNetworkResponse)(nil), "docker.swarmkit.v1.RemoveNetworkResponse") + proto.RegisterType((*ListNetworksRequest)(nil), "docker.swarmkit.v1.ListNetworksRequest") + proto.RegisterType((*ListNetworksRequest_Filters)(nil), "docker.swarmkit.v1.ListNetworksRequest.Filters") + proto.RegisterType((*ListNetworksResponse)(nil), 
"docker.swarmkit.v1.ListNetworksResponse") + proto.RegisterType((*GetClusterRequest)(nil), "docker.swarmkit.v1.GetClusterRequest") + proto.RegisterType((*GetClusterResponse)(nil), "docker.swarmkit.v1.GetClusterResponse") + proto.RegisterType((*ListClustersRequest)(nil), "docker.swarmkit.v1.ListClustersRequest") + proto.RegisterType((*ListClustersRequest_Filters)(nil), "docker.swarmkit.v1.ListClustersRequest.Filters") + proto.RegisterType((*ListClustersResponse)(nil), "docker.swarmkit.v1.ListClustersResponse") + proto.RegisterType((*KeyRotation)(nil), "docker.swarmkit.v1.KeyRotation") + proto.RegisterType((*UpdateClusterRequest)(nil), "docker.swarmkit.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse") + proto.RegisterType((*GetSecretRequest)(nil), "docker.swarmkit.v1.GetSecretRequest") + proto.RegisterType((*GetSecretResponse)(nil), "docker.swarmkit.v1.GetSecretResponse") + proto.RegisterType((*UpdateSecretRequest)(nil), "docker.swarmkit.v1.UpdateSecretRequest") + proto.RegisterType((*UpdateSecretResponse)(nil), "docker.swarmkit.v1.UpdateSecretResponse") + proto.RegisterType((*ListSecretsRequest)(nil), "docker.swarmkit.v1.ListSecretsRequest") + proto.RegisterType((*ListSecretsRequest_Filters)(nil), "docker.swarmkit.v1.ListSecretsRequest.Filters") + proto.RegisterType((*ListSecretsResponse)(nil), "docker.swarmkit.v1.ListSecretsResponse") + proto.RegisterType((*CreateSecretRequest)(nil), "docker.swarmkit.v1.CreateSecretRequest") + proto.RegisterType((*CreateSecretResponse)(nil), "docker.swarmkit.v1.CreateSecretResponse") + proto.RegisterType((*RemoveSecretRequest)(nil), "docker.swarmkit.v1.RemoveSecretRequest") + proto.RegisterType((*RemoveSecretResponse)(nil), "docker.swarmkit.v1.RemoveSecretResponse") + proto.RegisterType((*GetConfigRequest)(nil), "docker.swarmkit.v1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "docker.swarmkit.v1.GetConfigResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "docker.swarmkit.v1.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "docker.swarmkit.v1.UpdateConfigResponse") + proto.RegisterType((*ListConfigsRequest)(nil), "docker.swarmkit.v1.ListConfigsRequest") + proto.RegisterType((*ListConfigsRequest_Filters)(nil), "docker.swarmkit.v1.ListConfigsRequest.Filters") + proto.RegisterType((*ListConfigsResponse)(nil), "docker.swarmkit.v1.ListConfigsResponse") + proto.RegisterType((*CreateConfigRequest)(nil), "docker.swarmkit.v1.CreateConfigRequest") + proto.RegisterType((*CreateConfigResponse)(nil), "docker.swarmkit.v1.CreateConfigResponse") + proto.RegisterType((*RemoveConfigRequest)(nil), "docker.swarmkit.v1.RemoveConfigRequest") + proto.RegisterType((*RemoveConfigResponse)(nil), "docker.swarmkit.v1.RemoveConfigResponse") + proto.RegisterEnum("docker.swarmkit.v1.UpdateServiceRequest_Rollback", UpdateServiceRequest_Rollback_name, UpdateServiceRequest_Rollback_value) +} + +type authenticatedWrapperControlServer struct { + local ControlServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperControlServer(local ControlServer, authorize func(context.Context, []string) error) ControlServer { + return &authenticatedWrapperControlServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return 
p.local.GetNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) 
CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateConfig(ctx context.Context, 
r *CreateConfigRequest) (*CreateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) +} + +func (m *GetNodeRequest) Copy() *GetNodeRequest { + if m == nil { + return nil + } + o := &GetNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeRequest) CopyFrom(src interface{}) { + + o := src.(*GetNodeRequest) + *m = *o +} + +func (m *GetNodeResponse) Copy() *GetNodeResponse { + if m == nil { + return nil + } + o := &GetNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeResponse) CopyFrom(src interface{}) { + + o := src.(*GetNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *ListNodesRequest) Copy() *ListNodesRequest { + if m == nil { + return nil + } + o := &ListNodesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNodesRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNodesRequest_Filters) Copy() *ListNodesRequest_Filters { + if m == nil { + return nil + } + o := &ListNodesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Memberships != nil { + m.Memberships = make([]NodeSpec_Membership, len(o.Memberships)) + copy(m.Memberships, o.Memberships) + } + + if o.Roles != nil { + m.Roles = make([]NodeRole, len(o.Roles)) + copy(m.Roles, o.Roles) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNodesResponse) Copy() *ListNodesResponse { + if m == nil { + return nil + } + o := &ListNodesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesResponse) CopyFrom(src interface{}) { + + o := src.(*ListNodesResponse) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + +} + +func (m *UpdateNodeRequest) Copy() *UpdateNodeRequest { + if m == nil { + return nil + } + o := &UpdateNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeRequest) + *m = *o + if o.NodeVersion != nil { + m.NodeVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeVersion, o.NodeVersion) + } + if o.Spec != nil { + m.Spec = &NodeSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateNodeResponse) Copy() *UpdateNodeResponse { + if m == nil { + return nil + } + o := &UpdateNodeResponse{} + o.CopyFrom(m) + return o 
+} + +func (m *UpdateNodeResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *RemoveNodeRequest) Copy() *RemoveNodeRequest { + if m == nil { + return nil + } + o := &RemoveNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNodeRequest) + *m = *o +} + +func (m *RemoveNodeResponse) Copy() *RemoveNodeResponse { + if m == nil { + return nil + } + o := &RemoveNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeResponse) CopyFrom(src interface{}) {} +func (m *GetTaskRequest) Copy() *GetTaskRequest { + if m == nil { + return nil + } + o := &GetTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskRequest) CopyFrom(src interface{}) { + + o := src.(*GetTaskRequest) + *m = *o +} + +func (m *GetTaskResponse) Copy() *GetTaskResponse { + if m == nil { + return nil + } + o := &GetTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskResponse) CopyFrom(src interface{}) { + + o := src.(*GetTaskResponse) + *m = *o + if o.Task != nil { + m.Task = &Task{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Task, o.Task) + } +} + +func (m *RemoveTaskRequest) Copy() *RemoveTaskRequest { + if m == nil { + return nil + } + o := &RemoveTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveTaskRequest) + *m = *o +} + +func (m *RemoveTaskResponse) Copy() *RemoveTaskResponse { + if m == nil { + return nil + } + o := &RemoveTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskResponse) CopyFrom(src interface{}) {} +func (m *ListTasksRequest) Copy() *ListTasksRequest { + if m == nil { + return nil + } + o := &ListTasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListTasksRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListTasksRequest_Filters) Copy() *ListTasksRequest_Filters { + if m == nil { + return nil + } + o := &ListTasksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.DesiredStates != nil { + m.DesiredStates = make([]TaskState, len(o.DesiredStates)) + copy(m.DesiredStates, o.DesiredStates) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListTasksResponse) Copy() *ListTasksResponse { + if m == nil { + return nil + } + o := &ListTasksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksResponse) 
CopyFrom(src interface{}) { + + o := src.(*ListTasksResponse) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *CreateServiceRequest) Copy() *CreateServiceRequest { + if m == nil { + return nil + } + o := &CreateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*CreateServiceRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ServiceSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateServiceResponse) Copy() *CreateServiceResponse { + if m == nil { + return nil + } + o := &CreateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*CreateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *GetServiceRequest) Copy() *GetServiceRequest { + if m == nil { + return nil + } + o := &GetServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceRequest) CopyFrom(src interface{}) { + + o := src.(*GetServiceRequest) + *m = *o +} + +func (m *GetServiceResponse) Copy() *GetServiceResponse { + if m == nil { + return nil + } + o := &GetServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceResponse) CopyFrom(src interface{}) { + + o := src.(*GetServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *UpdateServiceRequest) Copy() *UpdateServiceRequest { + if m == nil { + return nil + } + o := &UpdateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceRequest) + *m = *o + if o.ServiceVersion != nil { + m.ServiceVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) + } + if o.Spec != nil { + m.Spec = &ServiceSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateServiceResponse) Copy() *UpdateServiceResponse { + if m == nil { + return nil + } + o := &UpdateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *RemoveServiceRequest) Copy() *RemoveServiceRequest { + if m == nil { + return nil + } + o := &RemoveServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveServiceRequest) + *m = *o +} + +func (m *RemoveServiceResponse) Copy() *RemoveServiceResponse { + if m == nil { + return nil + } + o := &RemoveServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceResponse) CopyFrom(src interface{}) {} +func (m *ListServicesRequest) Copy() *ListServicesRequest { + if m == nil { + return nil + } + o := &ListServicesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListServicesRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m 
*ListServicesRequest_Filters) Copy() *ListServicesRequest_Filters { + if m == nil { + return nil + } + o := &ListServicesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListServicesResponse) Copy() *ListServicesResponse { + if m == nil { + return nil + } + o := &ListServicesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesResponse) CopyFrom(src interface{}) { + + o := src.(*ListServicesResponse) + *m = *o + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + +} + +func (m *CreateNetworkRequest) Copy() *CreateNetworkRequest { + if m == nil { + return nil + } + o := &CreateNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkRequest) + *m = *o + if o.Spec != nil { + m.Spec = &NetworkSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateNetworkResponse) Copy() *CreateNetworkResponse { + if m == nil { + return nil + } + o := &CreateNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *GetNetworkRequest) Copy() *GetNetworkRequest { + if m == nil { + return nil + } + o := &GetNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*GetNetworkRequest) + *m = *o +} + +func (m *GetNetworkResponse) Copy() *GetNetworkResponse { + if m == nil { + return nil + } + o := &GetNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*GetNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *RemoveNetworkRequest) Copy() *RemoveNetworkRequest { + if m == nil { + return nil + } + o := &RemoveNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNetworkRequest) + *m = *o +} + +func (m *RemoveNetworkResponse) Copy() *RemoveNetworkResponse { + if m == nil { + return nil + } + o := &RemoveNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkResponse) CopyFrom(src interface{}) {} +func (m *ListNetworksRequest) Copy() *ListNetworksRequest { + if m == nil { + return nil + } + o := &ListNetworksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest) + *m = *o + 
if o.Filters != nil { + m.Filters = &ListNetworksRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNetworksRequest_Filters) Copy() *ListNetworksRequest_Filters { + if m == nil { + return nil + } + o := &ListNetworksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNetworksResponse) Copy() *ListNetworksResponse { + if m == nil { + return nil + } + o := &ListNetworksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksResponse) CopyFrom(src interface{}) { + + o := src.(*ListNetworksResponse) + *m = *o + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + +} + +func (m *GetClusterRequest) Copy() *GetClusterRequest { + if m == nil { + return nil + } + o := &GetClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterRequest) CopyFrom(src interface{}) { + + o := src.(*GetClusterRequest) + *m = *o +} + +func (m *GetClusterResponse) Copy() *GetClusterResponse { + if m == nil { + return nil + } + o := &GetClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterResponse) CopyFrom(src interface{}) { + + o := src.(*GetClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *ListClustersRequest) Copy() *ListClustersRequest { + if m == nil { + return nil + } + o := &ListClustersRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListClustersRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListClustersRequest_Filters) Copy() *ListClustersRequest_Filters { + if m == nil { + return nil + } + o := &ListClustersRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListClustersResponse) Copy() *ListClustersResponse { + if m == nil { + return nil + } + o := &ListClustersResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersResponse) CopyFrom(src interface{}) { + + o := src.(*ListClustersResponse) + *m = *o + if o.Clusters != nil { + m.Clusters = 
make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + +} + +func (m *KeyRotation) Copy() *KeyRotation { + if m == nil { + return nil + } + o := &KeyRotation{} + o.CopyFrom(m) + return o +} + +func (m *KeyRotation) CopyFrom(src interface{}) { + + o := src.(*KeyRotation) + *m = *o +} + +func (m *UpdateClusterRequest) Copy() *UpdateClusterRequest { + if m == nil { + return nil + } + o := &UpdateClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterRequest) + *m = *o + if o.ClusterVersion != nil { + m.ClusterVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) + } + if o.Spec != nil { + m.Spec = &ClusterSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Rotation, &o.Rotation) +} + +func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { + if m == nil { + return nil + } + o := &UpdateClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *GetSecretRequest) Copy() *GetSecretRequest { + if m == nil { + return nil + } + o := &GetSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretRequest) CopyFrom(src interface{}) { + + o := src.(*GetSecretRequest) + *m = *o +} + +func (m *GetSecretResponse) Copy() *GetSecretResponse { + if m == nil { + return nil + } + o := &GetSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretResponse) CopyFrom(src interface{}) { + + o := src.(*GetSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *UpdateSecretRequest) Copy() *UpdateSecretRequest { + if m == nil { + return nil + } + o := &UpdateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretRequest) + *m = *o + if o.SecretVersion != nil { + m.SecretVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.SecretVersion, o.SecretVersion) + } + if o.Spec != nil { + m.Spec = &SecretSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateSecretResponse) Copy() *UpdateSecretResponse { + if m == nil { + return nil + } + o := &UpdateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *ListSecretsRequest) Copy() *ListSecretsRequest { + if m == nil { + return nil + } + o := &ListSecretsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListSecretsRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListSecretsRequest_Filters) Copy() *ListSecretsRequest_Filters { + if m == nil { + return nil + } + o := &ListSecretsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m 
*ListSecretsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListSecretsResponse) Copy() *ListSecretsResponse { + if m == nil { + return nil + } + o := &ListSecretsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsResponse) CopyFrom(src interface{}) { + + o := src.(*ListSecretsResponse) + *m = *o + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + +} + +func (m *CreateSecretRequest) Copy() *CreateSecretRequest { + if m == nil { + return nil + } + o := &CreateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*CreateSecretRequest) + *m = *o + if o.Spec != nil { + m.Spec = &SecretSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateSecretResponse) Copy() *CreateSecretResponse { + if m == nil { + return nil + } + o := &CreateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*CreateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *RemoveSecretRequest) Copy() *RemoveSecretRequest { + if m == nil { + return nil + } + o := &RemoveSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveSecretRequest) + *m = *o +} + +func (m *RemoveSecretResponse) Copy() *RemoveSecretResponse { + if m == nil { + return nil + } + o := &RemoveSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretResponse) CopyFrom(src interface{}) {} +func (m *GetConfigRequest) Copy() *GetConfigRequest { + if m == nil { + return nil + } + o := &GetConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigRequest) CopyFrom(src interface{}) { + + o := src.(*GetConfigRequest) + *m = *o +} + +func (m *GetConfigResponse) Copy() *GetConfigResponse { + if m == nil { + return nil + } + o := &GetConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigResponse) CopyFrom(src interface{}) { + + o := src.(*GetConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *UpdateConfigRequest) Copy() *UpdateConfigRequest { + if m == nil { + return nil + } + o := &UpdateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigRequest) + *m = *o + if o.ConfigVersion != nil { + m.ConfigVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) + } + if o.Spec != nil { + m.Spec = &ConfigSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateConfigResponse) Copy() *UpdateConfigResponse { + if m == nil { + 
return nil + } + o := &UpdateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *ListConfigsRequest) Copy() *ListConfigsRequest { + if m == nil { + return nil + } + o := &ListConfigsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListConfigsRequest_Filters{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListConfigsRequest_Filters) Copy() *ListConfigsRequest_Filters { + if m == nil { + return nil + } + o := &ListConfigsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListConfigsResponse) Copy() *ListConfigsResponse { + if m == nil { + return nil + } + o := &ListConfigsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsResponse) CopyFrom(src interface{}) { + + o := src.(*ListConfigsResponse) + *m = *o + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *CreateConfigRequest) Copy() *CreateConfigRequest { + if m == nil { + return nil + } + o := &CreateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*CreateConfigRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ConfigSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateConfigResponse) Copy() *CreateConfigResponse { + if m == nil { + return nil + } + o := &CreateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*CreateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *RemoveConfigRequest) Copy() *RemoveConfigRequest { + if m == nil { + return nil + } + o := &RemoveConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveConfigRequest) + *m = *o +} + +func (m *RemoveConfigResponse) Copy() *RemoveConfigResponse { + if m == nil { + return nil + } + o := &RemoveConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
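+// (The blank const below simply references grpc.SupportPackageIsVersion4;
+// the build fails against any grpc package that does not export that
+// constant, which is what enforces the check.)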
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Control service + +type ControlClient interface { + GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) + ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) + UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) + RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) + ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) + RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) + GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) + ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) + CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) + RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID`. + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) + // UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same + // id as `UpdateSecretRequest.SecretID`. + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) + // ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, or any id prefix in + // `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails.
+	ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) +	// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based +	// on the provided `CreateSecretRequest.SecretSpec`. +	// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, +	// or if the secret data is too long or contains invalid characters. +	// - Returns an error if the creation fails. +	CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) +	// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. +	// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. +	// - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. +	// - Returns an error if the deletion fails. +	RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) +	// GetConfig returns a `GetConfigResponse` with a `Config` with the same +	// id as `GetConfigRequest.ConfigID` +	// - Returns `NotFound` if the Config with the given id is not found. +	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. +	// - Returns an error if getting fails. +	GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) +	// UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same +	// id as `GetConfigRequest.ConfigID` +	// - Returns `NotFound` if the Config with the given id is not found. +	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. +	// - Returns an error if updating fails. +	UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) +	// ListConfigs returns a `ListConfigResponse` with a list of `Config`s being +	// managed, or all configs matching any name in `ListConfigsRequest.Names`, any +	// name prefix in `ListConfigsRequest.NamePrefixes`, any id in +	// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. +	// - Returns an error if listing fails. +	ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) +	// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based +	// on the provided `CreateConfigRequest.ConfigSpec`. +	// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, +	// or if the config data is too long or contains invalid characters. +	// - Returns an error if the creation fails. +	CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) +	// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. +	// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. +	// - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. +	// - Returns an error if the deletion fails. +	RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) +} + +type controlClient struct { +	cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { +	return &controlClient{cc} +} + +func (c *controlClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { +	out := new(GetNodeResponse) +	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNode", in, out, c.cc, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) { + out := new(ListNodesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNodes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) { + out := new(UpdateNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) { + out := new(RemoveNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListTasks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) { + out := new(RemoveTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) { + out := new(GetServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) { + out := new(CreateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) { + out := new(UpdateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateService", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) { + out := new(RemoveServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) { + out := new(GetNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + out := new(ListNetworksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNetworks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) { + out := new(CreateNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) { + out := new(RemoveNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) { + out := new(GetClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) { + out := new(UpdateClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { + out := new(GetSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) { + out := new(UpdateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateSecret", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) { + out := new(ListSecretsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListSecrets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) { + out := new(CreateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) { + out := new(RemoveSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + out := new(UpdateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) { + out := new(ListConfigsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) { + out := new(CreateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) { + out := new(RemoveConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveConfig", in, out, c.cc, opts...) 
+	if err != nil { +		return nil, err +	} +	return out, nil +} + +// Server API for Control service + +type ControlServer interface { +	GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) +	ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) +	UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error) +	RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) +	GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error) +	ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error) +	RemoveTask(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error) +	GetService(context.Context, *GetServiceRequest) (*GetServiceResponse, error) +	ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) +	CreateService(context.Context, *CreateServiceRequest) (*CreateServiceResponse, error) +	UpdateService(context.Context, *UpdateServiceRequest) (*UpdateServiceResponse, error) +	RemoveService(context.Context, *RemoveServiceRequest) (*RemoveServiceResponse, error) +	GetNetwork(context.Context, *GetNetworkRequest) (*GetNetworkResponse, error) +	ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) +	CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error) +	RemoveNetwork(context.Context, *RemoveNetworkRequest) (*RemoveNetworkResponse, error) +	GetCluster(context.Context, *GetClusterRequest) (*GetClusterResponse, error) +	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) +	UpdateCluster(context.Context, *UpdateClusterRequest) (*UpdateClusterResponse, error) +	// GetSecret returns a `GetSecretResponse` with a `Secret` with the same +	// id as `GetSecretRequest.SecretID` +	// - Returns `NotFound` if the Secret with the given id is not found. +	// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. +	// - Returns an error if getting fails. +	GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) +	// UpdateSecret returns a `UpdateSecretResponse` with a `Secret` with the same +	// id as `GetSecretRequest.SecretID` +	// - Returns `NotFound` if the Secret with the given id is not found. +	// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. +	// - Returns an error if updating fails. +	UpdateSecret(context.Context, *UpdateSecretRequest) (*UpdateSecretResponse, error) +	// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being +	// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any +	// name prefix in `ListSecretsRequest.NamePrefixes`, any id in +	// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. +	// - Returns an error if listing fails. +	ListSecrets(context.Context, *ListSecretsRequest) (*ListSecretsResponse, error) +	// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based +	// on the provided `CreateSecretRequest.SecretSpec`. +	// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, +	// or if the secret data is too long or contains invalid characters. +	// - Returns an error if the creation fails. +	CreateSecret(context.Context, *CreateSecretRequest) (*CreateSecretResponse, error) +	// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. +	// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
+	// - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. +	// - Returns an error if the deletion fails. +	RemoveSecret(context.Context, *RemoveSecretRequest) (*RemoveSecretResponse, error) +	// GetConfig returns a `GetConfigResponse` with a `Config` with the same +	// id as `GetConfigRequest.ConfigID` +	// - Returns `NotFound` if the Config with the given id is not found. +	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. +	// - Returns an error if getting fails. +	GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) +	// UpdateConfig returns a `UpdateConfigResponse` with a `Config` with the same +	// id as `GetConfigRequest.ConfigID` +	// - Returns `NotFound` if the Config with the given id is not found. +	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. +	// - Returns an error if updating fails. +	UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) +	// ListConfigs returns a `ListConfigResponse` with a list of `Config`s being +	// managed, or all configs matching any name in `ListConfigsRequest.Names`, any +	// name prefix in `ListConfigsRequest.NamePrefixes`, any id in +	// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. +	// - Returns an error if listing fails. +	ListConfigs(context.Context, *ListConfigsRequest) (*ListConfigsResponse, error) +	// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based +	// on the provided `CreateConfigRequest.ConfigSpec`. +	// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, +	// or if the config data is too long or contains invalid characters. +	// - Returns an error if the creation fails. +	CreateConfig(context.Context, *CreateConfigRequest) (*CreateConfigResponse, error) +	// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. +	// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. +	// - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. +	// - Returns an error if the deletion fails.
+ RemoveConfig(context.Context, *RemoveConfigRequest) (*RemoveConfigResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNodes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNodes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNodes(ctx, req.(*ListNodesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateNode(ctx, req.(*UpdateNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNode(ctx, req.(*RemoveNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(ListTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListTasks(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveTask(ctx, req.(*RemoveTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateService", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveService(ctx, req.(*RemoveServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNetwork(ctx, req.(*GetNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNetworks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNetworks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNetworks(ctx, req.(*ListNetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateNetwork(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNetwork(ctx, req.(*RemoveNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetCluster_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetSecret(ctx, req.(*GetSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateSecret(ctx, req.(*UpdateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSecretsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListSecrets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: 
srv, + FullMethod: "/docker.swarmkit.v1.Control/ListSecrets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListSecrets(ctx, req.(*ListSecretsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateSecret(ctx, req.(*CreateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveSecret(ctx, req.(*RemoveSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListConfigs(ctx, req.(*ListConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Control_CreateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateConfig(ctx, req.(*CreateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveConfig(ctx, req.(*RemoveConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetNode", + Handler: _Control_GetNode_Handler, + }, + { + MethodName: "ListNodes", + Handler: _Control_ListNodes_Handler, + }, + { + MethodName: "UpdateNode", + Handler: _Control_UpdateNode_Handler, + }, + { + MethodName: "RemoveNode", + Handler: _Control_RemoveNode_Handler, + }, + { + MethodName: "GetTask", + Handler: _Control_GetTask_Handler, + }, + { + MethodName: "ListTasks", + Handler: _Control_ListTasks_Handler, + }, + { + MethodName: "RemoveTask", + Handler: _Control_RemoveTask_Handler, + }, + { + MethodName: "GetService", + Handler: _Control_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _Control_ListServices_Handler, + }, + { + MethodName: "CreateService", + Handler: _Control_CreateService_Handler, + }, + { + MethodName: "UpdateService", + Handler: _Control_UpdateService_Handler, + }, + { + MethodName: "RemoveService", + Handler: _Control_RemoveService_Handler, + }, + { + MethodName: "GetNetwork", + Handler: _Control_GetNetwork_Handler, + }, + { + MethodName: "ListNetworks", + Handler: _Control_ListNetworks_Handler, + }, + { + MethodName: "CreateNetwork", + Handler: _Control_CreateNetwork_Handler, + }, + { + MethodName: "RemoveNetwork", + Handler: _Control_RemoveNetwork_Handler, + }, + { + MethodName: "GetCluster", + Handler: _Control_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _Control_ListClusters_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _Control_UpdateCluster_Handler, + }, + { + MethodName: "GetSecret", + Handler: _Control_GetSecret_Handler, + }, + { + MethodName: "UpdateSecret", + Handler: _Control_UpdateSecret_Handler, + }, + { + MethodName: "ListSecrets", + Handler: _Control_ListSecrets_Handler, + }, + { + MethodName: "CreateSecret", + Handler: _Control_CreateSecret_Handler, + }, + { + MethodName: "RemoveSecret", + Handler: _Control_RemoveSecret_Handler, + }, + { + MethodName: "GetConfig", + Handler: _Control_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _Control_UpdateConfig_Handler, + }, + { + MethodName: "ListConfigs", + Handler: 
_Control_ListConfigs_Handler, + }, + { + MethodName: "CreateConfig", + Handler: _Control_CreateConfig_Handler, + }, + { + MethodName: "RemoveConfig", + Handler: _Control_RemoveConfig_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/control.proto", +} + +func (m *GetNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *GetNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n1, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ListNodesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n2, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ListNodesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Memberships) > 0 { + for _, num := range m.Memberships { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.Roles) > 0 { + for _, num := range m.Roles { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } 
+ dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNodesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.NodeVersion.Size())) + n3, err := m.NodeVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n5, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *RemoveNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.Force { + dAtA[i] = 0x10 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RemoveNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *RemoveTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *RemoveTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n7, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ListTasksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DesiredStates) > 0 
{ + for _, num := range m.DesiredStates { + dAtA[i] = 0x30 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.UpToDate { + dAtA[i] = 0x40 + i++ + if m.UpToDate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CreateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n9, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *GetServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.InsertDefaults { + dAtA[i] = 0x10 + i++ + if m.InsertDefaults { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *GetServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n10, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m 
*UpdateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.ServiceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ServiceVersion.Size())) + n11, err := m.ServiceVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Rollback != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rollback)) + } + return i, nil +} + +func (m *UpdateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n13, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *RemoveServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + return i, nil +} + +func (m *RemoveServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListServicesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n14, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *ListServicesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for 
_, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListServicesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n15, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *CreateNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n16, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *GetNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *GetNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n17, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *RemoveNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *RemoveNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListNetworksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n18, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *ListNetworksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNetworksResponse) Marshal() (dAtA []byte, err error) { + 
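
The generated MarshalTo bodies above hand-write the protobuf wire format: each field starts with a key byte equal to field_number<<3 | wire_type, so 0xa is field 1 with wire type 2 (length-delimited), 0x12/0x1a/0x22/0x2a are fields 2 through 5, and 0x20 is field 4 with wire type 0 (varint), which is why UpdateServiceRequest.Rollback is written as a bare varint with no length prefix. A minimal standalone sketch (illustrative only, not part of the vendored file) of where those constants come from and how a length-delimited field is laid out:

package main

import "fmt"

// fieldKey computes the protobuf key byte for field numbers 1..15:
// field_number<<3 | wire_type fits in a single byte for that range.
func fieldKey(fieldNum, wireType byte) byte {
	return fieldNum<<3 | wireType
}

func main() {
	const (
		varint          = 0 // ints, bools, enums
		lengthDelimited = 2 // strings, bytes, nested messages
	)
	fmt.Printf("%#x %#x %#x\n",
		fieldKey(1, lengthDelimited), // 0xa  - e.g. ServiceID
		fieldKey(2, lengthDelimited), // 0x12 - e.g. ServiceVersion
		fieldKey(4, varint))          // 0x20 - e.g. Rollback

	// A length-delimited field is: key byte, varint length, payload.
	// byte(len(s)) is only valid here because len(s) < 128.
	s := "svc-1"
	rec := append([]byte{fieldKey(1, lengthDelimited), byte(len(s))}, s...)
	fmt.Printf("%x\n", rec) // 0a057376632d31
}
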
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *GetClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + return i, nil +} + +func (m *GetClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n19, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ListClustersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n20, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *ListClustersRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i 
+= copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListClustersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *KeyRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WorkerJoinToken { + dAtA[i] = 0x8 + i++ + if m.WorkerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerJoinToken { + dAtA[i] = 0x10 + i++ + if m.ManagerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerUnlockKey { + dAtA[i] = 0x18 + i++ + if m.ManagerUnlockKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *UpdateClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + if m.ClusterVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ClusterVersion.Size())) + n21, err := m.ClusterVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rotation.Size())) + n23, err := m.Rotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *UpdateClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n24, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} + +func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
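
Scalar fields use wire type 0: KeyRotation's three bools get key bytes 0x8, 0x10 and 0x18 (fields 1-3), each followed by a single 0 or 1 byte, and a false value is skipped entirely because proto3 omits zero values on the wire. Note also that UpdateClusterRequest writes Rotation unconditionally under tag 0x22: it is a non-pointer embedded message, so there is no nil check to gate it. A short sketch of the bool encoding (illustrative; not a symbol from the file):

// encodeBoolField mirrors the generated KeyRotation marshaling: a proto3
// bool is a varint field (wire type 0) and is omitted when false.
func encodeBoolField(buf []byte, fieldNum byte, v bool) []byte {
	if !v {
		return buf // proto3: zero values are not serialized
	}
	return append(buf, fieldNum<<3|0, 1) // key byte, then the varint 1
}

// encodeBoolField(nil, 1, true) -> [0x08 0x01]  (WorkerJoinToken)
// encodeBoolField(nil, 3, true) -> [0x18 0x01]  (ManagerUnlockKey)
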
dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n25, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *UpdateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if m.SecretVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.SecretVersion.Size())) + n26, err := m.SecretVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n27, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *UpdateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n28, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *ListSecretsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n29, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *ListSecretsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, 
i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListSecretsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n30, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *CreateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n31, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *RemoveSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *RemoveSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *GetConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + 
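
The Labels maps in the Filters messages are serialized as repeated nested messages, one entry per key/value pair: inside each entry the key is field 1 (tag 0xa) and the value field 2 (tag 0x12), and the mapSize expression is exactly "key byte + length varint + bytes" summed for both. A worked example under the simplifying assumption that both lengths are under 128, so each length varint is a single byte (mapEntrySize is a hypothetical helper, not in the file):

// mapEntrySize mirrors the mapSize arithmetic in the Labels loops above,
// assuming len(k) and len(v) < 128 so sovControl of each length is 1.
func mapEntrySize(k, v string) int {
	return 1 + 1 + len(k) + // 0x0a key tag, length varint, key bytes
		1 + 1 + len(v) // 0x12 value tag, length varint, value bytes
}

// mapEntrySize("env", "prod") == 11; on the wire the entry is then
// preceded by the outer 0x1a tag and the varint length 11.
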
return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n32, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if m.ConfigVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ConfigVersion.Size())) + n33, err := m.ConfigVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} + +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n35, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *ListConfigsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n36, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *ListConfigsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = 
encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListConfigsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *CreateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n38, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *RemoveConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *RemoveConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Control(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Control(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + 
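
encodeVarintControl implements standard base-128 varints: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last; encodeFixed64Control and encodeFixed32Control are plain little-endian stores. The same varint encoding is available in the standard library, which makes for an easy round-trip check (standalone, illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 300 = 0b1_0010_1100: the low 7 bits (0x2c) are emitted first with
	// the continuation bit set (0xac), then the remaining bits (0x02).
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("%x\n", buf[:n]) // ac02

	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}
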
dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyControlServer struct { + local ControlServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ControlServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyControlServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyControlServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyControlServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = 
p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNodes(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNodes(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNodes(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return 
p.local.GetTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListTasks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListTasks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListTasks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err 
:= NewControlClient(conn).ListServices(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListServices(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListServices(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) 
GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNetworks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNetworks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is 
closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListClusters(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListClusters(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListClusters(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if 
err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListSecrets(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListSecrets(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err 
:= p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) + } 
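
Every generated raftProxyControlServer method follows one template: ask the ConnProvider for the current leader connection; if this node is itself the leader (raftselector.ErrIsLeader), run the local context modifiers and serve locally; otherwise stamp a "redirect" entry into the outgoing gRPC metadata (rejecting requests that already carry one, with codes.ResourceExhausted, to break redirect loops) and forward. When the forwarded call fails with a transport-level error, matched by substring because this code predates structured status inspection, pollNewLeaderConn health-checks candidates every 500ms until a new leader serves, and the call is retried exactly once. A condensed sketch of that skeleton; all types and parameter names below are placeholders, not symbols from the vendored file:

package main

import (
	"context"
	"errors"
	"fmt"
	"strings"
)

// Placeholder types standing in for the concrete request, response and
// connection types used by the generated methods.
type request struct{}
type response struct{}
type leaderConn struct{}

var errIsLeader = errors.New("this node is the raft leader") // stands in for raftselector.ErrIsLeader

// isTransportErr reproduces the substring checks used by every generated
// proxy method to decide whether a retry against a new leader is worthwhile.
func isTransportErr(err error) bool {
	s := err.Error()
	return strings.Contains(s, "is closing") ||
		strings.Contains(s, "the connection is unavailable") ||
		strings.Contains(s, "connection error")
}

// proxyCall condenses the template: serve locally when this node leads,
// otherwise forward to the leader and, on a transport failure, wait for a
// new leader and retry once.
func proxyCall(
	ctx context.Context,
	getLeader func(context.Context) (*leaderConn, error), // connSelector.LeaderConn
	serveLocal func(context.Context, *request) (*response, error), // p.local.<Method>
	forward func(context.Context, *leaderConn, *request) (*response, error), // client.<Method>
	pollNewLeader func(context.Context) (*leaderConn, error), // p.pollNewLeaderConn
	r *request,
) (*response, error) {
	conn, err := getLeader(ctx)
	if err != nil {
		if err == errIsLeader {
			return serveLocal(ctx, r)
		}
		return nil, err
	}
	resp, err := forward(ctx, conn, r)
	if err != nil && isTransportErr(err) {
		// Leadership probably moved mid-call: poll until a healthy new
		// leader answers (the generated code health-checks every 500ms).
		conn, err = pollNewLeader(ctx)
		if err != nil {
			if err == errIsLeader {
				return serveLocal(ctx, r)
			}
			return nil, err
		}
		return forward(ctx, conn, r)
	}
	return resp, err
}

func main() {
	// Trivial wiring: getLeader reports that this node is the leader,
	// so the request is served locally without any forwarding.
	resp, err := proxyCall(context.Background(),
		func(context.Context) (*leaderConn, error) { return nil, errIsLeader },
		func(context.Context, *request) (*response, error) { return &response{}, nil },
		func(context.Context, *leaderConn, *request) (*response, error) { return nil, errors.New("unused") },
		func(context.Context) (*leaderConn, error) { return nil, errIsLeader },
		&request{},
	)
	fmt.Println(resp != nil, err) // true <nil>
}
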
+ return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListConfigs(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListConfigs(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateConfig(ctx context.Context, r *CreateConfigRequest) (*CreateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveConfig(modCtx, r) + } + return resp, err +} + +func (m *GetNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + 
len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Memberships) > 0 { + for _, e := range m.Memberships { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.Roles) > 0 { + for _, e := range m.Roles { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNodesResponse) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UpdateNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.NodeVersion != nil { + l = m.NodeVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Force { + n += 2 + } + return n +} + +func (m *RemoveNodeResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetTaskResponse) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListTasksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListTasksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.DesiredStates) > 0 { + for _, e := range m.DesiredStates { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.UpToDate { + n += 2 + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListTasksResponse) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateServiceRequest) Size() 
(n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.InsertDefaults { + n += 2 + } + return n +} + +func (m *GetServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ServiceVersion != nil { + l = m.ServiceVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Rollback != 0 { + n += 1 + sovControl(uint64(m.Rollback)) + } + return n +} + +func (m *UpdateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListServicesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListServicesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServicesResponse) Size() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + 
sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListNetworksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNetworksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNetworksResponse) Size() (n int) { + var l int + _ = l + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *GetClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListClustersResponse) Size() (n int) { + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *KeyRotation) Size() (n int) { + var l int + _ = l + if m.WorkerJoinToken { + n += 2 + } + if m.ManagerJoinToken { + n += 2 + } + if m.ManagerUnlockKey { + n += 2 + } + return n +} + +func (m *UpdateClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ClusterVersion != nil { + l = m.ClusterVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = m.Rotation.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *UpdateClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + 
l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.SecretVersion != nil { + l = m.SecretVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListSecretsResponse) Size() (n int) { + var l int + _ = l + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateSecretRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ConfigVersion != nil { + l = m.ConfigVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + 
sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListConfigsResponse) Size() (n int) { + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateConfigRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GetNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNodesRequest_Filters", "ListNodesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNodesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Memberships:` + fmt.Sprintf("%v", this.Memberships) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesResponse{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&UpdateNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeVersion:` + strings.Replace(fmt.Sprintf("%v", this.NodeVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NodeSpec", "NodeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeResponse{`, + `}`, + }, "") + return s +} +func (this *GetTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *GetTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskResponse{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskResponse{`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListTasksRequest_Filters", "ListTasksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListTasksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `DesiredStates:` + fmt.Sprintf("%v", this.DesiredStates) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `UpToDate:` + fmt.Sprintf("%v", this.UpToDate) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksResponse{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceRequest) String() string { 
+ if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `InsertDefaults:` + fmt.Sprintf("%v", this.InsertDefaults) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `ServiceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ServiceVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `Rollback:` + fmt.Sprintf("%v", this.Rollback) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceResponse{`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListServicesRequest_Filters", "ListServicesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListServicesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesResponse{`, + `Services:` + 
strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NetworkSpec", "NetworkSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkResponse{`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNetworksRequest_Filters", "ListNetworksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNetworksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksResponse{`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ListClustersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListClustersRequest_Filters", "ListClustersRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListClustersRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersResponse{`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KeyRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyRotation{`, + `WorkerJoinToken:` + fmt.Sprintf("%v", this.WorkerJoinToken) + `,`, + `ManagerJoinToken:` + fmt.Sprintf("%v", this.ManagerJoinToken) + `,`, + `ManagerUnlockKey:` + fmt.Sprintf("%v", this.ManagerUnlockKey) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `ClusterVersion:` + strings.Replace(fmt.Sprintf("%v", this.ClusterVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ClusterSpec", "ClusterSpec", 1) + `,`, + `Rotation:` + strings.Replace(strings.Replace(this.Rotation.String(), "KeyRotation", "KeyRotation", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretVersion:` + strings.Replace(fmt.Sprintf("%v", this.SecretVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretResponse) String() string { + if this == nil { 
+ return "nil" + } + s := strings.Join([]string{`&UpdateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListSecretsRequest_Filters", "ListSecretsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListSecretsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsResponse{`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretResponse{`, + `}`, + }, "") + return s +} +func (this *GetConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *GetConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigVersion:` + strings.Replace(fmt.Sprintf("%v", this.ConfigVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&UpdateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListConfigsRequest_Filters", "ListConfigsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListConfigsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsResponse{`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigResponse{`, + `}`, + }, "") + return s +} +func valueToStringControl(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GetNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNodesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType == 0 { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Memberships", wireType) + } + case 5: + if wireType == 0 { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeRequest: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeVersion == nil { + m.NodeVersion = &Version{} + } + if err := m.NodeVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NodeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListTasksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + 
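The Labels handling above shows how a proto map<string, string> field travels on the wire: each entry is a nested message whose field 1 is the key and field 2 the value, which is why the generated code reads a keykey/valuekey tag varint before each string and stores an empty value when the entry payload ends after the key. A minimal standalone sketch of one entry decode, assuming well-formed input; uvarint, str, and decodeEntry are illustrative names, not part of this file:

package main

import (
	"fmt"
	"io"
)

// uvarint and str are pared-down helpers; the bounds guards are reduced
// to the essentials for brevity.
func uvarint(data []byte, i *int) (uint64, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if *i >= len(data) {
			return 0, io.ErrUnexpectedEOF
		}
		b := data[*i]
		*i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, nil
		}
	}
}

func str(data []byte, i *int) (string, error) {
	n, err := uvarint(data, i)
	if err != nil || *i+int(n) > len(data) {
		return "", io.ErrUnexpectedEOF
	}
	s := string(data[*i : *i+int(n)])
	*i += int(n)
	return s, nil
}

// decodeEntry parses one key/value entry message into m.
func decodeEntry(entry []byte, m map[string]string) error {
	i := 0
	if _, err := uvarint(entry, &i); err != nil { // field-1 tag ("keykey" above)
		return err
	}
	k, err := str(entry, &i)
	if err != nil {
		return err
	}
	v := "" // a truncated entry yields the empty string, as in the else branch above
	if i < len(entry) {
		if _, err := uvarint(entry, &i); err != nil { // field-2 tag ("valuekey")
			return err
		}
		if v, err = str(entry, &i); err != nil {
			return err
		}
	}
	m[k] = v
	return nil
}

func main() {
	// One entry message: key "env" (field 1), value "prod" (field 2).
	entry := []byte{0x0A, 0x03, 'e', 'n', 'v', 0x12, 0x04, 'p', 'r', 'o', 'd'}
	labels := map[string]string{}
	err := decodeEntry(entry, labels)
	fmt.Println(labels, err) // map[env:prod] <nil>
}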
if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredStates", wireType) + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpToDate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UpToDate = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + 
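The DesiredStates branch just above accepts both encodings protobuf permits for a repeated enum: wire type 0 carries a single varint value, while wire type 2 carries a packed run of varints inside one length-delimited payload. A minimal sketch of the packed path, with TaskState standing in for the generated enum and readUvarint/decodePacked as illustrative names:

package main

import (
	"fmt"
	"io"
)

// TaskState stands in for the generated enum type.
type TaskState int32

// readUvarint decodes one base-128 varint starting at *i.
func readUvarint(data []byte, i *int) (uint64, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, fmt.Errorf("varint overflow")
		}
		if *i >= len(data) {
			return 0, io.ErrUnexpectedEOF
		}
		b := data[*i]
		*i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, nil
		}
	}
}

// decodePacked consumes packedLen bytes of consecutive varints, mirroring
// the "for iNdEx < postIndex" loop in the generated code.
func decodePacked(data []byte, i *int, packedLen int) ([]TaskState, error) {
	post := *i + packedLen
	if post > len(data) {
		return nil, io.ErrUnexpectedEOF
	}
	var out []TaskState
	for *i < post {
		v, err := readUvarint(data, i)
		if err != nil {
			return nil, err
		}
		out = append(out, TaskState(v))
	}
	return out, nil
}

func main() {
	data := []byte{0x00, 0x01, 0x03} // three packed enum values
	i := 0
	states, err := decodePacked(data, &i, len(data))
	fmt.Println(states, err) // [0 1 3] <nil>
}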
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsertDefaults", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InsertDefaults = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceVersion == nil { + m.ServiceVersion = &Version{} + } + if err := m.ServiceVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType) + } + m.Rollback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rollback |= (UpdateServiceRequest_Rollback(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + 
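ServiceID above follows the standard wire type 2 pattern used for every string field in this file: decode a varint length, bounds-check postIndex against the buffer, then slice the bytes out. A standalone sketch of that step; readString is an illustrative name, and the guards mirror the ErrInvalidLengthControl and io.ErrUnexpectedEOF checks above:

package main

import (
	"fmt"
	"io"
)

// readString decodes one length-delimited string starting at *i.
func readString(data []byte, i *int) (string, error) {
	// Varint length prefix.
	var n uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return "", fmt.Errorf("varint overflow")
		}
		if *i >= len(data) {
			return "", io.ErrUnexpectedEOF
		}
		b := data[*i]
		*i++
		n |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	post := *i + int(n)
	if int(n) < 0 || post > len(data) { // guard against overflow and truncation
		return "", io.ErrUnexpectedEOF
	}
	s := string(data[*i:post])
	*i = post
	return s, nil
}

func main() {
	// 0x0A = field 1, wire type 2; then length 2 and the bytes "ok".
	data := []byte{0x0A, 0x02, 'o', 'k'}
	i := 1 // the key byte was already consumed by the field switch
	fmt.Println(readString(data, &i)) // ok <nil>
}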
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListServicesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue 
:= int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NetworkSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNetworksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + 
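Every Unmarshal in this file opens by decoding a key varint and splitting it into fieldNum (wire >> 3) and wireType (wire & 0x7); fields the message does not recognize fall into the default branch, which delegates to skipControl to step over the payload. A reduced sketch of that dispatch and skip logic, handling only wire types 0 and 2; key and skipLen are illustrative names:

package main

import (
	"errors"
	"fmt"
)

// key splits a decoded tag varint into field number and wire type.
func key(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

// skipLen returns how many bytes the current field's payload occupies.
func skipLen(data []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: consume bytes until the continuation bit clears
		for n, b := range data {
			if b < 0x80 {
				return n + 1, nil
			}
		}
		return 0, errors.New("truncated varint")
	case 2: // length-delimited: a varint length prefix, then that many bytes
		var l, n int
		for shift := uint(0); ; shift += 7 {
			if n >= len(data) {
				return 0, errors.New("truncated length")
			}
			b := data[n]
			n++
			l |= (int(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		return n + l, nil
	default:
		return 0, fmt.Errorf("unhandled wire type %d", wireType)
	}
}

func main() {
	f, w := key(0x1A) // 0x1A = field 3, wire type 2
	fmt.Println(f, w) // 3 2
	n, err := skipLen([]byte{0x02, 'h', 'i'}, w)
	fmt.Println(n, err) // 3 <nil>
}

Skipping rather than rejecting unknown fields is what lets an older decoder read messages produced by a newer schema revision.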
skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListClustersRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest_Filters) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *KeyRotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyRotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyRotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.WorkerJoinToken = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerJoinToken = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerUnlockKey", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerUnlockKey = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterVersion == nil { + m.ClusterVersion = &Version{} + } + if err := m.ClusterVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ClusterSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rotation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + 
m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretVersion == nil { + m.SecretVersion = &Version{} + } + if err := m.SecretVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListSecretsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RemoveSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigVersion == nil { + m.ConfigVersion = &Version{} + } + if err := m.ConfigVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsRequest: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListConfigsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ListConfigsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigResponse: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 2137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0xb7, 0xfe, 0xd8, 0x92, 0x9f, 0x6c, 0xd9, 0xee, 0x38, 0xa0, 0x52, 0x82, 0x9d, 0x9a, 0x90, + 0x44, 0xd9, 0x32, 0x12, 0xab, 0xb0, 0x6c, 0x58, 0x8a, 0x3f, 0x6b, 0x3b, 0x9b, 0xd5, 0x7a, 0xe3, + 0xa4, 0xc6, 0xc9, 0x16, 0x37, 0x95, 0x2c, 0xb5, 0xbd, 0x13, 0xc9, 0x1a, 0x31, 0x33, 0xf2, 0xae, + 0x8b, 0x0b, 0x50, 0xcb, 0x81, 0x0f, 0x40, 0x15, 0x57, 0xae, 0x1c, 0x38, 0x70, 0xe2, 0xc0, 0x07, + 0x48, 0x71, 0xe2, 0xc8, 0xc9, 0xb0, 0xaa, 0x82, 0xe2, 0xc4, 0x67, 0xa0, 0xba, 0xfb, 0xf5, 0xfc, + 0x53, 0xcf, 0x8c, 0x24, 0xab, 0xca, 0x39, 0x59, 0xd3, 0xf3, 0x7b, 0xfd, 0x5e, 0xf7, 0xfb, 0xf5, + 0x6f, 0xba, 0x5f, 0x1b, 0x76, 0x4e, 0x0d, 0xe7, 0xf3, 0xe1, 0x71, 0xb5, 0x6d, 0x9e, 0xd5, 0x3a, + 0x66, 0xbb, 0x4b, 0xad, 0x9a, 0xfd, 0x45, 0xcb, 0x3a, 0xeb, 0x1a, 0x4e, 0xad, 0x35, 0x30, 0x6a, + 0x6d, 0xb3, 0xef, 0x58, 0x66, 0xaf, 0x3a, 0xb0, 0x4c, 0xc7, 0x24, 0x44, 0x40, 0xaa, 0x12, 0x52, + 0x3d, 0x7f, 0xb7, 0xfc, 0x4e, 0x42, 0x0f, 0xf6, 0x80, 0xb6, 0x6d, 0x61, 0x5f, 0x4e, 0xf2, 0x66, + 0x1e, 0xbf, 0xa6, 0x6d, 0x47, 0xa2, 0x93, 0x7a, 0x76, 0x2e, 0x06, 0x54, 0x62, 0x37, 0x4f, 0xcd, + 0x53, 0x93, 0xff, 0xac, 0xb1, 0x5f, 0xd8, 0xfa, 0x7e, 0x4c, 0x0f, 0x1c, 0x71, 0x3c, 
0x3c, 0xa9, + 0x0d, 0x7a, 0xc3, 0x53, 0xa3, 0x8f, 0x7f, 0x84, 0xa1, 0xf6, 0x1e, 0x14, 0x9f, 0x52, 0xe7, 0xd0, + 0xec, 0x50, 0x9d, 0xfe, 0x7c, 0x48, 0x6d, 0x87, 0xdc, 0x85, 0x5c, 0xdf, 0xec, 0xd0, 0xa6, 0xd1, + 0x29, 0xa5, 0xee, 0xa4, 0x2a, 0xcb, 0xbb, 0x30, 0xba, 0xdc, 0x5e, 0x62, 0x88, 0xc6, 0xbe, 0xbe, + 0xc4, 0x5e, 0x35, 0x3a, 0xda, 0x4f, 0x60, 0xcd, 0x35, 0xb3, 0x07, 0x66, 0xdf, 0xa6, 0x64, 0x07, + 0xb2, 0xec, 0x25, 0x37, 0x2a, 0xd4, 0x4b, 0xd5, 0xf1, 0x19, 0xac, 0x72, 0x3c, 0x47, 0x69, 0xff, + 0xc9, 0xc0, 0xfa, 0xa7, 0x86, 0xcd, 0xbb, 0xb0, 0xa5, 0xeb, 0x8f, 0x20, 0x77, 0x62, 0xf4, 0x1c, + 0x6a, 0xd9, 0xd8, 0xcb, 0x8e, 0xaa, 0x97, 0xb0, 0x59, 0xf5, 0x23, 0x61, 0xa3, 0x4b, 0xe3, 0xf2, + 0x6f, 0x33, 0x90, 0xc3, 0x46, 0xb2, 0x09, 0x8b, 0xfd, 0xd6, 0x19, 0x65, 0x3d, 0x66, 0x2a, 0xcb, + 0xba, 0x78, 0x20, 0x35, 0x28, 0x18, 0x9d, 0xe6, 0xc0, 0xa2, 0x27, 0xc6, 0x97, 0xd4, 0x2e, 0xa5, + 0xd9, 0xbb, 0xdd, 0xe2, 0xe8, 0x72, 0x1b, 0x1a, 0xfb, 0x2f, 0xb0, 0x55, 0x07, 0xa3, 0x23, 0x7f, + 0x93, 0x17, 0xb0, 0xd4, 0x6b, 0x1d, 0xd3, 0x9e, 0x5d, 0xca, 0xdc, 0xc9, 0x54, 0x0a, 0xf5, 0xc7, + 0xd3, 0x44, 0x56, 0xfd, 0x94, 0x9b, 0x3e, 0xe9, 0x3b, 0xd6, 0x85, 0x8e, 0xfd, 0x90, 0x67, 0x50, + 0x38, 0xa3, 0x67, 0xc7, 0xd4, 0xb2, 0x3f, 0x37, 0x06, 0x76, 0x29, 0x7b, 0x27, 0x53, 0x29, 0xd6, + 0x1f, 0x44, 0x4d, 0xdb, 0xd1, 0x80, 0xb6, 0xab, 0xcf, 0x5c, 0xfc, 0x6e, 0x7a, 0x7d, 0x41, 0xf7, + 0xdb, 0x93, 0xef, 0xc3, 0xa2, 0x65, 0xf6, 0xa8, 0x5d, 0x5a, 0xe4, 0x1d, 0xdd, 0x8e, 0x9c, 0x7f, + 0xb3, 0x47, 0xb9, 0xb5, 0x80, 0x93, 0xbb, 0xb0, 0xca, 0xa6, 0xc4, 0x9b, 0x8b, 0x25, 0x3e, 0x4f, + 0x2b, 0xac, 0x51, 0x8e, 0xbe, 0xfc, 0x03, 0x28, 0xf8, 0x86, 0x40, 0xd6, 0x21, 0xd3, 0xa5, 0x17, + 0x82, 0x1e, 0x3a, 0xfb, 0xc9, 0x66, 0xf9, 0xbc, 0xd5, 0x1b, 0xd2, 0x52, 0x9a, 0xb7, 0x89, 0x87, + 0x0f, 0xd2, 0x8f, 0x53, 0xda, 0x1e, 0x6c, 0xf8, 0xa6, 0x05, 0xb9, 0x52, 0x85, 0x45, 0xc6, 0x02, + 0x91, 0x94, 0x38, 0xb2, 0x08, 0x98, 0xf6, 0xc7, 0x14, 0x6c, 0xbc, 0x1a, 0x74, 0x5a, 0x0e, 0x9d, + 0x96, 0xa9, 0xe4, 0xc7, 0xb0, 0xc2, 0x41, 0xe7, 0xd4, 0xb2, 0x0d, 0xb3, 0xcf, 0x03, 0x2c, 0xd4, + 0x6f, 0xa9, 0x3c, 0x7e, 0x26, 0x20, 0x7a, 0x81, 0x19, 0xe0, 0x03, 0xf9, 0x2e, 0x64, 0xd9, 0xc2, + 0x2e, 0x65, 0xb8, 0xdd, 0xed, 0xb8, 0xfc, 0xe8, 0x1c, 0xa9, 0xed, 0x02, 0xf1, 0xc7, 0x3a, 0xd3, + 0xf2, 0x38, 0x84, 0x0d, 0x9d, 0x9e, 0x99, 0xe7, 0xd3, 0x8f, 0x77, 0x13, 0x16, 0x4f, 0x4c, 0xab, + 0x2d, 0x32, 0x91, 0xd7, 0xc5, 0x83, 0xb6, 0x09, 0xc4, 0xdf, 0x9f, 0x88, 0x09, 0x17, 0xff, 0xcb, + 0x96, 0xdd, 0xf5, 0xb9, 0x70, 0x5a, 0x76, 0x37, 0xe4, 0x82, 0x21, 0x98, 0x0b, 0xf6, 0xca, 0x5d, + 0xfc, 0xc2, 0xcc, 0x1b, 0x1d, 0x7b, 0x19, 0x37, 0x3a, 0x8e, 0xe7, 0x28, 0xed, 0xb1, 0x1c, 0xdd, + 0xd4, 0xae, 0xdd, 0x71, 0xf8, 0xbd, 0x6b, 0x7f, 0xcd, 0x0a, 0x31, 0x61, 0x8d, 0x33, 0x88, 0x89, + 0xdf, 0x6c, 0x5c, 0x4c, 0xfe, 0x79, 0x8d, 0x62, 0xa2, 0x8a, 0x4c, 0x29, 0x26, 0x35, 0x28, 0xd8, + 0xd4, 0x3a, 0x37, 0xda, 0x8c, 0x1d, 0x42, 0x4c, 0x30, 0x84, 0x23, 0xd1, 0xdc, 0xd8, 0xb7, 0x75, + 0x40, 0x48, 0xa3, 0x63, 0x93, 0xfb, 0x90, 0x47, 0x2e, 0x09, 0xc5, 0x58, 0xde, 0x2d, 0x8c, 0x2e, + 0xb7, 0x73, 0x82, 0x4c, 0xb6, 0x9e, 0x13, 0x6c, 0xb2, 0xc9, 0xc7, 0x50, 0xec, 0x50, 0xdb, 0xb0, + 0x68, 0xa7, 0x69, 0x3b, 0x2d, 0x07, 0xf5, 0xa1, 0x58, 0xff, 0x56, 0x54, 0x8a, 0x8f, 0x18, 0x8a, + 0x0b, 0xcc, 0x2a, 0x1a, 0xf2, 0x16, 0x85, 0xd0, 0xe4, 0xc6, 0x85, 0x86, 0xdc, 0x06, 0x18, 0x0e, + 0x9a, 0x8e, 0xd9, 0x64, 0xeb, 0xa7, 0x94, 0xe7, 0x14, 0xce, 0x0f, 0x07, 0x2f, 0xcd, 0xfd, 0x96, + 0x43, 0x49, 0x19, 0xf2, 0xd6, 0xb0, 0xef, 0x18, 0x2c, 0x03, 0xcb, 0xdc, 0xda, 0x7d, 0x9e, 0x83, + 0x44, 0xe1, 
0x64, 0x7b, 0x12, 0xc5, 0x38, 0x17, 0x2b, 0x51, 0x9c, 0x84, 0x02, 0xa6, 0x1d, 0xc0, + 0xe6, 0x9e, 0x45, 0x5b, 0x0e, 0xc5, 0x09, 0x97, 0x34, 0x7c, 0x84, 0xfa, 0x21, 0x38, 0xb8, 0xad, + 0xea, 0x06, 0x2d, 0x7c, 0x12, 0x72, 0x08, 0x37, 0x43, 0x9d, 0x61, 0x54, 0xef, 0x41, 0x0e, 0x93, + 0x88, 0x1d, 0xde, 0x8a, 0xe9, 0x50, 0x97, 0x58, 0xed, 0x35, 0x6c, 0x3c, 0xa5, 0x4e, 0x28, 0xb2, + 0x1d, 0x00, 0x8f, 0x33, 0xb8, 0xe6, 0x56, 0x47, 0x97, 0xdb, 0xcb, 0x2e, 0x65, 0xf4, 0x65, 0x97, + 0x31, 0xe4, 0x01, 0xac, 0x19, 0x7d, 0x9b, 0x5a, 0x4e, 0xb3, 0x43, 0x4f, 0x5a, 0xc3, 0x9e, 0x63, + 0xa3, 0xc2, 0x14, 0x45, 0xf3, 0x3e, 0xb6, 0x6a, 0x07, 0x40, 0xfc, 0xbe, 0xae, 0x16, 0xf8, 0x9f, + 0xd3, 0xb0, 0x29, 0xc4, 0xf4, 0x4a, 0xc1, 0xef, 0xc3, 0x9a, 0x44, 0x4f, 0xf1, 0x1d, 0x28, 0xa2, + 0x8d, 0xfc, 0x14, 0x3c, 0x0a, 0x7c, 0x0a, 0x26, 0x4b, 0x25, 0x79, 0x06, 0x79, 0xcb, 0xec, 0xf5, + 0x8e, 0x5b, 0xed, 0x6e, 0x29, 0x7b, 0x27, 0x55, 0x29, 0xd6, 0xdf, 0x55, 0x19, 0xaa, 0x06, 0x59, + 0xd5, 0xd1, 0x50, 0x77, 0xbb, 0xd0, 0x34, 0xc8, 0xcb, 0x56, 0x92, 0x87, 0xec, 0xe1, 0xf3, 0xc3, + 0x27, 0xeb, 0x0b, 0x64, 0x05, 0xf2, 0x2f, 0xf4, 0x27, 0x9f, 0x35, 0x9e, 0xbf, 0x3a, 0x5a, 0x4f, + 0x31, 0xf6, 0x84, 0xba, 0xbb, 0x5a, 0x12, 0xf6, 0x61, 0x53, 0x88, 0xee, 0x55, 0x72, 0xa0, 0x7d, + 0x13, 0x6e, 0x86, 0x7a, 0x41, 0xf5, 0xfe, 0x2a, 0x03, 0x37, 0xd8, 0xfa, 0xc3, 0x76, 0x57, 0xc0, + 0x1b, 0x61, 0x01, 0xaf, 0x45, 0xc9, 0x64, 0xc8, 0x72, 0x5c, 0xc3, 0xff, 0x90, 0x9e, 0xbb, 0x86, + 0x1f, 0x85, 0x34, 0xfc, 0x87, 0x53, 0x06, 0xa7, 0x94, 0xf1, 0x31, 0x8d, 0xcc, 0x2a, 0x34, 0xd2, + 0xaf, 0x82, 0x8b, 0xf3, 0x53, 0xc1, 0xe7, 0xb0, 0x19, 0x0c, 0x17, 0x49, 0xf3, 0x3e, 0xe4, 0x31, + 0x89, 0x52, 0x0b, 0x63, 0x59, 0xe3, 0x82, 0x3d, 0x45, 0x3c, 0xa4, 0xce, 0x17, 0xa6, 0xd5, 0x9d, + 0x42, 0x11, 0xd1, 0x42, 0xa5, 0x88, 0x6e, 0x67, 0x1e, 0xa7, 0xfb, 0xa2, 0x29, 0x8e, 0xd3, 0xd2, + 0x4a, 0x62, 0xb5, 0x57, 0x5c, 0x11, 0x43, 0x91, 0x11, 0xc8, 0xb2, 0x99, 0xc6, 0xf9, 0xe2, 0xbf, + 0x19, 0xc9, 0xd1, 0x86, 0x91, 0x3c, 0xed, 0x91, 0x1c, 0x6d, 0x19, 0xc9, 0x11, 0xd0, 0xe8, 0xa0, + 0xf8, 0xcd, 0x29, 0xc6, 0x9f, 0xc9, 0x75, 0x37, 0xf7, 0x30, 0xdd, 0xb5, 0x18, 0x8a, 0x54, 0xfb, + 0x6f, 0x5a, 0xac, 0x45, 0x6c, 0x9f, 0x61, 0x2d, 0x86, 0x2c, 0xc7, 0xd7, 0xe2, 0x6f, 0xae, 0x71, + 0x2d, 0x46, 0x04, 0x37, 0xf3, 0x5a, 0x9c, 0xc3, 0x7a, 0xf3, 0x42, 0xf2, 0xd6, 0x1b, 0x26, 0x2a, + 0x76, 0xbd, 0xc9, 0xcc, 0xb9, 0x60, 0xed, 0x43, 0x4e, 0xe9, 0xbd, 0xde, 0xd0, 0x76, 0xa8, 0xe5, + 0xd3, 0xe8, 0xb6, 0x68, 0x09, 0x69, 0x34, 0xe2, 0x18, 0x2f, 0x10, 0xe0, 0xd2, 0xd7, 0xed, 0xc2, + 0xa3, 0x2f, 0x42, 0xe2, 0xe8, 0x2b, 0xad, 0x24, 0xd6, 0xe5, 0x12, 0xbe, 0x98, 0x81, 0x4b, 0x21, + 0xcb, 0xb7, 0x8b, 0x4b, 0x11, 0xc1, 0x5d, 0x27, 0x97, 0xbc, 0x90, 0x3c, 0x2e, 0x61, 0x36, 0x62, + 0xb9, 0x24, 0x53, 0xe7, 0x82, 0xb5, 0xdf, 0xa5, 0xa0, 0x70, 0x40, 0x2f, 0x74, 0xd3, 0x69, 0x39, + 0x6c, 0xeb, 0xf3, 0x0e, 0x6c, 0x30, 0x92, 0x51, 0xab, 0xf9, 0xda, 0x34, 0xfa, 0x4d, 0xc7, 0xec, + 0xd2, 0x3e, 0x0f, 0x2d, 0xaf, 0xaf, 0x89, 0x17, 0x9f, 0x98, 0x46, 0xff, 0x25, 0x6b, 0x26, 0x3b, + 0x40, 0xce, 0x5a, 0xfd, 0xd6, 0x69, 0x10, 0x2c, 0x36, 0x8b, 0xeb, 0xf8, 0x46, 0x89, 0x1e, 0xf6, + 0x7b, 0x66, 0xbb, 0xdb, 0x64, 0xa3, 0xce, 0x04, 0xd0, 0xaf, 0xf8, 0x8b, 0x03, 0x7a, 0xa1, 0xfd, + 0xda, 0xdd, 0x0f, 0x5e, 0x85, 0xe7, 0x6c, 0x3f, 0x28, 0xd1, 0xd3, 0xec, 0x07, 0xd1, 0x66, 0x8a, + 0xfd, 0x20, 0x7a, 0xf7, 0xed, 0x07, 0x3f, 0x64, 0xfb, 0x41, 0x31, 0xab, 0x7c, 0x3f, 0x18, 0x61, + 0xe8, 0x9b, 0xfc, 0xdd, 0xec, 0x9b, 0xcb, 0xed, 0x05, 0xdd, 0x35, 0xf3, 0xf6, 0x77, 0x73, 0x5a, + 0xa8, 0x3f, 0x82, 0x75, 0xbe, 0x63, 
0x6f, 0x5b, 0xd4, 0x91, 0xf3, 0xf9, 0x10, 0x96, 0x6d, 0xde, + 0xe0, 0x4d, 0xe7, 0xca, 0xe8, 0x72, 0x3b, 0x2f, 0x50, 0x8d, 0x7d, 0xf6, 0x9d, 0xe7, 0xbf, 0x3a, + 0xda, 0x53, 0x3c, 0x5c, 0x08, 0x73, 0x0c, 0xa5, 0x0e, 0x4b, 0x02, 0x80, 0x91, 0x94, 0xd5, 0x7b, + 0x06, 0x6e, 0x83, 0x48, 0xed, 0x2f, 0x29, 0xb8, 0x21, 0x37, 0xae, 0xb3, 0xc5, 0x42, 0x76, 0xa1, + 0x88, 0xd0, 0x29, 0xf2, 0xba, 0x2a, 0x4c, 0x64, 0x5a, 0xeb, 0x81, 0xb4, 0x6e, 0x45, 0x07, 0xee, + 0xdb, 0x9e, 0x7c, 0xe2, 0x1d, 0x53, 0xae, 0x3c, 0x0d, 0xff, 0x4e, 0x03, 0x11, 0x3b, 0x31, 0xf6, + 0xe8, 0xca, 0xe6, 0xc7, 0x61, 0xd9, 0xac, 0x46, 0xef, 0x38, 0xfd, 0x86, 0xe3, 0xaa, 0xf9, 0xd5, + 0xfc, 0x55, 0x53, 0x0f, 0xa9, 0xe6, 0x07, 0xd3, 0xc5, 0x76, 0x2d, 0xa2, 0x79, 0x20, 0x8f, 0x1d, + 0x18, 0x11, 0xa6, 0xec, 0x7b, 0xec, 0x90, 0xc4, 0x9b, 0x50, 0x32, 0xe3, 0x72, 0x26, 0xa1, 0x5a, + 0x03, 0x6e, 0xc8, 0x13, 0xbb, 0x9f, 0xba, 0xf5, 0xc0, 0x5e, 0x77, 0x62, 0x2e, 0x05, 0xbb, 0xba, + 0x02, 0x97, 0x7e, 0x0a, 0x37, 0xe4, 0xa1, 0x6b, 0xc6, 0xd5, 0xfd, 0x0d, 0xef, 0xf0, 0xe7, 0x8f, + 0x06, 0x45, 0x63, 0xcf, 0xec, 0x9f, 0x18, 0xa7, 0xbe, 0x6e, 0xdb, 0xbc, 0x21, 0xd4, 0xad, 0x40, + 0xb1, 0x6e, 0xc5, 0x6b, 0x57, 0x34, 0xa4, 0xb9, 0x37, 0x42, 0x01, 0x88, 0x1b, 0x21, 0xda, 0x20, + 0xd2, 0x27, 0x1a, 0xb3, 0xc6, 0xc2, 0x44, 0x03, 0xa1, 0xd3, 0x88, 0x86, 0x30, 0x99, 0x42, 0x34, + 0x84, 0x67, 0x95, 0x68, 0xcc, 0x61, 0x1a, 0xa4, 0x68, 0x88, 0xe6, 0x19, 0x44, 0x23, 0x68, 0xf8, + 0x76, 0x89, 0x86, 0x3a, 0xb6, 0xeb, 0x14, 0x0d, 0x37, 0x22, 0x4f, 0x34, 0x44, 0x22, 0x62, 0x45, + 0x03, 0x73, 0x26, 0xa1, 0x9e, 0x68, 0x04, 0xa9, 0x3b, 0x81, 0x68, 0xa8, 0xb8, 0x14, 0xec, 0xea, + 0x0a, 0x5c, 0x72, 0x45, 0x63, 0xe6, 0xd5, 0xed, 0x8a, 0x46, 0x30, 0x9a, 0xfa, 0xaf, 0x6e, 0x41, + 0x6e, 0x4f, 0x5c, 0xb4, 0x12, 0x03, 0x72, 0x78, 0x85, 0x48, 0x34, 0x55, 0x50, 0xc1, 0x6b, 0xc9, + 0xf2, 0xdd, 0x58, 0x0c, 0x8a, 0xd2, 0xcd, 0xbf, 0xfd, 0xe9, 0x7f, 0xbf, 0x4f, 0xaf, 0xc1, 0x2a, + 0x07, 0x7d, 0x07, 0xb7, 0x8f, 0xc4, 0x84, 0x65, 0xf7, 0x0e, 0x8a, 0x7c, 0x7b, 0x92, 0x9b, 0xbb, + 0xf2, 0xbd, 0x04, 0x54, 0xbc, 0x43, 0x0b, 0xc0, 0xbb, 0x02, 0x22, 0xf7, 0xa2, 0x0b, 0x7e, 0xfe, + 0x11, 0xde, 0x4f, 0x82, 0x25, 0xfa, 0xf4, 0xae, 0x78, 0xd4, 0x3e, 0xc7, 0xae, 0x94, 0xd4, 0x3e, + 0x15, 0x37, 0x45, 0x11, 0x3e, 0x45, 0x0e, 0x5f, 0xb6, 0xec, 0x6e, 0x64, 0x0e, 0x7d, 0x57, 0x3c, + 0x91, 0x39, 0x0c, 0x5c, 0xe6, 0xc4, 0xe7, 0x90, 0x17, 0xe9, 0xa3, 0x73, 0xe8, 0xbf, 0x30, 0x89, + 0xce, 0x61, 0xa0, 0xd2, 0x9f, 0x38, 0x9f, 0x7c, 0x78, 0x31, 0xf3, 0xe9, 0x1f, 0xe1, 0xfd, 0x24, + 0x58, 0xa2, 0x4f, 0xaf, 0x76, 0xae, 0xf6, 0x39, 0x56, 0xc7, 0x57, 0xfb, 0x1c, 0x2f, 0xc1, 0x47, + 0xf9, 0xfc, 0x12, 0x56, 0xfc, 0x75, 0x3f, 0xf2, 0x60, 0xc2, 0x42, 0x66, 0xb9, 0x92, 0x0c, 0x8c, + 0xf7, 0xfc, 0x0b, 0x58, 0x0d, 0xdc, 0x72, 0x10, 0x65, 0x8f, 0xaa, 0x5b, 0x95, 0xf2, 0xc3, 0x09, + 0x90, 0x89, 0xce, 0x03, 0x45, 0x72, 0xb5, 0x73, 0x55, 0x59, 0x5e, 0xed, 0x5c, 0x59, 0x71, 0x8f, + 0x71, 0x1e, 0xa8, 0x85, 0xab, 0x9d, 0xab, 0x8a, 0xee, 0x6a, 0xe7, 0xea, 0xc2, 0x7a, 0x2c, 0xc9, + 0xb0, 0x7e, 0x14, 0x49, 0xb2, 0x60, 0xcd, 0x31, 0x92, 0x64, 0xe1, 0x02, 0x62, 0x3c, 0xc9, 0x64, + 0xb1, 0x2b, 0x9a, 0x64, 0xa1, 0x0a, 0x5d, 0x34, 0xc9, 0xc2, 0x75, 0xb3, 0x44, 0x92, 0xc9, 0x01, + 0xc7, 0x90, 0x2c, 0x34, 0xe6, 0x87, 0x13, 0x20, 0x27, 0xcc, 0x73, 0xac, 0x73, 0x55, 0x91, 0x37, + 0x2e, 0xcf, 0x13, 0x3a, 0x17, 0x79, 0xc6, 0xd3, 0x7e, 0x64, 0x9e, 0x83, 0x75, 0x94, 0xc8, 0x3c, + 0x87, 0x4a, 0x0d, 0x09, 0x79, 0x96, 0x85, 0xa8, 0xe8, 0x3c, 0x87, 0xaa, 0x67, 0xd1, 0x79, 0x0e, + 0xd7, 0xb4, 0x12, 0xd7, 0xb3, 0x1c, 0x70, 0xcc, 0x7a, 0x0e, 
0x8d, 0xf9, 0xe1, 0x04, 0xc8, 0xc4, + 0x8f, 0x93, 0x5b, 0x02, 0x51, 0x7f, 0x9c, 0xc2, 0x05, 0x96, 0xf2, 0xbd, 0x04, 0x54, 0xe2, 0x3c, + 0xfb, 0xeb, 0x0d, 0xea, 0x79, 0x56, 0xd4, 0x52, 0xca, 0x95, 0x64, 0x60, 0xbc, 0xe7, 0x21, 0x14, + 0x7c, 0xa7, 0x66, 0x72, 0x7f, 0xb2, 0x83, 0x7e, 0xf9, 0x41, 0x22, 0x2e, 0x71, 0xc0, 0xfe, 0x43, + 0xb1, 0x7a, 0xc0, 0x8a, 0x13, 0x78, 0xb9, 0x92, 0x0c, 0x4c, 0xf4, 0xec, 0x3f, 0x00, 0xab, 0x3d, + 0x2b, 0x0e, 0xd9, 0xe5, 0x4a, 0x32, 0x70, 0x12, 0x56, 0x89, 0x2d, 0x74, 0x24, 0xab, 0x02, 0x7b, + 0xf4, 0x48, 0x56, 0x05, 0xf7, 0xe1, 0x89, 0xac, 0x42, 0x9f, 0x31, 0xac, 0x0a, 0xba, 0xad, 0x24, + 0x03, 0x27, 0x62, 0x15, 0x1e, 0xab, 0xa2, 0x59, 0x15, 0x3c, 0x09, 0x46, 0xb3, 0x2a, 0x74, 0x3e, + 0x4b, 0x64, 0x55, 0xdc, 0x80, 0x15, 0x47, 0xb4, 0x38, 0x56, 0x4d, 0x3c, 0xd5, 0xfe, 0x13, 0x52, + 0x1c, 0xab, 0x26, 0xf0, 0xac, 0x3a, 0x6c, 0x45, 0x78, 0xde, 0x2d, 0xbd, 0xf9, 0x7a, 0x6b, 0xe1, + 0x1f, 0x5f, 0x6f, 0x2d, 0xfc, 0x72, 0xb4, 0x95, 0x7a, 0x33, 0xda, 0x4a, 0xfd, 0x7d, 0xb4, 0x95, + 0xfa, 0xd7, 0x68, 0x2b, 0x75, 0xbc, 0xc4, 0xff, 0x25, 0xf4, 0xd1, 0xff, 0x03, 0x00, 0x00, 0xff, + 0xff, 0x47, 0x18, 0x50, 0x6c, 0x2b, 0x2b, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go b/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go new file mode 100644 index 0000000000..64e370ca85 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go @@ -0,0 +1,53 @@ +package deepcopy + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/types" +) + +// CopierFrom can be implemented if an object knows how to copy another into itself. +type CopierFrom interface { + // Copy takes the fields from src and copies them into the target object. + // + // Calling this method with a nil receiver or a nil src may panic. + CopyFrom(src interface{}) +} + +// Copy copies src into dst. dst and src must have the same type. +// +// If the type has a copy function defined, it will be used. +// +// Default implementations for builtin types and well known protobuf types may +// be provided. +// +// If the copy cannot be performed, this function will panic. Make sure to test +// types that use this function. +func Copy(dst, src interface{}) { + switch dst := dst.(type) { + case *types.Any: + src := src.(*types.Any) + dst.TypeUrl = src.TypeUrl + if src.Value != nil { + dst.Value = make([]byte, len(src.Value)) + copy(dst.Value, src.Value) + } else { + dst.Value = nil + } + case *types.Duration: + src := src.(*types.Duration) + *dst = *src + case *time.Duration: + src := src.(*time.Duration) + *dst = *src + case *types.Timestamp: + src := src.(*types.Timestamp) + *dst = *src + case CopierFrom: + dst.CopyFrom(src) + default: + panic(fmt.Sprintf("Copy for %T not implemented", dst)) + } + +} diff --git a/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go new file mode 100644 index 0000000000..b22f69ef68 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go @@ -0,0 +1,3851 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/dispatcher.proto +// DO NOT EDIT! 
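The deepcopy package added above never uses reflection: Copy type-switches on a handful of well-known protobuf types and defers to the CopierFrom interface for everything else, which is why each generated message in the files below carries a Copy/CopyFrom pair. A minimal standalone sketch of that pattern, assuming only the standard library (Doc and its fields are hypothetical stand-ins for a generated message, not types from this repository):

// Sketch of the deepcopy dispatch pattern, standard library only.
package main

import "fmt"

// CopierFrom mirrors the interface defined in copy.go: a type that
// knows how to copy another value of its own type into itself.
type CopierFrom interface {
	CopyFrom(src interface{})
}

// Copy dispatches like deepcopy.Copy: byte slices are cloned so the
// destination owns its memory, and anything implementing CopierFrom
// is asked to copy itself. Unknown types panic, as in the original.
func Copy(dst, src interface{}) {
	switch dst := dst.(type) {
	case *[]byte:
		src := src.(*[]byte)
		if *src != nil {
			*dst = make([]byte, len(*src))
			copy(*dst, *src)
		} else {
			*dst = nil
		}
	case CopierFrom:
		dst.CopyFrom(src)
	default:
		panic(fmt.Sprintf("Copy for %T not implemented", dst))
	}
}

// Doc is a hypothetical message with one field that aliases memory.
type Doc struct {
	Name string
	Data []byte
}

func (m *Doc) CopyFrom(src interface{}) {
	o := src.(*Doc)
	*m = *o                // shallow copy of all value fields first
	Copy(&m.Data, &o.Data) // then deep-copy anything that aliases
}

func main() {
	a := &Doc{Name: "a", Data: []byte{1, 2}}
	b := &Doc{}
	b.CopyFrom(a)
	a.Data[0] = 9
	fmt.Println(b.Data[0]) // 1: b owns its own slice
}

The shape — a shallow *m = *o followed by targeted deep copies of only the fields that alias memory — is exactly what the generated CopyFrom methods for SessionMessage and friends implement further down.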
+ +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type AssignmentChange_AssignmentAction int32 + +const ( + AssignmentChange_AssignmentActionUpdate AssignmentChange_AssignmentAction = 0 + AssignmentChange_AssignmentActionRemove AssignmentChange_AssignmentAction = 1 +) + +var AssignmentChange_AssignmentAction_name = map[int32]string{ + 0: "UPDATE", + 1: "REMOVE", +} +var AssignmentChange_AssignmentAction_value = map[string]int32{ + "UPDATE": 0, + "REMOVE": 1, +} + +func (x AssignmentChange_AssignmentAction) String() string { + return proto.EnumName(AssignmentChange_AssignmentAction_name, int32(x)) +} +func (AssignmentChange_AssignmentAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{10, 0} +} + +// AssignmentType specifies whether this assignment message carries +// the full state, or is an update to an existing state. +type AssignmentsMessage_Type int32 + +const ( + AssignmentsMessage_COMPLETE AssignmentsMessage_Type = 0 + AssignmentsMessage_INCREMENTAL AssignmentsMessage_Type = 1 +) + +var AssignmentsMessage_Type_name = map[int32]string{ + 0: "COMPLETE", + 1: "INCREMENTAL", +} +var AssignmentsMessage_Type_value = map[string]int32{ + "COMPLETE": 0, + "INCREMENTAL": 1, +} + +func (x AssignmentsMessage_Type) String() string { + return proto.EnumName(AssignmentsMessage_Type_name, int32(x)) +} +func (AssignmentsMessage_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{11, 0} +} + +// SessionRequest starts a session. +type SessionRequest struct { + Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // SessionID can be provided to attempt resuming an existing session. If the + // SessionID is empty or invalid, a new SessionID will be assigned. + // + // See SessionMessage.SessionID for details. + SessionID string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *SessionRequest) Reset() { *m = SessionRequest{} } +func (*SessionRequest) ProtoMessage() {} +func (*SessionRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{0} } + +// SessionMessage instructs an agent on various actions as part of the current +// session. An agent should act immediately on the contents. +type SessionMessage struct { + // SessionID is allocated after a successful registration. It should be + // used on all RPC calls after registration. 
A dispatcher may choose to + // change the SessionID, at which time an agent must re-register and obtain + // a new one. + // + // All Dispatcher calls after register should include the SessionID. If the + // Dispatcher so chooses, it may reject the call with an InvalidArgument + // error code, at which time the agent should call Register to start a new + // session. + // + // As a rule, once an agent has a SessionID, it should never save it to + // disk or try to otherwise reuse. If the agent loses its SessionID, it + // must start a new session through a call to Register. A Dispatcher may + // choose to reuse the SessionID, if it sees fit, but it is not advised. + // + // The actual implementation of the SessionID is Dispatcher specific and + // should be treated as opaque by agents. + // + // From a Dispatcher perspective, there are many ways to use the SessionID + // to ensure uniqueness of a set of client RPC calls. One method is to keep + // the SessionID unique to every call to Register in a single Dispatcher + // instance. This ensures that the SessionID represents the unique + // session from a single Agent to Manager. If the Agent restarts, we + // allocate a new session, since the restarted Agent is not aware of the + // new SessionID. + // + // The most compelling use case is to support duplicate node detection. If + // one clones a virtual machine, including certificate material, two nodes + // may end up with the same identity. This can also happen if two identical + // agent processes are coming from the same node. If the SessionID is + // replicated through the cluster, we can immediately detect the condition + // and address it. + // + // Extending from the case above, we can actually detect a compromised + // identity. Coupled with provisions to rebuild node identity, we can ban + // the compromised node identity and have the nodes re-authenticate and + // build a new identity. At this time, an administrator can then + // re-authorize the compromised nodes, if it was a mistake or ensure that a + // misbehaved node can no longer connect to the cluster. + // + // We considered placing this field in a GRPC header. Because this is a + // critical feature of the protocol, we thought it should be represented + // directly in the RPC message set. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + // Node identifies the registering node. + Node *Node `protobuf:"bytes,2,opt,name=node" json:"node,omitempty"` + // Managers provides a weight list of alternative dispatchers + Managers []*WeightedPeer `protobuf:"bytes,3,rep,name=managers" json:"managers,omitempty"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,4,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Which root certificates to trust + RootCA []byte `protobuf:"bytes,5,opt,name=RootCA,proto3" json:"RootCA,omitempty"` +} + +func (m *SessionMessage) Reset() { *m = SessionMessage{} } +func (*SessionMessage) ProtoMessage() {} +func (*SessionMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{1} } + +// HeartbeatRequest provides identifying properties for a single heartbeat. 
+type HeartbeatRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} } +func (*HeartbeatRequest) ProtoMessage() {} +func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{2} } + +type HeartbeatResponse struct { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. + Period time.Duration `protobuf:"bytes,1,opt,name=period,stdduration" json:"period"` +} + +func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } +func (*HeartbeatResponse) ProtoMessage() {} +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{3} } + +type UpdateTaskStatusRequest struct { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Updates []*UpdateTaskStatusRequest_TaskStatusUpdate `protobuf:"bytes,3,rep,name=updates" json:"updates,omitempty"` +} + +func (m *UpdateTaskStatusRequest) Reset() { *m = UpdateTaskStatusRequest{} } +func (*UpdateTaskStatusRequest) ProtoMessage() {} +func (*UpdateTaskStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4} +} + +type UpdateTaskStatusRequest_TaskStatusUpdate struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Status *TaskStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Reset() { + *m = UpdateTaskStatusRequest_TaskStatusUpdate{} +} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) ProtoMessage() {} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4, 0} +} + +type UpdateTaskStatusResponse struct { +} + +func (m *UpdateTaskStatusResponse) Reset() { *m = UpdateTaskStatusResponse{} } +func (*UpdateTaskStatusResponse) ProtoMessage() {} +func (*UpdateTaskStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{5} +} + +type TasksRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{6} } + +type TasksMessage struct { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. 
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *TasksMessage) Reset() { *m = TasksMessage{} } +func (*TasksMessage) ProtoMessage() {} +func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} } + +type AssignmentsRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *AssignmentsRequest) Reset() { *m = AssignmentsRequest{} } +func (*AssignmentsRequest) ProtoMessage() {} +func (*AssignmentsRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{8} } + +type Assignment struct { + // Types that are valid to be assigned to Item: + // *Assignment_Task + // *Assignment_Secret + // *Assignment_Config + Item isAssignment_Item `protobuf_oneof:"item"` +} + +func (m *Assignment) Reset() { *m = Assignment{} } +func (*Assignment) ProtoMessage() {} +func (*Assignment) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{9} } + +type isAssignment_Item interface { + isAssignment_Item() + MarshalTo([]byte) (int, error) + Size() int +} + +type Assignment_Task struct { + Task *Task `protobuf:"bytes,1,opt,name=task,oneof"` +} +type Assignment_Secret struct { + Secret *Secret `protobuf:"bytes,2,opt,name=secret,oneof"` +} +type Assignment_Config struct { + Config *Config `protobuf:"bytes,3,opt,name=config,oneof"` +} + +func (*Assignment_Task) isAssignment_Item() {} +func (*Assignment_Secret) isAssignment_Item() {} +func (*Assignment_Config) isAssignment_Item() {} + +func (m *Assignment) GetItem() isAssignment_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *Assignment) GetTask() *Task { + if x, ok := m.GetItem().(*Assignment_Task); ok { + return x.Task + } + return nil +} + +func (m *Assignment) GetSecret() *Secret { + if x, ok := m.GetItem().(*Assignment_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Assignment) GetConfig() *Config { + if x, ok := m.GetItem().(*Assignment_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Assignment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Assignment_OneofMarshaler, _Assignment_OneofUnmarshaler, _Assignment_OneofSizer, []interface{}{ + (*Assignment_Task)(nil), + (*Assignment_Secret)(nil), + (*Assignment_Config)(nil), + } +} + +func _Assignment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Assignment_Secret: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Assignment_Config: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Assignment.Item has unexpected type %T", x) + } + return nil +} + +func _Assignment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Assignment) + switch tag { + case 1: // item.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Task{msg} + return true, err + case 2: // item.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Secret{msg} + return true, err + case 3: // item.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Assignment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type AssignmentChange struct { + Assignment *Assignment `protobuf:"bytes,1,opt,name=assignment" json:"assignment,omitempty"` + Action AssignmentChange_AssignmentAction `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.AssignmentChange_AssignmentAction" json:"action,omitempty"` +} + +func (m *AssignmentChange) Reset() { *m = AssignmentChange{} } +func (*AssignmentChange) ProtoMessage() {} +func (*AssignmentChange) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{10} } + +type AssignmentsMessage struct { + Type AssignmentsMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.AssignmentsMessage_Type" json:"type,omitempty"` + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. 
+ AppliesTo string `protobuf:"bytes,2,opt,name=applies_to,json=appliesTo,proto3" json:"applies_to,omitempty"` + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + ResultsIn string `protobuf:"bytes,3,opt,name=results_in,json=resultsIn,proto3" json:"results_in,omitempty"` + // AssignmentChange is a set of changes to apply on this node. + Changes []*AssignmentChange `protobuf:"bytes,4,rep,name=changes" json:"changes,omitempty"` +} + +func (m *AssignmentsMessage) Reset() { *m = AssignmentsMessage{} } +func (*AssignmentsMessage) ProtoMessage() {} +func (*AssignmentsMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{11} } + +func init() { + proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest") + proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage") + proto.RegisterType((*HeartbeatRequest)(nil), "docker.swarmkit.v1.HeartbeatRequest") + proto.RegisterType((*HeartbeatResponse)(nil), "docker.swarmkit.v1.HeartbeatResponse") + proto.RegisterType((*UpdateTaskStatusRequest)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest") + proto.RegisterType((*UpdateTaskStatusRequest_TaskStatusUpdate)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate") + proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse") + proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest") + proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage") + proto.RegisterType((*AssignmentsRequest)(nil), "docker.swarmkit.v1.AssignmentsRequest") + proto.RegisterType((*Assignment)(nil), "docker.swarmkit.v1.Assignment") + proto.RegisterType((*AssignmentChange)(nil), "docker.swarmkit.v1.AssignmentChange") + proto.RegisterType((*AssignmentsMessage)(nil), "docker.swarmkit.v1.AssignmentsMessage") + proto.RegisterEnum("docker.swarmkit.v1.AssignmentChange_AssignmentAction", AssignmentChange_AssignmentAction_name, AssignmentChange_AssignmentAction_value) + proto.RegisterEnum("docker.swarmkit.v1.AssignmentsMessage_Type", AssignmentsMessage_Type_name, AssignmentsMessage_Type_value) +} + +type authenticatedWrapperDispatcherServer struct { + local DispatcherServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperDispatcherServer(local DispatcherServer, authorize func(context.Context, []string) error) DispatcherServer { + return &authenticatedWrapperDispatcherServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Session(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream 
Dispatcher_TasksServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Tasks(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Assignments(r, stream) +} + +func (m *SessionRequest) Copy() *SessionRequest { + if m == nil { + return nil + } + o := &SessionRequest{} + o.CopyFrom(m) + return o +} + +func (m *SessionRequest) CopyFrom(src interface{}) { + + o := src.(*SessionRequest) + *m = *o + if o.Description != nil { + m.Description = &NodeDescription{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description) + } +} + +func (m *SessionMessage) Copy() *SessionMessage { + if m == nil { + return nil + } + o := &SessionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SessionMessage) CopyFrom(src interface{}) { + + o := src.(*SessionMessage) + *m = *o + if o.Node != nil { + m.Node = &Node{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + } + if o.Managers != nil { + m.Managers = make([]*WeightedPeer, len(o.Managers)) + for i := range m.Managers { + m.Managers[i] = &WeightedPeer{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Managers[i], o.Managers[i]) + } + } + + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.RootCA != nil { + m.RootCA = make([]byte, len(o.RootCA)) + copy(m.RootCA, o.RootCA) + } +} + +func (m *HeartbeatRequest) Copy() *HeartbeatRequest { + if m == nil { + return nil + } + o := &HeartbeatRequest{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatRequest) CopyFrom(src interface{}) { + + o := src.(*HeartbeatRequest) + *m = *o +} + +func (m *HeartbeatResponse) Copy() *HeartbeatResponse { + if m == nil { + return nil + } + o := &HeartbeatResponse{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatResponse) CopyFrom(src interface{}) { + + o := src.(*HeartbeatResponse) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Period, &o.Period) +} + +func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest) + *m = *o + if o.Updates != nil { + m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) + for i := range m.Updates { + m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Updates[i], o.Updates[i]) + } + } + +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Copy() *UpdateTaskStatusRequest_TaskStatusUpdate { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest_TaskStatusUpdate{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest_TaskStatusUpdate) + *m = *o + if o.Status != nil { + m.Status = &TaskStatus{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Status, o.Status) + } +} + +func (m *UpdateTaskStatusResponse) Copy() 
*UpdateTaskStatusResponse { + if m == nil { + return nil + } + o := &UpdateTaskStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusResponse) CopyFrom(src interface{}) {} +func (m *TasksRequest) Copy() *TasksRequest { + if m == nil { + return nil + } + o := &TasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *TasksRequest) CopyFrom(src interface{}) { + + o := src.(*TasksRequest) + *m = *o +} + +func (m *TasksMessage) Copy() *TasksMessage { + if m == nil { + return nil + } + o := &TasksMessage{} + o.CopyFrom(m) + return o +} + +func (m *TasksMessage) CopyFrom(src interface{}) { + + o := src.(*TasksMessage) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *AssignmentsRequest) Copy() *AssignmentsRequest { + if m == nil { + return nil + } + o := &AssignmentsRequest{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsRequest) CopyFrom(src interface{}) { + + o := src.(*AssignmentsRequest) + *m = *o +} + +func (m *Assignment) Copy() *Assignment { + if m == nil { + return nil + } + o := &Assignment{} + o.CopyFrom(m) + return o +} + +func (m *Assignment) CopyFrom(src interface{}) { + + o := src.(*Assignment) + *m = *o + if o.Item != nil { + switch o.Item.(type) { + case *Assignment_Task: + v := Assignment_Task{ + Task: &Task{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + m.Item = &v + case *Assignment_Secret: + v := Assignment_Secret{ + Secret: &Secret{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + m.Item = &v + case *Assignment_Config: + v := Assignment_Config{ + Config: &Config{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + m.Item = &v + } + } + +} + +func (m *AssignmentChange) Copy() *AssignmentChange { + if m == nil { + return nil + } + o := &AssignmentChange{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentChange) CopyFrom(src interface{}) { + + o := src.(*AssignmentChange) + *m = *o + if o.Assignment != nil { + m.Assignment = &Assignment{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Assignment, o.Assignment) + } +} + +func (m *AssignmentsMessage) Copy() *AssignmentsMessage { + if m == nil { + return nil + } + o := &AssignmentsMessage{} + o.CopyFrom(m) + return o +} + +func (m *AssignmentsMessage) CopyFrom(src interface{}) { + + o := src.(*AssignmentsMessage) + *m = *o + if o.Changes != nil { + m.Changes = make([]*AssignmentChange, len(o.Changes)) + for i := range m.Changes { + m.Changes[i] = &AssignmentChange{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Changes[i], o.Changes[i]) + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Dispatcher service + +type DispatcherClient interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. 
+ Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. + Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) +} + +type dispatcherClient struct { + cc *grpc.ClientConn +} + +func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient { + return &dispatcherClient{cc} +} + +func (c *dispatcherClient) Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Dispatcher/Session", opts...) + if err != nil { + return nil, err + } + x := &dispatcherSessionClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_SessionClient interface { + Recv() (*SessionMessage, error) + grpc.ClientStream +} + +type dispatcherSessionClient struct { + grpc.ClientStream +} + +func (x *dispatcherSessionClient) Recv() (*SessionMessage, error) { + m := new(SessionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) { + out := new(HeartbeatResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/Heartbeat", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) { + out := new(UpdateTaskStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.Dispatcher/Tasks", opts...) + if err != nil { + return nil, err + } + x := &dispatcherTasksClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_TasksClient interface { + Recv() (*TasksMessage, error) + grpc.ClientStream +} + +type dispatcherTasksClient struct { + grpc.ClientStream +} + +func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) { + m := new(TasksMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[2], c.cc, "/docker.swarmkit.v1.Dispatcher/Assignments", opts...) + if err != nil { + return nil, err + } + x := &dispatcherAssignmentsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_AssignmentsClient interface { + Recv() (*AssignmentsMessage, error) + grpc.ClientStream +} + +type dispatcherAssignmentsClient struct { + grpc.ClientStream +} + +func (x *dispatcherAssignmentsClient) Recv() (*AssignmentsMessage, error) { + m := new(AssignmentsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Dispatcher service + +type DispatcherServer interface { + // Session starts an agent session with the dispatcher. The session is + // started after the first SessionMessage is received. + // + // Once started, the agent is controlled with a stream of SessionMessage. + // Agents should list on the stream at all times for instructions. + Session(*SessionRequest, Dispatcher_SessionServer) error + // Heartbeat is heartbeat method for nodes. It returns new TTL in response. + // Node should send new heartbeat earlier than now + TTL, otherwise it will + // be deregistered from dispatcher and its status will be updated to NodeStatus_DOWN + Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error) + // UpdateTaskStatus updates status of task. Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(*TasksRequest, Dispatcher_TasksServer) error + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. 
Future messages in the stream are updates to + // the set of assignments. + Assignments(*AssignmentsRequest, Dispatcher_AssignmentsServer) error +} + +func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) { + s.RegisterService(&_Dispatcher_serviceDesc, srv) +} + +func _Dispatcher_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SessionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Session(m, &dispatcherSessionServer{stream}) +} + +type Dispatcher_SessionServer interface { + Send(*SessionMessage) error + grpc.ServerStream +} + +type dispatcherSessionServer struct { + grpc.ServerStream +} + +func (x *dispatcherSessionServer) Send(m *SessionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).Heartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/Heartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).Heartbeat(ctx, req.(*HeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_Tasks_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TasksRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Tasks(m, &dispatcherTasksServer{stream}) +} + +type Dispatcher_TasksServer interface { + Send(*TasksMessage) error + grpc.ServerStream +} + +type dispatcherTasksServer struct { + grpc.ServerStream +} + +func (x *dispatcherTasksServer) Send(m *TasksMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Assignments_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(AssignmentsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Assignments(m, &dispatcherAssignmentsServer{stream}) +} + +type Dispatcher_AssignmentsServer interface { + Send(*AssignmentsMessage) error + grpc.ServerStream +} + +type dispatcherAssignmentsServer struct { + grpc.ServerStream +} + +func (x *dispatcherAssignmentsServer) Send(m *AssignmentsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Dispatcher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Dispatcher", + HandlerType: (*DispatcherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Heartbeat", + Handler: _Dispatcher_Heartbeat_Handler, + }, + { + MethodName: "UpdateTaskStatus", + Handler: _Dispatcher_UpdateTaskStatus_Handler, + }, + }, + 
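+	// Editorial note: the unary RPCs (Heartbeat, UpdateTaskStatus) are registered
+	// above under Methods, while the server-streaming RPCs (Session, Tasks,
+	// Assignments) follow under Streams with ServerStreams set. grpc-go uses this
+	// descriptor, registered via RegisterDispatcherServer, to route each incoming
+	// call to the matching generated _Dispatcher_*_Handler.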
Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: _Dispatcher_Session_Handler, + ServerStreams: true, + }, + { + StreamName: "Tasks", + Handler: _Dispatcher_Tasks_Handler, + ServerStreams: true, + }, + { + StreamName: "Assignments", + Handler: _Dispatcher_Assignments_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/dispatcher.proto", +} + +func (m *SessionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Description != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Description.Size())) + n1, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.SessionID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *SessionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Managers) > 0 { + for _, msg := range m.Managers { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RootCA) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.RootCA))) + i += copy(dAtA[i:], m.RootCA) + } + return i, nil +} + +func (m *HeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *HeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Period))) + n3, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Period, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *UpdateTaskStatusRequest) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if len(m.Updates) > 0 { + for _, msg := range m.Updates { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateTaskStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *TasksMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AssignmentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *Assignment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Assignment) MarshalTo(dAtA 
[]byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Item != nil { + nn5, err := m.Item.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *Assignment_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Assignment_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Assignment_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Config.Size())) + n8, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *AssignmentChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Assignment != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Assignment.Size())) + n9, err := m.Assignment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Action)) + } + return i, nil +} + +func (m *AssignmentsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Type)) + } + if len(m.AppliesTo) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.AppliesTo))) + i += copy(dAtA[i:], m.AppliesTo) + } + if len(m.ResultsIn) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.ResultsIn))) + i += copy(dAtA[i:], m.ResultsIn) + } + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeFixed64Dispatcher(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Dispatcher(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintDispatcher(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyDispatcherServer struct { + local 
DispatcherServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) DispatcherServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyDispatcherServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyDispatcherServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Dispatcher_SessionServerWrapper struct { + Dispatcher_SessionServer + ctx context.Context +} + +func (s Dispatcher_SessionServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_SessionServerWrapper{ + Dispatcher_SessionServer: stream, + ctx: ctx, + } + return p.local.Session(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Session(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == 
raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).Heartbeat(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + return NewDispatcherClient(conn).Heartbeat(modCtx, r) + } + return resp, err +} + +func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + return NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + } + return resp, err +} + +type Dispatcher_TasksServerWrapper struct { + Dispatcher_TasksServer + ctx context.Context +} + +func (s Dispatcher_TasksServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_TasksServerWrapper{ + Dispatcher_TasksServer: stream, + ctx: ctx, + } + return p.local.Tasks(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type Dispatcher_AssignmentsServerWrapper struct { + Dispatcher_AssignmentsServer + ctx context.Context +} + +func (s Dispatcher_AssignmentsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_AssignmentsServerWrapper{ + Dispatcher_AssignmentsServer: stream, + ctx: ctx, + } + return p.local.Assignments(r, streamWrapper) + } + 
return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Assignments(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *SessionRequest) Size() (n int) { + var l int + _ = l + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *SessionMessage) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Managers) > 0 { + for _, e := range m.Managers { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + l = len(m.RootCA) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatResponse) Size() (n int) { + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Period) + n += 1 + l + sovDispatcher(uint64(l)) + return n +} + +func (m *UpdateTaskStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Updates) > 0 { + for _, e := range m.Updates { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *UpdateTaskStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TasksRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *TasksMessage) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *AssignmentsRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *Assignment) Size() (n int) { + var l int + _ = l + if m.Item != nil { + n += m.Item.Size() + } + return n +} + +func (m *Assignment_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *AssignmentChange) Size() (n int) { + var l int + _ = l + if m.Assignment != nil { + l = m.Assignment.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if 
m.Action != 0 { + n += 1 + sovDispatcher(uint64(m.Action)) + } + return n +} + +func (m *AssignmentsMessage) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDispatcher(uint64(m.Type)) + } + l = len(m.AppliesTo) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.ResultsIn) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func sovDispatcher(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDispatcher(x uint64) (n int) { + return sovDispatcher(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SessionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionRequest{`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *SessionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionMessage{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `Managers:` + strings.Replace(fmt.Sprintf("%v", this.Managers), "WeightedPeer", "WeightedPeer", 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `RootCA:` + fmt.Sprintf("%v", this.RootCA) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatResponse{`, + `Period:` + strings.Replace(strings.Replace(this.Period.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Updates:` + strings.Replace(fmt.Sprintf("%v", this.Updates), "UpdateTaskStatusRequest_TaskStatusUpdate", "UpdateTaskStatusRequest_TaskStatusUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest_TaskStatusUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest_TaskStatusUpdate{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "TaskStatus", "TaskStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusResponse{`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *TasksMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksMessage{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", 
this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment{`, + `Item:` + fmt.Sprintf("%v", this.Item) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentChange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentChange{`, + `Assignment:` + strings.Replace(fmt.Sprintf("%v", this.Assignment), "Assignment", "Assignment", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsMessage{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `AppliesTo:` + fmt.Sprintf("%v", this.AppliesTo) + `,`, + `ResultsIn:` + fmt.Sprintf("%v", this.ResultsIn) + `,`, + `Changes:` + strings.Replace(fmt.Sprintf("%v", this.Changes), "AssignmentChange", "AssignmentChange", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDispatcher(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SessionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := 
m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Managers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Managers = append(m.Managers, &WeightedPeer{}) + if err := m.Managers[len(m.Managers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootCA = append(m.RootCA[:0], dAtA[iNdEx:postIndex]...) + if m.RootCA == nil { + m.RootCA = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + 
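+	// Editorial note: msglen was just decoded as a varint, so postIndex marks the
+	// end of this length-delimited (wire type 2) field. For example, the bytes
+	// 0x1a 0x03 declare field 3 (0x1a >> 3) with a 3-byte payload, giving
+	// postIndex = iNdEx + 3. The bounds check below rejects lengths that run past
+	// the buffer before the nested Unmarshal is attempted.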
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Updates = append(m.Updates, &UpdateTaskStatusRequest_TaskStatusUpdate{}) + if err := m.Updates[len(m.Updates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &TaskStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: UpdateTaskStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Assignment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Assignment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Assignment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Task{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Secret{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Assignment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Assignment == nil { + m.Assignment = &Assignment{} + } + if err := m.Assignment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AssignmentChange_AssignmentAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
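+	// Editorial note: each pass of this loop first decodes one varint tag word;
+	// the field number is wire >> 3 and the wire type is wire & 0x7 (0 = varint,
+	// 2 = length-delimited below). The switch then dispatches on that field
+	// number: Type (1), AppliesTo (2), ResultsIn (3) and Changes (4).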
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (AssignmentsMessage_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliesTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliesTo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsIn", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultsIn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, &AssignmentChange{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDispatcher(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDispatcher + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDispatcher(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDispatcher = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDispatcher = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/dispatcher.proto", fileDescriptorDispatcher) +} + +var fileDescriptorDispatcher = []byte{ + // 1007 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x1c, 0xcd, 0xa4, 0xa9, 0xdb, 0xfc, 0xd2, 0x2d, 0x61, 0xb4, 0x2a, 0xc6, 0xd2, 0xa6, 0xc1, 0x65, + 0xab, 0x8a, 0x2d, 0xce, 0x12, 0xfe, 0x1d, 0xa8, 0x0a, 0x4d, 0x13, 0xa9, 0xd1, 0x6e, 0xbb, 0xd5, + 0xb4, 0xbb, 0x7b, 0xac, 0x1c, 0x7b, 0xd6, 0x35, 0x69, 0x3c, 0xc6, 0x33, 0xd9, 0x25, 0x07, 0x24, + 0x0e, 0xac, 0x84, 0x38, 0x21, 0x4e, 0x95, 0x10, 0x5f, 0x01, 0xf1, 0x31, 0x2a, 0x4e, 0x1c, 0x39, + 0x15, 0x36, 0x1f, 0x80, 0x0f, 0xc0, 0x09, 0x79, 0x3c, 0x4e, 0x42, 0x37, 0x69, 0xd3, 0x9e, 0x12, + 0xcf, 0xbc, 0xf7, 0xe6, 0xf9, 0xfd, 0x7e, 0xfe, 0x0d, 0x54, 0x3c, 0x5f, 0x1c, 0x77, 0x5b, 0x96, + 0xc3, 0x3a, 0x15, 0x97, 0x39, 0x6d, 0x1a, 0x55, 0xf8, 0x0b, 0x3b, 0xea, 0xb4, 0x7d, 0x51, 0xb1, + 0x43, 0xbf, 0xe2, 0xfa, 0x3c, 0xb4, 0x85, 0x73, 0x4c, 0x23, 0x2b, 0x8c, 0x98, 0x60, 0x18, 0x27, + 0x28, 0x2b, 0x45, 0x59, 0xcf, 0x3f, 0x30, 0xde, 0xbb, 0x42, 0x44, 0xf4, 0x42, 0xca, 0x13, 0xbe, + 0xb1, 0x7e, 0x05, 0x96, 0xb5, 0xbe, 0xa4, 0x8e, 0x48, 0xd1, 0xb7, 0x3d, 0xe6, 0x31, 0xf9, 0xb7, + 0x12, 0xff, 0x53, 0xab, 0x9f, 0x5e, 0xa2, 0x21, 0x11, 0xad, 0xee, 0xb3, 0x4a, 0x78, 0xd2, 0xf5, + 0xfc, 0x40, 0xfd, 0x28, 0x62, 0xc9, 0x63, 0xcc, 0x3b, 0xa1, 0x43, 0x90, 0xdb, 0x8d, 0x6c, 0xe1, + 0x33, 0xb5, 0x6f, 0xbe, 0x44, 0xb0, 0x78, 0x40, 0x39, 0xf7, 0x59, 0x40, 0xe8, 0x57, 0x5d, 0xca, + 0x05, 0x6e, 0x40, 0xc1, 0xa5, 0xdc, 0x89, 0xfc, 0x30, 0xc6, 0xe9, 0xa8, 0x8c, 0xd6, 0x0a, 0xd5, + 0x15, 0xeb, 0xf5, 0x14, 0xac, 0x3d, 0xe6, 0xd2, 0xfa, 0x10, 0x4a, 0x46, 0x79, 0x78, 0x1d, 0x80, + 0x27, 0xc2, 0x47, 0xbe, 0xab, 0x67, 0xcb, 0x68, 0x2d, 0x5f, 0xbb, 0xd5, 0x3f, 0x5f, 
0xce, 0xab, + 0xe3, 0x9a, 0x75, 0x92, 0x57, 0x80, 0xa6, 0x6b, 0xfe, 0x9c, 0x1d, 0xf8, 0xd8, 0xa5, 0x9c, 0xdb, + 0x1e, 0xbd, 0x20, 0x80, 0x2e, 0x17, 0xc0, 0xeb, 0x90, 0x0b, 0x98, 0x4b, 0xe5, 0x41, 0x85, 0xaa, + 0x3e, 0xc9, 0x2e, 0x91, 0x28, 0xbc, 0x01, 0xf3, 0x1d, 0x3b, 0xb0, 0x3d, 0x1a, 0x71, 0x7d, 0xa6, + 0x3c, 0xb3, 0x56, 0xa8, 0x96, 0xc7, 0x31, 0x9e, 0x52, 0xdf, 0x3b, 0x16, 0xd4, 0xdd, 0xa7, 0x34, + 0x22, 0x03, 0x06, 0x7e, 0x0a, 0x4b, 0x01, 0x15, 0x2f, 0x58, 0xd4, 0x3e, 0x6a, 0x31, 0x26, 0xb8, + 0x88, 0xec, 0xf0, 0xa8, 0x4d, 0x7b, 0x5c, 0xcf, 0x49, 0xad, 0x77, 0xc6, 0x69, 0x35, 0x02, 0x27, + 0xea, 0xc9, 0x68, 0x1e, 0xd0, 0x1e, 0xb9, 0xad, 0x04, 0x6a, 0x29, 0xff, 0x01, 0xed, 0x71, 0xbc, + 0x04, 0x1a, 0x61, 0x4c, 0x6c, 0x6f, 0xe9, 0xb3, 0x65, 0xb4, 0xb6, 0x40, 0xd4, 0x93, 0xf9, 0x05, + 0x14, 0x77, 0xa8, 0x1d, 0x89, 0x16, 0xb5, 0x45, 0x5a, 0xa6, 0x6b, 0xc5, 0x63, 0xee, 0xc3, 0x9b, + 0x23, 0x0a, 0x3c, 0x64, 0x01, 0xa7, 0xf8, 0x33, 0xd0, 0x42, 0x1a, 0xf9, 0xcc, 0x55, 0x45, 0x7e, + 0xdb, 0x4a, 0xba, 0xc5, 0x4a, 0xbb, 0xc5, 0xaa, 0xab, 0x6e, 0xa9, 0xcd, 0x9f, 0x9d, 0x2f, 0x67, + 0x4e, 0xff, 0x5a, 0x46, 0x44, 0x51, 0xcc, 0x1f, 0xb3, 0xf0, 0xd6, 0xe3, 0xd0, 0xb5, 0x05, 0x3d, + 0xb4, 0x79, 0xfb, 0x40, 0xd8, 0xa2, 0xcb, 0x6f, 0xe4, 0x0d, 0x3f, 0x81, 0xb9, 0xae, 0x14, 0x4a, + 0x6b, 0xb1, 0x31, 0x2e, 0xbf, 0x09, 0x67, 0x59, 0xc3, 0x95, 0x04, 0x41, 0x52, 0x31, 0x83, 0x41, + 0xf1, 0xe2, 0x26, 0x5e, 0x81, 0x39, 0x61, 0xf3, 0xf6, 0xd0, 0x16, 0xf4, 0xcf, 0x97, 0xb5, 0x18, + 0xd6, 0xac, 0x13, 0x2d, 0xde, 0x6a, 0xba, 0xf8, 0x13, 0xd0, 0xb8, 0x24, 0xa9, 0x6e, 0x2a, 0x8d, + 0xf3, 0x33, 0xe2, 0x44, 0xa1, 0x4d, 0x03, 0xf4, 0xd7, 0x5d, 0x26, 0x59, 0x9b, 0x1b, 0xb0, 0x10, + 0xaf, 0xde, 0x2c, 0x22, 0x73, 0x53, 0xb1, 0xd3, 0x6f, 0xc3, 0x82, 0xd9, 0xd8, 0x2b, 0xd7, 0x91, + 0x0c, 0x4c, 0x9f, 0x64, 0x90, 0x24, 0x30, 0xb3, 0x06, 0x78, 0x8b, 0x73, 0xdf, 0x0b, 0x3a, 0x34, + 0x10, 0x37, 0xf4, 0xf0, 0x1b, 0x02, 0x18, 0x8a, 0x60, 0x0b, 0x72, 0xb1, 0xb6, 0x6a, 0x9d, 0x89, + 0x0e, 0x76, 0x32, 0x44, 0xe2, 0xf0, 0x47, 0xa0, 0x71, 0xea, 0x44, 0x54, 0xa8, 0x50, 0x8d, 0x71, + 0x8c, 0x03, 0x89, 0xd8, 0xc9, 0x10, 0x85, 0x8d, 0x59, 0x0e, 0x0b, 0x9e, 0xf9, 0x9e, 0x3e, 0x33, + 0x99, 0xb5, 0x2d, 0x11, 0x31, 0x2b, 0xc1, 0xd6, 0x34, 0xc8, 0xf9, 0x82, 0x76, 0xcc, 0x97, 0x59, + 0x28, 0x0e, 0x2d, 0x6f, 0x1f, 0xdb, 0x81, 0x47, 0xf1, 0x26, 0x80, 0x3d, 0x58, 0x53, 0xf6, 0xc7, + 0x56, 0x78, 0xc8, 0x24, 0x23, 0x0c, 0xbc, 0x0b, 0x9a, 0xed, 0xc8, 0xd1, 0x18, 0xbf, 0xc8, 0x62, + 0xf5, 0xe3, 0xcb, 0xb9, 0xc9, 0xa9, 0x23, 0x0b, 0x5b, 0x92, 0x4c, 0x94, 0x88, 0xd9, 0x1a, 0xb5, + 0x98, 0xec, 0xe1, 0x55, 0xd0, 0x1e, 0xef, 0xd7, 0xb7, 0x0e, 0x1b, 0xc5, 0x8c, 0x61, 0xfc, 0xf0, + 0x4b, 0x79, 0xe9, 0x22, 0x42, 0x75, 0xf3, 0x2a, 0x68, 0xa4, 0xb1, 0xfb, 0xe8, 0x49, 0xa3, 0x88, + 0xc6, 0xe3, 0x08, 0xed, 0xb0, 0xe7, 0xd4, 0xfc, 0x17, 0xfd, 0xaf, 0xfe, 0x69, 0x17, 0x7d, 0x0e, + 0xb9, 0xf8, 0xa2, 0x92, 0x19, 0x2c, 0x56, 0xef, 0x5d, 0xfe, 0x1e, 0x29, 0xcb, 0x3a, 0xec, 0x85, + 0x94, 0x48, 0x22, 0xbe, 0x03, 0x60, 0x87, 0xe1, 0x89, 0x4f, 0xf9, 0x91, 0x60, 0xc9, 0x8c, 0x27, + 0x79, 0xb5, 0x72, 0xc8, 0xe2, 0xed, 0x88, 0xf2, 0xee, 0x89, 0xe0, 0x47, 0x7e, 0x20, 0x0b, 0x98, + 0x27, 0x79, 0xb5, 0xd2, 0x0c, 0xf0, 0x26, 0xcc, 0x39, 0x32, 0x9c, 0x74, 0x6e, 0xbe, 0x3b, 0x4d, + 0x92, 0x24, 0x25, 0x99, 0x77, 0x21, 0x17, 0x7b, 0xc1, 0x0b, 0x30, 0xbf, 0xfd, 0x68, 0x77, 0xff, + 0x61, 0x23, 0xce, 0x0b, 0xbf, 0x01, 0x85, 0xe6, 0xde, 0x36, 0x69, 0xec, 0x36, 0xf6, 0x0e, 0xb7, + 0x1e, 0x16, 0x51, 0xf5, 0x74, 0x16, 0xa0, 0x3e, 0xb8, 0xd4, 0xf1, 0xd7, 0x30, 0xa7, 0xda, 0x1b, + 0x9b, 0xe3, 
0x5b, 0x70, 0xf4, 0x36, 0x34, 0x2e, 0xc3, 0xa8, 0x44, 0xcc, 0x95, 0xdf, 0x7f, 0xfd, + 0xe7, 0x34, 0x7b, 0x07, 0x16, 0x24, 0xe6, 0xfd, 0x78, 0xae, 0xd3, 0x08, 0x6e, 0x25, 0x4f, 0xea, + 0xd6, 0xb8, 0x8f, 0xf0, 0x37, 0x90, 0x1f, 0xcc, 0x60, 0x3c, 0xf6, 0x5d, 0x2f, 0x0e, 0x79, 0xe3, + 0xee, 0x15, 0x28, 0x35, 0x5c, 0xa6, 0x31, 0x80, 0x7f, 0x42, 0x50, 0xbc, 0x38, 0x9e, 0xf0, 0xbd, + 0x6b, 0x8c, 0x5a, 0x63, 0x7d, 0x3a, 0xf0, 0x75, 0x4c, 0x75, 0x61, 0x56, 0x0e, 0x36, 0x5c, 0x9e, + 0x34, 0x40, 0x06, 0xa7, 0x4f, 0x46, 0xa4, 0x75, 0x58, 0x9d, 0xe2, 0xc4, 0xef, 0xb3, 0xe8, 0x3e, + 0xc2, 0xdf, 0x21, 0x28, 0x8c, 0xb4, 0x36, 0x5e, 0xbd, 0xa2, 0xf7, 0x53, 0x0f, 0xab, 0xd3, 0x7d, + 0x23, 0x53, 0x76, 0x44, 0x4d, 0x3f, 0x7b, 0x55, 0xca, 0xfc, 0xf9, 0xaa, 0x94, 0xf9, 0xb6, 0x5f, 0x42, + 0x67, 0xfd, 0x12, 0xfa, 0xa3, 0x5f, 0x42, 0x7f, 0xf7, 0x4b, 0xa8, 0xa5, 0xc9, 0x2b, 0xf8, + 0xc3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xf0, 0x6a, 0xcb, 0xae, 0x0a, 0x00, 0x00, +}
diff --git a/vendor/github.com/docker/swarmkit/api/equality/equality.go b/vendor/github.com/docker/swarmkit/api/equality/equality.go
new file mode 100644
index 0000000000..522c719810
--- /dev/null
+++ b/vendor/github.com/docker/swarmkit/api/equality/equality.go
@@ -0,0 +1,67 @@
+package equality
+
+import (
+	"crypto/subtle"
+	"reflect"
+
+	"github.com/docker/swarmkit/api"
+)
+
+// TasksEqualStable returns true if the tasks are functionally equal, ignoring status,
+// version and other superfluous fields.
+//
+// This is used to decide whether or not to propagate a task update to a controller.
+func TasksEqualStable(a, b *api.Task) bool {
+	// shallow copy
+	copyA, copyB := *a, *b
+
+	copyA.Status, copyB.Status = api.TaskStatus{}, api.TaskStatus{}
+	copyA.Meta, copyB.Meta = api.Meta{}, api.Meta{}
+
+	return reflect.DeepEqual(&copyA, &copyB)
+}
+
+// TaskStatusesEqualStable compares the task status excluding timestamp fields.
+func TaskStatusesEqualStable(a, b *api.TaskStatus) bool {
+	copyA, copyB := *a, *b
+
+	copyA.Timestamp, copyB.Timestamp = nil, nil
+	return reflect.DeepEqual(&copyA, &copyB)
+}
+
+// RootCAEqualStable compares RootCAs, excluding join tokens, which are randomly generated.
+func RootCAEqualStable(a, b *api.RootCA) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	if a == nil || b == nil {
+		return false
+	}
+
+	var aRotationKey, bRotationKey []byte
+	if a.RootRotation != nil {
+		aRotationKey = a.RootRotation.CAKey
+	}
+	if b.RootRotation != nil {
+		bRotationKey = b.RootRotation.CAKey
+	}
+	if subtle.ConstantTimeCompare(a.CAKey, b.CAKey) != 1 || subtle.ConstantTimeCompare(aRotationKey, bRotationKey) != 1 {
+		return false
+	}
+
+	copyA, copyB := *a, *b
+	copyA.JoinTokens, copyB.JoinTokens = api.JoinTokens{}, api.JoinTokens{}
+	return reflect.DeepEqual(copyA, copyB)
+}
+
+// ExternalCAsEqualStable compares lists of external CAs and determines whether they are equal.
+func ExternalCAsEqualStable(a, b []*api.ExternalCA) bool {
+	// because DeepEqual will treat an empty list and a nil list differently, we want to manually check this first
+	if len(a) == 0 && len(b) == 0 {
+		return true
+	}
+	// The assumption is that each individual api.ExternalCA within both lists is created from deserializing from a
+	// protobuf, so no special affordances are made to treat a nil map and empty map in the Options field of an
+	// api.ExternalCA as equivalent.
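+	// (Editorial note, not in the original source: reflect.DeepEqual
+	// distinguishes nil from empty slices -- reflect.DeepEqual([]int(nil), []int{})
+	// is false -- which is why the explicit length check above is needed
+	// before falling through to DeepEqual for the general case.)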
+ return reflect.DeepEqual(a, b) +} diff --git a/vendor/github.com/docker/swarmkit/api/health.pb.go b/vendor/github.com/docker/swarmkit/api/health.pb.go new file mode 100644 index 0000000000..37296e0e84 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/health.pb.go @@ -0,0 +1,724 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/health.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorHealth, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse") + proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +type authenticatedWrapperHealthServer struct { + local HealthServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer { + return &authenticatedWrapperHealthServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) 
(*HealthCheckResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Check(ctx, r) +} + +func (m *HealthCheckRequest) Copy() *HealthCheckRequest { + if m == nil { + return nil + } + o := &HealthCheckRequest{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckRequest) CopyFrom(src interface{}) { + + o := src.(*HealthCheckRequest) + *m = *o +} + +func (m *HealthCheckResponse) Copy() *HealthCheckResponse { + if m == nil { + return nil + } + o := &HealthCheckResponse{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckResponse) CopyFrom(src interface{}) { + + o := src.(*HealthCheckResponse) + *m = *o +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/health.proto", +} + +func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintHealth(dAtA, i, uint64(len(m.Service))) + i += copy(dAtA[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintHealth(dAtA, i, uint64(m.Status)) + } + return i, nil +} + +func encodeFixed64Health(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Health(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintHealth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyHealthServer struct { + local HealthServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyHealthServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, 
p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Check(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewHealthClient(conn).Check(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Check(ctx, r) + } + return nil, err + } + return NewHealthClient(conn).Check(modCtx, r) + } + return resp, err +} + +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovHealth(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovHealth(uint64(m.Status)) + } + return n +} + +func sovHealth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHealth(x uint64) (n int) { + return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HealthCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckRequest{`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func valueToStringHealth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHealth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHealth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthHealth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHealth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/health.proto", fileDescriptorHealth) } + +var fileDescriptorHealth = []byte{ + // 315 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xe2, 0xd2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2f, 0x2e, + 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x48, 0x4d, 0xcc, 0x29, + 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xa8, 0xd0, 0x83, 0xa9, 0xd0, 0x2b, + 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xe6, + 0x78, 0x8c, 0x05, 0xab, 0x48, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x83, 0x52, + 0x10, 0x8d, 0x4a, 0x7a, 0x5c, 0x42, 0x1e, 0x60, 0x2b, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, 0x05, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, + 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x7c, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, + 0xc1, 0x1a, 0xf8, 0x8c, 0x4c, 0xf5, 0x30, 0xdd, 0xae, 0x87, 0x45, 0xa3, 0x5e, 0x30, 0xc8, 0xe0, + 0xbc, 0xf4, 0x60, 0xb0, 0xe6, 0x20, 0xa8, 0x21, 0x4a, 0x56, 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, + 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, + 0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, + 0xc9, 0xa8, 0x92, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x3e, 0x17, 0x2b, 0xd8, 0x32, 0x21, 0x35, 0x82, + 0xae, 0x01, 0xfb, 0x5b, 0x4a, 0x9d, 0x48, 0x57, 0x2b, 0x89, 0x9e, 0x5a, 0xf7, 0x6e, 0x06, 0x13, + 0x3f, 0x17, 0x2f, 0x58, 0xa1, 0x6e, 0x6e, 0x62, 0x5e, 0x62, 0x7a, 0x6a, 0x91, 0x93, 0xc4, 0x89, + 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, + 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0xc1, 0x6d, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x7b, 0xf2, 0xdd, 0x23, 0x00, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/logbroker.pb.go b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go new file mode 100644 index 0000000000..2244b3b3c3 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/logbroker.pb.go @@ -0,0 +1,3421 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/logbroker.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// LogStream defines the stream from which the log message came. 
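+// (Editorial note: a minimal illustration of the generated enum below, under
+// the usual gogo/protobuf conventions -- LogStreamStdout carries wire value 1,
+// and LogStreamStdout.String() returns "LOG_STREAM_STDOUT" via proto.EnumName
+// and the LogStream_name table defined below.)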
+type LogStream int32 + +const ( + LogStreamUnknown LogStream = 0 + LogStreamStdout LogStream = 1 + LogStreamStderr LogStream = 2 +) + +var LogStream_name = map[int32]string{ + 0: "LOG_STREAM_UNKNOWN", + 1: "LOG_STREAM_STDOUT", + 2: "LOG_STREAM_STDERR", +} +var LogStream_value = map[string]int32{ + "LOG_STREAM_UNKNOWN": 0, + "LOG_STREAM_STDOUT": 1, + "LOG_STREAM_STDERR": 2, +} + +func (x LogStream) String() string { + return proto.EnumName(LogStream_name, int32(x)) +} +func (LogStream) EnumDescriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +type LogSubscriptionOptions struct { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + Streams []LogStream `protobuf:"varint,1,rep,name=streams,enum=docker.swarmkit.v1.LogStream" json:"streams,omitempty"` + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"` + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + Tail int64 `protobuf:"varint,3,opt,name=tail,proto3" json:"tail,omitempty"` + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + Since *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=since" json:"since,omitempty"` +} + +func (m *LogSubscriptionOptions) Reset() { *m = LogSubscriptionOptions{} } +func (*LogSubscriptionOptions) ProtoMessage() {} +func (*LogSubscriptionOptions) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +type LogSelector struct { + ServiceIDs []string `protobuf:"bytes,1,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,2,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + TaskIDs []string `protobuf:"bytes,3,rep,name=task_ids,json=taskIds" json:"task_ids,omitempty"` +} + +func (m *LogSelector) Reset() { *m = LogSelector{} } +func (*LogSelector) ProtoMessage() {} +func (*LogSelector) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{1} } + +// LogContext marks the context from which a log message was generated. 
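+// (Editorial worked example for the Tail field of LogSubscriptionOptions
+// above: given a finished stream of lines [a, b, c, d], Tail=0 sends all four
+// lines, Tail=2 skips a and b and sends [c, d], Tail=-1 sends nothing,
+// Tail=-2 sends [d], and Tail=-3 sends [c, d] -- i.e. for n < 0 the last
+// (-n-1) lines are sent.)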
+type LogContext struct {
+	ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+	NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+	TaskID string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+}
+
+func (m *LogContext) Reset() { *m = LogContext{} }
+func (*LogContext) ProtoMessage() {}
+func (*LogContext) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{2} }
+
+// LogAttr is an extra key/value pair that may have been set by users
+type LogAttr struct {
+	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *LogAttr) Reset() { *m = LogAttr{} }
+func (*LogAttr) ProtoMessage() {}
+func (*LogAttr) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{3} }
+
+// LogMessage
+type LogMessage struct {
+	// Context identifies the source of the log message.
+	Context LogContext `protobuf:"bytes,1,opt,name=context" json:"context"`
+	// Timestamp is the time at which the message was generated.
+	// Note: can't use stdtime because this field is nullable.
+	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"`
+	// Stream identifies the stream of the log message, stdout or stderr.
+	Stream LogStream `protobuf:"varint,3,opt,name=stream,proto3,enum=docker.swarmkit.v1.LogStream" json:"stream,omitempty"`
+	// Data is the raw log message, as generated by the application.
+	Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	// Attrs is a list of key/value pairs representing additional log details
+	// that may have been returned from the logger.
+	Attrs []LogAttr `protobuf:"bytes,5,rep,name=attrs" json:"attrs"`
+}
+
+func (m *LogMessage) Reset() { *m = LogMessage{} }
+func (*LogMessage) ProtoMessage() {}
+func (*LogMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{4} }
+
+type SubscribeLogsRequest struct {
+	// LogSelector describes the logs to which the subscriber is subscribed.
+	Selector *LogSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"`
+	Options *LogSubscriptionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *SubscribeLogsRequest) Reset() { *m = SubscribeLogsRequest{} }
+func (*SubscribeLogsRequest) ProtoMessage() {}
+func (*SubscribeLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{5} }
+
+type SubscribeLogsMessage struct {
+	Messages []LogMessage `protobuf:"bytes,1,rep,name=messages" json:"messages"`
+}
+
+func (m *SubscribeLogsMessage) Reset() { *m = SubscribeLogsMessage{} }
+func (*SubscribeLogsMessage) ProtoMessage() {}
+func (*SubscribeLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{6} }
+
+// ListenSubscriptionsRequest is a placeholder to begin listening for
+// subscriptions.
+type ListenSubscriptionsRequest struct {
+}
+
+func (m *ListenSubscriptionsRequest) Reset() { *m = ListenSubscriptionsRequest{} }
+func (*ListenSubscriptionsRequest) ProtoMessage() {}
+func (*ListenSubscriptionsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptorLogbroker, []int{7}
+}
+
+// SubscriptionMessage instructs the listener to start publishing messages for
+// the stream or end a subscription.
+//
+// If Options.Follow == false, the worker should end the subscription on its own.
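+// (Editorial sketch, not part of the API contract: a worker consuming these
+// messages would typically match each SubscriptionMessage's Selector against
+// its local tasks, stream matching log lines back via PublishLogs tagged with
+// the subscription ID, and stop either when a message with Close set arrives
+// or, for non-follow subscriptions, once the initial query is satisfied.)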
+type SubscriptionMessage struct {
+	// ID identifies the subscription.
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Selector defines which sources should be sent for the subscription.
+	Selector *LogSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"`
+	// Options specify how the subscription should be satisfied.
+	Options *LogSubscriptionOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Close will be true if the node should shut down the subscription with the
+	// provided identifier.
+	Close bool `protobuf:"varint,4,opt,name=close,proto3" json:"close,omitempty"`
+}
+
+func (m *SubscriptionMessage) Reset() { *m = SubscriptionMessage{} }
+func (*SubscriptionMessage) ProtoMessage() {}
+func (*SubscriptionMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{8} }
+
+type PublishLogsMessage struct {
+	// SubscriptionID identifies which subscription the set of messages should
+	// be sent to. We can think of this as a "mailbox" for the subscription.
+	SubscriptionID string `protobuf:"bytes,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"`
+	// Messages is the set of log messages for publishing.
+	Messages []LogMessage `protobuf:"bytes,2,rep,name=messages" json:"messages"`
+	// Close is a boolean for whether or not the client has completed its log
+	// stream. When close is set, the manager can hang up the subscription.
+	// Any further logs from this subscription are an error condition. Any
+	// messages included when close is set can be discarded.
+	Close bool `protobuf:"varint,3,opt,name=close,proto3" json:"close,omitempty"`
+}
+
+func (m *PublishLogsMessage) Reset() { *m = PublishLogsMessage{} }
+func (*PublishLogsMessage) ProtoMessage() {}
+func (*PublishLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{9} }
+
+type PublishLogsResponse struct {
+}
+
+func (m *PublishLogsResponse) Reset() { *m = PublishLogsResponse{} }
+func (*PublishLogsResponse) ProtoMessage() {}
+func (*PublishLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{10} }
+
+func init() {
+	proto.RegisterType((*LogSubscriptionOptions)(nil), "docker.swarmkit.v1.LogSubscriptionOptions")
+	proto.RegisterType((*LogSelector)(nil), "docker.swarmkit.v1.LogSelector")
+	proto.RegisterType((*LogContext)(nil), "docker.swarmkit.v1.LogContext")
+	proto.RegisterType((*LogAttr)(nil), "docker.swarmkit.v1.LogAttr")
+	proto.RegisterType((*LogMessage)(nil), "docker.swarmkit.v1.LogMessage")
+	proto.RegisterType((*SubscribeLogsRequest)(nil), "docker.swarmkit.v1.SubscribeLogsRequest")
+	proto.RegisterType((*SubscribeLogsMessage)(nil), "docker.swarmkit.v1.SubscribeLogsMessage")
+	proto.RegisterType((*ListenSubscriptionsRequest)(nil), "docker.swarmkit.v1.ListenSubscriptionsRequest")
+	proto.RegisterType((*SubscriptionMessage)(nil), "docker.swarmkit.v1.SubscriptionMessage")
+	proto.RegisterType((*PublishLogsMessage)(nil), "docker.swarmkit.v1.PublishLogsMessage")
+	proto.RegisterType((*PublishLogsResponse)(nil), "docker.swarmkit.v1.PublishLogsResponse")
+	proto.RegisterEnum("docker.swarmkit.v1.LogStream", LogStream_name, LogStream_value)
+}
+
+type authenticatedWrapperLogsServer struct {
+	local LogsServer
+	authorize func(context.Context, []string) error
+}
+
+func NewAuthenticatedWrapperLogsServer(local LogsServer, authorize func(context.Context, []string) error) LogsServer {
+	return &authenticatedWrapperLogsServer{
+		local: local,
+		authorize:
authorize, + } +} + +func (p *authenticatedWrapperLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.SubscribeLogs(r, stream) +} + +type authenticatedWrapperLogBrokerServer struct { + local LogBrokerServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogBrokerServer(local LogBrokerServer, authorize func(context.Context, []string) error) LogBrokerServer { + return &authenticatedWrapperLogBrokerServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.ListenSubscriptions(r, stream) +} + +func (p *authenticatedWrapperLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.PublishLogs(stream) +} + +func (m *LogSubscriptionOptions) Copy() *LogSubscriptionOptions { + if m == nil { + return nil + } + o := &LogSubscriptionOptions{} + o.CopyFrom(m) + return o +} + +func (m *LogSubscriptionOptions) CopyFrom(src interface{}) { + + o := src.(*LogSubscriptionOptions) + *m = *o + if o.Streams != nil { + m.Streams = make([]LogStream, len(o.Streams)) + copy(m.Streams, o.Streams) + } + + if o.Since != nil { + m.Since = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Since, o.Since) + } +} + +func (m *LogSelector) Copy() *LogSelector { + if m == nil { + return nil + } + o := &LogSelector{} + o.CopyFrom(m) + return o +} + +func (m *LogSelector) CopyFrom(src interface{}) { + + o := src.(*LogSelector) + *m = *o + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.TaskIDs != nil { + m.TaskIDs = make([]string, len(o.TaskIDs)) + copy(m.TaskIDs, o.TaskIDs) + } + +} + +func (m *LogContext) Copy() *LogContext { + if m == nil { + return nil + } + o := &LogContext{} + o.CopyFrom(m) + return o +} + +func (m *LogContext) CopyFrom(src interface{}) { + + o := src.(*LogContext) + *m = *o +} + +func (m *LogAttr) Copy() *LogAttr { + if m == nil { + return nil + } + o := &LogAttr{} + o.CopyFrom(m) + return o +} + +func (m *LogAttr) CopyFrom(src interface{}) { + + o := src.(*LogAttr) + *m = *o +} + +func (m *LogMessage) Copy() *LogMessage { + if m == nil { + return nil + } + o := &LogMessage{} + o.CopyFrom(m) + return o +} + +func (m *LogMessage) CopyFrom(src interface{}) { + + o := src.(*LogMessage) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Context, &o.Context) + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Attrs != nil { + m.Attrs = make([]LogAttr, len(o.Attrs)) + for i := range m.Attrs { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) + } + } + +} + +func (m *SubscribeLogsRequest) Copy() *SubscribeLogsRequest { + if m == nil { + return nil + } + o := 
&SubscribeLogsRequest{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsRequest) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsRequest) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *SubscribeLogsMessage) Copy() *SubscribeLogsMessage { + if m == nil { + return nil + } + o := &SubscribeLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsMessage) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *ListenSubscriptionsRequest) Copy() *ListenSubscriptionsRequest { + if m == nil { + return nil + } + o := &ListenSubscriptionsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListenSubscriptionsRequest) CopyFrom(src interface{}) {} +func (m *SubscriptionMessage) Copy() *SubscriptionMessage { + if m == nil { + return nil + } + o := &SubscriptionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscriptionMessage) CopyFrom(src interface{}) { + + o := src.(*SubscriptionMessage) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *PublishLogsMessage) Copy() *PublishLogsMessage { + if m == nil { + return nil + } + o := &PublishLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsMessage) CopyFrom(src interface{}) { + + o := src.(*PublishLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *PublishLogsResponse) Copy() *PublishLogsResponse { + if m == nil { + return nil + } + o := &PublishLogsResponse{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Logs service + +type LogsClient interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) +} + +type logsClient struct { + cc *grpc.ClientConn +} + +func NewLogsClient(cc *grpc.ClientConn) LogsClient { + return &logsClient{cc} +} + +func (c *logsClient) SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Logs_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Logs/SubscribeLogs", opts...) 
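+	// (Editorial note: this is the standard grpc-go client half of a
+	// server-streaming RPC -- open the stream, send the single request,
+	// close the send side, and let the caller Recv until io.EOF.)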
+ if err != nil { + return nil, err + } + x := &logsSubscribeLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Logs_SubscribeLogsClient interface { + Recv() (*SubscribeLogsMessage, error) + grpc.ClientStream +} + +type logsSubscribeLogsClient struct { + grpc.ClientStream +} + +func (x *logsSubscribeLogsClient) Recv() (*SubscribeLogsMessage, error) { + m := new(SubscribeLogsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Logs service + +type LogsServer interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(*SubscribeLogsRequest, Logs_SubscribeLogsServer) error +} + +func RegisterLogsServer(s *grpc.Server, srv LogsServer) { + s.RegisterService(&_Logs_serviceDesc, srv) +} + +func _Logs_SubscribeLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogsServer).SubscribeLogs(m, &logsSubscribeLogsServer{stream}) +} + +type Logs_SubscribeLogsServer interface { + Send(*SubscribeLogsMessage) error + grpc.ServerStream +} + +type logsSubscribeLogsServer struct { + grpc.ServerStream +} + +func (x *logsSubscribeLogsServer) Send(m *SubscribeLogsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Logs_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Logs", + HandlerType: (*LogsServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SubscribeLogs", + Handler: _Logs_SubscribeLogs_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +// Client API for LogBroker service + +type LogBrokerClient interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) +} + +type logBrokerClient struct { + cc *grpc.ClientConn +} + +func NewLogBrokerClient(cc *grpc.ClientConn) LogBrokerClient { + return &logBrokerClient{cc} +} + +func (c *logBrokerClient) ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.LogBroker/ListenSubscriptions", opts...) 
+ if err != nil { + return nil, err + } + x := &logBrokerListenSubscriptionsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type LogBroker_ListenSubscriptionsClient interface { + Recv() (*SubscriptionMessage, error) + grpc.ClientStream +} + +type logBrokerListenSubscriptionsClient struct { + grpc.ClientStream +} + +func (x *logBrokerListenSubscriptionsClient) Recv() (*SubscriptionMessage, error) { + m := new(SubscriptionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *logBrokerClient) PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.LogBroker/PublishLogs", opts...) + if err != nil { + return nil, err + } + x := &logBrokerPublishLogsClient{stream} + return x, nil +} + +type LogBroker_PublishLogsClient interface { + Send(*PublishLogsMessage) error + CloseAndRecv() (*PublishLogsResponse, error) + grpc.ClientStream +} + +type logBrokerPublishLogsClient struct { + grpc.ClientStream +} + +func (x *logBrokerPublishLogsClient) Send(m *PublishLogsMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsClient) CloseAndRecv() (*PublishLogsResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(PublishLogsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for LogBroker service + +type LogBrokerServer interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(*ListenSubscriptionsRequest, LogBroker_ListenSubscriptionsServer) error + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. 
+ PublishLogs(LogBroker_PublishLogsServer) error +} + +func RegisterLogBrokerServer(s *grpc.Server, srv LogBrokerServer) { + s.RegisterService(&_LogBroker_serviceDesc, srv) +} + +func _LogBroker_ListenSubscriptions_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenSubscriptionsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogBrokerServer).ListenSubscriptions(m, &logBrokerListenSubscriptionsServer{stream}) +} + +type LogBroker_ListenSubscriptionsServer interface { + Send(*SubscriptionMessage) error + grpc.ServerStream +} + +type logBrokerListenSubscriptionsServer struct { + grpc.ServerStream +} + +func (x *logBrokerListenSubscriptionsServer) Send(m *SubscriptionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _LogBroker_PublishLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LogBrokerServer).PublishLogs(&logBrokerPublishLogsServer{stream}) +} + +type LogBroker_PublishLogsServer interface { + SendAndClose(*PublishLogsResponse) error + Recv() (*PublishLogsMessage, error) + grpc.ServerStream +} + +type logBrokerPublishLogsServer struct { + grpc.ServerStream +} + +func (x *logBrokerPublishLogsServer) SendAndClose(m *PublishLogsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsServer) Recv() (*PublishLogsMessage, error) { + m := new(PublishLogsMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LogBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.LogBroker", + HandlerType: (*LogBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListenSubscriptions", + Handler: _LogBroker_ListenSubscriptions_Handler, + ServerStreams: true, + }, + { + StreamName: "PublishLogs", + Handler: _LogBroker_PublishLogs_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +func (m *LogSubscriptionOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSubscriptionOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for _, num := range m.Streams { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(num)) + } + } + if m.Follow { + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Tail != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Tail)) + } + if m.Since != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Since.Size())) + n1, err := m.Since.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *LogSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LogContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.TaskID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *LogAttr) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogAttr) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *LogMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Context.Size())) + n2, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Timestamp != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Timestamp.Size())) + n3, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Stream)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Attrs) > 0 { + for _, msg := range m.Attrs { + dAtA[i] = 0x2a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SubscribeLogsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + 
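// 0x12 written above is the protobuf tag for field 2, wire type 2 (length-delimited): a varint length prefix, then the nested message bytes +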
n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *SubscribeLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListenSubscriptionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenSubscriptionsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SubscriptionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriptionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + n7, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Close { + dAtA[i] = 0x20 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SubscriptionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.SubscriptionID))) + i += copy(dAtA[i:], m.SubscriptionID) + } + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Close { + dAtA[i] = 0x18 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Logbroker(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = 
uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Logbroker(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintLogbroker(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyLogsServer struct { + local LogsServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogsServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogsServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogsServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogsServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Logs_SubscribeLogsServerWrapper struct { + Logs_SubscribeLogsServer + ctx context.Context +} + +func (s Logs_SubscribeLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Logs_SubscribeLogsServerWrapper{ + Logs_SubscribeLogsServer: stream, + ctx: ctx, + } + return p.local.SubscribeLogs(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := 
NewLogsClient(conn).SubscribeLogs(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type raftProxyLogBrokerServer struct { + local LogBrokerServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogBrokerServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogBrokerServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogBrokerServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogBrokerServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type LogBroker_ListenSubscriptionsServerWrapper struct { + LogBroker_ListenSubscriptionsServer + ctx context.Context +} + +func (s LogBroker_ListenSubscriptionsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_ListenSubscriptionsServerWrapper{ + LogBroker_ListenSubscriptionsServer: stream, + ctx: ctx, + } + return p.local.ListenSubscriptions(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).ListenSubscriptions(ctx, r) + + if err != nil { + 
return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type LogBroker_PublishLogsServerWrapper struct { + LogBroker_PublishLogsServer + ctx context.Context +} + +func (s LogBroker_PublishLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_PublishLogsServerWrapper{ + LogBroker_PublishLogsServer: stream, + ctx: ctx, + } + return p.local.PublishLogs(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).PublishLogs(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (m *LogSubscriptionOptions) Size() (n int) { + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + n += 1 + sovLogbroker(uint64(e)) + } + } + if m.Follow { + n += 2 + } + if m.Tail != 0 { + n += 1 + sovLogbroker(uint64(m.Tail)) + } + if m.Since != nil { + l = m.Since.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogSelector) Size() (n int) { + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *LogContext) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogAttr) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogMessage) Size() (n int) { + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovLogbroker(uint64(l)) + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Stream != 0 { + n += 1 + sovLogbroker(uint64(m.Stream)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Attrs) > 0 { + for _, e := range m.Attrs { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *SubscribeLogsRequest) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *SubscribeLogsMessage) Size() (n int) { + var l int + _ = l + if len(m.Messages) > 0 { + 
for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *ListenSubscriptionsRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SubscriptionMessage) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsMessage) Size() (n int) { + var l int + _ = l + l = len(m.SubscriptionID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovLogbroker(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLogbroker(x uint64) (n int) { + return sovLogbroker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogSubscriptionOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSubscriptionOptions{`, + `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Tail:` + fmt.Sprintf("%v", this.Tail) + `,`, + `Since:` + strings.Replace(fmt.Sprintf("%v", this.Since), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LogSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSelector{`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `TaskIDs:` + fmt.Sprintf("%v", this.TaskIDs) + `,`, + `}`, + }, "") + return s +} +func (this *LogContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogContext{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *LogAttr) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogAttr{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *LogMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogMessage{`, + `Context:` + strings.Replace(strings.Replace(this.Context.String(), "LogContext", "LogContext", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Attrs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attrs), "LogAttr", "LogAttr", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsRequest{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *SubscribeLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsMessage{`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListenSubscriptionsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListenSubscriptionsRequest{`, + `}`, + }, "") + return s +} +func (this *SubscriptionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscriptionMessage{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsMessage{`, + `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsResponse{`, + `}`, + }, "") + return s +} +func valueToStringLogbroker(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogSubscriptionOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSubscriptionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSubscriptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ m.Streams = append(m.Streams, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tail", wireType) + } + m.Tail = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Tail |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Since == nil { + m.Since = &google_protobuf.Timestamp{} + } + if err := m.Since.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskIDs = append(m.TaskIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogAttr) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogAttr: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogAttr: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogMessage: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
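+ // the append above copies the bytes out of dAtA, so the decoded Data does not alias the caller's buffer; a nil result is then normalized to an empty slice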
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attrs = append(m.Attrs, LogAttr{}) + if err := m.Attrs[len(m.Attrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenSubscriptionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriptionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogbroker(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLogbroker + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLogbroker(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLogbroker = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogbroker = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/logbroker.proto", fileDescriptorLogbroker) +} + +var fileDescriptorLogbroker = []byte{ + // 966 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0x3d, 0xeb, 0xc4, 0x8e, 0x9f, 0x9b, 0xc4, 0x9d, 0xa4, 0x91, 0x65, 0xa8, 0x6d, 0x6d, + 0xa5, 0x62, 0x45, 0x65, 0xdd, 0x1a, 0xa1, 0x22, 0x45, 0x42, 0xd4, 0xb8, 0x42, 0x16, 0x6e, 0x82, + 0xc6, 0x8e, 0xe0, 0x16, 0xad, 0xbd, 0xd3, 0xed, 0xca, 0xeb, 0x1d, 0xb3, 0x33, 0x4e, 0x40, 0xe2, + 0xc0, 0xa1, 0x48, 0x28, 0x07, 0x6e, 0x48, 0x70, 0xe8, 0x89, 0x5e, 0x10, 0x12, 0x17, 0x6e, 0x7c, + 0x00, 0x14, 0x71, 0xe2, 0xc8, 0xc9, 0xa2, 0xfb, 0x01, 0xf8, 0x0c, 0x68, 0x67, 0xd6, 0xeb, 0x0d, + 0xb6, 0x53, 0x54, 0x2e, 0xf6, 0x8c, 0xe7, 0xf7, 0xf6, 0xfd, 0xdf, 0x7f, 0xde, 0x5b, 0x83, 0x61, + 0x3b, 0xe2, 0xc9, 0xa4, 0x6f, 0x0c, 0xd8, 0xa8, 0x6e, 0xb1, 0xc1, 0x90, 0xfa, 0x75, 0x7e, 0x66, + 0xfa, 0xa3, 0xa1, 0x23, 0xea, 0xe6, 0xd8, 0xa9, 0xbb, 0xcc, 0xee, 0xfb, 0x6c, 0x48, 0x7d, 0x63, + 0xec, 0x33, 0xc1, 0x30, 0x56, 0x90, 0x31, 0x83, 0x8c, 0xd3, 0x7b, 0xa5, 0x5d, 0x9b, 0xd9, 0x4c, + 0x1e, 0xd7, 0xc3, 0x95, 0x22, 0x4b, 0x15, 0x9b, 0x31, 0xdb, 0xa5, 0x75, 0xb9, 0xeb, 0x4f, 0x1e, + 0xd7, 0x85, 0x33, 0xa2, 0x5c, 0x98, 0xa3, 0x71, 0x04, 0xdc, 0xbf, 0x22, 0x75, 0x1c, 0x34, 0x76, + 0x27, 0xb6, 0xe3, 0x45, 0x5f, 0x2a, 0x50, 0xff, 0x05, 0xc1, 0x5e, 0x87, 0xd9, 0xdd, 0x49, 0x9f, + 0x0f, 0x7c, 0x67, 0x2c, 0x1c, 0xe6, 0x1d, 0xc9, 0x4f, 0x8e, 0x0f, 0x20, 0xcb, 0x85, 0x4f, 0xcd, + 0x11, 0x2f, 0xa2, 0x6a, 0xba, 0xb6, 0xd5, 0xb8, 0x69, 0x2c, 0x0a, 0x36, 0xc2, 0x60, 0x49, 0x35, + 0xb5, 0x42, 0x8a, 0xcc, 0x22, 0xf0, 0x1e, 0x64, 0x1e, 0x33, 0xd7, 0x65, 0x67, 0x45, 0xad, 0x8a, + 0x6a, 0x1b, 0x24, 0xda, 0x61, 0x0c, 0x6b, 0xc2, 0x74, 0xdc, 0x62, 0xba, 0x8a, 0x6a, 0x69, 0x22, + 0xd7, 0xf8, 0x2e, 0xac, 0x73, 0xc7, 0x1b, 0xd0, 0xe2, 0x5a, 0x15, 0xd5, 0xf2, 0x8d, 0x92, 0xa1, + 0xaa, 0x35, 0x66, 0xc2, 0x8d, 0xde, 0xac, 0x5a, 0xa2, 0x40, 0xfd, 0x1b, 0x04, 0xf9, 0x30, 0x31, + 0x75, 0xe9, 0x40, 0x30, 0x1f, 0xd7, 0x21, 0xcf, 0xa9, 0x7f, 0xea, 0x0c, 0xe8, 0x89, 0x63, 0x29, + 0xb9, 0xb9, 0xe6, 0x56, 0x30, 0xad, 0x40, 0x57, 0xfd, 0xdc, 0x6e, 0x71, 0x02, 0x11, 0xd2, 0xb6, + 0x38, 0xbe, 0x0d, 0x1b, 0x1e, 0xb3, 0x14, 0xad, 0x49, 0x3a, 0x1f, 0x4c, 0x2b, 0xd9, 0x43, 0x66, + 0x49, 0x34, 0x1b, 0x1e, 0x46, 
0x9c, 0x30, 0xf9, 0x50, 0x72, 0xe9, 0x39, 0xd7, 0x33, 0xf9, 0x50, + 0x72, 0xe1, 0x61, 0xdb, 0xe2, 0xfa, 0x53, 0x04, 0xd0, 0x61, 0xf6, 0xfb, 0xcc, 0x13, 0xf4, 0x33, + 0x81, 0xef, 0x00, 0xcc, 0xf5, 0x14, 0x51, 0x15, 0xd5, 0x72, 0xcd, 0xcd, 0x60, 0x5a, 0xc9, 0xc5, + 0x72, 0x48, 0x2e, 0x56, 0x83, 0x6f, 0x41, 0x36, 0x12, 0x23, 0xcd, 0xca, 0x35, 0x21, 0x98, 0x56, + 0x32, 0x4a, 0x0b, 0xc9, 0x28, 0x29, 0x21, 0x14, 0x29, 0x91, 0xde, 0x45, 0x90, 0x12, 0x42, 0x32, + 0x4a, 0x87, 0x7e, 0x0f, 0xb2, 0x1d, 0x66, 0x3f, 0x10, 0xc2, 0xc7, 0x05, 0x48, 0x0f, 0xe9, 0xe7, + 0x2a, 0x37, 0x09, 0x97, 0x78, 0x17, 0xd6, 0x4f, 0x4d, 0x77, 0x42, 0x55, 0x12, 0xa2, 0x36, 0xfa, + 0xb9, 0x26, 0x95, 0x3f, 0xa2, 0x9c, 0x9b, 0x36, 0xc5, 0xef, 0x42, 0x76, 0xa0, 0x8a, 0x90, 0xa1, + 0xf9, 0x46, 0x79, 0xc5, 0xa5, 0x47, 0xa5, 0x36, 0xd7, 0x2e, 0xa6, 0x95, 0x14, 0x99, 0x05, 0xe1, + 0x77, 0x20, 0x17, 0xf7, 0xa6, 0x4c, 0x74, 0xf5, 0x7d, 0xce, 0x61, 0xfc, 0x36, 0x64, 0x54, 0xf3, + 0xc8, 0xfa, 0x5e, 0xd6, 0x6d, 0x24, 0x82, 0xc3, 0x86, 0xb2, 0x4c, 0x61, 0xca, 0xde, 0xb9, 0x46, + 0xe4, 0x1a, 0xdf, 0x87, 0x75, 0x53, 0x08, 0x9f, 0x17, 0xd7, 0xab, 0xe9, 0x5a, 0xbe, 0xf1, 0xda, + 0x8a, 0x27, 0x85, 0x3e, 0x45, 0xfa, 0x15, 0xaf, 0x7f, 0x8f, 0x60, 0x37, 0x1a, 0x85, 0x3e, 0xed, + 0x30, 0x9b, 0x13, 0xfa, 0xe9, 0x84, 0x72, 0x81, 0x0f, 0x60, 0x83, 0x47, 0xcd, 0x16, 0xf9, 0x52, + 0x59, 0x25, 0x2f, 0xc2, 0x48, 0x1c, 0x80, 0x5b, 0x90, 0x65, 0x6a, 0xa6, 0x22, 0x47, 0xf6, 0x57, + 0xc5, 0x2e, 0x4e, 0x21, 0x99, 0x85, 0xea, 0x9f, 0xfc, 0x4b, 0xda, 0xec, 0xc6, 0xde, 0x83, 0x8d, + 0x91, 0x5a, 0xaa, 0xc6, 0x5f, 0x7d, 0x65, 0x51, 0x44, 0x54, 0x72, 0x1c, 0xa5, 0xbf, 0x0e, 0xa5, + 0x8e, 0xc3, 0x05, 0xf5, 0x92, 0xf9, 0x67, 0xa5, 0xeb, 0xbf, 0x21, 0xd8, 0x49, 0x1e, 0xcc, 0xf2, + 0xee, 0x81, 0x16, 0xf7, 0x76, 0x26, 0x98, 0x56, 0xb4, 0x76, 0x8b, 0x68, 0x8e, 0x75, 0xc9, 0x2a, + 0xed, 0x7f, 0x58, 0x95, 0x7e, 0x65, 0xab, 0xc2, 0x4e, 0x1f, 0xb8, 0x8c, 0xab, 0x17, 0xca, 0x06, + 0x51, 0x1b, 0xfd, 0x47, 0x04, 0xf8, 0xa3, 0x49, 0xdf, 0x75, 0xf8, 0x93, 0xa4, 0x7f, 0x07, 0xb0, + 0xcd, 0x13, 0x0f, 0x9b, 0x0f, 0x2c, 0x0e, 0xa6, 0x95, 0xad, 0x64, 0x9e, 0x76, 0x8b, 0x6c, 0x25, + 0xd1, 0xb6, 0x75, 0xc9, 0x7c, 0xed, 0x55, 0xcc, 0x9f, 0x6b, 0x4d, 0x27, 0xb5, 0xde, 0x80, 0x9d, + 0x84, 0x54, 0x42, 0xf9, 0x98, 0x79, 0x9c, 0xee, 0x3f, 0x47, 0x90, 0x8b, 0x47, 0x00, 0xdf, 0x01, + 0xdc, 0x39, 0xfa, 0xe0, 0xa4, 0xdb, 0x23, 0x0f, 0x1f, 0x3c, 0x3a, 0x39, 0x3e, 0xfc, 0xf0, 0xf0, + 0xe8, 0xe3, 0xc3, 0x42, 0xaa, 0xb4, 0x7b, 0xfe, 0xac, 0x5a, 0x88, 0xb1, 0x63, 0x6f, 0xe8, 0xb1, + 0x33, 0x0f, 0xef, 0xc3, 0xf5, 0x04, 0xdd, 0xed, 0xb5, 0x8e, 0x8e, 0x7b, 0x05, 0x54, 0xda, 0x39, + 0x7f, 0x56, 0xdd, 0x8e, 0xe1, 0xae, 0xb0, 0xd8, 0x44, 0x2c, 0xb2, 0x0f, 0x09, 0x29, 0x68, 0x8b, + 0x2c, 0xf5, 0xfd, 0xd2, 0xf5, 0xaf, 0x7f, 0x28, 0xa7, 0x7e, 0x7d, 0x5e, 0x9e, 0x0b, 0x6b, 0x3c, + 0x45, 0xb0, 0x16, 0xea, 0xc6, 0x5f, 0xc0, 0xe6, 0xa5, 0x9e, 0xc5, 0xb5, 0x65, 0xee, 0x2c, 0x9b, + 0xb8, 0xd2, 0xcb, 0xc9, 0xc8, 0x51, 0xfd, 0xc6, 0xef, 0x3f, 0xff, 0xfd, 0x9d, 0xb6, 0x0d, 0x9b, + 0x92, 0x7c, 0x73, 0x64, 0x7a, 0xa6, 0x4d, 0xfd, 0xbb, 0xa8, 0xf1, 0x93, 0x26, 0xdd, 0x6a, 0xca, + 0xff, 0x5c, 0xfc, 0x2d, 0x82, 0x9d, 0x25, 0x6d, 0x8e, 0x8d, 0xa5, 0x17, 0xb6, 0x72, 0x1e, 0x4a, + 0x6f, 0x5c, 0x21, 0x2c, 0x39, 0x20, 0xfa, 0x2d, 0xa9, 0xeb, 0x26, 0x5c, 0x53, 0xba, 0xce, 0x98, + 0x3f, 0xa4, 0xfe, 0x82, 0x4a, 0xfc, 0x15, 0x82, 0x7c, 0xe2, 0xae, 0xf1, 0xed, 0x65, 0xcf, 0x5f, + 0xec, 0xdb, 0xe5, 0x3a, 0x96, 0x34, 0xcd, 0x7f, 0xd2, 0x51, 0x43, 0xcd, 0xe2, 0xc5, 0x8b, 0x72, + 0xea, 0xcf, 0x17, 0xe5, 0xd4, 0x97, 0x41, 0x19, 0x5d, 
0x04, 0x65, 0xf4, 0x47, 0x50, 0x46, 0x7f, + 0x05, 0x65, 0xd4, 0xcf, 0xc8, 0x17, 0xf7, 0x5b, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x95, 0x7b, + 0x3c, 0x04, 0xe0, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/objects.pb.go b/vendor/github.com/docker/swarmkit/api/objects.pb.go new file mode 100644 index 0000000000..dc24902c93 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/objects.pb.go @@ -0,0 +1,7892 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/objects.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf3 "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import github_com_docker_go_events "github.com/docker/go-events" +import strings "strings" + +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Meta contains metadata about objects. Every object contains a meta field. +type Meta struct { + // Version tracks the current version of the object. + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + CreatedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + UpdatedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt" json:"updated_at,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{0} } + +// Node provides the internal node state as seen by the cluster. +type Node struct { + // ID specifies the identity of the node. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Description encapsulated the properties of the Node as reported by the + // agent. + Description *NodeDescription `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `protobuf:"bytes,5,opt,name=status" json:"status"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` + // DEPRECATED: Use Attachments to find the ingress network + // The node attachment to the ingress network. + Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` + // Certificate is the TLS certificate issued for the node, if any. + Certificate Certificate `protobuf:"bytes,8,opt,name=certificate" json:"certificate"` + // Role is the *observed* role for this node. 
It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including ingress network, will have an NetworkAttachment. + Attachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=attachments" json:"attachments,omitempty"` +} + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{1} } + +type Service struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + SpecVersion *Version `protobuf:"bytes,10,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // PreviousSpec is the previous service spec that was in place before + // "Spec". + PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"` + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + PreviousSpecVersion *Version `protobuf:"bytes,11,opt,name=previous_spec_version,json=previousSpecVersion" json:"previous_spec_version,omitempty"` + // Runtime state of service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto allocated by the system. + Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"` + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus *UpdateStatus `protobuf:"bytes,5,opt,name=update_status,json=updateStatus" json:"update_status,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{2} } + +// Endpoint specified all the network parameters required to +// correctly discover and load balance a service +type Endpoint struct { + Spec *EndpointSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` + // Runtime state of the exposed ports which may carry + // auto-allocated swarm ports in addition to the user + // configured information. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. 
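+ // (Editorial sketch, not upstream text: a service attached to both the
+ // ingress network and one user-defined overlay would typically carry two
+ // entries here, one VirtualIP per NetworkID.)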
+ VirtualIPs []*Endpoint_VirtualIP `protobuf:"bytes,3,rep,name=virtual_ips,json=virtualIps" json:"virtual_ips,omitempty"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3} } + +// VirtualIP specifies a set of networks this endpoint will be attached to +// and the IP addresses the target service will be made available under. +type Endpoint_VirtualIP struct { + // NetworkID for which this endpoint attachment was created. + NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // A virtual IP is used to address this service in IP + // layer that the client can use to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one to + // accommodate for both IPv4 and IPv6 + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Endpoint_VirtualIP) Reset() { *m = Endpoint_VirtualIP{} } +func (*Endpoint_VirtualIP) ProtoMessage() {} +func (*Endpoint_VirtualIP) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3, 0} } + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +type Task struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + Spec TaskSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + SpecVersion *Version `protobuf:"bytes,14,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + ServiceID string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + Slot uint64 `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty"` + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + NodeID string `protobuf:"bytes,6,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As backup, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): The preserves the ability for us to making naming + // decisions for tasks in orchestrator, albeit, this is left empty for now. + Annotations Annotations `protobuf:"bytes,7,opt,name=annotations" json:"annotations"` + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. 
+ // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + ServiceAnnotations Annotations `protobuf:"bytes,8,opt,name=service_annotations,json=serviceAnnotations" json:"service_annotations"` + Status TaskStatus `protobuf:"bytes,9,opt,name=status" json:"status"` + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState" json:"desired_state,omitempty"` + // List of network attachments by the task. + Networks []*NetworkAttachment `protobuf:"bytes,11,rep,name=networks" json:"networks,omitempty"` + // A copy of runtime state of service endpoint from Service + // object to be distributed to agents as part of the task. + Endpoint *Endpoint `protobuf:"bytes,12,opt,name=endpoint" json:"endpoint,omitempty"` + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + AssignedGenericResources []*GenericResource `protobuf:"bytes,15,rep,name=assigned_generic_resources,json=assignedGenericResources" json:"assigned_generic_resources,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{4} } + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as task or node. +type NetworkAttachment struct { + // Network state as a whole becomes part of the object so that + // it always is available for use in agents so that agents + // don't have any other dependency during execution. + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. 
+ Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"` + // List of aliases by which a task is resolved in a network + Aliases []string `protobuf:"bytes,3,rep,name=aliases" json:"aliases,omitempty"` + // Map of all the driver attachment options for this network + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachment) Reset() { *m = NetworkAttachment{} } +func (*NetworkAttachment) ProtoMessage() {} +func (*NetworkAttachment) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{5} } + +type Network struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec NetworkSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Driver specific operational state provided by the network driver. + DriverState *Driver `protobuf:"bytes,4,opt,name=driver_state,json=driverState" json:"driver_state,omitempty"` + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{6} } + +// Cluster provides global cluster settings. +type Cluster struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // RootCA contains key material for the root CA. + RootCA RootCA `protobuf:"bytes,4,opt,name=root_ca,json=rootCa" json:"root_ca"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,5,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation. + EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"` + // BlacklistedCertificates tracks certificates that should no longer + // be honored. It's a mapping from CN -> BlacklistedCertificate. + // swarm. Their certificates should effectively be blacklisted. + BlacklistedCertificates map[string]*BlacklistedCertificate `protobuf:"bytes,8,rep,name=blacklisted_certificates,json=blacklistedCertificates" json:"blacklisted_certificates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest. + // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut down state). 
+ UnlockKeys []*EncryptionKey `protobuf:"bytes,9,rep,name=unlock_keys,json=unlockKeys" json:"unlock_keys,omitempty"` + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + FIPS bool `protobuf:"varint,10,opt,name=fips,proto3" json:"fips,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{7} } + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +type Secret struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. + Spec SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Whether the secret is an internal secret (not set by a user) or not. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{8} } + +// Config represents a set of configuration files that should be passed to a +// container. +type Config struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. + Spec ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` +} + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{9} } + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + // Kind identifies this class of object. It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + Kind string `protobuf:"bytes,4,opt,name=kind,proto3" json:"kind,omitempty"` + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. + Payload *google_protobuf3.Any `protobuf:"bytes,5,opt,name=payload" json:"payload,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{10} } + +// Extension declares a type of "resource" object. This message provides some +// metadata about the objects. 
+type Extension struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{11} } + +func init() { + proto.RegisterType((*Meta)(nil), "docker.swarmkit.v1.Meta") + proto.RegisterType((*Node)(nil), "docker.swarmkit.v1.Node") + proto.RegisterType((*Service)(nil), "docker.swarmkit.v1.Service") + proto.RegisterType((*Endpoint)(nil), "docker.swarmkit.v1.Endpoint") + proto.RegisterType((*Endpoint_VirtualIP)(nil), "docker.swarmkit.v1.Endpoint.VirtualIP") + proto.RegisterType((*Task)(nil), "docker.swarmkit.v1.Task") + proto.RegisterType((*NetworkAttachment)(nil), "docker.swarmkit.v1.NetworkAttachment") + proto.RegisterType((*Network)(nil), "docker.swarmkit.v1.Network") + proto.RegisterType((*Cluster)(nil), "docker.swarmkit.v1.Cluster") + proto.RegisterType((*Secret)(nil), "docker.swarmkit.v1.Secret") + proto.RegisterType((*Config)(nil), "docker.swarmkit.v1.Config") + proto.RegisterType((*Resource)(nil), "docker.swarmkit.v1.Resource") + proto.RegisterType((*Extension)(nil), "docker.swarmkit.v1.Extension") +} + +func (m *Meta) Copy() *Meta { + if m == nil { + return nil + } + o := &Meta{} + o.CopyFrom(m) + return o +} + +func (m *Meta) CopyFrom(src interface{}) { + + o := src.(*Meta) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Version, &o.Version) + if o.CreatedAt != nil { + m.CreatedAt = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.CreatedAt, o.CreatedAt) + } + if o.UpdatedAt != nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) + } +} + +func (m *Node) Copy() *Node { + if m == nil { + return nil + } + o := &Node{} + o.CopyFrom(m) + return o +} + +func (m *Node) CopyFrom(src interface{}) { + + o := src.(*Node) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + if o.Description != nil { + m.Description = &NodeDescription{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Description, o.Description) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + if o.ManagerStatus != nil { + m.ManagerStatus = &ManagerStatus{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) + } + if o.Attachment != nil { + m.Attachment = &NetworkAttachment{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachment, o.Attachment) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Certificate, &o.Certificate) + if o.Attachments != nil { + m.Attachments = make([]*NetworkAttachment, len(o.Attachments)) + for i := range m.Attachments { + m.Attachments[i] = &NetworkAttachment{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachments[i], o.Attachments[i]) + } + } + +} + +func (m *Service) Copy() *Service { + if m == nil { + return nil + } + o := &Service{} + o.CopyFrom(m) + return o +} + +func (m *Service) CopyFrom(src interface{}) { + + o := src.(*Service) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + 
m.SpecVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + if o.PreviousSpec != nil { + m.PreviousSpec = &ServiceSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) + } + if o.PreviousSpecVersion != nil { + m.PreviousSpecVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) + } + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.UpdateStatus != nil { + m.UpdateStatus = &UpdateStatus{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) + } +} + +func (m *Endpoint) Copy() *Endpoint { + if m == nil { + return nil + } + o := &Endpoint{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint) CopyFrom(src interface{}) { + + o := src.(*Endpoint) + *m = *o + if o.Spec != nil { + m.Spec = &EndpointSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Spec, o.Spec) + } + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + + if o.VirtualIPs != nil { + m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs)) + for i := range m.VirtualIPs { + m.VirtualIPs[i] = &Endpoint_VirtualIP{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) + } + } + +} + +func (m *Endpoint_VirtualIP) Copy() *Endpoint_VirtualIP { + if m == nil { + return nil + } + o := &Endpoint_VirtualIP{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint_VirtualIP) CopyFrom(src interface{}) { + + o := src.(*Endpoint_VirtualIP) + *m = *o +} + +func (m *Task) Copy() *Task { + if m == nil { + return nil + } + o := &Task{} + o.CopyFrom(m) + return o +} + +func (m *Task) CopyFrom(src interface{}) { + + o := src.(*Task) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + if o.Networks != nil { + m.Networks = make([]*NetworkAttachment, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachment{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.AssignedGenericResources != nil { + m.AssignedGenericResources = make([]*GenericResource, len(o.AssignedGenericResources)) + for i := range m.AssignedGenericResources { + m.AssignedGenericResources[i] = &GenericResource{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) + } + } + +} + +func (m *NetworkAttachment) Copy() *NetworkAttachment { + if m == nil { + return nil + } + o := &NetworkAttachment{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachment) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachment) + *m = *o + if 
o.Network != nil { + m.Network = &Network{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Network, o.Network) + } + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *Network) Copy() *Network { + if m == nil { + return nil + } + o := &Network{} + o.CopyFrom(m) + return o +} + +func (m *Network) CopyFrom(src interface{}) { + + o := src.(*Network) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + if o.DriverState != nil { + m.DriverState = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverState, o.DriverState) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.IPAM, o.IPAM) + } +} + +func (m *Cluster) Copy() *Cluster { + if m == nil { + return nil + } + o := &Cluster{} + o.CopyFrom(m) + return o +} + +func (m *Cluster) CopyFrom(src interface{}) { + + o := src.(*Cluster) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.RootCA, &o.RootCA) + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.BlacklistedCertificates != nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates)) + for k, v := range o.BlacklistedCertificates { + m.BlacklistedCertificates[k] = &BlacklistedCertificate{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.BlacklistedCertificates[k], v) + } + } + + if o.UnlockKeys != nil { + m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys)) + for i := range m.UnlockKeys { + m.UnlockKeys[i] = &EncryptionKey{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) + } + } + +} + +func (m *Secret) Copy() *Secret { + if m == nil { + return nil + } + o := &Secret{} + o.CopyFrom(m) + return o +} + +func (m *Secret) CopyFrom(src interface{}) { + + o := src.(*Secret) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Config) Copy() *Config { + if m == nil { + return nil + } + o := &Config{} + o.CopyFrom(m) + return o +} + +func (m *Config) CopyFrom(src interface{}) { + + o := src.(*Config) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Resource) Copy() *Resource { + if m == nil { + return nil + } + o := &Resource{} + o.CopyFrom(m) + return o +} + +func (m *Resource) CopyFrom(src interface{}) { + + o := src.(*Resource) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + 
github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *Extension) Copy() *Extension { + if m == nil { + return nil + } + o := &Extension{} + o.CopyFrom(m) + return o +} + +func (m *Extension) CopyFrom(src interface{}) { + + o := src.(*Extension) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Meta, &o.Meta) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Version.Size())) + n1, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.CreatedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.CreatedAt.Size())) + n2, err := m.CreatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UpdatedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdatedAt.Size())) + n3, err := m.UpdatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n4, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Description != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Description.Size())) + n6, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.ManagerStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ManagerStatus.Size())) + n8, err := m.ManagerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Attachment != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Attachment.Size())) + n9, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Certificate.Size())) + n10, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Role != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, msg := range m.Attachments { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n11, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if m.Endpoint != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n13, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.UpdateStatus != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdateStatus.Size())) + n14, err := m.UpdateStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.PreviousSpec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpec.Size())) + n15, err := m.PreviousSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.SpecVersion != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n16, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.PreviousSpecVersion != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpecVersion.Size())) + n17, err := m.PreviousSpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n18, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VirtualIPs) > 0 { + for _, msg := range m.VirtualIPs { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoint_VirtualIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_VirtualIP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err 
!= nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n19, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n20, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + if len(m.ServiceID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Slot)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n21, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ServiceAnnotations.Size())) + n22, err := m.ServiceAnnotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + if m.DesiredState != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n24, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.LogDriver != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.LogDriver.Size())) + n25, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.SpecVersion != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n26, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.AssignedGenericResources) > 0 { + for _, msg := range m.AssignedGenericResources { + dAtA[i] = 0x7a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Network.Size())) + n27, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = 
uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *Network) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n28, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n29, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + if m.DriverState != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DriverState.Size())) + n30, err := m.DriverState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.IPAM.Size())) + n31, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n32, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n33, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.RootCA.Size())) + n34, err := m.RootCA.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.EncryptionKeyLamportClock != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, _ := range m.BlacklistedCertificates { + dAtA[i] = 0x42 + i++ + v := m.BlacklistedCertificates[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + 
sovObjects(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + msgSize + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(v.Size())) + n35, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + } + } + if len(m.UnlockKeys) > 0 { + for _, msg := range m.UnlockKeys { + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FIPS { + dAtA[i] = 0x50 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n36, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n38, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n39, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + return i, nil +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n40, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n41, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + if len(m.Kind) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Payload.Size())) + n42, err := m.Payload.MarshalTo(dAtA[i:]) 
+ if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *Extension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n43, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n44, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + if len(m.Description) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func encodeFixed64Objects(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Objects(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintObjects(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.CreatedAt != nil { + l = m.CreatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.ManagerStatus != nil { + l = m.ManagerStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Certificate.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Role != 0 { + n += 1 + sovObjects(uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, e := range m.Attachments { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdateStatus != nil { + l = m.UpdateStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpec != nil { + l = m.PreviousSpec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = 
m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpecVersion != nil { + l = m.PreviousSpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Endpoint) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.VirtualIPs) > 0 { + for _, e := range m.VirtualIPs { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Endpoint_VirtualIP) Size() (n int) { + var l int + _ = l + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Task) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovObjects(uint64(m.Slot)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.ServiceAnnotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DesiredState != 0 { + n += 1 + sovObjects(uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.AssignedGenericResources) > 0 { + for _, e := range m.AssignedGenericResources { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *NetworkAttachment) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Network) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DriverState != nil { + l = m.DriverState.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.RootCA.Size() + n += 1 + l + sovObjects(uint64(l)) + if len(m.NetworkBootstrapKeys) > 0 { + for _, 
e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.EncryptionKeyLamportClock != 0 { + n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, v := range m.BlacklistedCertificates { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovObjects(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + l + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + if len(m.UnlockKeys) > 0 { + for _, e := range m.UnlockKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Internal { + n += 2 + } + return n +} + +func (m *Config) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + return n +} + +func (m *Resource) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Extension) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Description) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func sovObjects(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozObjects(x uint64) (n int) { + return sovObjects(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +type NodeCheckFunc func(t1, t2 *Node) bool + +type EventCreateNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventCreateNode) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +type EventUpdateNode struct { + Node *Node + OldNode *Node + Checks []NodeCheckFunc +} + +func (e EventUpdateNode) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +type EventDeleteNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventDeleteNode) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} +func (m *Node) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Node) GetMeta() Meta { + return m.Meta +} + +func (m *Node) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Node) GetID() string { + return m.ID +} + 
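+// (Editorial sketch, not generated code: the Event*Node types above act as
+// filters. Hypothetical usage, building a filter event with Checks and
+// asking whether a concrete event matches:
+//
+//	filter := EventCreateNode{
+//		Node:   &Node{ID: "abc"},
+//		Checks: []NodeCheckFunc{NodeCheckIDPrefix},
+//	}
+//	filter.Matches(EventCreateNode{Node: &Node{ID: "abcdef"}}) // true
+//
+// Matches invokes each check as check(filterNode, eventNode), and
+// NodeCheckIDPrefix tests strings.HasPrefix(v2.ID, v1.ID).)
+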
+func (m *Node) EventCreate() Event { + return EventCreateNode{Node: m} +} + +func (m *Node) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNode{Node: m, OldNode: oldObject.(*Node)} + } else { + return EventUpdateNode{Node: m} + } +} + +func (m *Node) EventDelete() Event { + return EventDeleteNode{Node: m} +} + +func NodeCheckID(v1, v2 *Node) bool { + return v1.ID == v2.ID +} + +func NodeCheckIDPrefix(v1, v2 *Node) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NodeCheckName(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return v1.Description.Hostname == v2.Description.Hostname +} + +func NodeCheckNamePrefix(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return strings.HasPrefix(v2.Description.Hostname, v1.Description.Hostname) +} + +func NodeCheckCustom(v1, v2 *Node) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckCustomPrefix(v1, v2 *Node) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckRole(v1, v2 *Node) bool { + return v1.Role == v2.Role +} + +func NodeCheckMembership(v1, v2 *Node) bool { + return v1.Spec.Membership == v2.Spec.Membership +} + +func ConvertNodeWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Node + checkFuncs []NodeCheckFunc + hasRole bool + hasMembership bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NodeCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NodeCheckIDPrefix) + case *SelectBy_Name: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.Name} + checkFuncs = append(checkFuncs, NodeCheckName) + case *SelectBy_NamePrefix: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.NamePrefix} + checkFuncs = append(checkFuncs, NodeCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustomPrefix) + case *SelectBy_Role: + if hasRole { + return nil, errConflictingFilters + } + hasRole = true + m.Role = v.Role + checkFuncs = append(checkFuncs, NodeCheckRole) + case *SelectBy_Membership: + if hasMembership { + return nil, errConflictingFilters + } + hasMembership = true + m.Spec.Membership = v.Membership + checkFuncs = append(checkFuncs, NodeCheckMembership) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNode{Node: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + 
return nil, errUnrecognizedAction + } + return events, nil +} + +type NodeIndexerByID struct{} + +func (indexer NodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + return true, []byte(m.ID + "\x00"), nil +} + +type NodeIndexerByName struct{} + +func (indexer NodeIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NodeCustomIndexer struct{} + +func (indexer NodeCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Node) + return customIndexer("", &m.Spec.Annotations) +} + +type ServiceCheckFunc func(t1, t2 *Service) bool + +type EventCreateService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventCreateService) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +type EventUpdateService struct { + Service *Service + OldService *Service + Checks []ServiceCheckFunc +} + +func (e EventUpdateService) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +type EventDeleteService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventDeleteService) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} +func (m *Service) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Service) GetMeta() Meta { + return m.Meta +} + +func (m *Service) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Service) GetID() string { + return m.ID +} + +func (m *Service) EventCreate() Event { + return EventCreateService{Service: m} +} + +func (m *Service) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateService{Service: m, OldService: oldObject.(*Service)} + } else { + return EventUpdateService{Service: m} + } +} + +func (m *Service) EventDelete() Event { + return EventDeleteService{Service: m} +} + +func ServiceCheckID(v1, v2 *Service) bool { + return v1.ID == v2.ID +} + +func ServiceCheckIDPrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ServiceCheckName(v1, v2 *Service) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ServiceCheckNamePrefix(v1, v2 
*Service) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ServiceCheckCustom(v1, v2 *Service) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ServiceCheckCustomPrefix(v1, v2 *Service) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertServiceWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Service + checkFuncs []ServiceCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ServiceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ServiceCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ServiceCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ServiceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteService{Service: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ServiceIndexerByID struct{} + +func (indexer ServiceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + return true, []byte(m.ID + "\x00"), nil +} + +type ServiceIndexerByName struct{} + +func (indexer ServiceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ServiceCustomIndexer struct{} + +func (indexer ServiceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ServiceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Service) + return customIndexer("", &m.Spec.Annotations) +} + +type TaskCheckFunc func(t1, t2 *Task) bool + +type EventCreateTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventCreateTask) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +type EventUpdateTask struct { + Task *Task + OldTask *Task + Checks []TaskCheckFunc +} + +func (e EventUpdateTask) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +type EventDeleteTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventDeleteTask) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} +func (m *Task) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Task) GetMeta() Meta { + return m.Meta +} + +func (m *Task) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Task) GetID() string { + return m.ID +} + +func (m *Task) EventCreate() Event { + return EventCreateTask{Task: m} +} + +func (m *Task) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateTask{Task: m, OldTask: oldObject.(*Task)} + } else { + return EventUpdateTask{Task: m} + } +} + +func (m *Task) EventDelete() Event { + return EventDeleteTask{Task: m} +} + +func TaskCheckID(v1, v2 *Task) bool { + return v1.ID == v2.ID +} + +func TaskCheckIDPrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func TaskCheckName(v1, v2 *Task) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func TaskCheckNamePrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func TaskCheckCustom(v1, v2 *Task) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func TaskCheckCustomPrefix(v1, v2 *Task) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func TaskCheckNodeID(v1, v2 *Task) bool { + return v1.NodeID == v2.NodeID +} + +func TaskCheckServiceID(v1, v2 *Task) bool { + return v1.ServiceID == v2.ServiceID +} + +func TaskCheckSlot(v1, v2 *Task) bool { + return v1.Slot == v2.Slot +} + +func TaskCheckDesiredState(v1, v2 *Task) bool { + return v1.DesiredState == v2.DesiredState +} + +func ConvertTaskWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Task + checkFuncs []TaskCheckFunc + hasDesiredState bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, TaskCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, TaskCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, TaskCheckName) + case *SelectBy_NamePrefix: + if 
m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, TaskCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustomPrefix) + case *SelectBy_ServiceID: + if m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.ServiceID + checkFuncs = append(checkFuncs, TaskCheckServiceID) + case *SelectBy_NodeID: + if m.NodeID != "" { + return nil, errConflictingFilters + } + m.NodeID = v.NodeID + checkFuncs = append(checkFuncs, TaskCheckNodeID) + case *SelectBy_Slot: + if m.Slot != 0 || m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.Slot.ServiceID + m.Slot = v.Slot.Slot + checkFuncs = append(checkFuncs, TaskCheckNodeID, TaskCheckSlot) + case *SelectBy_DesiredState: + if hasDesiredState { + return nil, errConflictingFilters + } + hasDesiredState = true + m.DesiredState = v.DesiredState + checkFuncs = append(checkFuncs, TaskCheckDesiredState) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteTask{Task: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type TaskIndexerByID struct{} + +func (indexer TaskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + return true, []byte(m.ID + "\x00"), nil +} + +type TaskIndexerByName struct{} + +func (indexer TaskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type TaskCustomIndexer struct{} + +func (indexer TaskCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer TaskCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Task) + return customIndexer("", &m.Annotations) +} + +type NetworkCheckFunc func(t1, t2 *Network) bool + +type EventCreateNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventCreateNetwork) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +type EventUpdateNetwork struct { + Network *Network + OldNetwork *Network + Checks []NetworkCheckFunc +} + +func (e EventUpdateNetwork) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +type EventDeleteNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventDeleteNetwork) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} +func (m *Network) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Network) GetMeta() Meta { + return m.Meta +} + +func (m *Network) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Network) GetID() string { + return m.ID +} + +func (m *Network) EventCreate() Event { + return EventCreateNetwork{Network: m} +} + +func (m *Network) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNetwork{Network: m, OldNetwork: oldObject.(*Network)} + } else { + return EventUpdateNetwork{Network: m} + } +} + +func (m *Network) EventDelete() Event { + return EventDeleteNetwork{Network: m} +} + +func NetworkCheckID(v1, v2 *Network) bool { + return v1.ID == v2.ID +} + +func NetworkCheckIDPrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NetworkCheckName(v1, v2 *Network) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func NetworkCheckNamePrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func NetworkCheckCustom(v1, v2 *Network) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NetworkCheckCustomPrefix(v1, v2 *Network) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertNetworkWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Network + checkFuncs []NetworkCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NetworkCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NetworkCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, NetworkCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = 
append(checkFuncs, NetworkCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNetwork{Network: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NetworkIndexerByID struct{} + +func (indexer NetworkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + return true, []byte(m.ID + "\x00"), nil +} + +type NetworkIndexerByName struct{} + +func (indexer NetworkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NetworkCustomIndexer struct{} + +func (indexer NetworkCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer NetworkCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Network) + return customIndexer("", &m.Spec.Annotations) +} + +type ClusterCheckFunc func(t1, t2 *Cluster) bool + +type EventCreateCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventCreateCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +type EventUpdateCluster struct { + Cluster *Cluster + OldCluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventUpdateCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +type EventDeleteCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventDeleteCluster) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} +func (m *Cluster) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Cluster) GetMeta() Meta { + return m.Meta +} + +func (m *Cluster) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Cluster) GetID() string { + return m.ID +} + +func (m *Cluster) EventCreate() Event { + return EventCreateCluster{Cluster: m} +} + +func (m *Cluster) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateCluster{Cluster: m, OldCluster: oldObject.(*Cluster)} + } else { + return EventUpdateCluster{Cluster: m} + } +} + +func (m *Cluster) EventDelete() Event { + return EventDeleteCluster{Cluster: m} +} + +func ClusterCheckID(v1, v2 *Cluster) bool { + return v1.ID == v2.ID +} + +func ClusterCheckIDPrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ClusterCheckName(v1, v2 *Cluster) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ClusterCheckNamePrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ClusterCheckCustom(v1, v2 *Cluster) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ClusterCheckCustomPrefix(v1, v2 *Cluster) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertClusterWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Cluster + checkFuncs []ClusterCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ClusterCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ClusterCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ClusterCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = 
append(checkFuncs, ClusterCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteCluster{Cluster: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ClusterIndexerByID struct{} + +func (indexer ClusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + return true, []byte(m.ID + "\x00"), nil +} + +type ClusterIndexerByName struct{} + +func (indexer ClusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ClusterCustomIndexer struct{} + +func (indexer ClusterCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ClusterCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Cluster) + return customIndexer("", &m.Spec.Annotations) +} + +type SecretCheckFunc func(t1, t2 *Secret) bool + +type EventCreateSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventCreateSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +type EventUpdateSecret struct { + Secret *Secret + OldSecret *Secret + Checks []SecretCheckFunc +} + +func (e EventUpdateSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +type EventDeleteSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventDeleteSecret) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} +func (m *Secret) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Secret) GetMeta() Meta { + return m.Meta +} + +func (m *Secret) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Secret) GetID() string { + return m.ID +} + +func (m *Secret) EventCreate() Event { + return EventCreateSecret{Secret: m} +} + +func (m *Secret) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateSecret{Secret: m, OldSecret: oldObject.(*Secret)} + } else { + return EventUpdateSecret{Secret: m} + } +} + +func (m *Secret) EventDelete() Event { + return EventDeleteSecret{Secret: m} +} + +func SecretCheckID(v1, v2 *Secret) bool { + return v1.ID == v2.ID +} + +func SecretCheckIDPrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func SecretCheckName(v1, v2 *Secret) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func SecretCheckNamePrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func SecretCheckCustom(v1, v2 *Secret) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func SecretCheckCustomPrefix(v1, v2 *Secret) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertSecretWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Secret + checkFuncs []SecretCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, SecretCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, SecretCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, SecretCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, SecretCheckNamePrefix) + case *SelectBy_Custom: 
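+// a custom-index filter seeds the template object with a single
+// Key/Val IndexEntry and adds SecretCheckCustom, which matches the
+// annotations' custom indices via the shared checkCustom helper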
+ if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteSecret{Secret: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type SecretIndexerByID struct{} + +func (indexer SecretIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + return true, []byte(m.ID + "\x00"), nil +} + +type SecretIndexerByName struct{} + +func (indexer SecretIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type SecretCustomIndexer struct{} + +func (indexer SecretCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer SecretCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Secret) + return customIndexer("", &m.Spec.Annotations) +} + +type ConfigCheckFunc func(t1, t2 *Config) bool + +type EventCreateConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventCreateConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +type EventUpdateConfig struct { + Config *Config + OldConfig *Config + Checks []ConfigCheckFunc +} + +func (e EventUpdateConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +type EventDeleteConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventDeleteConfig) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} +func (m *Config) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Config) GetMeta() Meta { + return m.Meta +} + +func (m *Config) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Config) GetID() string { + return m.ID +} + +func (m *Config) EventCreate() Event { + return EventCreateConfig{Config: m} +} + +func (m *Config) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateConfig{Config: m, OldConfig: oldObject.(*Config)} + } else { + return EventUpdateConfig{Config: m} + } +} + +func (m *Config) EventDelete() Event { + return EventDeleteConfig{Config: m} +} + +func ConfigCheckID(v1, v2 *Config) bool { + return v1.ID == v2.ID +} + +func ConfigCheckIDPrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ConfigCheckName(v1, v2 *Config) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ConfigCheckNamePrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ConfigCheckCustom(v1, v2 *Config) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConfigCheckCustomPrefix(v1, v2 *Config) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertConfigWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Config + checkFuncs []ConfigCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ConfigCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ConfigCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ConfigCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ConfigCheckNamePrefix) + case *SelectBy_Custom: + 
if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteConfig{Config: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ConfigIndexerByID struct{} + +func (indexer ConfigIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + return true, []byte(m.ID + "\x00"), nil +} + +type ConfigIndexerByName struct{} + +func (indexer ConfigIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ConfigCustomIndexer struct{} + +func (indexer ConfigCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ConfigCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Config) + return customIndexer("", &m.Spec.Annotations) +} + +type ResourceCheckFunc func(t1, t2 *Resource) bool + +type EventCreateResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventCreateResource) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +type EventUpdateResource struct { + Resource *Resource + OldResource *Resource + Checks []ResourceCheckFunc +} + +func (e EventUpdateResource) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +type EventDeleteResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventDeleteResource) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} +func (m *Resource) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Resource) GetMeta() Meta { + return m.Meta +} + +func (m *Resource) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Resource) GetID() string { + return m.ID +} + +func (m *Resource) EventCreate() Event { + return EventCreateResource{Resource: m} +} + +func (m *Resource) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateResource{Resource: m, OldResource: oldObject.(*Resource)} + } else { + return EventUpdateResource{Resource: m} + } +} + +func (m *Resource) EventDelete() Event { + return EventDeleteResource{Resource: m} +} + +func ResourceCheckID(v1, v2 *Resource) bool { + return v1.ID == v2.ID +} + +func ResourceCheckIDPrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ResourceCheckName(v1, v2 *Resource) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ResourceCheckNamePrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ResourceCheckCustom(v1, v2 *Resource) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ResourceCheckCustomPrefix(v1, v2 *Resource) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ResourceCheckKind(v1, v2 *Resource) bool { + return v1.Kind == v2.Kind +} + +func ConvertResourceWatch(action WatchActionKind, filters []*SelectBy, kind string) ([]Event, error) { + var ( + m Resource + checkFuncs []ResourceCheckFunc + ) + m.Kind = kind + checkFuncs = append(checkFuncs, ResourceCheckKind) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ResourceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ResourceCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, 
ResourceCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ResourceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteResource{Resource: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ResourceIndexerByID struct{} + +func (indexer ResourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + return true, []byte(m.ID + "\x00"), nil +} + +type ResourceIndexerByName struct{} + +func (indexer ResourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ResourceCustomIndexer struct{} + +func (indexer ResourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ResourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Resource) + return customIndexer("", &m.Annotations) +} + +type ExtensionCheckFunc func(t1, t2 *Extension) bool + +type EventCreateExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventCreateExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +type EventUpdateExtension struct { + Extension *Extension + OldExtension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventUpdateExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +type EventDeleteExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventDeleteExtension) Matches(apiEvent github_com_docker_go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} +func (m *Extension) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Extension) GetMeta() Meta { + return m.Meta +} + +func (m *Extension) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Extension) GetID() string { + return m.ID +} + +func (m *Extension) EventCreate() Event { + return EventCreateExtension{Extension: m} +} + +func (m *Extension) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateExtension{Extension: m, OldExtension: oldObject.(*Extension)} + } else { + return EventUpdateExtension{Extension: m} + } +} + +func (m *Extension) EventDelete() Event { + return EventDeleteExtension{Extension: m} +} + +func ExtensionCheckID(v1, v2 *Extension) bool { + return v1.ID == v2.ID +} + +func ExtensionCheckIDPrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ExtensionCheckName(v1, v2 *Extension) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ExtensionCheckNamePrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ExtensionCheckCustom(v1, v2 *Extension) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ExtensionCheckCustomPrefix(v1, v2 *Extension) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ConvertExtensionWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Extension + checkFuncs []ExtensionCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ExtensionCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ExtensionCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ExtensionCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, 
errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ExtensionCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteExtension{Extension: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ExtensionIndexerByID struct{} + +func (indexer ExtensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + return true, []byte(m.ID + "\x00"), nil +} + +type ExtensionIndexerByName struct{} + +func (indexer ExtensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ExtensionCustomIndexer struct{} + +func (indexer ExtensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ExtensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Extension) + return customIndexer("", &m.Annotations) +} +func NewStoreAction(c Event) (StoreAction, error) { + var sa StoreAction + switch v := c.(type) { + case EventCreateNode: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventUpdateNode: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventDeleteNode: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Node{Node: v.Node} + case EventCreateService: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventUpdateService: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventDeleteService: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Service{Service: v.Service} + case EventCreateTask: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventUpdateTask: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventDeleteTask: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Task{Task: v.Task} + case EventCreateNetwork: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventUpdateNetwork: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventDeleteNetwork: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Network{Network: v.Network} + case EventCreateCluster: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventUpdateCluster: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventDeleteCluster: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventCreateSecret: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventUpdateSecret: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventDeleteSecret: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventCreateConfig: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventUpdateConfig: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventDeleteConfig: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Config{Config: v.Config} + case EventCreateResource: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventUpdateResource: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventDeleteResource: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventCreateExtension: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventUpdateExtension: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventDeleteExtension: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Extension{Extension: v.Extension} + default: + return StoreAction{}, errUnknownStoreAction + } + return sa, nil +} + +func EventFromStoreAction(sa StoreAction, oldObject StoreObject) (Event, error) 
{ + switch v := sa.Target.(type) { + case *StoreAction_Node: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNode{Node: v.Node}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNode{Node: v.Node, OldNode: oldObject.(*Node)}, nil + } else { + return EventUpdateNode{Node: v.Node}, nil + } + case StoreActionKindRemove: + return EventDeleteNode{Node: v.Node}, nil + } + case *StoreAction_Service: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateService{Service: v.Service}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateService{Service: v.Service, OldService: oldObject.(*Service)}, nil + } else { + return EventUpdateService{Service: v.Service}, nil + } + case StoreActionKindRemove: + return EventDeleteService{Service: v.Service}, nil + } + case *StoreAction_Task: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateTask{Task: v.Task}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateTask{Task: v.Task, OldTask: oldObject.(*Task)}, nil + } else { + return EventUpdateTask{Task: v.Task}, nil + } + case StoreActionKindRemove: + return EventDeleteTask{Task: v.Task}, nil + } + case *StoreAction_Network: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNetwork{Network: v.Network}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNetwork{Network: v.Network, OldNetwork: oldObject.(*Network)}, nil + } else { + return EventUpdateNetwork{Network: v.Network}, nil + } + case StoreActionKindRemove: + return EventDeleteNetwork{Network: v.Network}, nil + } + case *StoreAction_Cluster: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateCluster{Cluster: v.Cluster}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateCluster{Cluster: v.Cluster, OldCluster: oldObject.(*Cluster)}, nil + } else { + return EventUpdateCluster{Cluster: v.Cluster}, nil + } + case StoreActionKindRemove: + return EventDeleteCluster{Cluster: v.Cluster}, nil + } + case *StoreAction_Secret: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateSecret{Secret: v.Secret}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateSecret{Secret: v.Secret, OldSecret: oldObject.(*Secret)}, nil + } else { + return EventUpdateSecret{Secret: v.Secret}, nil + } + case StoreActionKindRemove: + return EventDeleteSecret{Secret: v.Secret}, nil + } + case *StoreAction_Config: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateConfig{Config: v.Config}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateConfig{Config: v.Config, OldConfig: oldObject.(*Config)}, nil + } else { + return EventUpdateConfig{Config: v.Config}, nil + } + case StoreActionKindRemove: + return EventDeleteConfig{Config: v.Config}, nil + } + case *StoreAction_Resource: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateResource{Resource: v.Resource}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateResource{Resource: v.Resource, OldResource: oldObject.(*Resource)}, nil + } else { + return EventUpdateResource{Resource: v.Resource}, nil + } + case StoreActionKindRemove: + return EventDeleteResource{Resource: v.Resource}, nil + } + case *StoreAction_Extension: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateExtension{Extension: v.Extension}, nil + case StoreActionKindUpdate: + if oldObject != nil 
{ + return EventUpdateExtension{Extension: v.Extension, OldExtension: oldObject.(*Extension)}, nil + } else { + return EventUpdateExtension{Extension: v.Extension}, nil + } + case StoreActionKindRemove: + return EventDeleteExtension{Extension: v.Extension}, nil + } + } + return nil, errUnknownStoreAction +} + +func WatchMessageEvent(c Event) *WatchMessage_Event { + switch v := c.(type) { + case EventCreateNode: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventUpdateNode: + if v.OldNode != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}, OldObject: &Object{Object: &Object_Node{Node: v.OldNode}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + } + case EventDeleteNode: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventCreateService: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventUpdateService: + if v.OldService != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}, OldObject: &Object{Object: &Object_Service{Service: v.OldService}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + } + case EventDeleteService: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventCreateTask: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventUpdateTask: + if v.OldTask != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}, OldObject: &Object{Object: &Object_Task{Task: v.OldTask}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + } + case EventDeleteTask: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventCreateNetwork: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventUpdateNetwork: + if v.OldNetwork != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}, OldObject: &Object{Object: &Object_Network{Network: v.OldNetwork}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + } + case EventDeleteNetwork: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventCreateCluster: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventUpdateCluster: + if v.OldCluster != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}, OldObject: &Object{Object: &Object_Cluster{Cluster: v.OldCluster}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + } + case EventDeleteCluster: + 
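+ // Note (descriptive comment, not in the generated source): as with the kinds
+ // above, a delete event carries only the current object, while an update event
+ // also attaches OldObject when a prior version is available.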
return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventCreateSecret: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventUpdateSecret: + if v.OldSecret != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}, OldObject: &Object{Object: &Object_Secret{Secret: v.OldSecret}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + } + case EventDeleteSecret: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventCreateConfig: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventUpdateConfig: + if v.OldConfig != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}, OldObject: &Object{Object: &Object_Config{Config: v.OldConfig}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + } + case EventDeleteConfig: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventCreateResource: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventUpdateResource: + if v.OldResource != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}, OldObject: &Object{Object: &Object_Resource{Resource: v.OldResource}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + } + case EventDeleteResource: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventCreateExtension: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + case EventUpdateExtension: + if v.OldExtension != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}, OldObject: &Object{Object: &Object_Extension{Extension: v.OldExtension}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + case EventDeleteExtension: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + return nil +} + +func ConvertWatchArgs(entries []*WatchRequest_WatchEntry) ([]Event, error) { + var events []Event + for _, entry := range entries { + var newEvents []Event + var err error + switch entry.Kind { + case "": + return nil, errNoKindSpecified + case "node": + newEvents, err = ConvertNodeWatch(entry.Action, entry.Filters) + case "service": + newEvents, err = ConvertServiceWatch(entry.Action, entry.Filters) + case "task": + newEvents, err = ConvertTaskWatch(entry.Action, entry.Filters) + case "network": + newEvents, err = ConvertNetworkWatch(entry.Action, entry.Filters) + case "cluster": + newEvents, err = ConvertClusterWatch(entry.Action, entry.Filters) + case "secret": + 
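+ // Note (descriptive comment, not in the generated source): each supported kind
+ // dispatches to its typed Convert*Watch helper; any kind without a dedicated
+ // case falls through to ConvertResourceWatch in the default branch below.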
newEvents, err = ConvertSecretWatch(entry.Action, entry.Filters) + case "config": + newEvents, err = ConvertConfigWatch(entry.Action, entry.Filters) + case "extension": + newEvents, err = ConvertExtensionWatch(entry.Action, entry.Filters) + default: + newEvents, err = ConvertResourceWatch(entry.Action, entry.Filters, entry.Kind) + } + if err != nil { + return nil, err + } + events = append(events, newEvents...) + } + return events, nil +} + +func (this *Meta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Meta{`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `CreatedAt:` + strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `UpdatedAt:` + strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `ManagerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ManagerStatus), "ManagerStatus", "ManagerStatus", 1) + `,`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Attachments:` + strings.Replace(fmt.Sprintf("%v", this.Attachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`, + `PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `PreviousSpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpecVersion), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "EndpointSpec", "EndpointSpec", 1) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `VirtualIPs:` + strings.Replace(fmt.Sprintf("%v", this.VirtualIPs), "Endpoint_VirtualIP", "Endpoint_VirtualIP", 1) + `,`, + `}`, + },
"") + return s +} +func (this *Endpoint_VirtualIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint_VirtualIP{`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Task{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `ServiceAnnotations:` + strings.Replace(strings.Replace(this.ServiceAnnotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `AssignedGenericResources:` + strings.Replace(fmt.Sprintf("%v", this.AssignedGenericResources), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachment) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachment{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Network{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkSpec", "NetworkSpec", 1), `&`, ``, 1) + `,`, + `DriverState:` + strings.Replace(fmt.Sprintf("%v", this.DriverState), "Driver", "Driver", 1) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Cluster) String() string { + if this == nil { + return "nil" + } + 
keysForBlacklistedCertificates := make([]string, 0, len(this.BlacklistedCertificates)) + for k, _ := range this.BlacklistedCertificates { + keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlacklistedCertificates) + mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{" + for _, k := range keysForBlacklistedCertificates { + mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k]) + } + mapStringForBlacklistedCertificates += "}" + s := strings.Join([]string{`&Cluster{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`, + `RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`, + `BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`, + `UnlockKeys:` + strings.Replace(fmt.Sprintf("%v", this.UnlockKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Secret{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `}`, + }, "") + return s +} +func (this *Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Config{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf3.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Extension{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func valueToStringObjects(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + 
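+ // Note (descriptive comment, not in the generated source): dereference the
+ // non-nil pointer so the underlying value is rendered as "*<value>" rather
+ // than as a pointer address.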
pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &google_protobuf.Timestamp{} + } + if err := m.CreatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + } + if err := m.UpdatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ManagerStatus == nil { + m.ManagerStatus = &ManagerStatus{} + } + if err := m.ManagerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attachment == nil { + m.Attachment = &NetworkAttachment{} + } + if err := m.Attachment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attachments = append(m.Attachments, &NetworkAttachment{}) + if err := m.Attachments[len(m.Attachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + 
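+ // (a clear high bit, b < 0x80, marks the final byte of the varint wire tag)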
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStatus == nil { + m.UpdateStatus = &UpdateStatus{} + } + if err := m.UpdateStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PreviousSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpec == nil { + m.PreviousSpec = &ServiceSpec{} + } + if err := m.PreviousSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpecVersion == nil { + m.PreviousSpecVersion = &Version{} + } + if err := m.PreviousSpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.Spec == nil { + m.Spec = &EndpointSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualIPs = append(m.VirtualIPs, &Endpoint_VirtualIP{}) + if err := m.VirtualIPs[len(m.VirtualIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint_VirtualIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VirtualIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
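+ // stringLen decoded above is the byte length of the Addr string that follows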
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex 
:= iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServiceAnnotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + m.DesiredState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredState |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachment{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignedGenericResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssignedGenericResources = append(m.AssignedGenericResources, &GenericResource{}) + if err := m.AssignedGenericResources[len(m.AssignedGenericResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.DriverAttachmentOpts[mapkey] = mapvalue + } else { + var mapvalue string + m.DriverAttachmentOpts[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverState == nil { + m.DriverState = &Driver{} + } + if err := m.DriverState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + 
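+ // decode the varint length prefix for the ID string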
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RootCA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionKeyLamportClock", wireType) + } + m.EncryptionKeyLamportClock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EncryptionKeyLamportClock |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field BlacklistedCertificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.BlacklistedCertificates == nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &BlacklistedCertificate{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.BlacklistedCertificates[mapkey] = mapvalue + } else { + var mapvalue *BlacklistedCertificate + m.BlacklistedCertificates[mapkey] = mapvalue + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKeys = append(m.UnlockKeys, &EncryptionKey{}) + if err := m.UnlockKeys[len(m.UnlockKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v 
|= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + 
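One detail worth calling out before the trailing bounds check below closes out these Unmarshal methods: the BlacklistedCertificates handling above is not special-cased map support on the wire. A protobuf map<string, BlacklistedCertificate> is encoded as a repeated embedded message whose key is field 1 and whose value is field 2, which is why the generated code walks the keykey, stringLenmapkey, and valuekey tags by hand. A minimal sketch of that entry layout, using hand-built sample bytes and a string value for brevity (not the actual message types):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// One entry of a map<string,string> field as it appears on the wire:
	// key "a" under field 1, value "b" under field 2 (wire type 2 = bytes).
	entry := []byte{0x0A, 0x01, 'a', 0x12, 0x01, 'b'}
	for i := 0; i < len(entry); {
		tag, n := binary.Uvarint(entry[i:]) // field number << 3 | wire type
		i += n
		strLen, n := binary.Uvarint(entry[i:]) // length prefix
		i += n
		fmt.Printf("field %d = %q\n", tag>>3, string(entry[i:i+int(strLen)]))
		i += int(strLen)
	}
	// Output:
	// field 1 = "a"
	// field 2 = "b"
}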
if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf3.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extension) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Extension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipObjects(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthObjects + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipObjects(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthObjects = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowObjects = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } + +var fileDescriptorObjects = []byte{ + // 1544 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x73, 0xdb, 0x4c, + 0x1d, 0xaf, 0x6c, 0xc5, 0x2f, 0x7f, 0x27, 0x26, 0xec, 0x13, 0x82, 0x6a, 0x82, 0x1d, 0xfc, 0x0c, + 0xcc, 0x33, 0xcf, 0x74, 0x9c, 0x12, 0x0a, 0xa4, 0x81, 0xd2, 0xda, 0x49, 0x68, 0x3d, 0xa5, 0x34, + 0xb3, 0x29, 0x2d, 0x37, 0xb1, 0x91, 0x36, 0xae, 0xb0, 0xac, 0xd5, 0x68, 0xd7, 0x2e, 0xbe, 0xf5, + 0x1c, 0x3e, 0x40, 0x6e, 0x1c, 0xfa, 0x2d, 0xb8, 0x70, 0xe0, 0xd4, 0x23, 0xc3, 0x81, 0xe1, 0x94, + 0xa1, 0xfe, 0x16, 0xcc, 0x70, 0x60, 0x76, 0xb5, 0xb2, 0x95, 0x58, 0x79, 0x63, 0x3a, 0x19, 0x4e, + 0xd1, 0x6a, 0x7f, 0xbf, 0xff, 0x9b, 0xfe, 0x6f, 0x31, 0xdc, 0xeb, 0x79, 0xe2, 0xed, 0xf0, 0xb0, + 0xe5, 0xb0, 0xc1, 0x86, 0xcb, 0x9c, 0x3e, 0x8d, 0x36, 0xf8, 0x3b, 0x12, 0x0d, 0xfa, 0x9e, 0xd8, + 0x20, 0xa1, 0xb7, 0xc1, 0x0e, 0x7f, 0x4f, 0x1d, 0xc1, 0x5b, 0x61, 0xc4, 0x04, 0x43, 0x28, 0x86, + 0xb4, 0x12, 0x48, 0x6b, 0xf4, 0xc3, 0xda, 0xd7, 0x57, 0x48, 0x10, 0xe3, 0x90, 0x6a, 0xfe, 0x95, + 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x7a, 0x8c, 0xf5, 0x7c, 0xba, 0xa1, 0x4e, 0x87, 0xc3, 0xa3, + 0x0d, 0xe1, 0x0d, 0x28, 0x17, 0x64, 0x10, 0x6a, 0xc0, 0x4a, 0x8f, 0xf5, 0x98, 0x7a, 0xdc, 0x90, + 0x4f, 0xfa, 0xed, 0xdd, 0xf3, 0x34, 0x12, 0x8c, 0xf5, 0xd5, 0x4f, 0x2f, 0xd1, 0x3e, 0x85, 0x87, + 0xfe, 0xb0, 0xe7, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xd9, 0x00, 0xf3, 0x05, 0x15, 0x04, 0xfd, + 0x0c, 0x8a, 0x23, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x6e, 0x7c, 0x55, 0xd9, 0xfc, 0x4e, 0x6b, + 0x3e, 0x22, 0xad, 0xd7, 0x31, 0xa4, 0x63, 0x7e, 0x3c, 0x6d, 0xdc, 0xc1, 
0x09, 0x03, 0x3d, 0x04, + 0x70, 0x22, 0x4a, 0x04, 0x75, 0x6d, 0x22, 0xac, 0x9c, 0xe2, 0xd7, 0x5a, 0xb1, 0xb9, 0xad, 0x44, + 0x7f, 0xeb, 0x55, 0xe2, 0x25, 0x2e, 0x6b, 0x74, 0x5b, 0x48, 0xea, 0x30, 0x74, 0x13, 0x6a, 0xfe, + 0x6a, 0xaa, 0x46, 0xb7, 0x45, 0xf3, 0xfd, 0x02, 0x98, 0xbf, 0x66, 0x2e, 0x45, 0xab, 0x90, 0xf3, + 0x5c, 0x65, 0x76, 0xb9, 0x53, 0x98, 0x9c, 0x36, 0x72, 0xdd, 0x5d, 0x9c, 0xf3, 0x5c, 0xb4, 0x09, + 0xe6, 0x80, 0x0a, 0xa2, 0x0d, 0xb2, 0xb2, 0x1c, 0x92, 0xbe, 0x6b, 0x6f, 0x14, 0x16, 0xfd, 0x04, + 0x4c, 0xf9, 0xa9, 0xb4, 0x25, 0x6b, 0x59, 0x1c, 0xa9, 0xf3, 0x20, 0xa4, 0x4e, 0xc2, 0x93, 0x78, + 0xb4, 0x07, 0x15, 0x97, 0x72, 0x27, 0xf2, 0x42, 0x21, 0x63, 0x68, 0x2a, 0xfa, 0x97, 0x17, 0xd1, + 0x77, 0x67, 0x50, 0x9c, 0xe6, 0xa1, 0x9f, 0x43, 0x81, 0x0b, 0x22, 0x86, 0xdc, 0x5a, 0x50, 0x12, + 0xea, 0x17, 0x1a, 0xa0, 0x50, 0xda, 0x04, 0xcd, 0x41, 0xcf, 0xa0, 0x3a, 0x20, 0x01, 0xe9, 0xd1, + 0xc8, 0xd6, 0x52, 0x0a, 0x4a, 0xca, 0xf7, 0x32, 0x5d, 0x8f, 0x91, 0xb1, 0x20, 0xbc, 0x34, 0x48, + 0x1f, 0x51, 0x17, 0x80, 0x08, 0x41, 0x9c, 0xb7, 0x03, 0x1a, 0x08, 0xab, 0xa8, 0xa4, 0x7c, 0x3f, + 0xd3, 0x16, 0x2a, 0xde, 0xb1, 0xa8, 0xdf, 0x9e, 0x82, 0x3b, 0x39, 0xcb, 0xc0, 0x29, 0x32, 0x7a, + 0x0a, 0x15, 0x87, 0x46, 0xc2, 0x3b, 0xf2, 0x1c, 0x22, 0xa8, 0x55, 0x52, 0xb2, 0x1a, 0x59, 0xb2, + 0x76, 0x66, 0x30, 0xed, 0x58, 0x9a, 0x89, 0xee, 0x83, 0x19, 0x31, 0x9f, 0x5a, 0xe5, 0x75, 0xe3, + 0xab, 0xea, 0xc5, 0x9f, 0x06, 0x33, 0x9f, 0x62, 0x85, 0x94, 0xaa, 0x67, 0x86, 0x70, 0x0b, 0xd6, + 0xf3, 0xd7, 0x76, 0x03, 0xa7, 0x99, 0xdb, 0xab, 0xc7, 0x27, 0x4d, 0x04, 0xcb, 0x25, 0x63, 0xd9, + 0x50, 0x79, 0x66, 0xdc, 0x37, 0x7e, 0x6b, 0xfc, 0xce, 0x68, 0xfe, 0x27, 0x0f, 0xc5, 0x03, 0x1a, + 0x8d, 0x3c, 0xe7, 0xf3, 0x66, 0xe1, 0xc3, 0x33, 0x59, 0x98, 0x19, 0x2c, 0xad, 0x76, 0x2e, 0x11, + 0xb7, 0xa0, 0x44, 0x03, 0x37, 0x64, 0x5e, 0x20, 0x74, 0x16, 0x66, 0x46, 0x6a, 0x4f, 0x63, 0xf0, + 0x14, 0x8d, 0xf6, 0x60, 0x29, 0x2e, 0x2e, 0xfb, 0x4c, 0x0a, 0xae, 0x67, 0xd1, 0x7f, 0xa3, 0x80, + 0x3a, 0x77, 0x16, 0x87, 0xa9, 0x13, 0xda, 0x85, 0xa5, 0x30, 0xa2, 0x23, 0x8f, 0x0d, 0xb9, 0xad, + 0x9c, 0x28, 0x5c, 0xcb, 0x09, 0xbc, 0x98, 0xb0, 0xe4, 0x09, 0xfd, 0x02, 0x16, 0x25, 0xd9, 0x4e, + 0x9a, 0x12, 0x5c, 0xd9, 0x94, 0x70, 0x45, 0x12, 0xf4, 0x01, 0xbd, 0x84, 0x6f, 0x9d, 0xb1, 0x62, + 0x2a, 0xa8, 0x72, 0xb5, 0xa0, 0x2f, 0xd2, 0x96, 0xe8, 0x97, 0xdb, 0xe8, 0xf8, 0xa4, 0x59, 0x85, + 0xc5, 0x74, 0x0a, 0x34, 0xff, 0x94, 0x83, 0x52, 0x12, 0x48, 0xf4, 0x40, 0x7f, 0x33, 0xe3, 0xe2, + 0xa8, 0x25, 0x58, 0xe5, 0x6f, 0xfc, 0xb9, 0x1e, 0xc0, 0x42, 0xc8, 0x22, 0xc1, 0xad, 0x9c, 0x4a, + 0xce, 0xcc, 0x7a, 0xdf, 0x67, 0x91, 0xd8, 0x61, 0xc1, 0x91, 0xd7, 0xc3, 0x31, 0x18, 0xbd, 0x81, + 0xca, 0xc8, 0x8b, 0xc4, 0x90, 0xf8, 0xb6, 0x17, 0x72, 0x2b, 0xaf, 0xb8, 0x3f, 0xb8, 0x4c, 0x65, + 0xeb, 0x75, 0x8c, 0xef, 0xee, 0x77, 0xaa, 0x93, 0xd3, 0x06, 0x4c, 0x8f, 0x1c, 0x83, 0x16, 0xd5, + 0x0d, 0x79, 0xed, 0x05, 0x94, 0xa7, 0x37, 0xe8, 0x1e, 0x40, 0x10, 0xd7, 0x85, 0x3d, 0xcd, 0xec, + 0xa5, 0xc9, 0x69, 0xa3, 0xac, 0xab, 0xa5, 0xbb, 0x8b, 0xcb, 0x1a, 0xd0, 0x75, 0x11, 0x02, 0x93, + 0xb8, 0x6e, 0xa4, 0xf2, 0xbc, 0x8c, 0xd5, 0x73, 0xf3, 0x8f, 0x45, 0x30, 0x5f, 0x11, 0xde, 0xbf, + 0xed, 0x16, 0x2d, 0x75, 0xce, 0x55, 0xc6, 0x3d, 0x00, 0x1e, 0xe7, 0x9b, 0x74, 0xc7, 0x9c, 0xb9, + 0xa3, 0xb3, 0x50, 0xba, 0xa3, 0x01, 0xb1, 0x3b, 0xdc, 0x67, 0x42, 0x15, 0x81, 0x89, 0xd5, 0x33, + 0xfa, 0x12, 0x8a, 0x01, 0x73, 0x15, 0xbd, 0xa0, 0xe8, 0x30, 0x39, 0x6d, 0x14, 0x64, 0xd3, 0xe9, + 0xee, 0xe2, 0x82, 0xbc, 0xea, 0xba, 0xaa, 0xe9, 0x04, 0x01, 0x13, 0x44, 0x36, 0x74, 0xae, 0x7b, + 
0x67, 0x66, 0xf6, 0xb7, 0x67, 0xb0, 0xa4, 0xdf, 0xa5, 0x98, 0xe8, 0x35, 0x7c, 0x91, 0xd8, 0x9b, + 0x16, 0x58, 0xba, 0x89, 0x40, 0xa4, 0x25, 0xa4, 0x6e, 0x52, 0x33, 0xa6, 0x7c, 0xf1, 0x8c, 0x51, + 0x11, 0xcc, 0x9a, 0x31, 0x1d, 0x58, 0x72, 0x29, 0xf7, 0x22, 0xea, 0xaa, 0x36, 0x41, 0x55, 0x65, + 0x56, 0x37, 0xbf, 0x7b, 0x99, 0x10, 0x8a, 0x17, 0x35, 0x47, 0x9d, 0x50, 0x1b, 0x4a, 0x3a, 0x6f, + 0xb8, 0x55, 0xb9, 0x49, 0x53, 0x9e, 0xd2, 0xce, 0xb4, 0xb9, 0xc5, 0x1b, 0xb5, 0xb9, 0x87, 0x00, + 0x3e, 0xeb, 0xd9, 0x6e, 0xe4, 0x8d, 0x68, 0x64, 0x2d, 0xe9, 0x8d, 0x23, 0x83, 0xbb, 0xab, 0x10, + 0xb8, 0xec, 0xb3, 0x5e, 0xfc, 0x38, 0xd7, 0x94, 0xaa, 0x37, 0x6c, 0x4a, 0x04, 0x6a, 0x84, 0x73, + 0xaf, 0x17, 0x50, 0xd7, 0xee, 0xd1, 0x80, 0x46, 0x9e, 0x63, 0x47, 0x94, 0xb3, 0x61, 0xe4, 0x50, + 0x6e, 0x7d, 0x43, 0x45, 0x22, 0x73, 0x67, 0x78, 0x1a, 0x83, 0xb1, 0xc6, 0x62, 0x2b, 0x11, 0x73, + 0xee, 0x82, 0x6f, 0xd7, 0x8e, 0x4f, 0x9a, 0xab, 0xb0, 0x92, 0x6e, 0x53, 0x5b, 0xc6, 0x13, 0xe3, + 0x99, 0xb1, 0x6f, 0x34, 0xff, 0x9a, 0x83, 0x6f, 0xce, 0xc5, 0x14, 0xfd, 0x18, 0x8a, 0x3a, 0xaa, + 0x97, 0x6d, 0x7e, 0x9a, 0x87, 0x13, 0x2c, 0x5a, 0x83, 0xb2, 0x2c, 0x71, 0xca, 0x39, 0x8d, 0x9b, + 0x57, 0x19, 0xcf, 0x5e, 0x20, 0x0b, 0x8a, 0xc4, 0xf7, 0x88, 0xbc, 0xcb, 0xab, 0xbb, 0xe4, 0x88, + 0x86, 0xb0, 0x1a, 0x87, 0xde, 0x9e, 0x0d, 0x58, 0x9b, 0x85, 0x82, 0x5b, 0xa6, 0xf2, 0xff, 0xf1, + 0xb5, 0x32, 0x41, 0x7f, 0x9c, 0xd9, 0x8b, 0x97, 0xa1, 0xe0, 0x7b, 0x81, 0x88, 0xc6, 0x78, 0xc5, + 0xcd, 0xb8, 0xaa, 0x3d, 0x85, 0xbb, 0x17, 0x52, 0xd0, 0x32, 0xe4, 0xfb, 0x74, 0x1c, 0xb7, 0x27, + 0x2c, 0x1f, 0xd1, 0x0a, 0x2c, 0x8c, 0x88, 0x3f, 0xa4, 0xba, 0x9b, 0xc5, 0x87, 0xed, 0xdc, 0x96, + 0xd1, 0xfc, 0x90, 0x83, 0xa2, 0x36, 0xe7, 0xb6, 0x47, 0xbe, 0x56, 0x3b, 0xd7, 0xd8, 0x1e, 0xc1, + 0xa2, 0x0e, 0x69, 0x5c, 0x91, 0xe6, 0x95, 0x39, 0x5d, 0x89, 0xf1, 0x71, 0x35, 0x3e, 0x02, 0xd3, + 0x0b, 0xc9, 0x40, 0x8f, 0xfb, 0x4c, 0xcd, 0xdd, 0xfd, 0xf6, 0x8b, 0x97, 0x61, 0xdc, 0x58, 0x4a, + 0x93, 0xd3, 0x86, 0x29, 0x5f, 0x60, 0x45, 0xcb, 0x1c, 0x8c, 0x7f, 0x5f, 0x80, 0xe2, 0x8e, 0x3f, + 0xe4, 0x82, 0x46, 0xb7, 0x1d, 0x24, 0xad, 0x76, 0x2e, 0x48, 0x3b, 0x50, 0x8c, 0x18, 0x13, 0xb6, + 0x43, 0x2e, 0x8b, 0x0f, 0x66, 0x4c, 0xec, 0xb4, 0x3b, 0x55, 0x49, 0x94, 0xbd, 0x3d, 0x3e, 0xe3, + 0x82, 0xa4, 0xee, 0x10, 0xf4, 0x06, 0x56, 0x93, 0x89, 0x78, 0xc8, 0x98, 0xe0, 0x22, 0x22, 0xa1, + 0xdd, 0xa7, 0x63, 0xb9, 0x2b, 0xe5, 0x2f, 0x5a, 0xb4, 0xf7, 0x02, 0x27, 0x1a, 0xab, 0xe0, 0x3d, + 0xa7, 0x63, 0xbc, 0xa2, 0x05, 0x74, 0x12, 0xfe, 0x73, 0x3a, 0xe6, 0xe8, 0x31, 0xac, 0xd1, 0x29, + 0x4c, 0x4a, 0xb4, 0x7d, 0x32, 0x90, 0xb3, 0xde, 0x76, 0x7c, 0xe6, 0xf4, 0xd5, 0xb8, 0x31, 0xf1, + 0x5d, 0x9a, 0x16, 0xf5, 0xab, 0x18, 0xb1, 0x23, 0x01, 0x88, 0x83, 0x75, 0xe8, 0x13, 0xa7, 0xef, + 0x7b, 0x5c, 0xfe, 0x2f, 0x95, 0xda, 0x9b, 0xe5, 0xc4, 0x90, 0xb6, 0x6d, 0x5d, 0x12, 0xad, 0x56, + 0x67, 0xc6, 0x4d, 0x6d, 0xe1, 0xba, 0xa2, 0xbe, 0x7d, 0x98, 0x7d, 0x8b, 0x3a, 0x50, 0x19, 0x06, + 0x52, 0x7d, 0x1c, 0x83, 0xf2, 0x75, 0x63, 0x00, 0x31, 0x4b, 0x79, 0xbe, 0x06, 0xe6, 0x91, 0xdc, + 0x61, 0xe4, 0x18, 0x29, 0xc5, 0xc9, 0xf5, 0xcb, 0xee, 0xfe, 0x01, 0x56, 0x6f, 0x6b, 0x23, 0x58, + 0xbb, 0xcc, 0xb4, 0x8c, 0xca, 0x7d, 0x92, 0xae, 0xdc, 0xca, 0xe6, 0xd7, 0x59, 0xd6, 0x64, 0x8b, + 0x4c, 0x55, 0x79, 0x66, 0x52, 0xff, 0xc5, 0x80, 0xc2, 0x01, 0x75, 0x22, 0x2a, 0x3e, 0x6b, 0x4e, + 0x6f, 0x9d, 0xc9, 0xe9, 0x7a, 0xf6, 0x9a, 0x2c, 0xb5, 0xce, 0xa5, 0x74, 0x0d, 0x4a, 0x5e, 0x20, + 0x68, 0x14, 0x10, 0x5f, 0xe5, 0x74, 0x09, 0x4f, 0xcf, 0x99, 0x0e, 0x7c, 0x30, 0xa0, 0x10, 0xef, + 0x91, 0xb7, 0xed, 0x40, 
0xac, 0xf5, 0xbc, 0x03, 0x99, 0x46, 0xfe, 0xdb, 0x80, 0x52, 0x32, 0xce, + 0x3e, 0xab, 0x99, 0xe7, 0xf6, 0xb2, 0xfc, 0xff, 0xbc, 0x97, 0x21, 0x30, 0xfb, 0x5e, 0xa0, 0x37, + 0x48, 0xac, 0x9e, 0x51, 0x0b, 0x8a, 0x21, 0x19, 0xfb, 0x8c, 0xb8, 0xba, 0x8d, 0xae, 0xcc, 0xfd, + 0x86, 0xd1, 0x0e, 0xc6, 0x38, 0x01, 0x6d, 0xaf, 0x1c, 0x9f, 0x34, 0x97, 0xa1, 0x9a, 0xf6, 0xfc, + 0xad, 0xd1, 0xfc, 0x87, 0x01, 0xe5, 0xbd, 0x3f, 0x08, 0x1a, 0xa8, 0x6d, 0xe1, 0xff, 0xd2, 0xf9, + 0xf5, 0xf9, 0xdf, 0x39, 0xca, 0x67, 0x7e, 0xc2, 0xc8, 0xfa, 0xa8, 0x1d, 0xeb, 0xe3, 0xa7, 0xfa, + 0x9d, 0x7f, 0x7e, 0xaa, 0xdf, 0x79, 0x3f, 0xa9, 0x1b, 0x1f, 0x27, 0x75, 0xe3, 0x6f, 0x93, 0xba, + 0xf1, 0xaf, 0x49, 0xdd, 0x38, 0x2c, 0xa8, 0xf8, 0xfc, 0xe8, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x07, 0xc5, 0x5a, 0x5b, 0xae, 0x13, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/raft.pb.go b/vendor/github.com/docker/swarmkit/api/raft.pb.go new file mode 100644 index 0000000000..495bc0e379 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/raft.pb.go @@ -0,0 +1,4029 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/raft.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import raftpb "github.com/coreos/etcd/raft/raftpb" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. +type StoreActionKind int32 + +const ( + StoreActionKindUnknown StoreActionKind = 0 + StoreActionKindCreate StoreActionKind = 1 + StoreActionKindUpdate StoreActionKind = 2 + StoreActionKindRemove StoreActionKind = 3 +) + +var StoreActionKind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STORE_ACTION_CREATE", + 2: "STORE_ACTION_UPDATE", + 3: "STORE_ACTION_REMOVE", +} +var StoreActionKind_value = map[string]int32{ + "UNKNOWN": 0, + "STORE_ACTION_CREATE": 1, + "STORE_ACTION_UPDATE": 2, + "STORE_ACTION_REMOVE": 3, +} + +func (x StoreActionKind) String() string { + return proto.EnumName(StoreActionKind_name, int32(x)) +} +func (StoreActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type RaftMember struct { + // RaftID specifies the internal ID used by the manager in a raft context, it can never be modified + // and is used only for information purposes + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // NodeID is the node's ID. 
+ NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Addr specifies the address of the member + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` + // Status provides the current status of the manager from the perspective of another manager. + Status RaftMemberStatus `protobuf:"bytes,4,opt,name=status" json:"status"` +} + +func (m *RaftMember) Reset() { *m = RaftMember{} } +func (*RaftMember) ProtoMessage() {} +func (*RaftMember) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type JoinRequest struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type JoinResponse struct { + // RaftID is the ID assigned to the new member. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // Members is the membership set of the cluster. + Members []*RaftMember `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. + RemovedMembers []uint64 `protobuf:"varint,3,rep,name=removed_members,json=removedMembers" json:"removed_members,omitempty"` +} + +func (m *JoinResponse) Reset() { *m = JoinResponse{} } +func (*JoinResponse) ProtoMessage() {} +func (*JoinResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type LeaveRequest struct { + Node *RaftMember `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type LeaveResponse struct { +} + +func (m *LeaveResponse) Reset() { *m = LeaveResponse{} } +func (*LeaveResponse) ProtoMessage() {} +func (*LeaveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ProcessRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *ProcessRaftMessageRequest) Reset() { *m = ProcessRaftMessageRequest{} } +func (*ProcessRaftMessageRequest) ProtoMessage() {} +func (*ProcessRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ProcessRaftMessageResponse struct { +} + +func (m *ProcessRaftMessageResponse) Reset() { *m = ProcessRaftMessageResponse{} } +func (*ProcessRaftMessageResponse) ProtoMessage() {} +func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// Raft message streaming request. +type StreamRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *StreamRaftMessageRequest) Reset() { *m = StreamRaftMessageRequest{} } +func (*StreamRaftMessageRequest) ProtoMessage() {} +func (*StreamRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// Raft message streaming response. 
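A note on the field declarations in these generated structs: the protobuf struct tag is where the wire type, field number, and JSON name from raft.proto survive into the Go type, and it is what reflection-based tooling keys off to recover the schema. A small illustrative sketch (the member type below is a stand-in, not the vendored RaftMember):

package main

import (
	"fmt"
	"reflect"
)

// member mirrors the shape of the generated declarations above; the
// `protobuf` struct tag records wire type, field number, and names.
type member struct {
	RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3"`
	Addr   string `protobuf:"bytes,3,opt,name=addr,proto3"`
}

func main() {
	t := reflect.TypeOf(member{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		fmt.Printf("%-6s -> %s\n", f.Name, f.Tag.Get("protobuf"))
	}
}

The StreamRaftMessageResponse declared next carries no fields; it exists only so the stream has a typed acknowledgement to return.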
+type StreamRaftMessageResponse struct { +} + +func (m *StreamRaftMessageResponse) Reset() { *m = StreamRaftMessageResponse{} } +func (*StreamRaftMessageResponse) ProtoMessage() {} +func (*StreamRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +type ResolveAddressRequest struct { + // raft_id is the ID to resolve to an address. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` +} + +func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} } +func (*ResolveAddressRequest) ProtoMessage() {} +func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} } + +type ResolveAddressResponse struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} } +func (*ResolveAddressResponse) ProtoMessage() {} +func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} } + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +type InternalRaftRequest struct { + ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Action []StoreAction `protobuf:"bytes,2,rep,name=action" json:"action"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{11} } + +// StoreAction defines a target and operation to apply on the storage system. +type StoreAction struct { + Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"` + // Types that are valid to be assigned to Target: + // *StoreAction_Node + // *StoreAction_Service + // *StoreAction_Task + // *StoreAction_Network + // *StoreAction_Cluster + // *StoreAction_Secret + // *StoreAction_Resource + // *StoreAction_Extension + // *StoreAction_Config + Target isStoreAction_Target `protobuf_oneof:"target"` +} + +func (m *StoreAction) Reset() { *m = StoreAction{} } +func (*StoreAction) ProtoMessage() {} +func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{12} } + +type isStoreAction_Target interface { + isStoreAction_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type StoreAction_Node struct { + Node *Node `protobuf:"bytes,2,opt,name=node,oneof"` +} +type StoreAction_Service struct { + Service *Service `protobuf:"bytes,3,opt,name=service,oneof"` +} +type StoreAction_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type StoreAction_Network struct { + Network *Network `protobuf:"bytes,5,opt,name=network,oneof"` +} +type StoreAction_Cluster struct { + Cluster *Cluster `protobuf:"bytes,6,opt,name=cluster,oneof"` +} +type StoreAction_Secret struct { + Secret *Secret `protobuf:"bytes,7,opt,name=secret,oneof"` +} +type StoreAction_Resource struct { + Resource *Resource `protobuf:"bytes,8,opt,name=resource,oneof"` +} +type StoreAction_Extension struct { + Extension *Extension `protobuf:"bytes,9,opt,name=extension,oneof"` +} +type StoreAction_Config struct { + Config *Config `protobuf:"bytes,10,opt,name=config,oneof"` +} + +func (*StoreAction_Node) isStoreAction_Target() {} +func (*StoreAction_Service) isStoreAction_Target() {} +func 
(*StoreAction_Task) isStoreAction_Target() {} +func (*StoreAction_Network) isStoreAction_Target() {} +func (*StoreAction_Cluster) isStoreAction_Target() {} +func (*StoreAction_Secret) isStoreAction_Target() {} +func (*StoreAction_Resource) isStoreAction_Target() {} +func (*StoreAction_Extension) isStoreAction_Target() {} +func (*StoreAction_Config) isStoreAction_Target() {} + +func (m *StoreAction) GetTarget() isStoreAction_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StoreAction) GetNode() *Node { + if x, ok := m.GetTarget().(*StoreAction_Node); ok { + return x.Node + } + return nil +} + +func (m *StoreAction) GetService() *Service { + if x, ok := m.GetTarget().(*StoreAction_Service); ok { + return x.Service + } + return nil +} + +func (m *StoreAction) GetTask() *Task { + if x, ok := m.GetTarget().(*StoreAction_Task); ok { + return x.Task + } + return nil +} + +func (m *StoreAction) GetNetwork() *Network { + if x, ok := m.GetTarget().(*StoreAction_Network); ok { + return x.Network + } + return nil +} + +func (m *StoreAction) GetCluster() *Cluster { + if x, ok := m.GetTarget().(*StoreAction_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *StoreAction) GetSecret() *Secret { + if x, ok := m.GetTarget().(*StoreAction_Secret); ok { + return x.Secret + } + return nil +} + +func (m *StoreAction) GetResource() *Resource { + if x, ok := m.GetTarget().(*StoreAction_Resource); ok { + return x.Resource + } + return nil +} + +func (m *StoreAction) GetExtension() *Extension { + if x, ok := m.GetTarget().(*StoreAction_Extension); ok { + return x.Extension + } + return nil +} + +func (m *StoreAction) GetConfig() *Config { + if x, ok := m.GetTarget().(*StoreAction_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
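Before the XXX_OneofFuncs plumbing, it is worth noting what the Target oneof buys consumers: exactly one of the nine wrapper structs sits behind the interface at a time, and callers either use the typed getters above or type-switch on GetTarget() directly. A stripped-down sketch of the same pattern (the types here are illustrative stand-ins, not the generated ones):

package main

import "fmt"

// target mimics the generated isStoreAction_Target interface: each wrapper
// struct implements a marker method, so only one variant fits at a time.
type target interface{ isTarget() }

type nodeTarget struct{ ID string }
type taskTarget struct{ ID string }

func (nodeTarget) isTarget() {}
func (taskTarget) isTarget() {}

type storeAction struct{ Target target }

func main() {
	a := storeAction{Target: nodeTarget{ID: "n1"}}
	switch t := a.Target.(type) { // consumers branch exactly like this
	case nodeTarget:
		fmt.Println("apply to node:", t.ID)
	case taskTarget:
		fmt.Println("apply to task:", t.ID)
	}
}

The XXX_OneofFuncs hook below is what teaches the proto package to marshal, unmarshal, and size this interface-typed field.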
+func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{ + (*StoreAction_Node)(nil), + (*StoreAction_Service)(nil), + (*StoreAction_Task)(nil), + (*StoreAction_Network)(nil), + (*StoreAction_Cluster)(nil), + (*StoreAction_Secret)(nil), + (*StoreAction_Resource)(nil), + (*StoreAction_Extension)(nil), + (*StoreAction_Config)(nil), + } +} + +func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *StoreAction_Service: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *StoreAction_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *StoreAction_Network: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *StoreAction_Cluster: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *StoreAction_Secret: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *StoreAction_Resource: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *StoreAction_Extension: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *StoreAction_Config: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("StoreAction.Target has unexpected type %T", x) + } + return nil +} + +func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StoreAction) + switch tag { + case 2: // target.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Node{msg} + return true, err + case 3: // target.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Service{msg} + return true, err + case 4: // target.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Task{msg} + return true, err + case 5: // target.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Network{msg} + return true, err + case 6: // target.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Cluster{msg} + return true, err + case 7: // target.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Secret{msg} 
+ return true, err + case 8: // target.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Resource{msg} + return true, err + case 9: // target.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Extension{msg} + return true, err + case 10: // target.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Config{msg} + return true, err + default: + return false, nil + } +} + +func _StoreAction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*RaftMember)(nil), "docker.swarmkit.v1.RaftMember") + proto.RegisterType((*JoinRequest)(nil), "docker.swarmkit.v1.JoinRequest") + proto.RegisterType((*JoinResponse)(nil), "docker.swarmkit.v1.JoinResponse") + proto.RegisterType((*LeaveRequest)(nil), "docker.swarmkit.v1.LeaveRequest") + proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse") + proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest") + proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse") + proto.RegisterType((*StreamRaftMessageRequest)(nil), "docker.swarmkit.v1.StreamRaftMessageRequest") + proto.RegisterType((*StreamRaftMessageResponse)(nil), "docker.swarmkit.v1.StreamRaftMessageResponse") + proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest") + proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse") + proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest") + proto.RegisterType((*StoreAction)(nil), "docker.swarmkit.v1.StoreAction") + proto.RegisterEnum("docker.swarmkit.v1.StoreActionKind", StoreActionKind_name, 
StoreActionKind_value) +} + +type authenticatedWrapperRaftServer struct { + local RaftServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftServer(local RaftServer, authorize func(context.Context, []string) error) RaftServer { + return &authenticatedWrapperRaftServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) +} + +func (p *authenticatedWrapperRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.StreamRaftMessage(stream) +} + +func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) +} + +type authenticatedWrapperRaftMembershipServer struct { + local RaftMembershipServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftMembershipServer(local RaftMembershipServer, authorize func(context.Context, []string) error) RaftMembershipServer { + return &authenticatedWrapperRaftMembershipServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Join(ctx, r) +} + +func (p *authenticatedWrapperRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Leave(ctx, r) +} + +func (m *RaftMember) Copy() *RaftMember { + if m == nil { + return nil + } + o := &RaftMember{} + o.CopyFrom(m) + return o +} + +func (m *RaftMember) CopyFrom(src interface{}) { + + o := src.(*RaftMember) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) +} + +func (m *JoinRequest) Copy() *JoinRequest { + if m == nil { + return nil + } + o := &JoinRequest{} + o.CopyFrom(m) + return o +} + +func (m *JoinRequest) CopyFrom(src interface{}) { + + o := src.(*JoinRequest) + *m = *o +} + +func (m *JoinResponse) Copy() *JoinResponse { + if m == nil { + return nil + } + o := &JoinResponse{} + o.CopyFrom(m) + return o +} + +func (m *JoinResponse) CopyFrom(src interface{}) { + + o := src.(*JoinResponse) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.RemovedMembers != nil { + m.RemovedMembers = make([]uint64, len(o.RemovedMembers)) + copy(m.RemovedMembers, o.RemovedMembers) + } + +} + +func (m *LeaveRequest) Copy() *LeaveRequest { + if m == nil { + return nil + } + o := &LeaveRequest{} + o.CopyFrom(m) + return o +} + +func (m *LeaveRequest) CopyFrom(src interface{}) { + + o := src.(*LeaveRequest) + *m = *o + if o.Node != nil { + m.Node = &RaftMember{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *LeaveResponse) 
Copy() *LeaveResponse { + if m == nil { + return nil + } + o := &LeaveResponse{} + o.CopyFrom(m) + return o +} + +func (m *LeaveResponse) CopyFrom(src interface{}) {} +func (m *ProcessRaftMessageResponse) Copy() *ProcessRaftMessageResponse { + if m == nil { + return nil + } + o := &ProcessRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *ProcessRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *StreamRaftMessageResponse) Copy() *StreamRaftMessageResponse { + if m == nil { + return nil + } + o := &StreamRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *StreamRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest { + if m == nil { + return nil + } + o := &ResolveAddressRequest{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressRequest) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressRequest) + *m = *o +} + +func (m *ResolveAddressResponse) Copy() *ResolveAddressResponse { + if m == nil { + return nil + } + o := &ResolveAddressResponse{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressResponse) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressResponse) + *m = *o +} + +func (m *InternalRaftRequest) Copy() *InternalRaftRequest { + if m == nil { + return nil + } + o := &InternalRaftRequest{} + o.CopyFrom(m) + return o +} + +func (m *InternalRaftRequest) CopyFrom(src interface{}) { + + o := src.(*InternalRaftRequest) + *m = *o + if o.Action != nil { + m.Action = make([]StoreAction, len(o.Action)) + for i := range m.Action { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Action[i], &o.Action[i]) + } + } + +} + +func (m *StoreAction) Copy() *StoreAction { + if m == nil { + return nil + } + o := &StoreAction{} + o.CopyFrom(m) + return o +} + +func (m *StoreAction) CopyFrom(src interface{}) { + + o := src.(*StoreAction) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *StoreAction_Node: + v := StoreAction_Node{ + Node: &Node{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Node, o.GetNode()) + m.Target = &v + case *StoreAction_Service: + v := StoreAction_Service{ + Service: &Service{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Service, o.GetService()) + m.Target = &v + case *StoreAction_Task: + v := StoreAction_Task{ + Task: &Task{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + m.Target = &v + case *StoreAction_Network: + v := StoreAction_Network{ + Network: &Network{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Network, o.GetNetwork()) + m.Target = &v + case *StoreAction_Cluster: + v := StoreAction_Cluster{ + Cluster: &Cluster{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Target = &v + case *StoreAction_Secret: + v := StoreAction_Secret{ + Secret: &Secret{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + m.Target = &v + case *StoreAction_Resource: + v := StoreAction_Resource{ + Resource: &Resource{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Resource, o.GetResource()) + m.Target = &v + case *StoreAction_Extension: + v := StoreAction_Extension{ + Extension: &Extension{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Extension, o.GetExtension()) + m.Target = &v + case *StoreAction_Config: + v := StoreAction_Config{ + Config: &Config{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + m.Target = &v + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. 
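The Copy/CopyFrom methods above all follow one idiom: shallow-copy the whole struct value, then re-allocate every pointer, slice, and map member so the clone shares no mutable memory with the original. A minimal sketch of the same idiom, simplified to a typed parameter instead of the generated interface{} one (the types are illustrative):

package main

import "fmt"

type member struct{ Addr string }

type joinResponse struct {
	RaftID  uint64
	Members []*member
}

func (m *joinResponse) Copy() *joinResponse {
	if m == nil {
		return nil
	}
	o := &joinResponse{}
	o.CopyFrom(m)
	return o
}

// CopyFrom shallow-copies src, then deep-copies the slice of pointers so
// mutations on the clone never reach the original.
func (m *joinResponse) CopyFrom(src *joinResponse) {
	*m = *src // copies value fields such as RaftID
	if src.Members != nil {
		m.Members = make([]*member, len(src.Members))
		for i := range src.Members {
			cp := *src.Members[i]
			m.Members[i] = &cp
		}
	}
}

func main() {
	a := &joinResponse{RaftID: 1, Members: []*member{{Addr: "10.0.0.1"}}}
	b := a.Copy()
	b.Members[0].Addr = "10.0.0.2"
	fmt.Println(a.Members[0].Addr, b.Members[0].Addr) // 10.0.0.1 10.0.0.2
}

The blank-identifier declarations that follow simply pin imports that only some generated code paths use.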
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Raft service + +type RaftClient interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) + // ResolveAddress returns the address where the node with the given ID can be reached. + ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) +} + +type raftClient struct { + cc *grpc.ClientConn +} + +func NewRaftClient(cc *grpc.ClientConn) RaftClient { + return &raftClient{cc} +} + +func (c *raftClient) ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) { + out := new(ProcessRaftMessageResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ProcessRaftMessage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftClient) StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Raft_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Raft/StreamRaftMessage", opts...) + if err != nil { + return nil, err + } + x := &raftStreamRaftMessageClient{stream} + return x, nil +} + +type Raft_StreamRaftMessageClient interface { + Send(*StreamRaftMessageRequest) error + CloseAndRecv() (*StreamRaftMessageResponse, error) + grpc.ClientStream +} + +type raftStreamRaftMessageClient struct { + grpc.ClientStream +} + +func (x *raftStreamRaftMessageClient) Send(m *StreamRaftMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageClient) CloseAndRecv() (*StreamRaftMessageResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamRaftMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) { + out := new(ResolveAddressResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...) 
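+	// Editor's note (illustrative, not part of the generated file): grpc.Invoke
+	// is the package-level unary call helper used by generated stubs of this
+	// vintage (newer generators call c.cc.Invoke directly); it serializes `in`,
+	// performs the round trip on c.cc, and decodes the reply into `out`. A
+	// caller would use the stub like any gRPC client, e.g. (sketch; the address
+	// and dial options are assumptions):
+	//
+	//	conn, err := grpc.Dial("manager.example:2377", grpc.WithInsecure())
+	//	if err != nil {
+	//		return err
+	//	}
+	//	defer conn.Close()
+	//	resp, err := NewRaftClient(conn).ResolveAddress(ctx, &ResolveAddressRequest{RaftID: id})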
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Raft service + +type RaftServer interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(Raft_StreamRaftMessageServer) error + // ResolveAddress returns the address where the node with the given ID can be reached. + ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error) +} + +func RegisterRaftServer(s *grpc.Server, srv RaftServer) { + s.RegisterService(&_Raft_serviceDesc, srv) +} + +func _Raft_ProcessRaftMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProcessRaftMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ProcessRaftMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ProcessRaftMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ProcessRaftMessage(ctx, req.(*ProcessRaftMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Raft_StreamRaftMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RaftServer).StreamRaftMessage(&raftStreamRaftMessageServer{stream}) +} + +type Raft_StreamRaftMessageServer interface { + SendAndClose(*StreamRaftMessageResponse) error + Recv() (*StreamRaftMessageRequest, error) + grpc.ServerStream +} + +type raftStreamRaftMessageServer struct { + grpc.ServerStream +} + +func (x *raftStreamRaftMessageServer) SendAndClose(m *StreamRaftMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageServer) Recv() (*StreamRaftMessageRequest, error) { + m := new(StreamRaftMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ResolveAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ResolveAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ResolveAddress(ctx, req.(*ResolveAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Raft_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Raft", + HandlerType: (*RaftServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ProcessRaftMessage", + Handler: _Raft_ProcessRaftMessage_Handler, + }, + { + MethodName: 
"ResolveAddress", + Handler: _Raft_ResolveAddress_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamRaftMessage", + Handler: _Raft_StreamRaftMessage_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +// Client API for RaftMembership service + +type RaftMembershipClient interface { + // Join adds a RaftMember to the raft cluster. + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. + Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) +} + +type raftMembershipClient struct { + cc *grpc.ClientConn +} + +func NewRaftMembershipClient(cc *grpc.ClientConn) RaftMembershipClient { + return &raftMembershipClient{cc} +} + +func (c *raftMembershipClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) { + out := new(JoinResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Join", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftMembershipClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) { + out := new(LeaveResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Leave", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for RaftMembership service + +type RaftMembershipServer interface { + // Join adds a RaftMember to the raft cluster. + Join(context.Context, *JoinRequest) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. + Leave(context.Context, *LeaveRequest) (*LeaveResponse, error) +} + +func RegisterRaftMembershipServer(s *grpc.Server, srv RaftMembershipServer) { + s.RegisterService(&_RaftMembership_serviceDesc, srv) +} + +func _RaftMembership_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftMembership_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RaftMembership_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.RaftMembership", + HandlerType: (*RaftMembershipServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Join", + Handler: _RaftMembership_Join_Handler, + }, + { + MethodName: "Leave", + Handler: _RaftMembership_Leave_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + 
Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +func (m *RaftMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *JoinRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *JoinResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RemovedMembers) > 0 { + for _, num := range m.RemovedMembers { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *LeaveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ProcessRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n3, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
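+	// Editor's note (illustrative, not part of the generated file): the
+	// hard-coded bytes written by these MarshalTo methods are protobuf field
+	// tags, computed as (field_number << 3) | wire_type. For this Message
+	// field: field 1, wire type 2 (length-delimited), so (1<<3)|2 == 0x0a, the
+	// 0xa above. Likewise RaftMember.NodeID uses (2<<3)|2 == 0x12 and
+	// RaftMember.Status (4<<3)|2 == 0x22, while varint fields such as RaftID
+	// use wire type 0, giving (1<<3)|0 == 0x8. Each tag is followed by a varint
+	// length (for wire type 2) and then the payload bytes:
+	//
+	//	tag := byte(1<<3 | 2) // == 0x0a: field 1, length-delimited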
+ i += n3 + } + return i, nil +} + +func (m *ProcessRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StreamRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n4, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *StreamRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResolveAddressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + return i, nil +} + +func (m *ResolveAddressResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, msg := range m.Action { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StoreAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Action)) + } + if m.Target != nil { + nn5, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *StoreAction_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = 
encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n6, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *StoreAction_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Service.Size())) + n7, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *StoreAction_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Task.Size())) + n8, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *StoreAction_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Network.Size())) + n9, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *StoreAction_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Cluster.Size())) + n10, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *StoreAction_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Secret.Size())) + n11, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *StoreAction_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *StoreAction_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size())) + n13, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *StoreAction_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Config.Size())) + n14, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func encodeFixed64Raft(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Raft(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyRaftServer struct { + local RaftServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftServer(local RaftServer, connSelector 
raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + } + return resp, err +} + +type Raft_StreamRaftMessageServerWrapper struct { + Raft_StreamRaftMessageServer + ctx context.Context +} + +func (s Raft_StreamRaftMessageServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if 
err != nil { + return err + } + streamWrapper := Raft_StreamRaftMessageServerWrapper{ + Raft_StreamRaftMessageServer: stream, + ctx: ctx, + } + return p.local.StreamRaftMessage(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRaftClient(conn).StreamRaftMessage(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ResolveAddress(modCtx, r) + } + return resp, err +} + +type raftProxyRaftMembershipServer struct { + local RaftMembershipServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftMembershipServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftMembershipServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx 
context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Join(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Join(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Join(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Join(modCtx, r) + } + return resp, err +} + +func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Leave(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Leave(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Leave(modCtx, r) + } + return resp, err +} + +func (m *RaftMember) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovRaft(uint64(l)) + return n +} + +func (m *JoinRequest) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *JoinResponse) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if len(m.RemovedMembers) > 0 { + for _, e := range m.RemovedMembers { + n += 1 + sovRaft(uint64(e)) + } + } + return n +} + +func (m *LeaveRequest) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *LeaveResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m 
*ProcessRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *ProcessRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StreamRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *StreamRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResolveAddressRequest) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + return n +} + +func (m *ResolveAddressResponse) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaft(uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, e := range m.Action { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + return n +} + +func (m *StoreAction) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRaft(uint64(m.Action)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *StoreAction_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RaftMember) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMember{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RaftMemberStatus", "RaftMemberStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinRequest{`, + `Addr:` + fmt.Sprintf("%v", 
this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *JoinResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinResponse{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `RemovedMembers:` + fmt.Sprintf("%v", this.RemovedMembers) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveRequest{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "RaftMember", "RaftMember", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveResponse{`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *ResolveAddressRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressRequest{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `}`, + }, "") + return s +} +func (this *ResolveAddressResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressResponse{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *InternalRaftRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InternalRaftRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Action:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Task{`, + `Task:` + 
strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringRaft(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RaftMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedMembers", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum 
{ + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &RaftMember{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
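+	// Editor's note (illustrative, not part of the generated file): the loop
+	// above is the standard base-128 varint decoder; each byte contributes its
+	// low 7 bits, and a cleared high bit marks the final byte. The decoded
+	// value is then split into fieldNum = wire >> 3 and wireType = wire & 0x7.
+	// For example, the single byte 0x12 decodes to wire == 18, i.e. field 2
+	// with wire type 2 (length-delimited). A freestanding sketch of the same
+	// decoder:
+	//
+	//	func decodeVarint(b []byte) (v uint64, n int) {
+	//		for shift := uint(0); n < len(b); shift += 7 {
+	//			c := b[n]
+	//			n++
+	//			v |= uint64(c&0x7f) << shift
+	//			if c < 0x80 {
+	//				return v, n
+	//			}
+	//		}
+	//		return 0, 0 // truncated input
+	//	}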
switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = append(m.Action, StoreAction{}) + if err := m.Action[len(m.Action)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (StoreActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Node{v} + iNdEx = postIndex 
+ case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Service{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Network{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Cluster{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Secret{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Resource{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Extension{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + 
next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1015 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xc1, 0x6e, 0x1b, 0x45, + 0x18, 0xc7, 0x77, 0xed, 0xad, 0xd3, 0x7c, 0x69, 0x93, 0x30, 0x25, 0x61, 0xb3, 0x2d, 0x8e, 0xbb, + 0x45, 0xc2, 0x09, 0xc9, 0x5a, 0x18, 0xa4, 0xa2, 0x42, 0x0f, 0x71, 0x62, 0x29, 0x26, 0xad, 0x53, + 0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0, + 0x1e, 0x21, 0x2f, 0x00, 0x42, 0xaa, 0x38, 0xc0, 0xb9, 0x0f, 0xc0, 0x03, 0xa0, 0x88, 0x13, 0x37, + 0x38, 0x45, 0xd4, 0x0f, 0x00, 0xaf, 0x80, 0x66, 0x76, 0xd7, 0x31, 0xf6, 0xda, 0xf1, 0x81, 0x4b, + 0x32, 0xda, 0xf9, 0xfd, 0xbf, 0xff, 0x37, 0x33, 0xdf, 0x7c, 0x63, 0x58, 0xab, 0xfb, 0xfc, 0x59, + 0xa7, 0x66, 0xb9, 0xa4, 0x55, 0xf0, 0x88, 0xdb, 0xc0, 0xb4, 0xc0, 0x4e, 0x1d, 0xda, 0x6a, 0xf8, + 0xbc, 0xe0, 0xb4, 0xfd, 0x02, 0x75, 0x4e, 0xb8, 0xd5, 0xa6, 0x84, 0x13, 0x84, 0xc2, 0x79, 0x2b, + 0x9e, 0xb7, 0xba, 0xef, 0x1a, 0x1b, 0x57, 0xc8, 0x49, 0xed, 0x73, 0xec, 0x72, 0x16, 0x46, 0x30, + 0xd6, 0xaf, 0xa0, 0xf9, 0x57, 0x6d, 0x1c, 0xb3, 0x9b, 0x03, 0xac, 0x4b, 0x28, 0x26, 0xac, 0x80, + 0xb9, 0xeb, 0xc9, 0x84, 0xe4, 0x9f, 0x76, 0x6d, 0x20, 0x39, 0xe3, 0xf5, 0x3a, 0xa9, 0x13, 0x39, + 0x2c, 0x88, 0x51, 0xf4, 0xf5, 0xfe, 0x04, 0x43, 0x49, 0xd4, 0x3a, 0x27, 0x85, 0x76, 0xb3, 0x53, + 0xf7, 0x83, 0xe8, 0x5f, 0x28, 0x34, 0x5f, 0xaa, 0x00, 0xb6, 0x73, 0xc2, 0x1f, 0xe3, 0x56, 0x0d, + 0x53, 0x74, 0x0f, 0x66, 0x84, 0xd7, 0xb1, 0xef, 0xe9, 0x6a, 0x4e, 0xcd, 0x6b, 0x25, 0xe8, 0x5d, + 0xac, 0x66, 0x04, 0x50, 0xd9, 0xb1, 0x33, 0x62, 0xaa, 0xe2, 0x09, 0x28, 0x20, 0x1e, 0x16, 0x50, + 0x2a, 0xa7, 0xe6, 0x67, 0x43, 0xa8, 0x4a, 0x3c, 0x2c, 0x20, 0x31, 0x55, 0xf1, 0x10, 0x02, 0xcd, + 0xf1, 0x3c, 0xaa, 0xa7, 0x05, 0x61, 0xcb, 0x31, 0x2a, 0x41, 0x86, 0x71, 0x87, 0x77, 0x98, 0xae, + 0xe5, 0xd4, 0xfc, 0x5c, 0xf1, 0x2d, 0x6b, 0x74, 0xa7, 0xad, 0xcb, 0x6c, 0x0e, 0x24, 0x5b, 0xd2, + 0xce, 0x2f, 0x56, 0x15, 0x3b, 0x52, 0x9a, 0x77, 0x61, 0xee, 0x63, 0xe2, 0x07, 0x36, 0xfe, 0xa2, + 0x83, 0x19, 0xef, 0xdb, 0xa8, 0x97, 0x36, 0xe6, 0x0f, 0x2a, 0xdc, 0x08, 0x19, 0xd6, 0x26, 0x01, + 0xc3, 0xd3, 0xad, 0xea, 0x03, 0x98, 0x69, 0x49, 0x5b, 0xa6, 0xa7, 0x72, 0xe9, 0xfc, 0x5c, 0x31, + 0x3b, 0x39, 0x3b, 0x3b, 0xc6, 0xd1, 0x3b, 0xb0, 0x40, 0x71, 0x8b, 0x74, 0xb1, 0x77, 0x1c, 0x47, + 0x48, 0xe7, 0xd2, 0x79, 0xad, 0x94, 0x5a, 0x54, 0xec, 0xf9, 0x68, 0x2a, 0x14, 0x31, 0xb3, 0x04, + 0x37, 0x1e, 0x61, 0xa7, 0x8b, 0xe3, 0x05, 0x14, 0x41, 0x13, 0x3b, 0x26, 0x13, 0xbb, 0xda, 0x53, + 0xb2, 0xe6, 0x02, 0xdc, 0x8c, 0x62, 0x84, 0x0b, 0x34, 0x1f, 0xc1, 0xca, 0x13, 0x4a, 0x5c, 0xcc, + 0x58, 0xc8, 0x32, 0xe6, 0xd4, 0xfb, 0x0e, 0x6b, 0x62, 0x61, 0xf2, 0x4b, 0x64, 0xb2, 0x60, 0x85, + 0x65, 0x65, 0xc5, 0x60, 0x3c, 0xff, 0x40, 0x7b, 0xfe, 0x9d, 0xa9, 0x98, 0x77, 0xc0, 0x48, 0x8a, + 0x16, 0x79, 0xed, 0x81, 0x7e, 0xc0, 0x29, 0x76, 0x5a, 0xff, 0x87, 0xd5, 0x6d, 0x58, 0x49, 0x08, + 0x16, 0x39, 0x7d, 0x04, 0x4b, 0x36, 0x66, 0xa4, 0xd9, 0xc5, 0x5b, 0x9e, 0x47, 0x45, 0x3a, 0x91, + 0xcd, 0x34, 0xe7, 0x69, 0x6e, 0xc0, 0xf2, 0xb0, 0x3a, 
0x2a, 0x87, 0xa4, 0x9a, 0x69, 0xc2, 0xad, + 0x4a, 0xc0, 0x31, 0x0d, 0x9c, 0xa6, 0x88, 0x13, 0x3b, 0x2d, 0x43, 0xaa, 0x6f, 0x92, 0xe9, 0x5d, + 0xac, 0xa6, 0x2a, 0x3b, 0x76, 0xca, 0xf7, 0xd0, 0x43, 0xc8, 0x38, 0x2e, 0xf7, 0x49, 0x10, 0xd5, + 0xca, 0x6a, 0xd2, 0xb9, 0x1d, 0x70, 0x42, 0xf1, 0x96, 0xc4, 0xe2, 0x22, 0x0e, 0x45, 0xe6, 0xaf, + 0x1a, 0xcc, 0x0d, 0xcc, 0xa2, 0x0f, 0xfb, 0xe1, 0x84, 0xd5, 0x7c, 0xf1, 0xde, 0x15, 0xe1, 0xf6, + 0xfc, 0xc0, 0x8b, 0x83, 0x21, 0x2b, 0xaa, 0xa0, 0x94, 0xdc, 0x71, 0x3d, 0x49, 0x2a, 0xee, 0xe6, + 0xae, 0x12, 0x56, 0x0f, 0xba, 0x0f, 0x33, 0x0c, 0xd3, 0xae, 0xef, 0x62, 0x79, 0x39, 0xe7, 0x8a, + 0xb7, 0x13, 0xdd, 0x42, 0x64, 0x57, 0xb1, 0x63, 0x5a, 0x18, 0x71, 0x87, 0x35, 0xa2, 0xcb, 0x9b, + 0x68, 0x74, 0xe8, 0xb0, 0x86, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30, 0x3f, 0x25, 0xb4, 0xa1, 0x5f, + 0x1b, 0x6f, 0x54, 0x0d, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0x36, 0x3b, 0x8c, 0x63, 0xaa, 0x67, + 0xc6, 0x0b, 0xb7, 0x43, 0x44, 0x08, 0x23, 0x1a, 0xbd, 0x0f, 0x19, 0x86, 0x5d, 0x8a, 0xb9, 0x3e, + 0x23, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2b, 0x5a, 0x8a, 0x1c, 0xa1, 0x07, 0x70, 0x9d, 0x62, + 0x46, 0x3a, 0xd4, 0xc5, 0xfa, 0x75, 0xa9, 0xbb, 0x93, 0x78, 0x0d, 0x23, 0x66, 0x57, 0xb1, 0xfb, + 0x3c, 0x7a, 0x08, 0xb3, 0xf8, 0x4b, 0x8e, 0x03, 0x26, 0x0e, 0x6f, 0x56, 0x8a, 0xdf, 0x4c, 0x12, + 0x97, 0x63, 0x68, 0x57, 0xb1, 0x2f, 0x15, 0x22, 0x61, 0x97, 0x04, 0x27, 0x7e, 0x5d, 0x87, 0xf1, + 0x09, 0x6f, 0x4b, 0x42, 0x24, 0x1c, 0xb2, 0xa5, 0xeb, 0x90, 0xe1, 0x0e, 0xad, 0x63, 0xbe, 0xfe, + 0x8f, 0x0a, 0x0b, 0x43, 0x75, 0x81, 0xde, 0x86, 0x99, 0xa3, 0xea, 0x5e, 0x75, 0xff, 0xd3, 0xea, + 0xa2, 0x62, 0x18, 0x67, 0x2f, 0x72, 0xcb, 0x43, 0xc4, 0x51, 0xd0, 0x08, 0xc8, 0x69, 0x80, 0x8a, + 0x70, 0xeb, 0xe0, 0x70, 0xdf, 0x2e, 0x1f, 0x6f, 0x6d, 0x1f, 0x56, 0xf6, 0xab, 0xc7, 0xdb, 0x76, + 0x79, 0xeb, 0xb0, 0xbc, 0xa8, 0x1a, 0x2b, 0x67, 0x2f, 0x72, 0x4b, 0x43, 0xa2, 0x6d, 0x8a, 0x1d, + 0x8e, 0x47, 0x34, 0x47, 0x4f, 0x76, 0x84, 0x26, 0x95, 0xa8, 0x39, 0x6a, 0x7b, 0x49, 0x1a, 0xbb, + 0xfc, 0x78, 0xff, 0x93, 0xf2, 0x62, 0x3a, 0x51, 0x63, 0xcb, 0x76, 0x69, 0xbc, 0xf1, 0xcd, 0x4f, + 0x59, 0xe5, 0x97, 0x9f, 0xb3, 0xc3, 0xab, 0x2b, 0xfe, 0x98, 0x06, 0x4d, 0xdc, 0x50, 0x74, 0xa6, + 0x02, 0x1a, 0x6d, 0x53, 0x68, 0x33, 0x69, 0x07, 0xc7, 0x36, 0x47, 0xc3, 0x9a, 0x16, 0x8f, 0x7a, + 0xd2, 0xd2, 0x6f, 0x2f, 0xff, 0xfe, 0x3e, 0xb5, 0x00, 0x37, 0x25, 0xbf, 0xd9, 0x72, 0x02, 0xa7, + 0x8e, 0x29, 0xfa, 0x56, 0x85, 0xd7, 0x46, 0x1a, 0x19, 0xda, 0x48, 0xbe, 0xc6, 0xc9, 0xcd, 0xd3, + 0xd8, 0x9c, 0x92, 0x9e, 0x98, 0x49, 0x5e, 0x45, 0x5f, 0xc3, 0xfc, 0x7f, 0x1b, 0x1f, 0x5a, 0x1b, + 0x57, 0xce, 0x23, 0xad, 0xd5, 0x58, 0x9f, 0x06, 0x9d, 0x98, 0x41, 0xf1, 0x0f, 0x15, 0xe6, 0x2f, + 0x9f, 0x2c, 0xf6, 0xcc, 0x6f, 0xa3, 0xcf, 0x40, 0x13, 0x0f, 0x32, 0x4a, 0x6c, 0x93, 0x03, 0xcf, + 0xb9, 0x91, 0x1b, 0x0f, 0x4c, 0x3e, 0x00, 0x17, 0xae, 0xc9, 0x27, 0x11, 0x25, 0x46, 0x18, 0x7c, + 0x71, 0x8d, 0xbb, 0x13, 0x88, 0x89, 0x26, 0x25, 0xfd, 0xfc, 0x55, 0x56, 0xf9, 0xf3, 0x55, 0x56, + 0x79, 0xde, 0xcb, 0xaa, 0xe7, 0xbd, 0xac, 0xfa, 0x7b, 0x2f, 0xab, 0xfe, 0xd5, 0xcb, 0xaa, 0x4f, + 0xd3, 0x4f, 0xb5, 0x5a, 0x46, 0xfe, 0xa2, 0x7a, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, + 0x7a, 0x8b, 0xe7, 0x6a, 0x0a, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/resource.pb.go b/vendor/github.com/docker/swarmkit/api/resource.pb.go new file mode 100644 index 0000000000..5a51ecc96e --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/resource.pb.go @@ -0,0 +1,1096 @@ +// Code generated by protoc-gen-gogo. 
+// source: github.com/docker/swarmkit/api/resource.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AttachNetworkRequest struct { + Config *NetworkAttachmentConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + ContainerID string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *AttachNetworkRequest) Reset() { *m = AttachNetworkRequest{} } +func (*AttachNetworkRequest) ProtoMessage() {} +func (*AttachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{0} } + +type AttachNetworkResponse struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *AttachNetworkResponse) Reset() { *m = AttachNetworkResponse{} } +func (*AttachNetworkResponse) ProtoMessage() {} +func (*AttachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{1} } + +type DetachNetworkRequest struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *DetachNetworkRequest) Reset() { *m = DetachNetworkRequest{} } +func (*DetachNetworkRequest) ProtoMessage() {} +func (*DetachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{2} } + +type DetachNetworkResponse struct { +} + +func (m *DetachNetworkResponse) Reset() { *m = DetachNetworkResponse{} } +func (*DetachNetworkResponse) ProtoMessage() {} +func (*DetachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{3} } + +func init() { + proto.RegisterType((*AttachNetworkRequest)(nil), "docker.swarmkit.v1.AttachNetworkRequest") + proto.RegisterType((*AttachNetworkResponse)(nil), "docker.swarmkit.v1.AttachNetworkResponse") + proto.RegisterType((*DetachNetworkRequest)(nil), "docker.swarmkit.v1.DetachNetworkRequest") + proto.RegisterType((*DetachNetworkResponse)(nil), "docker.swarmkit.v1.DetachNetworkResponse") +} + +type authenticatedWrapperResourceAllocatorServer struct { + local ResourceAllocatorServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperResourceAllocatorServer(local ResourceAllocatorServer, authorize func(context.Context, []string) error) ResourceAllocatorServer { + return &authenticatedWrapperResourceAllocatorServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + 
} + return p.local.AttachNetwork(ctx, r) +} + +func (p *authenticatedWrapperResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) +} + +func (m *AttachNetworkRequest) Copy() *AttachNetworkRequest { + if m == nil { + return nil + } + o := &AttachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *AttachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkRequest) + *m = *o + if o.Config != nil { + m.Config = &NetworkAttachmentConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse { + if m == nil { + return nil + } + o := &AttachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *AttachNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkResponse) + *m = *o +} + +func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest { + if m == nil { + return nil + } + o := &DetachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*DetachNetworkRequest) + *m = *o +} + +func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse { + if m == nil { + return nil + } + o := &DetachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ResourceAllocator service + +type ResourceAllocatorClient interface { + AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) + DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) +} + +type resourceAllocatorClient struct { + cc *grpc.ClientConn +} + +func NewResourceAllocatorClient(cc *grpc.ClientConn) ResourceAllocatorClient { + return &resourceAllocatorClient{cc} +} + +func (c *resourceAllocatorClient) AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) { + out := new(AttachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceAllocatorClient) DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) { + out := new(DetachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ResourceAllocator service + +type ResourceAllocatorServer interface { + AttachNetwork(context.Context, *AttachNetworkRequest) (*AttachNetworkResponse, error) + DetachNetwork(context.Context, *DetachNetworkRequest) (*DetachNetworkResponse, error) +} + +func RegisterResourceAllocatorServer(s *grpc.Server, srv ResourceAllocatorServer) { + s.RegisterService(&_ResourceAllocator_serviceDesc, srv) +} + +func _ResourceAllocator_AttachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, req.(*AttachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceAllocator_DetachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, req.(*DetachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.ResourceAllocator", + HandlerType: (*ResourceAllocatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AttachNetwork", + Handler: _ResourceAllocator_AttachNetwork_Handler, + }, + { + MethodName: "DetachNetwork", + Handler: _ResourceAllocator_DetachNetwork_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/resource.proto", +} + +func (m *AttachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(m.Config.Size())) + n1, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.ContainerID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *AttachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + 
return i, nil +} + +func (m *DetachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + return i, nil +} + +func (m *DetachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeFixed64Resource(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Resource(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyResourceAllocatorServer struct { + local ResourceAllocatorServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyResourceAllocatorServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, 
error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + } + return resp, err +} + +func (m *AttachNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovResource(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *AttachNetworkResponse) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovResource(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AttachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&AttachNetworkRequest{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *AttachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkResponse{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DetachNetworkRequest{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DetachNetworkResponse{`, + `}`, + }, "") + return s +} +func valueToStringResource(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AttachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NetworkAttachmentConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthResource + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipResource(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/resource.proto", fileDescriptorResource) +} + +var fileDescriptorResource = []byte{ + // 397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x19, 0x16, 0x24, 0xdf, 0x50, 0xf2, 0x69, 0x03, 0x91, 0x90, 0x58, 0x48, 0xdd, 0xa0, + 0x86, 0x36, 0x62, 0x8c, 0x6b, 0xfe, 0x6c, 0xba, 0x90, 0x45, 0x5f, 0xc0, 0x0c, 0xed, 0x50, 0x1a, + 0x68, 0xa7, 0x4e, 0xa7, 0x12, 0x77, 0x6e, 0x5d, 0xb9, 0xf5, 0x1d, 0x4c, 0x7c, 0x0e, 0xe2, 0xca, + 0xa5, 0x2b, 0x22, 0x7d, 0x00, 0x9f, 0xc1, 0xd0, 0x29, 0x10, 0x70, 0xa2, 0xc4, 0x55, 0xa7, 0xd3, + 0x73, 0xce, 0xfd, 0xdd, 0x7b, 0x0b, 0x1b, 0x8e, 0xcb, 0x86, 0x51, 0x5f, 0xb3, 0x88, 0xa7, 0xdb, + 0xc4, 0x1a, 0x61, 0xaa, 0x87, 0x13, 0x44, 0xbd, 0x91, 0xcb, 0x74, 0x14, 0xb8, 
0x3a, 0xc5, 0x21, + 0x89, 0xa8, 0x85, 0xb5, 0x80, 0x12, 0x46, 0x64, 0x99, 0x6b, 0xb4, 0xa5, 0x46, 0xbb, 0x3d, 0xab, + 0x9c, 0xfc, 0x12, 0xc1, 0xee, 0x02, 0x1c, 0x72, 0x7f, 0xa5, 0xe8, 0x10, 0x87, 0x24, 0x47, 0x7d, + 0x71, 0x4a, 0x6f, 0x2f, 0x7f, 0x48, 0x48, 0x14, 0xfd, 0x68, 0xa0, 0x07, 0xe3, 0xc8, 0x71, 0xfd, + 0xf4, 0xc1, 0x8d, 0xea, 0x23, 0x80, 0xc5, 0x16, 0x63, 0xc8, 0x1a, 0xf6, 0x30, 0x9b, 0x10, 0x3a, + 0x32, 0xf1, 0x4d, 0x84, 0x43, 0x26, 0x77, 0x60, 0xce, 0x22, 0xfe, 0xc0, 0x75, 0xca, 0xa0, 0x06, + 0xea, 0xf9, 0xe6, 0xa9, 0xf6, 0x1d, 0x5c, 0x4b, 0x3d, 0x3c, 0xc0, 0xc3, 0x3e, 0xeb, 0x24, 0x16, + 0x33, 0xb5, 0xca, 0x4d, 0x28, 0x59, 0xc4, 0x67, 0xc8, 0xf5, 0x31, 0xbd, 0x76, 0xed, 0x72, 0xb6, + 0x06, 0xea, 0xff, 0xda, 0xff, 0xe3, 0x59, 0x35, 0xdf, 0x59, 0xde, 0x1b, 0x5d, 0x33, 0xbf, 0x12, + 0x19, 0xb6, 0xda, 0x83, 0xa5, 0x2d, 0xa0, 0x30, 0x20, 0x7e, 0x88, 0xe5, 0x0b, 0x58, 0x40, 0xab, + 0x42, 0x8b, 0x34, 0x90, 0xa4, 0xed, 0xc5, 0xb3, 0xaa, 0xb4, 0x26, 0x30, 0xba, 0xa6, 0xb4, 0x96, + 0x19, 0xb6, 0x7a, 0x05, 0x8b, 0x5d, 0x2c, 0x68, 0xf0, 0x8f, 0x71, 0x07, 0xb0, 0xb4, 0x15, 0xc7, + 0xf1, 0x9a, 0xcf, 0x59, 0xb8, 0x6f, 0xa6, 0xbb, 0x6e, 0x8d, 0xc7, 0xc4, 0x42, 0x8c, 0x50, 0xf9, + 0x01, 0xc0, 0xc2, 0x46, 0x3b, 0x72, 0x5d, 0x34, 0x48, 0xd1, 0x0a, 0x2a, 0xc7, 0x3b, 0x28, 0x79, + 0x71, 0xf5, 0xe8, 0xf5, 0xe5, 0xf3, 0x29, 0x7b, 0x08, 0xa5, 0x44, 0xda, 0x58, 0x7c, 0xc3, 0x14, + 0x16, 0xf8, 0x9b, 0x87, 0x7c, 0xe4, 0x60, 0xce, 0xb2, 0xc1, 0x2e, 0x66, 0x11, 0x4d, 0x4b, 0xcc, + 0x22, 0x1c, 0xc4, 0x4e, 0x2c, 0xed, 0xf2, 0x74, 0xae, 0x64, 0xde, 0xe7, 0x4a, 0xe6, 0x3e, 0x56, + 0xc0, 0x34, 0x56, 0xc0, 0x5b, 0xac, 0x80, 0x8f, 0x58, 0x01, 0xfd, 0x5c, 0xf2, 0x63, 0x9e, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x7a, 0x29, 0xfc, 0x58, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/snapshot.pb.go b/vendor/github.com/docker/swarmkit/api/snapshot.pb.go new file mode 100644 index 0000000000..f8c32b7726 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/snapshot.pb.go @@ -0,0 +1,1345 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/snapshot.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Snapshot_Version int32 + +const ( + // V0 is the initial version of the StoreSnapshot message. + Snapshot_V0 Snapshot_Version = 0 +) + +var Snapshot_Version_name = map[int32]string{ + 0: "V0", +} +var Snapshot_Version_value = map[string]int32{ + "V0": 0, +} + +func (x Snapshot_Version) String() string { + return proto.EnumName(Snapshot_Version_name, int32(x)) +} +func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2, 0} } + +// StoreSnapshot is used to store snapshots of the store. 
+type StoreSnapshot struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` + Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"` + Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"` + Tasks []*Task `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"` + Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"` + Secrets []*Secret `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"` + Resources []*Resource `protobuf:"bytes,7,rep,name=resources" json:"resources,omitempty"` + Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions" json:"extensions,omitempty"` + Configs []*Config `protobuf:"bytes,9,rep,name=configs" json:"configs,omitempty"` +} + +func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} } +func (*StoreSnapshot) ProtoMessage() {} +func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} } + +// ClusterSnapshot stores cluster membership information in snapshots. +type ClusterSnapshot struct { + Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"` + Removed []uint64 `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"` +} + +func (m *ClusterSnapshot) Reset() { *m = ClusterSnapshot{} } +func (*ClusterSnapshot) ProtoMessage() {} +func (*ClusterSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} } + +type Snapshot struct { + Version Snapshot_Version `protobuf:"varint,1,opt,name=version,proto3,enum=docker.swarmkit.v1.Snapshot_Version" json:"version,omitempty"` + Membership ClusterSnapshot `protobuf:"bytes,2,opt,name=membership" json:"membership"` + Store StoreSnapshot `protobuf:"bytes,3,opt,name=store" json:"store"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } + +func init() { + proto.RegisterType((*StoreSnapshot)(nil), "docker.swarmkit.v1.StoreSnapshot") + proto.RegisterType((*ClusterSnapshot)(nil), "docker.swarmkit.v1.ClusterSnapshot") + proto.RegisterType((*Snapshot)(nil), "docker.swarmkit.v1.Snapshot") + proto.RegisterEnum("docker.swarmkit.v1.Snapshot_Version", Snapshot_Version_name, Snapshot_Version_value) +} + +func (m *StoreSnapshot) Copy() *StoreSnapshot { + if m == nil { + return nil + } + o := &StoreSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *StoreSnapshot) CopyFrom(src interface{}) { + + o := src.(*StoreSnapshot) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + 
github_com_docker_swarmkit_api_deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Resources != nil { + m.Resources = make([]*Resource, len(o.Resources)) + for i := range m.Resources { + m.Resources[i] = &Resource{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources[i], o.Resources[i]) + } + } + + if o.Extensions != nil { + m.Extensions = make([]*Extension, len(o.Extensions)) + for i := range m.Extensions { + m.Extensions[i] = &Extension{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Extensions[i], o.Extensions[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *ClusterSnapshot) Copy() *ClusterSnapshot { + if m == nil { + return nil + } + o := &ClusterSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSnapshot) CopyFrom(src interface{}) { + + o := src.(*ClusterSnapshot) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.Removed != nil { + m.Removed = make([]uint64, len(o.Removed)) + copy(m.Removed, o.Removed) + } + +} + +func (m *Snapshot) Copy() *Snapshot { + if m == nil { + return nil + } + o := &Snapshot{} + o.CopyFrom(m) + return o +} + +func (m *Snapshot) CopyFrom(src interface{}) { + + o := src.(*Snapshot) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Membership, &o.Membership) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Store, &o.Store) +} + +func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x22 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0x2a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x32 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n + } + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x3a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Extensions) > 0 { + for _, msg := range m.Extensions { + dAtA[i] = 0x42 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x4a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Removed) > 0 { + for _, num := range m.Removed { + dAtA[i] = 0x10 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Version != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Membership.Size())) + n1, err := m.Membership.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Store.Size())) + n2, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *StoreSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if 
len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + return n +} + +func (m *ClusterSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Removed) > 0 { + for _, e := range m.Removed { + n += 1 + sovSnapshot(uint64(e)) + } + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Version != 0 { + n += 1 + sovSnapshot(uint64(m.Version)) + } + l = m.Membership.Size() + n += 1 + l + sovSnapshot(uint64(l)) + l = m.Store.Size() + n += 1 + l + sovSnapshot(uint64(l)) + return n +} + +func sovSnapshot(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnapshot(x uint64) (n int) { + return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StoreSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreSnapshot{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`, + `Extensions:` + strings.Replace(fmt.Sprintf("%v", this.Extensions), "Extension", "Extension", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSnapshot{`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `Removed:` + fmt.Sprintf("%v", this.Removed) + `,`, + `}`, + }, "") + return s +} +func (this *Snapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Snapshot{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Membership:` + strings.Replace(strings.Replace(this.Membership.String(), "ClusterSnapshot", "ClusterSnapshot", 1), `&`, ``, 1) + `,`, + `Store:` + strings.Replace(strings.Replace(this.Store.String(), "StoreSnapshot", "StoreSnapshot", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSnapshot(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StoreSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, &Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
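// Editorial note (not generated code): each pass through this loop decodes one field — a varint key whose low three bits carry the wire type and whose remaining bits carry the field number — followed by the field payload; fields unknown to this type fall through to skipSnapshot in the default case. +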
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + 
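// Editorial note (not generated code): Version is a varint (wire type 0). Each byte contributes seven low-order bits, least-significant group first, and a set high bit means another byte follows; e.g. the bytes 0xAC 0x02 decode to 0x2C | (0x02<<7) = 300. +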
m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (Snapshot_Version(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Membership.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnapshot(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal 
wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/snapshot.proto", fileDescriptorSnapshot) +} + +var fileDescriptorSnapshot = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xbf, 0x8f, 0xd3, 0x30, + 0x14, 0xc7, 0x93, 0xf4, 0x47, 0x7a, 0x3e, 0x01, 0x87, 0xc5, 0x60, 0x95, 0x23, 0x94, 0xc2, 0x50, + 0x24, 0x48, 0xa0, 0x20, 0x81, 0x90, 0x8e, 0xa1, 0x27, 0x06, 0x06, 0x6e, 0x70, 0xd1, 0x89, 0x35, + 0x4d, 0xdd, 0x36, 0x84, 0xc4, 0x95, 0x9f, 0xdb, 0x63, 0x84, 0xff, 0xae, 0x23, 0x23, 0x13, 0xe2, + 0xba, 0xf0, 0x6f, 0x20, 0xdb, 0x71, 0xa8, 0x44, 0x7a, 0xb7, 0x45, 0xd6, 0xe7, 0xf3, 0xde, 0xd7, + 0xce, 0x7b, 0xe8, 0xe9, 0x3c, 0x95, 0x8b, 0xd5, 0x24, 0x4c, 0x78, 0x1e, 0x4d, 0x79, 0x92, 0x31, + 0x11, 0xc1, 0x45, 0x2c, 0xf2, 0x2c, 0x95, 0x51, 0xbc, 0x4c, 0x23, 0x28, 0xe2, 0x25, 0x2c, 0xb8, + 0x0c, 0x97, 0x82, 0x4b, 0x8e, 0xb1, 0x61, 0x42, 0xcb, 0x84, 0xeb, 0xe7, 0xdd, 0x27, 0xd7, 0x94, + 0xe0, 0x93, 0xcf, 0x2c, 0x91, 0x60, 0x2a, 0x74, 0x1f, 0x5f, 0x43, 0x8b, 0x78, 0x56, 0x36, 0xeb, + 0xde, 0x99, 0xf3, 0x39, 0xd7, 0x9f, 0x91, 0xfa, 0x32, 0xa7, 0xfd, 0xef, 0x4d, 0x74, 0x63, 0x2c, + 0xb9, 0x60, 0xe3, 0x32, 0x1a, 0x0e, 0x51, 0xab, 0xe0, 0x53, 0x06, 0xc4, 0xed, 0x35, 0x06, 0x87, + 0x43, 0x12, 0xfe, 0x1f, 0x32, 0x3c, 0xe3, 0x53, 0x46, 0x0d, 0x86, 0x5f, 0xa1, 0x0e, 0x30, 0xb1, + 0x4e, 0x13, 0x06, 0xc4, 0xd3, 0xca, 0xdd, 0x3a, 0x65, 0x6c, 0x18, 0x5a, 0xc1, 0x4a, 0x2c, 0x98, + 0xbc, 0xe0, 0x22, 0x03, 0xd2, 0xd8, 0x2f, 0x9e, 0x19, 0x86, 0x56, 0xb0, 0x4a, 0x28, 0x63, 0xc8, + 0x80, 0x34, 0xf7, 0x27, 0xfc, 0x18, 0x43, 0x46, 0x0d, 0xa6, 0x1a, 0x25, 0x5f, 0x56, 0x20, 0x99, + 0x00, 0xd2, 0xda, 0xdf, 0xe8, 0xd4, 0x30, 0xb4, 0x82, 0xf1, 0x4b, 0xe4, 0x03, 0x4b, 0x04, 0x93, + 0x40, 0xda, 0xda, 0xeb, 0xd6, 0xdf, 0x4c, 0x21, 0xd4, 0xa2, 0xf8, 0x0d, 0x3a, 0x10, 0x0c, 0xf8, + 0x4a, 0xa8, 0x17, 0xf1, 0xb5, 0x77, 0x5c, 0xe7, 0xd1, 0x12, 0xa2, 0xff, 0x70, 0x7c, 0x82, 0x10, + 0xfb, 0x2a, 0x59, 0x01, 0x29, 0x2f, 0x80, 0x74, 0xb4, 0x7c, 0xaf, 0x4e, 0x7e, 0x67, 0x29, 0xba, + 0x23, 0xa8, 0xc0, 0x09, 0x2f, 0x66, 0xe9, 0x1c, 0xc8, 0xc1, 0xfe, 0xc0, 0xa7, 0x1a, 0xa1, 0x16, + 0xed, 0xa7, 0xe8, 0x56, 0x79, 0xf7, 0x6a, 0x08, 0x5e, 0x23, 0x3f, 0x67, 0xf9, 0x44, 0xbd, 0x98, + 0x19, 0x83, 0xa0, 0xf6, 0x06, 0xf1, 0x4c, 0x7e, 0xd0, 0x18, 0xb5, 0x38, 0x3e, 0x46, 0xbe, 0x60, + 0x39, 0x5f, 0xb3, 0xa9, 0x9e, 0x86, 0xe6, 0xc8, 0x3b, 0x72, 0xa8, 0x3d, 0xea, 0xff, 0x71, 0x51, + 0xa7, 0x6a, 0xf2, 0x16, 0xf9, 0x6b, 0x26, 0x54, 0x72, 0xe2, 0xf6, 0xdc, 0xc1, 0xcd, 0xe1, 0xa3, + 0xda, 0xe7, 0xb5, 0x3b, 0x73, 0x6e, 0x58, 0x6a, 0x25, 0xfc, 0x1e, 0xa1, 0xb2, 0xeb, 0x22, 0x5d, + 0x12, 0xaf, 0xe7, 0x0e, 0x0e, 0x87, 0x0f, 0xaf, 0xf8, 0xb3, 0xb6, 0xd2, 0xa8, 0xb9, 0xf9, 0x75, + 0xdf, 0xa1, 0x3b, 0x32, 0x3e, 0x41, 0x2d, 0x50, 0x5b, 0x40, 0x1a, 0xba, 0xca, 0x83, 0xda, 0x20, + 0xbb, 0x6b, 0x52, 0xd6, 0x30, 0x56, 0xff, 0x36, 0xf2, 0xcb, 0x74, 0xb8, 0x8d, 0xbc, 0xf3, 0x67, + 0x47, 0xce, 0x88, 0x6c, 0x2e, 0x03, 0xe7, 0xe7, 0x65, 0xe0, 0x7c, 0xdb, 0x06, 0xee, 0x66, 0x1b, + 0xb8, 0x3f, 0xb6, 0x81, 0xfb, 0x7b, 0x1b, 0xb8, 0x9f, 0xbc, 0x49, 0x5b, 0xef, 0xde, 0x8b, 0xbf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbe, 0x47, 0xec, 0x2f, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/specs.pb.go 
b/vendor/github.com/docker/swarmkit/api/specs.pb.go new file mode 100644 index 0000000000..dfd18a6d78 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -0,0 +1,6743 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/specs.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import google_protobuf3 "github.com/gogo/protobuf/types" +import google_protobuf4 "github.com/gogo/protobuf/types" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type NodeSpec_Membership int32 + +const ( + NodeMembershipPending NodeSpec_Membership = 0 + NodeMembershipAccepted NodeSpec_Membership = 1 +) + +var NodeSpec_Membership_name = map[int32]string{ + 0: "PENDING", + 1: "ACCEPTED", +} +var NodeSpec_Membership_value = map[string]int32{ + "PENDING": 0, + "ACCEPTED": 1, +} + +func (x NodeSpec_Membership) String() string { + return proto.EnumName(NodeSpec_Membership_name, int32(x)) +} +func (NodeSpec_Membership) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 0} } + +type NodeSpec_Availability int32 + +const ( + // Active nodes. + NodeAvailabilityActive NodeSpec_Availability = 0 + // Paused nodes won't be considered by the scheduler, preventing any + // further tasks from running on them. + NodeAvailabilityPause NodeSpec_Availability = 1 + // Drained nodes are paused and any task already running on them will + // be evicted. + NodeAvailabilityDrain NodeSpec_Availability = 2 +) + +var NodeSpec_Availability_name = map[int32]string{ + 0: "ACTIVE", + 1: "PAUSE", + 2: "DRAIN", +} +var NodeSpec_Availability_value = map[string]int32{ + "ACTIVE": 0, + "PAUSE": 1, + "DRAIN": 2, +} + +func (x NodeSpec_Availability) String() string { + return proto.EnumName(NodeSpec_Availability_name, int32(x)) +} +func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} } + +type ContainerSpec_Isolation int32 + +const ( + // ISOLATION_DEFAULT uses whatever default value the container runtime provides + ContainerIsolationDefault ContainerSpec_Isolation = 0 + // ISOLATION_PROCESS forces Windows container isolation + ContainerIsolationProcess ContainerSpec_Isolation = 1 + // ISOLATION_HYPERV forces Hyper-V isolation + ContainerIsolationHyperV ContainerSpec_Isolation = 2 +) + +var ContainerSpec_Isolation_name = map[int32]string{ + 0: "ISOLATION_DEFAULT", + 1: "ISOLATION_PROCESS", + 2: "ISOLATION_HYPERV", +} +var ContainerSpec_Isolation_value = map[string]int32{ + "ISOLATION_DEFAULT": 0, + "ISOLATION_PROCESS": 1, + "ISOLATION_HYPERV": 2, +} + +func (x ContainerSpec_Isolation) String() string { + return proto.EnumName(ContainerSpec_Isolation_name, int32(x)) +} +func (ContainerSpec_Isolation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 0} +} + +// ResolutionMode specifies the mode of resolution to use for +// internal load balancing between tasks which are all within +// the cluster. This is sometimes called the east-west data path.
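+// +// (Editorial sketch, not part of the generated file: the paired name/value +// maps in this file are what the generated String() methods consult via +// proto.EnumName, so ContainerIsolationHyperV.String() returns +// "ISOLATION_HYPERV" and ContainerSpec_Isolation_value["ISOLATION_PROCESS"] +// is 1; the ResolutionMode enum below follows the same pattern.)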
+type EndpointSpec_ResolutionMode int32 + +const ( + // VIP resolution mode specifies that the + // service resolves to a logical IP and the requests + // are sent to that logical IP. Packets hitting that + // logical IP are load balanced to a chosen backend. + ResolutionModeVirtualIP EndpointSpec_ResolutionMode = 0 + // DNSRR resolution mode specifies that the + // service directly gets resolved to one of the + // backend IPs and the client directly initiates a + // request towards the actual backend. This requires + // that the client does not cache the DNS responses + // when the DNS response TTL is 0. + ResolutionModeDNSRoundRobin EndpointSpec_ResolutionMode = 1 +) + +var EndpointSpec_ResolutionMode_name = map[int32]string{ + 0: "VIP", + 1: "DNSRR", +} +var EndpointSpec_ResolutionMode_value = map[string]int32{ + "VIP": 0, + "DNSRR": 1, +} + +func (x EndpointSpec_ResolutionMode) String() string { + return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x)) +} +func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{9, 0} +} + +type NodeSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DesiredRole defines the role the node should have. + DesiredRole NodeRole `protobuf:"varint,2,opt,name=desired_role,json=desiredRole,proto3,enum=docker.swarmkit.v1.NodeRole" json:"desired_role,omitempty"` + // Membership controls the admission of the node into the cluster. + Membership NodeSpec_Membership `protobuf:"varint,3,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"membership,omitempty"` + // Availability allows a user to control the current scheduling status of a + // node. + Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0} } + +// ServiceSpec defines the properties of a service. +// +// A service instructs the cluster in orchestrating repeated instances of a +// template, implemented as tasks. Based on the number of instances, scheduling +// strategy and restart policy, a number of application-level behaviors can be +// defined. +type ServiceSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Task defines the task template this service will spawn. + Task TaskSpec `protobuf:"bytes,2,opt,name=task" json:"task"` + // Types that are valid to be assigned to Mode: + // *ServiceSpec_Replicated + // *ServiceSpec_Global + Mode isServiceSpec_Mode `protobuf_oneof:"mode"` + // Update contains settings which affect updates. + Update *UpdateConfig `protobuf:"bytes,6,opt,name=update" json:"update,omitempty"` + // Rollback contains settings which affect rollbacks of updates. + Rollback *UpdateConfig `protobuf:"bytes,9,opt,name=rollback" json:"rollback,omitempty"` + // ServiceSpec.Networks has been deprecated and is replaced by the + // Networks field in Task (TaskSpec.Networks). + // This field (ServiceSpec.Networks) is kept for compatibility. + // In case TaskSpec.Networks does not exist, ServiceSpec.Networks + // is still honored if it exists.
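+ // Editorial example (not generated): a spec written before TaskSpec.Networks + // existed may set only this field, and its tasks are still attached to the + // listed networks; once TaskSpec.Networks is populated, it takes precedence + // and this field is not consulted.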
+ Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // Service endpoint specifies the user provided configuration + // to properly discover and load balance a service. + Endpoint *EndpointSpec `protobuf:"bytes,8,opt,name=endpoint" json:"endpoint,omitempty"` +} + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{1} } + +type isServiceSpec_Mode interface { + isServiceSpec_Mode() + MarshalTo([]byte) (int, error) + Size() int +} + +type ServiceSpec_Replicated struct { + Replicated *ReplicatedService `protobuf:"bytes,3,opt,name=replicated,oneof"` +} +type ServiceSpec_Global struct { + Global *GlobalService `protobuf:"bytes,4,opt,name=global,oneof"` +} + +func (*ServiceSpec_Replicated) isServiceSpec_Mode() {} +func (*ServiceSpec_Global) isServiceSpec_Mode() {} + +func (m *ServiceSpec) GetMode() isServiceSpec_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *ServiceSpec) GetReplicated() *ReplicatedService { + if x, ok := m.GetMode().(*ServiceSpec_Replicated); ok { + return x.Replicated + } + return nil +} + +func (m *ServiceSpec) GetGlobal() *GlobalService { + if x, ok := m.GetMode().(*ServiceSpec_Global); ok { + return x.Global + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ServiceSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServiceSpec_OneofMarshaler, _ServiceSpec_OneofUnmarshaler, _ServiceSpec_OneofSizer, []interface{}{ + (*ServiceSpec_Replicated)(nil), + (*ServiceSpec_Global)(nil), + } +} + +func _ServiceSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replicated); err != nil { + return err + } + case *ServiceSpec_Global: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Global); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServiceSpec.Mode has unexpected type %T", x) + } + return nil +} + +func _ServiceSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServiceSpec) + switch tag { + case 3: // mode.replicated + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplicatedService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Replicated{msg} + return true, err + case 4: // mode.global + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GlobalService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Global{msg} + return true, err + default: + return false, nil + } +} + +func _ServiceSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + s := proto.Size(x.Replicated) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServiceSpec_Global: + s := proto.Size(x.Global) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ReplicatedService 
sets the reconciliation target to a certain number of replicas. +type ReplicatedService struct { + Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` +} + +func (m *ReplicatedService) Reset() { *m = ReplicatedService{} } +func (*ReplicatedService) ProtoMessage() {} +func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} } + +// GlobalService represents a global service. +type GlobalService struct { +} + +func (m *GlobalService) Reset() { *m = GlobalService{} } +func (*GlobalService) ProtoMessage() {} +func (*GlobalService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{3} } + +type TaskSpec struct { + // Types that are valid to be assigned to Runtime: + // *TaskSpec_Attachment + // *TaskSpec_Container + // *TaskSpec_Generic + Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"` + // Resource requirements for the container. + Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` + // RestartPolicy specifies what to do when a task fails or finishes. + Restart *RestartPolicy `protobuf:"bytes,4,opt,name=restart" json:"restart,omitempty"` + // Placement specifies node selection constraints. + Placement *Placement `protobuf:"bytes,5,opt,name=placement" json:"placement,omitempty"` + // LogDriver specifies the log driver to use for the task. Any runtime will + // direct logs into the specified driver for the duration of the task. + LogDriver *Driver `protobuf:"bytes,6,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + // Networks specifies the list of network attachment + // configurations (which specify the network and per-network + // aliases) that this task spec is bound to. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. We do this to allow forced restarts + // using the same reconciliation-based mechanism that performs rolling + // updates. + ForceUpdate uint64 `protobuf:"varint,9,opt,name=force_update,json=forceUpdate,proto3" json:"force_update,omitempty"` + // ResourceReferences provides a generic way to specify resources that + // are used by this task, and should be sent down to agents along with + // the task. Inside the runtime field there may be more specific + // information about how to use the resource, but ResourceReferences + // establishes the relationship at the store level, and instructs the + // dispatcher to send the related objects. + // + // ResourceReferences is a list of ResourceReferences used by the task.
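+ // Editorial note: a ResourceReference (see below) carries only a ResourceID + // and a ResourceType, which is all the store and dispatcher need to locate + // and ship the object; any richer, type-specific usage information lives in + // the runtime field as described above.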
+ ResourceReferences []ResourceReference `protobuf:"bytes,11,rep,name=resource_references,json=resourceReferences" json:"resource_references"` +} + +func (m *TaskSpec) Reset() { *m = TaskSpec{} } +func (*TaskSpec) ProtoMessage() {} +func (*TaskSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{4} } + +type isTaskSpec_Runtime interface { + isTaskSpec_Runtime() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskSpec_Attachment struct { + Attachment *NetworkAttachmentSpec `protobuf:"bytes,8,opt,name=attachment,oneof"` +} +type TaskSpec_Container struct { + Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"` +} +type TaskSpec_Generic struct { + Generic *GenericRuntimeSpec `protobuf:"bytes,10,opt,name=generic,oneof"` +} + +func (*TaskSpec_Attachment) isTaskSpec_Runtime() {} +func (*TaskSpec_Container) isTaskSpec_Runtime() {} +func (*TaskSpec_Generic) isTaskSpec_Runtime() {} + +func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime { + if m != nil { + return m.Runtime + } + return nil +} + +func (m *TaskSpec) GetAttachment() *NetworkAttachmentSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Attachment); ok { + return x.Attachment + } + return nil +} + +func (m *TaskSpec) GetContainer() *ContainerSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Container); ok { + return x.Container + } + return nil +} + +func (m *TaskSpec) GetGeneric() *GenericRuntimeSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Generic); ok { + return x.Generic + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{ + (*TaskSpec_Attachment)(nil), + (*TaskSpec_Container)(nil), + (*TaskSpec_Generic)(nil), + } +} + +func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Attachment); err != nil { + return err + } + case *TaskSpec_Container: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case *TaskSpec_Generic: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Generic); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x) + } + return nil +} + +func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskSpec) + switch tag { + case 8: // runtime.attachment + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NetworkAttachmentSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Attachment{msg} + return true, err + case 1: // runtime.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Container{msg} + return true, err + case 10: // runtime.generic + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GenericRuntimeSpec) + err := b.DecodeMessage(msg) + m.Runtime = &TaskSpec_Generic{msg} + return true, err + default: + return false, nil + } +} + +func 
_TaskSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskSpec) + // runtime + switch x := m.Runtime.(type) { + case *TaskSpec_Attachment: + s := proto.Size(x.Attachment) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *TaskSpec_Generic: + s := proto.Size(x.Generic) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ResourceReference struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ResourceType ResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=docker.swarmkit.v1.ResourceType" json:"resource_type,omitempty"` +} + +func (m *ResourceReference) Reset() { *m = ResourceReference{} } +func (*ResourceReference) ProtoMessage() {} +func (*ResourceReference) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} } + +type GenericRuntimeSpec struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Payload *google_protobuf3.Any `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"` +} + +func (m *GenericRuntimeSpec) Reset() { *m = GenericRuntimeSpec{} } +func (*GenericRuntimeSpec) ProtoMessage() {} +func (*GenericRuntimeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6} } + +// NetworkAttachmentSpec specifies runtime parameters required to attach +// a container to a network. +type NetworkAttachmentSpec struct { + // ContainerID specifies a unique ID of the container that + // this attachment is for. + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *NetworkAttachmentSpec) Reset() { *m = NetworkAttachmentSpec{} } +func (*NetworkAttachmentSpec) ProtoMessage() {} +func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} } + +// ContainerSpec specifies runtime parameters for a container. +type ContainerSpec struct { + // Image defines the image reference, as specified in the + // distribution/reference package. This may include a registry host, name, + // tag or digest. + // + // The field will be passed directly to the engine when pulling. Well-behaved + // service definitions will use immutable references, either through tags + // that don't change or verifiable digests. + Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // Labels defines labels to be added to the container at creation time. If + // collisions with system labels occur, these labels will be overridden. + // + // This field *must* remain compatible with the Labels field of + // Annotations. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Command to run in the container. The first element is a path to the + // executable and the following elements are treated as arguments. + // + // If command is empty, execution will fall back to the image's entrypoint. + // + // Command should only be used when overriding entrypoint.
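+ // + // Editorial example (values assumed): Command = []string{"/usr/local/bin/redis-server"} + // with Args = []string{"--appendonly", "yes"} runs + // `/usr/local/bin/redis-server --appendonly yes`, bypassing the image + // entrypoint; an empty Command falls back to the entrypoint, with Args + // passed to it.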
+ Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"` + // Args specifies arguments provided to the image's entrypoint. + // + // If Command and Args are provided, Args will be appended to Command. + Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` + // Hostname specifies the hostname that will be set on containers created by docker swarm. + // All containers for a given service will have the same hostname. + Hostname string `protobuf:"bytes,14,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Env specifies the environment variables for the container in NAME=VALUE + // format. These must be compliant with [IEEE Std + // 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html). + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` + // Dir defines the working directory to set for the container process. + Dir string `protobuf:"bytes,6,opt,name=dir,proto3" json:"dir,omitempty"` + // User specifies the user that should be employed to run the container. + // + // Note that the primary group may be specified by appending the group name + // or id to the user name, separated by a `:`. This syntax is + // `<user>:<group>`. + User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"` + // Groups specifies supplementary groups available to the user. + Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"` + // Privileges specifies security configuration/permissions. + Privileges *Privileges `protobuf:"bytes,22,opt,name=privileges" json:"privileges,omitempty"` + // Init declares that a custom init will be run inside the container; if null, the daemon's configured settings are used. + Init *google_protobuf4.BoolValue `protobuf:"bytes,23,opt,name=init" json:"init,omitempty"` + // TTY declares that a TTY should be attached to the standard streams, + // including stdin if it is still open. + TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"` + // OpenStdin declares that the standard input (stdin) should be open. + OpenStdin bool `protobuf:"varint,18,opt,name=open_stdin,json=openStdin,proto3" json:"open_stdin,omitempty"` + // ReadOnly declares that the container root filesystem is read-only. + // This only impacts the root filesystem, not additional mounts (including + // tmpfs). For additional mounts that are not part of the initial rootfs, + // behavior is decided by the modes passed in the mount definition. + ReadOnly bool `protobuf:"varint,19,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + // StopSignal defines the signal to stop the container. + StopSignal string `protobuf:"bytes,20,opt,name=stop_signal,json=stopSignal,proto3" json:"stop_signal,omitempty"` + Mounts []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"` + // StopGracePeriod is the grace period for stopping the container before + // forcefully killing the container. + // Note: Can't use stdduration here because this needs to be nullable. + StopGracePeriod *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"` + // PullOptions parameterize the behavior of image pulls. + PullOptions *ContainerSpec_PullOptions `protobuf:"bytes,10,opt,name=pull_options,json=pullOptions" json:"pull_options,omitempty"` + // SecretReference contains references to zero or more secrets that + // will be exposed to the container.
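+ // Editorial note: "exposed" means the referenced secret objects travel to + // the agent with the task and are materialized for the container at runtime; + // the SecretReference type in this package names the secret and its + // in-container target.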
+ Secrets []*SecretReference `protobuf:"bytes,12,rep,name=secrets" json:"secrets,omitempty"` + // ConfigReference contains references to zero or more configs that + // will be exposed to the container. + Configs []*ConfigReference `protobuf:"bytes,21,rep,name=configs" json:"configs,omitempty"` + // Hosts allows additional entries to be specified in /etc/hosts + // that associate IP addresses with hostnames. + // Detailed documentation is available in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + // + // The format of the Hosts entries in swarmkit follows the same + // format as above. + // This is different from `docker run --add-host <hostname>:<ip>`, + // where the format is `<hostname>:<ip>`. + Hosts []string `protobuf:"bytes,17,rep,name=hosts" json:"hosts,omitempty"` + // DNSConfig allows one to specify DNS-related configuration in resolv.conf + DNSConfig *ContainerSpec_DNSConfig `protobuf:"bytes,15,opt,name=dns_config,json=dnsConfig" json:"dns_config,omitempty"` + // Healthcheck describes how to check that the container is healthy. If the + // container is considered unhealthy, it will be destroyed, its creating + // task will exit and a new task will be rescheduled elsewhere. A container + // is considered unhealthy after `Retries` number of consecutive failures. + Healthcheck *HealthConfig `protobuf:"bytes,16,opt,name=healthcheck" json:"healthcheck,omitempty"` + // Isolation defines the isolation level for Windows containers (default, process, hyperv). + // Runtimes that don't support it ignore this field. + Isolation ContainerSpec_Isolation `protobuf:"varint,24,opt,name=isolation,proto3,enum=docker.swarmkit.v1.ContainerSpec_Isolation" json:"isolation,omitempty"` + // PidsLimit protects the OS from resource exhaustion caused by applications + // inside the container, for example via a fork bomb attack. + PidsLimit int64 `protobuf:"varint,25,opt,name=pidsLimit,proto3" json:"pidsLimit,omitempty"` +} + +func (m *ContainerSpec) Reset() { *m = ContainerSpec{} } +func (*ContainerSpec) ProtoMessage() {} +func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} } + +// PullOptions allows one to parameterize an image pull. +type ContainerSpec_PullOptions struct { + // RegistryAuth is the registry auth token obtained from the client, required + // to pull private images. This is the unmodified JSON used as part of + // the `X-Registry-Auth` header.
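+ // Editorial note: since this token authorizes pulls of private images, a + // spec carrying it should be treated as sensitive when logged or persisted.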
+ // TODO(nishanttotla): This field will later be deprecated + RegistryAuth string `protobuf:"bytes,64,opt,name=registry_auth,json=registryAuth,proto3" json:"registry_auth,omitempty"` +} + +func (m *ContainerSpec_PullOptions) Reset() { *m = ContainerSpec_PullOptions{} } +func (*ContainerSpec_PullOptions) ProtoMessage() {} +func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) { + return fileDescriptorSpecs, []int{8, 1} +} + +// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// TODO: domain is not supported yet +type ContainerSpec_DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"` + // Search specifies the search list for host-name lookup + Search []string `protobuf:"bytes,2,rep,name=search" json:"search,omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *ContainerSpec_DNSConfig) Reset() { *m = ContainerSpec_DNSConfig{} } +func (*ContainerSpec_DNSConfig) ProtoMessage() {} +func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8, 2} } + +// EndpointSpec defines the properties that can be configured to +// access and load balance the service. +type EndpointSpec struct { + Mode EndpointSpec_ResolutionMode `protobuf:"varint,1,opt,name=mode,proto3,enum=docker.swarmkit.v1.EndpointSpec_ResolutionMode" json:"mode,omitempty"` + // List of exposed ports through which this service is accessible from + // outside the cluster. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` +} + +func (m *EndpointSpec) Reset() { *m = EndpointSpec{} } +func (*EndpointSpec) ProtoMessage() {} +func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} } + +// NetworkSpec specifies user-defined network parameters. +type NetworkSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DriverConfig is driver-specific configuration consumed by the network driver. + DriverConfig *Driver `protobuf:"bytes,2,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` + // IPv6Enabled enables support for IPv6 on the network. + Ipv6Enabled bool `protobuf:"varint,3,opt,name=ipv6_enabled,json=ipv6Enabled,proto3" json:"ipv6_enabled,omitempty"` + // Internal restricts external access to the network. This may be + // accomplished by disabling the default gateway or through other means. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` + // Attachable allows external (to swarm) entities to manually + // attach to this network. With this flag enabled, external + // entities such as containers running on a worker node in + // the cluster can manually attach to this network and access + // the services attached to this network. If this flag is not + // enabled (the default case), no manual attachment to this network + // can happen. + Attachable bool `protobuf:"varint,6,opt,name=attachable,proto3" json:"attachable,omitempty"` + // Ingress indicates this network will provide the routing-mesh.
+ // In older versions, the network providing the routing mesh was + // created internally by swarm only, and it was identified by the name + // "ingress" and the label "com.docker.swarm.internal": "true". + Ingress bool `protobuf:"varint,7,opt,name=ingress,proto3" json:"ingress,omitempty"` + // ConfigFrom is the source of the configuration for this network. + // + // Types that are valid to be assigned to ConfigFrom: + // *NetworkSpec_Network + ConfigFrom isNetworkSpec_ConfigFrom `protobuf_oneof:"config_from"` +} + +func (m *NetworkSpec) Reset() { *m = NetworkSpec{} } +func (*NetworkSpec) ProtoMessage() {} +func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} } + +type isNetworkSpec_ConfigFrom interface { + isNetworkSpec_ConfigFrom() + MarshalTo([]byte) (int, error) + Size() int +} + +type NetworkSpec_Network struct { + Network string `protobuf:"bytes,8,opt,name=network,proto3,oneof"` +} + +func (*NetworkSpec_Network) isNetworkSpec_ConfigFrom() {} + +func (m *NetworkSpec) GetConfigFrom() isNetworkSpec_ConfigFrom { + if m != nil { + return m.ConfigFrom + } + return nil +} + +func (m *NetworkSpec) GetNetwork() string { + if x, ok := m.GetConfigFrom().(*NetworkSpec_Network); ok { + return x.Network + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*NetworkSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NetworkSpec_OneofMarshaler, _NetworkSpec_OneofUnmarshaler, _NetworkSpec_OneofSizer, []interface{}{ + (*NetworkSpec_Network)(nil), + } +} + +func _NetworkSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Network) + case nil: + default: + return fmt.Errorf("NetworkSpec.ConfigFrom has unexpected type %T", x) + } + return nil +} + +func _NetworkSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NetworkSpec) + switch tag { + case 8: // config_from.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.ConfigFrom = &NetworkSpec_Network{x} + return true, err + default: + return false, nil + } +} + +func _NetworkSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NetworkSpec) + // config_from + switch x := m.ConfigFrom.(type) { + case *NetworkSpec_Network: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Network))) + n += len(x.Network) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ClusterSpec specifies global cluster settings. +type ClusterSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // DEPRECATED: AcceptancePolicy defines the certificate issuance policy. + // Acceptance policy is no longer customizable, and secrets have been + // replaced with join tokens. + AcceptancePolicy AcceptancePolicy `protobuf:"bytes,2,opt,name=acceptance_policy,json=acceptancePolicy" json:"acceptance_policy"` + // Orchestration defines cluster-level orchestration settings. + Orchestration OrchestrationConfig `protobuf:"bytes,3,opt,name=orchestration" json:"orchestration"` + // Raft defines the cluster's raft settings.
+ Raft RaftConfig `protobuf:"bytes,4,opt,name=raft" json:"raft"` + // Dispatcher defines cluster-level dispatcher settings. + Dispatcher DispatcherConfig `protobuf:"bytes,5,opt,name=dispatcher" json:"dispatcher"` + // CAConfig defines cluster-level certificate authority settings. + CAConfig CAConfig `protobuf:"bytes,6,opt,name=ca_config,json=caConfig" json:"ca_config"` + // TaskDefaults specifies the default values to use for task creation. + TaskDefaults TaskDefaults `protobuf:"bytes,7,opt,name=task_defaults,json=taskDefaults" json:"task_defaults"` + // EncryptionConfig defines the cluster's encryption settings. + EncryptionConfig EncryptionConfig `protobuf:"bytes,8,opt,name=encryption_config,json=encryptionConfig" json:"encryption_config"` +} + +func (m *ClusterSpec) Reset() { *m = ClusterSpec{} } +func (*ClusterSpec) ProtoMessage() {} +func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{11} } + +// SecretSpec specifies a user-provided secret. +type SecretSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes) + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` + // Driver is the secret driver that is used to store the specified secret. + Driver *Driver `protobuf:"bytes,4,opt,name=driver" json:"driver,omitempty"` +} + +func (m *SecretSpec) Reset() { *m = SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{12} } + +// ConfigSpec specifies user-provided configuration files. +type ConfigSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes) + // TODO(aaronl): Do we want to revise this to include multiple payloads in a single + // ConfigSpec? Define this to be a tar? etc... + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used.
+ // + // The currently recognized values are: + // - golang: Go templating + Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"` +} + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{13} } + +func init() { + proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec") + proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec") + proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService") + proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService") + proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec") + proto.RegisterType((*ResourceReference)(nil), "docker.swarmkit.v1.ResourceReference") + proto.RegisterType((*GenericRuntimeSpec)(nil), "docker.swarmkit.v1.GenericRuntimeSpec") + proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec") + proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec") + proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions") + proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig") + proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec") + proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec") + proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec") + proto.RegisterType((*SecretSpec)(nil), "docker.swarmkit.v1.SecretSpec") + proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec") + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value) + proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, ContainerSpec_Isolation_value) + proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value) +} + +func (m *NodeSpec) Copy() *NodeSpec { + if m == nil { + return nil + } + o := &NodeSpec{} + o.CopyFrom(m) + return o +} + +func (m *NodeSpec) CopyFrom(src interface{}) { + + o := src.(*NodeSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *ServiceSpec) Copy() *ServiceSpec { + if m == nil { + return nil + } + o := &ServiceSpec{} + o.CopyFrom(m) + return o +} + +func (m *ServiceSpec) CopyFrom(src interface{}) { + + o := src.(*ServiceSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Task, &o.Task) + if o.Update != nil { + m.Update = &UpdateConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Update, o.Update) + } + if o.Rollback != nil { + m.Rollback = &UpdateConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Rollback, o.Rollback) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &EndpointSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.Mode != nil { + switch o.Mode.(type) { + case 
*ServiceSpec_Replicated: + v := ServiceSpec_Replicated{ + Replicated: &ReplicatedService{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Replicated, o.GetReplicated()) + m.Mode = &v + case *ServiceSpec_Global: + v := ServiceSpec_Global{ + Global: &GlobalService{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Global, o.GetGlobal()) + m.Mode = &v + } + } + +} + +func (m *ReplicatedService) Copy() *ReplicatedService { + if m == nil { + return nil + } + o := &ReplicatedService{} + o.CopyFrom(m) + return o +} + +func (m *ReplicatedService) CopyFrom(src interface{}) { + + o := src.(*ReplicatedService) + *m = *o +} + +func (m *GlobalService) Copy() *GlobalService { + if m == nil { + return nil + } + o := &GlobalService{} + o.CopyFrom(m) + return o +} + +func (m *GlobalService) CopyFrom(src interface{}) {} +func (m *TaskSpec) Copy() *TaskSpec { + if m == nil { + return nil + } + o := &TaskSpec{} + o.CopyFrom(m) + return o +} + +func (m *TaskSpec) CopyFrom(src interface{}) { + + o := src.(*TaskSpec) + *m = *o + if o.Resources != nil { + m.Resources = &ResourceRequirements{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources, o.Resources) + } + if o.Restart != nil { + m.Restart = &RestartPolicy{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Restart, o.Restart) + } + if o.Placement != nil { + m.Placement = &Placement{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Placement, o.Placement) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.Networks != nil { + m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachmentConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.ResourceReferences != nil { + m.ResourceReferences = make([]ResourceReference, len(o.ResourceReferences)) + for i := range m.ResourceReferences { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i]) + } + } + + if o.Runtime != nil { + switch o.Runtime.(type) { + case *TaskSpec_Attachment: + v := TaskSpec_Attachment{ + Attachment: &NetworkAttachmentSpec{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Attachment, o.GetAttachment()) + m.Runtime = &v + case *TaskSpec_Container: + v := TaskSpec_Container{ + Container: &ContainerSpec{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Container, o.GetContainer()) + m.Runtime = &v + case *TaskSpec_Generic: + v := TaskSpec_Generic{ + Generic: &GenericRuntimeSpec{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Generic, o.GetGeneric()) + m.Runtime = &v + } + } + +} + +func (m *ResourceReference) Copy() *ResourceReference { + if m == nil { + return nil + } + o := &ResourceReference{} + o.CopyFrom(m) + return o +} + +func (m *ResourceReference) CopyFrom(src interface{}) { + + o := src.(*ResourceReference) + *m = *o +} + +func (m *GenericRuntimeSpec) Copy() *GenericRuntimeSpec { + if m == nil { + return nil + } + o := &GenericRuntimeSpec{} + o.CopyFrom(m) + return o +} + +func (m *GenericRuntimeSpec) CopyFrom(src interface{}) { + + o := src.(*GenericRuntimeSpec) + *m = *o + if o.Payload != nil { + m.Payload = &google_protobuf3.Any{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *NetworkAttachmentSpec) Copy() *NetworkAttachmentSpec { + if m == nil { + return nil + } + o := &NetworkAttachmentSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentSpec) 
CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentSpec) + *m = *o +} + +func (m *ContainerSpec) Copy() *ContainerSpec { + if m == nil { + return nil + } + o := &ContainerSpec{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Command != nil { + m.Command = make([]string, len(o.Command)) + copy(m.Command, o.Command) + } + + if o.Args != nil { + m.Args = make([]string, len(o.Args)) + copy(m.Args, o.Args) + } + + if o.Env != nil { + m.Env = make([]string, len(o.Env)) + copy(m.Env, o.Env) + } + + if o.Groups != nil { + m.Groups = make([]string, len(o.Groups)) + copy(m.Groups, o.Groups) + } + + if o.Privileges != nil { + m.Privileges = &Privileges{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Privileges, o.Privileges) + } + if o.Init != nil { + m.Init = &google_protobuf4.BoolValue{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Init, o.Init) + } + if o.Mounts != nil { + m.Mounts = make([]Mount, len(o.Mounts)) + for i := range m.Mounts { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Mounts[i], &o.Mounts[i]) + } + } + + if o.StopGracePeriod != nil { + m.StopGracePeriod = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod) + } + if o.PullOptions != nil { + m.PullOptions = &ContainerSpec_PullOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.PullOptions, o.PullOptions) + } + if o.Secrets != nil { + m.Secrets = make([]*SecretReference, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &SecretReference{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*ConfigReference, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &ConfigReference{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + + if o.Hosts != nil { + m.Hosts = make([]string, len(o.Hosts)) + copy(m.Hosts, o.Hosts) + } + + if o.DNSConfig != nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.DNSConfig, o.DNSConfig) + } + if o.Healthcheck != nil { + m.Healthcheck = &HealthConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Healthcheck, o.Healthcheck) + } +} + +func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions { + if m == nil { + return nil + } + o := &ContainerSpec_PullOptions{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_PullOptions) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_PullOptions) + *m = *o +} + +func (m *ContainerSpec_DNSConfig) Copy() *ContainerSpec_DNSConfig { + if m == nil { + return nil + } + o := &ContainerSpec_DNSConfig{} + o.CopyFrom(m) + return o +} + +func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) { + + o := src.(*ContainerSpec_DNSConfig) + *m = *o + if o.Nameservers != nil { + m.Nameservers = make([]string, len(o.Nameservers)) + copy(m.Nameservers, o.Nameservers) + } + + if o.Search != nil { + m.Search = make([]string, len(o.Search)) + copy(m.Search, o.Search) + } + + if o.Options != nil { + m.Options = make([]string, len(o.Options)) + copy(m.Options, o.Options) + } + +} + +func (m *EndpointSpec) Copy() *EndpointSpec { + if m == nil { + return nil + } + o := &EndpointSpec{} + o.CopyFrom(m) + return o +} + +func (m *EndpointSpec) CopyFrom(src interface{}) { + + o := 
src.(*EndpointSpec) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *NetworkSpec) Copy() *NetworkSpec { + if m == nil { + return nil + } + o := &NetworkSpec{} + o.CopyFrom(m) + return o +} + +func (m *NetworkSpec) CopyFrom(src interface{}) { + + o := src.(*NetworkSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.IPAM, o.IPAM) + } + if o.ConfigFrom != nil { + switch o.ConfigFrom.(type) { + case *NetworkSpec_Network: + v := NetworkSpec_Network{ + Network: o.GetNetwork(), + } + m.ConfigFrom = &v + } + } + +} + +func (m *ClusterSpec) Copy() *ClusterSpec { + if m == nil { + return nil + } + o := &ClusterSpec{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSpec) CopyFrom(src interface{}) { + + o := src.(*ClusterSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Orchestration, &o.Orchestration) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Raft, &o.Raft) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Dispatcher, &o.Dispatcher) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.CAConfig, &o.CAConfig) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults) + github_com_docker_swarmkit_api_deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig) +} + +func (m *SecretSpec) Copy() *SecretSpec { + if m == nil { + return nil + } + o := &SecretSpec{} + o.CopyFrom(m) + return o +} + +func (m *SecretSpec) CopyFrom(src interface{}) { + + o := src.(*SecretSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Templating, o.Templating) + } + if o.Driver != nil { + m.Driver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver) + } +} + +func (m *ConfigSpec) Copy() *ConfigSpec { + if m == nil { + return nil + } + o := &ConfigSpec{} + o.CopyFrom(m) + return o +} + +func (m *ConfigSpec) CopyFrom(src interface{}) { + + o := src.(*ConfigSpec) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Templating != nil { + m.Templating = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Templating, o.Templating) + } +} + +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n1, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.DesiredRole != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, 
uint64(m.DesiredRole)) + } + if m.Membership != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Membership)) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n2, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Task.Size())) + n3, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if m.Mode != nil { + nn4, err := m.Mode.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn4 + } + if m.Update != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Update.Size())) + n5, err := m.Update.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Endpoint.Size())) + n6, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Rollback != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Rollback.Size())) + n7, err := m.Rollback.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ServiceSpec_Replicated) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Replicated != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicated.Size())) + n8, err := m.Replicated.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *ServiceSpec_Global) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Global != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Global.Size())) + n9, err := m.Global.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ReplicatedService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicatedService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Replicas)) + } + return i, nil +} + +func (m *GlobalService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlobalService) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TaskSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l 
int + _ = l + if m.Runtime != nil { + nn10, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn10 + } + if m.Resources != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Resources.Size())) + n11, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Restart != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Restart.Size())) + n12, err := m.Restart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Placement != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Placement.Size())) + n13, err := m.Placement.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.LogDriver != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.LogDriver.Size())) + n14, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ForceUpdate != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ForceUpdate)) + } + if len(m.ResourceReferences) > 0 { + for _, msg := range m.ResourceReferences { + dAtA[i] = 0x5a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskSpec_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Container.Size())) + n15, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} +func (m *TaskSpec_Attachment) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Attachment != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Attachment.Size())) + n16, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} +func (m *TaskSpec_Generic) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Generic != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Generic.Size())) + n17, err := m.Generic.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} +func (m *ResourceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + if m.ResourceType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.ResourceType)) + } + return i, nil +} + +func (m *GenericRuntimeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericRuntimeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != 
nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Payload.Size())) + n18, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *NetworkAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *ContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Image) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + i = encodeVarintSpecs(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Dir) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Dir))) + i += copy(dAtA[i:], m.Dir) + } + if len(m.User) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Mounts) > 0 { + for _, msg := range m.Mounts { + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.StopGracePeriod != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.StopGracePeriod.Size())) + n19, err := m.StopGracePeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.PullOptions != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.PullOptions.Size())) + n20, err := m.PullOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x5a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x62 + i++ + i = encodeVarintSpecs(dAtA, 
i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.TTY { + dAtA[i] = 0x68 + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Hostname) > 0 { + dAtA[i] = 0x72 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + } + if m.DNSConfig != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DNSConfig.Size())) + n21, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Healthcheck != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Healthcheck.Size())) + n22, err := m.Healthcheck.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.OpenStdin { + dAtA[i] = 0x90 + i++ + dAtA[i] = 0x1 + i++ + if m.OpenStdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ReadOnly { + dAtA[i] = 0x98 + i++ + dAtA[i] = 0x1 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.StopSignal) > 0 { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.StopSignal))) + i += copy(dAtA[i:], m.StopSignal) + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Privileges != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Privileges.Size())) + n23, err := m.Privileges.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + if m.Init != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Init.Size())) + n24, err := m.Init.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.Isolation != 0 { + dAtA[i] = 0xc0 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Isolation)) + } + if m.PidsLimit != 0 { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.PidsLimit)) + } + return i, nil +} + +func (m *ContainerSpec_PullOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec_PullOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.RegistryAuth) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x4 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.RegistryAuth))) + i += copy(dAtA[i:], m.RegistryAuth) + } + return i, nil +} + +func (m *ContainerSpec_DNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerSpec_DNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i 
+= copy(dAtA[i:], s) + } + } + if len(m.Search) > 0 { + for _, s := range m.Search { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, s := range m.Options { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *EndpointSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Mode != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Mode)) + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n25, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + if m.DriverConfig != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.DriverConfig.Size())) + n26, err := m.DriverConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Ipv6Enabled { + dAtA[i] = 0x18 + i++ + if m.Ipv6Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.IPAM.Size())) + n27, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if m.Attachable { + dAtA[i] = 0x30 + i++ + if m.Attachable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Ingress { + dAtA[i] = 0x38 + i++ + if m.Ingress { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ConfigFrom != nil { + nn28, err := m.ConfigFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn28 + } + return i, nil +} + +func (m *NetworkSpec_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Network))) + i += copy(dAtA[i:], m.Network) + return i, nil +} +func (m *ClusterSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n29, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.AcceptancePolicy.Size())) + n30, err := m.AcceptancePolicy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n30 + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Orchestration.Size())) + n31, err := m.Orchestration.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Raft.Size())) + n32, err := m.Raft.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x2a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Dispatcher.Size())) + n33, err := m.Dispatcher.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x32 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.CAConfig.Size())) + n34, err := m.CAConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x3a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.TaskDefaults.Size())) + n35, err := m.TaskDefaults.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + dAtA[i] = 0x42 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.EncryptionConfig.Size())) + n36, err := m.EncryptionConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + return i, nil +} + +func (m *SecretSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n37, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Templating != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size())) + n38, err := m.Templating.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + if m.Driver != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Driver.Size())) + n39, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} + +func (m *ConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size())) + n40, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.Templating != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size())) + n41, err := m.Templating.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func encodeFixed64Specs(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Specs(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = 
uint8(v >> 24) + return offset + 4 +} +func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *NodeSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.DesiredRole != 0 { + n += 1 + sovSpecs(uint64(m.DesiredRole)) + } + if m.Membership != 0 { + n += 1 + sovSpecs(uint64(m.Membership)) + } + if m.Availability != 0 { + n += 1 + sovSpecs(uint64(m.Availability)) + } + return n +} + +func (m *ServiceSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Task.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.Mode != nil { + n += m.Mode.Size() + } + if m.Update != nil { + l = m.Update.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Rollback != nil { + l = m.Rollback.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ServiceSpec_Replicated) Size() (n int) { + var l int + _ = l + if m.Replicated != nil { + l = m.Replicated.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ServiceSpec_Global) Size() (n int) { + var l int + _ = l + if m.Global != nil { + l = m.Global.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ReplicatedService) Size() (n int) { + var l int + _ = l + if m.Replicas != 0 { + n += 1 + sovSpecs(uint64(m.Replicas)) + } + return n +} + +func (m *GlobalService) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TaskSpec) Size() (n int) { + var l int + _ = l + if m.Runtime != nil { + n += m.Runtime.Size() + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Restart != nil { + l = m.Restart.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Placement != nil { + l = m.Placement.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.ForceUpdate != 0 { + n += 1 + sovSpecs(uint64(m.ForceUpdate)) + } + if len(m.ResourceReferences) > 0 { + for _, e := range m.ResourceReferences { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + +func (m *TaskSpec_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *TaskSpec_Attachment) Size() (n int) { + var l int + _ = l + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *TaskSpec_Generic) Size() (n int) { + var l int + _ = l + if m.Generic != nil { + l = m.Generic.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} +func (m *ResourceReference) Size() (n int) { + var l int + _ = l + l = len(m.ResourceID) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.ResourceType != 0 { + n += 1 + sovSpecs(uint64(m.ResourceType)) + } + return n +} + +func (m *GenericRuntimeSpec) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + 
return n +} + +func (m *NetworkAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ContainerSpec) Size() (n int) { + var l int + _ = l + l = len(m.Image) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v))) + n += mapEntrySize + 1 + sovSpecs(uint64(mapEntrySize)) + } + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + l = len(m.Dir) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.StopGracePeriod != nil { + l = m.StopGracePeriod.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.PullOptions != nil { + l = m.PullOptions.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + if m.TTY { + n += 2 + } + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Healthcheck != nil { + l = m.Healthcheck.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 2 + l + sovSpecs(uint64(l)) + } + } + if m.OpenStdin { + n += 3 + } + if m.ReadOnly { + n += 3 + } + l = len(m.StopSignal) + if l > 0 { + n += 2 + l + sovSpecs(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + } + if m.Privileges != nil { + l = m.Privileges.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if m.Init != nil { + l = m.Init.Size() + n += 2 + l + sovSpecs(uint64(l)) + } + if m.Isolation != 0 { + n += 2 + sovSpecs(uint64(m.Isolation)) + } + if m.PidsLimit != 0 { + n += 2 + sovSpecs(uint64(m.PidsLimit)) + } + return n +} + +func (m *ContainerSpec_PullOptions) Size() (n int) { + var l int + _ = l + l = len(m.RegistryAuth) + if l > 0 { + n += 2 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ContainerSpec_DNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Search) > 0 { + for _, s := range m.Search { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, s := range m.Options { + l = len(s) + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + +func (m *EndpointSpec) Size() (n int) { + var l int + _ = l + if m.Mode != 0 { + n += 1 + sovSpecs(uint64(m.Mode)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + } + return n +} + +func (m *NetworkSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + if m.DriverConfig != nil { + l = m.DriverConfig.Size() 
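+		// length-delimited field: 1 tag byte + varint-encoded length + payload bytes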
+ n += 1 + l + sovSpecs(uint64(l)) + } + if m.Ipv6Enabled { + n += 2 + } + if m.Internal { + n += 2 + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Attachable { + n += 2 + } + if m.Ingress { + n += 2 + } + if m.ConfigFrom != nil { + n += m.ConfigFrom.Size() + } + return n +} + +func (m *NetworkSpec_Network) Size() (n int) { + var l int + _ = l + l = len(m.Network) + n += 1 + l + sovSpecs(uint64(l)) + return n +} +func (m *ClusterSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.AcceptancePolicy.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Orchestration.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Raft.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.Dispatcher.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.CAConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.TaskDefaults.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = m.EncryptionConfig.Size() + n += 1 + l + sovSpecs(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Templating != nil { + l = m.Templating.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func (m *ConfigSpec) Size() (n int) { + var l int + _ = l + l = m.Annotations.Size() + n += 1 + l + sovSpecs(uint64(l)) + l = len(m.Data) + if l > 0 { + n += 1 + l + sovSpecs(uint64(l)) + } + if m.Templating != nil { + l = m.Templating.Size() + n += 1 + l + sovSpecs(uint64(l)) + } + return n +} + +func sovSpecs(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSpecs(x uint64) (n int) { + return sovSpecs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `DesiredRole:` + fmt.Sprintf("%v", this.DesiredRole) + `,`, + `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Task:` + strings.Replace(strings.Replace(this.Task.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "UpdateConfig", "UpdateConfig", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "EndpointSpec", "EndpointSpec", 1) + `,`, + `Rollback:` + strings.Replace(fmt.Sprintf("%v", this.Rollback), "UpdateConfig", "UpdateConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec_Replicated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec_Replicated{`, + `Replicated:` + strings.Replace(fmt.Sprintf("%v", this.Replicated), "ReplicatedService", "ReplicatedService", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec_Global) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceSpec_Global{`, + `Global:` + strings.Replace(fmt.Sprintf("%v", this.Global), "GlobalService", "GlobalService", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicatedService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicatedService{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *GlobalService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlobalService{`, + `}`, + }, "") + return s +} +func (this *TaskSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec{`, + `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "ResourceRequirements", 1) + `,`, + `Restart:` + strings.Replace(fmt.Sprintf("%v", this.Restart), "RestartPolicy", "RestartPolicy", 1) + `,`, + `Placement:` + strings.Replace(fmt.Sprintf("%v", this.Placement), "Placement", "Placement", 1) + `,`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ForceUpdate:` + fmt.Sprintf("%v", this.ForceUpdate) + `,`, + `ResourceReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceReferences), "ResourceReference", "ResourceReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerSpec", "ContainerSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Attachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Attachment{`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachmentSpec", "NetworkAttachmentSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskSpec_Generic) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskSpec_Generic{`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericRuntimeSpec", "GenericRuntimeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceReference{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`, + `}`, + }, "") + return s +} +func (this *GenericRuntimeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericRuntimeSpec{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf3.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkAttachmentSpec{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 
0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + fmt.Sprintf("%v", this.Env) + `,`, + `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Mounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "Mount", 1), `&`, ``, 1) + `,`, + `StopGracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.StopGracePeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `PullOptions:` + strings.Replace(fmt.Sprintf("%v", this.PullOptions), "ContainerSpec_PullOptions", "ContainerSpec_PullOptions", 1) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretReference", "SecretReference", 1) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "ContainerSpec_DNSConfig", "ContainerSpec_DNSConfig", 1) + `,`, + `Healthcheck:` + strings.Replace(fmt.Sprintf("%v", this.Healthcheck), "HealthConfig", "HealthConfig", 1) + `,`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `OpenStdin:` + fmt.Sprintf("%v", this.OpenStdin) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `StopSignal:` + fmt.Sprintf("%v", this.StopSignal) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "ConfigReference", "ConfigReference", 1) + `,`, + `Privileges:` + strings.Replace(fmt.Sprintf("%v", this.Privileges), "Privileges", "Privileges", 1) + `,`, + `Init:` + strings.Replace(fmt.Sprintf("%v", this.Init), "BoolValue", "google_protobuf4.BoolValue", 1) + `,`, + `Isolation:` + fmt.Sprintf("%v", this.Isolation) + `,`, + `PidsLimit:` + fmt.Sprintf("%v", this.PidsLimit) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_PullOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_PullOptions{`, + `RegistryAuth:` + fmt.Sprintf("%v", this.RegistryAuth) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_DNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_DNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Search:` + fmt.Sprintf("%v", this.Search) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSpec{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", 
this.DriverConfig), "Driver", "Driver", 1) + `,`, + `Ipv6Enabled:` + fmt.Sprintf("%v", this.Ipv6Enabled) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `Attachable:` + fmt.Sprintf("%v", this.Attachable) + `,`, + `Ingress:` + fmt.Sprintf("%v", this.Ingress) + `,`, + `ConfigFrom:` + fmt.Sprintf("%v", this.ConfigFrom) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec_Network{`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `AcceptancePolicy:` + strings.Replace(strings.Replace(this.AcceptancePolicy.String(), "AcceptancePolicy", "AcceptancePolicy", 1), `&`, ``, 1) + `,`, + `Orchestration:` + strings.Replace(strings.Replace(this.Orchestration.String(), "OrchestrationConfig", "OrchestrationConfig", 1), `&`, ``, 1) + `,`, + `Raft:` + strings.Replace(strings.Replace(this.Raft.String(), "RaftConfig", "RaftConfig", 1), `&`, ``, 1) + `,`, + `Dispatcher:` + strings.Replace(strings.Replace(this.Dispatcher.String(), "DispatcherConfig", "DispatcherConfig", 1), `&`, ``, 1) + `,`, + `CAConfig:` + strings.Replace(strings.Replace(this.CAConfig.String(), "CAConfig", "CAConfig", 1), `&`, ``, 1) + `,`, + `TaskDefaults:` + strings.Replace(strings.Replace(this.TaskDefaults.String(), "TaskDefaults", "TaskDefaults", 1), `&`, ``, 1) + `,`, + `EncryptionConfig:` + strings.Replace(strings.Replace(this.EncryptionConfig.String(), "EncryptionConfig", "EncryptionConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSpecs(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredRole", wireType) + } + m.DesiredRole = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredRole |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + m.Membership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Membership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReplicatedService{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mode = &ServiceSpec_Replicated{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GlobalService{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Mode = &ServiceSpec_Global{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Update == nil { + m.Update = &UpdateConfig{} + } + if err := m.Update.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachmentConfig{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &EndpointSpec{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Rollback == nil { + m.Rollback = &UpdateConfig{} + } + if err := m.Rollback.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicatedService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicatedService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicatedService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlobalService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlobalService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlobalService: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Container{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &ResourceRequirements{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Restart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Restart == nil { + m.Restart = &RestartPolicy{} + } + if err := m.Restart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Placement", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if m.Placement == nil { + m.Placement = &Placement{} + } + if err := m.Placement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachmentConfig{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NetworkAttachmentSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Attachment{v} + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceUpdate", wireType) + } + m.ForceUpdate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForceUpdate |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &GenericRuntimeSpec{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Runtime = &TaskSpec_Generic{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceReferences = append(m.ResourceReferences, ResourceReference{}) + if err := m.ResourceReferences[len(m.ResourceReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType) + } + m.ResourceType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ResourceType |= (ResourceType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericRuntimeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericRuntimeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericRuntimeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf3.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthSpecs + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Mounts = append(m.Mounts, Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopGracePeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StopGracePeriod == nil { + m.StopGracePeriod = &google_protobuf1.Duration{} + } + if err := m.StopGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PullOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PullOptions == nil { + m.PullOptions = &ContainerSpec_PullOptions{} + } + if err := m.PullOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &SecretReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &ContainerSpec_DNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Healthcheck", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Healthcheck == nil { + m.Healthcheck = &HealthConfig{} + } + if err := m.Healthcheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OpenStdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.OpenStdin = bool(v != 0) + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StopSignal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &ConfigReference{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Privileges == nil { + m.Privileges = &Privileges{} + } + if err := m.Privileges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Init == nil { + m.Init = &google_protobuf4.BoolValue{} + } + if err := m.Init.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Isolation", wireType) + } + m.Isolation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Isolation |= (ContainerSpec_Isolation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PidsLimit", wireType) + } + m.PidsLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PidsLimit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_PullOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PullOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PullOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 64: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegistryAuth", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RegistryAuth = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = append(m.Search, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (EndpointSpec_ResolutionMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkSpec: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ipv6Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ipv6Enabled = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attachable = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } 
+ } + m.Ingress = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigFrom = &NetworkSpec_Network{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptancePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptancePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orchestration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Orchestration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Raft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dispatcher", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Dispatcher.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CAConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EncryptionConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSpecs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSpecs + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSpecs(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSpecs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSpecs = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/specs.proto", fileDescriptorSpecs) } + +var fileDescriptorSpecs = []byte{ + // 2131 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0x17, 0x25, 0x8a, 0x22, 0xdf, 0x52, 0x36, 0x35, 0x71, 0x9c, 0x15, 0x6d, 0x4b, 0x34, 0xe3, + 0xb8, 0x4a, 0x82, 0x52, 0xa8, 0x1a, 0xa4, 0x4e, 0xdc, 0xb4, 0x25, 0x45, 0x46, 0x66, 0x6d, 0x4b, + 0xc4, 0x50, 0x56, 0x6b, 0xa0, 0x00, 0x31, 0xda, 0x1d, 0x91, 0x03, 0x2d, 0x77, 0xb6, 0xb3, 0x43, + 0x19, 0xbc, 0xf5, 0x18, 0xa8, 0x9f, 0x41, 0xe8, 0xa1, 0xe8, 0xbd, 0xfd, 0x16, 0x3e, 0xf6, 0xd8, + 0x5e, 0x84, 0x44, 0x5f, 0xa1, 0xb7, 0x5e, 0x5a, 0xcc, 0xec, 0xec, 0x92, 0x94, 0x57, 0x96, 0x81, + 0xfa, 0xd0, 0xdb, 0xcc, 0xdb, 0xdf, 0xef, 0xcd, 0xbf, 0xdf, 0xbc, 0xf7, 0x66, 0xe1, 0xb3, 0x3e, + 0x93, 0x83, 0xd1, 0x61, 0xcd, 0xe1, 0xc3, 0x4d, 0x97, 0x3b, 0xc7, 0x54, 0x6c, 0x86, 0xaf, 0x88, + 0x18, 0x1e, 0x33, 0xb9, 0x49, 0x02, 0xb6, 0x19, 0x06, 0xd4, 0x09, 0x6b, 0x81, 0xe0, 0x92, 0x23, + 0x14, 0x01, 0x6a, 0x31, 0xa0, 0x76, 0xf2, 0x93, 0xf2, 0x75, 0x7c, 0x39, 0x0e, 0xa8, 0xe1, 0x97, + 0x6f, 0xf5, 0x79, 0x9f, 0xeb, 0xe6, 0xa6, 0x6a, 0x19, 0xeb, 0x5a, 0x9f, 0xf3, 0xbe, 0x47, 0x37, + 0x75, 0xef, 0x70, 0x74, 0xb4, 0xe9, 0x8e, 0x04, 0x91, 0x8c, 0xfb, 0xe6, 0xfb, 0xea, 0xe5, 0xef, + 0xc4, 0x1f, 0x5f, 0x45, 0x7d, 0x25, 0x48, 0x10, 0x50, 0x61, 0x06, 0xac, 0x9e, 0x65, 0x21, 0xbf, + 0xcb, 0x5d, 0xda, 0x0d, 0xa8, 0x83, 0x76, 0xc0, 0x22, 0xbe, 0xcf, 0xa5, 0xf6, 0x1d, 0xda, 0x99, + 0x4a, 0x66, 0xc3, 0xda, 0x5a, 0xaf, 0xbd, 0xb9, 0xa6, 0x5a, 0x7d, 0x02, 0x6b, 0x64, 0x5f, 0x9f, + 0xaf, 0xcf, 0xe1, 0x69, 0x26, 0xfa, 0x25, 0x14, 0x5d, 0x1a, 0x32, 0x41, 0xdd, 0x9e, 0xe0, 0x1e, + 0xb5, 0xe7, 0x2b, 0x99, 0x8d, 0x1b, 0x5b, 0x77, 0xd3, 0x3c, 0xa9, 0xc1, 0x31, 0xf7, 0x28, 0xb6, + 0x0c, 0x43, 0x75, 0xd0, 0x0e, 0xc0, 0x90, 0x0e, 0x0f, 0xa9, 0x08, 0x07, 0x2c, 0xb0, 0x17, 0x34, + 0xfd, 0x47, 0x57, 0xd1, 0xd5, 0xdc, 0x6b, 0xcf, 0x13, 0x38, 0x9e, 0xa2, 0xa2, 0xe7, 0x50, 0x24, + 0x27, 0x84, 0x79, 0xe4, 0x90, 0x79, 0x4c, 0x8e, 0xed, 0xac, 0x76, 0xf5, 0xe9, 0x5b, 0x5d, 0xd5, + 0xa7, 0x08, 0x78, 0x86, 0x5e, 0x75, 0x01, 0x26, 0x03, 0xa1, 0x87, 0xb0, 0xd4, 0x69, 0xed, 0x36, + 0xdb, 0xbb, 0x3b, 0xa5, 0xb9, 0xf2, 0xea, 0xe9, 0x59, 0xe5, 0x43, 0xe5, 0x63, 0x02, 0xe8, 0x50, + 0xdf, 0x65, 0x7e, 0x1f, 0x6d, 0x40, 0xbe, 
0xbe, 0xbd, 0xdd, 0xea, 0xec, 0xb7, 0x9a, 0xa5, 0x4c, + 0xb9, 0x7c, 0x7a, 0x56, 0xb9, 0x3d, 0x0b, 0xac, 0x3b, 0x0e, 0x0d, 0x24, 0x75, 0xcb, 0xd9, 0xef, + 0xfe, 0xbc, 0x36, 0x57, 0xfd, 0x2e, 0x03, 0xc5, 0xe9, 0x49, 0xa0, 0x87, 0x90, 0xab, 0x6f, 0xef, + 0xb7, 0x0f, 0x5a, 0xa5, 0xb9, 0x09, 0x7d, 0x1a, 0x51, 0x77, 0x24, 0x3b, 0xa1, 0xe8, 0x01, 0x2c, + 0x76, 0xea, 0x2f, 0xba, 0xad, 0x52, 0x66, 0x32, 0x9d, 0x69, 0x58, 0x87, 0x8c, 0x42, 0x8d, 0x6a, + 0xe2, 0x7a, 0x7b, 0xb7, 0x34, 0x9f, 0x8e, 0x6a, 0x0a, 0xc2, 0x7c, 0x33, 0x95, 0x3f, 0x65, 0xc1, + 0xea, 0x52, 0x71, 0xc2, 0x9c, 0xf7, 0x2c, 0x91, 0x2f, 0x21, 0x2b, 0x49, 0x78, 0xac, 0xa5, 0x61, + 0xa5, 0x4b, 0x63, 0x9f, 0x84, 0xc7, 0x6a, 0x50, 0x43, 0xd7, 0x78, 0xa5, 0x0c, 0x41, 0x03, 0x8f, + 0x39, 0x44, 0x52, 0x57, 0x2b, 0xc3, 0xda, 0xfa, 0x24, 0x8d, 0x8d, 0x13, 0x94, 0x99, 0xff, 0x93, + 0x39, 0x3c, 0x45, 0x45, 0x8f, 0x21, 0xd7, 0xf7, 0xf8, 0x21, 0xf1, 0xb4, 0x26, 0xac, 0xad, 0xfb, + 0x69, 0x4e, 0x76, 0x34, 0x62, 0xe2, 0xc0, 0x50, 0xd0, 0x23, 0xc8, 0x8d, 0x02, 0x97, 0x48, 0x6a, + 0xe7, 0x34, 0xb9, 0x92, 0x46, 0x7e, 0xa1, 0x11, 0xdb, 0xdc, 0x3f, 0x62, 0x7d, 0x6c, 0xf0, 0xe8, + 0x29, 0xe4, 0x7d, 0x2a, 0x5f, 0x71, 0x71, 0x1c, 0xda, 0x4b, 0x95, 0x85, 0x0d, 0x6b, 0xeb, 0xf3, + 0x54, 0x31, 0x46, 0x98, 0xba, 0x94, 0xc4, 0x19, 0x0c, 0xa9, 0x2f, 0x23, 0x37, 0x8d, 0x79, 0x3b, + 0x83, 0x13, 0x07, 0xe8, 0xe7, 0x90, 0xa7, 0xbe, 0x1b, 0x70, 0xe6, 0x4b, 0x3b, 0x7f, 0xf5, 0x44, + 0x5a, 0x06, 0xa3, 0x36, 0x13, 0x27, 0x0c, 0xc5, 0x16, 0xdc, 0xf3, 0x0e, 0x89, 0x73, 0x6c, 0x17, + 0xde, 0x71, 0x19, 0x09, 0xa3, 0x91, 0x83, 0xec, 0x90, 0xbb, 0xb4, 0xba, 0x09, 0x2b, 0x6f, 0x6c, + 0x35, 0x2a, 0x43, 0xde, 0x6c, 0x75, 0xa4, 0x91, 0x2c, 0x4e, 0xfa, 0xd5, 0x9b, 0xb0, 0x3c, 0xb3, + 0xad, 0xd5, 0xbf, 0x2e, 0x42, 0x3e, 0x3e, 0x6b, 0x54, 0x87, 0x82, 0xc3, 0x7d, 0x49, 0x98, 0x4f, + 0x85, 0x91, 0x57, 0xea, 0xc9, 0x6c, 0xc7, 0x20, 0xc5, 0x7a, 0x32, 0x87, 0x27, 0x2c, 0xf4, 0x2d, + 0x14, 0x04, 0x0d, 0xf9, 0x48, 0x38, 0x34, 0x34, 0xfa, 0xda, 0x48, 0x57, 0x48, 0x04, 0xc2, 0xf4, + 0xf7, 0x23, 0x26, 0xa8, 0xda, 0xe5, 0x10, 0x4f, 0xa8, 0xe8, 0x31, 0x2c, 0x09, 0x1a, 0x4a, 0x22, + 0xe4, 0xdb, 0x24, 0x82, 0x23, 0x48, 0x87, 0x7b, 0xcc, 0x19, 0xe3, 0x98, 0x81, 0x1e, 0x43, 0x21, + 0xf0, 0x88, 0xa3, 0xbd, 0xda, 0x8b, 0x9a, 0x7e, 0x2f, 0x8d, 0xde, 0x89, 0x41, 0x78, 0x82, 0x47, + 0x5f, 0x01, 0x78, 0xbc, 0xdf, 0x73, 0x05, 0x3b, 0xa1, 0xc2, 0x48, 0xac, 0x9c, 0xc6, 0x6e, 0x6a, + 0x04, 0x2e, 0x78, 0xbc, 0x1f, 0x35, 0xd1, 0xce, 0xff, 0xa4, 0xaf, 0x29, 0x6d, 0x3d, 0x05, 0x20, + 0xc9, 0x57, 0xa3, 0xae, 0x4f, 0xdf, 0xc9, 0x95, 0x39, 0x91, 0x29, 0x3a, 0xba, 0x0f, 0xc5, 0x23, + 0x2e, 0x1c, 0xda, 0x33, 0xb7, 0xa6, 0xa0, 0x35, 0x61, 0x69, 0x5b, 0xa4, 0x2f, 0xd4, 0x80, 0xa5, + 0x3e, 0xf5, 0xa9, 0x60, 0x8e, 0x0d, 0x7a, 0xb0, 0x87, 0xa9, 0x17, 0x32, 0x82, 0xe0, 0x91, 0x2f, + 0xd9, 0x90, 0x9a, 0x91, 0x62, 0x22, 0xfa, 0x1d, 0x7c, 0x10, 0x1f, 0x5f, 0x4f, 0xd0, 0x23, 0x2a, + 0xa8, 0xaf, 0x34, 0x60, 0xe9, 0x7d, 0xf8, 0xe4, 0xed, 0x1a, 0x30, 0x68, 0x13, 0x6c, 0x90, 0xb8, + 0xfc, 0x21, 0x6c, 0x14, 0x60, 0x49, 0x44, 0xe3, 0x56, 0xff, 0x98, 0x51, 0xaa, 0xbf, 0x84, 0x40, + 0x9b, 0x60, 0x25, 0xc3, 0x33, 0x57, 0xab, 0xb7, 0xd0, 0xb8, 0x71, 0x71, 0xbe, 0x0e, 0x31, 0xb6, + 0xdd, 0x54, 0x31, 0xc8, 0xb4, 0x5d, 0xd4, 0x82, 0xe5, 0x84, 0xa0, 0xca, 0x00, 0x93, 0x28, 0x2b, + 0x6f, 0x9b, 0xe9, 0xfe, 0x38, 0xa0, 0xb8, 0x28, 0xa6, 0x7a, 0xd5, 0xdf, 0x02, 0x7a, 0x73, 0x5f, + 0x10, 0x82, 0xec, 0x31, 0xf3, 0xcd, 0x34, 0xb0, 0x6e, 0xa3, 0x1a, 0x2c, 0x05, 0x64, 0xec, 0x71, + 0xe2, 0x9a, 0x8b, 0x71, 0xab, 0x16, 0x15, 0x08, 0xb5, 0xb8, 0x40, 
0xa8, 0xd5, 0xfd, 0x31, 0x8e, + 0x41, 0xd5, 0xa7, 0xf0, 0x61, 0xea, 0xf1, 0xa2, 0x2d, 0x28, 0x26, 0x17, 0x6e, 0xb2, 0xd6, 0x9b, + 0x17, 0xe7, 0xeb, 0x56, 0x72, 0x33, 0xdb, 0x4d, 0x6c, 0x25, 0xa0, 0xb6, 0x5b, 0xfd, 0xde, 0x82, + 0xe5, 0x99, 0x6b, 0x8b, 0x6e, 0xc1, 0x22, 0x1b, 0x92, 0x3e, 0x35, 0x73, 0x8c, 0x3a, 0xa8, 0x05, + 0x39, 0x8f, 0x1c, 0x52, 0x4f, 0x5d, 0x5e, 0x75, 0x70, 0x3f, 0xbe, 0xf6, 0xfe, 0xd7, 0x9e, 0x69, + 0x7c, 0xcb, 0x97, 0x62, 0x8c, 0x0d, 0x19, 0xd9, 0xb0, 0xe4, 0xf0, 0xe1, 0x90, 0xf8, 0x2a, 0x4d, + 0x2c, 0x6c, 0x14, 0x70, 0xdc, 0x55, 0x3b, 0x43, 0x44, 0x3f, 0xb4, 0xb3, 0xda, 0xac, 0xdb, 0xa8, + 0x04, 0x0b, 0xd4, 0x3f, 0xb1, 0x17, 0xb5, 0x49, 0x35, 0x95, 0xc5, 0x65, 0xd1, 0xed, 0x2b, 0x60, + 0xd5, 0x54, 0xbc, 0x51, 0x48, 0x85, 0xbd, 0x14, 0xed, 0xa8, 0x6a, 0xa3, 0x9f, 0x41, 0x6e, 0xc8, + 0x47, 0xbe, 0x0c, 0xed, 0xbc, 0x9e, 0xec, 0x6a, 0xda, 0x64, 0x9f, 0x2b, 0x84, 0x51, 0x96, 0x81, + 0xa3, 0x16, 0xac, 0x84, 0x92, 0x07, 0xbd, 0xbe, 0x20, 0x0e, 0xed, 0x05, 0x54, 0x30, 0xee, 0x9a, + 0x30, 0xbc, 0xfa, 0xc6, 0xa1, 0x34, 0x4d, 0xc1, 0x87, 0x6f, 0x2a, 0xce, 0x8e, 0xa2, 0x74, 0x34, + 0x03, 0x75, 0xa0, 0x18, 0x8c, 0x3c, 0xaf, 0xc7, 0x83, 0x28, 0x23, 0x47, 0x77, 0xe7, 0x1d, 0xb6, + 0xac, 0x33, 0xf2, 0xbc, 0xbd, 0x88, 0x84, 0xad, 0x60, 0xd2, 0x41, 0xb7, 0x21, 0xd7, 0x17, 0x7c, + 0x14, 0x44, 0xf7, 0xa6, 0x80, 0x4d, 0x0f, 0x7d, 0x03, 0x4b, 0x21, 0x75, 0x04, 0x95, 0xa1, 0x5d, + 0xd4, 0x4b, 0xfd, 0x38, 0x6d, 0x90, 0xae, 0x86, 0x24, 0x77, 0x02, 0xc7, 0x1c, 0xb4, 0x0a, 0x0b, + 0x52, 0x8e, 0xed, 0xe5, 0x4a, 0x66, 0x23, 0xdf, 0x58, 0xba, 0x38, 0x5f, 0x5f, 0xd8, 0xdf, 0x7f, + 0x89, 0x95, 0x4d, 0x65, 0x8b, 0x01, 0x0f, 0xa5, 0x4f, 0x86, 0xd4, 0xbe, 0xa1, 0xf7, 0x36, 0xe9, + 0xa3, 0x97, 0x00, 0xae, 0x1f, 0xf6, 0x1c, 0x1d, 0x9e, 0xec, 0x9b, 0x7a, 0x75, 0x9f, 0x5f, 0xbf, + 0xba, 0xe6, 0x6e, 0xd7, 0x64, 0xcc, 0xe5, 0x8b, 0xf3, 0xf5, 0x42, 0xd2, 0xc5, 0x05, 0xd7, 0x0f, + 0xa3, 0x26, 0x6a, 0x80, 0x35, 0xa0, 0xc4, 0x93, 0x03, 0x67, 0x40, 0x9d, 0x63, 0xbb, 0x74, 0x75, + 0x0a, 0x7c, 0xa2, 0x61, 0xc6, 0xc3, 0x34, 0x49, 0x29, 0x58, 0x4d, 0x35, 0xb4, 0x57, 0xf4, 0x5e, + 0x45, 0x1d, 0x74, 0x0f, 0x80, 0x07, 0xd4, 0xef, 0x85, 0xd2, 0x65, 0xbe, 0x8d, 0xd4, 0x92, 0x71, + 0x41, 0x59, 0xba, 0xca, 0x80, 0xee, 0xa8, 0x04, 0x45, 0xdc, 0x1e, 0xf7, 0xbd, 0xb1, 0xfd, 0x81, + 0xfe, 0x9a, 0x57, 0x86, 0x3d, 0xdf, 0x1b, 0xa3, 0x75, 0xb0, 0xb4, 0x2e, 0x42, 0xd6, 0xf7, 0x89, + 0x67, 0xdf, 0xd2, 0xfb, 0x01, 0xca, 0xd4, 0xd5, 0x16, 0x75, 0x0e, 0xd1, 0x6e, 0x84, 0xf6, 0x87, + 0x57, 0x9f, 0x83, 0x99, 0xec, 0xe4, 0x1c, 0x0c, 0x07, 0xfd, 0x02, 0x20, 0x10, 0xec, 0x84, 0x79, + 0xb4, 0x4f, 0x43, 0xfb, 0xb6, 0x5e, 0xf4, 0x5a, 0x6a, 0x66, 0x4a, 0x50, 0x78, 0x8a, 0x81, 0x6a, + 0x90, 0x65, 0x3e, 0x93, 0xf6, 0x47, 0x26, 0x2b, 0x5d, 0x96, 0x6a, 0x83, 0x73, 0xef, 0x80, 0x78, + 0x23, 0x8a, 0x35, 0x0e, 0xb5, 0xa1, 0xc0, 0x42, 0xee, 0x69, 0xf9, 0xda, 0xb6, 0x8e, 0x6f, 0xef, + 0x70, 0x7e, 0xed, 0x98, 0x82, 0x27, 0x6c, 0x74, 0x17, 0x0a, 0x01, 0x73, 0xc3, 0x67, 0x6c, 0xc8, + 0xa4, 0xbd, 0x5a, 0xc9, 0x6c, 0x2c, 0xe0, 0x89, 0xa1, 0xfc, 0x15, 0x58, 0x53, 0x61, 0x40, 0x5d, + 0xdf, 0x63, 0x3a, 0x36, 0x91, 0x45, 0x35, 0xd5, 0x59, 0x9d, 0xa8, 0x89, 0xe9, 0xd0, 0x57, 0xc0, + 0x51, 0xe7, 0xeb, 0xf9, 0x47, 0x99, 0xf2, 0x16, 0x58, 0x53, 0xd7, 0x01, 0x7d, 0xac, 0xc2, 0x72, + 0x9f, 0x85, 0x52, 0x8c, 0x7b, 0x64, 0x24, 0x07, 0xf6, 0xaf, 0x34, 0xa1, 0x18, 0x1b, 0xeb, 0x23, + 0x39, 0x28, 0xf7, 0x60, 0xa2, 0x2a, 0x54, 0x01, 0x4b, 0xa9, 0x35, 0xa4, 0xe2, 0x84, 0x0a, 0x55, + 0xf2, 0x28, 0x31, 0x4c, 0x9b, 0xd4, 0xad, 0x0a, 0x29, 0x11, 0xce, 0x40, 0x07, 0xb5, 0x02, 
0x36, + 0x3d, 0x15, 0xa5, 0xe2, 0xab, 0x6b, 0xa2, 0x94, 0xe9, 0x56, 0xff, 0x96, 0x81, 0x42, 0xb2, 0x0d, + 0xe8, 0x0b, 0x58, 0x69, 0x77, 0xf7, 0x9e, 0xd5, 0xf7, 0xdb, 0x7b, 0xbb, 0xbd, 0x66, 0xeb, 0xdb, + 0xfa, 0x8b, 0x67, 0xfb, 0xa5, 0xb9, 0xf2, 0xbd, 0xd3, 0xb3, 0xca, 0xea, 0x24, 0xe2, 0xc6, 0xf0, + 0x26, 0x3d, 0x22, 0x23, 0x4f, 0xce, 0xb2, 0x3a, 0x78, 0x6f, 0xbb, 0xd5, 0xed, 0x96, 0x32, 0x57, + 0xb1, 0x3a, 0x82, 0x3b, 0x34, 0x0c, 0xd1, 0x16, 0x94, 0x26, 0xac, 0x27, 0x2f, 0x3b, 0x2d, 0x7c, + 0x50, 0x9a, 0x2f, 0xdf, 0x3d, 0x3d, 0xab, 0xd8, 0x6f, 0x92, 0x9e, 0x8c, 0x03, 0x2a, 0x0e, 0xcc, + 0x73, 0xe1, 0x5f, 0x19, 0x28, 0x4e, 0x57, 0x9b, 0x68, 0x3b, 0xaa, 0x12, 0xf5, 0x31, 0xdc, 0xd8, + 0xda, 0xbc, 0xae, 0x3a, 0xd5, 0x59, 0xce, 0x1b, 0x29, 0xbf, 0xcf, 0xd5, 0xc3, 0x50, 0x93, 0xd1, + 0x17, 0xb0, 0x18, 0x70, 0x21, 0xe3, 0x7c, 0x90, 0xae, 0x56, 0x2e, 0xe2, 0x1a, 0x26, 0x02, 0x57, + 0x07, 0x70, 0x63, 0xd6, 0x1b, 0x7a, 0x00, 0x0b, 0x07, 0xed, 0x4e, 0x69, 0xae, 0x7c, 0xe7, 0xf4, + 0xac, 0xf2, 0xd1, 0xec, 0xc7, 0x03, 0x26, 0xe4, 0x88, 0x78, 0xed, 0x0e, 0xfa, 0x0c, 0x16, 0x9b, + 0xbb, 0x5d, 0x8c, 0x4b, 0x99, 0xf2, 0xfa, 0xe9, 0x59, 0xe5, 0xce, 0x2c, 0x4e, 0x7d, 0xe2, 0x23, + 0xdf, 0xc5, 0xfc, 0x30, 0x79, 0x24, 0xfd, 0x7b, 0x1e, 0x2c, 0x93, 0x26, 0xdf, 0xf7, 0x3b, 0x7a, + 0x39, 0xaa, 0x01, 0xe3, 0xf8, 0x37, 0x7f, 0x6d, 0x29, 0x58, 0x8c, 0x08, 0x46, 0x97, 0xf7, 0xa1, + 0xc8, 0x82, 0x93, 0x2f, 0x7b, 0xd4, 0x27, 0x87, 0x9e, 0x79, 0x2f, 0xe5, 0xb1, 0xa5, 0x6c, 0xad, + 0xc8, 0xa4, 0x82, 0x2f, 0xf3, 0x25, 0x15, 0xbe, 0x79, 0x09, 0xe5, 0x71, 0xd2, 0x47, 0xdf, 0x40, + 0x96, 0x05, 0x64, 0x68, 0xea, 0xd7, 0xd4, 0x15, 0xb4, 0x3b, 0xf5, 0xe7, 0xe6, 0xde, 0x34, 0xf2, + 0x17, 0xe7, 0xeb, 0x59, 0x65, 0xc0, 0x9a, 0x86, 0xd6, 0xe2, 0x12, 0x52, 0x8d, 0xa4, 0x13, 0x69, + 0x1e, 0x4f, 0x59, 0x94, 0xf6, 0x99, 0xdf, 0x17, 0x34, 0x0c, 0x75, 0x4a, 0xcd, 0xe3, 0xb8, 0x8b, + 0xca, 0xb0, 0x64, 0x0a, 0x51, 0x5d, 0x79, 0x16, 0x54, 0x91, 0x67, 0x0c, 0x8d, 0x65, 0xb0, 0xa2, + 0xdd, 0xe8, 0x1d, 0x09, 0x3e, 0xac, 0xfe, 0x27, 0x0b, 0xd6, 0xb6, 0x37, 0x0a, 0xa5, 0xa9, 0x29, + 0xde, 0xdb, 0xe6, 0xbf, 0x84, 0x15, 0xa2, 0xdf, 0xe5, 0xc4, 0x57, 0x09, 0x5a, 0xd7, 0xf7, 0xe6, + 0x00, 0x1e, 0xa4, 0xba, 0x4b, 0xc0, 0xd1, 0x5b, 0xa0, 0x91, 0x53, 0x3e, 0xed, 0x0c, 0x2e, 0x91, + 0x4b, 0x5f, 0x50, 0x17, 0x96, 0xb9, 0x70, 0x06, 0x34, 0x94, 0x51, 0x5a, 0x37, 0xef, 0xd8, 0xd4, + 0x3f, 0x1c, 0x7b, 0xd3, 0x40, 0x93, 0xd3, 0xa2, 0xd9, 0xce, 0xfa, 0x40, 0x8f, 0x20, 0x2b, 0xc8, + 0x51, 0xfc, 0x56, 0x49, 0xbd, 0x24, 0x98, 0x1c, 0xc9, 0x19, 0x17, 0x9a, 0x81, 0x7e, 0x0d, 0xe0, + 0xb2, 0x30, 0x20, 0xd2, 0x19, 0x50, 0x61, 0x0e, 0x3b, 0x75, 0x89, 0xcd, 0x04, 0x35, 0xe3, 0x65, + 0x8a, 0x8d, 0x9e, 0x42, 0xc1, 0x21, 0xb1, 0x5c, 0x73, 0x57, 0x3f, 0xee, 0xb7, 0xeb, 0xc6, 0x45, + 0x49, 0xb9, 0xb8, 0x38, 0x5f, 0xcf, 0xc7, 0x16, 0x9c, 0x77, 0x88, 0x91, 0xef, 0x53, 0x58, 0x56, + 0x8f, 0xfe, 0x9e, 0x1b, 0x85, 0xb3, 0x48, 0x26, 0x57, 0xe4, 0x68, 0xf5, 0x82, 0x34, 0x61, 0x2f, + 0x3e, 0xce, 0xa2, 0x9c, 0xb2, 0xa1, 0xdf, 0xc0, 0x0a, 0xf5, 0x1d, 0x31, 0xd6, 0x62, 0x8d, 0x67, + 0x98, 0xbf, 0x7a, 0xb1, 0xad, 0x04, 0x3c, 0xb3, 0xd8, 0x12, 0xbd, 0x64, 0xaf, 0xfe, 0x33, 0x03, + 0x10, 0x95, 0x3d, 0xef, 0x57, 0x80, 0x08, 0xb2, 0x2e, 0x91, 0x44, 0x6b, 0xae, 0x88, 0x75, 0x1b, + 0x7d, 0x0d, 0x20, 0xe9, 0x30, 0x50, 0xa1, 0xd7, 0xef, 0x1b, 0xd9, 0xbc, 0x2d, 0x1c, 0x4c, 0xa1, + 0xd1, 0x16, 0xe4, 0xcc, 0x8b, 0x32, 0x7b, 0x2d, 0xcf, 0x20, 0xab, 0x7f, 0xc9, 0x00, 0x44, 0xcb, + 0xfc, 0xbf, 0x5e, 0x5b, 0xc3, 0x7e, 0xfd, 0xc3, 0xda, 0xdc, 0x3f, 0x7e, 0x58, 0x9b, 0xfb, 0xc3, + 0xc5, 0x5a, 0xe6, 
0xf5, 0xc5, 0x5a, 0xe6, 0xef, 0x17, 0x6b, 0x99, 0xef, 0x2f, 0xd6, 0x32, 0x87, + 0x39, 0x5d, 0x99, 0xfc, 0xf4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa3, 0x85, 0xdc, 0xc7, + 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/storeobject.go b/vendor/github.com/docker/swarmkit/api/storeobject.go new file mode 100644 index 0000000000..48b50b72dd --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/storeobject.go @@ -0,0 +1,108 @@ +package api + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/go-events" +) + +var ( + errUnknownStoreAction = errors.New("unrecognized action type") + errConflictingFilters = errors.New("conflicting filters specified") + errNoKindSpecified = errors.New("no kind of object specified") + errUnrecognizedAction = errors.New("unrecognized action") +) + +// StoreObject is an abstract object that can be handled by the store. +type StoreObject interface { + GetID() string // Get ID + GetMeta() Meta // Retrieve metadata + SetMeta(Meta) // Set metadata + CopyStoreObject() StoreObject // Return a copy of this object + EventCreate() Event // Return a creation event + EventUpdate(oldObject StoreObject) Event // Return an update event + EventDelete() Event // Return a deletion event +} + +// Event is the type used for events passed over watcher channels, and also +// the type used to specify filtering in calls to Watch. +type Event interface { + // TODO(stevvooe): Consider whether it makes sense to squish both the + // matcher type and the primary type into the same type. It might be better + // to build a matcher from an event prototype. + + // Matches checks if this item in a watch queue Matches the event + // description. + Matches(events.Event) bool +} + +func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) { + var converted [][]byte + + for _, entry := range annotations.Indices { + index := make([]byte, 0, len(kind)+1+len(entry.Key)+1+len(entry.Val)+1) + if kind != "" { + index = append(index, []byte(kind)...) + index = append(index, '|') + } + index = append(index, []byte(entry.Key)...) + index = append(index, '|') + index = append(index, []byte(entry.Val)...) + index = append(index, '\x00') + converted = append(converted, index) + } + + // Add the null character as a terminator + return len(converted) != 0, converted, nil +} + +func fromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func prefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := fromArgs(args...) 
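+ // fromArgs appends a null terminator to the key; it is stripped below so the remainder can serve as a prefix for range lookups.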
+ if err != nil { + return nil, err + } + + // Strip the null terminator; the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +func checkCustom(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && ind.Val == a1.Indices[0].Val { + return true + } + } + } + return false +} + +func checkCustomPrefix(a1, a2 Annotations) bool { + if len(a1.Indices) == 1 { + for _, ind := range a2.Indices { + if ind.Key == a1.Indices[0].Key && strings.HasPrefix(ind.Val, a1.Indices[0].Val) { + return true + } + } + } + return false +} diff --git a/vendor/github.com/docker/swarmkit/api/types.pb.go b/vendor/github.com/docker/swarmkit/api/types.pb.go new file mode 100644 index 0000000000..98159a9fdd --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/types.pb.go @@ -0,0 +1,17410 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/types.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" + +import os "os" +import time "time" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type ResourceType int32 + +const ( + ResourceType_TASK ResourceType = 0 + ResourceType_SECRET ResourceType = 1 + ResourceType_CONFIG ResourceType = 2 +) + +var ResourceType_name = map[int32]string{ + 0: "TASK", + 1: "SECRET", + 2: "CONFIG", +} +var ResourceType_value = map[string]int32{ + "TASK": 0, + "SECRET": 1, + "CONFIG": 2, +} + +func (x ResourceType) String() string { + return proto.EnumName(ResourceType_name, int32(x)) +} +func (ResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +// Only the manager creates a NEW task and moves it to PENDING and ASSIGNED. +// Afterward, the manager must rely on the agent to update the task status +// (pre-run: preparing, ready, starting; +// running; +// end-state: complete, shutdown, failed, rejected) +type TaskState int32 + +const ( + TaskStateNew TaskState = 0 + TaskStatePending TaskState = 64 + TaskStateAssigned TaskState = 192 + TaskStateAccepted TaskState = 256 + TaskStatePreparing TaskState = 320 + TaskStateReady TaskState = 384 + TaskStateStarting TaskState = 448 + TaskStateRunning TaskState = 512 + TaskStateCompleted TaskState = 576 + TaskStateShutdown TaskState = 640 + TaskStateFailed TaskState = 704 + // TaskStateRejected means a task never ran, for instance if something about + // the environment failed (e.g. setting up a port on that node failed). + TaskStateRejected TaskState = 768 + // TaskStateRemove is used to correctly handle service deletions and scale + // downs. This allows us to keep track of tasks that have been marked for + // deletion, but can't yet be removed because the agent is in the process of + // shutting them down.
Once the agent has shut down tasks with desired state + // REMOVE, the task reaper is responsible for removing them. + TaskStateRemove TaskState = 800 + // TaskStateOrphaned is used to free up resources associated with service + // tasks on unresponsive nodes without having to delete those tasks. This + // state is directly assigned to the task by the orchestrator. + TaskStateOrphaned TaskState = 832 +) + +var TaskState_name = map[int32]string{ + 0: "NEW", + 64: "PENDING", + 192: "ASSIGNED", + 256: "ACCEPTED", + 320: "PREPARING", + 384: "READY", + 448: "STARTING", + 512: "RUNNING", + 576: "COMPLETE", + 640: "SHUTDOWN", + 704: "FAILED", + 768: "REJECTED", + 800: "REMOVE", + 832: "ORPHANED", +} +var TaskState_value = map[string]int32{ + "NEW": 0, + "PENDING": 64, + "ASSIGNED": 192, + "ACCEPTED": 256, + "PREPARING": 320, + "READY": 384, + "STARTING": 448, + "RUNNING": 512, + "COMPLETE": 576, + "SHUTDOWN": 640, + "FAILED": 704, + "REJECTED": 768, + "REMOVE": 800, + "ORPHANED": 832, +} + +func (x TaskState) String() string { + return proto.EnumName(TaskState_name, int32(x)) +} +func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +type NodeRole int32 + +const ( + NodeRoleWorker NodeRole = 0 + NodeRoleManager NodeRole = 1 +) + +var NodeRole_name = map[int32]string{ + 0: "WORKER", + 1: "MANAGER", +} +var NodeRole_value = map[string]int32{ + "WORKER": 0, + "MANAGER": 1, +} + +func (x NodeRole) String() string { + return proto.EnumName(NodeRole_name, int32(x)) +} +func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +type RaftMemberStatus_Reachability int32 + +const ( + // Unknown indicates that the manager state cannot be resolved. + RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0 + // Unreachable indicates that the node cannot be contacted by other + // raft cluster members. + RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1 + // Reachable indicates that the node is healthy and reachable + // by other members. + RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2 +) + +var RaftMemberStatus_Reachability_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNREACHABLE", + 2: "REACHABLE", +} +var RaftMemberStatus_Reachability_value = map[string]int32{ + "UNKNOWN": 0, + "UNREACHABLE": 1, + "REACHABLE": 2, +} + +func (x RaftMemberStatus_Reachability) String() string { + return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x)) +} +func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{13, 0} +} + +// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`. +type NodeStatus_State int32 + +const ( + // Unknown indicates the node state cannot be resolved. + NodeStatus_UNKNOWN NodeStatus_State = 0 + // Down indicates the node is down. + NodeStatus_DOWN NodeStatus_State = 1 + // Ready indicates the node is ready to accept tasks. + NodeStatus_READY NodeStatus_State = 2 + // Disconnected indicates the node is currently trying to find a new manager.
+ NodeStatus_DISCONNECTED NodeStatus_State = 3 +) + +var NodeStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DOWN", + 2: "READY", + 3: "DISCONNECTED", +} +var NodeStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "DOWN": 1, + "READY": 2, + "DISCONNECTED": 3, +} + +func (x NodeStatus_State) String() string { + return proto.EnumName(NodeStatus_State_name, int32(x)) +} +func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14, 0} } + +type Mount_MountType int32 + +const ( + MountTypeBind Mount_MountType = 0 + MountTypeVolume Mount_MountType = 1 + MountTypeTmpfs Mount_MountType = 2 +) + +var Mount_MountType_name = map[int32]string{ + 0: "BIND", + 1: "VOLUME", + 2: "TMPFS", +} +var Mount_MountType_value = map[string]int32{ + "BIND": 0, + "VOLUME": 1, + "TMPFS": 2, +} + +func (x Mount_MountType) String() string { + return proto.EnumName(Mount_MountType_name, int32(x)) +} +func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// Consistency indicates the tolerable level of file system consistency +type Mount_MountConsistency int32 + +const ( + MountConsistencyDefault Mount_MountConsistency = 0 + MountConsistencyFull Mount_MountConsistency = 1 + MountConsistencyCached Mount_MountConsistency = 2 + MountConsistencyDelegated Mount_MountConsistency = 3 +) + +var Mount_MountConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "CONSISTENT", + 2: "CACHED", + 3: "DELEGATED", +} +var Mount_MountConsistency_value = map[string]int32{ + "DEFAULT": 0, + "CONSISTENT": 1, + "CACHED": 2, + "DELEGATED": 3, +} + +func (x Mount_MountConsistency) String() string { + return proto.EnumName(Mount_MountConsistency_name, int32(x)) +} +func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 1} +} + +type Mount_BindOptions_MountPropagation int32 + +const ( + MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0 + MountPropagationPrivate Mount_BindOptions_MountPropagation = 1 + MountPropagationRShared Mount_BindOptions_MountPropagation = 2 + MountPropagationShared Mount_BindOptions_MountPropagation = 3 + MountPropagationRSlave Mount_BindOptions_MountPropagation = 4 + MountPropagationSlave Mount_BindOptions_MountPropagation = 5 +) + +var Mount_BindOptions_MountPropagation_name = map[int32]string{ + 0: "RPRIVATE", + 1: "PRIVATE", + 2: "RSHARED", + 3: "SHARED", + 4: "RSLAVE", + 5: "SLAVE", +} +var Mount_BindOptions_MountPropagation_value = map[string]int32{ + "RPRIVATE": 0, + "PRIVATE": 1, + "RSHARED": 2, + "SHARED": 3, + "RSLAVE": 4, + "SLAVE": 5, +} + +func (x Mount_BindOptions_MountPropagation) String() string { + return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x)) +} +func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 0, 0} +} + +type RestartPolicy_RestartCondition int32 + +const ( + RestartOnNone RestartPolicy_RestartCondition = 0 + RestartOnFailure RestartPolicy_RestartCondition = 1 + RestartOnAny RestartPolicy_RestartCondition = 2 +) + +var RestartPolicy_RestartCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_FAILURE", + 2: "ANY", +} +var RestartPolicy_RestartCondition_value = map[string]int32{ + "NONE": 0, + "ON_FAILURE": 1, + "ANY": 2, +} + +func (x RestartPolicy_RestartCondition) String() string { + return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x)) +} +func (RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptorTypes, []int{17, 0} +} + +type UpdateConfig_FailureAction int32 + +const ( + UpdateConfig_PAUSE UpdateConfig_FailureAction = 0 + UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1 + UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2 +) + +var UpdateConfig_FailureAction_name = map[int32]string{ + 0: "PAUSE", + 1: "CONTINUE", + 2: "ROLLBACK", +} +var UpdateConfig_FailureAction_value = map[string]int32{ + "PAUSE": 0, + "CONTINUE": 1, + "ROLLBACK": 2, +} + +func (x UpdateConfig_FailureAction) String() string { + return proto.EnumName(UpdateConfig_FailureAction_name, int32(x)) +} +func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 0} +} + +// UpdateOrder controls the order of operations when rolling out an +// updated task. Either the old task is shut down before the new task +// is started, or the new task is started before the old task is shut +// down. +type UpdateConfig_UpdateOrder int32 + +const ( + UpdateConfig_STOP_FIRST UpdateConfig_UpdateOrder = 0 + UpdateConfig_START_FIRST UpdateConfig_UpdateOrder = 1 +) + +var UpdateConfig_UpdateOrder_name = map[int32]string{ + 0: "STOP_FIRST", + 1: "START_FIRST", +} +var UpdateConfig_UpdateOrder_value = map[string]int32{ + "STOP_FIRST": 0, + "START_FIRST": 1, +} + +func (x UpdateConfig_UpdateOrder) String() string { + return proto.EnumName(UpdateConfig_UpdateOrder_name, int32(x)) +} +func (UpdateConfig_UpdateOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 1} +} + +type UpdateStatus_UpdateState int32 + +const ( + UpdateStatus_UNKNOWN UpdateStatus_UpdateState = 0 + UpdateStatus_UPDATING UpdateStatus_UpdateState = 1 + UpdateStatus_PAUSED UpdateStatus_UpdateState = 2 + UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3 + UpdateStatus_ROLLBACK_STARTED UpdateStatus_UpdateState = 4 + UpdateStatus_ROLLBACK_PAUSED UpdateStatus_UpdateState = 5 + UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6 +) + +var UpdateStatus_UpdateState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UPDATING", + 2: "PAUSED", + 3: "COMPLETED", + 4: "ROLLBACK_STARTED", + 5: "ROLLBACK_PAUSED", + 6: "ROLLBACK_COMPLETED", +} +var UpdateStatus_UpdateState_value = map[string]int32{ + "UNKNOWN": 0, + "UPDATING": 1, + "PAUSED": 2, + "COMPLETED": 3, + "ROLLBACK_STARTED": 4, + "ROLLBACK_PAUSED": 5, + "ROLLBACK_COMPLETED": 6, +} + +func (x UpdateStatus_UpdateState) String() string { + return proto.EnumName(UpdateStatus_UpdateState_name, int32(x)) +} +func (UpdateStatus_UpdateState) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{19, 0} +} + +// AddressFamily specifies the network address family that +// this IPAMConfig belongs to. 
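+// Note that the enum values mirror the IP version numbers: IPV4 = 4 and IPV6 = 6.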
+type IPAMConfig_AddressFamily int32 + +const ( + IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0 + IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4 + IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6 +) + +var IPAMConfig_AddressFamily_name = map[int32]string{ + 0: "UNKNOWN", + 4: "IPV4", + 6: "IPV6", +} +var IPAMConfig_AddressFamily_value = map[string]int32{ + "UNKNOWN": 0, + "IPV4": 4, + "IPV6": 6, +} + +func (x IPAMConfig_AddressFamily) String() string { + return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x)) +} +func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{24, 0} +} + +type PortConfig_Protocol int32 + +const ( + ProtocolTCP PortConfig_Protocol = 0 + ProtocolUDP PortConfig_Protocol = 1 + ProtocolSCTP PortConfig_Protocol = 2 +) + +var PortConfig_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} +var PortConfig_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x PortConfig_Protocol) String() string { + return proto.EnumName(PortConfig_Protocol_name, int32(x)) +} +func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25, 0} } + +// PublishMode controls how ports are published on the swarm. +type PortConfig_PublishMode int32 + +const ( + // PublishModeIngress exposes the port across the cluster on all nodes. + PublishModeIngress PortConfig_PublishMode = 0 + // PublishModeHost exposes the port on just the target host. If the + // published port is undefined, an ephemeral port will be allocated. If + // the published port is defined, the node will attempt to allocate it, + // erroring the task if it fails. + PublishModeHost PortConfig_PublishMode = 1 +) + +var PortConfig_PublishMode_name = map[int32]string{ + 0: "INGRESS", + 1: "HOST", +} +var PortConfig_PublishMode_value = map[string]int32{ + "INGRESS": 0, + "HOST": 1, +} + +func (x PortConfig_PublishMode) String() string { + return proto.EnumName(PortConfig_PublishMode_name, int32(x)) +} +func (PortConfig_PublishMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{25, 1} +} + +type IssuanceStatus_State int32 + +const ( + IssuanceStateUnknown IssuanceStatus_State = 0 + // A new certificate should be issued + IssuanceStateRenew IssuanceStatus_State = 1 + // Certificate is pending acceptance + IssuanceStatePending IssuanceStatus_State = 2 + // Certificate issuance completed successfully + IssuanceStateIssued IssuanceStatus_State = 3 + // Certificate issuance failed + IssuanceStateFailed IssuanceStatus_State = 4 + // Signals workers to renew their certificate. From the CA's perspective + // this is equivalent to IssuanceStateIssued: a noop.
+ IssuanceStateRotate IssuanceStatus_State = 5 +) + +var IssuanceStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "RENEW", + 2: "PENDING", + 3: "ISSUED", + 4: "FAILED", + 5: "ROTATE", +} +var IssuanceStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "RENEW": 1, + "PENDING": 2, + "ISSUED": 3, + "FAILED": 4, + "ROTATE": 5, +} + +func (x IssuanceStatus_State) String() string { + return proto.EnumName(IssuanceStatus_State_name, int32(x)) +} +func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30, 0} } + +type ExternalCA_CAProtocol int32 + +const ( + ExternalCA_CAProtocolCFSSL ExternalCA_CAProtocol = 0 +) + +var ExternalCA_CAProtocol_name = map[int32]string{ + 0: "CFSSL", +} +var ExternalCA_CAProtocol_value = map[string]int32{ + "CFSSL": 0, +} + +func (x ExternalCA_CAProtocol) String() string { + return proto.EnumName(ExternalCA_CAProtocol_name, int32(x)) +} +func (ExternalCA_CAProtocol) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{32, 0} +} + +// Encryption algorithm that can be implemented using this key +type EncryptionKey_Algorithm int32 + +const ( + AES_128_GCM EncryptionKey_Algorithm = 0 +) + +var EncryptionKey_Algorithm_name = map[int32]string{ + 0: "AES_128_GCM", +} +var EncryptionKey_Algorithm_value = map[string]int32{ + "AES_128_GCM": 0, +} + +func (x EncryptionKey_Algorithm) String() string { + return proto.EnumName(EncryptionKey_Algorithm_name, int32(x)) +} +func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{45, 0} +} + +type MaybeEncryptedRecord_Algorithm int32 + +const ( + MaybeEncryptedRecord_NotEncrypted MaybeEncryptedRecord_Algorithm = 0 + MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 MaybeEncryptedRecord_Algorithm = 1 + MaybeEncryptedRecord_FernetAES128CBC MaybeEncryptedRecord_Algorithm = 2 +) + +var MaybeEncryptedRecord_Algorithm_name = map[int32]string{ + 0: "NONE", + 1: "SECRETBOX_SALSA20_POLY1305", + 2: "FERNET_AES_128_CBC", +} +var MaybeEncryptedRecord_Algorithm_value = map[string]int32{ + "NONE": 0, + "SECRETBOX_SALSA20_POLY1305": 1, + "FERNET_AES_128_CBC": 2, +} + +func (x MaybeEncryptedRecord_Algorithm) String() string { + return proto.EnumName(MaybeEncryptedRecord_Algorithm_name, int32(x)) +} +func (MaybeEncryptedRecord_Algorithm) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{52, 0} +} + +// Version tracks the last time an object in the store was updated. +type Version struct { + Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +type IndexEntry struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Val string `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"` +} + +func (m *IndexEntry) Reset() { *m = IndexEntry{} } +func (*IndexEntry) ProtoMessage() {} +func (*IndexEntry) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +// Annotations provide useful information to identify API objects. They are +// common to all API specs.
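+// The Indices entries are compiled by customIndexer (see storeobject.go) into null-terminated "kind|key|val" byte keys for store lookups.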
+type Annotations struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + Indices []IndexEntry `protobuf:"bytes,4,rep,name=indices" json:"indices"` +} + +func (m *Annotations) Reset() { *m = Annotations{} } +func (*Annotations) ProtoMessage() {} +func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *NamedGenericResource) Reset() { *m = NamedGenericResource{} } +func (*NamedGenericResource) ProtoMessage() {} +func (*NamedGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DiscreteGenericResource) Reset() { *m = DiscreteGenericResource{} } +func (*DiscreteGenericResource) ProtoMessage() {} +func (*DiscreteGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + // Types that are valid to be assigned to Resource: + // *GenericResource_NamedResourceSpec + // *GenericResource_DiscreteResourceSpec + Resource isGenericResource_Resource `protobuf_oneof:"resource"` +} + +func (m *GenericResource) Reset() { *m = GenericResource{} } +func (*GenericResource) ProtoMessage() {} +func (*GenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } + +type isGenericResource_Resource interface { + isGenericResource_Resource() + MarshalTo([]byte) (int, error) + Size() int +} + +type GenericResource_NamedResourceSpec struct { + NamedResourceSpec *NamedGenericResource `protobuf:"bytes,1,opt,name=named_resource_spec,json=namedResourceSpec,oneof"` +} +type GenericResource_DiscreteResourceSpec struct { + DiscreteResourceSpec *DiscreteGenericResource `protobuf:"bytes,2,opt,name=discrete_resource_spec,json=discreteResourceSpec,oneof"` +} + +func (*GenericResource_NamedResourceSpec) isGenericResource_Resource() {} +func (*GenericResource_DiscreteResourceSpec) isGenericResource_Resource() {} + +func (m *GenericResource) GetResource() isGenericResource_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *GenericResource) GetNamedResourceSpec() *NamedGenericResource { + if x, ok := m.GetResource().(*GenericResource_NamedResourceSpec); ok { + return x.NamedResourceSpec + } + 
return nil +} + +func (m *GenericResource) GetDiscreteResourceSpec() *DiscreteGenericResource { + if x, ok := m.GetResource().(*GenericResource_DiscreteResourceSpec); ok { + return x.DiscreteResourceSpec + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GenericResource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GenericResource_OneofMarshaler, _GenericResource_OneofUnmarshaler, _GenericResource_OneofSizer, []interface{}{ + (*GenericResource_NamedResourceSpec)(nil), + (*GenericResource_DiscreteResourceSpec)(nil), + } +} + +func _GenericResource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NamedResourceSpec); err != nil { + return err + } + case *GenericResource_DiscreteResourceSpec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DiscreteResourceSpec); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GenericResource.Resource has unexpected type %T", x) + } + return nil +} + +func _GenericResource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GenericResource) + switch tag { + case 1: // resource.named_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NamedGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_NamedResourceSpec{msg} + return true, err + case 2: // resource.discrete_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DiscreteGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_DiscreteResourceSpec{msg} + return true, err + default: + return false, nil + } +} + +func _GenericResource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + s := proto.Size(x.NamedResourceSpec) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GenericResource_DiscreteResourceSpec: + s := proto.Size(x.DiscreteResourceSpec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Resources struct { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"` + // Amount of memory in bytes. 
+ MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"` + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + Generic []*GenericResource `protobuf:"bytes,3,rep,name=generic" json:"generic,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} } + +type ResourceRequirements struct { + Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"` + Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"` +} + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} } + +type Platform struct { + // Architecture (e.g. x86_64) + Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"` + // Operating System (e.g. linux) + OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} } + +// PluginDescription describes an engine plugin. +type PluginDescription struct { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Name of the plugin + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *PluginDescription) Reset() { *m = PluginDescription{} } +func (*PluginDescription) ProtoMessage() {} +func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} } + +type EngineDescription struct { + // Docker daemon version running on the node. + EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"` + // Labels attached to the engine. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume, Network, and Auth plugins + Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"` +} + +func (m *EngineDescription) Reset() { *m = EngineDescription{} } +func (*EngineDescription) ProtoMessage() {} +func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} } + +type NodeDescription struct { + // Hostname of the node as reported by the agent. + // This is different from spec.meta.name which is user-defined. + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + // Platform of the node. + Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"` + // Total resources on the node. + Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"` + // Information about the Docker Engine on the node. 
+ Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"` + // Information on the node's TLS setup + TLSInfo *NodeTLSInfo `protobuf:"bytes,5,opt,name=tls_info,json=tlsInfo" json:"tls_info,omitempty"` + // FIPS indicates whether the node has FIPS enabled + FIPS bool `protobuf:"varint,6,opt,name=fips,proto3" json:"fips,omitempty"` +} + +func (m *NodeDescription) Reset() { *m = NodeDescription{} } +func (*NodeDescription) ProtoMessage() {} +func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} } + +type NodeTLSInfo struct { + // Information about which root certs the node trusts + TrustRoot []byte `protobuf:"bytes,1,opt,name=trust_root,json=trustRoot,proto3" json:"trust_root,omitempty"` + // Information about the node's current TLS certificate + CertIssuerSubject []byte `protobuf:"bytes,2,opt,name=cert_issuer_subject,json=certIssuerSubject,proto3" json:"cert_issuer_subject,omitempty"` + CertIssuerPublicKey []byte `protobuf:"bytes,3,opt,name=cert_issuer_public_key,json=certIssuerPublicKey,proto3" json:"cert_issuer_public_key,omitempty"` +} + +func (m *NodeTLSInfo) Reset() { *m = NodeTLSInfo{} } +func (*NodeTLSInfo) ProtoMessage() {} +func (*NodeTLSInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} } + +type RaftMemberStatus struct { + Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"` + Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} } +func (*RaftMemberStatus) ProtoMessage() {} +func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} } + +type NodeStatus struct { + State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Addr is the node's IP address as observed by the manager + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} } + +type Image struct { + // reference is a docker image reference. This can include a repository, tag + // or be fully qualified with a digest. The format is specified in the + // distribution/reference package. + Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` +} + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} } + +// Mount describes volume mounts for a container. +// +// The Mount type follows the structure of the mount syscall, including a type, +// source, target. Top-level flags, such as writable, are common to all kinds +// of mounts, where we also provide options that are specific to a type of +// mount. This corresponds to flags and data, respectively, in the syscall. +type Mount struct { + // Type defines the nature of the mount. + Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"` + // Source specifies the name of the mount.
Depending on mount type, this + may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // ReadOnly should be set to true if the mount should not be writable. + ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"` + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"` + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"` + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. + // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions *Mount_TmpfsOptions `protobuf:"bytes,7,opt,name=tmpfs_options,json=tmpfsOptions" json:"tmpfs_options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} } + +// BindOptions specifies options that are specific to a bind mount. +type Mount_BindOptions struct { + // Propagation mode of mount. + Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"` +} + +func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} } +func (*Mount_BindOptions) ProtoMessage() {} +func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// VolumeOptions contains parameters for mounting the volume. +type Mount_VolumeOptions struct { + // nocopy prevents automatic copying of data to the volume with data from target + NoCopy bool `protobuf:"varint,1,opt,name=nocopy,proto3" json:"nocopy,omitempty"` + // labels to apply to the volume if creating + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. + DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"` +} + +func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} } +func (*Mount_VolumeOptions) ProtoMessage() {} +func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 1} } + +type Mount_TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value.
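+ // For example, a SizeBytes of 67108864 (64 MiB) would be expressed as "64m" on Linux.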
+ // + // Percentages are not supported. + SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `protobuf:"varint,2,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"` +} + +func (m *Mount_TmpfsOptions) Reset() { *m = Mount_TmpfsOptions{} } +func (*Mount_TmpfsOptions) ProtoMessage() {} +func (*Mount_TmpfsOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 2} } + +type RestartPolicy struct { + Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"` + // Delay between restart attempts + // Note: can't use stdduration because this field needs to be nullable. + Delay *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"` + // MaxAttempts is the maximum number of restarts to attempt on an + // instance before giving up. Ignored if 0. + MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"` + // Window is the time window used to evaluate the restart policy. + // The time window is unbounded if this is 0. + // Note: can't use stdduration because this field needs to be nullable. + Window *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"` +} + +func (m *RestartPolicy) Reset() { *m = RestartPolicy{} } +func (*RestartPolicy) ProtoMessage() {} +func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} } + +// UpdateConfig specifies the rate and policy of updates. +// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"` + // Amount of time between updates. + Delay time.Duration `protobuf:"bytes,2,opt,name=delay,stdduration" json:"delay"` + // FailureAction is the action to take when an update fails. + FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"` + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + Monitor *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"` + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect.
+ // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + MaxFailureRatio float32 `protobuf:"fixed32,5,opt,name=max_failure_ratio,json=maxFailureRatio,proto3" json:"max_failure_ratio,omitempty"` + Order UpdateConfig_UpdateOrder `protobuf:"varint,6,opt,name=order,proto3,enum=docker.swarmkit.v1.UpdateConfig_UpdateOrder" json:"order,omitempty"` +} + +func (m *UpdateConfig) Reset() { *m = UpdateConfig{} } +func (*UpdateConfig) ProtoMessage() {} +func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} } + +// UpdateStatus is the status of an update in progress. +type UpdateStatus struct { + // State is the state of this update. It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"` + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *UpdateStatus) Reset() { *m = UpdateStatus{} } +func (*UpdateStatus) ProtoMessage() {} +func (*UpdateStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} } + +// Container specific status. +type ContainerStatus struct { + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` +} + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} } + +// PortStatus specifies the actual allocated runtime state of a list +// of port configs. +type PortStatus struct { + Ports []*PortConfig `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"` +} + +func (m *PortStatus) Reset() { *m = PortStatus{} } +func (*PortStatus) ProtoMessage() {} +func (*PortStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} } + +type TaskStatus struct { + // Note: can't use stdtime because this field is nullable. + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // State expresses the current state of the task. 
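+ // The values are the TaskState constants declared above (NEW through ORPHANED).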
+ State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"`
+ // Message reports a message for the task status. This should provide a
+ // human readable message that can point to how the task actually arrived
+ // at its current state.
+ //
+ // As a convention, we place a small message here describing what led to
+ // the current state. For example, if the task is in ready because it was
+ // prepared, we'd place "prepared" in this field. If we skipped preparation
+ // because the task is prepared, we would put "already prepared" in this
+ // field.
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ // Err is set if the task is in an error state, or is unable to
+ // progress from an earlier state because a precondition is
+ // unsatisfied.
+ //
+ // The following states should report a companion error:
+ //
+ // FAILED, REJECTED
+ //
+ // In general, messages that should be surfaced to users belong in the
+ // Err field, and notes on routine state transitions belong in Message.
+ //
+ // TODO(stevvooe) Integrate this field with the error interface.
+ Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"`
+ // Container status contains container specific status information.
+ //
+ // Types that are valid to be assigned to RuntimeStatus:
+ // *TaskStatus_Container
+ RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"`
+ // PortStatus provides a list of ports allocated at the host
+ // level.
+ PortStatus *PortStatus `protobuf:"bytes,6,opt,name=port_status,json=portStatus" json:"port_status,omitempty"`
+ // AppliedBy gives the node ID of the manager that applied this task
+ // status update to the Task object.
+ AppliedBy string `protobuf:"bytes,7,opt,name=applied_by,json=appliedBy,proto3" json:"applied_by,omitempty"`
+ // AppliedAt gives a timestamp of when this status update was applied to
+ // the Task object.
+ // Note: can't use stdtime because this field is nullable.
+ AppliedAt *google_protobuf.Timestamp `protobuf:"bytes,8,opt,name=applied_at,json=appliedAt" json:"applied_at,omitempty"`
+}
+
+func (m *TaskStatus) Reset() { *m = TaskStatus{} }
+func (*TaskStatus) ProtoMessage() {}
+func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} }
+
+type isTaskStatus_RuntimeStatus interface {
+ isTaskStatus_RuntimeStatus()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type TaskStatus_Container struct {
+ Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"`
+}
+
+func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {}
+
+func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus {
+ if m != nil {
+ return m.RuntimeStatus
+ }
+ return nil
+}
+
+func (m *TaskStatus) GetContainer() *ContainerStatus {
+ if x, ok := m.GetRuntimeStatus().(*TaskStatus_Container); ok {
+ return x.Container
+ }
+ return nil
+}
+
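+// Editor's note: a minimal sketch, not part of the generated file, showing
+// how the RuntimeStatus oneof above is consumed. describeTask is a
+// hypothetical helper; GetContainer returns the *ContainerStatus variant,
+// or nil when another (or no) runtime status is set.
+func describeTask(ts *TaskStatus) string {
+ if cs := ts.GetContainer(); cs != nil {
+ return fmt.Sprintf("container %s exited with code %d", cs.ContainerID, cs.ExitCode)
+ }
+ return ts.Message
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.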
+func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{
+ (*TaskStatus_Container)(nil),
+ }
+}
+
+func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*TaskStatus)
+ // runtime_status
+ switch x := m.RuntimeStatus.(type) {
+ case *TaskStatus_Container:
+ _ = b.EncodeVarint(5<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Container); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*TaskStatus)
+ switch tag {
+ case 5: // runtime_status.container
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(ContainerStatus)
+ err := b.DecodeMessage(msg)
+ m.RuntimeStatus = &TaskStatus_Container{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _TaskStatus_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*TaskStatus)
+ // runtime_status
+ switch x := m.RuntimeStatus.(type) {
+ case *TaskStatus_Container:
+ s := proto.Size(x.Container)
+ n += proto.SizeVarint(5<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// NetworkAttachmentConfig specifies how a service should be attached to a particular network.
+//
+// For now, this is a simple struct, but this can include future information
+// instructing Swarm on how this service should work on the particular
+// network.
+type NetworkAttachmentConfig struct {
+ // Target specifies the target network for attachment. This value must be a
+ // network ID.
+ Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ // Aliases specifies a list of discoverable alternate names for the service on this Target.
+ Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"`
+ // Addresses specifies a list of preferred IPv4 and IPv6 addresses. If
+ // these addresses are not available, the attachment might fail.
+ Addresses []string `protobuf:"bytes,3,rep,name=addresses" json:"addresses,omitempty"`
+ // DriverAttachmentOpts is a map of driver attachment options for the network target
+ DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *NetworkAttachmentConfig) Reset() { *m = NetworkAttachmentConfig{} }
+func (*NetworkAttachmentConfig) ProtoMessage() {}
+func (*NetworkAttachmentConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
+
+// IPAMConfig specifies parameters for IP Address Management.
+type IPAMConfig struct {
+ Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"`
+ // Subnet defines a network as a CIDR address (i.e. network and mask,
+ // 192.168.0.1/24).
+ Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
+ // Range defines the portion of the subnet to allocate to tasks. This is
+ // defined as a subnet within the primary subnet.
+ Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+ // Gateway address within the subnet.
+ Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
+ // Reserved is a list of addresses from the master pool that should *not* be
+ // allocated. These addresses may have already been allocated or may be
+ // reserved for another allocation manager.
+ Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *IPAMConfig) Reset() { *m = IPAMConfig{} }
+func (*IPAMConfig) ProtoMessage() {}
+func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can later be queried
+// using a service discovery API or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service from outside the cluster by sending a connection
+// request to this port on any node in the cluster.
+type PortConfig struct {
+ // Name for the port. If provided, the port information can
+ // be queried using the name as in a DNS SRV query.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Protocol for the port which is exposed.
+ Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
+ // The port which the application is exposing and is bound to.
+ TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+ // PublishedPort specifies the port on which the service is exposed. If
+ // specified, the port must be within the available range. If not specified
+ // (value is zero), an available port is automatically assigned.
+ PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+ // PublishMode controls how the port is published.
+ PublishMode PortConfig_PublishMode `protobuf:"varint,5,opt,name=publish_mode,json=publishMode,proto3,enum=docker.swarmkit.v1.PortConfig_PublishMode" json:"publish_mode,omitempty"`
+}
+
+func (m *PortConfig) Reset() { *m = PortConfig{} }
+func (*PortConfig) ProtoMessage() {}
+func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} }
+
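+// Editor's note: a minimal sketch (hypothetical helper, not generated code)
+// of a typical PortConfig: expose container port 80 over TCP and let the
+// allocator pick the published port. PortConfig_TCP is assumed to be the
+// generated constant for the Protocol enum's TCP value.
+func examplePortConfig() *PortConfig {
+ return &PortConfig{
+ Name: "http",
+ Protocol: PortConfig_TCP,
+ TargetPort: 80,
+ // PublishedPort is deliberately left zero so an available port is
+ // automatically assigned, per the field comment above.
+ }
+}
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.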
+type Driver struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Driver) Reset() { *m = Driver{} }
+func (*Driver) ProtoMessage() {}
+func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} }
+
+type IPAMOptions struct {
+ Driver *Driver `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"`
+ Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"`
+}
+
+func (m *IPAMOptions) Reset() { *m = IPAMOptions{} }
+func (*IPAMOptions) ProtoMessage() {}
+func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} }
+
+// Peer should be used anywhere we describe a remote peer.
+type Peer struct {
+ NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *Peer) Reset() { *m = Peer{} }
+func (*Peer) ProtoMessage() {}
+func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} }
+
+// WeightedPeer should be used anywhere we describe a remote peer
+// with a weight.
+type WeightedPeer struct {
+ Peer *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"`
+ Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"`
+}
+
+func (m *WeightedPeer) Reset() { *m = WeightedPeer{} }
+func (*WeightedPeer) ProtoMessage() {}
+func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} }
+
+type IssuanceStatus struct {
+ State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"`
+ // Err is set if the Certificate Issuance is in an error state.
+ // The following states should report a companion error:
+ // FAILED
+ Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"`
+}
+
+func (m *IssuanceStatus) Reset() { *m = IssuanceStatus{} }
+func (*IssuanceStatus) ProtoMessage() {}
+func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} }
+
+type AcceptancePolicy struct {
+ Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"`
+}
+
+func (m *AcceptancePolicy) Reset() { *m = AcceptancePolicy{} }
+func (*AcceptancePolicy) ProtoMessage() {}
+func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} }
+
+type AcceptancePolicy_RoleAdmissionPolicy struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ // Autoaccept controls which roles' certificates are automatically
+ // issued without administrator intervention.
+ Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"` + // Secret represents a user-provided string that is necessary for new + // nodes to join the cluster + Secret *AcceptancePolicy_RoleAdmissionPolicy_Secret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} } +func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0} +} + +type AcceptancePolicy_RoleAdmissionPolicy_Secret struct { + // The actual content (possibly hashed) + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // The type of hash we are using, or "plaintext" + Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Reset() { + *m = AcceptancePolicy_RoleAdmissionPolicy_Secret{} +} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0, 0} +} + +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCA_CAProtocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=docker.swarmkit.v1.ExternalCA_CAProtocol" json:"protocol,omitempty"` + // URL is the URL where the external CA can be reached. + URL string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CACert specifies which root CA is used by this external CA + CACert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` +} + +func (m *ExternalCA) Reset() { *m = ExternalCA{} } +func (*ExternalCA) ProtoMessage() {} +func (*ExternalCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} } + +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + // Note: can't use stdduration because this field needs to be nullable. + NodeCertExpiry *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"` + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `protobuf:"bytes,2,rep,name=external_cas,json=externalCas" json:"external_cas,omitempty"` + // SigningCACert is the desired CA certificate to be used as the root and + // signing CA for the swarm. If not provided, indicates that we are either happy + // with the current configuration, or (together with a bump in the ForceRotate value) + // that we want a certificate and key generated for us. 
+ SigningCACert []byte `protobuf:"bytes,3,opt,name=signing_ca_cert,json=signingCaCert,proto3" json:"signing_ca_cert,omitempty"`
+ // SigningCAKey is the desired private key, matching the signing CA cert, to be used
+ // to sign certificates for the swarm
+ SigningCAKey []byte `protobuf:"bytes,4,opt,name=signing_ca_key,json=signingCaKey,proto3" json:"signing_ca_key,omitempty"`
+ // ForceRotate is a counter that triggers a root CA rotation even if no relevant
+ // parameters have been changed in the spec. This will force the manager to generate a new
+ // certificate and key, if none have been provided.
+ ForceRotate uint64 `protobuf:"varint,5,opt,name=force_rotate,json=forceRotate,proto3" json:"force_rotate,omitempty"`
+}
+
+func (m *CAConfig) Reset() { *m = CAConfig{} }
+func (*CAConfig) ProtoMessage() {}
+func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} }
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"`
+}
+
+func (m *OrchestrationConfig) Reset() { *m = OrchestrationConfig{} }
+func (*OrchestrationConfig) ProtoMessage() {}
+func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} }
+
+// TaskDefaults specifies default values for task creation.
+type TaskDefaults struct {
+ // LogDriver specifies the log driver to use for the cluster if not
+ // specified for each task.
+ //
+ // If this is changed, only new tasks will pick up the new log driver.
+ // Existing tasks will continue to use the previous default until rescheduled.
+ LogDriver *Driver `protobuf:"bytes,1,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
+}
+
+func (m *TaskDefaults) Reset() { *m = TaskDefaults{} }
+func (*TaskDefaults) ProtoMessage() {}
+func (*TaskDefaults) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{35} }
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often an agent should send heartbeats to
+ // the dispatcher.
+ // Note: can't use stdduration because this field needs to be nullable.
+ HeartbeatPeriod *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=heartbeat_period,json=heartbeatPeriod" json:"heartbeat_period,omitempty"`
+}
+
+func (m *DispatcherConfig) Reset() { *m = DispatcherConfig{} }
+func (*DispatcherConfig) ProtoMessage() {}
+func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{36} }
+
+// RaftConfig defines raft settings for the cluster.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"`
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"`
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"`
+ // HeartbeatTick defines the number of ticks (in seconds) between
+ // each heartbeat message sent to other members for health checks.
+ HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"`
+ // ElectionTick defines the number of ticks (in seconds) needed
+ // without a leader to trigger a new election.
+ ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"`
+}
+
+func (m *RaftConfig) Reset() { *m = RaftConfig{} }
+func (*RaftConfig) ProtoMessage() {}
+func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{37} }
+
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
+ AutoLockManagers bool `protobuf:"varint,1,opt,name=auto_lock_managers,json=autoLockManagers,proto3" json:"auto_lock_managers,omitempty"`
+}
+
+func (m *EncryptionConfig) Reset() { *m = EncryptionConfig{} }
+func (*EncryptionConfig) ProtoMessage() {}
+func (*EncryptionConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} }
+
+type SpreadOver struct {
+ SpreadDescriptor string `protobuf:"bytes,1,opt,name=spread_descriptor,json=spreadDescriptor,proto3" json:"spread_descriptor,omitempty"`
+}
+
+func (m *SpreadOver) Reset() { *m = SpreadOver{} }
+func (*SpreadOver) ProtoMessage() {}
+func (*SpreadOver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} }
+
+type PlacementPreference struct {
+ // Types that are valid to be assigned to Preference:
+ // *PlacementPreference_Spread
+ Preference isPlacementPreference_Preference `protobuf_oneof:"Preference"`
+}
+
+func (m *PlacementPreference) Reset() { *m = PlacementPreference{} }
+func (*PlacementPreference) ProtoMessage() {}
+func (*PlacementPreference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{40} }
+
+type isPlacementPreference_Preference interface {
+ isPlacementPreference_Preference()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type PlacementPreference_Spread struct {
+ Spread *SpreadOver `protobuf:"bytes,1,opt,name=spread,oneof"`
+}
+
+func (*PlacementPreference_Spread) isPlacementPreference_Preference() {}
+
+func (m *PlacementPreference) GetPreference() isPlacementPreference_Preference {
+ if m != nil {
+ return m.Preference
+ }
+ return nil
+}
+
+func (m *PlacementPreference) GetSpread() *SpreadOver {
+ if x, ok := m.GetPreference().(*PlacementPreference_Spread); ok {
+ return x.Spread
+ }
+ return nil
+}
+
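+// Editor's note: a minimal sketch (hypothetical, not generated code) of how
+// the Preference oneof is populated: spread tasks evenly over the values of
+// a node label.
+func exampleSpreadPreference() *PlacementPreference {
+ return &PlacementPreference{
+ Preference: &PlacementPreference_Spread{
+ Spread: &SpreadOver{SpreadDescriptor: "node.labels.datacenter"},
+ },
+ }
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.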
+func (*PlacementPreference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _PlacementPreference_OneofMarshaler, _PlacementPreference_OneofUnmarshaler, _PlacementPreference_OneofSizer, []interface{}{
+ (*PlacementPreference_Spread)(nil),
+ }
+}
+
+func _PlacementPreference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*PlacementPreference)
+ // Preference
+ switch x := m.Preference.(type) {
+ case *PlacementPreference_Spread:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Spread); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("PlacementPreference.Preference has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _PlacementPreference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*PlacementPreference)
+ switch tag {
+ case 1: // Preference.spread
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(SpreadOver)
+ err := b.DecodeMessage(msg)
+ m.Preference = &PlacementPreference_Spread{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _PlacementPreference_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*PlacementPreference)
+ // Preference
+ switch x := m.Preference.(type) {
+ case *PlacementPreference_Spread:
+ s := proto.Size(x.Spread)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Placement specifies task distribution constraints.
+type Placement struct {
+ // Constraints specifies a set of requirements a node should meet for a task.
+ Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"`
+ // Preferences provide a way to make the scheduler aware of factors
+ // such as topology. They are provided in order from highest to lowest
+ // precedence.
+ Preferences []*PlacementPreference `protobuf:"bytes,2,rep,name=preferences" json:"preferences,omitempty"`
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []*Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms,omitempty"`
+}
+
+func (m *Placement) Reset() { *m = Placement{} }
+func (*Placement) ProtoMessage() {}
+func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{41} }
+
+// JoinTokens contains the join tokens for workers and managers.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string `protobuf:"bytes,1,opt,name=worker,proto3" json:"worker,omitempty"`
+ // Manager is the join token managers may use to join the swarm.
+ Manager string `protobuf:"bytes,2,opt,name=manager,proto3" json:"manager,omitempty"`
+}
+
+func (m *JoinTokens) Reset() { *m = JoinTokens{} }
+func (*JoinTokens) ProtoMessage() {}
+func (*JoinTokens) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{42} }
+
+type RootCA struct {
+ // CAKey is the root CA private key.
+ CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+ // CACert is the root CA certificate.
+ CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // CACertHash is the digest of the CA Certificate.
+ CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
+ // JoinTokens contains the join tokens for workers and managers.
+ JoinTokens JoinTokens `protobuf:"bytes,4,opt,name=join_tokens,json=joinTokens" json:"join_tokens"`
+ // RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the
+ // middle of a root rotation.
+ RootRotation *RootRotation `protobuf:"bytes,5,opt,name=root_rotation,json=rootRotation" json:"root_rotation,omitempty"`
+ // LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotate counter.
+ // It indicates when the current CA cert and key were generated (or updated).
+ LastForcedRotation uint64 `protobuf:"varint,6,opt,name=last_forced_rotation,json=lastForcedRotation,proto3" json:"last_forced_rotation,omitempty"`
+}
+
+func (m *RootCA) Reset() { *m = RootCA{} }
+func (*RootCA) ProtoMessage() {}
+func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{43} }
+
+type Certificate struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+ Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+ Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
+ // CN represents the node ID.
+ CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
+}
+
+func (m *Certificate) Reset() { *m = Certificate{} }
+func (*Certificate) ProtoMessage() {}
+func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{44} }
+
+// Symmetric keys to encrypt inter-agent communication.
+type EncryptionKey struct {
+ // Agent subsystem the key is intended for. Example:
+ // networking:gossip
+ Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
+ Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Timestamp from the Lamport clock of the key allocator to
+ // identify the relative age of the key.
+ LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
+}
+
+func (m *EncryptionKey) Reset() { *m = EncryptionKey{} }
+func (*EncryptionKey) ProtoMessage() {}
+func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{45} }
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+type ManagerStatus struct {
+ // RaftID specifies the internal ID used by the manager in a raft context; it can never be modified
+ // and is used only for informational purposes.
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+ // Addr is the address advertised to raft.
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+ // Leader is set to true if this node is the raft leader.
+ Leader bool `protobuf:"varint,3,opt,name=leader,proto3" json:"leader,omitempty"`
+ // Reachability specifies whether this node is reachable.
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,4,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"` +} + +func (m *ManagerStatus) Reset() { *m = ManagerStatus{} } +func (*ManagerStatus) ProtoMessage() {} +func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{46} } + +// FileTarget represents a specific target that is backed by a file +type FileTarget struct { + // Name represents the final filename in the filesystem + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // UID represents the file UID + UID string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` + // GID represents the file GID + GID string `protobuf:"bytes,3,opt,name=gid,proto3" json:"gid,omitempty"` + // Mode represents the FileMode of the file + Mode os.FileMode `protobuf:"varint,4,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"` +} + +func (m *FileTarget) Reset() { *m = FileTarget{} } +func (*FileTarget) ProtoMessage() {} +func (*FileTarget) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{47} } + +// SecretReference is the linkage between a service and a secret that it uses. +type SecretReference struct { + // SecretID represents the ID of the specific Secret that we're + // referencing. This identifier exists so that SecretReferences don't leak + // any information about the secret contents. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretName is the name of the secret that this references, but this is just provided for + // lookup/display purposes. The secret in the reference will be identified by its ID. + SecretName string `protobuf:"bytes,2,opt,name=secret_name,json=secretName,proto3" json:"secret_name,omitempty"` + // Target specifies how this secret should be exposed to the task. + // + // Types that are valid to be assigned to Target: + // *SecretReference_File + Target isSecretReference_Target `protobuf_oneof:"target"` +} + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{48} } + +type isSecretReference_Target interface { + isSecretReference_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type SecretReference_File struct { + File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"` +} + +func (*SecretReference_File) isSecretReference_Target() {} + +func (m *SecretReference) GetTarget() isSecretReference_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SecretReference) GetFile() *FileTarget { + if x, ok := m.GetTarget().(*SecretReference_File); ok { + return x.File + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*SecretReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _SecretReference_OneofMarshaler, _SecretReference_OneofUnmarshaler, _SecretReference_OneofSizer, []interface{}{
+ (*SecretReference_File)(nil),
+ }
+}
+
+func _SecretReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*SecretReference)
+ // target
+ switch x := m.Target.(type) {
+ case *SecretReference_File:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.File); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("SecretReference.Target has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _SecretReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*SecretReference)
+ switch tag {
+ case 3: // target.file
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(FileTarget)
+ err := b.DecodeMessage(msg)
+ m.Target = &SecretReference_File{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _SecretReference_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*SecretReference)
+ // target
+ switch x := m.Target.(type) {
+ case *SecretReference_File:
+ s := proto.Size(x.File)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// ConfigReference is the linkage between a service and a config that it uses.
+type ConfigReference struct {
+ // ConfigID represents the ID of the specific Config that we're
+ // referencing.
+ ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"`
+ // ConfigName is the name of the config that this references, but this is just provided for
+ // lookup/display purposes. The config in the reference will be identified by its ID.
+ ConfigName string `protobuf:"bytes,2,opt,name=config_name,json=configName,proto3" json:"config_name,omitempty"`
+ // Target specifies how this config should be exposed to the task.
+ //
+ // Types that are valid to be assigned to Target:
+ // *ConfigReference_File
+ Target isConfigReference_Target `protobuf_oneof:"target"`
+}
+
+func (m *ConfigReference) Reset() { *m = ConfigReference{} }
+func (*ConfigReference) ProtoMessage() {}
+func (*ConfigReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{49} }
+
+type isConfigReference_Target interface {
+ isConfigReference_Target()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ConfigReference_File struct {
+ File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"`
+}
+
+func (*ConfigReference_File) isConfigReference_Target() {}
+
+func (m *ConfigReference) GetTarget() isConfigReference_Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ConfigReference) GetFile() *FileTarget {
+ if x, ok := m.GetTarget().(*ConfigReference_File); ok {
+ return x.File
+ }
+ return nil
+}
+
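+// Editor's note: a minimal sketch (hypothetical IDs and names, not generated
+// code) of a ConfigReference that surfaces the referenced config as a file
+// named "app.conf" inside the task, via the FileTarget oneof defined above.
+func exampleConfigReference() *ConfigReference {
+ return &ConfigReference{
+ ConfigID: "abc123",
+ ConfigName: "app-config",
+ Target: &ConfigReference_File{
+ File: &FileTarget{Name: "app.conf", UID: "0", GID: "0", Mode: 0444},
+ },
+ }
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.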
+func (*ConfigReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ConfigReference_OneofMarshaler, _ConfigReference_OneofUnmarshaler, _ConfigReference_OneofSizer, []interface{}{ + (*ConfigReference_File)(nil), + } +} + +func _ConfigReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.File); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ConfigReference.Target has unexpected type %T", x) + } + return nil +} + +func _ConfigReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ConfigReference) + switch tag { + case 3: // target.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileTarget) + err := b.DecodeMessage(msg) + m.Target = &ConfigReference_File{msg} + return true, err + default: + return false, nil + } +} + +func _ConfigReference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ConfigReference) + // target + switch x := m.Target.(type) { + case *ConfigReference_File: + s := proto.Size(x.File) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// BlacklistedCertificate is a record for a blacklisted certificate. It does not +// contain the certificate's CN, because these records are indexed by CN. +type BlacklistedCertificate struct { + // Expiry is the latest known expiration time of a certificate that + // was issued for the given CN. + // Note: can't use stdtime because this field is nullable. + Expiry *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=expiry" json:"expiry,omitempty"` +} + +func (m *BlacklistedCertificate) Reset() { *m = BlacklistedCertificate{} } +func (*BlacklistedCertificate) ProtoMessage() {} +func (*BlacklistedCertificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{50} } + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `protobuf:"bytes,1,rep,name=test" json:"test,omitempty"` + // Interval is the time to wait between checks. Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Interval *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=interval" json:"interval,omitempty"` + // Timeout is the time to wait before considering the check to have hung. + // Zero means inherit. + // Note: can't use stdduration because this field needs to be nullable. + Timeout *google_protobuf1.Duration `protobuf:"bytes,3,opt,name=timeout" json:"timeout,omitempty"` + // Retries is the number of consecutive failures needed to consider a + // container as unhealthy. Zero means inherit. 
+ Retries int32 `protobuf:"varint,4,opt,name=retries,proto3" json:"retries,omitempty"`
+ // Start period is the period for container initialization during
+ // which health check failures will not count towards the maximum
+ // number of retries.
+ StartPeriod *google_protobuf1.Duration `protobuf:"bytes,5,opt,name=start_period,json=startPeriod" json:"start_period,omitempty"`
+}
+
+func (m *HealthConfig) Reset() { *m = HealthConfig{} }
+func (*HealthConfig) ProtoMessage() {}
+func (*HealthConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{51} }
+
+type MaybeEncryptedRecord struct {
+ Algorithm MaybeEncryptedRecord_Algorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm" json:"algorithm,omitempty"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ Nonce []byte `protobuf:"bytes,3,opt,name=nonce,proto3" json:"nonce,omitempty"`
+}
+
+func (m *MaybeEncryptedRecord) Reset() { *m = MaybeEncryptedRecord{} }
+func (*MaybeEncryptedRecord) ProtoMessage() {}
+func (*MaybeEncryptedRecord) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{52} }
+
+type RootRotation struct {
+ CACert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ CAKey []byte `protobuf:"bytes,2,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+ // CrossSignedCACert is the CACert that has been cross-signed by the previous root.
+ CrossSignedCACert []byte `protobuf:"bytes,3,opt,name=cross_signed_ca_cert,json=crossSignedCaCert,proto3" json:"cross_signed_ca_cert,omitempty"`
+}
+
+func (m *RootRotation) Reset() { *m = RootRotation{} }
+func (*RootRotation) ProtoMessage() {}
+func (*RootRotation) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{53} }
+
+// Privileges specifies security configuration/permissions.
+type Privileges struct {
+ CredentialSpec *Privileges_CredentialSpec `protobuf:"bytes,1,opt,name=credential_spec,json=credentialSpec" json:"credential_spec,omitempty"`
+ SELinuxContext *Privileges_SELinuxContext `protobuf:"bytes,2,opt,name=selinux_context,json=selinuxContext" json:"selinux_context,omitempty"`
+}
+
+func (m *Privileges) Reset() { *m = Privileges{} }
+func (*Privileges) ProtoMessage() {}
+func (*Privileges) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{54} }
+
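+// Editor's note: a minimal sketch (hypothetical values, not generated code)
+// of a HealthConfig roughly equivalent to a Dockerfile
+// HEALTHCHECK --interval=30s --timeout=5s --retries=3 instruction. The
+// nullable durations are built with the protobuf Duration message's Seconds
+// field, which is assumed here for the google_protobuf1 alias used in this
+// file.
+func exampleHealthConfig() *HealthConfig {
+ return &HealthConfig{
+ // CMD-SHELL form: run the command with the system's default shell.
+ Test: []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+ Interval: &google_protobuf1.Duration{Seconds: 30},
+ Timeout: &google_protobuf1.Duration{Seconds: 5},
+ Retries: 3,
+ }
+}
+
+// CredentialSpec for managed service account (Windows only).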
+type Privileges_CredentialSpec struct { + // Types that are valid to be assigned to Source: + // *Privileges_CredentialSpec_File + // *Privileges_CredentialSpec_Registry + Source isPrivileges_CredentialSpec_Source `protobuf_oneof:"source"` +} + +func (m *Privileges_CredentialSpec) Reset() { *m = Privileges_CredentialSpec{} } +func (*Privileges_CredentialSpec) ProtoMessage() {} +func (*Privileges_CredentialSpec) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 0} +} + +type isPrivileges_CredentialSpec_Source interface { + isPrivileges_CredentialSpec_Source() + MarshalTo([]byte) (int, error) + Size() int +} + +type Privileges_CredentialSpec_File struct { + File string `protobuf:"bytes,1,opt,name=file,proto3,oneof"` +} +type Privileges_CredentialSpec_Registry struct { + Registry string `protobuf:"bytes,2,opt,name=registry,proto3,oneof"` +} + +func (*Privileges_CredentialSpec_File) isPrivileges_CredentialSpec_Source() {} +func (*Privileges_CredentialSpec_Registry) isPrivileges_CredentialSpec_Source() {} + +func (m *Privileges_CredentialSpec) GetSource() isPrivileges_CredentialSpec_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Privileges_CredentialSpec) GetFile() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_File); ok { + return x.File + } + return "" +} + +func (m *Privileges_CredentialSpec) GetRegistry() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_Registry); ok { + return x.Registry + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Privileges_CredentialSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Privileges_CredentialSpec_OneofMarshaler, _Privileges_CredentialSpec_OneofUnmarshaler, _Privileges_CredentialSpec_OneofSizer, []interface{}{ + (*Privileges_CredentialSpec_File)(nil), + (*Privileges_CredentialSpec_Registry)(nil), + } +} + +func _Privileges_CredentialSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.File) + case *Privileges_CredentialSpec_Registry: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Registry) + case nil: + default: + return fmt.Errorf("Privileges_CredentialSpec.Source has unexpected type %T", x) + } + return nil +} + +func _Privileges_CredentialSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Privileges_CredentialSpec) + switch tag { + case 1: // source.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_File{x} + return true, err + case 2: // source.registry + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_Registry{x} + return true, err + default: + return false, nil + } +} + +func _Privileges_CredentialSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.File))) + n += len(x.File) + case 
*Privileges_CredentialSpec_Registry: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Registry))) + n += len(x.Registry) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// SELinuxContext contains the SELinux labels for the container. +type Privileges_SELinuxContext struct { + Disable bool `protobuf:"varint,1,opt,name=disable,proto3" json:"disable,omitempty"` + User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"` + Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Level string `protobuf:"bytes,5,opt,name=level,proto3" json:"level,omitempty"` +} + +func (m *Privileges_SELinuxContext) Reset() { *m = Privileges_SELinuxContext{} } +func (*Privileges_SELinuxContext) ProtoMessage() {} +func (*Privileges_SELinuxContext) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{54, 1} +} + +func init() { + proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version") + proto.RegisterType((*IndexEntry)(nil), "docker.swarmkit.v1.IndexEntry") + proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations") + proto.RegisterType((*NamedGenericResource)(nil), "docker.swarmkit.v1.NamedGenericResource") + proto.RegisterType((*DiscreteGenericResource)(nil), "docker.swarmkit.v1.DiscreteGenericResource") + proto.RegisterType((*GenericResource)(nil), "docker.swarmkit.v1.GenericResource") + proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources") + proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements") + proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform") + proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription") + proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription") + proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription") + proto.RegisterType((*NodeTLSInfo)(nil), "docker.swarmkit.v1.NodeTLSInfo") + proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus") + proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus") + proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image") + proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount") + proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions") + proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions") + proto.RegisterType((*Mount_TmpfsOptions)(nil), "docker.swarmkit.v1.Mount.TmpfsOptions") + proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy") + proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig") + proto.RegisterType((*UpdateStatus)(nil), "docker.swarmkit.v1.UpdateStatus") + proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus") + proto.RegisterType((*PortStatus)(nil), "docker.swarmkit.v1.PortStatus") + proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus") + proto.RegisterType((*NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.NetworkAttachmentConfig") + proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig") + proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig") + proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver") + proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions") + 
proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer") + proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer") + proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus") + proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_Secret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret") + proto.RegisterType((*ExternalCA)(nil), "docker.swarmkit.v1.ExternalCA") + proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig") + proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig") + proto.RegisterType((*TaskDefaults)(nil), "docker.swarmkit.v1.TaskDefaults") + proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig") + proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig") + proto.RegisterType((*EncryptionConfig)(nil), "docker.swarmkit.v1.EncryptionConfig") + proto.RegisterType((*SpreadOver)(nil), "docker.swarmkit.v1.SpreadOver") + proto.RegisterType((*PlacementPreference)(nil), "docker.swarmkit.v1.PlacementPreference") + proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement") + proto.RegisterType((*JoinTokens)(nil), "docker.swarmkit.v1.JoinTokens") + proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA") + proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate") + proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey") + proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus") + proto.RegisterType((*FileTarget)(nil), "docker.swarmkit.v1.FileTarget") + proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference") + proto.RegisterType((*ConfigReference)(nil), "docker.swarmkit.v1.ConfigReference") + proto.RegisterType((*BlacklistedCertificate)(nil), "docker.swarmkit.v1.BlacklistedCertificate") + proto.RegisterType((*HealthConfig)(nil), "docker.swarmkit.v1.HealthConfig") + proto.RegisterType((*MaybeEncryptedRecord)(nil), "docker.swarmkit.v1.MaybeEncryptedRecord") + proto.RegisterType((*RootRotation)(nil), "docker.swarmkit.v1.RootRotation") + proto.RegisterType((*Privileges)(nil), "docker.swarmkit.v1.Privileges") + proto.RegisterType((*Privileges_CredentialSpec)(nil), "docker.swarmkit.v1.Privileges.CredentialSpec") + proto.RegisterType((*Privileges_SELinuxContext)(nil), "docker.swarmkit.v1.Privileges.SELinuxContext") + proto.RegisterEnum("docker.swarmkit.v1.ResourceType", ResourceType_name, ResourceType_value) + proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value) + proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value) + 
proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_UpdateOrder", UpdateConfig_UpdateOrder_name, UpdateConfig_UpdateOrder_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value) + proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_PublishMode", PortConfig_PublishMode_name, PortConfig_PublishMode_value) + proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value) + proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value) + proto.RegisterEnum("docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm", MaybeEncryptedRecord_Algorithm_name, MaybeEncryptedRecord_Algorithm_value) +} + +func (m *Version) Copy() *Version { + if m == nil { + return nil + } + o := &Version{} + o.CopyFrom(m) + return o +} + +func (m *Version) CopyFrom(src interface{}) { + + o := src.(*Version) + *m = *o +} + +func (m *IndexEntry) Copy() *IndexEntry { + if m == nil { + return nil + } + o := &IndexEntry{} + o.CopyFrom(m) + return o +} + +func (m *IndexEntry) CopyFrom(src interface{}) { + + o := src.(*IndexEntry) + *m = *o +} + +func (m *Annotations) Copy() *Annotations { + if m == nil { + return nil + } + o := &Annotations{} + o.CopyFrom(m) + return o +} + +func (m *Annotations) CopyFrom(src interface{}) { + + o := src.(*Annotations) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Indices != nil { + m.Indices = make([]IndexEntry, len(o.Indices)) + for i := range m.Indices { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Indices[i], &o.Indices[i]) + } + } + +} + +func (m *NamedGenericResource) Copy() *NamedGenericResource { + if m == nil { + return nil + } + o := &NamedGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *NamedGenericResource) CopyFrom(src interface{}) { + + o := src.(*NamedGenericResource) + *m = *o +} + +func (m *DiscreteGenericResource) Copy() *DiscreteGenericResource { + if m == nil { + return nil + } + o := &DiscreteGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *DiscreteGenericResource) CopyFrom(src interface{}) { + + o := src.(*DiscreteGenericResource) + *m = *o +} + +func (m *GenericResource) Copy() *GenericResource { + if m == nil { + return nil + } + o := &GenericResource{} + o.CopyFrom(m) + return o +} + +func (m *GenericResource) CopyFrom(src interface{}) { + + o := src.(*GenericResource) + *m = *o + if o.Resource != nil { + switch o.Resource.(type) { + case *GenericResource_NamedResourceSpec: + v := GenericResource_NamedResourceSpec{ + NamedResourceSpec: &NamedGenericResource{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) + m.Resource = &v + case 
*GenericResource_DiscreteResourceSpec: + v := GenericResource_DiscreteResourceSpec{ + DiscreteResourceSpec: &DiscreteGenericResource{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec()) + m.Resource = &v + } + } + +} + +func (m *Resources) Copy() *Resources { + if m == nil { + return nil + } + o := &Resources{} + o.CopyFrom(m) + return o +} + +func (m *Resources) CopyFrom(src interface{}) { + + o := src.(*Resources) + *m = *o + if o.Generic != nil { + m.Generic = make([]*GenericResource, len(o.Generic)) + for i := range m.Generic { + m.Generic[i] = &GenericResource{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Generic[i], o.Generic[i]) + } + } + +} + +func (m *ResourceRequirements) Copy() *ResourceRequirements { + if m == nil { + return nil + } + o := &ResourceRequirements{} + o.CopyFrom(m) + return o +} + +func (m *ResourceRequirements) CopyFrom(src interface{}) { + + o := src.(*ResourceRequirements) + *m = *o + if o.Limits != nil { + m.Limits = &Resources{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Limits, o.Limits) + } + if o.Reservations != nil { + m.Reservations = &Resources{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Reservations, o.Reservations) + } +} + +func (m *Platform) Copy() *Platform { + if m == nil { + return nil + } + o := &Platform{} + o.CopyFrom(m) + return o +} + +func (m *Platform) CopyFrom(src interface{}) { + + o := src.(*Platform) + *m = *o +} + +func (m *PluginDescription) Copy() *PluginDescription { + if m == nil { + return nil + } + o := &PluginDescription{} + o.CopyFrom(m) + return o +} + +func (m *PluginDescription) CopyFrom(src interface{}) { + + o := src.(*PluginDescription) + *m = *o +} + +func (m *EngineDescription) Copy() *EngineDescription { + if m == nil { + return nil + } + o := &EngineDescription{} + o.CopyFrom(m) + return o +} + +func (m *EngineDescription) CopyFrom(src interface{}) { + + o := src.(*EngineDescription) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Plugins != nil { + m.Plugins = make([]PluginDescription, len(o.Plugins)) + for i := range m.Plugins { + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Plugins[i], &o.Plugins[i]) + } + } + +} + +func (m *NodeDescription) Copy() *NodeDescription { + if m == nil { + return nil + } + o := &NodeDescription{} + o.CopyFrom(m) + return o +} + +func (m *NodeDescription) CopyFrom(src interface{}) { + + o := src.(*NodeDescription) + *m = *o + if o.Platform != nil { + m.Platform = &Platform{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Platform, o.Platform) + } + if o.Resources != nil { + m.Resources = &Resources{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Resources, o.Resources) + } + if o.Engine != nil { + m.Engine = &EngineDescription{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Engine, o.Engine) + } + if o.TLSInfo != nil { + m.TLSInfo = &NodeTLSInfo{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.TLSInfo, o.TLSInfo) + } +} + +func (m *NodeTLSInfo) Copy() *NodeTLSInfo { + if m == nil { + return nil + } + o := &NodeTLSInfo{} + o.CopyFrom(m) + return o +} + +func (m *NodeTLSInfo) CopyFrom(src interface{}) { + + o := src.(*NodeTLSInfo) + *m = *o + if o.TrustRoot != nil { + m.TrustRoot = make([]byte, len(o.TrustRoot)) + copy(m.TrustRoot, o.TrustRoot) + } + if o.CertIssuerSubject != nil { + m.CertIssuerSubject = make([]byte, len(o.CertIssuerSubject)) + copy(m.CertIssuerSubject, o.CertIssuerSubject) + } + if 
o.CertIssuerPublicKey != nil { + m.CertIssuerPublicKey = make([]byte, len(o.CertIssuerPublicKey)) + copy(m.CertIssuerPublicKey, o.CertIssuerPublicKey) + } +} + +func (m *RaftMemberStatus) Copy() *RaftMemberStatus { + if m == nil { + return nil + } + o := &RaftMemberStatus{} + o.CopyFrom(m) + return o +} + +func (m *RaftMemberStatus) CopyFrom(src interface{}) { + + o := src.(*RaftMemberStatus) + *m = *o +} + +func (m *NodeStatus) Copy() *NodeStatus { + if m == nil { + return nil + } + o := &NodeStatus{} + o.CopyFrom(m) + return o +} + +func (m *NodeStatus) CopyFrom(src interface{}) { + + o := src.(*NodeStatus) + *m = *o +} + +func (m *Image) Copy() *Image { + if m == nil { + return nil + } + o := &Image{} + o.CopyFrom(m) + return o +} + +func (m *Image) CopyFrom(src interface{}) { + + o := src.(*Image) + *m = *o +} + +func (m *Mount) Copy() *Mount { + if m == nil { + return nil + } + o := &Mount{} + o.CopyFrom(m) + return o +} + +func (m *Mount) CopyFrom(src interface{}) { + + o := src.(*Mount) + *m = *o + if o.BindOptions != nil { + m.BindOptions = &Mount_BindOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.BindOptions, o.BindOptions) + } + if o.VolumeOptions != nil { + m.VolumeOptions = &Mount_VolumeOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.VolumeOptions, o.VolumeOptions) + } + if o.TmpfsOptions != nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.TmpfsOptions, o.TmpfsOptions) + } +} + +func (m *Mount_BindOptions) Copy() *Mount_BindOptions { + if m == nil { + return nil + } + o := &Mount_BindOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_BindOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_BindOptions) + *m = *o +} + +func (m *Mount_VolumeOptions) Copy() *Mount_VolumeOptions { + if m == nil { + return nil + } + o := &Mount_VolumeOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_VolumeOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_VolumeOptions) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } +} + +func (m *Mount_TmpfsOptions) Copy() *Mount_TmpfsOptions { + if m == nil { + return nil + } + o := &Mount_TmpfsOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_TmpfsOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_TmpfsOptions) + *m = *o +} + +func (m *RestartPolicy) Copy() *RestartPolicy { + if m == nil { + return nil + } + o := &RestartPolicy{} + o.CopyFrom(m) + return o +} + +func (m *RestartPolicy) CopyFrom(src interface{}) { + + o := src.(*RestartPolicy) + *m = *o + if o.Delay != nil { + m.Delay = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Delay, o.Delay) + } + if o.Window != nil { + m.Window = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Window, o.Window) + } +} + +func (m *UpdateConfig) Copy() *UpdateConfig { + if m == nil { + return nil + } + o := &UpdateConfig{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfig) CopyFrom(src interface{}) { + + o := src.(*UpdateConfig) + *m = *o + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Delay, &o.Delay) + if o.Monitor != nil { + m.Monitor = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Monitor, o.Monitor) + } +} + +func (m *UpdateStatus) Copy() *UpdateStatus { + if m == nil { + return 
nil + } + o := &UpdateStatus{} + o.CopyFrom(m) + return o +} + +func (m *UpdateStatus) CopyFrom(src interface{}) { + + o := src.(*UpdateStatus) + *m = *o + if o.StartedAt != nil { + m.StartedAt = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.StartedAt, o.StartedAt) + } + if o.CompletedAt != nil { + m.CompletedAt = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.CompletedAt, o.CompletedAt) + } +} + +func (m *ContainerStatus) Copy() *ContainerStatus { + if m == nil { + return nil + } + o := &ContainerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ContainerStatus) CopyFrom(src interface{}) { + + o := src.(*ContainerStatus) + *m = *o +} + +func (m *PortStatus) Copy() *PortStatus { + if m == nil { + return nil + } + o := &PortStatus{} + o.CopyFrom(m) + return o +} + +func (m *PortStatus) CopyFrom(src interface{}) { + + o := src.(*PortStatus) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *TaskStatus) Copy() *TaskStatus { + if m == nil { + return nil + } + o := &TaskStatus{} + o.CopyFrom(m) + return o +} + +func (m *TaskStatus) CopyFrom(src interface{}) { + + o := src.(*TaskStatus) + *m = *o + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.PortStatus != nil { + m.PortStatus = &PortStatus{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.PortStatus, o.PortStatus) + } + if o.AppliedAt != nil { + m.AppliedAt = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.AppliedAt, o.AppliedAt) + } + if o.RuntimeStatus != nil { + switch o.RuntimeStatus.(type) { + case *TaskStatus_Container: + v := TaskStatus_Container{ + Container: &ContainerStatus{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Container, o.GetContainer()) + m.RuntimeStatus = &v + } + } + +} + +func (m *NetworkAttachmentConfig) Copy() *NetworkAttachmentConfig { + if m == nil { + return nil + } + o := &NetworkAttachmentConfig{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentConfig) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentConfig) + *m = *o + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *IPAMConfig) Copy() *IPAMConfig { + if m == nil { + return nil + } + o := &IPAMConfig{} + o.CopyFrom(m) + return o +} + +func (m *IPAMConfig) CopyFrom(src interface{}) { + + o := src.(*IPAMConfig) + *m = *o + if o.Reserved != nil { + m.Reserved = make(map[string]string, len(o.Reserved)) + for k, v := range o.Reserved { + m.Reserved[k] = v + } + } + +} + +func (m *PortConfig) Copy() *PortConfig { + if m == nil { + return nil + } + o := &PortConfig{} + o.CopyFrom(m) + return o +} + +func (m *PortConfig) CopyFrom(src interface{}) { + + o := src.(*PortConfig) + *m = *o +} + +func (m *Driver) Copy() *Driver { + if m == nil { + return nil + } + o := &Driver{} + o.CopyFrom(m) + return o +} + +func (m *Driver) CopyFrom(src interface{}) { + + o := src.(*Driver) + *m = *o + if 
o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + +} + +func (m *IPAMOptions) Copy() *IPAMOptions { + if m == nil { + return nil + } + o := &IPAMOptions{} + o.CopyFrom(m) + return o +} + +func (m *IPAMOptions) CopyFrom(src interface{}) { + + o := src.(*IPAMOptions) + *m = *o + if o.Driver != nil { + m.Driver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Driver, o.Driver) + } + if o.Configs != nil { + m.Configs = make([]*IPAMConfig, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &IPAMConfig{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *Peer) Copy() *Peer { + if m == nil { + return nil + } + o := &Peer{} + o.CopyFrom(m) + return o +} + +func (m *Peer) CopyFrom(src interface{}) { + + o := src.(*Peer) + *m = *o +} + +func (m *WeightedPeer) Copy() *WeightedPeer { + if m == nil { + return nil + } + o := &WeightedPeer{} + o.CopyFrom(m) + return o +} + +func (m *WeightedPeer) CopyFrom(src interface{}) { + + o := src.(*WeightedPeer) + *m = *o + if o.Peer != nil { + m.Peer = &Peer{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Peer, o.Peer) + } +} + +func (m *IssuanceStatus) Copy() *IssuanceStatus { + if m == nil { + return nil + } + o := &IssuanceStatus{} + o.CopyFrom(m) + return o +} + +func (m *IssuanceStatus) CopyFrom(src interface{}) { + + o := src.(*IssuanceStatus) + *m = *o +} + +func (m *AcceptancePolicy) Copy() *AcceptancePolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy) + *m = *o + if o.Policies != nil { + m.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, len(o.Policies)) + for i := range m.Policies { + m.Policies[i] = &AcceptancePolicy_RoleAdmissionPolicy{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Policies[i], o.Policies[i]) + } + } + +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Copy() *AcceptancePolicy_RoleAdmissionPolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy) + *m = *o + if o.Secret != nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Copy() *AcceptancePolicy_RoleAdmissionPolicy_Secret { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy_Secret) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } +} + +func (m *ExternalCA) Copy() *ExternalCA { + if m == nil { + return nil + } + o := &ExternalCA{} + o.CopyFrom(m) + return o +} + +func (m *ExternalCA) CopyFrom(src interface{}) { + + o := src.(*ExternalCA) + *m = *o + if o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } +} + +func (m *CAConfig) Copy() *CAConfig { + if m == nil { + return nil + } + o := &CAConfig{} + o.CopyFrom(m) + return o +} + 
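
Reviewer note (not part of the vendored file): the generated `Copy`/`CopyFrom` methods above all follow one idiom — a nil-safe `Copy` that allocates and delegates, and a `CopyFrom` that does a shallow struct assignment (`*m = *o`) and then re-allocates every map, slice, and pointer field so the copy shares no memory with the source. A minimal hand-written sketch of that same idiom, using a hypothetical `Spec` type purely for illustration:

package main

import "fmt"

// Spec is a stand-in for a generated message type (hypothetical, for illustration).
type Spec struct {
	Name   string
	Labels map[string]string
	Data   []byte
}

// Copy mirrors the generated pattern: nil-safe, allocate, delegate to CopyFrom.
func (m *Spec) Copy() *Spec {
	if m == nil {
		return nil
	}
	o := &Spec{}
	o.CopyFrom(m)
	return o
}

// CopyFrom copies value fields wholesale, then replaces each reference-typed
// field with a fresh allocation so mutations on the copy cannot leak back.
func (m *Spec) CopyFrom(src interface{}) {
	o := src.(*Spec)
	*m = *o
	if o.Labels != nil {
		m.Labels = make(map[string]string, len(o.Labels))
		for k, v := range o.Labels {
			m.Labels[k] = v
		}
	}
	if o.Data != nil {
		m.Data = make([]byte, len(o.Data))
		copy(m.Data, o.Data)
	}
}

func main() {
	a := &Spec{Name: "x", Labels: map[string]string{"k": "v"}, Data: []byte{1}}
	b := a.Copy()
	b.Labels["k"] = "changed"
	fmt.Println(a.Labels["k"]) // still "v": the copy is deep
}

This is why the generated code needs no reflection at copy time: the generator emits the field-by-field re-allocation statically, and nested messages recurse through the shared deepcopy.Copy helper seen throughout this file.
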
+func (m *CAConfig) CopyFrom(src interface{}) { + + o := src.(*CAConfig) + *m = *o + if o.NodeCertExpiry != nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.NodeCertExpiry, o.NodeCertExpiry) + } + if o.ExternalCAs != nil { + m.ExternalCAs = make([]*ExternalCA, len(o.ExternalCAs)) + for i := range m.ExternalCAs { + m.ExternalCAs[i] = &ExternalCA{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ExternalCAs[i], o.ExternalCAs[i]) + } + } + + if o.SigningCACert != nil { + m.SigningCACert = make([]byte, len(o.SigningCACert)) + copy(m.SigningCACert, o.SigningCACert) + } + if o.SigningCAKey != nil { + m.SigningCAKey = make([]byte, len(o.SigningCAKey)) + copy(m.SigningCAKey, o.SigningCAKey) + } +} + +func (m *OrchestrationConfig) Copy() *OrchestrationConfig { + if m == nil { + return nil + } + o := &OrchestrationConfig{} + o.CopyFrom(m) + return o +} + +func (m *OrchestrationConfig) CopyFrom(src interface{}) { + + o := src.(*OrchestrationConfig) + *m = *o +} + +func (m *TaskDefaults) Copy() *TaskDefaults { + if m == nil { + return nil + } + o := &TaskDefaults{} + o.CopyFrom(m) + return o +} + +func (m *TaskDefaults) CopyFrom(src interface{}) { + + o := src.(*TaskDefaults) + *m = *o + if o.LogDriver != nil { + m.LogDriver = &Driver{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.LogDriver, o.LogDriver) + } +} + +func (m *DispatcherConfig) Copy() *DispatcherConfig { + if m == nil { + return nil + } + o := &DispatcherConfig{} + o.CopyFrom(m) + return o +} + +func (m *DispatcherConfig) CopyFrom(src interface{}) { + + o := src.(*DispatcherConfig) + *m = *o + if o.HeartbeatPeriod != nil { + m.HeartbeatPeriod = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.HeartbeatPeriod, o.HeartbeatPeriod) + } +} + +func (m *RaftConfig) Copy() *RaftConfig { + if m == nil { + return nil + } + o := &RaftConfig{} + o.CopyFrom(m) + return o +} + +func (m *RaftConfig) CopyFrom(src interface{}) { + + o := src.(*RaftConfig) + *m = *o +} + +func (m *EncryptionConfig) Copy() *EncryptionConfig { + if m == nil { + return nil + } + o := &EncryptionConfig{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionConfig) CopyFrom(src interface{}) { + + o := src.(*EncryptionConfig) + *m = *o +} + +func (m *SpreadOver) Copy() *SpreadOver { + if m == nil { + return nil + } + o := &SpreadOver{} + o.CopyFrom(m) + return o +} + +func (m *SpreadOver) CopyFrom(src interface{}) { + + o := src.(*SpreadOver) + *m = *o +} + +func (m *PlacementPreference) Copy() *PlacementPreference { + if m == nil { + return nil + } + o := &PlacementPreference{} + o.CopyFrom(m) + return o +} + +func (m *PlacementPreference) CopyFrom(src interface{}) { + + o := src.(*PlacementPreference) + *m = *o + if o.Preference != nil { + switch o.Preference.(type) { + case *PlacementPreference_Spread: + v := PlacementPreference_Spread{ + Spread: &SpreadOver{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Spread, o.GetSpread()) + m.Preference = &v + } + } + +} + +func (m *Placement) Copy() *Placement { + if m == nil { + return nil + } + o := &Placement{} + o.CopyFrom(m) + return o +} + +func (m *Placement) CopyFrom(src interface{}) { + + o := src.(*Placement) + *m = *o + if o.Constraints != nil { + m.Constraints = make([]string, len(o.Constraints)) + copy(m.Constraints, o.Constraints) + } + + if o.Preferences != nil { + m.Preferences = make([]*PlacementPreference, len(o.Preferences)) + for i := range m.Preferences { + m.Preferences[i] = &PlacementPreference{} + 
github_com_docker_swarmkit_api_deepcopy.Copy(m.Preferences[i], o.Preferences[i]) + } + } + + if o.Platforms != nil { + m.Platforms = make([]*Platform, len(o.Platforms)) + for i := range m.Platforms { + m.Platforms[i] = &Platform{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Platforms[i], o.Platforms[i]) + } + } + +} + +func (m *JoinTokens) Copy() *JoinTokens { + if m == nil { + return nil + } + o := &JoinTokens{} + o.CopyFrom(m) + return o +} + +func (m *JoinTokens) CopyFrom(src interface{}) { + + o := src.(*JoinTokens) + *m = *o +} + +func (m *RootCA) Copy() *RootCA { + if m == nil { + return nil + } + o := &RootCA{} + o.CopyFrom(m) + return o +} + +func (m *RootCA) CopyFrom(src interface{}) { + + o := src.(*RootCA) + *m = *o + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.JoinTokens, &o.JoinTokens) + if o.RootRotation != nil { + m.RootRotation = &RootRotation{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.RootRotation, o.RootRotation) + } +} + +func (m *Certificate) Copy() *Certificate { + if m == nil { + return nil + } + o := &Certificate{} + o.CopyFrom(m) + return o +} + +func (m *Certificate) CopyFrom(src interface{}) { + + o := src.(*Certificate) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } + github_com_docker_swarmkit_api_deepcopy.Copy(&m.Status, &o.Status) + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *EncryptionKey) Copy() *EncryptionKey { + if m == nil { + return nil + } + o := &EncryptionKey{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionKey) CopyFrom(src interface{}) { + + o := src.(*EncryptionKey) + *m = *o + if o.Key != nil { + m.Key = make([]byte, len(o.Key)) + copy(m.Key, o.Key) + } +} + +func (m *ManagerStatus) Copy() *ManagerStatus { + if m == nil { + return nil + } + o := &ManagerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ManagerStatus) CopyFrom(src interface{}) { + + o := src.(*ManagerStatus) + *m = *o +} + +func (m *FileTarget) Copy() *FileTarget { + if m == nil { + return nil + } + o := &FileTarget{} + o.CopyFrom(m) + return o +} + +func (m *FileTarget) CopyFrom(src interface{}) { + + o := src.(*FileTarget) + *m = *o +} + +func (m *SecretReference) Copy() *SecretReference { + if m == nil { + return nil + } + o := &SecretReference{} + o.CopyFrom(m) + return o +} + +func (m *SecretReference) CopyFrom(src interface{}) { + + o := src.(*SecretReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *SecretReference_File: + v := SecretReference_File{ + File: &FileTarget{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + } + } + +} + +func (m *ConfigReference) Copy() *ConfigReference { + if m == nil { + return nil + } + o := &ConfigReference{} + o.CopyFrom(m) + return o +} + +func (m *ConfigReference) CopyFrom(src interface{}) { + + o := src.(*ConfigReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *ConfigReference_File: + v := ConfigReference_File{ + File: &FileTarget{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + } + } + +} + +func (m *BlacklistedCertificate) Copy() *BlacklistedCertificate { + if m == nil { + return nil + } + o := &BlacklistedCertificate{} + o.CopyFrom(m) + return o +} + +func (m 
*BlacklistedCertificate) CopyFrom(src interface{}) { + + o := src.(*BlacklistedCertificate) + *m = *o + if o.Expiry != nil { + m.Expiry = &google_protobuf.Timestamp{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Expiry, o.Expiry) + } +} + +func (m *HealthConfig) Copy() *HealthConfig { + if m == nil { + return nil + } + o := &HealthConfig{} + o.CopyFrom(m) + return o +} + +func (m *HealthConfig) CopyFrom(src interface{}) { + + o := src.(*HealthConfig) + *m = *o + if o.Test != nil { + m.Test = make([]string, len(o.Test)) + copy(m.Test, o.Test) + } + + if o.Interval != nil { + m.Interval = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Interval, o.Interval) + } + if o.Timeout != nil { + m.Timeout = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Timeout, o.Timeout) + } + if o.StartPeriod != nil { + m.StartPeriod = &google_protobuf1.Duration{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.StartPeriod, o.StartPeriod) + } +} + +func (m *MaybeEncryptedRecord) Copy() *MaybeEncryptedRecord { + if m == nil { + return nil + } + o := &MaybeEncryptedRecord{} + o.CopyFrom(m) + return o +} + +func (m *MaybeEncryptedRecord) CopyFrom(src interface{}) { + + o := src.(*MaybeEncryptedRecord) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Nonce != nil { + m.Nonce = make([]byte, len(o.Nonce)) + copy(m.Nonce, o.Nonce) + } +} + +func (m *RootRotation) Copy() *RootRotation { + if m == nil { + return nil + } + o := &RootRotation{} + o.CopyFrom(m) + return o +} + +func (m *RootRotation) CopyFrom(src interface{}) { + + o := src.(*RootRotation) + *m = *o + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CrossSignedCACert != nil { + m.CrossSignedCACert = make([]byte, len(o.CrossSignedCACert)) + copy(m.CrossSignedCACert, o.CrossSignedCACert) + } +} + +func (m *Privileges) Copy() *Privileges { + if m == nil { + return nil + } + o := &Privileges{} + o.CopyFrom(m) + return o +} + +func (m *Privileges) CopyFrom(src interface{}) { + + o := src.(*Privileges) + *m = *o + if o.CredentialSpec != nil { + m.CredentialSpec = &Privileges_CredentialSpec{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.CredentialSpec, o.CredentialSpec) + } + if o.SELinuxContext != nil { + m.SELinuxContext = &Privileges_SELinuxContext{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.SELinuxContext, o.SELinuxContext) + } +} + +func (m *Privileges_CredentialSpec) Copy() *Privileges_CredentialSpec { + if m == nil { + return nil + } + o := &Privileges_CredentialSpec{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_CredentialSpec) CopyFrom(src interface{}) { + + o := src.(*Privileges_CredentialSpec) + *m = *o + if o.Source != nil { + switch o.Source.(type) { + case *Privileges_CredentialSpec_File: + v := Privileges_CredentialSpec_File{ + File: o.GetFile(), + } + m.Source = &v + case *Privileges_CredentialSpec_Registry: + v := Privileges_CredentialSpec_Registry{ + Registry: o.GetRegistry(), + } + m.Source = &v + } + } + +} + +func (m *Privileges_SELinuxContext) Copy() *Privileges_SELinuxContext { + if m == nil { + return nil + } + o := &Privileges_SELinuxContext{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_SELinuxContext) CopyFrom(src interface{}) { + + o := src.(*Privileges_SELinuxContext) + *m = *o +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Index != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + } + return i, nil +} + +func (m *IndexEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IndexEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Val) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + } + return i, nil +} + +func (m *Annotations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Annotations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Indices) > 0 { + for _, msg := range m.Indices { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamedGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *DiscreteGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscreteGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *GenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int 
+ _ = l + if m.Resource != nil { + nn1, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *GenericResource_NamedResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.NamedResourceSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NamedResourceSpec.Size())) + n2, err := m.NamedResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *GenericResource_DiscreteResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.DiscreteResourceSpec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DiscreteResourceSpec.Size())) + n3, err := m.DiscreteResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Resources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NanoCPUs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, msg := range m.Generic { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Limits != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Limits.Size())) + n4, err := m.Limits.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Reservations != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reservations.Size())) + n5, err := m.Reservations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Architecture) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + } + if len(m.OS) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + return i, nil +} + +func (m *PluginDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + 
} + return i, nil +} + +func (m *EngineDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EngineDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.EngineVersion) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EngineVersion))) + i += copy(dAtA[i:], m.EngineVersion) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Plugins) > 0 { + for _, msg := range m.Plugins { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hostname) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + } + if m.Platform != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Platform.Size())) + n6, err := m.Platform.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Resources != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Resources.Size())) + n7, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Engine != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Engine.Size())) + n8, err := m.Engine.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.TLSInfo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TLSInfo.Size())) + n9, err := m.TLSInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.FIPS { + dAtA[i] = 0x30 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *NodeTLSInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeTLSInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TrustRoot) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.TrustRoot))) + i += copy(dAtA[i:], m.TrustRoot) + } + if len(m.CertIssuerSubject) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerSubject))) + i += copy(dAtA[i:], m.CertIssuerSubject) + } + if len(m.CertIssuerPublicKey) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerPublicKey))) + i += copy(dAtA[i:], m.CertIssuerPublicKey) + } + return i, nil +} + +func (m *RaftMemberStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMemberStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader { + dAtA[i] = 0x8 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Reference) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Reference))) + i += copy(dAtA[i:], m.Reference) + } + return i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + } + if len(m.Source) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if len(m.Target) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if m.ReadOnly { + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BindOptions != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BindOptions.Size())) + n10, err := m.BindOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.VolumeOptions != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.VolumeOptions.Size())) + n11, err := m.VolumeOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.TmpfsOptions != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TmpfsOptions.Size())) + n12, err := m.TmpfsOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Consistency != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Consistency)) + } + return i, nil +} + +func (m *Mount_BindOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_BindOptions) MarshalTo(dAtA []byte) (int, error) 
{ + var i int + _ = i + var l int + _ = l + if m.Propagation != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Propagation)) + } + return i, nil +} + +func (m *Mount_VolumeOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_VolumeOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NoCopy { + dAtA[i] = 0x8 + i++ + if m.NoCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.DriverConfig != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DriverConfig.Size())) + n13, err := m.DriverConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *Mount_TmpfsOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_TmpfsOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SizeBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SizeBytes)) + } + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *RestartPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestartPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Condition != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Condition)) + } + if m.Delay != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Delay.Size())) + n14, err := m.Delay.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.MaxAttempts != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxAttempts)) + } + if m.Window != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Window.Size())) + n15, err := m.Window.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *UpdateConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Parallelism)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.Delay))) + n16, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.Delay, dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + if m.FailureAction != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.FailureAction)) + } + if 
m.Monitor != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Monitor.Size())) + n17, err := m.Monitor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.MaxFailureRatio != 0 { + dAtA[i] = 0x2d + i++ + i = encodeFixed32Types(dAtA, i, uint32(math.Float32bits(float32(m.MaxFailureRatio)))) + } + if m.Order != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Order)) + } + return i, nil +} + +func (m *UpdateStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if m.StartedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartedAt.Size())) + n18, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if m.CompletedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CompletedAt.Size())) + n19, err := m.CompletedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if len(m.Message) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.PID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PID)) + } + if m.ExitCode != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ExitCode)) + } + return i, nil +} + +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Timestamp != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp.Size())) + n20, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.State != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Err) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += 
copy(dAtA[i:], m.Err) + } + if m.RuntimeStatus != nil { + nn21, err := m.RuntimeStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn21 + } + if m.PortStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PortStatus.Size())) + n22, err := m.PortStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if len(m.AppliedBy) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppliedBy))) + i += copy(dAtA[i:], m.AppliedBy) + } + if m.AppliedAt != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppliedAt.Size())) + n23, err := m.AppliedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + } + return i, nil +} + +func (m *TaskStatus_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Container.Size())) + n24, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} +func (m *NetworkAttachmentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachmentConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Target) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Family != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Family)) + } + if len(m.Subnet) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subnet))) + i += copy(dAtA[i:], m.Subnet) + } + if len(m.Range) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + } + if len(m.Gateway) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + } + if len(m.Reserved) > 0 { + for k, _ := range m.Reserved { + dAtA[i] = 0x2a + i++ + v := m.Reserved[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, 
uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *PortConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Protocol != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if m.TargetPort != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishMode)) + } + return i, nil +} + +func (m *Driver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Driver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x12 + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Driver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Driver.Size())) + n25, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Peer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *WeightedPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *WeightedPeer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Peer.Size())) + n26, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Weight != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Weight)) + } + return i, nil +} + +func (m *IssuanceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssuanceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Err) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += copy(dAtA[i:], m.Err) + } + return i, nil +} + +func (m *AcceptancePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Policies) > 0 { + for _, msg := range m.Policies { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if m.Autoaccept { + dAtA[i] = 0x10 + i++ + if m.Autoaccept { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Secret != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Secret.Size())) + n27, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Alg) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Alg))) + i += copy(dAtA[i:], m.Alg) + } + return i, nil +} + +func (m *ExternalCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if len(m.URL) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.URL))) + i += copy(dAtA[i:], m.URL) + } + if 
len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x1a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.CACert) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + return i, nil +} + +func (m *CAConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CAConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeCertExpiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NodeCertExpiry.Size())) + n28, err := m.NodeCertExpiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if len(m.ExternalCAs) > 0 { + for _, msg := range m.ExternalCAs { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.SigningCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCACert))) + i += copy(dAtA[i:], m.SigningCACert) + } + if len(m.SigningCAKey) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCAKey))) + i += copy(dAtA[i:], m.SigningCAKey) + } + if m.ForceRotate != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ForceRotate)) + } + return i, nil +} + +func (m *OrchestrationConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrchestrationConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TaskHistoryRetentionLimit)) + } + return i, nil +} + +func (m *TaskDefaults) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDefaults) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LogDriver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogDriver.Size())) + n29, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *DispatcherConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DispatcherConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HeartbeatPeriod != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatPeriod.Size())) + n30, err := m.HeartbeatPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *RaftConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *RaftConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SnapshotInterval != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ElectionTick)) + } + return i, nil +} + +func (m *EncryptionConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AutoLockManagers { + dAtA[i] = 0x8 + i++ + if m.AutoLockManagers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SpreadOver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpreadOver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SpreadDescriptor) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SpreadDescriptor))) + i += copy(dAtA[i:], m.SpreadDescriptor) + } + return i, nil +} + +func (m *PlacementPreference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlacementPreference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Preference != nil { + nn31, err := m.Preference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn31 + } + return i, nil +} + +func (m *PlacementPreference_Spread) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Spread != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Spread.Size())) + n32, err := m.Spread.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} +func (m *Placement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Placement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Preferences) > 0 { + for _, msg := range m.Preferences { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Platforms) > 0 { + for _, msg := range m.Platforms { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *JoinTokens) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinTokens) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Worker) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Worker))) + i += copy(dAtA[i:], m.Worker) + } + if len(m.Manager) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Manager))) + i += copy(dAtA[i:], m.Manager) + } + return i, nil +} + +func (m *RootCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CAKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CACert) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CACertHash) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACertHash))) + i += copy(dAtA[i:], m.CACertHash) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.JoinTokens.Size())) + n33, err := m.JoinTokens.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + if m.RootRotation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.RootRotation.Size())) + n34, err := m.RootRotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + if m.LastForcedRotation != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastForcedRotation)) + } + return i, nil +} + +func (m *Certificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + if len(m.Certificate) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + if len(m.CN) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CN))) + i += copy(dAtA[i:], m.CN) + } + return i, nil +} + +func (m *EncryptionKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Subsystem) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subsystem))) + i += copy(dAtA[i:], m.Subsystem) + } + if m.Algorithm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if 
m.LamportTime != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LamportTime)) + } + return i, nil +} + +func (m *ManagerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.RaftID)) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + if m.Leader { + dAtA[i] = 0x18 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + return i, nil +} + +func (m *FileTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.UID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + } + if len(m.GID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.GID))) + i += copy(dAtA[i:], m.GID) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if len(m.SecretName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + } + if m.Target != nil { + nn36, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn36 + } + return i, nil +} + +func (m *SecretReference_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n37, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} +func (m *ConfigReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if len(m.ConfigName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigName))) + i += copy(dAtA[i:], m.ConfigName) + } + if m.Target != nil { + nn38, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn38 + } + return i, nil +} + +func (m *ConfigReference_File) 
MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n39, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} +func (m *BlacklistedCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlacklistedCertificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Expiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Expiry.Size())) + n40, err := m.Expiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *HealthConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Interval != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Interval.Size())) + n41, err := m.Interval.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + if m.Timeout != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timeout.Size())) + n42, err := m.Timeout.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if m.Retries != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Retries)) + } + if m.StartPeriod != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartPeriod.Size())) + n43, err := m.StartPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *MaybeEncryptedRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaybeEncryptedRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Algorithm != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Nonce) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Nonce))) + i += copy(dAtA[i:], m.Nonce) + } + return i, nil +} + +func (m *RootRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CACert) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CAKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CrossSignedCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CrossSignedCACert))) + i += copy(dAtA[i:], 
m.CrossSignedCACert) + } + return i, nil +} + +func (m *Privileges) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CredentialSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CredentialSpec.Size())) + n44, err := m.CredentialSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.SELinuxContext != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SELinuxContext.Size())) + n45, err := m.SELinuxContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + return i, nil +} + +func (m *Privileges_CredentialSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_CredentialSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Source != nil { + nn46, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn46 + } + return i, nil +} + +func (m *Privileges_CredentialSpec_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.File))) + i += copy(dAtA[i:], m.File) + return i, nil +} +func (m *Privileges_CredentialSpec_Registry) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + return i, nil +} +func (m *Privileges_SELinuxContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_SELinuxContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Disable { + dAtA[i] = 0x8 + i++ + if m.Disable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.User) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Type) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Level) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + } + return i, nil +} + +func encodeFixed64Types(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Types(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Version) Size() (n int) { + var l int + _ 
= l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *IndexEntry) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Val) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Annotations) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Indices) > 0 { + for _, e := range m.Indices { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NamedGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DiscreteGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + return n +} + +func (m *GenericResource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + n += m.Resource.Size() + } + return n +} + +func (m *GenericResource_NamedResourceSpec) Size() (n int) { + var l int + _ = l + if m.NamedResourceSpec != nil { + l = m.NamedResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *GenericResource_DiscreteResourceSpec) Size() (n int) { + var l int + _ = l + if m.DiscreteResourceSpec != nil { + l = m.DiscreteResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Resources) Size() (n int) { + var l int + _ = l + if m.NanoCPUs != 0 { + n += 1 + sovTypes(uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + n += 1 + sovTypes(uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, e := range m.Generic { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Reservations != nil { + l = m.Reservations.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.OS) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PluginDescription) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EngineDescription) Size() (n int) { + var l int + _ = l + l = len(m.EngineVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Plugins) > 0 { + for _, e := range m.Plugins { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NodeDescription) Size() (n int) { + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovTypes(uint64(l)) + } + 
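// Editorial note (a hedged sketch, not part of the generated file): every
// Size() method in this file accumulates `1 + l + sovTypes(uint64(l))` per
// populated length-delimited field, i.e. one key byte, the payload length l,
// and the varint that encodes l itself. Worked example: a 300-byte
// submessage needs sovTypes(300) = 2 bytes for its length (300 >= 128 and
// < 16384), so the field contributes 1 + 300 + 2 = 303 bytes to n.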
if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Engine != nil { + l = m.Engine.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TLSInfo != nil { + l = m.TLSInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *NodeTLSInfo) Size() (n int) { + var l int + _ = l + l = len(m.TrustRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerSubject) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerPublicKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftMemberStatus) Size() (n int) { + var l int + _ = l + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Image) Size() (n int) { + var l int + _ = l + l = len(m.Reference) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + if m.BindOptions != nil { + l = m.BindOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VolumeOptions != nil { + l = m.VolumeOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TmpfsOptions != nil { + l = m.TmpfsOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Consistency != 0 { + n += 1 + sovTypes(uint64(m.Consistency)) + } + return n +} + +func (m *Mount_BindOptions) Size() (n int) { + var l int + _ = l + if m.Propagation != 0 { + n += 1 + sovTypes(uint64(m.Propagation)) + } + return n +} + +func (m *Mount_VolumeOptions) Size() (n int) { + var l int + _ = l + if m.NoCopy { + n += 2 + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if m.DriverConfig != nil { + l = m.DriverConfig.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount_TmpfsOptions) Size() (n int) { + var l int + _ = l + if m.SizeBytes != 0 { + n += 1 + sovTypes(uint64(m.SizeBytes)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *RestartPolicy) Size() (n int) { + var l int + _ = l + if m.Condition != 0 { + n += 1 + sovTypes(uint64(m.Condition)) + } + if m.Delay != nil { + l = m.Delay.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxAttempts != 0 { + n += 1 + sovTypes(uint64(m.MaxAttempts)) + } + if m.Window != nil { + l = m.Window.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *UpdateConfig) Size() (n int) { + var l int + _ = l + if m.Parallelism != 0 { + n += 1 + sovTypes(uint64(m.Parallelism)) + } + l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.Delay) + n += 1 + l + sovTypes(uint64(l)) + if m.FailureAction != 0 { + n += 1 + sovTypes(uint64(m.FailureAction)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovTypes(uint64(l)) + 
} + if m.MaxFailureRatio != 0 { + n += 5 + } + if m.Order != 0 { + n += 1 + sovTypes(uint64(m.Order)) + } + return n +} + +func (m *UpdateStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PID != 0 { + n += 1 + sovTypes(uint64(m.PID)) + } + if m.ExitCode != 0 { + n += 1 + sovTypes(uint64(m.ExitCode)) + } + return n +} + +func (m *PortStatus) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *TaskStatus) Size() (n int) { + var l int + _ = l + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RuntimeStatus != nil { + n += m.RuntimeStatus.Size() + } + if m.PortStatus != nil { + l = m.PortStatus.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppliedBy) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppliedAt != nil { + l = m.AppliedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TaskStatus_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *NetworkAttachmentConfig) Size() (n int) { + var l int + _ = l + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMConfig) Size() (n int) { + var l int + _ = l + if m.Family != 0 { + n += 1 + sovTypes(uint64(m.Family)) + } + l = len(m.Subnet) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Range) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Gateway) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Reserved) > 0 { + for k, v := range m.Reserved { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PortConfig) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + if m.TargetPort != 0 { + n += 1 + sovTypes(uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + n += 1 + sovTypes(uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + n += 1 + sovTypes(uint64(m.PublishMode)) + } + return n +} + +func (m *Driver) Size() (n 
int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMOptions) Size() (n int) { + var l int + _ = l + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Peer) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *WeightedPeer) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovTypes(uint64(m.Weight)) + } + return n +} + +func (m *IssuanceStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy) Size() (n int) { + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + if m.Autoaccept { + n += 2 + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Alg) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExternalCA) Size() (n int) { + var l int + _ = l + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + l = len(m.URL) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *CAConfig) Size() (n int) { + var l int + _ = l + if m.NodeCertExpiry != nil { + l = m.NodeCertExpiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.ExternalCAs) > 0 { + for _, e := range m.ExternalCAs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.SigningCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SigningCAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ForceRotate != 0 { + n += 1 + sovTypes(uint64(m.ForceRotate)) + } + return n +} + +func (m *OrchestrationConfig) Size() (n int) { + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit)) + } + return n +} + +func (m *TaskDefaults) Size() (n int) { + var l int + _ = l + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DispatcherConfig) Size() (n int) { + var l int + _ = l + if m.HeartbeatPeriod != nil { + l = m.HeartbeatPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + 
return n +} + +func (m *RaftConfig) Size() (n int) { + var l int + _ = l + if m.SnapshotInterval != 0 { + n += 1 + sovTypes(uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + n += 1 + sovTypes(uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + n += 1 + sovTypes(uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + n += 1 + sovTypes(uint64(m.ElectionTick)) + } + return n +} + +func (m *EncryptionConfig) Size() (n int) { + var l int + _ = l + if m.AutoLockManagers { + n += 2 + } + return n +} + +func (m *SpreadOver) Size() (n int) { + var l int + _ = l + l = len(m.SpreadDescriptor) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PlacementPreference) Size() (n int) { + var l int + _ = l + if m.Preference != nil { + n += m.Preference.Size() + } + return n +} + +func (m *PlacementPreference_Spread) Size() (n int) { + var l int + _ = l + if m.Spread != nil { + l = m.Spread.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Placement) Size() (n int) { + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Preferences) > 0 { + for _, e := range m.Preferences { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Platforms) > 0 { + for _, e := range m.Platforms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *JoinTokens) Size() (n int) { + var l int + _ = l + l = len(m.Worker) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Manager) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootCA) Size() (n int) { + var l int + _ = l + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACertHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.JoinTokens.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.RootRotation != nil { + l = m.RootRotation.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastForcedRotation != 0 { + n += 1 + sovTypes(uint64(m.LastForcedRotation)) + } + return n +} + +func (m *Certificate) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CN) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EncryptionKey) Size() (n int) { + var l int + _ = l + l = len(m.Subsystem) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LamportTime != 0 { + n += 1 + sovTypes(uint64(m.LamportTime)) + } + return n +} + +func (m *ManagerStatus) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovTypes(uint64(m.RaftID)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + return n +} + +func (m *FileTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.UID) + if l > 0 { + n += 1 + l + 
sovTypes(uint64(l)) + } + l = len(m.GID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SecretName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *SecretReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ConfigReference) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConfigName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *ConfigReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *BlacklistedCertificate) Size() (n int) { + var l int + _ = l + if m.Expiry != nil { + l = m.Expiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HealthConfig) Size() (n int) { + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Retries != 0 { + n += 1 + sovTypes(uint64(m.Retries)) + } + if m.StartPeriod != nil { + l = m.StartPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *MaybeEncryptedRecord) Size() (n int) { + var l int + _ = l + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootRotation) Size() (n int) { + var l int + _ = l + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CrossSignedCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges) Size() (n int) { + var l int + _ = l + if m.CredentialSpec != nil { + l = m.CredentialSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SELinuxContext != nil { + l = m.SELinuxContext.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges_CredentialSpec) Size() (n int) { + var l int + _ = l + if m.Source != nil { + n += m.Source.Size() + } + return n +} + +func (m *Privileges_CredentialSpec_File) Size() (n int) { + var l int + _ = l + l = len(m.File) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_CredentialSpec_Registry) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovTypes(uint64(l)) + return n +} +func (m *Privileges_SELinuxContext) Size() (n int) { + var l int + _ = l + if m.Disable { + n += 2 + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Role) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Level) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return 
n +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Version) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Version{`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `}`, + }, "") + return s +} +func (this *IndexEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IndexEntry{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Val:` + fmt.Sprintf("%v", this.Val) + `,`, + `}`, + }, "") + return s +} +func (this *Annotations) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Annotations{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Indices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Indices), "IndexEntry", "IndexEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamedGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamedGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *DiscreteGenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DiscreteGenericResource{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource{`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_NamedResourceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_NamedResourceSpec{`, + `NamedResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.NamedResourceSpec), "NamedGenericResource", "NamedGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GenericResource_DiscreteResourceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericResource_DiscreteResourceSpec{`, + `DiscreteResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.DiscreteResourceSpec), "DiscreteGenericResource", "DiscreteGenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Resources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resources{`, + `NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`, + `MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`, + `Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`, + `}`, + }, "") + return s +} 
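// Editorial note: the MarshalTo methods earlier in this file hand-roll the
// protobuf wire format. Each key byte is (fieldNumber<<3 | wireType): 0xa is
// field 1 with wire type 2 (length-delimited), 0x12 is field 2, 0x1a field 3,
// 0x22 field 4, while 0x8 is field 1 with wire type 0 (varint). A minimal,
// self-contained sketch of the same encoding follows; appendStringField is a
// hypothetical helper name used only for illustration (valid for field
// numbers below 16), not part of this package:
//
//	// appendStringField appends the key byte, a base-128 varint length, and
//	// the payload, mirroring what the generated code does via
//	// encodeVarintTypes and copy.
//	func appendStringField(buf []byte, fieldNum int, s string) []byte {
//		buf = append(buf, byte(fieldNum<<3|2)) // key byte, wire type 2
//		v := uint64(len(s))
//		for v >= 0x80 { // varint: low 7 bits first, high bit = continuation
//			buf = append(buf, byte(v&0x7f|0x80))
//			v >>= 7
//		}
//		buf = append(buf, byte(v))
//		return append(buf, s...)
//	}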
+func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `}`, + }, "") + return s +} +func (this *PluginDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginDescription{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *EngineDescription) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&EngineDescription{`, + `EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDescription{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`, + `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`, + `TLSInfo:` + strings.Replace(fmt.Sprintf("%v", this.TLSInfo), "NodeTLSInfo", "NodeTLSInfo", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *NodeTLSInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeTLSInfo{`, + `TrustRoot:` + fmt.Sprintf("%v", this.TrustRoot) + `,`, + `CertIssuerSubject:` + fmt.Sprintf("%v", this.CertIssuerSubject) + `,`, + `CertIssuerPublicKey:` + fmt.Sprintf("%v", this.CertIssuerPublicKey) + `,`, + `}`, + }, "") + return s +} +func (this *RaftMemberStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMemberStatus{`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `}`, + }, "") + return s +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, 
+ `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`, + `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`, + `TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`, + `Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_BindOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_BindOptions{`, + `Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_VolumeOptions) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Mount_VolumeOptions{`, + `NoCopy:` + fmt.Sprintf("%v", this.NoCopy) + `,`, + `Labels:` + mapStringForLabels + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_TmpfsOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_TmpfsOptions{`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *RestartPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RestartPolicy{`, + `Condition:` + fmt.Sprintf("%v", this.Condition) + `,`, + `Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`, + `Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfig{`, + `Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`, + `Delay:` + strings.Replace(strings.Replace(this.Delay.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxFailureRatio:` + fmt.Sprintf("%v", this.MaxFailureRatio) + `,`, + `Order:` + fmt.Sprintf("%v", this.Order) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `ContainerID:` + 
fmt.Sprintf("%v", this.ContainerID) + `,`, + `PID:` + fmt.Sprintf("%v", this.PID) + `,`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`, + `PortStatus:` + strings.Replace(fmt.Sprintf("%v", this.PortStatus), "PortStatus", "PortStatus", 1) + `,`, + `AppliedBy:` + fmt.Sprintf("%v", this.AppliedBy) + `,`, + `AppliedAt:` + strings.Replace(fmt.Sprintf("%v", this.AppliedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentConfig) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachmentConfig{`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *IPAMConfig) String() string { + if this == nil { + return "nil" + } + keysForReserved := make([]string, 0, len(this.Reserved)) + for k, _ := range this.Reserved { + keysForReserved = append(keysForReserved, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForReserved) + mapStringForReserved := "map[string]string{" + for _, k := range keysForReserved { + mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k]) + } + mapStringForReserved += "}" + s := strings.Join([]string{`&IPAMConfig{`, + `Family:` + fmt.Sprintf("%v", this.Family) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `Reserved:` + mapStringForReserved + `,`, + `}`, + }, "") + return s +} +func (this *PortConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `PublishedPort:` 
+ fmt.Sprintf("%v", this.PublishedPort) + `,`, + `PublishMode:` + fmt.Sprintf("%v", this.PublishMode) + `,`, + `}`, + }, "") + return s +} +func (this *Driver) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&Driver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *IPAMOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAMOptions{`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Peer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Peer{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPeer{`, + `Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *IssuanceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssuanceStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy{`, + `Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_Secret", "AcceptancePolicy_RoleAdmissionPolicy_Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_Secret{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Alg:` + fmt.Sprintf("%v", this.Alg) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalCA) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&ExternalCA{`, + 
`Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Options:` + mapStringForOptions + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `}`, + }, "") + return s +} +func (this *CAConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CAConfig{`, + `NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "google_protobuf1.Duration", 1) + `,`, + `ExternalCAs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalCAs), "ExternalCA", "ExternalCA", 1) + `,`, + `SigningCACert:` + fmt.Sprintf("%v", this.SigningCACert) + `,`, + `SigningCAKey:` + fmt.Sprintf("%v", this.SigningCAKey) + `,`, + `ForceRotate:` + fmt.Sprintf("%v", this.ForceRotate) + `,`, + `}`, + }, "") + return s +} +func (this *OrchestrationConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OrchestrationConfig{`, + `TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`, + `}`, + }, "") + return s +} +func (this *TaskDefaults) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskDefaults{`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DispatcherConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DispatcherConfig{`, + `HeartbeatPeriod:` + strings.Replace(fmt.Sprintf("%v", this.HeartbeatPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RaftConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftConfig{`, + `SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`, + `KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`, + `LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`, + `HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`, + `ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionConfig{`, + `AutoLockManagers:` + fmt.Sprintf("%v", this.AutoLockManagers) + `,`, + `}`, + }, "") + return s +} +func (this *SpreadOver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SpreadOver{`, + `SpreadDescriptor:` + fmt.Sprintf("%v", this.SpreadDescriptor) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference{`, + `Preference:` + fmt.Sprintf("%v", this.Preference) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference_Spread) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference_Spread{`, + `Spread:` + strings.Replace(fmt.Sprintf("%v", this.Spread), "SpreadOver", "SpreadOver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Placement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Placement{`, + `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`, + `Preferences:` + strings.Replace(fmt.Sprintf("%v", this.Preferences), "PlacementPreference", "PlacementPreference", 1) + `,`, + `Platforms:` + strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "Platform", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *JoinTokens) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinTokens{`, + `Worker:` + fmt.Sprintf("%v", this.Worker) + `,`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `}`, + }, "") + return s +} +func (this *RootCA) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootCA{`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`, + `JoinTokens:` + strings.Replace(strings.Replace(this.JoinTokens.String(), "JoinTokens", "JoinTokens", 1), `&`, ``, 1) + `,`, + `RootRotation:` + strings.Replace(fmt.Sprintf("%v", this.RootRotation), "RootRotation", "RootRotation", 1) + `,`, + `LastForcedRotation:` + fmt.Sprintf("%v", this.LastForcedRotation) + `,`, + `}`, + }, "") + return s +} +func (this *Certificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Certificate{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `CN:` + fmt.Sprintf("%v", this.CN) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionKey{`, + `Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`, + `}`, + }, "") + return s +} +func (this *ManagerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagerStatus{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `}`, + }, "") + return s +} +func (this *FileTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigName:` + fmt.Sprintf("%v", this.ConfigName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference_File) String() string { + if this == nil { + return "nil" + } + s 
:= strings.Join([]string{`&ConfigReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BlacklistedCertificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlacklistedCertificate{`, + `Expiry:` + strings.Replace(fmt.Sprintf("%v", this.Expiry), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HealthConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthConfig{`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Timeout:` + strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Retries:` + fmt.Sprintf("%v", this.Retries) + `,`, + `StartPeriod:` + strings.Replace(fmt.Sprintf("%v", this.StartPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MaybeEncryptedRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaybeEncryptedRecord{`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, + `}`, + }, "") + return s +} +func (this *RootRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootRotation{`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CrossSignedCACert:` + fmt.Sprintf("%v", this.CrossSignedCACert) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges{`, + `CredentialSpec:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSpec), "Privileges_CredentialSpec", "Privileges_CredentialSpec", 1) + `,`, + `SELinuxContext:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxContext), "Privileges_SELinuxContext", "Privileges_SELinuxContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_File{`, + `File:` + fmt.Sprintf("%v", this.File) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_Registry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_Registry{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_SELinuxContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_SELinuxContext{`, + `Disable:` + fmt.Sprintf("%v", this.Disable) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return 
fmt.Sprintf("*%v", pv) +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IndexEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IndexEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IndexEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *Annotations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Annotations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Indices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Indices = append(m.Indices, IndexEntry{}) + if err := m.Indices[len(m.Indices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*DiscreteGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscreteGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscreteGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamedResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NamedGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_NamedResourceSpec{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscreteResourceSpec", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DiscreteGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_DiscreteResourceSpec{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType) + } + m.NanoCPUs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NanoCPUs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType) + } + m.MemoryBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generic = append(m.Generic, &GenericResource{}) + if err := m.Generic[len(m.Generic)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = &Resources{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservations == nil { + m.Reservations = &Resources{} + } + if err := m.Reservations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EngineDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
EngineDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EngineVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EngineVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Plugins = append(m.Plugins, PluginDescription{}) + if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &Resources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Engine == nil { + m.Engine = &EngineDescription{} + } + if err := m.Engine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLSInfo == nil { + m.TLSInfo = &NodeTLSInfo{} + } + if err := m.TLSInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeTLSInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeTLSInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeTLSInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustRoot = append(m.TrustRoot[:0], dAtA[iNdEx:postIndex]...) 
+ if m.TrustRoot == nil { + m.TrustRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerSubject", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerSubject = append(m.CertIssuerSubject[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerSubject == nil { + m.CertIssuerSubject = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerPublicKey = append(m.CertIssuerPublicKey[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerPublicKey == nil { + m.CertIssuerPublicKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftMemberStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (NodeStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Mount_MountType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BindOptions == nil { + m.BindOptions = &Mount_BindOptions{} + } + if err := m.BindOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeOptions == nil { + m.VolumeOptions = &Mount_VolumeOptions{} + } + if err := m.VolumeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TmpfsOptions == nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + } + if err := m.TmpfsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType) + } + m.Consistency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_BindOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BindOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType) + } + m.Propagation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCopy = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = 
postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_TmpfsOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TmpfsOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TmpfsOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= (RestartPolicy_RestartCondition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delay == nil { + m.Delay = &google_protobuf1.Duration{} + } + if err := m.Delay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType) + } + m.MaxAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAttempts |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Window == nil { + m.Window = &google_protobuf1.Duration{} + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + m.Parallelism = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Parallelism |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureAction", wireType) + } + m.FailureAction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureAction |= (UpdateConfig_FailureAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &google_protobuf1.Duration{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailureRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 4 + v = uint32(dAtA[iNdEx-4]) + v |= uint32(dAtA[iNdEx-3]) << 8 + v |= uint32(dAtA[iNdEx-2]) << 16 + v |= uint32(dAtA[iNdEx-1]) << 24 + m.MaxFailureRatio = float32(math.Float32frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + m.Order = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Order |= (UpdateConfig_UpdateOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (UpdateStatus_UpdateState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &google_protobuf.Timestamp{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = &google_protobuf.Timestamp{} + } + if err := m.CompletedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + m.PID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PID |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerStatus{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RuntimeStatus = &TaskStatus_Container{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortStatus == nil { + m.PortStatus = &PortStatus{} + } + if err := m.PortStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliedBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppliedAt == nil { + m.AppliedAt = &google_protobuf.Timestamp{} + } + if err := m.AppliedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
NetworkAttachmentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.DriverAttachmentOpts == nil { + 
m.DriverAttachmentOpts = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.DriverAttachmentOpts[mapkey] = mapvalue + } else { + var mapvalue string + m.DriverAttachmentOpts[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType) + } + m.Family = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Reserved == nil { + m.Reserved = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Reserved[mapkey] = mapvalue + } else { + var mapvalue string + m.Reserved[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *PortConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType) + } + m.PublishedPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishedPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishMode", wireType) + } + m.PublishMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishMode |= (PortConfig_PublishMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Driver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Driver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &IPAMConfig{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + m.Peer = &Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssuanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (IssuanceStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Autoaccept = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], 
dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (ExternalCA_CAProtocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
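+ // NOTE: editorial annotation for readers of this diff; the generated file itself carries no comments.
+ // Bytes fields are decoded by copying the wire payload into the field's existing backing
+ // array via append(dst[:0], src...), then normalizing a nil result to an empty slice so
+ // that a present-but-empty field is distinguishable from an unset one after a round-trip.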
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CAConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CAConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeCertExpiry == nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + } + if err := m.NodeCertExpiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCAs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCAs = append(m.ExternalCAs, &ExternalCA{}) + if err := m.ExternalCAs[len(m.ExternalCAs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCACert = append(m.SigningCACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.SigningCACert == nil { + m.SigningCACert = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCAKey = append(m.SigningCAKey[:0], dAtA[iNdEx:postIndex]...) + if m.SigningCAKey == nil { + m.SigningCAKey = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForceRotate", wireType) + } + m.ForceRotate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ForceRotate |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrchestrationConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrchestrationConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrchestrationConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskHistoryRetentionLimit", wireType) + } + m.TaskHistoryRetentionLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TaskHistoryRetentionLimit |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskDefaults) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskDefaults: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskDefaults: illegal tag %d (wire type %d)", fieldNum, wire) + 
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DispatcherConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DispatcherConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DispatcherConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HeartbeatPeriod == nil { + m.HeartbeatPeriod = &google_protobuf1.Duration{} + } + if err := m.HeartbeatPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterval", wireType) + } + m.SnapshotInterval = 0 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SnapshotInterval |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field KeepOldSnapshots", wireType) + } + m.KeepOldSnapshots = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.KeepOldSnapshots |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogEntriesForSlowFollowers", wireType) + } + m.LogEntriesForSlowFollowers = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogEntriesForSlowFollowers |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTick", wireType) + } + m.HeartbeatTick = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HeartbeatTick |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ElectionTick", wireType) + } + m.ElectionTick = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ElectionTick |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EncryptionConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EncryptionConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EncryptionConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLockManagers", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLockManagers = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *SpreadOver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpreadOver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpreadOver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpreadDescriptor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpreadDescriptor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlacementPreference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlacementPreference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlacementPreference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spread", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SpreadOver{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Preference = &PlacementPreference_Spread{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Placement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Placement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Placement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Constraints = append(m.Constraints, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Preferences = append(m.Preferences, &PlacementPreference{}) + if err := m.Preferences[len(m.Preferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Platforms = append(m.Platforms, &Platform{}) + if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinTokens) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinTokens: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinTokens: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Worker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...) + if m.CAKey == nil { + m.CAKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACertHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JoinTokens", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JoinTokens.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootRotation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RootRotation == nil { + m.RootRotation = &RootRotation{} + } + if err := m.RootRotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastForcedRotation", wireType) + } + m.LastForcedRotation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastForcedRotation |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Certificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Certificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) + if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EncryptionKey) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EncryptionKey: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EncryptionKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsystem = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType) + } + m.Algorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Algorithm |= (EncryptionKey_Algorithm(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LamportTime", wireType) + } + m.LamportTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LamportTime |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
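Every generated Unmarshal method in this vendored file repeats the same inlined pattern: a base-128 varint loop that reads a key, splits it into field number (`wire >> 3`) and wire type (`wire & 0x7`), then varint-decodes or length-delimits the payload. A minimal standalone sketch of that loop, with illustrative names that are not part of the vendored package:

package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("varint overflows a 64-bit integer")

// decodeVarint mirrors the loop the generated code inlines at every field:
// it accumulates 7 payload bits per byte, little-endian, until it sees a
// byte with the high (continuation) bit clear.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if n >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: payload 0x2C (44) plus 0x02<<7 (256).
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>

	// A field key decodes the same way, then splits:
	key, _, _ := decodeVarint([]byte{0x1A}) // field 3, wire type 2
	fmt.Println(key>>3, key&0x7)            // 3 2
}

For wire type 2 (length-delimited), the generated code follows this with a second varint giving the byte length, bounds-checks `postIndex := iNdEx + length` against `len(dAtA)`, then slices out the string, bytes, or nested message, exactly as seen in the methods above and below.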
+func (m *FileTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileTarget{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &SecretReference_File{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileTarget{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &ConfigReference_File{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlacklistedCertificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlacklistedCertificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlacklistedCertificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Expiry == nil { + m.Expiry = &google_protobuf.Timestamp{} + } + if err := m.Expiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Test = append(m.Test, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Interval == nil { + m.Interval = &google_protobuf1.Duration{} + } + if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &google_protobuf1.Duration{} + } + if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType) + } + m.Retries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Retries |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartPeriod", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { 
+ break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartPeriod == nil { + m.StartPeriod = &google_protobuf1.Duration{} + } + if err := m.StartPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaybeEncryptedRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaybeEncryptedRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaybeEncryptedRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType) + } + m.Algorithm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Algorithm |= (MaybeEncryptedRecord_Algorithm(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nonce = append(m.Nonce[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Nonce == nil { + m.Nonce = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootRotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootRotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootRotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) + if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...) + if m.CAKey == nil { + m.CAKey = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CrossSignedCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CrossSignedCACert = append(m.CrossSignedCACert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CrossSignedCACert == nil { + m.CrossSignedCACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Privileges: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Privileges: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CredentialSpec == nil { + m.CredentialSpec = &Privileges_CredentialSpec{} + } + if err := m.CredentialSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxContext == nil { + m.SELinuxContext = &Privileges_SELinuxContext{} + } + if err := m.SELinuxContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_CredentialSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_File{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_Registry{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_SELinuxContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disable = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx 
+= 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 5116 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x5d, 0x6c, 0x23, 0x59, + 0x56, 0x7f, 0xec, 0xd8, 0x8e, 0x7d, 0xec, 0x24, 0xd5, 0xb7, 0xb3, 0x3d, 0x69, 0x6f, 0x4f, 0xe2, + 0xa9, 0x99, 0xde, 0x99, 0xed, 0x9d, 0xbf, 0xfb, 0x6b, 0x77, 0xd5, 0x33, 0xf3, 0xdf, 0x9d, 0xb1, + 0xcb, 0x95, 0x8e, 0xb7, 0xd3, 0xb6, 0x75, 0xed, 0x74, 0xef, 0x22, 0x41, 0x51, 0xa9, 0xba, 0x71, + 0x6a, 0x52, 0xae, 0x6b, 0xaa, 0xca, 0xe9, 0x36, 0x0b, 0x62, 0xc4, 0x03, 0xa0, 0x3c, 0xc1, 0x0b, + 0x2c, 0x42, 0x41, 0x48, 0xf0, 0xc6, 0x03, 0x0f, 0x20, 0x21, 0x78, 0x1a, 0x24, 0x84, 0x56, 0xbc, + 0xc0, 0x82, 0x84, 0x56, 0x20, 0x05, 0x36, 0x0f, 0xbc, 0xad, 0xe0, 0x05, 0xf1, 0xc2, 0x03, 0xba, + 0x1f, 0x55, 0xae, 0xb8, 0x2b, 0xc9, 0x0c, 0xbb, 0x2f, 0x89, 0xef, 0x39, 0xbf, 0x73, 0xee, 0xbd, + 0xe7, 0xde, 0x7b, 0xee, 0x39, 0xe7, 0x16, 0xdc, 0x19, 0x3a, 0xe1, 0xc1, 0x64, 0xaf, 0x6e, 0xd1, + 0xd1, 0x5d, 0x9b, 0x5a, 0x87, 0xc4, 0xbf, 0x1b, 0xbc, 0x30, 0xfd, 0xd1, 0xa1, 0x13, 0xde, 0x35, + 0xc7, 0xce, 0xdd, 0x70, 0x3a, 0x26, 0x41, 0x7d, 0xec, 0xd3, 0x90, 0x22, 0x24, 0x00, 0xf5, 0x08, + 0x50, 0x3f, 0xba, 0x5f, 0xdd, 0x1c, 0x52, 0x3a, 0x74, 0xc9, 0x5d, 0x8e, 0xd8, 0x9b, 0xec, 0xdf, + 0x0d, 0x9d, 0x11, 0x09, 0x42, 0x73, 0x34, 0x16, 0x42, 0xd5, 0x8d, 0x79, 0x80, 0x3d, 0xf1, 0xcd, + 0xd0, 0xa1, 0x9e, 0xe4, 0xaf, 0x0d, 0xe9, 0x90, 0xf2, 0x9f, 0x77, 0xd9, 0x2f, 0x41, 0x55, 0x37, + 0x61, 0xe9, 0x19, 0xf1, 0x03, 0x87, 0x7a, 0x68, 0x0d, 0xf2, 0x8e, 0x67, 0x93, 0x97, 0xeb, 0x99, + 0x5a, 0xe6, 0x9d, 0x1c, 0x16, 0x0d, 0xf5, 0x1e, 0x40, 0x9b, 0xfd, 0xd0, 0xbd, 0xd0, 0x9f, 0x22, + 0x05, 0x16, 0x0f, 0xc9, 0x94, 0x23, 0x4a, 0x98, 0xfd, 0x64, 0x94, 0x23, 0xd3, 0x5d, 0xcf, 0x0a, + 0xca, 0x91, 0xe9, 0xaa, 0x3f, 0xca, 0x40, 0xb9, 0xe1, 0x79, 0x34, 0xe4, 0xbd, 0x07, 0x08, 0x41, + 0xce, 0x33, 0x47, 0x44, 0x0a, 0xf1, 0xdf, 0x48, 0x83, 0x82, 0x6b, 0xee, 0x11, 0x37, 0x58, 0xcf, + 0xd6, 0x16, 0xdf, 0x29, 0x3f, 0xf8, 0x4a, 0xfd, 0xd5, 0x29, 0xd7, 0x13, 0x4a, 0xea, 0x3b, 0x1c, + 0xcd, 0x07, 0x81, 0xa5, 0x28, 0xfa, 0x26, 0x2c, 0x39, 0x9e, 0xed, 0x58, 0x24, 0x58, 0xcf, 0x71, + 0x2d, 0x1b, 0x69, 0x5a, 0x66, 0xa3, 0x6f, 0xe6, 0xbe, 0x7f, 0xba, 0xb9, 0x80, 0x23, 0xa1, 0xea, + 0x7b, 0x50, 0x4e, 0xa8, 0x4d, 0x99, 0xdb, 0x1a, 0xe4, 0x8f, 0x4c, 0x77, 0x42, 0xe4, 0xec, 0x44, + 0xe3, 0xfd, 0xec, 0xa3, 0x8c, 0xfa, 0x11, 0xac, 0x75, 0xcc, 0x11, 0xb1, 0x1f, 0x13, 0x8f, 0xf8, + 0x8e, 0x85, 0x49, 0x40, 0x27, 0xbe, 0x45, 0xd8, 0x5c, 0x0f, 0x1d, 0xcf, 0x8e, 0xe6, 0xca, 0x7e, + 0xa7, 0x6b, 0x51, 0x35, 0x78, 0xad, 0xe5, 0x04, 0x96, 0x4f, 0x42, 0xf2, 0xb9, 0x95, 0x2c, 0x46, + 0x4a, 0x4e, 0x33, 0xb0, 0x3a, 0x2f, 0xfd, 0x33, 0x70, 0x9d, 0x99, 0xd8, 0x36, 0x7c, 0x49, 0x31, + 0x82, 0x31, 0xb1, 0xb8, 0xb2, 0xf2, 0x83, 0x77, 0xd2, 0x2c, 0x94, 0x36, 0x93, 0xed, 0x05, 0x7c, + 0x8d, 0xab, 0x89, 0x08, 0xfd, 0x31, 0xb1, 0x90, 0x05, 0x37, 0x6c, 0x39, 0xe8, 0x39, 0xf5, 0x59, + 0xae, 0x3e, 0x75, 0x19, 0x2f, 0x98, 0xe6, 0xf6, 0x02, 0x5e, 0x8b, 0x94, 0x25, 0x3b, 0x69, 0x02, + 0x14, 0x23, 0xdd, 0xea, 0xf7, 0x32, 0x50, 0x8a, 0x98, 0x01, 0xfa, 0x32, 0x94, 0x3c, 0xd3, 0xa3, + 0x86, 0x35, 0x9e, 0x04, 
0x7c, 0x42, 0x8b, 0xcd, 0xca, 0xd9, 0xe9, 0x66, 0xb1, 0x63, 0x7a, 0x54, + 0xeb, 0xed, 0x06, 0xb8, 0xc8, 0xd8, 0xda, 0x78, 0x12, 0xa0, 0x37, 0xa0, 0x32, 0x22, 0x23, 0xea, + 0x4f, 0x8d, 0xbd, 0x69, 0x48, 0x02, 0x69, 0xb6, 0xb2, 0xa0, 0x35, 0x19, 0x09, 0x7d, 0x03, 0x96, + 0x86, 0x62, 0x48, 0xeb, 0x8b, 0x7c, 0xfb, 0xbc, 0x99, 0x36, 0xfa, 0xb9, 0x51, 0xe3, 0x48, 0x46, + 0xfd, 0xcd, 0x0c, 0xac, 0xc5, 0x54, 0xf2, 0x0b, 0x13, 0xc7, 0x27, 0x23, 0xe2, 0x85, 0x01, 0xfa, + 0x1a, 0x14, 0x5c, 0x67, 0xe4, 0x84, 0x81, 0xb4, 0xf9, 0xeb, 0x69, 0x6a, 0xe3, 0x49, 0x61, 0x09, + 0x46, 0x0d, 0xa8, 0xf8, 0x24, 0x20, 0xfe, 0x91, 0xd8, 0xf1, 0xd2, 0xa2, 0x57, 0x08, 0x9f, 0x13, + 0x51, 0xb7, 0xa0, 0xd8, 0x73, 0xcd, 0x70, 0x9f, 0xfa, 0x23, 0xa4, 0x42, 0xc5, 0xf4, 0xad, 0x03, + 0x27, 0x24, 0x56, 0x38, 0xf1, 0xa3, 0xd3, 0x77, 0x8e, 0x86, 0x6e, 0x40, 0x96, 0x8a, 0x8e, 0x4a, + 0xcd, 0xc2, 0xd9, 0xe9, 0x66, 0xb6, 0xdb, 0xc7, 0x59, 0x1a, 0xa8, 0x1f, 0xc0, 0xb5, 0x9e, 0x3b, + 0x19, 0x3a, 0x5e, 0x8b, 0x04, 0x96, 0xef, 0x8c, 0x99, 0x76, 0xb6, 0x2b, 0x99, 0x8f, 0x8a, 0x76, + 0x25, 0xfb, 0x1d, 0x1f, 0xed, 0xec, 0xec, 0x68, 0xab, 0xbf, 0x9e, 0x85, 0x6b, 0xba, 0x37, 0x74, + 0x3c, 0x92, 0x94, 0xbe, 0x0d, 0x2b, 0x84, 0x13, 0x8d, 0x23, 0xe1, 0x6e, 0xa4, 0x9e, 0x65, 0x41, + 0x8d, 0x7c, 0x50, 0x7b, 0xce, 0x2f, 0xdc, 0x4f, 0x9b, 0xfe, 0x2b, 0xda, 0x53, 0xbd, 0x83, 0x0e, + 0x4b, 0x63, 0x3e, 0x89, 0x40, 0x2e, 0xef, 0xed, 0x34, 0x5d, 0xaf, 0xcc, 0x33, 0x72, 0x12, 0x52, + 0xf6, 0x27, 0x71, 0x12, 0x7f, 0x9b, 0x85, 0xd5, 0x0e, 0xb5, 0xcf, 0xd9, 0xa1, 0x0a, 0xc5, 0x03, + 0x1a, 0x84, 0x09, 0x87, 0x18, 0xb7, 0xd1, 0x23, 0x28, 0x8e, 0xe5, 0xf2, 0xc9, 0xd5, 0xbf, 0x95, + 0x3e, 0x64, 0x81, 0xc1, 0x31, 0x1a, 0x7d, 0x00, 0xa5, 0xe8, 0xc8, 0xb0, 0xd9, 0x7e, 0x86, 0x8d, + 0x33, 0xc3, 0xa3, 0x6f, 0x40, 0x41, 0x2c, 0xc2, 0x7a, 0x8e, 0x4b, 0xde, 0xfe, 0x4c, 0x36, 0xc7, + 0x52, 0x08, 0x3d, 0x86, 0x62, 0xe8, 0x06, 0x86, 0xe3, 0xed, 0xd3, 0xf5, 0x3c, 0x57, 0xb0, 0x99, + 0xea, 0x64, 0xa8, 0x4d, 0x06, 0x3b, 0xfd, 0xb6, 0xb7, 0x4f, 0x9b, 0xe5, 0xb3, 0xd3, 0xcd, 0x25, + 0xd9, 0xc0, 0x4b, 0xa1, 0x1b, 0xb0, 0x1f, 0xe8, 0x16, 0xe4, 0xf6, 0x9d, 0x71, 0xb0, 0x5e, 0xa8, + 0x65, 0xde, 0x29, 0x36, 0x8b, 0x67, 0xa7, 0x9b, 0xb9, 0xad, 0x76, 0xaf, 0x8f, 0x39, 0x55, 0xfd, + 0xad, 0x0c, 0x94, 0x13, 0x3a, 0xd0, 0xeb, 0x00, 0xa1, 0x3f, 0x09, 0x42, 0xc3, 0xa7, 0x34, 0xe4, + 0xa6, 0xac, 0xe0, 0x12, 0xa7, 0x60, 0x4a, 0x43, 0x54, 0x87, 0xeb, 0x16, 0xf1, 0x43, 0xc3, 0x09, + 0x82, 0x09, 0xf1, 0x8d, 0x60, 0xb2, 0xf7, 0x31, 0xb1, 0x42, 0x6e, 0xd6, 0x0a, 0xbe, 0xc6, 0x58, + 0x6d, 0xce, 0xe9, 0x0b, 0x06, 0x7a, 0x08, 0x37, 0x92, 0xf8, 0xf1, 0x64, 0xcf, 0x75, 0x2c, 0x83, + 0x2d, 0xf5, 0x22, 0x17, 0xb9, 0x3e, 0x13, 0xe9, 0x71, 0xde, 0x13, 0x32, 0x55, 0x7f, 0x98, 0x01, + 0x05, 0x9b, 0xfb, 0xe1, 0x53, 0x32, 0xda, 0x23, 0x7e, 0x3f, 0x34, 0xc3, 0x49, 0x80, 0x6e, 0x40, + 0xc1, 0x25, 0xa6, 0x4d, 0x7c, 0x3e, 0xa8, 0x22, 0x96, 0x2d, 0xb4, 0xcb, 0xce, 0xb7, 0x69, 0x1d, + 0x98, 0x7b, 0x8e, 0xeb, 0x84, 0x53, 0x3e, 0x94, 0x95, 0xf4, 0x0d, 0x3e, 0xaf, 0xb3, 0x8e, 0x13, + 0x82, 0xf8, 0x9c, 0x1a, 0xb4, 0x0e, 0x4b, 0x23, 0x12, 0x04, 0xe6, 0x90, 0xf0, 0x91, 0x96, 0x70, + 0xd4, 0x54, 0x3f, 0x80, 0x4a, 0x52, 0x0e, 0x95, 0x61, 0x69, 0xb7, 0xf3, 0xa4, 0xd3, 0x7d, 0xde, + 0x51, 0x16, 0xd0, 0x2a, 0x94, 0x77, 0x3b, 0x58, 0x6f, 0x68, 0xdb, 0x8d, 0xe6, 0x8e, 0xae, 0x64, + 0xd0, 0x32, 0x94, 0x66, 0xcd, 0xac, 0xfa, 0xa7, 0x19, 0x00, 0x66, 0x6e, 0x39, 0xa9, 0xf7, 0x21, + 0x1f, 0x84, 0x66, 0x28, 0xf6, 0xec, 0xca, 0x83, 0xb7, 0x2e, 0x5a, 0x61, 0x39, 0x5e, 0xf6, 0x8f, + 0x60, 0x21, 0x92, 0x1c, 0x61, 0xf6, 0xdc, 0x08, 
0x99, 0xfb, 0x30, 0x6d, 0xdb, 0x97, 0x03, 0xe7, + 0xbf, 0xd5, 0x0f, 0x20, 0xcf, 0xa5, 0xcf, 0x0f, 0xb7, 0x08, 0xb9, 0x16, 0xfb, 0x95, 0x41, 0x25, + 0xc8, 0x63, 0xbd, 0xd1, 0xfa, 0x8e, 0x92, 0x45, 0x0a, 0x54, 0x5a, 0xed, 0xbe, 0xd6, 0xed, 0x74, + 0x74, 0x6d, 0xa0, 0xb7, 0x94, 0x45, 0xf5, 0x36, 0xe4, 0xdb, 0x23, 0xa6, 0xf9, 0x16, 0x3b, 0x10, + 0xfb, 0xc4, 0x27, 0x9e, 0x15, 0x9d, 0xb3, 0x19, 0x41, 0xfd, 0x71, 0x19, 0xf2, 0x4f, 0xe9, 0xc4, + 0x0b, 0xd1, 0x83, 0x84, 0x53, 0x5b, 0x49, 0x8f, 0x1f, 0x38, 0xb0, 0x3e, 0x98, 0x8e, 0x89, 0x74, + 0x7a, 0x37, 0xa0, 0x20, 0x8e, 0x8e, 0x9c, 0x8e, 0x6c, 0x31, 0x7a, 0x68, 0xfa, 0x43, 0x12, 0xca, + 0xf9, 0xc8, 0x16, 0x7a, 0x87, 0xdd, 0x67, 0xa6, 0x4d, 0x3d, 0x77, 0xca, 0x4f, 0x58, 0x51, 0x5c, + 0x5a, 0x98, 0x98, 0x76, 0xd7, 0x73, 0xa7, 0x38, 0xe6, 0xa2, 0x6d, 0xa8, 0xec, 0x39, 0x9e, 0x6d, + 0xd0, 0xb1, 0xb8, 0x02, 0xf2, 0x17, 0x9f, 0x47, 0x31, 0xaa, 0xa6, 0xe3, 0xd9, 0x5d, 0x01, 0xc6, + 0xe5, 0xbd, 0x59, 0x03, 0x75, 0x60, 0xe5, 0x88, 0xba, 0x93, 0x11, 0x89, 0x75, 0x15, 0xb8, 0xae, + 0xb7, 0x2f, 0xd6, 0xf5, 0x8c, 0xe3, 0x23, 0x6d, 0xcb, 0x47, 0xc9, 0x26, 0x7a, 0x02, 0xcb, 0xe1, + 0x68, 0xbc, 0x1f, 0xc4, 0xea, 0x96, 0xb8, 0xba, 0x2f, 0x5d, 0x62, 0x30, 0x06, 0x8f, 0xb4, 0x55, + 0xc2, 0x44, 0x0b, 0x3d, 0x86, 0xb2, 0x45, 0xbd, 0xc0, 0x09, 0x42, 0xe2, 0x59, 0xd3, 0xf5, 0x22, + 0xb7, 0xfd, 0x25, 0xb3, 0xd4, 0x66, 0x60, 0x9c, 0x94, 0xac, 0xfe, 0xea, 0x22, 0x94, 0x13, 0x26, + 0x40, 0x7d, 0x28, 0x8f, 0x7d, 0x3a, 0x36, 0x87, 0xfc, 0x3e, 0x94, 0x8b, 0x7a, 0xff, 0x33, 0x99, + 0xaf, 0xde, 0x9b, 0x09, 0xe2, 0xa4, 0x16, 0xf5, 0x24, 0x0b, 0xe5, 0x04, 0x13, 0xdd, 0x81, 0x22, + 0xee, 0xe1, 0xf6, 0xb3, 0xc6, 0x40, 0x57, 0x16, 0xaa, 0xb7, 0x8e, 0x4f, 0x6a, 0xeb, 0x5c, 0x5b, + 0x52, 0x41, 0xcf, 0x77, 0x8e, 0xd8, 0x1e, 0x7e, 0x07, 0x96, 0x22, 0x68, 0xa6, 0xfa, 0xc5, 0xe3, + 0x93, 0xda, 0x6b, 0xf3, 0xd0, 0x04, 0x12, 0xf7, 0xb7, 0x1b, 0x58, 0x6f, 0x29, 0xd9, 0x74, 0x24, + 0xee, 0x1f, 0x98, 0x3e, 0xb1, 0xd1, 0x97, 0xa0, 0x20, 0x81, 0x8b, 0xd5, 0xea, 0xf1, 0x49, 0xed, + 0xc6, 0x3c, 0x70, 0x86, 0xc3, 0xfd, 0x9d, 0xc6, 0x33, 0x5d, 0xc9, 0xa5, 0xe3, 0x70, 0xdf, 0x35, + 0x8f, 0x08, 0x7a, 0x0b, 0xf2, 0x02, 0x96, 0xaf, 0xde, 0x3c, 0x3e, 0xa9, 0x7d, 0xe1, 0x15, 0x75, + 0x0c, 0x55, 0x5d, 0xff, 0x8d, 0x3f, 0xdc, 0x58, 0xf8, 0xcb, 0x3f, 0xda, 0x50, 0xe6, 0xd9, 0xd5, + 0xff, 0xc9, 0xc0, 0xf2, 0xb9, 0xbd, 0x83, 0x54, 0x28, 0x78, 0xd4, 0xa2, 0x63, 0x71, 0x4d, 0x16, + 0x9b, 0x70, 0x76, 0xba, 0x59, 0xe8, 0x50, 0x8d, 0x8e, 0xa7, 0x58, 0x72, 0xd0, 0x93, 0xb9, 0x8b, + 0xfe, 0xe1, 0x67, 0xdc, 0x98, 0xa9, 0x57, 0xfd, 0x87, 0xb0, 0x6c, 0xfb, 0xce, 0x11, 0xf1, 0x0d, + 0x8b, 0x7a, 0xfb, 0xce, 0x50, 0x5e, 0x81, 0xd5, 0xd4, 0x68, 0x94, 0x03, 0x71, 0x45, 0x08, 0x68, + 0x1c, 0xff, 0x13, 0x5c, 0xf2, 0xd5, 0x67, 0x50, 0x49, 0x6e, 0x75, 0x76, 0x2f, 0x05, 0xce, 0x2f, + 0x12, 0x19, 0x76, 0xf2, 0x20, 0x15, 0x97, 0x18, 0x45, 0x04, 0x9d, 0x6f, 0x43, 0x6e, 0x44, 0x6d, + 0xa1, 0x67, 0xb9, 0x79, 0x9d, 0xc5, 0x1a, 0xff, 0x7c, 0xba, 0x59, 0xa6, 0x41, 0x7d, 0xcb, 0x71, + 0xc9, 0x53, 0x6a, 0x13, 0xcc, 0x01, 0xea, 0x11, 0xe4, 0x98, 0xcf, 0x41, 0x5f, 0x84, 0x5c, 0xb3, + 0xdd, 0x69, 0x29, 0x0b, 0xd5, 0x6b, 0xc7, 0x27, 0xb5, 0x65, 0x6e, 0x12, 0xc6, 0x60, 0x7b, 0x17, + 0x6d, 0x42, 0xe1, 0x59, 0x77, 0x67, 0xf7, 0x29, 0xdb, 0x5e, 0xd7, 0x8f, 0x4f, 0x6a, 0xab, 0x31, + 0x5b, 0x18, 0x0d, 0xbd, 0x0e, 0xf9, 0xc1, 0xd3, 0xde, 0x56, 0x5f, 0xc9, 0x56, 0xd1, 0xf1, 0x49, + 0x6d, 0x25, 0xe6, 0xf3, 0x31, 0x57, 0xaf, 0xc9, 0x55, 0x2d, 0xc5, 0x74, 0xf5, 0x07, 0x19, 0x28, + 0x27, 0x0e, 0x1c, 0xdb, 0x98, 0x2d, 0x7d, 0xab, 0xb1, 0xbb, 0x33, 0x50, 
0x16, 0x12, 0x1b, 0x33, + 0x01, 0x69, 0x91, 0x7d, 0x73, 0xe2, 0x32, 0x3f, 0x07, 0x5a, 0xb7, 0xd3, 0x6f, 0xf7, 0x07, 0x7a, + 0x67, 0xa0, 0x64, 0xaa, 0xeb, 0xc7, 0x27, 0xb5, 0xb5, 0x79, 0xf0, 0xd6, 0xc4, 0x75, 0xd9, 0xd6, + 0xd4, 0x1a, 0xda, 0x36, 0xdf, 0xeb, 0xb3, 0xad, 0x99, 0x40, 0x69, 0xa6, 0x75, 0x40, 0x6c, 0xf4, + 0x2e, 0x94, 0x5a, 0xfa, 0x8e, 0xfe, 0xb8, 0xc1, 0xbd, 0x7b, 0xf5, 0xf5, 0xe3, 0x93, 0xda, 0xcd, + 0x57, 0x7b, 0x77, 0xc9, 0xd0, 0x0c, 0x89, 0x3d, 0xb7, 0x45, 0x13, 0x10, 0xf5, 0xbf, 0xb2, 0xb0, + 0x8c, 0x59, 0xb2, 0xec, 0x87, 0x3d, 0xea, 0x3a, 0xd6, 0x14, 0xf5, 0xa0, 0x64, 0x51, 0xcf, 0x76, + 0x12, 0x7e, 0xe2, 0xc1, 0x05, 0x01, 0xd3, 0x4c, 0x2a, 0x6a, 0x69, 0x91, 0x24, 0x9e, 0x29, 0x41, + 0x77, 0x21, 0x6f, 0x13, 0xd7, 0x9c, 0xca, 0xc8, 0xed, 0x66, 0x5d, 0xa4, 0xe3, 0xf5, 0x28, 0x1d, + 0xaf, 0xb7, 0x64, 0x3a, 0x8e, 0x05, 0x8e, 0x67, 0x28, 0xe6, 0x4b, 0xc3, 0x0c, 0x43, 0x32, 0x1a, + 0x87, 0x22, 0x6c, 0xcb, 0xe1, 0xf2, 0xc8, 0x7c, 0xd9, 0x90, 0x24, 0x74, 0x1f, 0x0a, 0x2f, 0x1c, + 0xcf, 0xa6, 0x2f, 0x64, 0x64, 0x76, 0x89, 0x52, 0x09, 0x54, 0x8f, 0x59, 0x48, 0x32, 0x37, 0x4c, + 0xb6, 0x87, 0x3a, 0xdd, 0x8e, 0x1e, 0xed, 0x21, 0xc9, 0xef, 0x7a, 0x1d, 0xea, 0xb1, 0xf3, 0x0f, + 0xdd, 0x8e, 0xb1, 0xd5, 0x68, 0xef, 0xec, 0x62, 0xb6, 0x8f, 0xd6, 0x8e, 0x4f, 0x6a, 0x4a, 0x0c, + 0xd9, 0x32, 0x1d, 0x97, 0xa5, 0x0a, 0x37, 0x61, 0xb1, 0xd1, 0xf9, 0x8e, 0x92, 0xad, 0x2a, 0xc7, + 0x27, 0xb5, 0x4a, 0xcc, 0x6e, 0x78, 0xd3, 0x99, 0xdd, 0xe7, 0xfb, 0x55, 0xff, 0x6e, 0x11, 0x2a, + 0xbb, 0x63, 0xdb, 0x0c, 0x89, 0x38, 0x67, 0xa8, 0x06, 0xe5, 0xb1, 0xe9, 0x9b, 0xae, 0x4b, 0x5c, + 0x27, 0x18, 0xc9, 0x42, 0x43, 0x92, 0x84, 0xde, 0xfb, 0xac, 0x66, 0x6c, 0x16, 0xd9, 0xd9, 0xf9, + 0xde, 0xbf, 0x6e, 0x66, 0x22, 0x83, 0xee, 0xc2, 0xca, 0xbe, 0x18, 0xad, 0x61, 0x5a, 0x7c, 0x61, + 0x17, 0xf9, 0xc2, 0xd6, 0xd3, 0x16, 0x36, 0x39, 0xac, 0xba, 0x9c, 0x64, 0x83, 0x4b, 0xe1, 0xe5, + 0xfd, 0x64, 0x13, 0x3d, 0x84, 0xa5, 0x11, 0xf5, 0x9c, 0x90, 0xfa, 0x57, 0xaf, 0x42, 0x84, 0x44, + 0x77, 0xe0, 0x1a, 0x5b, 0xdc, 0x68, 0x3c, 0x9c, 0xcd, 0xaf, 0xf3, 0x2c, 0x5e, 0x1d, 0x99, 0x2f, + 0x65, 0x87, 0x98, 0x91, 0x51, 0x13, 0xf2, 0xd4, 0x67, 0xf1, 0x62, 0x81, 0x0f, 0xf7, 0xdd, 0x2b, + 0x87, 0x2b, 0x1a, 0x5d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x75, 0x58, 0x3e, 0x37, 0x09, 0x16, 0x26, + 0xf5, 0x1a, 0xbb, 0x7d, 0x5d, 0x59, 0x40, 0x15, 0x28, 0x6a, 0xdd, 0xce, 0xa0, 0xdd, 0xd9, 0x65, + 0x71, 0x5e, 0x05, 0x8a, 0xb8, 0xbb, 0xb3, 0xd3, 0x6c, 0x68, 0x4f, 0x94, 0xac, 0x5a, 0x87, 0x72, + 0x42, 0x1b, 0x5a, 0x01, 0xe8, 0x0f, 0xba, 0x3d, 0x63, 0xab, 0x8d, 0xfb, 0x03, 0x11, 0x25, 0xf6, + 0x07, 0x0d, 0x3c, 0x90, 0x84, 0x8c, 0xfa, 0x1f, 0xd9, 0x68, 0x45, 0x65, 0x60, 0xd8, 0x3c, 0x1f, + 0x18, 0x5e, 0x32, 0x78, 0x19, 0x1a, 0xce, 0x1a, 0x71, 0x80, 0xf8, 0x1e, 0x00, 0xdf, 0x38, 0xc4, + 0x36, 0xcc, 0x50, 0x2e, 0x7c, 0xf5, 0x15, 0x23, 0x0f, 0xa2, 0x7a, 0x17, 0x2e, 0x49, 0x74, 0x23, + 0x44, 0xdf, 0x80, 0x8a, 0x45, 0x47, 0x63, 0x97, 0x48, 0xe1, 0xc5, 0x2b, 0x85, 0xcb, 0x31, 0xbe, + 0x11, 0x26, 0x43, 0xd3, 0xdc, 0xf9, 0xe0, 0xf9, 0xd7, 0x32, 0x91, 0x65, 0x52, 0xa2, 0xd1, 0x0a, + 0x14, 0x77, 0x7b, 0xad, 0xc6, 0xa0, 0xdd, 0x79, 0xac, 0x64, 0x10, 0x40, 0x81, 0x9b, 0xba, 0xa5, + 0x64, 0x59, 0x14, 0xad, 0x75, 0x9f, 0xf6, 0x76, 0x74, 0xee, 0xb1, 0xd0, 0x1a, 0x28, 0x91, 0xb1, + 0x0d, 0x6e, 0x48, 0xbd, 0xa5, 0xe4, 0xd0, 0x75, 0x58, 0x8d, 0xa9, 0x52, 0x32, 0x8f, 0x6e, 0x00, + 0x8a, 0x89, 0x33, 0x15, 0x05, 0xf5, 0x97, 0x61, 0x55, 0xa3, 0x5e, 0x68, 0x3a, 0x5e, 0x9c, 0x61, + 0x3c, 0x60, 0x93, 0x96, 0x24, 0xc3, 0x91, 0x75, 0xa2, 0xe6, 0xea, 0xd9, 0xe9, 0x66, 0x39, 0x86, + 
0xb6, 0x5b, 0x3c, 0x54, 0x92, 0x0d, 0x9b, 0x9d, 0xdf, 0xb1, 0x63, 0x73, 0xe3, 0xe6, 0x9b, 0x4b, + 0x67, 0xa7, 0x9b, 0x8b, 0xbd, 0x76, 0x0b, 0x33, 0x1a, 0xfa, 0x22, 0x94, 0xc8, 0x4b, 0x27, 0x34, + 0x2c, 0x76, 0x2f, 0x31, 0x03, 0xe6, 0x71, 0x91, 0x11, 0x34, 0x76, 0x0d, 0x35, 0x01, 0x7a, 0xd4, + 0x0f, 0x65, 0xcf, 0x5f, 0x85, 0xfc, 0x98, 0xfa, 0xbc, 0xb2, 0x71, 0x61, 0xbd, 0x8d, 0xc1, 0xc5, + 0x46, 0xc5, 0x02, 0xac, 0xfe, 0xee, 0x22, 0xc0, 0xc0, 0x0c, 0x0e, 0xa5, 0x92, 0x47, 0x50, 0x8a, + 0x6b, 0x97, 0xb2, 0x44, 0x72, 0xe9, 0x6a, 0xc7, 0x60, 0xf4, 0x30, 0xda, 0x6c, 0x22, 0x77, 0x4a, + 0x4d, 0x71, 0xa3, 0x8e, 0xd2, 0xd2, 0x8f, 0xf3, 0x09, 0x12, 0xbb, 0xe6, 0x89, 0xef, 0xcb, 0x95, + 0x67, 0x3f, 0x91, 0xc6, 0xaf, 0x05, 0x61, 0x34, 0x19, 0x7d, 0xa7, 0x16, 0x85, 0xe6, 0x56, 0x64, + 0x7b, 0x01, 0xcf, 0xe4, 0xd0, 0x87, 0x50, 0x66, 0xf3, 0x36, 0x02, 0xce, 0x93, 0x81, 0xf7, 0x85, + 0xa6, 0x12, 0x1a, 0x30, 0x8c, 0x67, 0x56, 0x7e, 0x1d, 0xc0, 0x1c, 0x8f, 0x5d, 0x87, 0xd8, 0xc6, + 0xde, 0x94, 0x47, 0xda, 0x25, 0x5c, 0x92, 0x94, 0xe6, 0x94, 0x1d, 0x97, 0x88, 0x6d, 0x86, 0x3c, + 0x7a, 0xbe, 0xc2, 0x80, 0x12, 0xdd, 0x08, 0x9b, 0x0a, 0xac, 0xf8, 0x13, 0x8f, 0x19, 0x54, 0x8e, + 0x4e, 0xfd, 0x93, 0x2c, 0xbc, 0xd6, 0x21, 0xe1, 0x0b, 0xea, 0x1f, 0x36, 0xc2, 0xd0, 0xb4, 0x0e, + 0x46, 0xc4, 0x93, 0xcb, 0x97, 0x48, 0x68, 0x32, 0xe7, 0x12, 0x9a, 0x75, 0x58, 0x32, 0x5d, 0xc7, + 0x0c, 0x88, 0x08, 0xde, 0x4a, 0x38, 0x6a, 0xb2, 0xb4, 0x8b, 0x25, 0x71, 0x24, 0x08, 0x88, 0xa8, + 0xba, 0xb0, 0x81, 0x47, 0x04, 0xf4, 0x5d, 0xb8, 0x21, 0xc3, 0x34, 0x33, 0xee, 0x8a, 0x25, 0x14, + 0x51, 0xf9, 0x56, 0x4f, 0xcd, 0x2a, 0xd3, 0x07, 0x27, 0xe3, 0xb8, 0x19, 0xb9, 0x3b, 0x0e, 0x65, + 0x54, 0xb8, 0x66, 0xa7, 0xb0, 0xaa, 0x8f, 0xe1, 0xe6, 0x85, 0x22, 0x9f, 0xab, 0xaa, 0xf3, 0x8f, + 0x59, 0x80, 0x76, 0xaf, 0xf1, 0x54, 0x1a, 0xa9, 0x05, 0x85, 0x7d, 0x73, 0xe4, 0xb8, 0xd3, 0xcb, + 0x3c, 0xe0, 0x0c, 0x5f, 0x6f, 0x08, 0x73, 0x6c, 0x71, 0x19, 0x2c, 0x65, 0x79, 0x4e, 0x39, 0xd9, + 0xf3, 0x48, 0x18, 0xe7, 0x94, 0xbc, 0xc5, 0x86, 0xe1, 0x9b, 0x5e, 0xbc, 0x75, 0x45, 0x83, 0x2d, + 0x00, 0x0b, 0x79, 0x5e, 0x98, 0xd3, 0xc8, 0x6d, 0xc9, 0x26, 0xda, 0xe6, 0xb5, 0x53, 0xe2, 0x1f, + 0x11, 0x7b, 0x3d, 0xcf, 0x8d, 0x7a, 0xd5, 0x78, 0xb0, 0x84, 0x0b, 0xdb, 0xc5, 0xd2, 0xd5, 0x0f, + 0x78, 0xc8, 0x34, 0x63, 0x7d, 0x2e, 0x1b, 0xdd, 0x83, 0xe5, 0x73, 0xf3, 0x7c, 0x25, 0x99, 0x6f, + 0xf7, 0x9e, 0x7d, 0x55, 0xc9, 0xc9, 0x5f, 0x5f, 0x57, 0x0a, 0xea, 0xdf, 0x2c, 0x0a, 0x47, 0x23, + 0xad, 0x9a, 0xfe, 0x66, 0x50, 0xe4, 0xbb, 0xdb, 0xa2, 0xae, 0x74, 0x00, 0x6f, 0x5f, 0xee, 0x7f, + 0x58, 0x4e, 0xc7, 0xe1, 0x38, 0x16, 0x44, 0x9b, 0x50, 0x16, 0xbb, 0xd8, 0x60, 0x07, 0x8e, 0x9b, + 0x75, 0x19, 0x83, 0x20, 0x31, 0x49, 0x74, 0x1b, 0x56, 0x78, 0xf1, 0x27, 0x38, 0x20, 0xb6, 0xc0, + 0xe4, 0x38, 0x66, 0x39, 0xa6, 0x72, 0xd8, 0x53, 0xa8, 0x48, 0x82, 0xc1, 0xe3, 0xf9, 0x3c, 0x1f, + 0xd0, 0x9d, 0xab, 0x06, 0x24, 0x44, 0x78, 0x98, 0x5f, 0x1e, 0xcf, 0x1a, 0xea, 0xcf, 0x43, 0x31, + 0x1a, 0x2c, 0x5a, 0x87, 0xc5, 0x81, 0xd6, 0x53, 0x16, 0xaa, 0xab, 0xc7, 0x27, 0xb5, 0x72, 0x44, + 0x1e, 0x68, 0x3d, 0xc6, 0xd9, 0x6d, 0xf5, 0x94, 0xcc, 0x79, 0xce, 0x6e, 0xab, 0x87, 0xaa, 0x90, + 0xeb, 0x6b, 0x83, 0x5e, 0x14, 0x9f, 0x45, 0x2c, 0x46, 0xab, 0xe6, 0x58, 0x7c, 0xa6, 0xee, 0x43, + 0x39, 0xd1, 0x3b, 0x7a, 0x13, 0x96, 0xda, 0x9d, 0xc7, 0x58, 0xef, 0xf7, 0x95, 0x85, 0xea, 0x8d, + 0xe3, 0x93, 0x1a, 0x4a, 0x70, 0xdb, 0xde, 0x90, 0xad, 0x1d, 0x7a, 0x1d, 0x72, 0xdb, 0x5d, 0x76, + 0xef, 0x8b, 0xe4, 0x22, 0x81, 0xd8, 0xa6, 0x41, 0x58, 0xbd, 0x2e, 0x03, 0xbf, 0xa4, 0x62, 0xf5, + 0xf7, 0x32, 0x50, 0x10, 
0x07, 0x2d, 0x75, 0x11, 0x1b, 0xb0, 0x14, 0x95, 0x10, 0x44, 0xe2, 0xf7, + 0xf6, 0xc5, 0x49, 0x5a, 0x5d, 0xe6, 0x54, 0x62, 0x6b, 0x46, 0x72, 0xd5, 0xf7, 0xa1, 0x92, 0x64, + 0x7c, 0xae, 0x8d, 0xf9, 0x5d, 0x28, 0xb3, 0xbd, 0x1f, 0x25, 0x6b, 0x0f, 0xa0, 0x20, 0x9c, 0x45, + 0x7c, 0x0f, 0x5d, 0x9c, 0x31, 0x4a, 0x24, 0x7a, 0x04, 0x4b, 0x22, 0xcb, 0x8c, 0xea, 0xca, 0x1b, + 0x97, 0x9f, 0x30, 0x1c, 0xc1, 0xd5, 0x0f, 0x21, 0xd7, 0x23, 0xc4, 0x67, 0xb6, 0xf7, 0xa8, 0x4d, + 0x66, 0x57, 0xb7, 0x4c, 0x90, 0x6d, 0xd2, 0x6e, 0xb1, 0x04, 0xd9, 0x26, 0x6d, 0x3b, 0xae, 0x8d, + 0x65, 0x13, 0xb5, 0xb1, 0x01, 0x54, 0x9e, 0x13, 0x67, 0x78, 0x10, 0x12, 0x9b, 0x2b, 0x7a, 0x17, + 0x72, 0x63, 0x12, 0x0f, 0x7e, 0x3d, 0x75, 0xf3, 0x11, 0xe2, 0x63, 0x8e, 0x62, 0x3e, 0xe6, 0x05, + 0x97, 0x96, 0x8f, 0x21, 0xb2, 0xa5, 0xfe, 0x43, 0x16, 0x56, 0xda, 0x41, 0x30, 0x31, 0x3d, 0x2b, + 0x8a, 0xea, 0xbe, 0x79, 0x3e, 0xaa, 0x4b, 0x7d, 0x35, 0x3a, 0x2f, 0x72, 0xbe, 0xe4, 0x27, 0x6f, + 0xd6, 0x6c, 0x7c, 0xb3, 0xaa, 0x3f, 0xce, 0x44, 0x75, 0xbd, 0xdb, 0x09, 0x57, 0x20, 0x72, 0xc4, + 0xa4, 0x26, 0xb2, 0xeb, 0x1d, 0x7a, 0xf4, 0x85, 0x87, 0xde, 0x80, 0x3c, 0xd6, 0x3b, 0xfa, 0x73, + 0x25, 0x23, 0xb6, 0xe7, 0x39, 0x10, 0x26, 0x1e, 0x79, 0xc1, 0x34, 0xf5, 0xf4, 0x4e, 0x8b, 0x45, + 0x61, 0xd9, 0x14, 0x4d, 0x3d, 0xe2, 0xd9, 0x8e, 0x37, 0x44, 0x6f, 0x42, 0xa1, 0xdd, 0xef, 0xef, + 0xf2, 0x14, 0xf2, 0xb5, 0xe3, 0x93, 0xda, 0xf5, 0x73, 0x28, 0x5e, 0xd3, 0xb5, 0x19, 0x88, 0xa5, + 0x40, 0x2c, 0x3e, 0x4b, 0x01, 0xb1, 0xd8, 0x5a, 0x80, 0x70, 0x77, 0xd0, 0x18, 0xe8, 0x4a, 0x3e, + 0x05, 0x84, 0x29, 0xfb, 0x2b, 0x8f, 0xdb, 0xbf, 0x64, 0x41, 0x69, 0x58, 0x16, 0x19, 0x87, 0x8c, + 0x2f, 0xb3, 0xce, 0x01, 0x14, 0xc7, 0xec, 0x97, 0x43, 0xa2, 0x08, 0xea, 0x51, 0xea, 0xbb, 0xe7, + 0x9c, 0x5c, 0x1d, 0x53, 0x97, 0x34, 0xec, 0x91, 0x13, 0x04, 0x0e, 0xf5, 0x04, 0x0d, 0xc7, 0x9a, + 0xaa, 0xff, 0x99, 0x81, 0xeb, 0x29, 0x08, 0x74, 0x0f, 0x72, 0x3e, 0x75, 0xa3, 0x35, 0xbc, 0x75, + 0x51, 0xc9, 0x96, 0x89, 0x62, 0x8e, 0x44, 0x1b, 0x00, 0xe6, 0x24, 0xa4, 0x26, 0xef, 0x9f, 0xaf, + 0x5e, 0x11, 0x27, 0x28, 0xe8, 0x39, 0x14, 0x02, 0x62, 0xf9, 0x24, 0x8a, 0xb3, 0x3f, 0xfc, 0xbf, + 0x8e, 0xbe, 0xde, 0xe7, 0x6a, 0xb0, 0x54, 0x57, 0xad, 0x43, 0x41, 0x50, 0xd8, 0xb6, 0xb7, 0xcd, + 0xd0, 0x94, 0x05, 0x7d, 0xfe, 0x9b, 0xed, 0x26, 0xd3, 0x1d, 0x46, 0xbb, 0xc9, 0x74, 0x87, 0xea, + 0x5f, 0x67, 0x01, 0xf4, 0x97, 0x21, 0xf1, 0x3d, 0xd3, 0xd5, 0x1a, 0x48, 0x4f, 0xdc, 0x0c, 0x62, + 0xb6, 0x5f, 0x4e, 0x7d, 0xc3, 0x88, 0x25, 0xea, 0x5a, 0x23, 0xe5, 0x6e, 0xb8, 0x09, 0x8b, 0x13, + 0x5f, 0x3e, 0x65, 0x8b, 0x18, 0x79, 0x17, 0xef, 0x60, 0x46, 0x43, 0xfa, 0xcc, 0x6d, 0x2d, 0x5e, + 0xfc, 0x60, 0x9d, 0xe8, 0x20, 0xd5, 0x75, 0xb1, 0x93, 0x6f, 0x99, 0x86, 0x45, 0xe4, 0xad, 0x52, + 0x11, 0x27, 0x5f, 0x6b, 0x68, 0xc4, 0x0f, 0x71, 0xc1, 0x32, 0xd9, 0xff, 0x9f, 0xc8, 0xbf, 0xbd, + 0x0b, 0x30, 0x9b, 0x1a, 0xda, 0x80, 0xbc, 0xb6, 0xd5, 0xef, 0xef, 0x28, 0x0b, 0xc2, 0x81, 0xcf, + 0x58, 0x9c, 0xac, 0xfe, 0x45, 0x16, 0x8a, 0x5a, 0x43, 0x5e, 0xb9, 0x1a, 0x28, 0xdc, 0x2b, 0xf1, + 0x67, 0x10, 0xf2, 0x72, 0xec, 0xf8, 0x53, 0xe9, 0x58, 0x2e, 0x49, 0x78, 0x57, 0x98, 0x08, 0x1b, + 0xb5, 0xce, 0x05, 0x10, 0x86, 0x0a, 0x91, 0x46, 0x30, 0x2c, 0x33, 0xf2, 0xf1, 0x1b, 0x97, 0x1b, + 0x4b, 0xa4, 0x2e, 0xb3, 0x76, 0x80, 0xcb, 0x91, 0x12, 0xcd, 0x0c, 0xd0, 0x7b, 0xb0, 0x1a, 0x38, + 0x43, 0xcf, 0xf1, 0x86, 0x46, 0x64, 0x3c, 0xfe, 0x26, 0xd3, 0xbc, 0x76, 0x76, 0xba, 0xb9, 0xdc, + 0x17, 0x2c, 0x69, 0xc3, 0x65, 0x89, 0xd4, 0xb8, 0x29, 0xd1, 0xd7, 0x61, 0x25, 0x21, 0xca, 0xac, + 0x28, 0xcc, 0xae, 0x9c, 0x9d, 0x6e, 0x56, 0x62, 
0xc9, 0x27, 0x64, 0x8a, 0x2b, 0xb1, 0xe0, 0x13, + 0xc2, 0x6b, 0x33, 0xfb, 0xd4, 0xb7, 0x88, 0xe1, 0xf3, 0x33, 0xcd, 0x6f, 0xf7, 0x1c, 0x2e, 0x73, + 0x9a, 0x38, 0xe6, 0xea, 0x33, 0xb8, 0xde, 0xf5, 0xad, 0x03, 0x12, 0x84, 0xc2, 0x14, 0xd2, 0x8a, + 0x1f, 0xc2, 0xad, 0xd0, 0x0c, 0x0e, 0x8d, 0x03, 0x27, 0x08, 0xa9, 0x3f, 0x35, 0x7c, 0x12, 0x12, + 0x8f, 0xf1, 0x0d, 0xfe, 0xcc, 0x2b, 0x0b, 0x82, 0x37, 0x19, 0x66, 0x5b, 0x40, 0x70, 0x84, 0xd8, + 0x61, 0x00, 0xb5, 0x0d, 0x15, 0x96, 0xc2, 0xc8, 0xa2, 0x1a, 0x9b, 0x3d, 0xb8, 0x74, 0x68, 0x7c, + 0xe6, 0x6b, 0xaa, 0xe4, 0xd2, 0xa1, 0xf8, 0xa9, 0x7e, 0x1b, 0x94, 0x96, 0x13, 0x8c, 0xcd, 0xd0, + 0x3a, 0x88, 0x2a, 0x9d, 0xa8, 0x05, 0xca, 0x01, 0x31, 0xfd, 0x70, 0x8f, 0x98, 0xa1, 0x31, 0x26, + 0xbe, 0x43, 0xed, 0xab, 0x57, 0x79, 0x35, 0x16, 0xe9, 0x71, 0x09, 0xf5, 0xbf, 0x33, 0x00, 0xd8, + 0xdc, 0x8f, 0xa2, 0xb5, 0xaf, 0xc0, 0xb5, 0xc0, 0x33, 0xc7, 0xc1, 0x01, 0x0d, 0x0d, 0xc7, 0x0b, + 0x89, 0x7f, 0x64, 0xba, 0xb2, 0xb8, 0xa3, 0x44, 0x8c, 0xb6, 0xa4, 0xa3, 0x77, 0x01, 0x1d, 0x12, + 0x32, 0x36, 0xa8, 0x6b, 0x1b, 0x11, 0x53, 0x3c, 0x42, 0xe7, 0xb0, 0xc2, 0x38, 0x5d, 0xd7, 0xee, + 0x47, 0x74, 0xd4, 0x84, 0x0d, 0x36, 0x7d, 0xe2, 0x85, 0xbe, 0x43, 0x02, 0x63, 0x9f, 0xfa, 0x46, + 0xe0, 0xd2, 0x17, 0xc6, 0x3e, 0x75, 0x5d, 0xfa, 0x82, 0xf8, 0x51, 0xdd, 0xac, 0xea, 0xd2, 0xa1, + 0x2e, 0x40, 0x5b, 0xd4, 0xef, 0xbb, 0xf4, 0xc5, 0x56, 0x84, 0x60, 0x21, 0xdd, 0x6c, 0xce, 0xa1, + 0x63, 0x1d, 0x46, 0x21, 0x5d, 0x4c, 0x1d, 0x38, 0xd6, 0x21, 0x7a, 0x13, 0x96, 0x89, 0x4b, 0x78, + 0xf9, 0x44, 0xa0, 0xf2, 0x1c, 0x55, 0x89, 0x88, 0x0c, 0xa4, 0x7e, 0x04, 0x8a, 0xee, 0x59, 0xfe, + 0x74, 0x9c, 0x58, 0xf3, 0x77, 0x01, 0x31, 0x27, 0x69, 0xb8, 0xd4, 0x3a, 0x34, 0x46, 0xa6, 0x67, + 0x0e, 0xd9, 0xb8, 0xc4, 0xeb, 0x9f, 0xc2, 0x38, 0x3b, 0xd4, 0x3a, 0x7c, 0x2a, 0xe9, 0xea, 0x7b, + 0x00, 0xfd, 0xb1, 0x4f, 0x4c, 0xbb, 0xcb, 0xa2, 0x09, 0x66, 0x3a, 0xde, 0x32, 0x6c, 0xf9, 0xb6, + 0x4a, 0x7d, 0x79, 0xd4, 0x15, 0xc1, 0x68, 0xc5, 0x74, 0xf5, 0x67, 0xe1, 0x7a, 0xcf, 0x35, 0x2d, + 0xfe, 0x9d, 0x41, 0x2f, 0x7e, 0xce, 0x42, 0x8f, 0xa0, 0x20, 0xa0, 0x72, 0x25, 0x53, 0x8f, 0xdb, + 0xac, 0xcf, 0xed, 0x05, 0x2c, 0xf1, 0xcd, 0x0a, 0xc0, 0x4c, 0x8f, 0xfa, 0x67, 0x19, 0x28, 0xc5, + 0xfa, 0x51, 0x4d, 0xbc, 0xd2, 0x84, 0xbe, 0xe9, 0x78, 0x32, 0xe3, 0x2f, 0xe1, 0x24, 0x09, 0xb5, + 0xa1, 0x3c, 0x8e, 0xa5, 0x2f, 0x8d, 0xe7, 0x52, 0x46, 0x8d, 0x93, 0xb2, 0xe8, 0x7d, 0x28, 0x45, + 0x8f, 0xd9, 0x91, 0x87, 0xbd, 0xfc, 0xed, 0x7b, 0x06, 0x57, 0xbf, 0x09, 0xf0, 0x2d, 0xea, 0x78, + 0x03, 0x7a, 0x48, 0x3c, 0xfe, 0xfc, 0xca, 0xf2, 0x45, 0x12, 0x59, 0x51, 0xb6, 0x78, 0x19, 0x40, + 0x2c, 0x41, 0xfc, 0x0a, 0x29, 0x9a, 0xea, 0x5f, 0x65, 0xa1, 0x80, 0x29, 0x0d, 0xb5, 0x06, 0xaa, + 0x41, 0x41, 0xfa, 0x09, 0x7e, 0xff, 0x34, 0x4b, 0x67, 0xa7, 0x9b, 0x79, 0xe1, 0x20, 0xf2, 0x16, + 0xf7, 0x0c, 0x09, 0x0f, 0x9e, 0xbd, 0xc8, 0x83, 0xa3, 0x7b, 0x50, 0x91, 0x20, 0xe3, 0xc0, 0x0c, + 0x0e, 0x44, 0xf2, 0xd6, 0x5c, 0x39, 0x3b, 0xdd, 0x04, 0x81, 0xdc, 0x36, 0x83, 0x03, 0x0c, 0x02, + 0xcd, 0x7e, 0x23, 0x1d, 0xca, 0x1f, 0x53, 0xc7, 0x33, 0x42, 0x3e, 0x09, 0x59, 0x68, 0x4c, 0x5d, + 0xc7, 0xd9, 0x54, 0xe5, 0x97, 0x0a, 0xf0, 0xf1, 0x6c, 0xf2, 0x3a, 0x2c, 0xfb, 0x94, 0x86, 0xc2, + 0x6d, 0x39, 0xd4, 0x93, 0x35, 0x8c, 0x5a, 0x6a, 0x69, 0x9b, 0xd2, 0x10, 0x4b, 0x1c, 0xae, 0xf8, + 0x89, 0x16, 0xba, 0x07, 0x6b, 0xae, 0x19, 0x84, 0x06, 0xf7, 0x77, 0xf6, 0x4c, 0x5b, 0x81, 0x1f, + 0x35, 0xc4, 0x78, 0x5b, 0x9c, 0x15, 0x49, 0xa8, 0xff, 0x94, 0x81, 0x32, 0x9b, 0x8c, 0xb3, 0xef, + 0x58, 0x2c, 0xc8, 0xfb, 0xfc, 0xb1, 0xc7, 0x4d, 0x58, 0xb4, 0x02, 0x5f, 
0x1a, 0x95, 0x5f, 0xbe, + 0x5a, 0x1f, 0x63, 0x46, 0x43, 0x1f, 0x41, 0x41, 0xd6, 0x52, 0x44, 0xd8, 0xa1, 0x5e, 0x1d, 0x8e, + 0x4a, 0xdb, 0x48, 0x39, 0xbe, 0x97, 0x67, 0xa3, 0x13, 0x97, 0x00, 0x4e, 0x92, 0xd0, 0x0d, 0xc8, + 0x5a, 0xc2, 0x5c, 0xf2, 0x53, 0x18, 0xad, 0x83, 0xb3, 0x96, 0xa7, 0xfe, 0x20, 0x03, 0xcb, 0xb3, + 0x03, 0xcf, 0x76, 0xc0, 0x2d, 0x28, 0x05, 0x93, 0xbd, 0x60, 0x1a, 0x84, 0x64, 0x14, 0x3d, 0x2d, + 0xc7, 0x04, 0xd4, 0x86, 0x92, 0xe9, 0x0e, 0xa9, 0xef, 0x84, 0x07, 0x23, 0x99, 0xa5, 0xa6, 0x87, + 0x0a, 0x49, 0x9d, 0xf5, 0x46, 0x24, 0x82, 0x67, 0xd2, 0xd1, 0xbd, 0x2f, 0xbe, 0x3f, 0xe0, 0xf7, + 0xfe, 0x1b, 0x50, 0x71, 0xcd, 0x11, 0x2f, 0x2e, 0x85, 0xce, 0x48, 0xcc, 0x23, 0x87, 0xcb, 0x92, + 0x36, 0x70, 0x46, 0x44, 0x55, 0xa1, 0x14, 0x2b, 0x43, 0xab, 0x50, 0x6e, 0xe8, 0x7d, 0xe3, 0xfe, + 0x83, 0x47, 0xc6, 0x63, 0xed, 0xa9, 0xb2, 0x20, 0x63, 0xd3, 0x3f, 0xcf, 0xc0, 0xb2, 0x74, 0x47, + 0x32, 0xde, 0x7f, 0x13, 0x96, 0x7c, 0x73, 0x3f, 0x8c, 0x32, 0x92, 0x9c, 0xd8, 0xd5, 0xcc, 0xc3, + 0xb3, 0x8c, 0x84, 0xb1, 0xd2, 0x33, 0x92, 0xc4, 0xc7, 0x0e, 0x8b, 0x97, 0x7e, 0xec, 0x90, 0xfb, + 0xa9, 0x7c, 0xec, 0xa0, 0xfe, 0x0a, 0xc0, 0x96, 0xe3, 0x92, 0x81, 0xa8, 0x43, 0xa5, 0xe5, 0x97, + 0x2c, 0x86, 0x93, 0x75, 0xce, 0x28, 0x86, 0x6b, 0xb7, 0x30, 0xa3, 0x31, 0xd6, 0xd0, 0xb1, 0xe5, + 0x61, 0xe4, 0xac, 0xc7, 0x8c, 0x35, 0x74, 0xec, 0xf8, 0x55, 0x2e, 0x77, 0xd5, 0xab, 0xdc, 0x49, + 0x06, 0x56, 0x65, 0xec, 0x1a, 0xbb, 0xdf, 0x2f, 0x43, 0x49, 0x84, 0xb1, 0xb3, 0x84, 0x8e, 0x3f, + 0xf0, 0x0b, 0x5c, 0xbb, 0x85, 0x8b, 0x82, 0xdd, 0xb6, 0xd1, 0x26, 0x94, 0x25, 0x34, 0xf1, 0xd9, + 0x14, 0x08, 0x52, 0x87, 0x0d, 0xff, 0xab, 0x90, 0xdb, 0x77, 0x5c, 0x22, 0x37, 0x7a, 0xaa, 0x03, + 0x98, 0x19, 0x60, 0x7b, 0x01, 0x73, 0x74, 0xb3, 0x18, 0x15, 0xea, 0xf8, 0xf8, 0x64, 0xda, 0x99, + 0x1c, 0x9f, 0xc8, 0x40, 0xe7, 0xc6, 0x27, 0x70, 0x6c, 0x7c, 0x82, 0x2d, 0xc6, 0x27, 0xa1, 0xc9, + 0xf1, 0x09, 0xd2, 0x4f, 0x65, 0x7c, 0x3b, 0x70, 0xa3, 0xe9, 0x9a, 0xd6, 0xa1, 0xeb, 0x04, 0x21, + 0xb1, 0x93, 0x1e, 0xe3, 0x01, 0x14, 0xce, 0x05, 0x9d, 0x97, 0x55, 0x34, 0x25, 0x52, 0xfd, 0xf7, + 0x0c, 0x54, 0xb6, 0x89, 0xe9, 0x86, 0x07, 0xb3, 0xb2, 0x51, 0x48, 0x82, 0x50, 0x5e, 0x56, 0xfc, + 0x37, 0xfa, 0x1a, 0x14, 0xe3, 0x98, 0xe4, 0xca, 0xb7, 0xb9, 0x18, 0x8a, 0x1e, 0xc2, 0x12, 0x3b, + 0x63, 0x74, 0x12, 0x25, 0x3b, 0x97, 0x3d, 0xfb, 0x48, 0x24, 0xbb, 0x64, 0x7c, 0xc2, 0x83, 0x10, + 0xbe, 0x95, 0xf2, 0x38, 0x6a, 0xa2, 0xff, 0x0f, 0x15, 0xfe, 0x6a, 0x11, 0xc5, 0x5c, 0xf9, 0xab, + 0x74, 0x96, 0xc5, 0xc3, 0xa3, 0x88, 0xb7, 0xfe, 0x38, 0x0b, 0x6b, 0x4f, 0xcd, 0xe9, 0x1e, 0x91, + 0x6e, 0x83, 0xd8, 0x98, 0x58, 0xd4, 0xb7, 0x51, 0x2f, 0xe9, 0x6e, 0x2e, 0x79, 0xc7, 0x4c, 0x13, + 0x4e, 0xf7, 0x3a, 0x51, 0x02, 0x96, 0x4d, 0x24, 0x60, 0x6b, 0x90, 0xf7, 0xa8, 0x67, 0x11, 0xe9, + 0x8b, 0x44, 0x43, 0xfd, 0xed, 0x4c, 0xd2, 0xd7, 0x54, 0xe3, 0x37, 0x46, 0x5e, 0x81, 0xea, 0xd0, + 0x30, 0xee, 0x0e, 0x7d, 0x04, 0xd5, 0xbe, 0xae, 0x61, 0x7d, 0xd0, 0xec, 0x7e, 0xdb, 0xe8, 0x37, + 0x76, 0xfa, 0x8d, 0x07, 0xf7, 0x8c, 0x5e, 0x77, 0xe7, 0x3b, 0xf7, 0x1f, 0xde, 0xfb, 0x9a, 0x92, + 0xa9, 0xd6, 0x8e, 0x4f, 0x6a, 0xb7, 0x3a, 0x0d, 0x6d, 0x47, 0x1c, 0x99, 0x3d, 0xfa, 0xb2, 0x6f, + 0xba, 0x81, 0xf9, 0xe0, 0x5e, 0x8f, 0xba, 0x53, 0x86, 0x41, 0x5f, 0x01, 0xb4, 0xa5, 0xe3, 0x8e, + 0x3e, 0x30, 0x22, 0x87, 0xa6, 0x35, 0x35, 0x25, 0x2b, 0xd2, 0x9a, 0x2d, 0xe2, 0x7b, 0x24, 0x6c, + 0xe8, 0xfd, 0xfb, 0x0f, 0x1e, 0x69, 0x4d, 0x8d, 0x1d, 0x82, 0x4a, 0xf2, 0x76, 0x4b, 0x5e, 0xda, + 0x99, 0x0b, 0x2f, 0xed, 0xd9, 0xdd, 0x9f, 0xbd, 0xe0, 0xee, 0xdf, 0x82, 0x35, 0xcb, 0xa7, 0x41, + 
0x60, 0xb0, 0x5c, 0x81, 0xd8, 0x73, 0xd9, 0xc8, 0x17, 0xce, 0x4e, 0x37, 0xaf, 0x69, 0x8c, 0xdf, + 0xe7, 0x6c, 0xa9, 0xfe, 0x9a, 0x95, 0x20, 0xf1, 0x9e, 0xd4, 0xdf, 0x5f, 0x64, 0x61, 0x97, 0x73, + 0xe4, 0xb8, 0x64, 0x48, 0x02, 0xf4, 0x0c, 0x56, 0x2d, 0x9f, 0xd8, 0x2c, 0x09, 0x30, 0xdd, 0xe4, + 0xc7, 0xba, 0xff, 0x2f, 0x35, 0x02, 0x8a, 0x05, 0xeb, 0x5a, 0x2c, 0xd5, 0x1f, 0x13, 0x0b, 0xaf, + 0x58, 0xe7, 0xda, 0xe8, 0x63, 0x58, 0x0d, 0x88, 0xeb, 0x78, 0x93, 0x97, 0x86, 0x45, 0xbd, 0x90, + 0xbc, 0x8c, 0xde, 0xd6, 0xae, 0xd2, 0xdb, 0xd7, 0x77, 0x98, 0x94, 0x26, 0x84, 0x9a, 0xe8, 0xec, + 0x74, 0x73, 0xe5, 0x3c, 0x0d, 0xaf, 0x48, 0xcd, 0xb2, 0x5d, 0xed, 0xc0, 0xca, 0xf9, 0xd1, 0xa0, + 0x35, 0xe9, 0x29, 0xb8, 0xc3, 0x89, 0x3c, 0x01, 0xba, 0x05, 0x45, 0x9f, 0x0c, 0x9d, 0x20, 0xf4, + 0x85, 0x99, 0x19, 0x27, 0xa6, 0x30, 0x3f, 0x21, 0xbe, 0xa5, 0xaa, 0xfe, 0x12, 0xcc, 0xf5, 0xc8, + 0x8e, 0x96, 0xed, 0x04, 0xe6, 0x9e, 0x54, 0x59, 0xc4, 0x51, 0x93, 0xed, 0xd8, 0x49, 0x10, 0x87, + 0x75, 0xfc, 0x37, 0xa3, 0xf1, 0xf8, 0x43, 0x7e, 0x59, 0xc6, 0x23, 0x8c, 0xe8, 0x03, 0xd6, 0x5c, + 0xe2, 0x03, 0xd6, 0x35, 0xc8, 0xbb, 0xe4, 0x88, 0xb8, 0xe2, 0xe6, 0xc7, 0xa2, 0x71, 0xe7, 0x1e, + 0x54, 0xa2, 0x2f, 0x25, 0xf9, 0x37, 0x18, 0x45, 0xc8, 0x0d, 0x1a, 0xfd, 0x27, 0xca, 0x02, 0x02, + 0x28, 0x88, 0x9d, 0x2c, 0xde, 0xfd, 0xb4, 0x6e, 0x67, 0xab, 0xfd, 0x58, 0xc9, 0xde, 0xf9, 0x9d, + 0x1c, 0x94, 0xe2, 0x97, 0x27, 0x76, 0xd3, 0x74, 0xf4, 0xe7, 0xd1, 0x51, 0x88, 0xe9, 0x1d, 0xf2, + 0x02, 0xbd, 0x31, 0xab, 0x59, 0x7d, 0x24, 0x9e, 0xda, 0x63, 0x76, 0x54, 0xaf, 0x7a, 0x0b, 0x8a, + 0x8d, 0x7e, 0xbf, 0xfd, 0xb8, 0xa3, 0xb7, 0x94, 0x4f, 0x33, 0xd5, 0x2f, 0x1c, 0x9f, 0xd4, 0xae, + 0xc5, 0xa0, 0x46, 0x20, 0x36, 0x1f, 0x47, 0x69, 0x9a, 0xde, 0x1b, 0xe8, 0x2d, 0xe5, 0x93, 0xec, + 0x3c, 0x8a, 0xd7, 0x60, 0xf8, 0x47, 0x40, 0xa5, 0x1e, 0xd6, 0x7b, 0x0d, 0xcc, 0x3a, 0xfc, 0x34, + 0x2b, 0x4a, 0x69, 0xb3, 0x1e, 0x7d, 0x32, 0x36, 0x7d, 0xd6, 0xe7, 0x46, 0xf4, 0x55, 0xdd, 0x27, + 0x8b, 0xe2, 0x43, 0x91, 0xd9, 0x33, 0x1a, 0x31, 0xed, 0x29, 0xeb, 0x8d, 0xbf, 0x5f, 0x72, 0x35, + 0x8b, 0x73, 0xbd, 0xf5, 0x99, 0xa7, 0x62, 0x5a, 0x54, 0x58, 0xc2, 0xbb, 0x9d, 0x0e, 0x03, 0x7d, + 0x92, 0x9b, 0x9b, 0x1d, 0x9e, 0x78, 0x2c, 0xbf, 0x46, 0xb7, 0xa1, 0x18, 0x3d, 0x6f, 0x2a, 0x9f, + 0xe6, 0xe6, 0x06, 0xa4, 0x45, 0x6f, 0xb3, 0xbc, 0xc3, 0xed, 0xdd, 0x01, 0xff, 0xe8, 0xef, 0x93, + 0xfc, 0x7c, 0x87, 0x07, 0x93, 0xd0, 0xa6, 0x2f, 0x3c, 0x76, 0x66, 0x65, 0xd5, 0xee, 0xd3, 0xbc, + 0xf0, 0x05, 0x31, 0x46, 0x96, 0xec, 0xde, 0x82, 0x22, 0xd6, 0xbf, 0x25, 0xbe, 0x0f, 0xfc, 0xa4, + 0x30, 0xa7, 0x07, 0x93, 0x8f, 0x89, 0xc5, 0x7a, 0xab, 0x41, 0x01, 0xeb, 0x4f, 0xbb, 0xcf, 0x74, + 0xe5, 0x0f, 0x0a, 0x73, 0x7a, 0x30, 0x19, 0x51, 0xfe, 0x95, 0x54, 0xb1, 0x8b, 0x7b, 0xdb, 0x0d, + 0xbe, 0x28, 0xf3, 0x7a, 0xba, 0xfe, 0xf8, 0xc0, 0xf4, 0x88, 0x3d, 0xfb, 0x9e, 0x26, 0x66, 0xdd, + 0xf9, 0x39, 0x28, 0x46, 0x91, 0x2e, 0xda, 0x80, 0xc2, 0xf3, 0x2e, 0x7e, 0xa2, 0x63, 0x65, 0x41, + 0x58, 0x39, 0xe2, 0x3c, 0x17, 0x39, 0x4a, 0x0d, 0x96, 0x9e, 0x36, 0x3a, 0x8d, 0xc7, 0x3a, 0x8e, + 0x4a, 0xee, 0x11, 0x40, 0x86, 0x6b, 0x55, 0x45, 0x76, 0x10, 0xeb, 0x6c, 0xae, 0x7f, 0xff, 0x47, + 0x1b, 0x0b, 0x3f, 0xfc, 0xd1, 0xc6, 0xc2, 0x27, 0x67, 0x1b, 0x99, 0xef, 0x9f, 0x6d, 0x64, 0xfe, + 0xfe, 0x6c, 0x23, 0xf3, 0x6f, 0x67, 0x1b, 0x99, 0xbd, 0x02, 0xbf, 0x54, 0x1e, 0xfe, 0x6f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x80, 0x94, 0x1b, 0x9c, 0x7a, 0x32, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/api/watch.pb.go b/vendor/github.com/docker/swarmkit/api/watch.pb.go new file mode 100644 index 
0000000000..509793df80 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/api/watch.pb.go @@ -0,0 +1,4602 @@ +// Code generated by protoc-gen-gogo. +// source: github.com/docker/swarmkit/api/watch.proto +// DO NOT EDIT! + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import transport "google.golang.org/grpc/transport" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// WatchActionKind distinguishes between creations, updates, and removals. It +// is structured as a bitmap so multiple kinds of events can be requested with +// a mask. +type WatchActionKind int32 + +const ( + WatchActionKindUnknown WatchActionKind = 0 + WatchActionKindCreate WatchActionKind = 1 + WatchActionKindUpdate WatchActionKind = 2 + WatchActionKindRemove WatchActionKind = 4 +) + +var WatchActionKind_name = map[int32]string{ + 0: "WATCH_ACTION_UNKNOWN", + 1: "WATCH_ACTION_CREATE", + 2: "WATCH_ACTION_UPDATE", + 4: "WATCH_ACTION_REMOVE", +} +var WatchActionKind_value = map[string]int32{ + "WATCH_ACTION_UNKNOWN": 0, + "WATCH_ACTION_CREATE": 1, + "WATCH_ACTION_UPDATE": 2, + "WATCH_ACTION_REMOVE": 4, +} + +func (x WatchActionKind) String() string { + return proto.EnumName(WatchActionKind_name, int32(x)) +} +func (WatchActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type Object struct { + // Types that are valid to be assigned to Object: + // *Object_Node + // *Object_Service + // *Object_Network + // *Object_Task + // *Object_Cluster + // *Object_Secret + // *Object_Resource + // *Object_Extension + // *Object_Config + Object isObject_Object `protobuf_oneof:"Object"` +} + +func (m *Object) Reset() { *m = Object{} } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type isObject_Object interface { + isObject_Object() + MarshalTo([]byte) (int, error) + Size() int +} + +type Object_Node struct { + Node *Node `protobuf:"bytes,1,opt,name=node,oneof"` +} +type Object_Service struct { + Service *Service `protobuf:"bytes,2,opt,name=service,oneof"` +} +type Object_Network struct { + Network *Network `protobuf:"bytes,3,opt,name=network,oneof"` +} +type Object_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type Object_Cluster struct { + Cluster *Cluster `protobuf:"bytes,5,opt,name=cluster,oneof"` +} +type Object_Secret struct { + Secret *Secret `protobuf:"bytes,6,opt,name=secret,oneof"` +} +type Object_Resource struct { + Resource *Resource `protobuf:"bytes,7,opt,name=resource,oneof"` +} +type Object_Extension struct { + Extension *Extension `protobuf:"bytes,8,opt,name=extension,oneof"` +} +type Object_Config struct { + Config *Config `protobuf:"bytes,9,opt,name=config,oneof"` +} + +func (*Object_Node) isObject_Object() {} +func (*Object_Service) isObject_Object() {} +func 
(*Object_Network) isObject_Object() {} +func (*Object_Task) isObject_Object() {} +func (*Object_Cluster) isObject_Object() {} +func (*Object_Secret) isObject_Object() {} +func (*Object_Resource) isObject_Object() {} +func (*Object_Extension) isObject_Object() {} +func (*Object_Config) isObject_Object() {} + +func (m *Object) GetObject() isObject_Object { + if m != nil { + return m.Object + } + return nil +} + +func (m *Object) GetNode() *Node { + if x, ok := m.GetObject().(*Object_Node); ok { + return x.Node + } + return nil +} + +func (m *Object) GetService() *Service { + if x, ok := m.GetObject().(*Object_Service); ok { + return x.Service + } + return nil +} + +func (m *Object) GetNetwork() *Network { + if x, ok := m.GetObject().(*Object_Network); ok { + return x.Network + } + return nil +} + +func (m *Object) GetTask() *Task { + if x, ok := m.GetObject().(*Object_Task); ok { + return x.Task + } + return nil +} + +func (m *Object) GetCluster() *Cluster { + if x, ok := m.GetObject().(*Object_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *Object) GetSecret() *Secret { + if x, ok := m.GetObject().(*Object_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Object) GetResource() *Resource { + if x, ok := m.GetObject().(*Object_Resource); ok { + return x.Resource + } + return nil +} + +func (m *Object) GetExtension() *Extension { + if x, ok := m.GetObject().(*Object_Extension); ok { + return x.Extension + } + return nil +} + +func (m *Object) GetConfig() *Config { + if x, ok := m.GetObject().(*Object_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Object) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Object_OneofMarshaler, _Object_OneofUnmarshaler, _Object_OneofSizer, []interface{}{ + (*Object_Node)(nil), + (*Object_Service)(nil), + (*Object_Network)(nil), + (*Object_Task)(nil), + (*Object_Cluster)(nil), + (*Object_Secret)(nil), + (*Object_Resource)(nil), + (*Object_Extension)(nil), + (*Object_Config)(nil), + } +} + +func _Object_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *Object_Service: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *Object_Network: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *Object_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Object_Cluster: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *Object_Secret: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Object_Resource: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *Object_Extension: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *Object_Config: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Object.Object has unexpected type %T", x) + } + return nil +} + +func _Object_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Object) + switch tag { + case 1: // Object.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Object = &Object_Node{msg} + return true, err + case 2: // Object.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Object = &Object_Service{msg} + return true, err + case 3: // Object.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Object = &Object_Network{msg} + return true, err + case 4: // Object.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Object = &Object_Task{msg} + return true, err + case 5: // Object.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := b.DecodeMessage(msg) + m.Object = &Object_Cluster{msg} + return true, err + case 6: // Object.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Object = &Object_Secret{msg} + return true, err + case 7: // Object.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Object = &Object_Resource{msg} + return true, err + case 8: // Object.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Object = &Object_Extension{msg} + return true, err + case 9: // Object.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Object = &Object_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Object_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Config: + s := proto.Size(x.Config) 
+ n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. +type SelectBySlot struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Slot uint64 `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty"` +} + +func (m *SelectBySlot) Reset() { *m = SelectBySlot{} } +func (*SelectBySlot) ProtoMessage() {} +func (*SelectBySlot) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{1} } + +type SelectByCustom struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Index string `protobuf:"bytes,2,opt,name=index,proto3" json:"index,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SelectByCustom) Reset() { *m = SelectByCustom{} } +func (*SelectByCustom) ProtoMessage() {} +func (*SelectByCustom) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{2} } + +type SelectBy struct { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + // + // Types that are valid to be assigned to By: + // *SelectBy_ID + // *SelectBy_IDPrefix + // *SelectBy_Name + // *SelectBy_NamePrefix + // *SelectBy_Custom + // *SelectBy_CustomPrefix + // *SelectBy_ServiceID + // *SelectBy_NodeID + // *SelectBy_Slot + // *SelectBy_DesiredState + // *SelectBy_Role + // *SelectBy_Membership + // *SelectBy_ReferencedNetworkID + // *SelectBy_ReferencedSecretID + // *SelectBy_ReferencedConfigID + // *SelectBy_Kind + By isSelectBy_By `protobuf_oneof:"By"` +} + +func (m *SelectBy) Reset() { *m = SelectBy{} } +func (*SelectBy) ProtoMessage() {} +func (*SelectBy) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{3} } + +type isSelectBy_By interface { + isSelectBy_By() + MarshalTo([]byte) (int, error) + Size() int +} + +type SelectBy_ID struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3,oneof"` +} +type SelectBy_IDPrefix struct { + IDPrefix string `protobuf:"bytes,2,opt,name=id_prefix,json=idPrefix,proto3,oneof"` +} +type SelectBy_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` +} +type SelectBy_NamePrefix struct { + NamePrefix string `protobuf:"bytes,4,opt,name=name_prefix,json=namePrefix,proto3,oneof"` +} +type SelectBy_Custom struct { + Custom *SelectByCustom `protobuf:"bytes,5,opt,name=custom,oneof"` +} +type SelectBy_CustomPrefix struct { + CustomPrefix *SelectByCustom `protobuf:"bytes,6,opt,name=custom_prefix,json=customPrefix,oneof"` +} +type SelectBy_ServiceID struct { + ServiceID string `protobuf:"bytes,7,opt,name=service_id,json=serviceId,proto3,oneof"` +} +type SelectBy_NodeID struct { + NodeID string `protobuf:"bytes,8,opt,name=node_id,json=nodeId,proto3,oneof"` +} +type SelectBy_Slot struct { + Slot *SelectBySlot `protobuf:"bytes,9,opt,name=slot,oneof"` +} +type SelectBy_DesiredState struct { + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState,oneof"` +} +type SelectBy_Role struct { + Role NodeRole `protobuf:"varint,11,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole,oneof"` +} +type SelectBy_Membership struct { + Membership NodeSpec_Membership 
`protobuf:"varint,12,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership,oneof"` +} +type SelectBy_ReferencedNetworkID struct { + ReferencedNetworkID string `protobuf:"bytes,13,opt,name=referenced_network_id,json=referencedNetworkId,proto3,oneof"` +} +type SelectBy_ReferencedSecretID struct { + ReferencedSecretID string `protobuf:"bytes,14,opt,name=referenced_secret_id,json=referencedSecretId,proto3,oneof"` +} +type SelectBy_ReferencedConfigID struct { + ReferencedConfigID string `protobuf:"bytes,16,opt,name=referenced_config_id,json=referencedConfigId,proto3,oneof"` +} +type SelectBy_Kind struct { + Kind string `protobuf:"bytes,15,opt,name=kind,proto3,oneof"` +} + +func (*SelectBy_ID) isSelectBy_By() {} +func (*SelectBy_IDPrefix) isSelectBy_By() {} +func (*SelectBy_Name) isSelectBy_By() {} +func (*SelectBy_NamePrefix) isSelectBy_By() {} +func (*SelectBy_Custom) isSelectBy_By() {} +func (*SelectBy_CustomPrefix) isSelectBy_By() {} +func (*SelectBy_ServiceID) isSelectBy_By() {} +func (*SelectBy_NodeID) isSelectBy_By() {} +func (*SelectBy_Slot) isSelectBy_By() {} +func (*SelectBy_DesiredState) isSelectBy_By() {} +func (*SelectBy_Role) isSelectBy_By() {} +func (*SelectBy_Membership) isSelectBy_By() {} +func (*SelectBy_ReferencedNetworkID) isSelectBy_By() {} +func (*SelectBy_ReferencedSecretID) isSelectBy_By() {} +func (*SelectBy_ReferencedConfigID) isSelectBy_By() {} +func (*SelectBy_Kind) isSelectBy_By() {} + +func (m *SelectBy) GetBy() isSelectBy_By { + if m != nil { + return m.By + } + return nil +} + +func (m *SelectBy) GetID() string { + if x, ok := m.GetBy().(*SelectBy_ID); ok { + return x.ID + } + return "" +} + +func (m *SelectBy) GetIDPrefix() string { + if x, ok := m.GetBy().(*SelectBy_IDPrefix); ok { + return x.IDPrefix + } + return "" +} + +func (m *SelectBy) GetName() string { + if x, ok := m.GetBy().(*SelectBy_Name); ok { + return x.Name + } + return "" +} + +func (m *SelectBy) GetNamePrefix() string { + if x, ok := m.GetBy().(*SelectBy_NamePrefix); ok { + return x.NamePrefix + } + return "" +} + +func (m *SelectBy) GetCustom() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_Custom); ok { + return x.Custom + } + return nil +} + +func (m *SelectBy) GetCustomPrefix() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_CustomPrefix); ok { + return x.CustomPrefix + } + return nil +} + +func (m *SelectBy) GetServiceID() string { + if x, ok := m.GetBy().(*SelectBy_ServiceID); ok { + return x.ServiceID + } + return "" +} + +func (m *SelectBy) GetNodeID() string { + if x, ok := m.GetBy().(*SelectBy_NodeID); ok { + return x.NodeID + } + return "" +} + +func (m *SelectBy) GetSlot() *SelectBySlot { + if x, ok := m.GetBy().(*SelectBy_Slot); ok { + return x.Slot + } + return nil +} + +func (m *SelectBy) GetDesiredState() TaskState { + if x, ok := m.GetBy().(*SelectBy_DesiredState); ok { + return x.DesiredState + } + return TaskStateNew +} + +func (m *SelectBy) GetRole() NodeRole { + if x, ok := m.GetBy().(*SelectBy_Role); ok { + return x.Role + } + return NodeRoleWorker +} + +func (m *SelectBy) GetMembership() NodeSpec_Membership { + if x, ok := m.GetBy().(*SelectBy_Membership); ok { + return x.Membership + } + return NodeMembershipPending +} + +func (m *SelectBy) GetReferencedNetworkID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedNetworkID); ok { + return x.ReferencedNetworkID + } + return "" +} + +func (m *SelectBy) GetReferencedSecretID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedSecretID); ok { + return x.ReferencedSecretID + } + return "" 
+} + +func (m *SelectBy) GetReferencedConfigID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedConfigID); ok { + return x.ReferencedConfigID + } + return "" +} + +func (m *SelectBy) GetKind() string { + if x, ok := m.GetBy().(*SelectBy_Kind); ok { + return x.Kind + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SelectBy) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SelectBy_OneofMarshaler, _SelectBy_OneofUnmarshaler, _SelectBy_OneofSizer, []interface{}{ + (*SelectBy_ID)(nil), + (*SelectBy_IDPrefix)(nil), + (*SelectBy_Name)(nil), + (*SelectBy_NamePrefix)(nil), + (*SelectBy_Custom)(nil), + (*SelectBy_CustomPrefix)(nil), + (*SelectBy_ServiceID)(nil), + (*SelectBy_NodeID)(nil), + (*SelectBy_Slot)(nil), + (*SelectBy_DesiredState)(nil), + (*SelectBy_Role)(nil), + (*SelectBy_Membership)(nil), + (*SelectBy_ReferencedNetworkID)(nil), + (*SelectBy_ReferencedSecretID)(nil), + (*SelectBy_ReferencedConfigID)(nil), + (*SelectBy_Kind)(nil), + } +} + +func _SelectBy_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ID) + case *SelectBy_IDPrefix: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.IDPrefix) + case *SelectBy_Name: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Name) + case *SelectBy_NamePrefix: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NamePrefix) + case *SelectBy_Custom: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case *SelectBy_CustomPrefix: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CustomPrefix); err != nil { + return err + } + case *SelectBy_ServiceID: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ServiceID) + case *SelectBy_NodeID: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NodeID) + case *SelectBy_Slot: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Slot); err != nil { + return err + } + case *SelectBy_DesiredState: + _ = b.EncodeVarint(10<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + _ = b.EncodeVarint(11<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Role)) + case *SelectBy_Membership: + _ = b.EncodeVarint(12<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedConfigID) + case *SelectBy_Kind: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Kind) + case nil: + default: + return fmt.Errorf("SelectBy.By has unexpected type %T", x) + } + return nil +} + +func _SelectBy_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SelectBy) + switch tag { + case 1: // By.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err 
:= b.DecodeStringBytes() + m.By = &SelectBy_ID{x} + return true, err + case 2: // By.id_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_IDPrefix{x} + return true, err + case 3: // By.name + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Name{x} + return true, err + case 4: // By.name_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NamePrefix{x} + return true, err + case 5: // By.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Custom{msg} + return true, err + case 6: // By.custom_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_CustomPrefix{msg} + return true, err + case 7: // By.service_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ServiceID{x} + return true, err + case 8: // By.node_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NodeID{x} + return true, err + case 9: // By.slot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectBySlot) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Slot{msg} + return true, err + case 10: // By.desired_state + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_DesiredState{TaskState(x)} + return true, err + case 11: // By.role + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Role{NodeRole(x)} + return true, err + case 12: // By.membership + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Membership{NodeSpec_Membership(x)} + return true, err + case 13: // By.referenced_network_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedNetworkID{x} + return true, err + case 14: // By.referenced_secret_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedSecretID{x} + return true, err + case 16: // By.referenced_config_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedConfigID{x} + return true, err + case 15: // By.kind + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Kind{x} + return true, err + default: + return false, nil + } +} + +func _SelectBy_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ID))) + n += len(x.ID) + case *SelectBy_IDPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.IDPrefix))) + n += len(x.IDPrefix) + case *SelectBy_Name: + n += proto.SizeVarint(3<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *SelectBy_NamePrefix: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NamePrefix))) + n += len(x.NamePrefix) + case *SelectBy_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_CustomPrefix: + s := proto.Size(x.CustomPrefix) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_ServiceID: + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ServiceID))) + n += len(x.ServiceID) + case *SelectBy_NodeID: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NodeID))) + n += len(x.NodeID) + case *SelectBy_Slot: + s := proto.Size(x.Slot) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SelectBy_DesiredState: + n += proto.SizeVarint(10<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Role)) + case *SelectBy_Membership: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + n += proto.SizeVarint(13<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedNetworkID))) + n += len(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + n += proto.SizeVarint(14<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedSecretID))) + n += len(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + n += proto.SizeVarint(16<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ReferencedConfigID))) + n += len(x.ReferencedConfigID) + case *SelectBy_Kind: + n += proto.SizeVarint(15<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Kind))) + n += len(x.Kind) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type WatchRequest struct { + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + Entries []*WatchRequest_WatchEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"` + // ResumeFrom provides a version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + ResumeFrom *Version `protobuf:"bytes,2,opt,name=resume_from,json=resumeFrom" json:"resume_from,omitempty"` + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + IncludeOldObject bool `protobuf:"varint,3,opt,name=include_old_object,json=includeOldObject,proto3" json:"include_old_object,omitempty"` +} + +func (m *WatchRequest) Reset() { *m = WatchRequest{} } +func (*WatchRequest) ProtoMessage() {} +func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4} } + +type WatchRequest_WatchEntry struct { + // Kind can contain a builtin type such as "node", "secret", etc.
or + // the kind specified by a custom-defined object. + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + Action WatchActionKind `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + Filters []*SelectBy `protobuf:"bytes,3,rep,name=filters" json:"filters,omitempty"` +} + +func (m *WatchRequest_WatchEntry) Reset() { *m = WatchRequest_WatchEntry{} } +func (*WatchRequest_WatchEntry) ProtoMessage() {} +func (*WatchRequest_WatchEntry) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4, 0} } + +// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started. +type WatchMessage struct { + Events []*WatchMessage_Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"` + // Index versions this change to the data store. It can be used to + // resume the watch from this point. + Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` +} + +func (m *WatchMessage) Reset() { *m = WatchMessage{} } +func (*WatchMessage) ProtoMessage() {} +func (*WatchMessage) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5} } + +type WatchMessage_Event struct { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + Action WatchActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"` + // Matched object + Object *Object `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. 
+ OldObject *Object `protobuf:"bytes,3,opt,name=old_object,json=oldObject" json:"old_object,omitempty"` +} + +func (m *WatchMessage_Event) Reset() { *m = WatchMessage_Event{} } +func (*WatchMessage_Event) ProtoMessage() {} +func (*WatchMessage_Event) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5, 0} } + +func init() { + proto.RegisterType((*Object)(nil), "docker.swarmkit.v1.Object") + proto.RegisterType((*SelectBySlot)(nil), "docker.swarmkit.v1.SelectBySlot") + proto.RegisterType((*SelectByCustom)(nil), "docker.swarmkit.v1.SelectByCustom") + proto.RegisterType((*SelectBy)(nil), "docker.swarmkit.v1.SelectBy") + proto.RegisterType((*WatchRequest)(nil), "docker.swarmkit.v1.WatchRequest") + proto.RegisterType((*WatchRequest_WatchEntry)(nil), "docker.swarmkit.v1.WatchRequest.WatchEntry") + proto.RegisterType((*WatchMessage)(nil), "docker.swarmkit.v1.WatchMessage") + proto.RegisterType((*WatchMessage_Event)(nil), "docker.swarmkit.v1.WatchMessage.Event") + proto.RegisterEnum("docker.swarmkit.v1.WatchActionKind", WatchActionKind_name, WatchActionKind_value) +} + +type authenticatedWrapperWatchServer struct { + local WatchServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperWatchServer(local WatchServer, authorize func(context.Context, []string) error) WatchServer { + return &authenticatedWrapperWatchServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.Watch(r, stream) +} + +func (m *Object) Copy() *Object { + if m == nil { + return nil + } + o := &Object{} + o.CopyFrom(m) + return o +} + +func (m *Object) CopyFrom(src interface{}) { + + o := src.(*Object) + *m = *o + if o.Object != nil { + switch o.Object.(type) { + case *Object_Node: + v := Object_Node{ + Node: &Node{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Node, o.GetNode()) + m.Object = &v + case *Object_Service: + v := Object_Service{ + Service: &Service{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Service, o.GetService()) + m.Object = &v + case *Object_Network: + v := Object_Network{ + Network: &Network{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Network, o.GetNetwork()) + m.Object = &v + case *Object_Task: + v := Object_Task{ + Task: &Task{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Task, o.GetTask()) + m.Object = &v + case *Object_Cluster: + v := Object_Cluster{ + Cluster: &Cluster{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Object = &v + case *Object_Secret: + v := Object_Secret{ + Secret: &Secret{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Secret, o.GetSecret()) + m.Object = &v + case *Object_Resource: + v := Object_Resource{ + Resource: &Resource{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Resource, o.GetResource()) + m.Object = &v + case *Object_Extension: + v := Object_Extension{ + Extension: &Extension{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Extension, o.GetExtension()) + m.Object = &v + case *Object_Config: + v := Object_Config{ + Config: &Config{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Config, o.GetConfig()) + m.Object = &v + } + } + +} + +func (m *SelectBySlot) Copy() *SelectBySlot { + if m == nil { + return nil + } + o := &SelectBySlot{} + o.CopyFrom(m) + return o +} + +func (m *SelectBySlot) CopyFrom(src 
interface{}) { + + o := src.(*SelectBySlot) + *m = *o +} + +func (m *SelectByCustom) Copy() *SelectByCustom { + if m == nil { + return nil + } + o := &SelectByCustom{} + o.CopyFrom(m) + return o +} + +func (m *SelectByCustom) CopyFrom(src interface{}) { + + o := src.(*SelectByCustom) + *m = *o +} + +func (m *SelectBy) Copy() *SelectBy { + if m == nil { + return nil + } + o := &SelectBy{} + o.CopyFrom(m) + return o +} + +func (m *SelectBy) CopyFrom(src interface{}) { + + o := src.(*SelectBy) + *m = *o + if o.By != nil { + switch o.By.(type) { + case *SelectBy_ID: + v := SelectBy_ID{ + ID: o.GetID(), + } + m.By = &v + case *SelectBy_IDPrefix: + v := SelectBy_IDPrefix{ + IDPrefix: o.GetIDPrefix(), + } + m.By = &v + case *SelectBy_Name: + v := SelectBy_Name{ + Name: o.GetName(), + } + m.By = &v + case *SelectBy_NamePrefix: + v := SelectBy_NamePrefix{ + NamePrefix: o.GetNamePrefix(), + } + m.By = &v + case *SelectBy_Custom: + v := SelectBy_Custom{ + Custom: &SelectByCustom{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Custom, o.GetCustom()) + m.By = &v + case *SelectBy_CustomPrefix: + v := SelectBy_CustomPrefix{ + CustomPrefix: &SelectByCustom{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) + m.By = &v + case *SelectBy_ServiceID: + v := SelectBy_ServiceID{ + ServiceID: o.GetServiceID(), + } + m.By = &v + case *SelectBy_NodeID: + v := SelectBy_NodeID{ + NodeID: o.GetNodeID(), + } + m.By = &v + case *SelectBy_Slot: + v := SelectBy_Slot{ + Slot: &SelectBySlot{}, + } + github_com_docker_swarmkit_api_deepcopy.Copy(v.Slot, o.GetSlot()) + m.By = &v + case *SelectBy_DesiredState: + v := SelectBy_DesiredState{ + DesiredState: o.GetDesiredState(), + } + m.By = &v + case *SelectBy_Role: + v := SelectBy_Role{ + Role: o.GetRole(), + } + m.By = &v + case *SelectBy_Membership: + v := SelectBy_Membership{ + Membership: o.GetMembership(), + } + m.By = &v + case *SelectBy_ReferencedNetworkID: + v := SelectBy_ReferencedNetworkID{ + ReferencedNetworkID: o.GetReferencedNetworkID(), + } + m.By = &v + case *SelectBy_ReferencedSecretID: + v := SelectBy_ReferencedSecretID{ + ReferencedSecretID: o.GetReferencedSecretID(), + } + m.By = &v + case *SelectBy_ReferencedConfigID: + v := SelectBy_ReferencedConfigID{ + ReferencedConfigID: o.GetReferencedConfigID(), + } + m.By = &v + case *SelectBy_Kind: + v := SelectBy_Kind{ + Kind: o.GetKind(), + } + m.By = &v + } + } + +} + +func (m *WatchRequest) Copy() *WatchRequest { + if m == nil { + return nil + } + o := &WatchRequest{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest) CopyFrom(src interface{}) { + + o := src.(*WatchRequest) + *m = *o + if o.Entries != nil { + m.Entries = make([]*WatchRequest_WatchEntry, len(o.Entries)) + for i := range m.Entries { + m.Entries[i] = &WatchRequest_WatchEntry{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Entries[i], o.Entries[i]) + } + } + + if o.ResumeFrom != nil { + m.ResumeFrom = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) + } +} + +func (m *WatchRequest_WatchEntry) Copy() *WatchRequest_WatchEntry { + if m == nil { + return nil + } + o := &WatchRequest_WatchEntry{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest_WatchEntry) CopyFrom(src interface{}) { + + o := src.(*WatchRequest_WatchEntry) + *m = *o + if o.Filters != nil { + m.Filters = make([]*SelectBy, len(o.Filters)) + for i := range m.Filters { + m.Filters[i] = &SelectBy{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Filters[i], o.Filters[i]) + } + } + +} + +func (m 
*WatchMessage) Copy() *WatchMessage { + if m == nil { + return nil + } + o := &WatchMessage{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage) CopyFrom(src interface{}) { + + o := src.(*WatchMessage) + *m = *o + if o.Events != nil { + m.Events = make([]*WatchMessage_Event, len(o.Events)) + for i := range m.Events { + m.Events[i] = &WatchMessage_Event{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Events[i], o.Events[i]) + } + } + + if o.Version != nil { + m.Version = &Version{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Version, o.Version) + } +} + +func (m *WatchMessage_Event) Copy() *WatchMessage_Event { + if m == nil { + return nil + } + o := &WatchMessage_Event{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage_Event) CopyFrom(src interface{}) { + + o := src.(*WatchMessage_Event) + *m = *o + if o.Object != nil { + m.Object = &Object{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.Object, o.Object) + } + if o.OldObject != nil { + m.OldObject = &Object{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.OldObject, o.OldObject) + } +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Watch service + +type WatchClient interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Watch_WatchClient interface { + Recv() (*WatchMessage, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Recv() (*WatchMessage, error) { + m := new(WatchMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. 
+ Watch(*WatchRequest, Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WatchServer).Watch(m, &watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchMessage) error + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/watch.proto", +} + +func (m *Object) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Object != nil { + nn1, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Object_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *Object_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Service.Size())) + n3, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Object_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Network.Size())) + n4, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Object_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Task.Size())) + n5, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Object_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Cluster.Size())) + n6, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Object_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Object_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Resource.Size())) + n8, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *Object_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] 
= 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Extension.Size())) + n9, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *Object_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Config.Size())) + n10, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *SelectBySlot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBySlot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot)) + } + return i, nil +} + +func (m *SelectByCustom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectByCustom) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Index) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *SelectBy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.By != nil { + nn11, err := m.By.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn11 + } + return i, nil +} + +func (m *SelectBy_ID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + return i, nil +} +func (m *SelectBy_IDPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.IDPrefix))) + i += copy(dAtA[i:], m.IDPrefix) + return i, nil +} +func (m *SelectBy_Name) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} +func (m *SelectBy_NamePrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NamePrefix))) + i += copy(dAtA[i:], m.NamePrefix) + return i, nil +} +func (m *SelectBy_Custom) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Custom != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Custom.Size())) + n12, err := m.Custom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *SelectBy_CustomPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CustomPrefix != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.CustomPrefix.Size())) + n13, err := 
m.CustomPrefix.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *SelectBy_ServiceID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + return i, nil +} +func (m *SelectBy_NodeID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + return i, nil +} +func (m *SelectBy_Slot) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Slot != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot.Size())) + n14, err := m.Slot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *SelectBy_DesiredState) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x50 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.DesiredState)) + return i, nil +} +func (m *SelectBy_Role) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x58 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Role)) + return i, nil +} +func (m *SelectBy_Membership) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x60 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Membership)) + return i, nil +} +func (m *SelectBy_ReferencedNetworkID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x6a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedNetworkID))) + i += copy(dAtA[i:], m.ReferencedNetworkID) + return i, nil +} +func (m *SelectBy_ReferencedSecretID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x72 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedSecretID))) + i += copy(dAtA[i:], m.ReferencedSecretID) + return i, nil +} +func (m *SelectBy_Kind) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x7a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} +func (m *SelectBy_ReferencedConfigID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedConfigID))) + i += copy(dAtA[i:], m.ReferencedConfigID) + return i, nil +} +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ResumeFrom != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.ResumeFrom.Size())) + n15, err := m.ResumeFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.IncludeOldObject { + dAtA[i] = 0x18 + i++ + if m.IncludeOldObject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchRequest_WatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest_WatchEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, msg := range m.Filters { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WatchMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Version != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Version.Size())) + n16, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *WatchMessage_Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage_Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Object.Size())) + n17, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.OldObject != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.OldObject.Size())) + n18, err := m.OldObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func encodeFixed64Watch(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Watch(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintWatch(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyWatchServer struct { + local WatchServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) WatchServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + s, ok := transport.StreamFromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := s.ServerTransport().RemoteAddr().String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, 
status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyWatchServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyWatchServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyWatchServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Watch_WatchServerWrapper struct { + Watch_WatchServer + ctx context.Context +} + +func (s Watch_WatchServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Watch_WatchServerWrapper{ + Watch_WatchServer: stream, + ctx: ctx, + } + return p.local.Watch(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewWatchClient(conn).Watch(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *Object) Size() (n int) { + var l int + _ = l + if m.Object != nil { + n += m.Object.Size() + } + return n +} + +func (m *Object_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != 
nil { + l = m.Secret.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBySlot) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovWatch(uint64(m.Slot)) + } + return n +} + +func (m *SelectByCustom) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Index) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *SelectBy) Size() (n int) { + var l int + _ = l + if m.By != nil { + n += m.By.Size() + } + return n +} + +func (m *SelectBy_ID) Size() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_IDPrefix) Size() (n int) { + var l int + _ = l + l = len(m.IDPrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Name) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NamePrefix) Size() (n int) { + var l int + _ = l + l = len(m.NamePrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Custom) Size() (n int) { + var l int + _ = l + if m.Custom != nil { + l = m.Custom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_CustomPrefix) Size() (n int) { + var l int + _ = l + if m.CustomPrefix != nil { + l = m.CustomPrefix.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_ServiceID) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NodeID) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Slot) Size() (n int) { + var l int + _ = l + if m.Slot != nil { + l = m.Slot.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_DesiredState) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.DesiredState)) + return n +} +func (m *SelectBy_Role) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Role)) + return n +} +func (m *SelectBy_Membership) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Membership)) + return n +} +func (m *SelectBy_ReferencedNetworkID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedNetworkID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedSecretID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedSecretID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Kind) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedConfigID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedConfigID) + n += 2 + l + sovWatch(uint64(l)) + return n +} +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if len(m.Entries) > 0 { + for 
_, e := range m.Entries { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.ResumeFrom != nil { + l = m.ResumeFrom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.IncludeOldObject { + n += 2 + } + return n +} + +func (m *WatchRequest_WatchEntry) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + return n +} + +func (m *WatchMessage) Size() (n int) { + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *WatchMessage_Event) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.OldObject != nil { + l = m.OldObject.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func sovWatch(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWatch(x uint64) (n int) { + return sovWatch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Object) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object{`, + `Object:` + fmt.Sprintf("%v", this.Object) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", 
this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBySlot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBySlot{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `}`, + }, "") + return s +} +func (this *SelectByCustom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectByCustom{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy{`, + `By:` + fmt.Sprintf("%v", this.By) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ID{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_IDPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_IDPrefix{`, + `IDPrefix:` + fmt.Sprintf("%v", this.IDPrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Name) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Name{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NamePrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NamePrefix{`, + `NamePrefix:` + fmt.Sprintf("%v", this.NamePrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Custom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Custom{`, + `Custom:` + strings.Replace(fmt.Sprintf("%v", this.Custom), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_CustomPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_CustomPrefix{`, + `CustomPrefix:` + strings.Replace(fmt.Sprintf("%v", this.CustomPrefix), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ServiceID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ServiceID{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NodeID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NodeID{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Slot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Slot{`, + `Slot:` + strings.Replace(fmt.Sprintf("%v", this.Slot), "SelectBySlot", "SelectBySlot", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_DesiredState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_DesiredState{`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Role) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Role{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Membership) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Membership{`, + `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedNetworkID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedNetworkID{`, + `ReferencedNetworkID:` + fmt.Sprintf("%v", this.ReferencedNetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedSecretID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedSecretID{`, + `ReferencedSecretID:` + fmt.Sprintf("%v", this.ReferencedSecretID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Kind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Kind{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedConfigID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedConfigID{`, + `ReferencedConfigID:` + fmt.Sprintf("%v", this.ReferencedConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest{`, + `Entries:` + strings.Replace(fmt.Sprintf("%v", this.Entries), "WatchRequest_WatchEntry", "WatchRequest_WatchEntry", 1) + `,`, + `ResumeFrom:` + strings.Replace(fmt.Sprintf("%v", this.ResumeFrom), "Version", "Version", 1) + `,`, + `IncludeOldObject:` + fmt.Sprintf("%v", this.IncludeOldObject) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest_WatchEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest_WatchEntry{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "SelectBy", "SelectBy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage{`, + `Events:` + strings.Replace(fmt.Sprintf("%v", this.Events), "WatchMessage_Event", "WatchMessage_Event", 1) + `,`, + `Version:` + strings.Replace(fmt.Sprintf("%v", this.Version), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage_Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage_Event{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "Object", "Object", 1) + `,`, + `OldObject:` + strings.Replace(fmt.Sprintf("%v", this.OldObject), "Object", "Object", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringWatch(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Node{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Service{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Network{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = 
&Object_Cluster{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Secret{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Resource{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Extension{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBySlot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBySlot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: SelectBySlot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectByCustom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectByCustom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectByCustom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Index = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectBy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_IDPrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Name{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NamePrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Custom{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_CustomPrefix{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ServiceID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NodeID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectBySlot{} 
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Slot{v} + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_DesiredState{v} + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Role{v} + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Membership{v} + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedNetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedNetworkID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedSecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedSecretID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Kind{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedConfigID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &WatchRequest_WatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResumeFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResumeFrom == nil { + m.ResumeFrom = &Version{} + } + if err := m.ResumeFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeOldObject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeOldObject = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*WatchRequest_WatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &SelectBy{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &WatchMessage_Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &Version{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage_Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &Object{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
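// (Annotation, not part of the generated code: the loop above is the standard
// protobuf base-128 varint decode. Each byte contributes its low 7 bits and
// the high bit marks continuation; a negative msglen can only arise from
// integer overflow, which the length check below rejects.)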
if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OldObject == nil { + m.OldObject = &Object{} + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWatch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWatch + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWatch(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWatch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWatch = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/watch.proto", fileDescriptorWatch) } + +var fileDescriptorWatch = []byte{ + // 1186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5, + 0x1b, 0xc7, 0x75, 0x8a, 0x7c, 0x92, 0x1e, 0xdb, 0x89, 0x67, 0xe3, 0x24, 0xf7, 0xd3, 0x2f, 0xc8, + 0x42, 0x0c, 0x90, 0x49, 0x82, 0x0c, 0x26, 0x24, 0x03, 0x04, 0x66, 0x2c, 0x59, 0x8c, 0x44, 0xc6, + 0x2f, 0xb3, 0xb6, 0x93, 0x52, 0x73, 0xbe, 0x7b, 0xac, 0x1c, 0xba, 0xbb, 0x15, 0x7b, 0x27, 0x39, + 0xee, 0x28, 0x28, 0x98, 0xf4, 0xcc, 0xd0, 0xa4, 0x82, 0x9a, 0x86, 0x0e, 0xfe, 0x81, 0x0c, 0x15, + 0x25, 0x34, 0x1a, 0xa2, 0x92, 0x82, 0xbf, 0x80, 0x82, 0xd9, 0x97, 0xf3, 0x8b, 0x72, 0xb2, 0x49, + 0xa5, 0xbd, 0xbd, 0xcf, 0xf7, 0xd9, 0x67, 0x9f, 0xb7, 0x13, 0xdc, 0xec, 0x7a, 0xf1, 0xe3, 0xc1, + 0x5e, 0xcd, 0x61, 0xc1, 0xb2, 0xcb, 0x9c, 0x1e, 0xf2, 0xe5, 0xe8, 0xc0, 0xe6, 0x41, 0xcf, 0x8b, + 0x97, 0xed, 0xbe, 0xb7, 0x7c, 0x60, 0xc7, 
0xce, 0xe3, 0x5a, 0x9f, 0xb3, 0x98, 0x11, 0xa2, 0x80, + 0x5a, 0x02, 0xd4, 0x86, 0xef, 0x95, 0xce, 0xd3, 0x47, 0x7d, 0x74, 0x22, 0xa5, 0x2f, 0xdd, 0x3e, + 0x87, 0x65, 0x7b, 0x5f, 0xa0, 0x13, 0x27, 0xf4, 0x79, 0x96, 0xe3, 0xc3, 0x3e, 0x26, 0xec, 0x62, + 0x97, 0x75, 0x99, 0x5c, 0x2e, 0x8b, 0x95, 0xde, 0xbd, 0x77, 0x86, 0x05, 0x49, 0xec, 0x0d, 0xf6, + 0x97, 0xfb, 0xfe, 0xa0, 0xeb, 0x85, 0xfa, 0x47, 0x09, 0xab, 0x5f, 0xe7, 0xc0, 0xdc, 0x94, 0xce, + 0x90, 0x1a, 0xe4, 0x42, 0xe6, 0xa2, 0x65, 0x54, 0x8c, 0x1b, 0xb3, 0x2b, 0x56, 0xed, 0xe5, 0x10, + 0xd4, 0x36, 0x98, 0x8b, 0xad, 0x0c, 0x95, 0x1c, 0xb9, 0x07, 0xf9, 0x08, 0xf9, 0xd0, 0x73, 0xd0, + 0xca, 0x4a, 0xc9, 0xff, 0xd3, 0x24, 0xdb, 0x0a, 0x69, 0x65, 0x68, 0x42, 0x0b, 0x61, 0x88, 0xf1, + 0x01, 0xe3, 0x3d, 0xeb, 0xc2, 0x74, 0xe1, 0x86, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3, 0xd8, 0x8e, + 0x7a, 0x56, 0x6e, 0xba, 0x87, 0x3b, 0x76, 0x24, 0x24, 0x92, 0x13, 0x07, 0x39, 0xfe, 0x20, 0x8a, + 0x91, 0x5b, 0x33, 0xd3, 0x0f, 0x6a, 0x28, 0x44, 0x1c, 0xa4, 0x69, 0x72, 0x07, 0xcc, 0x08, 0x1d, + 0x8e, 0xb1, 0x65, 0x4a, 0x5d, 0x29, 0xfd, 0x66, 0x82, 0x68, 0x65, 0xa8, 0x66, 0xc9, 0x47, 0x50, + 0xe0, 0x18, 0xb1, 0x01, 0x77, 0xd0, 0xca, 0x4b, 0xdd, 0xf5, 0x34, 0x1d, 0xd5, 0x4c, 0x2b, 0x43, + 0x8f, 0x78, 0xf2, 0x09, 0x14, 0xf1, 0x49, 0x8c, 0x61, 0xe4, 0xb1, 0xd0, 0x2a, 0x48, 0xf1, 0x6b, + 0x69, 0xe2, 0x66, 0x02, 0xb5, 0x32, 0xf4, 0x58, 0x21, 0x1c, 0x76, 0x58, 0xb8, 0xef, 0x75, 0xad, + 0xe2, 0x74, 0x87, 0x1b, 0x92, 0x10, 0x0e, 0x2b, 0xb6, 0x5e, 0x48, 0x72, 0x5f, 0xdd, 0x82, 0xb9, + 0x6d, 0xf4, 0xd1, 0x89, 0xeb, 0x87, 0xdb, 0x3e, 0x8b, 0xc9, 0x6d, 0x00, 0x9d, 0xad, 0x8e, 0xe7, + 0xca, 0x8a, 0x28, 0xd6, 0xe7, 0xc7, 0xa3, 0xa5, 0xa2, 0x4e, 0x67, 0x7b, 0x8d, 0x16, 0x35, 0xd0, + 0x76, 0x09, 0x81, 0x5c, 0xe4, 0xb3, 0x58, 0x96, 0x41, 0x8e, 0xca, 0x75, 0x75, 0x0b, 0x2e, 0x26, + 0x16, 0x1b, 0x83, 0x28, 0x66, 0x81, 0xa0, 0x7a, 0x5e, 0xa8, 0xad, 0x51, 0xb9, 0x26, 0x8b, 0x30, + 0xe3, 0x85, 0x2e, 0x3e, 0x91, 0xd2, 0x22, 0x55, 0x0f, 0x62, 0x77, 0x68, 0xfb, 0x03, 0x94, 0xe5, + 0x51, 0xa4, 0xea, 0xa1, 0xfa, 0x97, 0x09, 0x85, 0xc4, 0x24, 0xb1, 0x20, 0x7b, 0xe4, 0x98, 0x39, + 0x1e, 0x2d, 0x65, 0xdb, 0x6b, 0xad, 0x0c, 0xcd, 0x7a, 0x2e, 0xb9, 0x05, 0x45, 0xcf, 0xed, 0xf4, + 0x39, 0xee, 0x7b, 0xda, 0x6c, 0x7d, 0x6e, 0x3c, 0x5a, 0x2a, 0xb4, 0xd7, 0xb6, 0xe4, 0x9e, 0x08, + 0xbb, 0xe7, 0xaa, 0x35, 0x59, 0x84, 0x5c, 0x68, 0x07, 0xfa, 0x20, 0x59, 0xd9, 0x76, 0x80, 0xe4, + 0x75, 0x98, 0x15, 0xbf, 0x89, 0x91, 0x9c, 0x7e, 0x09, 0x62, 0x53, 0x0b, 0xef, 0x83, 0xe9, 0xc8, + 0x6b, 0xe9, 0xca, 0xaa, 0xa6, 0x57, 0xc8, 0xc9, 0x00, 0xc8, 0xc0, 0xab, 0x50, 0xb4, 0x61, 0x5e, + 0xad, 0x92, 0x23, 0xcc, 0x57, 0x30, 0x32, 0xa7, 0xa4, 0xda, 0x91, 0xda, 0xa9, 0x4c, 0xe5, 0x53, + 0x32, 0x25, 0x2a, 0xe5, 0x38, 0x57, 0x6f, 0x42, 0x5e, 0x74, 0xaf, 0x80, 0x0b, 0x12, 0x86, 0xf1, + 0x68, 0xc9, 0x14, 0x8d, 0x2d, 0x49, 0x53, 0xbc, 0x6c, 0xbb, 0xe4, 0xae, 0x4e, 0xa9, 0x2a, 0xa7, + 0xca, 0x59, 0x8e, 0x89, 0x82, 0x11, 0xa1, 0x13, 0x3c, 0x59, 0x83, 0x79, 0x17, 0x23, 0x8f, 0xa3, + 0xdb, 0x89, 0x62, 0x3b, 0x46, 0x0b, 0x2a, 0xc6, 0x8d, 0x8b, 0xe9, 0xb5, 0x2c, 0x7a, 0x75, 0x5b, + 0x40, 0xe2, 0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x0a, 0xe4, 0x38, 0xf3, 0xd1, 0x9a, 0x95, 0xe2, 0xeb, + 0xd3, 0x46, 0x11, 0x65, 0xbe, 0x1c, 0x47, 0x82, 0x25, 0x6d, 0x80, 0x00, 0x83, 0x3d, 0xe4, 0xd1, + 0x63, 0xaf, 0x6f, 0xcd, 0x49, 0xe5, 0xdb, 0xd3, 0x94, 0xdb, 0x7d, 0x74, 0x6a, 0xeb, 0x47, 0xb8, + 0x48, 0xee, 0xb1, 0x98, 0xac, 0xc3, 0x15, 0x8e, 0xfb, 0xc8, 0x31, 0x74, 0xd0, 0xed, 0xe8, 0xe9, + 0x23, 0x22, 0x36, 0x2f, 0x23, 0x76, 0x6d, 0x3c, 0x5a, 0xba, 0x4c, 
0x8f, 0x00, 0x3d, 0xa8, 0x64, + 0xf8, 0x2e, 0xf3, 0x97, 0xb6, 0x5d, 0xf2, 0x39, 0x2c, 0x9e, 0x30, 0xa7, 0x86, 0x85, 0xb0, 0x76, + 0x51, 0x5a, 0xbb, 0x3a, 0x1e, 0x2d, 0x91, 0x63, 0x6b, 0x6a, 0xaa, 0x48, 0x63, 0x84, 0x4f, 0xee, + 0x8a, 0x86, 0x51, 0x4d, 0x74, 0x29, 0x29, 0x58, 0xd9, 0x46, 0xa7, 0x4f, 0x50, 0xdd, 0x2d, 0x4e, + 0x58, 0x48, 0x3b, 0x41, 0x8d, 0x81, 0xc9, 0x13, 0xf4, 0xae, 0x5b, 0xcf, 0x41, 0xb6, 0x7e, 0x58, + 0xfd, 0x23, 0x0b, 0x73, 0x8f, 0xc4, 0x07, 0x91, 0xe2, 0x97, 0x03, 0x8c, 0x62, 0xd2, 0x84, 0x3c, + 0x86, 0x31, 0xf7, 0x30, 0xb2, 0x8c, 0xca, 0x85, 0x1b, 0xb3, 0x2b, 0xb7, 0xd2, 0x62, 0x7b, 0x52, + 0xa2, 0x1e, 0x9a, 0x61, 0xcc, 0x0f, 0x69, 0xa2, 0x25, 0xf7, 0x61, 0x96, 0x63, 0x34, 0x08, 0xb0, + 0xb3, 0xcf, 0x59, 0x70, 0xd6, 0x87, 0xe3, 0x21, 0x72, 0x31, 0xda, 0x28, 0x28, 0xfe, 0x33, 0xce, + 0x02, 0x72, 0x1b, 0x88, 0x17, 0x3a, 0xfe, 0xc0, 0xc5, 0x0e, 0xf3, 0xdd, 0x8e, 0xfa, 0x8a, 0xca, + 0xe6, 0x2d, 0xd0, 0x05, 0xfd, 0x66, 0xd3, 0x77, 0xd5, 0x50, 0x2b, 0x7d, 0x6b, 0x00, 0x1c, 0xfb, + 0x90, 0x3a, 0x7f, 0x3e, 0x06, 0xd3, 0x76, 0x62, 0x31, 0x73, 0xb3, 0xb2, 0x60, 0xde, 0x98, 0x7a, + 0xa9, 0x55, 0x89, 0x3d, 0xf0, 0x42, 0x97, 0x6a, 0x09, 0xb9, 0x0b, 0xf9, 0x7d, 0xcf, 0x8f, 0x91, + 0x47, 0xd6, 0x05, 0x19, 0x92, 0xeb, 0x67, 0xb5, 0x09, 0x4d, 0xe0, 0xea, 0x2f, 0x49, 0x6c, 0xd7, + 0x31, 0x8a, 0xec, 0x2e, 0x92, 0x4f, 0xc1, 0xc4, 0x21, 0x86, 0x71, 0x12, 0xda, 0xb7, 0xa6, 0x7a, + 0xa1, 0x15, 0xb5, 0xa6, 0xc0, 0xa9, 0x56, 0x91, 0x0f, 0x20, 0x3f, 0x54, 0xd1, 0xfa, 0x2f, 0x01, + 0x4d, 0xd8, 0xd2, 0x4f, 0x06, 0xcc, 0x48, 0x43, 0x27, 0xc2, 0x60, 0xbc, 0x7a, 0x18, 0x56, 0xc0, + 0xd4, 0x89, 0xc8, 0x4e, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x0f, 0x01, 0x26, 0x12, 0x78, + 0xb6, 0xae, 0xc8, 0x92, 0xac, 0xde, 0xfc, 0xc7, 0x80, 0x4b, 0x13, 0xae, 0x90, 0x3b, 0xb0, 0xf8, + 0x68, 0x75, 0xa7, 0xd1, 0xea, 0xac, 0x36, 0x76, 0xda, 0x9b, 0x1b, 0x9d, 0xdd, 0x8d, 0x07, 0x1b, + 0x9b, 0x8f, 0x36, 0x16, 0x32, 0xa5, 0xd2, 0xd3, 0x67, 0x95, 0xab, 0x13, 0xf8, 0x6e, 0xd8, 0x0b, + 0xd9, 0x81, 0x70, 0xfc, 0xf2, 0x29, 0x55, 0x83, 0x36, 0x57, 0x77, 0x9a, 0x0b, 0x46, 0xe9, 0x7f, + 0x4f, 0x9f, 0x55, 0xae, 0x4c, 0x88, 0x1a, 0x1c, 0xd5, 0x64, 0x3a, 0xad, 0xd9, 0xdd, 0x5a, 0x13, + 0x9a, 0x6c, 0xaa, 0x66, 0xb7, 0xef, 0xa6, 0x69, 0x68, 0x73, 0x7d, 0xf3, 0x61, 0x73, 0x21, 0x97, + 0xaa, 0xa1, 0x18, 0xb0, 0x21, 0x96, 0xae, 0x7d, 0xf3, 0x7d, 0x39, 0xf3, 0xf3, 0x0f, 0xe5, 0xc9, + 0xab, 0xae, 0x04, 0x30, 0x23, 0xb7, 0x88, 0x9b, 0x2c, 0x2a, 0xe7, 0x35, 0x62, 0xa9, 0x72, 0x5e, + 0x3d, 0x55, 0xaf, 0xfc, 0xfa, 0xe3, 0xdf, 0xdf, 0x65, 0x2f, 0xc1, 0xbc, 0x24, 0xde, 0x09, 0xec, + 0xd0, 0xee, 0x22, 0x7f, 0xd7, 0xa8, 0x5b, 0xcf, 0x5f, 0x94, 0x33, 0xbf, 0xbf, 0x28, 0x67, 0xbe, + 0x1a, 0x97, 0x8d, 0xe7, 0xe3, 0xb2, 0xf1, 0xdb, 0xb8, 0x6c, 0xfc, 0x39, 0x2e, 0x1b, 0x7b, 0xa6, + 0xfc, 0x03, 0xf9, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x76, 0x89, 0xef, 0x57, 0x0b, + 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/log/context.go b/vendor/github.com/docker/swarmkit/log/context.go new file mode 100644 index 0000000000..ac4f848806 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/log/context.go @@ -0,0 +1,96 @@ +package log + +import ( + "path" + + "github.com/sirupsen/logrus" + "golang.org/x/net/context" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the the standard logger. 
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+	moduleKey struct{}
+)
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// WithFields returns a new context with the provided fields added to the logger.
+func WithFields(ctx context.Context, fields logrus.Fields) context.Context {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		logger = L
+	}
+	return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields))
+}
+
+// WithField is a convenience wrapper around WithFields.
+func WithField(ctx context.Context, key, value string) context.Context {
+	return WithFields(ctx, logrus.Fields{key: value})
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		return L
+	}
+
+	return logger.(*logrus.Entry)
+}
+
+// WithModule adds the module to the context, appending it with a slash if a
+// module already exists. A module is just a rough grouping defined by the
+// call tree for a given context.
+//
+// As an example, we might have a "node" module already part of a context. If
+// this function is called with "tls", the new value of module will be
+// "node/tls".
+//
+// Modules represent the call path. If the new module and the last module are
+// the same, a new module entry will not be created. If the new module and an
+// older module are the same but separated by other modules, the cycle will be
+// represented by the module path.
+func WithModule(ctx context.Context, module string) context.Context {
+	parent := GetModulePath(ctx)
+
+	if parent != "" {
+		// don't re-append module when module is the same.
+		if path.Base(parent) == module {
+			return ctx
+		}
+
+		module = path.Join(parent, module)
+	}
+
+	ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module))
+	return context.WithValue(ctx, moduleKey{}, module)
+}
+
+// GetModulePath returns the module path for the provided context. If no module
+// is set, an empty string is returned.
+func GetModulePath(ctx context.Context) string {
+	module := ctx.Value(moduleKey{})
+	if module == nil {
+		return ""
+	}
+
+	return module.(string)
+}
diff --git a/vendor/github.com/docker/swarmkit/log/grpc.go b/vendor/github.com/docker/swarmkit/log/grpc.go
new file mode 100644
index 0000000000..4978d49730
--- /dev/null
+++ b/vendor/github.com/docker/swarmkit/log/grpc.go
@@ -0,0 +1,13 @@
+package log
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+)
+
+func init() {
+	ctx := WithModule(context.Background(), "grpc")
+
+	// completely replace the grpc logger with the logrus logger.
+	grpclog.SetLogger(G(ctx))
+}
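// The log/context.go helpers above compose as follows. A minimal, hypothetical
// usage sketch, not part of the vendored patch; the import block mirrors the
// vendored file, and the module and field names are made up:
//
//	import (
//		"github.com/docker/swarmkit/log"
//		"github.com/sirupsen/logrus"
//		"golang.org/x/net/context"
//	)
//
//	func example() {
//		ctx := log.WithModule(context.Background(), "node")
//		ctx = log.WithModule(ctx, "tls") // GetModulePath(ctx) is now "node/tls"
//		ctx = log.WithFields(ctx, logrus.Fields{"node.id": "abc123"})
//		log.G(ctx).Info("bootstrapping TLS") // logs with module and fields attached
//	}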
diff --git a/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go b/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go
new file mode 100644
index 0000000000..89e7918a3d
--- /dev/null
+++ b/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go
@@ -0,0 +1,20 @@
+package raftselector
+
+import (
+	"errors"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/grpc"
+)
+
+// ConnProvider is a basic interface for connecting the API package (the raft
+// proxy in particular) to the manager/state/raft package without import
+// cycles. It provides a single method for obtaining a connection to the leader.
+type ConnProvider interface {
+	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
+}
+
+// ErrIsLeader is returned from the LeaderConn method when the current machine
+// is the leader. It's just a shim between packages to avoid import cycles.
+var ErrIsLeader = errors.New("current node is leader")
diff --git a/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go b/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go
new file mode 100644
index 0000000000..daea795b36
--- /dev/null
+++ b/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go
@@ -0,0 +1,11 @@
+package plugin
+
+import (
+	"github.com/gogo/protobuf/proto"
+	google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+)
+
+// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor.
+func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool {
+	return proto.GetBoolExtension(options, E_Deepcopy, true)
+}
diff --git a/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go b/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
new file mode 100644
index 0000000000..549cebaeaa
--- /dev/null
+++ b/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
@@ -0,0 +1,1246 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/docker/swarmkit/protobuf/plugin/plugin.proto
+// DO NOT EDIT!
+
+/*
+	Package plugin is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/docker/swarmkit/protobuf/plugin/plugin.proto
+
+	It has these top-level messages:
+		WatchSelectors
+		StoreObject
+		TLSAuthorization
+*/
+package plugin
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type WatchSelectors struct { + // supported by all object types + ID *bool `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + IDPrefix *bool `protobuf:"varint,2,opt,name=id_prefix,json=idPrefix" json:"id_prefix,omitempty"` + Name *bool `protobuf:"varint,3,opt,name=name" json:"name,omitempty"` + NamePrefix *bool `protobuf:"varint,4,opt,name=name_prefix,json=namePrefix" json:"name_prefix,omitempty"` + Custom *bool `protobuf:"varint,5,opt,name=custom" json:"custom,omitempty"` + CustomPrefix *bool `protobuf:"varint,6,opt,name=custom_prefix,json=customPrefix" json:"custom_prefix,omitempty"` + // supported by tasks only + ServiceID *bool `protobuf:"varint,7,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` + NodeID *bool `protobuf:"varint,8,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + Slot *bool `protobuf:"varint,9,opt,name=slot" json:"slot,omitempty"` + DesiredState *bool `protobuf:"varint,10,opt,name=desired_state,json=desiredState" json:"desired_state,omitempty"` + // supported by nodes only + Role *bool `protobuf:"varint,11,opt,name=role" json:"role,omitempty"` + Membership *bool `protobuf:"varint,12,opt,name=membership" json:"membership,omitempty"` + // supported by: resource + Kind *bool `protobuf:"varint,13,opt,name=kind" json:"kind,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WatchSelectors) Reset() { *m = WatchSelectors{} } +func (*WatchSelectors) ProtoMessage() {} +func (*WatchSelectors) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +type StoreObject struct { + WatchSelectors *WatchSelectors `protobuf:"bytes,1,req,name=watch_selectors,json=watchSelectors" json:"watch_selectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StoreObject) Reset() { *m = StoreObject{} } +func (*StoreObject) ProtoMessage() {} +func (*StoreObject) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +type TLSAuthorization struct { + // Roles contains the acceptable TLS OU roles for the handler. + Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. This would fail at codegen time. 
+ Insecure *bool `protobuf:"varint,2,opt,name=insecure" json:"insecure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} } +func (*TLSAuthorization) ProtoMessage() {} +func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{2} } + +var E_Deepcopy = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70000, + Name: "docker.protobuf.plugin.deepcopy", + Tag: "varint,70000,opt,name=deepcopy,def=1", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_StoreObject = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*StoreObject)(nil), + Field: 70001, + Name: "docker.protobuf.plugin.store_object", + Tag: "bytes,70001,opt,name=store_object,json=storeObject", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_TlsAuthorization = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*TLSAuthorization)(nil), + Field: 73626345, + Name: "docker.protobuf.plugin.tls_authorization", + Tag: "bytes,73626345,opt,name=tls_authorization,json=tlsAuthorization", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +func init() { + proto.RegisterType((*WatchSelectors)(nil), "docker.protobuf.plugin.WatchSelectors") + proto.RegisterType((*StoreObject)(nil), "docker.protobuf.plugin.StoreObject") + proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization") + proto.RegisterExtension(E_Deepcopy) + proto.RegisterExtension(E_StoreObject) + proto.RegisterExtension(E_TlsAuthorization) +} + +func (m *WatchSelectors) Copy() *WatchSelectors { + if m == nil { + return nil + } + o := &WatchSelectors{} + o.CopyFrom(m) + return o +} + +func (m *WatchSelectors) CopyFrom(src interface{}) { + + o := src.(*WatchSelectors) + *m = *o +} + +func (m *StoreObject) Copy() *StoreObject { + if m == nil { + return nil + } + o := &StoreObject{} + o.CopyFrom(m) + return o +} + +func (m *StoreObject) CopyFrom(src interface{}) { + + o := src.(*StoreObject) + *m = *o + if o.WatchSelectors != nil { + m.WatchSelectors = &WatchSelectors{} + github_com_docker_swarmkit_api_deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) + } +} + +func (m *TLSAuthorization) Copy() *TLSAuthorization { + if m == nil { + return nil + } + o := &TLSAuthorization{} + o.CopyFrom(m) + return o +} + +func (m *TLSAuthorization) CopyFrom(src interface{}) { + + o := src.(*TLSAuthorization) + *m = *o + if o.Roles != nil { + m.Roles = make([]string, len(o.Roles)) + copy(m.Roles, o.Roles) + } + +} + +func (m *WatchSelectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchSelectors) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != nil { + dAtA[i] = 0x8 + i++ + if *m.ID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IDPrefix != nil { + dAtA[i] = 0x10 + i++ + if *m.IDPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Name != nil { + dAtA[i] = 0x18 + i++ + if *m.Name { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NamePrefix != nil { + dAtA[i] = 0x20 + i++ + if *m.NamePrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Custom != nil { + dAtA[i] = 0x28 + i++ + if *m.Custom { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.CustomPrefix != nil { + dAtA[i] = 0x30 + i++ + if *m.CustomPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ServiceID != nil { + dAtA[i] = 0x38 + i++ + if *m.ServiceID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NodeID != nil { + dAtA[i] = 0x40 + i++ + if *m.NodeID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Slot != nil { + dAtA[i] = 0x48 + i++ + if *m.Slot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.DesiredState != nil { + dAtA[i] = 0x50 + i++ + if *m.DesiredState { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Role != nil { + dAtA[i] = 0x58 + i++ + if *m.Role { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Membership != nil { + dAtA[i] = 0x60 + i++ + if *m.Membership { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x68 + i++ + if *m.Kind { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchSelectors == nil { + return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("watch_selectors") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(m.WatchSelectors.Size())) + n1, err := m.WatchSelectors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Insecure != nil { + dAtA[i] = 0x10 + i++ + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *WatchSelectors) Size() (n int) { + var l int + _ = l + if m.ID != nil { + n += 2 + } + if m.IDPrefix != nil { + n += 2 + } + if m.Name != nil { + n += 2 + } + if m.NamePrefix != nil { 
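// (Annotation, not part of the generated code: each set optional bool in this
// message costs exactly two bytes on the wire, one single-byte tag for field
// numbers below 16 plus one 0/1 value byte, hence the constant n += 2.)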
+ n += 2 + } + if m.Custom != nil { + n += 2 + } + if m.CustomPrefix != nil { + n += 2 + } + if m.ServiceID != nil { + n += 2 + } + if m.NodeID != nil { + n += 2 + } + if m.Slot != nil { + n += 2 + } + if m.DesiredState != nil { + n += 2 + } + if m.Role != nil { + n += 2 + } + if m.Membership != nil { + n += 2 + } + if m.Kind != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreObject) Size() (n int) { + var l int + _ = l + if m.WatchSelectors != nil { + l = m.WatchSelectors.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSAuthorization) Size() (n int) { + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Insecure != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WatchSelectors) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchSelectors{`, + `ID:` + valueToStringPlugin(this.ID) + `,`, + `IDPrefix:` + valueToStringPlugin(this.IDPrefix) + `,`, + `Name:` + valueToStringPlugin(this.Name) + `,`, + `NamePrefix:` + valueToStringPlugin(this.NamePrefix) + `,`, + `Custom:` + valueToStringPlugin(this.Custom) + `,`, + `CustomPrefix:` + valueToStringPlugin(this.CustomPrefix) + `,`, + `ServiceID:` + valueToStringPlugin(this.ServiceID) + `,`, + `NodeID:` + valueToStringPlugin(this.NodeID) + `,`, + `Slot:` + valueToStringPlugin(this.Slot) + `,`, + `DesiredState:` + valueToStringPlugin(this.DesiredState) + `,`, + `Role:` + valueToStringPlugin(this.Role) + `,`, + `Membership:` + valueToStringPlugin(this.Membership) + `,`, + `Kind:` + valueToStringPlugin(this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StoreObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreObject{`, + `WatchSelectors:` + strings.Replace(fmt.Sprintf("%v", this.WatchSelectors), "WatchSelectors", "WatchSelectors", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *TLSAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSAuthorization{`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `Insecure:` + valueToStringPlugin(this.Insecure) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlugin(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WatchSelectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchSelectors: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchSelectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ID = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IDPrefix = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Name = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NamePrefix = &b + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Custom = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.CustomPrefix = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ServiceID = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NodeID = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Slot = &b + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DesiredState = &b + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Role = &b + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Membership = &b + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreObject) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WatchSelectors == nil { + m.WatchSelectors = &WatchSelectors{} + } + if err := m.WatchSelectors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return github_com_gogo_protobuf_proto.NewRequiredNotSetError("watch_selectors") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/plugin.proto", fileDescriptorPlugin) +} + +var fileDescriptorPlugin = []byte{ + // 575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xae, 0xd3, 0x36, 0x4d, 0x26, 0x69, 0xff, 0xfe, 0x2b, 0x54, 0xad, 0x7a, 0x70, 0xaa, 0x46, + 0x42, 0x41, 0x42, 0x8e, 0xd4, 0x0b, 0x52, 0x6e, 0x94, 0x5c, 0x22, 0x01, 0x45, 0x0e, 0x12, 0x37, + 0x22, 0xc7, 0x3b, 0x4d, 0x96, 0x3a, 0x5e, 0x6b, 0x77, 0x4d, 0x0a, 0x27, 0x5e, 0x80, 0x07, 0xe0, + 0xca, 0xd3, 0xf4, 0xc8, 0x91, 0x53, 0x44, 0x2d, 0x71, 0xe0, 0x06, 0x6f, 0x80, 0x76, 0xd7, 0x69, + 0x08, 0x6a, 0xc5, 0xc9, 0x33, 0xdf, 0x7c, 0xdf, 0xcc, 0x7c, 0x3b, 0x86, 0x47, 0x13, 0xae, 0xa7, + 0xf9, 0x38, 0x88, 0xc5, 0xac, 0xcb, 0x44, 0x7c, 0x81, 0xb2, 0xab, 0xe6, 0x91, 0x9c, 0x5d, 0x70, + 0xdd, 0xcd, 0xa4, 0xd0, 0x62, 0x9c, 0x9f, 0x77, 0xb3, 0x24, 0x9f, 0xf0, 0xb4, 0xfc, 0x04, 0x16, + 0x26, 0x07, 0x8e, 0x1d, 0x2c, 0x49, 0x81, 0xab, 0x1e, 0x1e, 0x4d, 0x84, 0x98, 0x24, 0xb8, 0x12, + 0x33, 0x54, 0xb1, 0xe4, 0x99, 0x16, 0x25, 0xf7, 0xf8, 0xd3, 0x26, 0xec, 0xbd, 0x8a, 0x74, 0x3c, + 0x1d, 0x62, 0x82, 0xb1, 0x16, 0x52, 0x91, 0x03, 0xa8, 0x70, 0x46, 0xbd, 0x23, 0xaf, 0x53, 0x3b, + 0xad, 0x16, 0x8b, 0x56, 0x65, 0xd0, 0x0f, 0x2b, 0x9c, 0x91, 0x07, 0x50, 0xe7, 0x6c, 0x94, 0x49, + 0x3c, 0xe7, 0x97, 0xb4, 0x62, 0xcb, 0xcd, 0x62, 0xd1, 0xaa, 0x0d, 0xfa, 0x2f, 0x2c, 0x16, 0xd6, + 0x38, 0x73, 0x11, 0x21, 0xb0, 0x95, 0x46, 0x33, 
0xa4, 0x9b, 0x86, 0x15, 0xda, 0x98, 0xb4, 0xa0, + 0x61, 0xbe, 0xcb, 0x06, 0x5b, 0xb6, 0x04, 0x06, 0x2a, 0x45, 0x07, 0x50, 0x8d, 0x73, 0xa5, 0xc5, + 0x8c, 0x6e, 0xdb, 0x5a, 0x99, 0x91, 0x36, 0xec, 0xba, 0x68, 0x29, 0xad, 0xda, 0x72, 0xd3, 0x81, + 0xa5, 0xf8, 0x21, 0x80, 0x42, 0xf9, 0x96, 0xc7, 0x38, 0xe2, 0x8c, 0xee, 0xd8, 0xed, 0x76, 0x8b, + 0x45, 0xab, 0x3e, 0x74, 0xe8, 0xa0, 0x1f, 0xd6, 0x4b, 0xc2, 0x80, 0x91, 0x36, 0xec, 0xa4, 0x82, + 0x59, 0x6a, 0xcd, 0x52, 0xa1, 0x58, 0xb4, 0xaa, 0xcf, 0x05, 0x33, 0xbc, 0xaa, 0x29, 0x0d, 0x98, + 0x31, 0xa1, 0x12, 0xa1, 0x69, 0xdd, 0x99, 0x30, 0xb1, 0xd9, 0x85, 0xa1, 0xe2, 0x12, 0xd9, 0x48, + 0xe9, 0x48, 0x23, 0x05, 0xb7, 0x4b, 0x09, 0x0e, 0x0d, 0x66, 0x84, 0x52, 0x24, 0x48, 0x1b, 0x4e, + 0x68, 0x62, 0xe2, 0x03, 0xcc, 0x70, 0x36, 0x46, 0xa9, 0xa6, 0x3c, 0xa3, 0x4d, 0x67, 0x7e, 0x85, + 0x18, 0xcd, 0x05, 0x4f, 0x19, 0xdd, 0x75, 0x1a, 0x13, 0x1f, 0xbf, 0x86, 0xc6, 0x50, 0x0b, 0x89, + 0x67, 0xe3, 0x37, 0x18, 0x6b, 0x72, 0x06, 0xff, 0xcd, 0xcd, 0xa5, 0x46, 0x6a, 0x79, 0x2a, 0xea, + 0x1d, 0x55, 0x3a, 0x8d, 0x93, 0xfb, 0xc1, 0xed, 0xe7, 0x0f, 0xd6, 0x0f, 0x1b, 0xee, 0xcd, 0xd7, + 0xf2, 0xe3, 0x3e, 0xec, 0xbf, 0x7c, 0x3a, 0x7c, 0x9c, 0xeb, 0xa9, 0x90, 0xfc, 0x7d, 0xa4, 0xb9, + 0x48, 0xc9, 0x3d, 0xd8, 0x36, 0xfb, 0x9a, 0xd6, 0x9b, 0x9d, 0x7a, 0xe8, 0x12, 0x72, 0x08, 0x35, + 0x9e, 0x2a, 0x8c, 0x73, 0x89, 0xee, 0xf2, 0xe1, 0x4d, 0xde, 0x7b, 0x02, 0x35, 0x86, 0x98, 0xc5, + 0x22, 0x7b, 0x47, 0x5a, 0x81, 0xfb, 0xe1, 0x56, 0x9b, 0x3c, 0x43, 0xa5, 0xa2, 0x09, 0x9e, 0x65, + 0xa6, 0xbb, 0xa2, 0x3f, 0x3f, 0xdb, 0xbb, 0xf7, 0xb6, 0xb4, 0xcc, 0x31, 0xbc, 0x11, 0xf6, 0x38, + 0x34, 0x95, 0xb1, 0x3a, 0x12, 0xce, 0xeb, 0x3f, 0x1b, 0xfd, 0xb2, 0x8d, 0x1a, 0x27, 0xed, 0xbb, + 0xbc, 0xff, 0xf1, 0x72, 0x61, 0x43, 0xad, 0x92, 0xde, 0x25, 0xfc, 0xaf, 0x13, 0x35, 0x8a, 0xd6, + 0x6c, 0xfb, 0xb7, 0xcc, 0xd3, 0x53, 0xc1, 0x96, 0xe3, 0x7e, 0x7c, 0xff, 0xd8, 0xb6, 0xf3, 0x3a, + 0x77, 0xcd, 0xfb, 0xfb, 0x25, 0xc3, 0x7d, 0x9d, 0xa8, 0x35, 0xe4, 0x94, 0x5e, 0x5d, 0xfb, 0x1b, + 0x5f, 0xaf, 0xfd, 0x8d, 0x0f, 0x85, 0xef, 0x5d, 0x15, 0xbe, 0xf7, 0xa5, 0xf0, 0xbd, 0x6f, 0x85, + 0xef, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x99, 0x7d, 0xfb, 0xf9, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go b/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go new file mode 100644 index 0000000000..b997ca1712 --- /dev/null +++ b/vendor/github.com/docker/swarmkit/protobuf/ptypes/doc.go @@ -0,0 +1,3 @@ +// Package ptypes provides utility functions for use with +// gogo/protobuf/ptypes. +package ptypes diff --git a/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go b/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000000..3890384cfc --- /dev/null +++ b/vendor/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go @@ -0,0 +1,17 @@ +package ptypes + +import ( + "time" + + gogotypes "github.com/gogo/protobuf/types" +) + +// MustTimestampProto converts time.Time to a google.protobuf.Timestamp proto. +// It panics if input timestamp is invalid. 
+func MustTimestampProto(t time.Time) *gogotypes.Timestamp { + ts, err := gogotypes.TimestampProto(t) + if err != nil { + panic(err.Error()) + } + return ts +} diff --git a/vendor/github.com/fsnotify/fsnotify/AUTHORS b/vendor/github.com/fsnotify/fsnotify/AUTHORS new file mode 100644 index 0000000000..5ab5d41c54 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/AUTHORS @@ -0,0 +1,52 @@ +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# You can update this list using the following command: +# +# $ git shortlog -se | awk '{print $2 " " $3 " " $4}' + +# Please keep the list sorted. + +Aaron L +Adrien Bustany +Amit Krishnan +Anmol Sethi +Bjørn Erik Pedersen +Bruno Bigras +Caleb Spare +Case Nelson +Chris Howey +Christoffer Buchholz +Daniel Wagner-Hall +Dave Cheney +Evan Phoenix +Francisco Souza +Hari haran +John C Barstow +Kelvin Fo +Ken-ichirou MATSUZAWA +Matt Layher +Nathan Youngman +Nickolai Zeldovich +Patrick +Paul Hammond +Pawel Knap +Pieter Droogendijk +Pursuit92 +Riku Voipio +Rob Figueiredo +Rodrigo Chiossi +Slawek Ligus +Soge Zhang +Tiffany Jernigan +Tilak Sharma +Tom Payne +Travis Cline +Tudor Golubenco +Vahe Khachikyan +Yukang +bronze1man +debrando +henrikedwards +铁哥 diff --git a/vendor/github.com/fsnotify/fsnotify/LICENSE b/vendor/github.com/fsnotify/fsnotify/LICENSE new file mode 100644 index 0000000000..f21e540800 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 fsnotify Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/fsnotify/fsnotify/fen.go b/vendor/github.com/fsnotify/fsnotify/fen.go new file mode 100644 index 0000000000..ced39cb881 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fen.go @@ -0,0 +1,37 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build solaris + +package fsnotify + +import ( + "errors" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + return nil, errors.New("FEN based watcher not yet supported for fsnotify\n") +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + return nil +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go new file mode 100644 index 0000000000..190bf0de57 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -0,0 +1,66 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !plan9 + +// Package fsnotify provides a platform-independent interface for file system notifications. +package fsnotify + +import ( + "bytes" + "errors" + "fmt" +) + +// Event represents a single file system notification. +type Event struct { + Name string // Relative path to the file or directory. + Op Op // File operation that triggered the event. +} + +// Op describes a set of file operations. +type Op uint32 + +// These are the generalized file operations that can trigger a notification. +const ( + Create Op = 1 << iota + Write + Remove + Rename + Chmod +) + +func (op Op) String() string { + // Use a buffer for efficient string concatenation + var buffer bytes.Buffer + + if op&Create == Create { + buffer.WriteString("|CREATE") + } + if op&Remove == Remove { + buffer.WriteString("|REMOVE") + } + if op&Write == Write { + buffer.WriteString("|WRITE") + } + if op&Rename == Rename { + buffer.WriteString("|RENAME") + } + if op&Chmod == Chmod { + buffer.WriteString("|CHMOD") + } + if buffer.Len() == 0 { + return "" + } + return buffer.String()[1:] // Strip leading pipe +} + +// String returns a string representation of the event in the form +// "file: REMOVE|WRITE|..." +func (e Event) String() string { + return fmt.Sprintf("%q: %s", e.Name, e.Op.String()) +} + +// Common errors that can be reported by a watcher +var ErrEventOverflow = errors.New("fsnotify queue overflow") diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go new file mode 100644 index 0000000000..d9fd1b88a0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify.go @@ -0,0 +1,337 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + "unsafe" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel.
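+// On Linux this is implemented with inotify(7): a single inotify fd backs all +// watches, and an epoll-based poller (see inotify_poller.go) lets Close +// interrupt a blocked read.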
+type Watcher struct { + Events chan Event + Errors chan error + mu sync.Mutex // Map access + fd int + poller *fdPoller + watches map[string]*watch // Map of inotify watches (key: path) + paths map[int]string // Map of watched paths (key: watch descriptor) + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + doneResp chan struct{} // Channel to respond to Close +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + // Create inotify fd + fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC) + if fd == -1 { + return nil, errno + } + // Create epoll + poller, err := newFdPoller(fd) + if err != nil { + unix.Close(fd) + return nil, err + } + w := &Watcher{ + fd: fd, + poller: poller, + watches: make(map[string]*watch), + paths: make(map[int]string), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + doneResp: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +func (w *Watcher) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed() { + return nil + } + + // Send 'close' signal to goroutine, and set the Watcher to closed. + close(w.done) + + // Wake up goroutine + w.poller.wake() + + // Wait for goroutine to close + <-w.doneResp + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + name = filepath.Clean(name) + if w.isClosed() { + return errors.New("inotify instance already closed") + } + + const agnosticEvents = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | + unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | + unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + + var flags uint32 = agnosticEvents + + w.mu.Lock() + defer w.mu.Unlock() + watchEntry := w.watches[name] + if watchEntry != nil { + flags |= watchEntry.flags | unix.IN_MASK_ADD + } + wd, errno := unix.InotifyAddWatch(w.fd, name, flags) + if wd == -1 { + return errno + } + + if watchEntry == nil { + w.watches[name] = &watch{wd: uint32(wd), flags: flags} + w.paths[wd] = name + } else { + watchEntry.wd = uint32(wd) + watchEntry.flags = flags + } + + return nil +} + +// Remove stops watching the named file or directory (non-recursively). +func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + + // Fetch the watch. + w.mu.Lock() + defer w.mu.Unlock() + watch, ok := w.watches[name] + + // Remove it from inotify. + if !ok { + return fmt.Errorf("can't remove non-existent inotify watch for: %s", name) + } + + // If InotifyRmWatch doesn't return an error, we successfully removed the + // watch; we still need to clean up our internal state to ensure it matches + // inotify's kernel state. + delete(w.paths, int(watch.wd)) + delete(w.watches, name) + + // inotify_rm_watch will return EINVAL if the file has been deleted; + // the inotify watch will already have been removed. + // watches and paths are deleted in ignoreLinux() implicitly and asynchronously + // by calling inotify_rm_watch() below: the readEvents() goroutine receives IN_IGNORED, + // so EINVAL means that the wd is being rm_watch()ed or its file was removed + // by another thread and we have not received the IN_IGNORED event yet. + success, errno := unix.InotifyRmWatch(w.fd, watch.wd) + if success == -1 { + // TODO: Perhaps it's not helpful to return an error here in every case.
+ // The only two possible errors are: + // EBADF, which happens when w.fd is not a valid file descriptor of any kind. + // EINVAL, which is when fd is not an inotify descriptor or wd is not a valid watch descriptor. + // Watch descriptors are invalidated when they are removed explicitly or implicitly; + // explicitly by inotify_rm_watch, implicitly when the file they are watching is deleted. + return errno + } + + return nil +} + +type watch struct { + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) +} + +// readEvents reads from the inotify file descriptor, converts the +// received events into Event objects and sends them via the Events channel. +func (w *Watcher) readEvents() { + var ( + buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events + n int // Number of bytes read with read() + errno error // Syscall errno + ok bool // For poller.wait + ) + + defer close(w.doneResp) + defer close(w.Errors) + defer close(w.Events) + defer unix.Close(w.fd) + defer w.poller.close() + + for { + // See if we have been closed. + if w.isClosed() { + return + } + + ok, errno = w.poller.wait() + if errno != nil { + select { + case w.Errors <- errno: + case <-w.done: + return + } + continue + } + + if !ok { + continue + } + + n, errno = unix.Read(w.fd, buf[:]) + // If a signal interrupted execution, see if we've been asked to close, and try again. + // http://man7.org/linux/man-pages/man7/signal.7.html : + // "Before Linux 3.8, reads from an inotify(7) file descriptor were not restartable" + if errno == unix.EINTR { + continue + } + + // unix.Read might have been woken up by Close. If so, we're done. + if w.isClosed() { + return + } + + if n < unix.SizeofInotifyEvent { + var err error + if n == 0 { + // EOF was received; this should really never happen. + err = io.EOF + } else if n < 0 { + // An error occurred while reading. + err = errno + } else { + // Read was too short. + err = errors.New("notify: short read in readEvents()") + } + select { + case w.Errors <- err: + case <-w.done: + return + } + continue + } + + var offset uint32 + // We don't know how many events we just read into the buffer. + // While the offset points to at least one whole event... + for offset <= uint32(n-unix.SizeofInotifyEvent) { + // Point "raw" to the event in the buffer + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + + mask := uint32(raw.Mask) + nameLen := uint32(raw.Len) + + if mask&unix.IN_Q_OVERFLOW != 0 { + select { + case w.Errors <- ErrEventOverflow: + case <-w.done: + return + } + } + + // If the event happened to the watched directory or the watched file, the kernel + // doesn't append the filename to the event, but we would like to always fill + // the "Name" field with a valid filename. We retrieve the path of the watch from + // the "paths" map. + w.mu.Lock() + name, ok := w.paths[int(raw.Wd)] + // IN_DELETE_SELF occurs when the file/directory being watched is removed. + // This is a sign to clean up the maps, otherwise we are no longer in sync + // with the inotify kernel state which has already deleted the watch + // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + delete(w.paths, int(raw.Wd)) + delete(w.watches, name) + } + w.mu.Unlock() + + if nameLen > 0 { + // Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent])) + // The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + event := newEvent(name, mask) + + // Send the events that are not ignored on the events channel + if !event.ignoreLinux(mask) { + select { + case w.Events <- event: + case <-w.done: + return + } + } + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + nameLen + } + } +} + +// Certain types of events can be "ignored" and not sent over the Events +// channel, such as events marked ignored by the kernel, or MODIFY events +// against files that do not exist. +func (e *Event) ignoreLinux(mask uint32) bool { + // Ignore anything the inotify API says to ignore + if mask&unix.IN_IGNORED == unix.IN_IGNORED { + return true + } + + // If the event is not a DELETE or RENAME, the file must exist. + // Otherwise the event is ignored. + // *Note*: this was put in place because it was seen that a MODIFY + // event was sent after the DELETE. This ignores that MODIFY and + // assumes a DELETE will come or has come if the file doesn't exist. + if !(e.Op&Remove == Remove || e.Op&Rename == Rename) { + _, statErr := os.Lstat(e.Name) + return os.IsNotExist(statErr) + } + return false +} + +// newEvent returns a platform-independent Event based on an inotify mask. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + e.Op |= Create + } + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF || mask&unix.IN_DELETE == unix.IN_DELETE { + e.Op |= Remove + } + if mask&unix.IN_MODIFY == unix.IN_MODIFY { + e.Op |= Write + } + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + e.Op |= Rename + } + if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { + e.Op |= Chmod + } + return e +} diff --git a/vendor/github.com/fsnotify/fsnotify/inotify_poller.go b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go new file mode 100644 index 0000000000..cc7db4b22e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/inotify_poller.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package fsnotify + +import ( + "errors" + + "golang.org/x/sys/unix" +) + +type fdPoller struct { + fd int // File descriptor (as returned by the inotify_init() syscall) + epfd int // Epoll file descriptor + pipe [2]int // Pipe for waking up +} + +func emptyPoller(fd int) *fdPoller { + poller := new(fdPoller) + poller.fd = fd + poller.epfd = -1 + poller.pipe[0] = -1 + poller.pipe[1] = -1 + return poller +} + +// Create a new inotify poller. +// This takes the inotify fd and creates an epoll handle and a wake-up pipe for it. +func newFdPoller(fd int) (*fdPoller, error) { + var errno error + poller := emptyPoller(fd) + defer func() { + if errno != nil { + poller.close() + } + }() + poller.fd = fd + + // Create epoll fd + poller.epfd, errno = unix.EpollCreate1(0) + if poller.epfd == -1 { + return nil, errno + } + // Create pipe; pipe[0] is the read end, pipe[1] the write end.
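+ // This is the classic self-pipe trick: wake() writes a byte to pipe[1], which + // makes the epoll_wait in wait() return even when the inotify fd is idle, so + // Close can interrupt a blocked poller; clearWake() then drains pipe[0].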
+ errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK) + if errno != nil { + return nil, errno + } + + // Register inotify fd with epoll + event := unix.EpollEvent{ + Fd: int32(poller.fd), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.fd, &event) + if errno != nil { + return nil, errno + } + + // Register pipe fd with epoll + event = unix.EpollEvent{ + Fd: int32(poller.pipe[0]), + Events: unix.EPOLLIN, + } + errno = unix.EpollCtl(poller.epfd, unix.EPOLL_CTL_ADD, poller.pipe[0], &event) + if errno != nil { + return nil, errno + } + + return poller, nil +} + +// Wait using epoll. +// Returns true if something is ready to be read, +// false if there is not. +func (poller *fdPoller) wait() (bool, error) { + // 3 possible events per fd, and 2 fds, makes a maximum of 6 events. + // I don't know whether epoll_wait returns the number of events returned, + // or the total number of events ready. + // I decided to catch both by making the buffer one larger than the maximum. + events := make([]unix.EpollEvent, 7) + for { + n, errno := unix.EpollWait(poller.epfd, events, -1) + if n == -1 { + if errno == unix.EINTR { + continue + } + return false, errno + } + if n == 0 { + // If there are no events, try again. + continue + } + if n > 6 { + // This should never happen. More events were returned than should be possible. + return false, errors.New("epoll_wait returned more events than I know what to do with") + } + ready := events[:n] + epollhup := false + epollerr := false + epollin := false + for _, event := range ready { + if event.Fd == int32(poller.fd) { + if event.Events&unix.EPOLLHUP != 0 { + // This should not happen, but if it does, treat it as a wakeup. + epollhup = true + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the file descriptor, we should pretend + // something is ready to read, and let unix.Read pick up the error. + epollerr = true + } + if event.Events&unix.EPOLLIN != 0 { + // There is data to read. + epollin = true + } + } + if event.Fd == int32(poller.pipe[0]) { + if event.Events&unix.EPOLLHUP != 0 { + // Write pipe descriptor was closed, by us. This means we're closing down the + // watcher, and we should wake up. + } + if event.Events&unix.EPOLLERR != 0 { + // If an error is waiting on the pipe file descriptor. + // This is an absolute mystery, and should never ever happen. + return false, errors.New("Error on the pipe descriptor.") + } + if event.Events&unix.EPOLLIN != 0 { + // This is a regular wakeup, so we have to clear the buffer. + err := poller.clearWake() + if err != nil { + return false, err + } + } + } + } + + if epollhup || epollerr || epollin { + return true, nil + } + return false, nil + } +} + +// Close the write end of the poller. +func (poller *fdPoller) wake() error { + buf := make([]byte, 1) + n, errno := unix.Write(poller.pipe[1], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is full, poller will wake. + return nil + } + return errno + } + return nil +} + +func (poller *fdPoller) clearWake() error { + // You have to be woken up a LOT in order to get to 100! + buf := make([]byte, 100) + n, errno := unix.Read(poller.pipe[0], buf) + if n == -1 { + if errno == unix.EAGAIN { + // Buffer is empty, someone else cleared our wake. + return nil + } + return errno + } + return nil +} + +// Close all poller file descriptors, but not the one passed to it. 
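+// It is safe to call on a partially-initialized poller: descriptors that are +// still -1 are skipped.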
+func (poller *fdPoller) close() { + if poller.pipe[1] != -1 { + unix.Close(poller.pipe[1]) + } + if poller.pipe[0] != -1 { + unix.Close(poller.pipe[0]) + } + if poller.epfd != -1 { + unix.Close(poller.epfd) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/kqueue.go b/vendor/github.com/fsnotify/fsnotify/kqueue.go new file mode 100644 index 0000000000..86e76a3d67 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/kqueue.go @@ -0,0 +1,521 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly darwin + +package fsnotify + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sync" + "time" + + "golang.org/x/sys/unix" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + + kq int // File descriptor (as returned by the kqueue() syscall). + + mu sync.Mutex // Protects access to watcher data + watches map[string]int // Map of watched file descriptors (key: path). + externalWatches map[string]bool // Map of watches added by the user of the library. + dirFlags map[string]uint32 // Map of watched directories to fflags used in kqueue. + paths map[int]pathInfo // Map file descriptors to path names for processing kqueue events. + fileExists map[string]bool // Keep track of whether we know this file exists (to stop duplicate create events). + isClosed bool // Set to true when Close() is first called +} + +type pathInfo struct { + name string + isDir bool +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + kq, err := kqueue() + if err != nil { + return nil, err + } + + w := &Watcher{ + kq: kq, + watches: make(map[string]int), + dirFlags: make(map[string]uint32), + paths: make(map[int]pathInfo), + fileExists: make(map[string]bool), + externalWatches: make(map[string]bool), + Events: make(chan Event), + Errors: make(chan error), + done: make(chan struct{}), + } + + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return nil + } + w.isClosed = true + + // copy paths to remove while locked + var pathsToRemove = make([]string, 0, len(w.watches)) + for name := range w.watches { + pathsToRemove = append(pathsToRemove, name) + } + w.mu.Unlock() + // unlock before calling Remove, which also locks + + for _, name := range pathsToRemove { + w.Remove(name) + } + + // send a "quit" message to the reader goroutine + close(w.done) + + return nil +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + w.mu.Lock() + w.externalWatches[name] = true + w.mu.Unlock() + _, err := w.addWatch(name, noteAllEvents) + return err +} + +// Remove stops watching the named file or directory (non-recursively).
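+// Removing a watched directory also removes the internal watches that +// watchDirectoryFiles added on the files inside it; watches the user added +// explicitly are left alone.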
+func (w *Watcher) Remove(name string) error { + name = filepath.Clean(name) + w.mu.Lock() + watchfd, ok := w.watches[name] + w.mu.Unlock() + if !ok { + return fmt.Errorf("can't remove non-existent kevent watch for: %s", name) + } + + const registerRemove = unix.EV_DELETE + if err := register(w.kq, []int{watchfd}, registerRemove, 0); err != nil { + return err + } + + unix.Close(watchfd) + + w.mu.Lock() + isDir := w.paths[watchfd].isDir + delete(w.watches, name) + delete(w.paths, watchfd) + delete(w.dirFlags, name) + w.mu.Unlock() + + // Find all watched paths that are in this directory that are not external. + if isDir { + var pathsToRemove []string + w.mu.Lock() + for _, path := range w.paths { + wdir, _ := filepath.Split(path.name) + if filepath.Clean(wdir) == name { + if !w.externalWatches[path.name] { + pathsToRemove = append(pathsToRemove, path.name) + } + } + } + w.mu.Unlock() + for _, name := range pathsToRemove { + // Since these are internal, not much sense in propagating error + // to the user, as that will just confuse them with an error about + // a path they did not explicitly watch themselves. + w.Remove(name) + } + } + + return nil +} + +// Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) +const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | unix.NOTE_RENAME + +// keventWaitTime to block on each read from kevent +var keventWaitTime = durationToTimespec(100 * time.Millisecond) + +// addWatch adds name to the watched file set. +// The flags are interpreted as described in kevent(2). +// Returns the real path to the file which was added, if any, which may be different from the one passed in the case of symlinks. +func (w *Watcher) addWatch(name string, flags uint32) (string, error) { + var isDir bool + // Make ./name and name equivalent + name = filepath.Clean(name) + + w.mu.Lock() + if w.isClosed { + w.mu.Unlock() + return "", errors.New("kevent instance already closed") + } + watchfd, alreadyWatching := w.watches[name] + // We already have a watch, but we can still override flags. + if alreadyWatching { + isDir = w.paths[watchfd].isDir + } + w.mu.Unlock() + + if !alreadyWatching { + fi, err := os.Lstat(name) + if err != nil { + return "", err + } + + // Don't watch sockets. + if fi.Mode()&os.ModeSocket == os.ModeSocket { + return "", nil + } + + // Don't watch named pipes. + if fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe { + return "", nil + } + + // Follow Symlinks + // Unfortunately, Linux can add bogus symlinks to watch list without + // issue, and Windows can't do symlinks period (AFAIK). To maintain + // consistency, we will act like everything is fine. There will simply + // be no file events for broken symlinks. + // Hence the returns of nil on errors. 
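+ // EvalSymlinks below resolves the link target; if resolution fails the + // link is treated as broken and silently ignored, and if the target is + // already watched there is nothing more to do.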
+ if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + name, err = filepath.EvalSymlinks(name) + if err != nil { + return "", nil + } + + w.mu.Lock() + _, alreadyWatching = w.watches[name] + w.mu.Unlock() + + if alreadyWatching { + return name, nil + } + + fi, err = os.Lstat(name) + if err != nil { + return "", nil + } + } + + watchfd, err = unix.Open(name, openMode, 0700) + if watchfd == -1 { + return "", err + } + + isDir = fi.IsDir() + } + + const registerAdd = unix.EV_ADD | unix.EV_CLEAR | unix.EV_ENABLE + if err := register(w.kq, []int{watchfd}, registerAdd, flags); err != nil { + unix.Close(watchfd) + return "", err + } + + if !alreadyWatching { + w.mu.Lock() + w.watches[name] = watchfd + w.paths[watchfd] = pathInfo{name: name, isDir: isDir} + w.mu.Unlock() + } + + if isDir { + // Watch the directory if it has not been watched before, + // or if it was watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + w.mu.Lock() + + watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && + (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) + // Store flags so this watch can be updated later + w.dirFlags[name] = flags + w.mu.Unlock() + + if watchDir { + if err := w.watchDirectoryFiles(name); err != nil { + return "", err + } + } + } + return name, nil +} + +// readEvents reads from kqueue and converts the received kevents into +// Event values that it sends down the Events channel. +func (w *Watcher) readEvents() { + eventBuffer := make([]unix.Kevent_t, 10) + +loop: + for { + // See if there is a message on the "done" channel + select { + case <-w.done: + break loop + default: + } + + // Get new events + kevents, err := read(w.kq, eventBuffer, &keventWaitTime) + // EINTR is okay, the syscall was interrupted before timeout expired. + if err != nil && err != unix.EINTR { + select { + case w.Errors <- err: + case <-w.done: + break loop + } + continue + } + + // Flush the events we received to the Events channel + for len(kevents) > 0 { + kevent := &kevents[0] + watchfd := int(kevent.Ident) + mask := uint32(kevent.Fflags) + w.mu.Lock() + path := w.paths[watchfd] + w.mu.Unlock() + event := newEvent(path.name, mask) + + if path.isDir && !(event.Op&Remove == Remove) { + // Double check to make sure the directory exists. This can happen when + // we do a rm -fr on recursively watched folders and we receive a + // modification event first, but the folder has been deleted and we + // receive the delete event later. + if _, err := os.Lstat(event.Name); os.IsNotExist(err) { + // mark it as a delete event + event.Op |= Remove + } + } + + if event.Op&Rename == Rename || event.Op&Remove == Remove { + w.Remove(event.Name) + w.mu.Lock() + delete(w.fileExists, event.Name) + w.mu.Unlock() + } + + if path.isDir && event.Op&Write == Write && !(event.Op&Remove == Remove) { + w.sendDirectoryChangeEvents(event.Name) + } else { + // Send the event on the Events channel. + select { + case w.Events <- event: + case <-w.done: + break loop + } + } + + if event.Op&Remove == Remove { + // Look for a file that may have overwritten this. + // For example, mv f1 f2 will delete f2, then create f2. + if path.isDir { + fileDir := filepath.Clean(event.Name) + w.mu.Lock() + _, found := w.watches[fileDir] + w.mu.Unlock() + if found { + // make sure the directory exists before we watch for changes. 
When we + // do a recursive watch and perform rm -fr, the parent directory might + // have gone missing; ignore the missing directory and let the + // upcoming delete event remove the watch from the parent directory. + if _, err := os.Lstat(fileDir); err == nil { + w.sendDirectoryChangeEvents(fileDir) + } + } + } else { + filePath := filepath.Clean(event.Name) + if fileInfo, err := os.Lstat(filePath); err == nil { + w.sendFileCreatedEventIfNew(filePath, fileInfo) + } + } + } + + // Move to next event + kevents = kevents[1:] + } + } + + // cleanup + err := unix.Close(w.kq) + if err != nil { + // The only way the previous loop breaks is if w.done was closed, so we need to async send to w.Errors. + select { + case w.Errors <- err: + default: + } + } + close(w.Events) + close(w.Errors) +} + +// newEvent returns a platform-independent Event based on kqueue Fflags. +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { + e.Op |= Remove + } + if mask&unix.NOTE_WRITE == unix.NOTE_WRITE { + e.Op |= Write + } + if mask&unix.NOTE_RENAME == unix.NOTE_RENAME { + e.Op |= Rename + } + if mask&unix.NOTE_ATTRIB == unix.NOTE_ATTRIB { + e.Op |= Chmod + } + return e +} + +func newCreateEvent(name string) Event { + return Event{Name: name, Op: Create} +} + +// watchDirectoryFiles to mimic inotify when adding a watch on a directory +func (w *Watcher) watchDirectoryFiles(dirPath string) error { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + return err + } + + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + } + + return nil +} + +// sendDirectoryChangeEvents searches the directory for newly created files +// and sends them over the event channel. This functionality is to have +// the BSD version of fsnotify match Linux inotify which provides a +// create event for files created in a watched directory. +func (w *Watcher) sendDirectoryChangeEvents(dirPath string) { + // Get all files + files, err := ioutil.ReadDir(dirPath) + if err != nil { + select { + case w.Errors <- err: + case <-w.done: + return + } + } + + // Search for new files + for _, fileInfo := range files { + filePath := filepath.Join(dirPath, fileInfo.Name()) + err := w.sendFileCreatedEventIfNew(filePath, fileInfo) + + if err != nil { + return + } + } +} + +// sendFileCreatedEventIfNew sends a create event if the file isn't already being tracked.
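+// It also establishes the internal watch on the new file, so subsequent +// events for it are delivered.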
+func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fileInfo os.FileInfo) (err error) { + w.mu.Lock() + _, doesExist := w.fileExists[filePath] + w.mu.Unlock() + if !doesExist { + // Send create event + select { + case w.Events <- newCreateEvent(filePath): + case <-w.done: + return + } + } + + // like watchDirectoryFiles (but without doing another ReadDir) + filePath, err = w.internalWatch(filePath, fileInfo) + if err != nil { + return err + } + + w.mu.Lock() + w.fileExists[filePath] = true + w.mu.Unlock() + + return nil +} + +func (w *Watcher) internalWatch(name string, fileInfo os.FileInfo) (string, error) { + if fileInfo.IsDir() { + // mimic Linux providing delete events for subdirectories + // but preserve the flags used if currently watching subdirectory + w.mu.Lock() + flags := w.dirFlags[name] + w.mu.Unlock() + + flags |= unix.NOTE_DELETE | unix.NOTE_RENAME + return w.addWatch(name, flags) + } + + // watch file to mimic Linux inotify + return w.addWatch(name, noteAllEvents) +} + +// kqueue creates a new kernel event queue and returns a descriptor. +func kqueue() (kq int, err error) { + kq, err = unix.Kqueue() + if kq == -1 { + return kq, err + } + return kq, nil +} + +// register events with the queue +func register(kq int, fds []int, flags int, fflags uint32) error { + changes := make([]unix.Kevent_t, len(fds)) + + for i, fd := range fds { + // SetKevent converts int to the platform-specific types: + unix.SetKevent(&changes[i], fd, unix.EVFILT_VNODE, flags) + changes[i].Fflags = fflags + } + + // register the events + success, err := unix.Kevent(kq, changes, nil, nil) + if success == -1 { + return err + } + return nil +} + +// read retrieves pending events, or waits until an event occurs. +// A timeout of nil blocks indefinitely, while 0 polls the queue. +func read(kq int, events []unix.Kevent_t, timeout *unix.Timespec) ([]unix.Kevent_t, error) { + n, err := unix.Kevent(kq, nil, events, timeout) + if err != nil { + return nil, err + } + return events[0:n], nil +} + +// durationToTimespec prepares a timeout value +func durationToTimespec(d time.Duration) unix.Timespec { + return unix.NsecToTimespec(d.Nanoseconds()) +} diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go new file mode 100644 index 0000000000..7d8de14513 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd dragonfly + +package fsnotify + +import "golang.org/x/sys/unix" + +const openMode = unix.O_NONBLOCK | unix.O_RDONLY diff --git a/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go new file mode 100644 index 0000000000..9139e17161 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/open_mode_darwin.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin + +package fsnotify + +import "golang.org/x/sys/unix" + +// note: this constant is not defined on BSD +const openMode = unix.O_EVTONLY diff --git a/vendor/github.com/fsnotify/fsnotify/windows.go b/vendor/github.com/fsnotify/fsnotify/windows.go new file mode 100644 index 0000000000..09436f31d8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/windows.go @@ -0,0 +1,561 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package fsnotify + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "sync" + "syscall" + "unsafe" +) + +// Watcher watches a set of files, delivering events to a channel. +type Watcher struct { + Events chan Event + Errors chan error + isClosed bool // Set to true when Close() is first called + mu sync.Mutex // Map access + port syscall.Handle // Handle to completion port + watches watchMap // Map of watches (key: i-number) + input chan *input // Inputs to the reader are sent on this channel + quit chan chan<- error +} + +// NewWatcher establishes a new watcher with the underlying OS and begins waiting for events. +func NewWatcher() (*Watcher, error) { + port, e := syscall.CreateIoCompletionPort(syscall.InvalidHandle, 0, 0, 0) + if e != nil { + return nil, os.NewSyscallError("CreateIoCompletionPort", e) + } + w := &Watcher{ + port: port, + watches: make(watchMap), + input: make(chan *input, 1), + Events: make(chan Event, 50), + Errors: make(chan error), + quit: make(chan chan<- error, 1), + } + go w.readEvents() + return w, nil +} + +// Close removes all watches and closes the events channel. +func (w *Watcher) Close() error { + if w.isClosed { + return nil + } + w.isClosed = true + + // Send "quit" message to the reader goroutine + ch := make(chan error) + w.quit <- ch + if err := w.wakeupReader(); err != nil { + return err + } + return <-ch +} + +// Add starts watching the named file or directory (non-recursively). +func (w *Watcher) Add(name string) error { + if w.isClosed { + return errors.New("watcher already closed") + } + in := &input{ + op: opAddWatch, + path: filepath.Clean(name), + flags: sysFSALLEVENTS, + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +// Remove stops watching the named file or directory (non-recursively).
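+// Like Add, the request is handed to the I/O thread through w.input, and the +// reply channel carries back any error from remWatch.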
+func (w *Watcher) Remove(name string) error { + in := &input{ + op: opRemoveWatch, + path: filepath.Clean(name), + reply: make(chan error), + } + w.input <- in + if err := w.wakeupReader(); err != nil { + return err + } + return <-in.reply +} + +const ( + // Options for AddWatch + sysFSONESHOT = 0x80000000 + sysFSONLYDIR = 0x1000000 + + // Events + sysFSACCESS = 0x1 + sysFSALLEVENTS = 0xfff + sysFSATTRIB = 0x4 + sysFSCLOSE = 0x18 + sysFSCREATE = 0x100 + sysFSDELETE = 0x200 + sysFSDELETESELF = 0x400 + sysFSMODIFY = 0x2 + sysFSMOVE = 0xc0 + sysFSMOVEDFROM = 0x40 + sysFSMOVEDTO = 0x80 + sysFSMOVESELF = 0x800 + + // Special events + sysFSIGNORED = 0x8000 + sysFSQOVERFLOW = 0x4000 +) + +func newEvent(name string, mask uint32) Event { + e := Event{Name: name} + if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { + e.Op |= Create + } + if mask&sysFSDELETE == sysFSDELETE || mask&sysFSDELETESELF == sysFSDELETESELF { + e.Op |= Remove + } + if mask&sysFSMODIFY == sysFSMODIFY { + e.Op |= Write + } + if mask&sysFSMOVE == sysFSMOVE || mask&sysFSMOVESELF == sysFSMOVESELF || mask&sysFSMOVEDFROM == sysFSMOVEDFROM { + e.Op |= Rename + } + if mask&sysFSATTRIB == sysFSATTRIB { + e.Op |= Chmod + } + return e +} + +const ( + opAddWatch = iota + opRemoveWatch +) + +const ( + provisional uint64 = 1 << (32 + iota) +) + +type input struct { + op int + path string + flags uint32 + reply chan error +} + +type inode struct { + handle syscall.Handle + volume uint32 + index uint64 +} + +type watch struct { + ov syscall.Overlapped + ino *inode // i-number + path string // Directory path + mask uint64 // Directory itself is being watched with these notify flags + names map[string]uint64 // Map of names being watched and their notify flags + rename string // Remembers the old name while renaming a file + buf [4096]byte +} + +type indexMap map[uint64]*watch +type watchMap map[uint32]indexMap + +func (w *Watcher) wakeupReader() error { + e := syscall.PostQueuedCompletionStatus(w.port, 0, 0, nil) + if e != nil { + return os.NewSyscallError("PostQueuedCompletionStatus", e) + } + return nil +} + +func getDir(pathname string) (dir string, err error) { + attr, e := syscall.GetFileAttributes(syscall.StringToUTF16Ptr(pathname)) + if e != nil { + return "", os.NewSyscallError("GetFileAttributes", e) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + dir = pathname + } else { + dir, _ = filepath.Split(pathname) + dir = filepath.Clean(dir) + } + return +} + +func getIno(path string) (ino *inode, err error) { + h, e := syscall.CreateFile(syscall.StringToUTF16Ptr(path), + syscall.FILE_LIST_DIRECTORY, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE, + nil, syscall.OPEN_EXISTING, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OVERLAPPED, 0) + if e != nil { + return nil, os.NewSyscallError("CreateFile", e) + } + var fi syscall.ByHandleFileInformation + if e = syscall.GetFileInformationByHandle(h, &fi); e != nil { + syscall.CloseHandle(h) + return nil, os.NewSyscallError("GetFileInformationByHandle", e) + } + ino = &inode{ + handle: h, + volume: fi.VolumeSerialNumber, + index: uint64(fi.FileIndexHigh)<<32 | uint64(fi.FileIndexLow), + } + return ino, nil +} + +// Must run within the I/O thread. +func (m watchMap) get(ino *inode) *watch { + if i := m[ino.volume]; i != nil { + return i[ino.index] + } + return nil +} + +// Must run within the I/O thread. 
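+// set registers the watch under its volume serial number and file index, +// creating the per-volume index map on first use.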
+func (m watchMap) set(ino *inode, watch *watch) { + i := m[ino.volume] + if i == nil { + i = make(indexMap) + m[ino.volume] = i + } + i[ino.index] = watch +} + +// Must run within the I/O thread. +func (w *Watcher) addWatch(pathname string, flags uint64) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + if flags&sysFSONLYDIR != 0 && pathname != dir { + return nil + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watchEntry := w.watches.get(ino) + w.mu.Unlock() + if watchEntry == nil { + if _, e := syscall.CreateIoCompletionPort(ino.handle, w.port, 0, 0); e != nil { + syscall.CloseHandle(ino.handle) + return os.NewSyscallError("CreateIoCompletionPort", e) + } + watchEntry = &watch{ + ino: ino, + path: dir, + names: make(map[string]uint64), + } + w.mu.Lock() + w.watches.set(ino, watchEntry) + w.mu.Unlock() + flags |= provisional + } else { + syscall.CloseHandle(ino.handle) + } + if pathname == dir { + watchEntry.mask |= flags + } else { + watchEntry.names[filepath.Base(pathname)] |= flags + } + if err = w.startRead(watchEntry); err != nil { + return err + } + if pathname == dir { + watchEntry.mask &= ^provisional + } else { + watchEntry.names[filepath.Base(pathname)] &= ^provisional + } + return nil +} + +// Must run within the I/O thread. +func (w *Watcher) remWatch(pathname string) error { + dir, err := getDir(pathname) + if err != nil { + return err + } + ino, err := getIno(dir) + if err != nil { + return err + } + w.mu.Lock() + watch := w.watches.get(ino) + w.mu.Unlock() + if watch == nil { + return fmt.Errorf("can't remove non-existent watch for: %s", pathname) + } + if pathname == dir { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + watch.mask = 0 + } else { + name := filepath.Base(pathname) + w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + return w.startRead(watch) +} + +// Must run within the I/O thread. +func (w *Watcher) deleteWatch(watch *watch) { + for name, mask := range watch.names { + if mask&provisional == 0 { + w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + } + delete(watch.names, name) + } + if watch.mask != 0 { + if watch.mask&provisional == 0 { + w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + } + watch.mask = 0 + } +} + +// Must run within the I/O thread. 
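+// startRead (re)issues ReadDirectoryChanges with the union of the directory's +// own mask and all per-name masks; an empty union means nothing is watched +// any more, so the handle is closed and the watch entry dropped.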
+func (w *Watcher) startRead(watch *watch) error { + if e := syscall.CancelIo(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CancelIo", e) + w.deleteWatch(watch) + } + mask := toWindowsFlags(watch.mask) + for _, m := range watch.names { + mask |= toWindowsFlags(m) + } + if mask == 0 { + if e := syscall.CloseHandle(watch.ino.handle); e != nil { + w.Errors <- os.NewSyscallError("CloseHandle", e) + } + w.mu.Lock() + delete(w.watches[watch.ino.volume], watch.ino.index) + w.mu.Unlock() + return nil + } + e := syscall.ReadDirectoryChanges(watch.ino.handle, &watch.buf[0], + uint32(unsafe.Sizeof(watch.buf)), false, mask, nil, &watch.ov, 0) + if e != nil { + err := os.NewSyscallError("ReadDirectoryChanges", e) + if e == syscall.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { + // Watched directory was probably removed + if w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + err = nil + } + w.deleteWatch(watch) + w.startRead(watch) + return err + } + return nil +} + +// readEvents reads from the I/O completion port, converts the +// received events into Event objects and sends them via the Events channel. +// Entry point to the I/O thread. +func (w *Watcher) readEvents() { + var ( + n, key uint32 + ov *syscall.Overlapped + ) + runtime.LockOSThread() + + for { + e := syscall.GetQueuedCompletionStatus(w.port, &n, &key, &ov, syscall.INFINITE) + watch := (*watch)(unsafe.Pointer(ov)) + + if watch == nil { + select { + case ch := <-w.quit: + w.mu.Lock() + var indexes []indexMap + for _, index := range w.watches { + indexes = append(indexes, index) + } + w.mu.Unlock() + for _, index := range indexes { + for _, watch := range index { + w.deleteWatch(watch) + w.startRead(watch) + } + } + var err error + if e := syscall.CloseHandle(w.port); e != nil { + err = os.NewSyscallError("CloseHandle", e) + } + close(w.Events) + close(w.Errors) + ch <- err + return + case in := <-w.input: + switch in.op { + case opAddWatch: + in.reply <- w.addWatch(in.path, uint64(in.flags)) + case opRemoveWatch: + in.reply <- w.remWatch(in.path) + } + default: + } + continue + } + + switch e { + case syscall.ERROR_MORE_DATA: + if watch == nil { + w.Errors <- errors.New("ERROR_MORE_DATA has unexpectedly null lpOverlapped buffer") + } else { + // The i/o succeeded but the buffer is full. + // In theory we should be building up a full packet. + // In practice we can get away with just carrying on. 
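+ // The buffer was filled completely, so treat it as holding a full + // buffer's worth of valid bytes and let the parsing loop below walk + // the entries that did fit.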
+ n = uint32(unsafe.Sizeof(watch.buf)) + } + case syscall.ERROR_ACCESS_DENIED: + // Watched directory was probably removed + w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.deleteWatch(watch) + w.startRead(watch) + continue + case syscall.ERROR_OPERATION_ABORTED: + // CancelIo was called on this handle + continue + default: + w.Errors <- os.NewSyscallError("GetQueuedCompletionPort", e) + continue + case nil: + } + + var offset uint32 + for { + if n == 0 { + w.Events <- newEvent("", sysFSQOVERFLOW) + w.Errors <- errors.New("short read in readEvents()") + break + } + + // Point "raw" to the event in the buffer + raw := (*syscall.FileNotifyInformation)(unsafe.Pointer(&watch.buf[offset])) + buf := (*[syscall.MAX_PATH]uint16)(unsafe.Pointer(&raw.FileName)) + name := syscall.UTF16ToString(buf[:raw.FileNameLength/2]) + fullname := filepath.Join(watch.path, name) + + var mask uint64 + switch raw.Action { + case syscall.FILE_ACTION_REMOVED: + mask = sysFSDELETESELF + case syscall.FILE_ACTION_MODIFIED: + mask = sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + watch.rename = name + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + if watch.names[watch.rename] != 0 { + watch.names[name] |= watch.names[watch.rename] + delete(watch.names, watch.rename) + mask = sysFSMOVESELF + } + } + + sendNameEvent := func() { + if w.sendEvent(fullname, watch.names[name]&mask) { + if watch.names[name]&sysFSONESHOT != 0 { + delete(watch.names, name) + } + } + } + if raw.Action != syscall.FILE_ACTION_RENAMED_NEW_NAME { + sendNameEvent() + } + if raw.Action == syscall.FILE_ACTION_REMOVED { + w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + delete(watch.names, name) + } + if w.sendEvent(fullname, watch.mask&toFSnotifyFlags(raw.Action)) { + if watch.mask&sysFSONESHOT != 0 { + watch.mask = 0 + } + } + if raw.Action == syscall.FILE_ACTION_RENAMED_NEW_NAME { + fullname = filepath.Join(watch.path, watch.rename) + sendNameEvent() + } + + // Move to the next event in the buffer + if raw.NextEntryOffset == 0 { + break + } + offset += raw.NextEntryOffset + + // Error! 
+ if offset >= n { + w.Errors <- errors.New("Windows system assumed buffer larger than it is, events have likely been missed.") + break + } + } + + if err := w.startRead(watch); err != nil { + w.Errors <- err + } + } +} + +func (w *Watcher) sendEvent(name string, mask uint64) bool { + if mask == 0 { + return false + } + event := newEvent(name, uint32(mask)) + select { + case ch := <-w.quit: + w.quit <- ch + case w.Events <- event: + } + return true +} + +func toWindowsFlags(mask uint64) uint32 { + var m uint32 + if mask&sysFSACCESS != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_ACCESS + } + if mask&sysFSMODIFY != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_LAST_WRITE + } + if mask&sysFSATTRIB != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_ATTRIBUTES + } + if mask&(sysFSMOVE|sysFSCREATE|sysFSDELETE) != 0 { + m |= syscall.FILE_NOTIFY_CHANGE_FILE_NAME | syscall.FILE_NOTIFY_CHANGE_DIR_NAME + } + return m +} + +func toFSnotifyFlags(action uint32) uint64 { + switch action { + case syscall.FILE_ACTION_ADDED: + return sysFSCREATE + case syscall.FILE_ACTION_REMOVED: + return sysFSDELETE + case syscall.FILE_ACTION_MODIFIED: + return sysFSMODIFY + case syscall.FILE_ACTION_RENAMED_OLD_NAME: + return sysFSMOVEDFROM + case syscall.FILE_ACTION_RENAMED_NEW_NAME: + return sysFSMOVEDTO + } + return 0 +} diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE deleted file mode 100644 index 7805d36de7..0000000000 --- a/vendor/github.com/ghodss/yaml/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Sam Ghods - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go deleted file mode 100644 index 5860074026..0000000000 --- a/vendor/github.com/ghodss/yaml/fields.go +++ /dev/null @@ -1,501 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package yaml - -import ( - "bytes" - "encoding" - "encoding/json" - "reflect" - "sort" - "strings" - "sync" - "unicode" - "unicode/utf8" -) - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. - if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(json.Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// A field represents a single field found in a struct. -type field struct { - name string - nameBytes []byte // []byte(name) - equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent - - tag bool - index []int - typ reflect.Type - omitEmpty bool - quoted bool -} - -func fillField(f field) field { - f.nameBytes = []byte(f.name) - f.equalFold = foldFunc(f.nameBytes) - return f -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from json tag", then -// breaking ties with index sequence. 
-type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that JSON should recognize for the given type. -// The algorithm is breadth-first search over the set of structs to include - the top struct -// and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - tag := sf.Tag.Get("json") - if tag == "-" { - continue - } - name, opts := parseTag(tag) - if !isValidTag(name) { - name = "" - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, fillField(field{ - name: name, - tag: tagged, - index: index, - typ: ft, - omitEmpty: opts.Contains("omitempty"), - quoted: opts.Contains("string"), - })) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with JSON tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. 
- // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// JSON tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// foldFunc returns one of four different case folding equivalence -// functions, from most general (and slow) to fastest: -// -// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 -// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') -// 3) asciiEqualFold, no special, but includes non-letters (including _) -// 4) simpleLetterEqualFold, no specials, no non-letters. 
-// -// The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign -// See http://play.golang.org/p/tTxjOc0OGo -// -// The returned function is specialized for matching against s and -// should only be given s. It's not curried for performance reasons. -func foldFunc(s []byte) func(s, t []byte) bool { - nonLetter := false - special := false // special letter - for _, b := range s { - if b >= utf8.RuneSelf { - return bytes.EqualFold - } - upper := b & caseMask - if upper < 'A' || upper > 'Z' { - nonLetter = true - } else if upper == 'K' || upper == 'S' { - // See above for why these letters are special. - special = true - } - } - if special { - return equalFoldRight - } - if nonLetter { - return asciiEqualFold - } - return simpleLetterEqualFold -} - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func equalFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func asciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func simpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} - -// tagOptions is the string following a comma in a struct field's "json" -// tag, or the empty string. It does not include the leading comma. -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. 
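The folding helpers being removed above all build on one ASCII trick: clearing the 0x20 bit maps a lower-case ASCII letter onto its upper-case form, which is why `foldFunc` may only take the fast paths for pure-ASCII keys and must fall back to `bytes.EqualFold` otherwise. A standalone illustration of that mask (not part of the removed package):

```go
package main

import "fmt"

// caseMask clears the ASCII lower-case bit; valid only for ASCII letters.
const caseMask = ^byte(0x20)

func main() {
	fmt.Printf("%c %c\n", 'k'&caseMask, 's'&caseMask) // K S
	fmt.Println('a'&caseMask == 'A'&caseMask)         // true
}
```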
-func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go deleted file mode 100644 index 4fb4054a8b..0000000000 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ /dev/null @@ -1,277 +0,0 @@ -package yaml - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "gopkg.in/yaml.v2" -) - -// Marshals the object into JSON then converts JSON to YAML and returns the -// YAML. -func Marshal(o interface{}) ([]byte, error) { - j, err := json.Marshal(o) - if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) - } - - y, err := JSONToYAML(j) - if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) - } - - return y, nil -} - -// Converts YAML to JSON then uses JSON to unmarshal into an object. -func Unmarshal(y []byte, o interface{}) error { - vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo) - if err != nil { - return fmt.Errorf("error converting YAML to JSON: %v", err) - } - - err = json.Unmarshal(j, o) - if err != nil { - return fmt.Errorf("error unmarshaling JSON: %v", err) - } - - return nil -} - -// Convert JSON to YAML. -func JSONToYAML(j []byte) ([]byte, error) { - // Convert the JSON to an object. - var jsonObj interface{} - // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the - // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 - // universally. go-yaml does go through the effort of picking the right - // number type, so we can preserve number type throughout this process. - err := yaml.Unmarshal(j, &jsonObj) - if err != nil { - return nil, err - } - - // Marshal this object into YAML. - return yaml.Marshal(jsonObj) -} - -// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through -// this method should be a no-op. -// -// Things YAML can do that are not supported by JSON: -// * In YAML you can have binary and null keys in your maps. These are invalid -// in JSON. (int and float keys are converted to strings.) -// * Binary data in YAML with the !!binary tag is not supported. If you want to -// use binary data with this library, encode the data as base64 as usual but do -// not use the !!binary tag in your YAML. This will ensure the original base64 -// encoded data makes it all the way through to the JSON. -func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { - // Convert the YAML to an object. - var yamlObj interface{} - err := yaml.Unmarshal(y, &yamlObj) - if err != nil { - return nil, err - } - - // YAML objects are not completely compatible with JSON objects (e.g. you - // can have non-string keys in YAML). So, convert the YAML-compatible object - // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilties happen along the way. - jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) - if err != nil { - return nil, err - } - - // Convert this object to JSON and return the data. 
- return json.Marshal(jsonObj) -} - -func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { - var err error - - // Resolve jsonTarget to a concrete value (i.e. not a pointer or an - // interface). We pass decodingNull as false because we're not actually - // decoding into the value, we're just checking if the ultimate target is a - // string. - if jsonTarget != nil { - ju, tu, pv := indirect(*jsonTarget, false) - // We have a JSON or Text Umarshaler at this level, so we can't be trying - // to decode into a string. - if ju != nil || tu != nil { - jsonTarget = nil - } else { - jsonTarget = &pv - } - } - - // If yamlObj is a number or a boolean, check if jsonTarget is a string - - // if so, coerce. Else return normal. - // If yamlObj is a map or array, find the field that each key is - // unmarshaling to, and when you recurse pass the reflect.Value for that - // field back into this function. - switch typedYAMLObj := yamlObj.(type) { - case map[interface{}]interface{}: - // JSON does not support arbitrary keys in a map, so we must convert - // these keys to strings. - // - // From my reading of go-yaml v2 (specifically the resolve function), - // keys can only have the types string, int, int64, float64, binary - // (unsupported), or null (unsupported). - strMap := make(map[string]interface{}) - for k, v := range typedYAMLObj { - // Resolve the key to a string first. - var keyString string - switch typedKey := k.(type) { - case string: - keyString = typedKey - case int: - keyString = strconv.Itoa(typedKey) - case int64: - // go-yaml will only return an int64 as a key if the system - // architecture is 32-bit and the key's value is between 32-bit - // and 64-bit. Otherwise the key type will simply be int. - keyString = strconv.FormatInt(typedKey, 10) - case float64: - // Stolen from go-yaml to use the same conversion to string as - // the go-yaml library uses to convert float to string when - // Marshaling. - s := strconv.FormatFloat(typedKey, 'g', -1, 32) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - keyString = s - case bool: - if typedKey { - keyString = "true" - } else { - keyString = "false" - } - default: - return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", - reflect.TypeOf(k), k, v) - } - - // jsonTarget should be a struct or a map. If it's a struct, find - // the field it's going to map to and pass its reflect.Value. If - // it's a map, find the element type of the map and pass the - // reflect.Value created from that type. If it's neither, just pass - // nil - JSON conversion will error for us if it's a real issue. - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Struct { - keyBytes := []byte(keyString) - // Find the field that the JSON library would use. - var f *field - fields := cachedTypeFields(t.Type()) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, keyBytes) { - f = ff - break - } - // Do case-insensitive comparison. - if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { - f = ff - } - } - if f != nil { - // Find the reflect.Value of the most preferential - // struct field. - jtf := t.Field(f.index[0]) - strMap[keyString], err = convertToJSONableObject(v, &jtf) - if err != nil { - return nil, err - } - continue - } - } else if t.Kind() == reflect.Map { - // Create a zero value of the map's element type to use as - // the JSON target. 
- jtv := reflect.Zero(t.Type().Elem()) - strMap[keyString], err = convertToJSONableObject(v, &jtv) - if err != nil { - return nil, err - } - continue - } - } - strMap[keyString], err = convertToJSONableObject(v, nil) - if err != nil { - return nil, err - } - } - return strMap, nil - case []interface{}: - // We need to recurse into arrays in case there are any - // map[interface{}]interface{}'s inside and to convert any - // numbers to strings. - - // If jsonTarget is a slice (which it really should be), find the - // thing it's going to map to. If it's not a slice, just pass nil - // - JSON conversion will error for us if it's a real issue. - var jsonSliceElemValue *reflect.Value - if jsonTarget != nil { - t := *jsonTarget - if t.Kind() == reflect.Slice { - // By default slices point to nil, but we need a reflect.Value - // pointing to a value of the slice type, so we create one here. - ev := reflect.Indirect(reflect.New(t.Type().Elem())) - jsonSliceElemValue = &ev - } - } - - // Make and use a new array. - arr := make([]interface{}, len(typedYAMLObj)) - for i, v := range typedYAMLObj { - arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) - if err != nil { - return nil, err - } - } - return arr, nil - default: - // If the target type is a string and the YAML type is a number, - // convert the YAML type to a string. - if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { - // Based on my reading of go-yaml, it may return int, int64, - // float64, or uint64. - var s string - switch typedVal := typedYAMLObj.(type) { - case int: - s = strconv.FormatInt(int64(typedVal), 10) - case int64: - s = strconv.FormatInt(typedVal, 10) - case float64: - s = strconv.FormatFloat(typedVal, 'g', -1, 32) - case uint64: - s = strconv.FormatUint(typedVal, 10) - case bool: - if typedVal { - s = "true" - } else { - s = "false" - } - } - if len(s) > 0 { - yamlObj = interface{}(s) - } - } - return yamlObj, nil - } - - return nil, nil -} diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE b/vendor/github.com/godbus/dbus/LICENSE similarity index 100% rename from vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE rename to vendor/github.com/godbus/dbus/LICENSE diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go new file mode 100644 index 0000000000..98017b693e --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth.go @@ -0,0 +1,253 @@ +package dbus + +import ( + "bufio" + "bytes" + "errors" + "io" + "os" + "strconv" +) + +// AuthStatus represents the Status of an authentication mechanism. +type AuthStatus byte + +const ( + // AuthOk signals that authentication is finished; the next command + // from the server should be an OK. + AuthOk AuthStatus = iota + + // AuthContinue signals that additional data is needed; the next command + // from the server should be a DATA. + AuthContinue + + // AuthError signals an error; the server sent invalid data or some + // other unexpected thing happened and the current authentication + // process should be aborted. + AuthError +) + +type authState byte + +const ( + waitingForData authState = iota + waitingForOk + waitingForReject +) + +// Auth defines the behaviour of an authentication mechanism. +type Auth interface { + // Return the name of the mechnism, the argument to the first AUTH command + // and the next status. 
+ FirstData() (name, resp []byte, status AuthStatus) + + // Process the given DATA command, and return the argument to the DATA + // command and the next status. If len(resp) == 0, no DATA command is sent. + HandleData(data []byte) (resp []byte, status AuthStatus) +} + +// Auth authenticates the connection, trying the given list of authentication +// mechanisms (in that order). If nil is passed, the EXTERNAL and +// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private +// connections, this method must be called before sending any messages to the +// bus. Auth must not be called on shared connections. +func (conn *Conn) Auth(methods []Auth) error { + if methods == nil { + uid := strconv.Itoa(os.Getuid()) + methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} + } + in := bufio.NewReader(conn.transport) + err := conn.transport.SendNullByte() + if err != nil { + return err + } + err = authWriteLine(conn.transport, []byte("AUTH")) + if err != nil { + return err + } + s, err := authReadLine(in) + if err != nil { + return err + } + if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { + return errors.New("dbus: authentication protocol error") + } + s = s[1:] + for _, v := range s { + for _, m := range methods { + if name, data, status := m.FirstData(); bytes.Equal(v, name) { + var ok bool + err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) + if err != nil { + return err + } + switch status { + case AuthOk: + err, ok = conn.tryAuth(m, waitingForOk, in) + case AuthContinue: + err, ok = conn.tryAuth(m, waitingForData, in) + default: + panic("dbus: invalid authentication status") + } + if err != nil { + return err + } + if ok { + if conn.transport.SupportsUnixFDs() { + err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) + if err != nil { + return err + } + line, err := authReadLine(in) + if err != nil { + return err + } + switch { + case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): + conn.EnableUnixFDs() + conn.unixFD = true + case bytes.Equal(line[0], []byte("ERROR")): + default: + return errors.New("dbus: authentication protocol error") + } + } + err = authWriteLine(conn.transport, []byte("BEGIN")) + if err != nil { + return err + } + go conn.inWorker() + go conn.outWorker() + return nil + } + } + } + } + return errors.New("dbus: authentication failed") +} + +// tryAuth tries to authenticate with m as the mechanism, using state as the +// initial authState and in for reading input. It returns (nil, true) on +// success, (nil, false) on a REJECTED and (someErr, false) if some other +// error occured. 
+func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { + for { + s, err := authReadLine(in) + if err != nil { + return err, false + } + switch { + case state == waitingForData && string(s[0]) == "DATA": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + continue + } + data, status := m.HandleData(s[1]) + switch status { + case AuthOk, AuthContinue: + if len(data) != 0 { + err = authWriteLine(conn.transport, []byte("DATA"), data) + if err != nil { + return err, false + } + } + if status == AuthOk { + state = waitingForOk + } + case AuthError: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + } + case state == waitingForData && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForData && string(s[0]) == "ERROR": + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForData && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForData: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForOk && string(s[0]) == "OK": + if len(s) != 2 { + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + } + conn.uuid = string(s[1]) + return nil, true + case state == waitingForOk && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForOk && (string(s[0]) == "DATA" || + string(s[0]) == "ERROR"): + + err = authWriteLine(conn.transport, []byte("CANCEL")) + if err != nil { + return err, false + } + state = waitingForReject + case state == waitingForOk: + err = authWriteLine(conn.transport, []byte("ERROR")) + if err != nil { + return err, false + } + case state == waitingForReject && string(s[0]) == "REJECTED": + return nil, false + case state == waitingForReject: + return errors.New("dbus: authentication protocol error"), false + default: + panic("dbus: invalid auth state") + } + } +} + +// authReadLine reads a line and separates it into its fields. +func authReadLine(in *bufio.Reader) ([][]byte, error) { + data, err := in.ReadBytes('\n') + if err != nil { + return nil, err + } + data = bytes.TrimSuffix(data, []byte("\r\n")) + return bytes.Split(data, []byte{' '}), nil +} + +// authWriteLine writes the given line in the authentication protocol format +// (elements of data separated by a " " and terminated by "\r\n"). +func authWriteLine(out io.Writer, data ...[]byte) error { + buf := make([]byte, 0) + for i, v := range data { + buf = append(buf, v...) + if i != len(data)-1 { + buf = append(buf, ' ') + } + } + buf = append(buf, '\r') + buf = append(buf, '\n') + n, err := out.Write(buf) + if err != nil { + return err + } + if n != len(buf) { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/auth_external.go b/vendor/github.com/godbus/dbus/auth_external.go new file mode 100644 index 0000000000..7e376d3ef6 --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth_external.go @@ -0,0 +1,26 @@ +package dbus + +import ( + "encoding/hex" +) + +// AuthExternal returns an Auth that authenticates as the given user with the +// EXTERNAL mechanism. 
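For reference, the first line a client using the EXTERNAL mechanism writes is simply `AUTH EXTERNAL <hex(uid)>` terminated by CRLF, exactly as `authWriteLine` formats it. A standalone sketch of the encoding, with uid 1000 as an assumed example value:

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	uid := "1000" // os.Getuid() formatted as a string, as in (*Conn).Auth
	enc := make([]byte, 2*len(uid))
	hex.Encode(enc, []byte(uid))
	line := fmt.Sprintf("AUTH EXTERNAL %s\r\n", enc)
	fmt.Printf("%q\n", line) // "AUTH EXTERNAL 31303030\r\n"
}
```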
+func AuthExternal(user string) Auth { + return authExternal{user} +} + +// AuthExternal implements the EXTERNAL authentication mechanism. +type authExternal struct { + user string +} + +func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("EXTERNAL"), b, AuthOk +} + +func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { + return nil, AuthError +} diff --git a/vendor/github.com/godbus/dbus/auth_sha1.go b/vendor/github.com/godbus/dbus/auth_sha1.go new file mode 100644 index 0000000000..df15b46119 --- /dev/null +++ b/vendor/github.com/godbus/dbus/auth_sha1.go @@ -0,0 +1,102 @@ +package dbus + +import ( + "bufio" + "bytes" + "crypto/rand" + "crypto/sha1" + "encoding/hex" + "os" +) + +// AuthCookieSha1 returns an Auth that authenticates as the given user with the +// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home +// directory of the user. +func AuthCookieSha1(user, home string) Auth { + return authCookieSha1{user, home} +} + +type authCookieSha1 struct { + user, home string +} + +func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { + b := make([]byte, 2*len(a.user)) + hex.Encode(b, []byte(a.user)) + return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue +} + +func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { + challenge := make([]byte, len(data)/2) + _, err := hex.Decode(challenge, data) + if err != nil { + return nil, AuthError + } + b := bytes.Split(challenge, []byte{' '}) + if len(b) != 3 { + return nil, AuthError + } + context := b[0] + id := b[1] + svchallenge := b[2] + cookie := a.getCookie(context, id) + if cookie == nil { + return nil, AuthError + } + clchallenge := a.generateChallenge() + if clchallenge == nil { + return nil, AuthError + } + hash := sha1.New() + hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) + hexhash := make([]byte, 2*hash.Size()) + hex.Encode(hexhash, hash.Sum(nil)) + data = append(clchallenge, ' ') + data = append(data, hexhash...) + resp := make([]byte, 2*len(data)) + hex.Encode(resp, data) + return resp, AuthOk +} + +// getCookie searches for the cookie identified by id in context and returns +// the cookie content or nil. (Since HandleData can't return a specific error, +// but only whether an error occured, this function also doesn't bother to +// return an error.) +func (a authCookieSha1) getCookie(context, id []byte) []byte { + file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) + if err != nil { + return nil + } + defer file.Close() + rd := bufio.NewReader(file) + for { + line, err := rd.ReadBytes('\n') + if err != nil { + return nil + } + line = line[:len(line)-1] + b := bytes.Split(line, []byte{' '}) + if len(b) != 3 { + return nil + } + if bytes.Equal(b[0], id) { + return b[2] + } + } +} + +// generateChallenge returns a random, hex-encoded challenge, or nil on error +// (see above). +func (a authCookieSha1) generateChallenge() []byte { + b := make([]byte, 16) + n, err := rand.Read(b) + if err != nil { + return nil + } + if n != 16 { + return nil + } + enc := make([]byte, 32) + hex.Encode(enc, b) + return enc +} diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go new file mode 100644 index 0000000000..ba6e73f607 --- /dev/null +++ b/vendor/github.com/godbus/dbus/call.go @@ -0,0 +1,36 @@ +package dbus + +import ( + "errors" +) + +// Call represents a pending or completed method call. 
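The DBUS_COOKIE_SHA1 response computed by `HandleData` above can be reproduced standalone: the proof is the hex-encoded SHA-1 of `serverchallenge:clientchallenge:cookie`, and the DATA argument is the hex-encoding of `clientchallenge proof`. A sketch with made-up challenge and cookie values:

```go
package main

import (
	"crypto/sha1"
	"encoding/hex"
	"fmt"
)

func main() {
	// Assumed example values; in the real flow the server supplies its
	// challenge, the client generates its own, and the cookie comes from
	// ~/.dbus-keyrings (see getCookie above).
	sv, cl, cookie := "a1b2", "c3d4", "secretcookie"
	sum := sha1.Sum([]byte(sv + ":" + cl + ":" + cookie))
	proof := hex.EncodeToString(sum[:])
	// The argument sent back in the DATA command:
	data := hex.EncodeToString([]byte(cl + " " + proof))
	fmt.Println(data)
}
```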
+type Call struct {
+	Destination string
+	Path        ObjectPath
+	Method      string
+	Args        []interface{}
+
+	// Strobes when the call is complete.
+	Done chan *Call
+
+	// After completion, the error status. If this is non-nil, it may be an
+	// error message from the peer (with Error as its type) or some other error.
+	Err error
+
+	// Holds the response once the call is done.
+	Body []interface{}
+}
+
+var errSignature = errors.New("dbus: mismatched signature")
+
+// Store stores the body of the reply into the provided pointers. It returns
+// an error if the signatures of the body and retvalues don't match, or if
+// the error status is not nil.
+func (c *Call) Store(retvalues ...interface{}) error {
+	if c.Err != nil {
+		return c.Err
+	}
+
+	return Store(c.Body, retvalues...)
+}
diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go
new file mode 100644
index 0000000000..5720e2ebbf
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/conn.go
@@ -0,0 +1,683 @@
+package dbus
+
+import (
+	"errors"
+	"io"
+	"os"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+var (
+	systemBus     *Conn
+	systemBusLck  sync.Mutex
+	sessionBus    *Conn
+	sessionBusLck sync.Mutex
+	sessionEnvLck sync.Mutex
+)
+
+// ErrClosed is the error returned by calls on a closed connection.
+var ErrClosed = errors.New("dbus: connection closed by user")
+
+// Conn represents a connection to a message bus (usually, the system or
+// session bus).
+//
+// Connections are either shared or private. Shared connections
+// are shared between calls to the functions that return them. As a result,
+// the methods Close, Auth and Hello must not be called on them.
+//
+// Multiple goroutines may invoke methods on a connection simultaneously.
+type Conn struct {
+	transport
+
+	busObj BusObject
+	unixFD bool
+	uuid   string
+
+	names    []string
+	namesLck sync.RWMutex
+
+	serialLck  sync.Mutex
+	nextSerial uint32
+	serialUsed map[uint32]bool
+
+	calls    map[uint32]*Call
+	callsLck sync.RWMutex
+
+	handler Handler
+
+	out    chan *Message
+	closed bool
+	outLck sync.RWMutex
+
+	signalHandler SignalHandler
+
+	eavesdropped    chan<- *Message
+	eavesdroppedLck sync.Mutex
+}
+
+// SessionBus returns a shared connection to the session bus, connecting to it
+// if not already done.
+func SessionBus() (conn *Conn, err error) {
+	sessionBusLck.Lock()
+	defer sessionBusLck.Unlock()
+	if sessionBus != nil {
+		return sessionBus, nil
+	}
+	defer func() {
+		if conn != nil {
+			sessionBus = conn
+		}
+	}()
+	conn, err = SessionBusPrivate()
+	if err != nil {
+		return
+	}
+	if err = conn.Auth(nil); err != nil {
+		conn.Close()
+		conn = nil
+		return
+	}
+	if err = conn.Hello(); err != nil {
+		conn.Close()
+		conn = nil
+	}
+	return
+}
+
+func getSessionBusAddress() (string, error) {
+	sessionEnvLck.Lock()
+	defer sessionEnvLck.Unlock()
+	address := os.Getenv("DBUS_SESSION_BUS_ADDRESS")
+	if address != "" && address != "autolaunch:" {
+		return address, nil
+	}
+	return getSessionBusPlatformAddress()
+}
+
+// SessionBusPrivate returns a new private connection to the session bus.
+func SessionBusPrivate() (*Conn, error) {
+	address, err := getSessionBusAddress()
+	if err != nil {
+		return nil, err
+	}
+
+	return Dial(address)
+}
+
+// SessionBusPrivateHandler returns a new private connection to the session bus, using the provided handlers.
+func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + address, err := getSessionBusAddress() + if err != nil { + return nil, err + } + return DialHandler(address, handler, signalHandler) +} + +// SystemBus returns a shared connection to the system bus, connecting to it if +// not already done. +func SystemBus() (conn *Conn, err error) { + systemBusLck.Lock() + defer systemBusLck.Unlock() + if systemBus != nil { + return systemBus, nil + } + defer func() { + if conn != nil { + systemBus = conn + } + }() + conn, err = SystemBusPrivate() + if err != nil { + return + } + if err = conn.Auth(nil); err != nil { + conn.Close() + conn = nil + return + } + if err = conn.Hello(); err != nil { + conn.Close() + conn = nil + } + return +} + +// SystemBusPrivate returns a new private connection to the system bus. +func SystemBusPrivate() (*Conn, error) { + return Dial(getSystemBusPlatformAddress()) +} + +// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers. +func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { + return DialHandler(getSystemBusPlatformAddress(), handler, signalHandler) +} + +// Dial establishes a new private connection to the message bus specified by address. +func Dial(address string) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers. +func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { + tr, err := getTransport(address) + if err != nil { + return nil, err + } + return newConn(tr, handler, signalHandler) +} + +// NewConn creates a new private *Conn from an already established connection. +func NewConn(conn io.ReadWriteCloser) (*Conn, error) { + return NewConnHandler(conn, NewDefaultHandler(), NewDefaultSignalHandler()) +} + +// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers. +func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) { + return newConn(genericTransport{conn}, handler, signalHandler) +} + +// newConn creates a new *Conn from a transport. +func newConn(tr transport, handler Handler, signalHandler SignalHandler) (*Conn, error) { + conn := new(Conn) + conn.transport = tr + conn.calls = make(map[uint32]*Call) + conn.out = make(chan *Message, 10) + conn.handler = handler + conn.signalHandler = signalHandler + conn.nextSerial = 1 + conn.serialUsed = map[uint32]bool{0: true} + conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + return conn, nil +} + +// BusObject returns the object owned by the bus daemon which handles +// administrative requests. +func (conn *Conn) BusObject() BusObject { + return conn.busObj +} + +// Close closes the connection. Any blocked operations will return with errors +// and the channels passed to Eavesdrop and Signal are closed. This method must +// not be called on shared connections. +func (conn *Conn) Close() error { + conn.outLck.Lock() + if conn.closed { + // inWorker calls Close on read error, the read error may + // be caused by another caller calling Close to shutdown the + // dbus connection, a double-close scenario we prevent here. 
+		conn.outLck.Unlock()
+		return nil
+	}
+	close(conn.out)
+	conn.closed = true
+	conn.outLck.Unlock()
+
+	if term, ok := conn.signalHandler.(Terminator); ok {
+		term.Terminate()
+	}
+
+	if term, ok := conn.handler.(Terminator); ok {
+		term.Terminate()
+	}
+
+	conn.eavesdroppedLck.Lock()
+	if conn.eavesdropped != nil {
+		close(conn.eavesdropped)
+	}
+	conn.eavesdroppedLck.Unlock()
+
+	return conn.transport.Close()
+}
+
+// Eavesdrop causes conn to send all incoming messages to the given channel
+// without further processing. Method replies, errors and signals will not be
+// sent to the appropriate channels and method calls will not be handled. If nil
+// is passed, the normal behaviour is restored.
+//
+// The caller has to make sure that ch is sufficiently buffered;
+// if a message arrives when a write to ch is not possible, the message is
+// discarded.
+func (conn *Conn) Eavesdrop(ch chan<- *Message) {
+	conn.eavesdroppedLck.Lock()
+	conn.eavesdropped = ch
+	conn.eavesdroppedLck.Unlock()
+}
+
+// getSerial returns an unused serial.
+func (conn *Conn) getSerial() uint32 {
+	conn.serialLck.Lock()
+	defer conn.serialLck.Unlock()
+	n := conn.nextSerial
+	for conn.serialUsed[n] {
+		n++
+	}
+	conn.serialUsed[n] = true
+	conn.nextSerial = n + 1
+	return n
+}
+
+// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be
+// called after authentication, but before sending any other messages to the
+// bus. Hello must not be called for shared connections.
+func (conn *Conn) Hello() error {
+	var s string
+	err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s)
+	if err != nil {
+		return err
+	}
+	conn.namesLck.Lock()
+	conn.names = make([]string, 1)
+	conn.names[0] = s
+	conn.namesLck.Unlock()
+	return nil
+}
+
+// inWorker runs in its own goroutine, reading incoming messages from the
+// transport and dispatching them appropriately.
+func (conn *Conn) inWorker() {
+	for {
+		msg, err := conn.ReadMessage()
+		if err == nil {
+			conn.eavesdroppedLck.Lock()
+			if conn.eavesdropped != nil {
+				select {
+				case conn.eavesdropped <- msg:
+				default:
+				}
+				conn.eavesdroppedLck.Unlock()
+				continue
+			}
+			conn.eavesdroppedLck.Unlock()
+			dest, _ := msg.Headers[FieldDestination].value.(string)
+			found := false
+			if dest == "" {
+				found = true
+			} else {
+				conn.namesLck.RLock()
+				if len(conn.names) == 0 {
+					found = true
+				}
+				for _, v := range conn.names {
+					if dest == v {
+						found = true
+						break
+					}
+				}
+				conn.namesLck.RUnlock()
+			}
+			if !found {
+				// Eavesdropped a message, but no channel for it is registered.
+				// Ignore it.
+				continue
+			}
+			switch msg.Type {
+			case TypeMethodReply, TypeError:
+				serial := msg.Headers[FieldReplySerial].value.(uint32)
+				conn.callsLck.Lock()
+				if c, ok := conn.calls[serial]; ok {
+					if msg.Type == TypeError {
+						name, _ := msg.Headers[FieldErrorName].value.(string)
+						c.Err = Error{name, msg.Body}
+					} else {
+						c.Body = msg.Body
+					}
+					c.Done <- c
+					conn.serialLck.Lock()
+					delete(conn.serialUsed, serial)
+					conn.serialLck.Unlock()
+					delete(conn.calls, serial)
+				}
+				conn.callsLck.Unlock()
+			case TypeSignal:
+				iface := msg.Headers[FieldInterface].value.(string)
+				member := msg.Headers[FieldMember].value.(string)
+				// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+				// sender is optional for signals.
+				sender, _ := msg.Headers[FieldSender].value.(string)
+				if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" {
+					if member == "NameLost" {
+						// If we lost the name on the bus, remove it from our
+						// tracking list.
+						name, ok := msg.Body[0].(string)
+						if !ok {
+							panic("Unable to read the lost name")
+						}
+						conn.namesLck.Lock()
+						for i, v := range conn.names {
+							if v == name {
+								conn.names = append(conn.names[:i],
+									conn.names[i+1:]...)
+							}
+						}
+						conn.namesLck.Unlock()
+					} else if member == "NameAcquired" {
+						// If we acquired the name on the bus, add it to our
+						// tracking list.
+						name, ok := msg.Body[0].(string)
+						if !ok {
+							panic("Unable to read the acquired name")
+						}
+						conn.namesLck.Lock()
+						conn.names = append(conn.names, name)
+						conn.namesLck.Unlock()
+					}
+				}
+				conn.handleSignal(msg)
+			case TypeMethodCall:
+				go conn.handleCall(msg)
+			}
+		} else if _, ok := err.(InvalidMessageError); !ok {
+			// Some read error occurred (usually EOF); we can't really do
+			// anything but shut everything down and return errors to all
+			// pending replies.
+			conn.Close()
+			conn.callsLck.RLock()
+			for _, v := range conn.calls {
+				v.Err = err
+				v.Done <- v
+			}
+			conn.callsLck.RUnlock()
+			return
+		}
+		// invalid messages are ignored
+	}
+}
+
+func (conn *Conn) handleSignal(msg *Message) {
+	iface := msg.Headers[FieldInterface].value.(string)
+	member := msg.Headers[FieldMember].value.(string)
+	// as per http://dbus.freedesktop.org/doc/dbus-specification.html ,
+	// sender is optional for signals.
+	sender, _ := msg.Headers[FieldSender].value.(string)
+	signal := &Signal{
+		Sender: sender,
+		Path:   msg.Headers[FieldPath].value.(ObjectPath),
+		Name:   iface + "." + member,
+		Body:   msg.Body,
+	}
+	conn.signalHandler.DeliverSignal(iface, member, signal)
+}
+
+// Names returns the list of all names that are currently owned by this
+// connection. The slice is always at least one element long, the first element
+// being the unique name of the connection.
+func (conn *Conn) Names() []string {
+	conn.namesLck.RLock()
+	// copy the slice so it can't be modified
+	s := make([]string, len(conn.names))
+	copy(s, conn.names)
+	conn.namesLck.RUnlock()
+	return s
+}
+
+// Object returns the object identified by the given destination name and path.
+func (conn *Conn) Object(dest string, path ObjectPath) BusObject {
+	return &Object{conn, dest, path}
+}
+
+// outWorker runs in its own goroutine, encoding and sending messages that are
+// sent to conn.out.
+func (conn *Conn) outWorker() {
+	for msg := range conn.out {
+		err := conn.SendMessage(msg)
+		conn.callsLck.RLock()
+		if err != nil {
+			if c := conn.calls[msg.serial]; c != nil {
+				c.Err = err
+				c.Done <- c
+			}
+			conn.serialLck.Lock()
+			delete(conn.serialUsed, msg.serial)
+			conn.serialLck.Unlock()
+		} else if msg.Type != TypeMethodCall {
+			conn.serialLck.Lock()
+			delete(conn.serialUsed, msg.serial)
+			conn.serialLck.Unlock()
+		}
+		conn.callsLck.RUnlock()
+	}
+}
+
+// Send sends the given message to the message bus. You usually don't need to
+// use this; use the higher-level equivalents (Call / Go, Emit and Export)
+// instead. If msg is a method call and NoReplyExpected is not set, a non-nil
+// call is returned and the same value is sent to ch (which must be buffered)
+// once the call is complete. Otherwise, ch is ignored and a Call structure is
+// returned of which only the Err member is valid.
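Taken together, the worker goroutines above and `(*Call).Store` give the usual synchronous call pattern. A minimal usage sketch against the bus daemon; it assumes a running session bus, and `ListNames` is a standard `org.freedesktop.DBus` method:

```go
package main

import (
	"fmt"
	"os"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var names []string
	// Store fails if the reply signature does not match the retvalues.
	if err := conn.BusObject().Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(len(names), "names on the bus")
}
```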
+func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { + var call *Call + + msg.serial = conn.getSerial() + if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { + if ch == nil { + ch = make(chan *Call, 5) + } else if cap(ch) == 0 { + panic("dbus: unbuffered channel passed to (*Conn).Send") + } + call = new(Call) + call.Destination, _ = msg.Headers[FieldDestination].value.(string) + call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) + iface, _ := msg.Headers[FieldInterface].value.(string) + member, _ := msg.Headers[FieldMember].value.(string) + call.Method = iface + "." + member + call.Args = msg.Body + call.Done = ch + conn.callsLck.Lock() + conn.calls[msg.serial] = call + conn.callsLck.Unlock() + conn.outLck.RLock() + if conn.closed { + call.Err = ErrClosed + call.Done <- call + } else { + conn.out <- msg + } + conn.outLck.RUnlock() + } else { + conn.outLck.RLock() + if conn.closed { + call = &Call{Err: ErrClosed} + } else { + conn.out <- msg + call = &Call{Err: nil} + } + conn.outLck.RUnlock() + } + return call +} + +// sendError creates an error message corresponding to the parameters and sends +// it to conn.out. +func (conn *Conn) sendError(err error, dest string, serial uint32) { + var e *Error + switch em := err.(type) { + case Error: + e = &em + case *Error: + e = em + case DBusError: + name, body := em.DBusError() + e = NewError(name, body) + default: + e = MakeFailedError(err) + } + msg := new(Message) + msg.Type = TypeError + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldErrorName] = MakeVariant(e.Name) + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = e.Body + if len(e.Body) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +// sendReply creates a method reply message corresponding to the parameters and +// sends it to conn.out. +func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { + msg := new(Message) + msg.Type = TypeMethodReply + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + if dest != "" { + msg.Headers[FieldDestination] = MakeVariant(dest) + } + msg.Headers[FieldReplySerial] = MakeVariant(serial) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + if !conn.closed { + conn.out <- msg + } + conn.outLck.RUnlock() +} + +func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) { + if !isDefaultSignalHandler(conn.signalHandler) { + return + } + handler := conn.signalHandler.(*defaultSignalHandler) + fn(handler, ch) +} + +// Signal registers the given channel to be passed all received signal messages. +// The caller has to make sure that ch is sufficiently buffered; if a message +// arrives when a write to c is not possible, it is discarded. +// +// Multiple of these channels can be registered at the same time. +// +// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a +// channel for eavesdropped messages, this channel receives all signals, and +// none of the channels passed to Signal will receive any signals. 
+func (conn *Conn) Signal(ch chan<- *Signal) {
+	conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch)
+}
+
+// RemoveSignal removes the given channel from the list of the registered channels.
+func (conn *Conn) RemoveSignal(ch chan<- *Signal) {
+	conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch)
+}
+
+// SupportsUnixFDs returns whether the underlying transport supports passing of
+// unix file descriptors. If this is false, method calls containing unix file
+// descriptors will return an error and emitted signals containing them will
+// not be sent.
+func (conn *Conn) SupportsUnixFDs() bool {
+	return conn.unixFD
+}
+
+// Error represents a D-Bus message of type Error.
+type Error struct {
+	Name string
+	Body []interface{}
+}
+
+func NewError(name string, body []interface{}) *Error {
+	return &Error{name, body}
+}
+
+func (e Error) Error() string {
+	if len(e.Body) >= 1 {
+		s, ok := e.Body[0].(string)
+		if ok {
+			return s
+		}
+	}
+	return e.Name
+}
+
+// Signal represents a D-Bus message of type Signal. The name member is given in
+// "interface.member" notation, e.g. org.freedesktop.DBus.NameLost.
+type Signal struct {
+	Sender string
+	Path   ObjectPath
+	Name   string
+	Body   []interface{}
+}
+
+// transport is a D-Bus transport.
+type transport interface {
+	// Read and Write raw data (for example, for the authentication protocol).
+	io.ReadWriteCloser
+
+	// Send the initial null byte used for the EXTERNAL mechanism.
+	SendNullByte() error
+
+	// Returns whether this transport supports passing Unix FDs.
+	SupportsUnixFDs() bool
+
+	// Signal the transport that Unix FD passing is enabled for this connection.
+	EnableUnixFDs()
+
+	// Read / send a message, handling things like Unix FDs.
+	ReadMessage() (*Message, error)
+	SendMessage(*Message) error
+}
+
+var (
+	transports = make(map[string]func(string) (transport, error))
+)
+
+func getTransport(address string) (transport, error) {
+	var err error
+	var t transport
+
+	addresses := strings.Split(address, ";")
+	for _, v := range addresses {
+		i := strings.IndexRune(v, ':')
+		if i == -1 {
+			err = errors.New("dbus: invalid bus address (no transport)")
+			continue
+		}
+		f := transports[v[:i]]
+		if f == nil {
+			err = errors.New("dbus: invalid bus address (invalid or unsupported transport)")
+			continue
+		}
+		t, err = f(v[i+1:])
+		if err == nil {
+			return t, nil
+		}
+	}
+	return nil, err
+}
+
+// dereferenceAll returns a slice that, assuming that vs is a slice of pointers
+// of arbitrary types, contains the values that are obtained from dereferencing
+// all elements in vs.
+func dereferenceAll(vs []interface{}) []interface{} {
+	for i := range vs {
+		v := reflect.ValueOf(vs[i])
+		v = v.Elem()
+		vs[i] = v.Interface()
+	}
+	return vs
+}
+
+// getKey gets a key from the list of keys. Returns "" on error / not found...
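The `Signal` doc above insists on a sufficiently buffered channel because full channels silently drop signals. A usage sketch of that contract; it assumes a running session bus, `AddMatch` is the standard bus-daemon subscription method, and the match rule shown is only an example:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// Subscribe to signals from the bus daemon itself.
	conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
		"type='signal',interface='org.freedesktop.DBus'")
	ch := make(chan *dbus.Signal, 16) // buffered, as the docs require
	conn.Signal(ch)
	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}
```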
+func getKey(s, key string) string { + for _, keyEqualsValue := range strings.Split(s, ",") { + keyValue := strings.SplitN(keyEqualsValue, "=", 2) + if len(keyValue) == 2 && keyValue[0] == key { + return keyValue[1] + } + } + return "" +} diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go new file mode 100644 index 0000000000..c015f80ce7 --- /dev/null +++ b/vendor/github.com/godbus/dbus/conn_darwin.go @@ -0,0 +1,33 @@ +package dbus + +import ( + "errors" + "fmt" + "os" + "os/exec" +) + +const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { + cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") + b, err := cmd.CombinedOutput() + + if err != nil { + return "", err + } + + if len(b) == 0 { + return "", errors.New("dbus: couldn't determine address of session bus") + } + + return "unix:path=" + string(b[:len(b)-1]), nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress +} diff --git a/vendor/github.com/godbus/dbus/conn_other.go b/vendor/github.com/godbus/dbus/conn_other.go new file mode 100644 index 0000000000..254c9f2ef6 --- /dev/null +++ b/vendor/github.com/godbus/dbus/conn_other.go @@ -0,0 +1,42 @@ +// +build !darwin + +package dbus + +import ( + "bytes" + "errors" + "fmt" + "os" + "os/exec" +) + +const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" + +func getSessionBusPlatformAddress() (string, error) { + cmd := exec.Command("dbus-launch") + b, err := cmd.CombinedOutput() + + if err != nil { + return "", err + } + + i := bytes.IndexByte(b, '=') + j := bytes.IndexByte(b, '\n') + + if i == -1 || j == -1 { + return "", errors.New("dbus: couldn't determine address of session bus") + } + + env, addr := string(b[0:i]), string(b[i+1:j]) + os.Setenv(env, addr) + + return addr, nil +} + +func getSystemBusPlatformAddress() string { + address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") + if address != "" { + return fmt.Sprintf("unix:path=%s", address) + } + return defaultSystemBusAddress +} diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go new file mode 100644 index 0000000000..c6d0d3ce0e --- /dev/null +++ b/vendor/github.com/godbus/dbus/dbus.go @@ -0,0 +1,427 @@ +package dbus + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + byteType = reflect.TypeOf(byte(0)) + boolType = reflect.TypeOf(false) + uint8Type = reflect.TypeOf(uint8(0)) + int16Type = reflect.TypeOf(int16(0)) + uint16Type = reflect.TypeOf(uint16(0)) + intType = reflect.TypeOf(int(0)) + uintType = reflect.TypeOf(uint(0)) + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf("") + signatureType = reflect.TypeOf(Signature{""}) + objectPathType = reflect.TypeOf(ObjectPath("")) + variantType = reflect.TypeOf(Variant{Signature{""}, nil}) + interfacesType = reflect.TypeOf([]interface{}{}) + interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() + unixFDType = reflect.TypeOf(UnixFD(0)) + unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) +) + +// An InvalidTypeError signals that a value which cannot be represented in the +// D-Bus wire format was passed to a function. 
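A sketch of how this error surfaces in practice, using the package-internal encoder that appears later in this diff (illustrative only; channels have no D-Bus representation):

    var buf bytes.Buffer
    enc := newEncoder(&buf, binary.LittleEndian)
    err := enc.Encode(make(chan int)) // unsupported type
    fmt.Println(err) // dbus: invalid type chan int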
+type InvalidTypeError struct { + Type reflect.Type +} + +func (e InvalidTypeError) Error() string { + return "dbus: invalid type " + e.Type.String() +} + +// Store copies the values contained in src to dest, which must be a slice of +// pointers. It converts slices of interfaces from src to corresponding structs +// in dest. An error is returned if the lengths of src and dest or the types of +// their elements don't match. +func Store(src []interface{}, dest ...interface{}) error { + if len(src) != len(dest) { + return errors.New("dbus.Store: length mismatch") + } + + for i := range src { + if err := storeInterfaces(src[i], dest[i]); err != nil { + return err + } + } + return nil +} + +func storeInterfaces(src, dest interface{}) error { + return store(reflect.ValueOf(dest), reflect.ValueOf(src)) +} + +func store(dest, src reflect.Value) error { + if dest.Kind() == reflect.Ptr { + return store(dest.Elem(), src) + } + switch src.Kind() { + case reflect.Slice: + return storeSlice(dest, src) + case reflect.Map: + return storeMap(dest, src) + default: + return storeBase(dest, src) + } +} + +func storeBase(dest, src reflect.Value) error { + return setDest(dest, src) +} + +func setDest(dest, src reflect.Value) error { + if !isVariant(src.Type()) && isVariant(dest.Type()) { + //special conversion for dbus.Variant + dest.Set(reflect.ValueOf(MakeVariant(src.Interface()))) + return nil + } + if isVariant(src.Type()) && !isVariant(dest.Type()) { + src = getVariantValue(src) + } + if !src.Type().ConvertibleTo(dest.Type()) { + return fmt.Errorf( + "dbus.Store: type mismatch: cannot convert %s to %s", + src.Type(), dest.Type()) + } + dest.Set(src.Convert(dest.Type())) + return nil +} + +func kindsAreCompatible(dest, src reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + default: + return dest.Kind() == src.Kind() + } +} + +func isConvertibleTo(dest, src reflect.Type) bool { + switch { + case isVariant(dest): + return true + case dest.Kind() == reflect.Interface: + return true + case dest.Kind() == reflect.Slice: + return src.Kind() == reflect.Slice && + isConvertibleTo(dest.Elem(), src.Elem()) + case dest.Kind() == reflect.Struct: + return src == interfacesType + default: + return src.ConvertibleTo(dest) + } +} + +func storeMap(dest, src reflect.Value) error { + switch { + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeMapIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeMapIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Key(), src.Type().Key()) && + isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeMapIntoMap(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "map: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeMapIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeMap(src.Type()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeMap( + reflect.MapOf(src.Type().Key(), interfaceType)) + } else { + dv = reflect.MakeMap(src.Type()) + } + err := store(dv, src) + 
if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeMapIntoMap(dest, src reflect.Value) error { + if dest.IsNil() { + dest.Set(reflect.MakeMap(dest.Type())) + } + keys := src.MapKeys() + for _, key := range keys { + dkey := key.Convert(dest.Type().Key()) + dval := reflect.New(dest.Type().Elem()).Elem() + err := store(dval, getVariantValue(src.MapIndex(key))) + if err != nil { + return err + } + dest.SetMapIndex(dkey, dval) + } + return nil +} + +func storeSlice(dest, src reflect.Value) error { + switch { + case src.Type() == interfacesType && dest.Kind() == reflect.Struct: + //The decoder always decodes structs as slices of interface{} + return storeStruct(dest, src) + case !kindsAreCompatible(dest.Type(), src.Type()): + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot store a value of %s into %s", + src.Type(), dest.Type()) + case isVariant(dest.Type()): + return storeSliceIntoVariant(dest, src) + case dest.Kind() == reflect.Interface: + return storeSliceIntoInterface(dest, src) + case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): + return storeSliceIntoSlice(dest, src) + default: + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slice: cannot convert a value of %s into %s", + src.Type(), dest.Type()) + } +} + +func storeStruct(dest, src reflect.Value) error { + if isVariant(dest.Type()) { + return storeBase(dest, src) + } + dval := make([]interface{}, 0, dest.NumField()) + dtype := dest.Type() + for i := 0; i < dest.NumField(); i++ { + field := dest.Field(i) + ftype := dtype.Field(i) + if ftype.PkgPath != "" { + continue + } + if ftype.Tag.Get("dbus") == "-" { + continue + } + dval = append(dval, field.Addr().Interface()) + } + if src.Len() != len(dval) { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "destination struct does not have "+ + "enough fields need: %d have: %d", + src.Len(), len(dval)) + } + return Store(src.Interface().([]interface{}), dval...) +} + +func storeSliceIntoVariant(dest, src reflect.Value) error { + dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoInterface(dest, src reflect.Value) error { + var dv reflect.Value + if isVariant(src.Type().Elem()) { + //Convert variants to interface{} recursively when converting + //to interface{} + dv = reflect.MakeSlice(reflect.SliceOf(interfaceType), + src.Len(), src.Cap()) + } else { + dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) + } + err := store(dv, src) + if err != nil { + return err + } + return storeBase(dest, dv) +} + +func storeSliceIntoSlice(dest, src reflect.Value) error { + if dest.IsNil() || dest.Len() < src.Len() { + dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) + } + if dest.Len() != src.Len() { + return fmt.Errorf( + "dbus.Store: type mismatch: "+ + "slices are different lengths "+ + "need: %d have: %d", + src.Len(), dest.Len()) + } + for i := 0; i < src.Len(); i++ { + err := store(dest.Index(i), getVariantValue(src.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func getVariantValue(in reflect.Value) reflect.Value { + if isVariant(in.Type()) { + return reflect.ValueOf(in.Interface().(Variant).Value()) + } + return in +} + +func isVariant(t reflect.Type) bool { + return t == variantType +} + +// An ObjectPath is an object path as defined by the D-Bus spec. +type ObjectPath string + +// IsValid returns whether the object path is valid. 
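Rounding off the Store machinery above, a sketch of decoding a STRUCT, which the decoder represents as []interface{}, into a typed Go struct (the type names are illustrative):

    type version struct {
        Major int32
        Minor int32
    }
    // The decoder always yields structs as []interface{}.
    src := []interface{}{[]interface{}{int32(1), int32(2)}, "stable"}
    var v version
    var channel string
    if err := dbus.Store(src, &v, &channel); err != nil {
        panic(err)
    }
    // v is now version{1, 2} and channel is "stable".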
+func (o ObjectPath) IsValid() bool { + s := string(o) + if len(s) == 0 { + return false + } + if s[0] != '/' { + return false + } + if s[len(s)-1] == '/' && len(s) != 1 { + return false + } + // probably not used, but technically possible + if s == "/" { + return true + } + split := strings.Split(s[1:], "/") + for _, v := range split { + if len(v) == 0 { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// A UnixFD is a Unix file descriptor sent over the wire. See the package-level +// documentation for more information about Unix file descriptor passsing. +type UnixFD int32 + +// A UnixFDIndex is the representation of a Unix file descriptor in a message. +type UnixFDIndex uint32 + +// alignment returns the alignment of values of type t. +func alignment(t reflect.Type) int { + switch t { + case variantType: + return 1 + case objectPathType: + return 4 + case signatureType: + return 1 + case interfacesType: + return 4 + } + switch t.Kind() { + case reflect.Uint8: + return 1 + case reflect.Uint16, reflect.Int16: + return 2 + case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: + return 4 + case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: + return 8 + case reflect.Ptr: + return alignment(t.Elem()) + } + return 1 +} + +// isKeyType returns whether t is a valid type for a D-Bus dict. +func isKeyType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, + reflect.String, reflect.Uint, reflect.Int: + + return true + } + return false +} + +// isValidInterface returns whether s is a valid name for an interface. +func isValidInterface(s string) bool { + if len(s) == 0 || len(s) > 255 || s[0] == '.' { + return false + } + elem := strings.Split(s, ".") + if len(elem) < 2 { + return false + } + for _, v := range elem { + if len(v) == 0 { + return false + } + if v[0] >= '0' && v[0] <= '9' { + return false + } + for _, c := range v { + if !isMemberChar(c) { + return false + } + } + } + return true +} + +// isValidMember returns whether s is a valid name for a member. +func isValidMember(s string) bool { + if len(s) == 0 || len(s) > 255 { + return false + } + i := strings.Index(s, ".") + if i != -1 { + return false + } + if s[0] >= '0' && s[0] <= '9' { + return false + } + for _, c := range s { + if !isMemberChar(c) { + return false + } + } + return true +} + +func isMemberChar(c rune) bool { + return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || + (c >= 'a' && c <= 'z') || c == '_' +} diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go new file mode 100644 index 0000000000..ef50dcab98 --- /dev/null +++ b/vendor/github.com/godbus/dbus/decoder.go @@ -0,0 +1,228 @@ +package dbus + +import ( + "encoding/binary" + "io" + "reflect" +) + +type decoder struct { + in io.Reader + order binary.ByteOrder + pos int +} + +// newDecoder returns a new decoder that reads values from in. The input is +// expected to be in the given byte order. +func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { + dec := new(decoder) + dec.in = in + dec.order = order + return dec +} + +// align aligns the input to the given boundary and panics on error. 
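A few concrete cases for the validity helpers above:

    fmt.Println(dbus.ObjectPath("/org/freedesktop/DBus").IsValid()) // true
    fmt.Println(dbus.ObjectPath("/").IsValid())                     // true (root path)
    fmt.Println(dbus.ObjectPath("/trailing/").IsValid())            // false
    fmt.Println(dbus.ObjectPath("no/leading/slash").IsValid())      // false
    fmt.Println(dbus.ObjectPath("/bad-char").IsValid())             // false ('-' is not a member character)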
+func (dec *decoder) align(n int) { + if dec.pos%n != 0 { + newpos := (dec.pos + n - 1) & ^(n - 1) + empty := make([]byte, newpos-dec.pos) + if _, err := io.ReadFull(dec.in, empty); err != nil { + panic(err) + } + dec.pos = newpos + } +} + +// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. +func (dec *decoder) binread(v interface{}) { + if err := binary.Read(dec.in, dec.order, v); err != nil { + panic(err) + } +} + +func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { + defer func() { + var ok bool + v := recover() + if err, ok = v.(error); ok { + if err == io.EOF || err == io.ErrUnexpectedEOF { + err = FormatError("unexpected EOF") + } + } + }() + vs = make([]interface{}, 0) + s := sig.str + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + return nil, err + } + v := dec.decode(s[:len(s)-len(rem)], 0) + vs = append(vs, v) + s = rem + } + return vs, nil +} + +func (dec *decoder) decode(s string, depth int) interface{} { + dec.align(alignment(typeFor(s))) + switch s[0] { + case 'y': + var b [1]byte + if _, err := dec.in.Read(b[:]); err != nil { + panic(err) + } + dec.pos++ + return b[0] + case 'b': + i := dec.decode("u", depth).(uint32) + switch { + case i == 0: + return false + case i == 1: + return true + default: + panic(FormatError("invalid value for boolean")) + } + case 'n': + var i int16 + dec.binread(&i) + dec.pos += 2 + return i + case 'i': + var i int32 + dec.binread(&i) + dec.pos += 4 + return i + case 'x': + var i int64 + dec.binread(&i) + dec.pos += 8 + return i + case 'q': + var i uint16 + dec.binread(&i) + dec.pos += 2 + return i + case 'u': + var i uint32 + dec.binread(&i) + dec.pos += 4 + return i + case 't': + var i uint64 + dec.binread(&i) + dec.pos += 8 + return i + case 'd': + var f float64 + dec.binread(&f) + dec.pos += 8 + return f + case 's': + length := dec.decode("u", depth).(uint32) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + return string(b[:len(b)-1]) + case 'o': + return ObjectPath(dec.decode("s", depth).(string)) + case 'g': + length := dec.decode("y", depth).(byte) + b := make([]byte, int(length)+1) + if _, err := io.ReadFull(dec.in, b); err != nil { + panic(err) + } + dec.pos += int(length) + 1 + sig, err := ParseSignature(string(b[:len(b)-1])) + if err != nil { + panic(err) + } + return sig + case 'v': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + var variant Variant + sig := dec.decode("g", depth).(Signature) + if len(sig.str) == 0 { + panic(FormatError("variant signature is empty")) + } + err, rem := validSingle(sig.str, 0) + if err != nil { + panic(err) + } + if rem != "" { + panic(FormatError("variant signature has multiple types")) + } + variant.sig = sig + variant.value = dec.decode(sig.str, depth+1) + return variant + case 'h': + return UnixFDIndex(dec.decode("u", depth).(uint32)) + case 'a': + if len(s) > 1 && s[1] == '{' { + ksig := s[2:3] + vsig := s[3 : len(s)-1] + v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) + if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + // Even for empty maps, the correct padding must be included + dec.align(8) + spos := dec.pos + for dec.pos < spos+int(length) { + dec.align(8) + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + kv := dec.decode(ksig, depth+2) + vv := dec.decode(vsig, depth+2) + 
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return v.Interface() + } + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + length := dec.decode("u", depth).(uint32) + v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) + // Even for empty arrays, the correct padding must be included + dec.align(alignment(typeFor(s[1:]))) + spos := dec.pos + for dec.pos < spos+int(length) { + ev := dec.decode(s[1:], depth+1) + v = reflect.Append(v, reflect.ValueOf(ev)) + } + return v.Interface() + case '(': + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + dec.align(8) + v := make([]interface{}, 0) + s = s[1 : len(s)-1] + for s != "" { + err, rem := validSingle(s, 0) + if err != nil { + panic(err) + } + ev := dec.decode(s[:len(s)-len(rem)], depth+1) + v = append(v, ev) + s = rem + } + return v + default: + panic(SignatureError{Sig: s}) + } +} + +// A FormatError is an error in the wire format. +type FormatError string + +func (e FormatError) Error() string { + return "dbus: wire format error: " + string(e) +} diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go new file mode 100644 index 0000000000..e81f73ac52 --- /dev/null +++ b/vendor/github.com/godbus/dbus/default_handler.go @@ -0,0 +1,291 @@ +package dbus + +import ( + "bytes" + "reflect" + "strings" + "sync" +) + +func newIntrospectIntf(h *defaultHandler) *exportedIntf { + methods := make(map[string]Method) + methods["Introspect"] = exportedMethod{ + reflect.ValueOf(func(msg Message) (string, *Error) { + path := msg.Headers[FieldPath].value.(ObjectPath) + return h.introspectPath(path), nil + }), + } + return newExportedIntf(methods, true) +} + +//NewDefaultHandler returns an instance of the default +//call handler. This is useful if you want to implement only +//one of the two handlers but not both. 
+func NewDefaultHandler() *defaultHandler {
+	h := &defaultHandler{
+		objects:     make(map[ObjectPath]*exportedObj),
+		defaultIntf: make(map[string]*exportedIntf),
+	}
+	h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h)
+	return h
+}
+
+type defaultHandler struct {
+	sync.RWMutex
+	objects     map[ObjectPath]*exportedObj
+	defaultIntf map[string]*exportedIntf
+}
+
+func (h *defaultHandler) PathExists(path ObjectPath) bool {
+	_, ok := h.objects[path]
+	return ok
+}
+
+func (h *defaultHandler) introspectPath(path ObjectPath) string {
+	subpath := make(map[string]struct{})
+	var xml bytes.Buffer
+	xml.WriteString("<node>")
+	for obj := range h.objects {
+		p := string(path)
+		if p != "/" {
+			p += "/"
+		}
+		if strings.HasPrefix(string(obj), p) {
+			node_name := strings.Split(string(obj[len(p):]), "/")[0]
+			subpath[node_name] = struct{}{}
+		}
+	}
+	for s := range subpath {
+		xml.WriteString("\n\t<node name=\"" + s + "\"/>")
+	}
+	xml.WriteString("\n</node>")
+	return xml.String()
+}
+
+func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) {
+	h.RLock()
+	defer h.RUnlock()
+	object, ok := h.objects[path]
+	if ok {
+		return object, ok
+	}
+
+	// If an object wasn't found for this exact path,
+	// look for a matching subtree registration
+	subtreeObject := newExportedObject()
+	path = path[:strings.LastIndex(string(path), "/")]
+	for len(path) > 0 {
+		object, ok = h.objects[path]
+		if ok {
+			for name, iface := range object.interfaces {
+				// Only include this handler if it registered for the subtree
+				if iface.isFallbackInterface() {
+					subtreeObject.interfaces[name] = iface
+				}
+			}
+			break
+		}
+
+		path = path[:strings.LastIndex(string(path), "/")]
+	}
+
+	for name, intf := range h.defaultIntf {
+		if _, exists := subtreeObject.interfaces[name]; exists {
+			continue
+		}
+		subtreeObject.interfaces[name] = intf
+	}
+
+	return subtreeObject, true
+}
+
+func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) {
+	h.Lock()
+	h.objects[path] = object
+	h.Unlock()
+}
+
+func (h *defaultHandler) DeleteObject(path ObjectPath) {
+	h.Lock()
+	delete(h.objects, path)
+	h.Unlock()
+}
+
+type exportedMethod struct {
+	reflect.Value
+}
+
+func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) {
+	t := m.Type()
+
+	params := make([]reflect.Value, len(args))
+	for i := 0; i < len(args); i++ {
+		params[i] = reflect.ValueOf(args[i]).Elem()
+	}
+
+	ret := m.Value.Call(params)
+
+	err := ret[t.NumOut()-1].Interface().(*Error)
+	ret = ret[:t.NumOut()-1]
+	out := make([]interface{}, len(ret))
+	for i, val := range ret {
+		out[i] = val.Interface()
+	}
+	if err == nil {
+		// concrete type to interface nil is a special case
+		return out, nil
+	}
+	return out, err
+}
+
+func (m exportedMethod) NumArguments() int {
+	return m.Value.Type().NumIn()
+}
+
+func (m exportedMethod) ArgumentValue(i int) interface{} {
+	return reflect.Zero(m.Type().In(i)).Interface()
+}
+
+func (m exportedMethod) NumReturns() int {
+	return m.Value.Type().NumOut()
+}
+
+func (m exportedMethod) ReturnValue(i int) interface{} {
+	return reflect.Zero(m.Type().Out(i)).Interface()
+}
+
+func newExportedObject() *exportedObj {
+	return &exportedObj{
+		interfaces: make(map[string]*exportedIntf),
+	}
+}
+
+type exportedObj struct {
+	interfaces map[string]*exportedIntf
+}
+
+func (obj *exportedObj) LookupInterface(name string) (Interface, bool) {
+	if name == "" {
+		return obj, true
+	}
+	intf, exists := obj.interfaces[name]
+	return intf, exists
+}
+
+func (obj *exportedObj) AddInterface(name string, iface *exportedIntf)
{ + obj.interfaces[name] = iface +} + +func (obj *exportedObj) DeleteInterface(name string) { + delete(obj.interfaces, name) +} + +func (obj *exportedObj) LookupMethod(name string) (Method, bool) { + for _, intf := range obj.interfaces { + method, exists := intf.LookupMethod(name) + if exists { + return method, exists + } + } + return nil, false +} + +func (obj *exportedObj) isFallbackInterface() bool { + return false +} + +func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf { + return &exportedIntf{ + methods: methods, + includeSubtree: includeSubtree, + } +} + +type exportedIntf struct { + methods map[string]Method + + // Whether or not this export is for the entire subtree + includeSubtree bool +} + +func (obj *exportedIntf) LookupMethod(name string) (Method, bool) { + out, exists := obj.methods[name] + return out, exists +} + +func (obj *exportedIntf) isFallbackInterface() bool { + return obj.includeSubtree +} + +//NewDefaultSignalHandler returns an instance of the default +//signal handler. This is useful if you want to implement only +//one of the two handlers but not both. +func NewDefaultSignalHandler() *defaultSignalHandler { + return &defaultSignalHandler{} +} + +func isDefaultSignalHandler(handler SignalHandler) bool { + _, ok := handler.(*defaultSignalHandler) + return ok +} + +type defaultSignalHandler struct { + sync.RWMutex + closed bool + signals []chan<- *Signal +} + +func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) { + go func() { + sh.RLock() + defer sh.RUnlock() + if sh.closed { + return + } + for _, ch := range sh.signals { + ch <- signal + } + }() +} + +func (sh *defaultSignalHandler) Init() error { + sh.Lock() + sh.signals = make([]chan<- *Signal, 0) + sh.Unlock() + return nil +} + +func (sh *defaultSignalHandler) Terminate() { + sh.Lock() + sh.closed = true + for _, ch := range sh.signals { + close(ch) + } + sh.signals = nil + sh.Unlock() +} + +func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + sh.signals = append(sh.signals, ch) + +} + +func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) { + sh.Lock() + defer sh.Unlock() + if sh.closed { + return + } + for i := len(sh.signals) - 1; i >= 0; i-- { + if ch == sh.signals[i] { + copy(sh.signals[i:], sh.signals[i+1:]) + sh.signals[len(sh.signals)-1] = nil + sh.signals = sh.signals[:len(sh.signals)-1] + } + } +} diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go new file mode 100644 index 0000000000..895036a8c1 --- /dev/null +++ b/vendor/github.com/godbus/dbus/doc.go @@ -0,0 +1,69 @@ +/* +Package dbus implements bindings to the D-Bus message bus system. + +To use the message bus API, you first need to connect to a bus (usually the +session or system bus). The acquired connection then can be used to call methods +on remote objects and emit or receive signals. Using the Export method, you can +arrange D-Bus methods calls to be directly translated to method calls on a Go +value. + +Conversion Rules + +For outgoing messages, Go types are automatically converted to the +corresponding D-Bus types. 
The following types are directly encoded as their
+respective D-Bus equivalents:
+
+     Go type     | D-Bus type
+     ------------+-----------
+     byte        | BYTE
+     bool        | BOOLEAN
+     int16       | INT16
+     uint16      | UINT16
+     int         | INT32
+     uint        | UINT32
+     int32       | INT32
+     uint32      | UINT32
+     int64       | INT64
+     uint64      | UINT64
+     float64     | DOUBLE
+     string      | STRING
+     ObjectPath  | OBJECT_PATH
+     Signature   | SIGNATURE
+     Variant     | VARIANT
+     interface{} | VARIANT
+     UnixFDIndex | UNIX_FD
+
+Slices and arrays encode as ARRAYs of their element type.
+
+Maps encode as DICTs, provided that their key type can be used as a key for
+a DICT.
+
+Structs other than Variant and Signature encode as a STRUCT containing their
+exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will
+be skipped.
+
+Pointers encode as the value they're pointed to.
+
+Types convertible to one of the base types above will be mapped as the
+base type.
+
+Trying to encode any other type or a slice, map or struct containing an
+unsupported type will result in an InvalidTypeError.
+
+For incoming messages, the inverse of these rules is used, with the exception
+of STRUCTs. Incoming STRUCTs are represented as a slice of empty interfaces
+containing the struct fields in the correct order. The Store function can be
+used to convert such values to Go structs.
+
+Unix FD passing
+
+Handling Unix file descriptors deserves special mention. To use them, you should
+first check that they are supported on a connection by calling SupportsUnixFDs.
+If it returns true, all methods of Connection will translate messages containing
+UnixFDs to messages that are accompanied by the given file descriptors, with the
+UnixFD values being substituted by the correct indices. Similarly, the indices
+of incoming messages are automatically resolved. It shouldn't be necessary to use
+UnixFDIndex.
+
+*/
+package dbus
diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go
new file mode 100644
index 0000000000..8bb717761c
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/encoder.go
@@ -0,0 +1,210 @@
+package dbus
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"reflect"
+)
+
+// An encoder encodes values to the D-Bus wire format.
+type encoder struct {
+	out   io.Writer
+	order binary.ByteOrder
+	pos   int
+}
+
+// newEncoder returns a new encoder that writes to out in the given byte order.
+func newEncoder(out io.Writer, order binary.ByteOrder) *encoder {
+	return newEncoderAtOffset(out, 0, order)
+}
+
+// newEncoderAtOffset returns a new encoder that writes to out in the given
+// byte order. Specify the offset to initialize pos for proper alignment
+// computation.
+func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder {
+	enc := new(encoder)
+	enc.out = out
+	enc.order = order
+	enc.pos = offset
+	return enc
+}
+
+// Aligns the next output to be on a multiple of n. Panics on write errors.
+func (enc *encoder) align(n int) {
+	pad := enc.padding(0, n)
+	if pad > 0 {
+		empty := make([]byte, pad)
+		if _, err := enc.out.Write(empty); err != nil {
+			panic(err)
+		}
+		enc.pos += pad
+	}
+}
+
+// padding returns the number of bytes of padding, based on the current
+// position, an additional offset, and the alignment.
+func (enc *encoder) padding(offset, algn int) int {
+	abs := enc.pos + offset
+	if abs%algn != 0 {
+		newabs := (abs + algn - 1) & ^(algn - 1)
+		return newabs - abs
+	}
+	return 0
+}
+
+// Calls binary.Write(enc.out, enc.order, v) and panics on write errors.
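Stepping back to the conversion rules from doc.go above, a sketch using the package's exported SignatureOf helper (the struct is illustrative):

    type profile struct {
        Name   string
        Age    uint32
        Secret string `dbus:"-"` // tagged: skipped on the wire
    }
    fmt.Println(dbus.SignatureOf(profile{}))                 // (su)
    fmt.Println(dbus.SignatureOf(map[string]dbus.Variant{})) // a{sv}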
+func (enc *encoder) binwrite(v interface{}) { + if err := binary.Write(enc.out, enc.order, v); err != nil { + panic(err) + } +} + +// Encode encodes the given values to the underyling reader. All written values +// are aligned properly as required by the D-Bus spec. +func (enc *encoder) Encode(vs ...interface{}) (err error) { + defer func() { + err, _ = recover().(error) + }() + for _, v := range vs { + enc.encode(reflect.ValueOf(v), 0) + } + return nil +} + +// encode encodes the given value to the writer and panics on error. depth holds +// the depth of the container nesting. +func (enc *encoder) encode(v reflect.Value, depth int) { + enc.align(alignment(v.Type())) + switch v.Kind() { + case reflect.Uint8: + var b [1]byte + b[0] = byte(v.Uint()) + if _, err := enc.out.Write(b[:]); err != nil { + panic(err) + } + enc.pos++ + case reflect.Bool: + if v.Bool() { + enc.encode(reflect.ValueOf(uint32(1)), depth) + } else { + enc.encode(reflect.ValueOf(uint32(0)), depth) + } + case reflect.Int16: + enc.binwrite(int16(v.Int())) + enc.pos += 2 + case reflect.Uint16: + enc.binwrite(uint16(v.Uint())) + enc.pos += 2 + case reflect.Int, reflect.Int32: + enc.binwrite(int32(v.Int())) + enc.pos += 4 + case reflect.Uint, reflect.Uint32: + enc.binwrite(uint32(v.Uint())) + enc.pos += 4 + case reflect.Int64: + enc.binwrite(v.Int()) + enc.pos += 8 + case reflect.Uint64: + enc.binwrite(v.Uint()) + enc.pos += 8 + case reflect.Float64: + enc.binwrite(v.Float()) + enc.pos += 8 + case reflect.String: + enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) + b := make([]byte, v.Len()+1) + copy(b, v.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case reflect.Ptr: + enc.encode(v.Elem(), depth) + case reflect.Slice, reflect.Array: + if depth >= 64 { + panic(FormatError("input exceeds container depth limit")) + } + // Lookahead offset: 4 bytes for uint32 length (with alignment), + // plus alignment for elements. + n := enc.padding(0, 4) + 4 + offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem())) + + var buf bytes.Buffer + bufenc := newEncoderAtOffset(&buf, offset, enc.order) + + for i := 0; i < v.Len(); i++ { + bufenc.encode(v.Index(i), depth+1) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(alignment(v.Type().Elem())) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Struct: + if depth >= 64 && v.Type() != signatureType { + panic(FormatError("input exceeds container depth limit")) + } + switch t := v.Type(); t { + case signatureType: + str := v.Field(0) + enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) + b := make([]byte, str.Len()+1) + copy(b, str.String()) + b[len(b)-1] = 0 + n, err := enc.out.Write(b) + if err != nil { + panic(err) + } + enc.pos += n + case variantType: + variant := v.Interface().(Variant) + enc.encode(reflect.ValueOf(variant.sig), depth+1) + enc.encode(reflect.ValueOf(variant.value), depth+1) + default: + for i := 0; i < v.Type().NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + enc.encode(v.Field(i), depth+1) + } + } + } + case reflect.Map: + // Maps are arrays of structures, so they actually increase the depth by + // 2. 
+ if depth >= 63 { + panic(FormatError("input exceeds container depth limit")) + } + if !isKeyType(v.Type().Key()) { + panic(InvalidTypeError{v.Type()}) + } + keys := v.MapKeys() + // Lookahead offset: 4 bytes for uint32 length (with alignment), + // plus 8-byte alignment + n := enc.padding(0, 4) + 4 + offset := enc.pos + n + enc.padding(n, 8) + + var buf bytes.Buffer + bufenc := newEncoderAtOffset(&buf, offset, enc.order) + for _, k := range keys { + bufenc.align(8) + bufenc.encode(k, depth+2) + bufenc.encode(v.MapIndex(k), depth+2) + } + enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) + length := buf.Len() + enc.align(8) + if _, err := buf.WriteTo(enc.out); err != nil { + panic(err) + } + enc.pos += length + case reflect.Interface: + enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth) + default: + panic(InvalidTypeError{v.Type()}) + } +} diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go new file mode 100644 index 0000000000..aae9708815 --- /dev/null +++ b/vendor/github.com/godbus/dbus/export.go @@ -0,0 +1,413 @@ +package dbus + +import ( + "errors" + "fmt" + "reflect" + "strings" +) + +var ( + ErrMsgInvalidArg = Error{ + "org.freedesktop.DBus.Error.InvalidArgs", + []interface{}{"Invalid type / number of args"}, + } + ErrMsgNoObject = Error{ + "org.freedesktop.DBus.Error.NoSuchObject", + []interface{}{"No such object"}, + } + ErrMsgUnknownMethod = Error{ + "org.freedesktop.DBus.Error.UnknownMethod", + []interface{}{"Unknown / invalid method"}, + } + ErrMsgUnknownInterface = Error{ + "org.freedesktop.DBus.Error.UnknownInterface", + []interface{}{"Object does not implement the interface"}, + } +) + +func MakeFailedError(err error) *Error { + return &Error{ + "org.freedesktop.DBus.Error.Failed", + []interface{}{err.Error()}, + } +} + +// Sender is a type which can be used in exported methods to receive the message +// sender. 
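The type declared next is consumed by the export machinery below; a sketch of an exported method that receives the caller's unique bus name through it (names are illustrative):

    type service struct{}

    // Sender does not contribute to the D-Bus signature, so on the bus
    // this method looks like Hello(s string) -> (s string).
    func (service) Hello(sender dbus.Sender, name string) (string, *dbus.Error) {
        return fmt.Sprintf("hello %s, from %s", name, sender), nil
    }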
+type Sender string + +func computeMethodName(name string, mapping map[string]string) string { + newname, ok := mapping[name] + if ok { + name = newname + } + return name +} + +func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { + if in == nil { + return nil + } + methods := make(map[string]reflect.Value) + val := reflect.ValueOf(in) + typ := val.Type() + for i := 0; i < typ.NumMethod(); i++ { + methtype := typ.Method(i) + method := val.Method(i) + t := method.Type() + // only track valid methods must return *Error as last arg + // and must be exported + if t.NumOut() == 0 || + t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) || + methtype.PkgPath != "" { + continue + } + // map names while building table + methods[computeMethodName(methtype.Name, mapping)] = method + } + return methods +} + +func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { + pointers := make([]interface{}, m.NumArguments()) + decode := make([]interface{}, 0, len(body)) + + for i := 0; i < m.NumArguments(); i++ { + tp := reflect.TypeOf(m.ArgumentValue(i)) + val := reflect.New(tp) + pointers[i] = val.Interface() + if tp == reflect.TypeOf((*Sender)(nil)).Elem() { + val.Elem().SetString(sender) + } else if tp == reflect.TypeOf((*Message)(nil)).Elem() { + val.Elem().Set(reflect.ValueOf(*msg)) + } else { + decode = append(decode, pointers[i]) + } + } + + if len(decode) != len(body) { + return nil, ErrMsgInvalidArg + } + + if err := Store(body, decode...); err != nil { + return nil, ErrMsgInvalidArg + } + + return pointers, nil +} + +func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) { + if decoder, ok := m.(ArgumentDecoder); ok { + return decoder.DecodeArguments(conn, sender, msg, msg.Body) + } + return standardMethodArgumentDecode(m, sender, msg, msg.Body) +} + +// handleCall handles the given method call (i.e. looks if it's one of the +// pre-implemented ones and searches for a corresponding handler if not). +func (conn *Conn) handleCall(msg *Message) { + name := msg.Headers[FieldMember].value.(string) + path := msg.Headers[FieldPath].value.(ObjectPath) + ifaceName, _ := msg.Headers[FieldInterface].value.(string) + sender, hasSender := msg.Headers[FieldSender].value.(string) + serial := msg.serial + if ifaceName == "org.freedesktop.DBus.Peer" { + switch name { + case "Ping": + conn.sendReply(sender, serial) + case "GetMachineId": + conn.sendReply(sender, serial, conn.uuid) + default: + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + return + } + if len(name) == 0 { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + } + + object, ok := conn.handler.LookupObject(path) + if !ok { + conn.sendError(ErrMsgNoObject, sender, serial) + return + } + + iface, exists := object.LookupInterface(ifaceName) + if !exists { + conn.sendError(ErrMsgUnknownInterface, sender, serial) + return + } + + m, exists := iface.LookupMethod(name) + if !exists { + conn.sendError(ErrMsgUnknownMethod, sender, serial) + return + } + args, err := conn.decodeArguments(m, sender, msg) + if err != nil { + conn.sendError(err, sender, serial) + return + } + + ret, err := m.Call(args...) 
+ if err != nil { + conn.sendError(err, sender, serial) + return + } + + if msg.Flags&FlagNoReplyExpected == 0 { + reply := new(Message) + reply.Type = TypeMethodReply + reply.serial = conn.getSerial() + reply.Headers = make(map[HeaderField]Variant) + if hasSender { + reply.Headers[FieldDestination] = msg.Headers[FieldSender] + } + reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) + reply.Body = make([]interface{}, len(ret)) + for i := 0; i < len(ret); i++ { + reply.Body[i] = ret[i] + } + reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) + conn.outLck.RLock() + if !conn.closed { + conn.out <- reply + } + conn.outLck.RUnlock() + } +} + +// Emit emits the given signal on the message bus. The name parameter must be +// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". +func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { + if !path.IsValid() { + return errors.New("dbus: invalid object path") + } + i := strings.LastIndex(name, ".") + if i == -1 { + return errors.New("dbus: invalid method name") + } + iface := name[:i] + member := name[i+1:] + if !isValidMember(member) { + return errors.New("dbus: invalid method name") + } + if !isValidInterface(iface) { + return errors.New("dbus: invalid interface name") + } + msg := new(Message) + msg.Type = TypeSignal + msg.serial = conn.getSerial() + msg.Headers = make(map[HeaderField]Variant) + msg.Headers[FieldInterface] = MakeVariant(iface) + msg.Headers[FieldMember] = MakeVariant(member) + msg.Headers[FieldPath] = MakeVariant(path) + msg.Body = values + if len(values) > 0 { + msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) + } + conn.outLck.RLock() + defer conn.outLck.RUnlock() + if conn.closed { + return ErrClosed + } + conn.out <- msg + return nil +} + +// Export registers the given value to be exported as an object on the +// message bus. +// +// If a method call on the given path and interface is received, an exported +// method with the same name is called with v as the receiver if the +// parameters match and the last return value is of type *Error. If this +// *Error is not nil, it is sent back to the caller as an error. +// Otherwise, a method reply is sent with the other return values as its body. +// +// Any parameters with the special type Sender are set to the sender of the +// dbus message when the method is called. Parameters of this type do not +// contribute to the dbus signature of the method (i.e. the method is exposed +// as if the parameters of type Sender were not there). +// +// Similarly, any parameters with the type Message are set to the raw message +// received on the bus. Again, parameters of this type do not contribute to the +// dbus signature of the method. +// +// Every method call is executed in a new goroutine, so the method may be called +// in multiple goroutines at once. +// +// Method calls on the interface org.freedesktop.DBus.Peer will be automatically +// handled for every object. +// +// Passing nil as the first parameter will cause conn to cease handling calls on +// the given combination of path and interface. +// +// Export returns an error if path is not a valid path name. +func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { + return conn.ExportWithMap(v, nil, path, iface) +} + +// ExportWithMap works exactly like Export but provides the ability to remap +// method names (e.g. export a lower-case method). 
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+	return conn.export(getMethods(v, mapping), path, iface, false)
+}
+
+// ExportSubtree works exactly like Export but registers the given value for
+// an entire subtree rather than just the root path provided.
+//
+// In order to make this useful, one parameter in each of the value's exported
+// methods should be a Message, in which case it will contain the raw message
+// (allowing one to get access to the path that caused the method to be called).
+//
+// Note that more specific export paths take precedence over less specific. For
+// example, a method call using the ObjectPath /foo/bar/baz will call a method
+// exported on /foo/bar before a method exported on /foo.
+func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error {
+	return conn.ExportSubtreeWithMap(v, nil, path, iface)
+}
+
+// ExportSubtreeWithMap works exactly like ExportSubtree but provides the
+// ability to remap method names (e.g. export a lower-case method).
+//
+// The keys in the map are the real method names (exported on the struct), and
+// the values are the method names to be exported on DBus.
+func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error {
+	return conn.export(getMethods(v, mapping), path, iface, true)
+}
+
+// ExportMethodTable, like Export, registers the given methods as an object
+// on the message bus. Unlike Export, it uses a method table to define
+// the object instead of a native Go object.
+//
+// The method table is a map from method name to function closure
+// representing the method. This allows an object exported on the bus to not
+// necessarily be a native Go object. It can be useful for generating exposed
+// methods on the fly.
+//
+// Any non-function objects in the method table are ignored.
+func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+	return conn.exportMethodTable(methods, path, iface, false)
+}
+
+// ExportSubtreeMethodTable is like ExportSubtree, but with the same caveats
+// as ExportMethodTable.
+func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error {
+	return conn.exportMethodTable(methods, path, iface, true)
+}
+
+func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error {
+	out := make(map[string]reflect.Value)
+	for name, method := range methods {
+		rval := reflect.ValueOf(method)
+		if rval.Kind() != reflect.Func {
+			continue
+		}
+		t := rval.Type()
+		// only track valid methods; they must return *Error as the last value
+		if t.NumOut() == 0 ||
+			t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) {
+			continue
+		}
+		out[name] = rval
+	}
+	return conn.export(out, path, iface, includeSubtree)
+}
+
+func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error {
+	if h.PathExists(path) {
+		obj := h.objects[path]
+		obj.DeleteInterface(iface)
+		if len(obj.interfaces) == 0 {
+			h.DeleteObject(path)
+		}
+	}
+	return nil
+}
+
+// export is the worker function for all exports/registrations.
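Before the worker function itself, a sketch of the exported API above in use; the object path and names are hypothetical, and service is the example receiver sketched earlier in this file:

    conn, err := dbus.SessionBus()
    if err != nil {
        panic(err)
    }
    if err := conn.Export(service{}, "/com/example/App", "com.example.App"); err != nil {
        panic(err)
    }
    // Methods answer incoming calls; signals are emitted explicitly.
    if err := conn.Emit("/com/example/App", "com.example.App.Started"); err != nil {
        panic(err)
    }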
+func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { + h, ok := conn.handler.(*defaultHandler) + if !ok { + return fmt.Errorf( + `dbus: export only allowed on the default hander handler have %T"`, + conn.handler) + } + + if !path.IsValid() { + return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) + } + + // Remove a previous export if the interface is nil + if methods == nil { + return conn.unexport(h, path, iface) + } + + // If this is the first handler for this path, make a new map to hold all + // handlers for this path. + if !h.PathExists(path) { + h.AddObject(path, newExportedObject()) + } + + exportedMethods := make(map[string]Method) + for name, method := range methods { + exportedMethods[name] = exportedMethod{method} + } + + // Finally, save this handler + obj := h.objects[path] + obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree)) + + return nil +} + +// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response. +func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) + if err != nil { + return 0, err + } + return ReleaseNameReply(r), nil +} + +// RequestName calls org.freedesktop.DBus.RequestName and awaits a response. +func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { + var r uint32 + err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) + if err != nil { + return 0, err + } + return RequestNameReply(r), nil +} + +// ReleaseNameReply is the reply to a ReleaseName call. +type ReleaseNameReply uint32 + +const ( + ReleaseNameReplyReleased ReleaseNameReply = 1 + iota + ReleaseNameReplyNonExistent + ReleaseNameReplyNotOwner +) + +// RequestNameFlags represents the possible flags for a RequestName call. +type RequestNameFlags uint32 + +const ( + NameFlagAllowReplacement RequestNameFlags = 1 << iota + NameFlagReplaceExisting + NameFlagDoNotQueue +) + +// RequestNameReply is the reply to a RequestName call. 
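Claiming a well-known name is usually the first thing a service does after exporting its objects; continuing the sketch above against the calls just shown (the bus name is hypothetical, and the reply constants are declared next):

    reply, err := conn.RequestName("com.example.App", dbus.NameFlagDoNotQueue)
    if err != nil {
        panic(err)
    }
    if reply != dbus.RequestNameReplyPrimaryOwner {
        panic("name already taken")
    }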
+type RequestNameReply uint32 + +const ( + RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota + RequestNameReplyInQueue + RequestNameReplyExists + RequestNameReplyAlreadyOwner +) diff --git a/vendor/github.com/godbus/dbus/homedir.go b/vendor/github.com/godbus/dbus/homedir.go new file mode 100644 index 0000000000..0b745f9313 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir.go @@ -0,0 +1,28 @@ +package dbus + +import ( + "os" + "sync" +) + +var ( + homeDir string + homeDirLock sync.Mutex +) + +func getHomeDir() string { + homeDirLock.Lock() + defer homeDirLock.Unlock() + + if homeDir != "" { + return homeDir + } + + homeDir = os.Getenv("HOME") + if homeDir != "" { + return homeDir + } + + homeDir = lookupHomeDir() + return homeDir +} diff --git a/vendor/github.com/godbus/dbus/homedir_dynamic.go b/vendor/github.com/godbus/dbus/homedir_dynamic.go new file mode 100644 index 0000000000..2732081e73 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir_dynamic.go @@ -0,0 +1,15 @@ +// +build !static_build + +package dbus + +import ( + "os/user" +) + +func lookupHomeDir() string { + u, err := user.Current() + if err != nil { + return "/" + } + return u.HomeDir +} diff --git a/vendor/github.com/godbus/dbus/homedir_static.go b/vendor/github.com/godbus/dbus/homedir_static.go new file mode 100644 index 0000000000..b9d9cb5525 --- /dev/null +++ b/vendor/github.com/godbus/dbus/homedir_static.go @@ -0,0 +1,45 @@ +// +build static_build + +package dbus + +import ( + "bufio" + "os" + "strconv" + "strings" +) + +func lookupHomeDir() string { + myUid := os.Getuid() + + f, err := os.Open("/etc/passwd") + if err != nil { + return "/" + } + defer f.Close() + + s := bufio.NewScanner(f) + + for s.Scan() { + if err := s.Err(); err != nil { + break + } + + line := strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + parts := strings.Split(line, ":") + + if len(parts) >= 6 { + uid, err := strconv.Atoi(parts[2]) + if err == nil && uid == myUid { + return parts[5] + } + } + } + + // Default to / if we can't get a better value + return "/" +} diff --git a/vendor/github.com/godbus/dbus/message.go b/vendor/github.com/godbus/dbus/message.go new file mode 100644 index 0000000000..6a925367eb --- /dev/null +++ b/vendor/github.com/godbus/dbus/message.go @@ -0,0 +1,353 @@ +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "reflect" + "strconv" +) + +const protoVersion byte = 1 + +// Flags represents the possible flags of a D-Bus message. +type Flags byte + +const ( + // FlagNoReplyExpected signals that the message is not expected to generate + // a reply. If this flag is set on outgoing messages, any possible reply + // will be discarded. + FlagNoReplyExpected Flags = 1 << iota + // FlagNoAutoStart signals that the message bus should not automatically + // start an application when handling this message. + FlagNoAutoStart + // FlagAllowInteractiveAuthorization may be set on a method call + // message to inform the receiving side that the caller is prepared + // to wait for interactive authorization, which might take a + // considerable time to complete. For instance, if this flag is set, + // it would be appropriate to query the user for passwords or + // confirmation via Polkit or a similar framework. + FlagAllowInteractiveAuthorization +) + +// Type represents the possible types of a D-Bus message. 
+type Type byte + +const ( + TypeMethodCall Type = 1 + iota + TypeMethodReply + TypeError + TypeSignal + typeMax +) + +func (t Type) String() string { + switch t { + case TypeMethodCall: + return "method call" + case TypeMethodReply: + return "reply" + case TypeError: + return "error" + case TypeSignal: + return "signal" + } + return "invalid" +} + +// HeaderField represents the possible byte codes for the headers +// of a D-Bus message. +type HeaderField byte + +const ( + FieldPath HeaderField = 1 + iota + FieldInterface + FieldMember + FieldErrorName + FieldReplySerial + FieldDestination + FieldSender + FieldSignature + FieldUnixFDs + fieldMax +) + +// An InvalidMessageError describes the reason why a D-Bus message is regarded as +// invalid. +type InvalidMessageError string + +func (e InvalidMessageError) Error() string { + return "dbus: invalid message: " + string(e) +} + +// fieldType are the types of the various header fields. +var fieldTypes = [fieldMax]reflect.Type{ + FieldPath: objectPathType, + FieldInterface: stringType, + FieldMember: stringType, + FieldErrorName: stringType, + FieldReplySerial: uint32Type, + FieldDestination: stringType, + FieldSender: stringType, + FieldSignature: signatureType, + FieldUnixFDs: uint32Type, +} + +// requiredFields lists the header fields that are required by the different +// message types. +var requiredFields = [typeMax][]HeaderField{ + TypeMethodCall: {FieldPath, FieldMember}, + TypeMethodReply: {FieldReplySerial}, + TypeError: {FieldErrorName, FieldReplySerial}, + TypeSignal: {FieldPath, FieldInterface, FieldMember}, +} + +// Message represents a single D-Bus message. +type Message struct { + Type + Flags + Headers map[HeaderField]Variant + Body []interface{} + + serial uint32 +} + +type header struct { + Field byte + Variant +} + +// DecodeMessage tries to decode a single message in the D-Bus wire format +// from the given reader. The byte order is figured out from the first byte. +// The possibly returned error can be an error of the underlying reader, an +// InvalidMessageError or a FormatError. 
+func DecodeMessage(rd io.Reader) (msg *Message, err error) { + var order binary.ByteOrder + var hlength, length uint32 + var typ, flags, proto byte + var headers []header + + b := make([]byte, 1) + _, err = rd.Read(b) + if err != nil { + return + } + switch b[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + + dec := newDecoder(rd, order) + dec.pos = 1 + + msg = new(Message) + vs, err := dec.Decode(Signature{"yyyuu"}) + if err != nil { + return nil, err + } + if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { + return nil, err + } + msg.Type = Type(typ) + msg.Flags = Flags(flags) + + // get the header length separately because we need it later + b = make([]byte, 4) + _, err = io.ReadFull(rd, b) + if err != nil { + return nil, err + } + binary.Read(bytes.NewBuffer(b), order, &hlength) + if hlength+length+16 > 1<<27 { + return nil, InvalidMessageError("message is too long") + } + dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) + dec.pos = 12 + vs, err = dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + if err = Store(vs, &headers); err != nil { + return nil, err + } + + msg.Headers = make(map[HeaderField]Variant) + for _, v := range headers { + msg.Headers[HeaderField(v.Field)] = v.Variant + } + + dec.align(8) + body := make([]byte, int(length)) + if length != 0 { + _, err := io.ReadFull(rd, body) + if err != nil { + return nil, err + } + } + + if err = msg.IsValid(); err != nil { + return nil, err + } + sig, _ := msg.Headers[FieldSignature].value.(Signature) + if sig.str != "" { + buf := bytes.NewBuffer(body) + dec = newDecoder(buf, order) + vs, err := dec.Decode(sig) + if err != nil { + return nil, err + } + msg.Body = vs + } + + return +} + +// EncodeTo encodes and sends a message to the given writer. The byte order must +// be either binary.LittleEndian or binary.BigEndian. If the message is not +// valid or an error occurs when writing, an error is returned. +func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { + if err := msg.IsValid(); err != nil { + return err + } + var vs [7]interface{} + switch order { + case binary.LittleEndian: + vs[0] = byte('l') + case binary.BigEndian: + vs[0] = byte('B') + default: + return errors.New("dbus: invalid byte order") + } + body := new(bytes.Buffer) + enc := newEncoder(body, order) + if len(msg.Body) != 0 { + enc.Encode(msg.Body...) + } + vs[1] = msg.Type + vs[2] = msg.Flags + vs[3] = protoVersion + vs[4] = uint32(len(body.Bytes())) + vs[5] = msg.serial + headers := make([]header, 0, len(msg.Headers)) + for k, v := range msg.Headers { + headers = append(headers, header{byte(k), v}) + } + vs[6] = headers + var buf bytes.Buffer + enc = newEncoder(&buf, order) + enc.Encode(vs[:]...) + enc.align(8) + body.WriteTo(&buf) + if buf.Len() > 1<<27 { + return InvalidMessageError("message is too long") + } + if _, err := buf.WriteTo(out); err != nil { + return err + } + return nil +} + +// IsValid checks whether msg is a valid message and returns an +// InvalidMessageError if it is not. 
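EncodeTo and DecodeMessage are symmetric; a round-trip sketch over an in-memory buffer, with headers chosen so that IsValid (defined next) accepts the message:

    msg := &dbus.Message{
        Type: dbus.TypeSignal,
        Headers: map[dbus.HeaderField]dbus.Variant{
            dbus.FieldPath:      dbus.MakeVariant(dbus.ObjectPath("/com/example")),
            dbus.FieldInterface: dbus.MakeVariant("com.example.App"),
            dbus.FieldMember:    dbus.MakeVariant("Ping"),
        },
    }
    var buf bytes.Buffer
    if err := msg.EncodeTo(&buf, binary.LittleEndian); err != nil {
        panic(err)
    }
    decoded, err := dbus.DecodeMessage(&buf)
    if err != nil {
        panic(err)
    }
    fmt.Println(decoded.Type) // signal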
+func (msg *Message) IsValid() error { + if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 { + return InvalidMessageError("invalid flags") + } + if msg.Type == 0 || msg.Type >= typeMax { + return InvalidMessageError("invalid message type") + } + for k, v := range msg.Headers { + if k == 0 || k >= fieldMax { + return InvalidMessageError("invalid header") + } + if reflect.TypeOf(v.value) != fieldTypes[k] { + return InvalidMessageError("invalid type of header field") + } + } + for _, v := range requiredFields[msg.Type] { + if _, ok := msg.Headers[v]; !ok { + return InvalidMessageError("missing required header") + } + } + if path, ok := msg.Headers[FieldPath]; ok { + if !path.value.(ObjectPath).IsValid() { + return InvalidMessageError("invalid path name") + } + } + if iface, ok := msg.Headers[FieldInterface]; ok { + if !isValidInterface(iface.value.(string)) { + return InvalidMessageError("invalid interface name") + } + } + if member, ok := msg.Headers[FieldMember]; ok { + if !isValidMember(member.value.(string)) { + return InvalidMessageError("invalid member name") + } + } + if errname, ok := msg.Headers[FieldErrorName]; ok { + if !isValidInterface(errname.value.(string)) { + return InvalidMessageError("invalid error name") + } + } + if len(msg.Body) != 0 { + if _, ok := msg.Headers[FieldSignature]; !ok { + return InvalidMessageError("missing signature") + } + } + return nil +} + +// Serial returns the message's serial number. The returned value is only valid +// for messages received by eavesdropping. +func (msg *Message) Serial() uint32 { + return msg.serial +} + +// String returns a string representation of a message similar to the format of +// dbus-monitor. +func (msg *Message) String() string { + if err := msg.IsValid(); err != nil { + return "" + } + s := msg.Type.String() + if v, ok := msg.Headers[FieldSender]; ok { + s += " from " + v.value.(string) + } + if v, ok := msg.Headers[FieldDestination]; ok { + s += " to " + v.value.(string) + } + s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) + if v, ok := msg.Headers[FieldReplySerial]; ok { + s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldUnixFDs]; ok { + s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) + } + if v, ok := msg.Headers[FieldPath]; ok { + s += " path " + string(v.value.(ObjectPath)) + } + if v, ok := msg.Headers[FieldInterface]; ok { + s += " interface " + v.value.(string) + } + if v, ok := msg.Headers[FieldErrorName]; ok { + s += " error " + v.value.(string) + } + if v, ok := msg.Headers[FieldMember]; ok { + s += " member " + v.value.(string) + } + if len(msg.Body) != 0 { + s += "\n" + } + for i, v := range msg.Body { + s += " " + MakeVariant(v).String() + if i != len(msg.Body)-1 { + s += "\n" + } + } + return s +} diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go new file mode 100644 index 0000000000..6d95583d76 --- /dev/null +++ b/vendor/github.com/godbus/dbus/object.go @@ -0,0 +1,147 @@ +package dbus + +import ( + "errors" + "strings" +) + +// BusObject is the interface of a remote object on which methods can be +// invoked. +type BusObject interface { + Call(method string, flags Flags, args ...interface{}) *Call + Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call + GetProperty(p string) (Variant, error) + Destination() string + Path() ObjectPath +} + +// Object represents a remote object on which methods can be invoked. 
+type Object struct {
+	conn *Conn
+	dest string
+	path ObjectPath
+}
+
+// Call calls a method with (*Object).Go and waits for its reply.
+func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call {
+	return <-o.Go(method, flags, make(chan *Call, 1), args...).Done
+}
+
+// AddMatchSignal subscribes the BusObject to signals from the specified
+// interface and method (member).
+func (o *Object) AddMatchSignal(iface, member string) *Call {
+	return o.Call(
+		"org.freedesktop.DBus.AddMatch",
+		0,
+		"type='signal',interface='"+iface+"',member='"+member+"'",
+	)
+}
+
+// Go calls a method with the given arguments asynchronously. It returns a
+// Call structure representing this method call. The passed channel will
+// return the same value once the call is done. If ch is nil, a new channel
+// will be allocated. Otherwise, ch has to be buffered or Go will panic.
+//
+// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure
+// is returned with any error in Err and a closed channel in Done containing
+// the returned Call as its one entry.
+//
+// If the method parameter contains a dot ('.'), the part before the last dot
+// specifies the interface on which the method is called.
+func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call {
+	iface := ""
+	i := strings.LastIndex(method, ".")
+	if i != -1 {
+		iface = method[:i]
+	}
+	method = method[i+1:]
+	msg := new(Message)
+	msg.Type = TypeMethodCall
+	msg.serial = o.conn.getSerial()
+	msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected)
+	msg.Headers = make(map[HeaderField]Variant)
+	msg.Headers[FieldPath] = MakeVariant(o.path)
+	msg.Headers[FieldDestination] = MakeVariant(o.dest)
+	msg.Headers[FieldMember] = MakeVariant(method)
+	if iface != "" {
+		msg.Headers[FieldInterface] = MakeVariant(iface)
+	}
+	msg.Body = args
+	if len(args) > 0 {
+		msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...))
+	}
+	if msg.Flags&FlagNoReplyExpected == 0 {
+		if ch == nil {
+			ch = make(chan *Call, 10)
+		} else if cap(ch) == 0 {
+			panic("dbus: unbuffered channel passed to (*Object).Go")
+		}
+		call := &Call{
+			Destination: o.dest,
+			Path:        o.path,
+			Method:      method,
+			Args:        args,
+			Done:        ch,
+		}
+		o.conn.callsLck.Lock()
+		o.conn.calls[msg.serial] = call
+		o.conn.callsLck.Unlock()
+		o.conn.outLck.RLock()
+		if o.conn.closed {
+			call.Err = ErrClosed
+			call.Done <- call
+		} else {
+			o.conn.out <- msg
+		}
+		o.conn.outLck.RUnlock()
+		return call
+	}
+	o.conn.outLck.RLock()
+	defer o.conn.outLck.RUnlock()
+	done := make(chan *Call, 1)
+	call := &Call{
+		Err:  nil,
+		Done: done,
+	}
+	defer func() {
+		call.Done <- call
+		close(done)
+	}()
+	if o.conn.closed {
+		call.Err = ErrClosed
+		return call
+	}
+	o.conn.out <- msg
+	return call
+}
+
+// GetProperty calls org.freedesktop.DBus.Properties.Get on the given
+// object. The property name must be given in interface.member notation.
+func (o *Object) GetProperty(p string) (Variant, error) {
+	idx := strings.LastIndex(p, ".")
+	if idx == -1 || idx+1 == len(p) {
+		return Variant{}, errors.New("dbus: invalid property " + p)
+	}
+
+	iface := p[:idx]
+	prop := p[idx+1:]
+
+	result := Variant{}
+	err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result)
+
+	if err != nil {
+		return Variant{}, err
+	}
+
+	return result, nil
+}
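+
+// As an editorial illustration (not part of the original source), a property
+// is read by naming the interface and the property in one string; the
+// interface and property used here are hypothetical:
+//
+//	// assumes obj was obtained via (*Conn).Object
+//	v, err := obj.GetProperty("com.example.App.Version")
+//	if err == nil {
+//		version, _ := v.Value().(string)
+//		_ = version
+//	}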
+
+// Destination returns the destination that calls on (o *Object) are sent to.
+func (o *Object) Destination() string {
+	return o.dest
+}
+
+// Path returns the path that calls on (o *Object) are sent to.
+func (o *Object) Path() ObjectPath {
+	return o.path
+}
diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go
new file mode 100644
index 0000000000..091948aef7
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/server_interfaces.go
@@ -0,0 +1,89 @@
+package dbus
+
+// Terminator allows a handler to implement a shutdown mechanism that
+// is called when the connection terminates.
+type Terminator interface {
+	Terminate()
+}
+
+// Handler is the representation of a D-Bus Application.
+//
+// The Handler must have a way to lookup objects given
+// an ObjectPath. The returned object must implement the
+// ServerObject interface.
+type Handler interface {
+	LookupObject(path ObjectPath) (ServerObject, bool)
+}
+
+// ServerObject is the representation of a D-Bus Object.
+//
+// Objects are registered at a path for a given Handler.
+// The Objects implement D-Bus interfaces. The semantics
+// of Interface lookup are up to the implementation of
+// the ServerObject. The ServerObject implementation may
+// choose to implement empty string as a valid interface
+// representing all methods or not, per the D-Bus specification.
+type ServerObject interface {
+	LookupInterface(name string) (Interface, bool)
+}
+
+// An Interface is the representation of a D-Bus Interface.
+//
+// Interfaces are a grouping of methods implemented by the Objects.
+// Interfaces are responsible for routing method calls.
+type Interface interface {
+	LookupMethod(name string) (Method, bool)
+}
+
+// A Method represents the exposed methods on D-Bus.
+type Method interface {
+	// Call requires that all arguments are decoded before being passed to it.
+	Call(args ...interface{}) ([]interface{}, error)
+	NumArguments() int
+	NumReturns() int
+	// ArgumentValue returns a representative value for the argument at position;
+	// it should be of the proper type. reflect.Zero would be a good mechanism
+	// to use for this Value.
+	ArgumentValue(position int) interface{}
+	// ReturnValue returns a representative value for the return at position;
+	// it should be of the proper type. reflect.Zero would be a good mechanism
+	// to use for this Value.
+	ReturnValue(position int) interface{}
+}
+
+// An ArgumentDecoder can decode arguments using a non-standard mechanism.
+//
+// If a method implements this interface then the non-standard
+// decoder will be used.
+//
+// Method arguments must be decoded from the message.
+// The mechanism for doing this will vary based on the
+// implementation of the method. A normal approach is provided
+// as part of this library, but may be replaced with
+// any other decoding scheme.
+type ArgumentDecoder interface {
+	// To decode the arguments of a method, the sender and message are
+	// provided in case the semantics of the implementer provide access
+	// to these as part of the method invocation.
+	DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error)
+}
+
+// A SignalHandler is responsible for delivering a signal.
+//
+// Signal delivery may be changed from the default channel-based
+// approach by Handlers implementing the SignalHandler
+// interface.
+type SignalHandler interface {
+	DeliverSignal(iface, name string, signal *Signal)
+}
+
+// A DBusError is used to convert a generic object to a D-Bus error.
+// +// Any custom error mechanism may implement this interface to provide +// a custom encoding of the error on D-Bus. By default if a normal +// error is returned, it will be encoded as the generic +// "org.freedesktop.DBus.Error.Failed" error. By implementing this +// interface as well a custom encoding may be provided. +type DBusError interface { + DBusError() (string, []interface{}) +} diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go new file mode 100644 index 0000000000..c1b809202c --- /dev/null +++ b/vendor/github.com/godbus/dbus/sig.go @@ -0,0 +1,259 @@ +package dbus + +import ( + "fmt" + "reflect" + "strings" +) + +var sigToType = map[byte]reflect.Type{ + 'y': byteType, + 'b': boolType, + 'n': int16Type, + 'q': uint16Type, + 'i': int32Type, + 'u': uint32Type, + 'x': int64Type, + 't': uint64Type, + 'd': float64Type, + 's': stringType, + 'g': signatureType, + 'o': objectPathType, + 'v': variantType, + 'h': unixFDIndexType, +} + +// Signature represents a correct type signature as specified by the D-Bus +// specification. The zero value represents the empty signature, "". +type Signature struct { + str string +} + +// SignatureOf returns the concatenation of all the signatures of the given +// values. It panics if one of them is not representable in D-Bus. +func SignatureOf(vs ...interface{}) Signature { + var s string + for _, v := range vs { + s += getSignature(reflect.TypeOf(v)) + } + return Signature{s} +} + +// SignatureOfType returns the signature of the given type. It panics if the +// type is not representable in D-Bus. +func SignatureOfType(t reflect.Type) Signature { + return Signature{getSignature(t)} +} + +// getSignature returns the signature of the given type and panics on unknown types. +func getSignature(t reflect.Type) string { + // handle simple types first + switch t.Kind() { + case reflect.Uint8: + return "y" + case reflect.Bool: + return "b" + case reflect.Int16: + return "n" + case reflect.Uint16: + return "q" + case reflect.Int, reflect.Int32: + if t == unixFDType { + return "h" + } + return "i" + case reflect.Uint, reflect.Uint32: + if t == unixFDIndexType { + return "h" + } + return "u" + case reflect.Int64: + return "x" + case reflect.Uint64: + return "t" + case reflect.Float64: + return "d" + case reflect.Ptr: + return getSignature(t.Elem()) + case reflect.String: + if t == objectPathType { + return "o" + } + return "s" + case reflect.Struct: + if t == variantType { + return "v" + } else if t == signatureType { + return "g" + } + var s string + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { + s += getSignature(t.Field(i).Type) + } + } + return "(" + s + ")" + case reflect.Array, reflect.Slice: + return "a" + getSignature(t.Elem()) + case reflect.Map: + if !isKeyType(t.Key()) { + panic(InvalidTypeError{t}) + } + return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" + case reflect.Interface: + return "v" + } + panic(InvalidTypeError{t}) +} + +// ParseSignature returns the signature represented by this string, or a +// SignatureError if the string is not a valid signature. 
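+//
+// For example (an illustrative sketch added in editing):
+//
+//	sig, err := ParseSignature("a{sv}") // err == nil, sig.String() == "a{sv}"
+//	_, err = ParseSignature("a{s")      // err is a SignatureError: unmatched '{'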
+func ParseSignature(s string) (sig Signature, err error) {
+	if len(s) == 0 {
+		return
+	}
+	if len(s) > 255 {
+		return Signature{""}, SignatureError{s, "too long"}
+	}
+	sig.str = s
+	for err == nil && len(s) != 0 {
+		err, s = validSingle(s, 0)
+	}
+	if err != nil {
+		sig = Signature{""}
+	}
+
+	return
+}
+
+// ParseSignatureMust behaves like ParseSignature, except that it panics if s
+// is not valid.
+func ParseSignatureMust(s string) Signature {
+	sig, err := ParseSignature(s)
+	if err != nil {
+		panic(err)
+	}
+	return sig
+}
+
+// Empty returns whether the signature is the empty signature.
+func (s Signature) Empty() bool {
+	return s.str == ""
+}
+
+// Single returns whether the signature represents a single, complete type.
+func (s Signature) Single() bool {
+	err, r := validSingle(s.str, 0)
+	return err == nil && r == ""
+}
+
+// String returns the signature's string representation.
+func (s Signature) String() string {
+	return s.str
+}
+
+// A SignatureError indicates that a signature passed to a function or received
+// on a connection is not a valid signature.
+type SignatureError struct {
+	Sig    string
+	Reason string
+}
+
+func (e SignatureError) Error() string {
+	return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason)
+}
+
+// Try to read a single type from this string. If it was successful, err is nil
+// and rem is the remaining unparsed part. Otherwise, err is a non-nil
+// SignatureError and rem is "". depth is the current recursion depth which may
+// not be greater than 64 and should be given as 0 on the first call.
+func validSingle(s string, depth int) (err error, rem string) {
+	if s == "" {
+		return SignatureError{Sig: s, Reason: "empty signature"}, ""
+	}
+	if depth > 64 {
+		return SignatureError{Sig: s, Reason: "container nesting too deep"}, ""
+	}
+	switch s[0] {
+	case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h':
+		return nil, s[1:]
+	case 'a':
+		if len(s) > 1 && s[1] == '{' {
+			i := findMatching(s[1:], '{', '}')
+			if i == -1 {
+				return SignatureError{Sig: s, Reason: "unmatched '{'"}, ""
+			}
+			i++
+			rem = s[i+1:]
+			s = s[2:i]
+			if err, _ = validSingle(s[:1], depth+1); err != nil {
+				return err, ""
+			}
+			err, nr := validSingle(s[1:], depth+1)
+			if err != nil {
+				return err, ""
+			}
+			if nr != "" {
+				return SignatureError{Sig: s, Reason: "too many types in dict"}, ""
+			}
+			return nil, rem
+		}
+		return validSingle(s[1:], depth+1)
+	case '(':
+		i := findMatching(s, '(', ')')
+		if i == -1 {
+			return SignatureError{Sig: s, Reason: "unmatched ')'"}, ""
+		}
+		rem = s[i+1:]
+		s = s[1:i]
+		for err == nil && s != "" {
+			err, s = validSingle(s, depth+1)
+		}
+		if err != nil {
+			rem = ""
+		}
+		return
+	}
+	return SignatureError{Sig: s, Reason: "invalid type character"}, ""
+}
+
+func findMatching(s string, left, right rune) int {
+	n := 0
+	for i, v := range s {
+		if v == left {
+			n++
+		} else if v == right {
+			n--
+		}
+		if n == 0 {
+			return i
+		}
+	}
+	return -1
+}
+
+// typeFor returns the type of the given signature. It ignores any left over
+// characters and panics if s doesn't start with a valid type signature.
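+//
+// For example (editorial note): typeFor("ai") yields the reflect.Type of
+// []int32, and typeFor("a{sv}") yields that of map[string]Variant.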
+func typeFor(s string) (t reflect.Type) { + err, _ := validSingle(s, 0) + if err != nil { + panic(err) + } + + if t, ok := sigToType[s[0]]; ok { + return t + } + switch s[0] { + case 'a': + if s[1] == '{' { + i := strings.LastIndex(s, "}") + t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) + } else { + t = reflect.SliceOf(typeFor(s[1:])) + } + case '(': + t = interfacesType + } + return +} diff --git a/vendor/github.com/godbus/dbus/transport_darwin.go b/vendor/github.com/godbus/dbus/transport_darwin.go new file mode 100644 index 0000000000..1bba0d6bf7 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_darwin.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go new file mode 100644 index 0000000000..3fad859a6b --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_generic.go @@ -0,0 +1,50 @@ +package dbus + +import ( + "encoding/binary" + "errors" + "io" + "unsafe" +) + +var nativeEndian binary.ByteOrder + +func detectEndianness() binary.ByteOrder { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + return binary.BigEndian + } + return binary.LittleEndian +} + +func init() { + nativeEndian = detectEndianness() +} + +type genericTransport struct { + io.ReadWriteCloser +} + +func (t genericTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} + +func (t genericTransport) SupportsUnixFDs() bool { + return false +} + +func (t genericTransport) EnableUnixFDs() {} + +func (t genericTransport) ReadMessage() (*Message, error) { + return DecodeMessage(t) +} + +func (t genericTransport) SendMessage(msg *Message) error { + for _, v := range msg.Body { + if _, ok := v.(UnixFD); ok { + return errors.New("dbus: unix fd passing not enabled") + } + } + return msg.EncodeTo(t, nativeEndian) +} diff --git a/vendor/github.com/godbus/dbus/transport_tcp.go b/vendor/github.com/godbus/dbus/transport_tcp.go new file mode 100644 index 0000000000..dd1c8e59c5 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_tcp.go @@ -0,0 +1,43 @@ +//+build !windows + +package dbus + +import ( + "errors" + "net" +) + +func init() { + transports["tcp"] = newTcpTransport +} + +func tcpFamily(keys string) (string, error) { + switch getKey(keys, "family") { + case "": + return "tcp", nil + case "ipv4": + return "tcp4", nil + case "ipv6": + return "tcp6", nil + default: + return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)") + } +} + +func newTcpTransport(keys string) (transport, error) { + host := getKey(keys, "host") + port := getKey(keys, "port") + if host == "" || port == "" { + return nil, errors.New("dbus: unsupported address (must set host and port)") + } + + protocol, err := tcpFamily(keys) + if err != nil { + return nil, err + } + socket, err := net.Dial(protocol, net.JoinHostPort(host, port)) + if err != nil { + return nil, err + } + return NewConn(socket) +} diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go new file mode 100644 index 0000000000..e56d5ca90a --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unix.go @@ -0,0 +1,196 @@ +//+build !windows,!solaris + +package dbus + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "net" + "syscall" +) + +type oobReader struct { + conn *net.UnixConn + oob []byte + buf [4096]byte +} + +func (o *oobReader) Read(b []byte) 
(n int, err error) { + n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) + if err != nil { + return n, err + } + if flags&syscall.MSG_CTRUNC != 0 { + return n, errors.New("dbus: control data truncated (too many fds received)") + } + o.oob = append(o.oob, o.buf[:oobn]...) + return n, nil +} + +type unixTransport struct { + *net.UnixConn + hasUnixFDs bool +} + +func newUnixTransport(keys string) (transport, error) { + var err error + + t := new(unixTransport) + abstract := getKey(keys, "abstract") + path := getKey(keys, "path") + switch { + case abstract == "" && path == "": + return nil, errors.New("dbus: invalid address (neither path nor abstract set)") + case abstract != "" && path == "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + case abstract == "" && path != "": + t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) + if err != nil { + return nil, err + } + return t, nil + default: + return nil, errors.New("dbus: invalid address (both path and abstract set)") + } +} + +func init() { + transports["unix"] = newUnixTransport +} + +func (t *unixTransport) EnableUnixFDs() { + t.hasUnixFDs = true +} + +func (t *unixTransport) ReadMessage() (*Message, error) { + var ( + blen, hlen uint32 + csheader [16]byte + headers []header + order binary.ByteOrder + unixfds uint32 + ) + // To be sure that all bytes of out-of-band data are read, we use a special + // reader that uses ReadUnix on the underlying connection instead of Read + // and gathers the out-of-band data in a buffer. + rd := &oobReader{conn: t.UnixConn} + // read the first 16 bytes (the part of the header that has a constant size), + // from which we can figure out the length of the rest of the message + if _, err := io.ReadFull(rd, csheader[:]); err != nil { + return nil, err + } + switch csheader[0] { + case 'l': + order = binary.LittleEndian + case 'B': + order = binary.BigEndian + default: + return nil, InvalidMessageError("invalid byte order") + } + // csheader[4:8] -> length of message body, csheader[12:16] -> length of + // header fields (without alignment) + binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) + binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) + if hlen%8 != 0 { + hlen += 8 - (hlen % 8) + } + + // decode headers and look for unix fds + headerdata := make([]byte, hlen+4) + copy(headerdata, csheader[12:]) + if _, err := io.ReadFull(t, headerdata[4:]); err != nil { + return nil, err + } + dec := newDecoder(bytes.NewBuffer(headerdata), order) + dec.pos = 12 + vs, err := dec.Decode(Signature{"a(yv)"}) + if err != nil { + return nil, err + } + Store(vs, &headers) + for _, v := range headers { + if v.Field == byte(FieldUnixFDs) { + unixfds, _ = v.Variant.value.(uint32) + } + } + all := make([]byte, 16+hlen+blen) + copy(all, csheader[:]) + copy(all[16:], headerdata[4:]) + if _, err := io.ReadFull(rd, all[16+hlen:]); err != nil { + return nil, err + } + if unixfds != 0 { + if !t.hasUnixFDs { + return nil, errors.New("dbus: got unix fds on unsupported transport") + } + // read the fds from the OOB data + scms, err := syscall.ParseSocketControlMessage(rd.oob) + if err != nil { + return nil, err + } + if len(scms) != 1 { + return nil, errors.New("dbus: received more than one socket control message") + } + fds, err := syscall.ParseUnixRights(&scms[0]) + if err != nil { + return nil, err + } + msg, err := DecodeMessage(bytes.NewBuffer(all)) + if err != nil { + 
			return nil, err
+		}
+		// substitute the values in the message body (which are indices for the
+		// array received via OOB) with the actual values
+		for i, v := range msg.Body {
+			if j, ok := v.(UnixFDIndex); ok {
+				if uint32(j) >= unixfds {
+					return nil, InvalidMessageError("invalid index for unix fd")
+				}
+				msg.Body[i] = UnixFD(fds[j])
+			}
+		}
+		return msg, nil
+	}
+	return DecodeMessage(bytes.NewBuffer(all))
+}
+
+func (t *unixTransport) SendMessage(msg *Message) error {
+	fds := make([]int, 0)
+	for i, v := range msg.Body {
+		if fd, ok := v.(UnixFD); ok {
+			msg.Body[i] = UnixFDIndex(len(fds))
+			fds = append(fds, int(fd))
+		}
+	}
+	if len(fds) != 0 {
+		if !t.hasUnixFDs {
+			return errors.New("dbus: unix fd passing not enabled")
+		}
+		msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds)))
+		oob := syscall.UnixRights(fds...)
+		buf := new(bytes.Buffer)
+		msg.EncodeTo(buf, nativeEndian)
+		n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil)
+		if err != nil {
+			return err
+		}
+		if n != buf.Len() || oobn != len(oob) {
+			return io.ErrShortWrite
+		}
+	} else {
+		if err := msg.EncodeTo(t, nativeEndian); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *unixTransport) SupportsUnixFDs() bool {
+	return true
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
new file mode 100644
index 0000000000..a8cd39395f
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go
@@ -0,0 +1,95 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for DragonFly BSD
+
+package dbus
+
+/*
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+	"io"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// http://golang.org/src/pkg/syscall/types_dragonfly.go
+// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h
+const (
+	SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+	// From http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+	//salign := sizeofPtr
+	// NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels
+	// still require 32-bit aligned access to network subsystem.
+	//if darwin64Bit || dragonfly64Bit {
+	//	salign = 4
+	//}
+	salign := 4
+	return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
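+//
+// Illustrative sketch (an editorial addition, not in the original source):
+//
+//	ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+//	oob := UnixCredentials(ucred)
+//	// oob is then passed as the out-of-band data of WriteMsgUnix,
+//	// as SendNullByte below does.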
+func UnixCredentials(ucred *Ucred) []byte {
+	b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+	h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	h.Level = syscall.SOL_SOCKET
+	h.Type = syscall.SCM_CREDS
+	h.SetLen(syscall.CmsgLen(SizeofUcred))
+	*((*Ucred)(cmsgData(h))) = *ucred
+	return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) {
+	if m.Header.Level != syscall.SOL_SOCKET {
+		return nil, syscall.EINVAL
+	}
+	if m.Header.Type != syscall.SCM_CREDS {
+		return nil, syscall.EINVAL
+	}
+	ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0]))
+	return &ucred, nil
+}
+
+func (t *unixTransport) SendNullByte() error {
+	ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())}
+	b := UnixCredentials(ucred)
+	_, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil)
+	if err != nil {
+		return err
+	}
+	if oobn != len(b) {
+		return io.ErrShortWrite
+	}
+	return nil
+}
diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
new file mode 100644
index 0000000000..0fc5b92739
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go
@@ -0,0 +1,91 @@
+// The UnixCredentials system call is currently only implemented on Linux
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// https://golang.org/s/go1.4-syscall
+// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys
+
+// Local implementation of the UnixCredentials system call for FreeBSD
+
+package dbus
+
+/*
+const int sizeofPtr = sizeof(void*);
+#define _WANT_UCRED
+#include <sys/ucred.h>
+*/
+import "C"
+
+import (
+	"io"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go
+// https://golang.org/src/syscall/ztypes_freebsd_amd64.go
+type Ucred struct {
+	Pid int32
+	Uid uint32
+	Gid uint32
+}
+
+// http://golang.org/src/pkg/syscall/types_linux.go
+// https://golang.org/src/syscall/types_freebsd.go
+// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h
+const (
+	SizeofUcred = C.sizeof_struct_ucred
+)
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgAlignOf(salen int) int {
+	salign := int(C.sizeofPtr)
+
+	return (salen + salign - 1) & ^(salign - 1)
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_unix.go
+func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr)))
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// UnixCredentials encodes credentials into a socket control message
+// for sending to another process. This can be used for
+// authentication.
+func UnixCredentials(ucred *Ucred) []byte {
+	b := make([]byte, syscall.CmsgSpace(SizeofUcred))
+	h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	h.Level = syscall.SOL_SOCKET
+	h.Type = syscall.SCM_CREDS
+	h.SetLen(syscall.CmsgLen(SizeofUcred))
+	*((*Ucred)(cmsgData(h))) = *ucred
+	return b
+}
+
+// http://golang.org/src/pkg/syscall/sockcmsg_linux.go
+// ParseUnixCredentials decodes a socket control message that contains
+// credentials in a Ucred structure. To receive such a message, the
+// SO_PASSCRED option must be enabled on the socket.
+func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { + if m.Header.Level != syscall.SOL_SOCKET { + return nil, syscall.EINVAL + } + if m.Header.Type != syscall.SCM_CREDS { + return nil, syscall.EINVAL + } + ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) + return &ucred, nil +} + +func (t *unixTransport) SendNullByte() error { + ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go new file mode 100644 index 0000000000..d9dfdf6982 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go @@ -0,0 +1,25 @@ +// The UnixCredentials system call is currently only implemented on Linux +// http://golang.org/src/pkg/syscall/sockcmsg_linux.go +// https://golang.org/s/go1.4-syscall +// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys + +package dbus + +import ( + "io" + "os" + "syscall" +) + +func (t *unixTransport) SendNullByte() error { + ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} + b := syscall.UnixCredentials(ucred) + _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) + if err != nil { + return err + } + if oobn != len(b) { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go new file mode 100644 index 0000000000..af7bafdf95 --- /dev/null +++ b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go @@ -0,0 +1,14 @@ +package dbus + +import "io" + +func (t *unixTransport) SendNullByte() error { + n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil) + if err != nil { + return err + } + if n != 1 { + return io.ErrShortWrite + } + return nil +} diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go new file mode 100644 index 0000000000..0ca123b012 --- /dev/null +++ b/vendor/github.com/godbus/dbus/variant.go @@ -0,0 +1,144 @@ +package dbus + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strconv" +) + +// Variant represents the D-Bus variant type. +type Variant struct { + sig Signature + value interface{} +} + +// MakeVariant converts the given value to a Variant. It panics if v cannot be +// represented as a D-Bus type. +func MakeVariant(v interface{}) Variant { + return MakeVariantWithSignature(v, SignatureOf(v)) +} + +// MakeVariantWithSignature converts the given value to a Variant. +func MakeVariantWithSignature(v interface{}, s Signature) Variant { + return Variant{s, v} +} + +// ParseVariant parses the given string as a variant as described at +// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not +// empty, it is taken to be the expected signature for the variant. 
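+//
+// For example (an editorial sketch, not part of the original source):
+//
+//	v, err := ParseVariant("[1, 2, 3]", ParseSignatureMust("ai"))
+//	// on success, v.Value() is []int32{1, 2, 3}
+//	v, err = ParseVariant("'hello'", Signature{})
+//	// the signature is inferred as "s"; v.Value() is "hello"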
+func ParseVariant(s string, sig Signature) (Variant, error) {
+	tokens := varLex(s)
+	p := &varParser{tokens: tokens}
+	n, err := varMakeNode(p)
+	if err != nil {
+		return Variant{}, err
+	}
+	if sig.str == "" {
+		sig, err = varInfer(n)
+		if err != nil {
+			return Variant{}, err
+		}
+	}
+	v, err := n.Value(sig)
+	if err != nil {
+		return Variant{}, err
+	}
+	return MakeVariant(v), nil
+}
+
+// format returns a formatted version of v and whether this string can be parsed
+// unambiguously.
+func (v Variant) format() (string, bool) {
+	switch v.sig.str[0] {
+	case 'b', 'i':
+		return fmt.Sprint(v.value), true
+	case 'n', 'q', 'u', 'x', 't', 'd', 'h':
+		return fmt.Sprint(v.value), false
+	case 's':
+		return strconv.Quote(v.value.(string)), true
+	case 'o':
+		return strconv.Quote(string(v.value.(ObjectPath))), false
+	case 'g':
+		return strconv.Quote(v.value.(Signature).str), false
+	case 'v':
+		s, unamb := v.value.(Variant).format()
+		if !unamb {
+			return "<@" + v.value.(Variant).sig.str + " " + s + ">", true
+		}
+		return "<" + s + ">", true
+	case 'y':
+		return fmt.Sprintf("%#x", v.value.(byte)), false
+	}
+	rv := reflect.ValueOf(v.value)
+	switch rv.Kind() {
+	case reflect.Slice:
+		if rv.Len() == 0 {
+			return "[]", false
+		}
+		unamb := true
+		buf := bytes.NewBuffer([]byte("["))
+		for i := 0; i < rv.Len(); i++ {
+			// TODO: slooow
+			s, b := MakeVariant(rv.Index(i).Interface()).format()
+			unamb = unamb && b
+			buf.WriteString(s)
+			if i != rv.Len()-1 {
+				buf.WriteString(", ")
+			}
+		}
+		buf.WriteByte(']')
+		return buf.String(), unamb
+	case reflect.Map:
+		if rv.Len() == 0 {
+			return "{}", false
+		}
+		unamb := true
+		var buf bytes.Buffer
+		kvs := make([]string, rv.Len())
+		for i, k := range rv.MapKeys() {
+			s, b := MakeVariant(k.Interface()).format()
+			unamb = unamb && b
+			buf.Reset()
+			buf.WriteString(s)
+			buf.WriteString(": ")
+			s, b = MakeVariant(rv.MapIndex(k).Interface()).format()
+			unamb = unamb && b
+			buf.WriteString(s)
+			kvs[i] = buf.String()
+		}
+		buf.Reset()
+		buf.WriteByte('{')
+		sort.Strings(kvs)
+		for i, kv := range kvs {
+			if i > 0 {
+				buf.WriteString(", ")
+			}
+			buf.WriteString(kv)
+		}
+		buf.WriteByte('}')
+		return buf.String(), unamb
+	}
+	return `"INVALID"`, true
+}
+
+// Signature returns the D-Bus signature of the underlying value of v.
+func (v Variant) Signature() Signature {
+	return v.sig
+}
+
+// String returns the string representation of the underlying value of v as
+// described at https://developer.gnome.org/glib/unstable/gvariant-text.html.
+func (v Variant) String() string {
+	s, unamb := v.format()
+	if !unamb {
+		return "@" + v.sig.str + " " + s
+	}
+	return s
+}
+
+// Value returns the underlying value of v.
+func (v Variant) Value() interface{} {
+	return v.value
+}
diff --git a/vendor/github.com/godbus/dbus/variant_lexer.go b/vendor/github.com/godbus/dbus/variant_lexer.go
new file mode 100644
index 0000000000..332007d6f1
--- /dev/null
+++ b/vendor/github.com/godbus/dbus/variant_lexer.go
@@ -0,0 +1,284 @@
+package dbus
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Heavily inspired by the lexer from text/template.
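+//
+// As an editorial illustration (not part of the original source), lexing the
+// input `[1, 'two']` yields the token stream tokArrayStart, tokNumber("1"),
+// tokComma, tokString("'two'"), tokArrayEnd, tokEOF; string tokens keep their
+// surrounding quotes until varParseString unescapes them.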
+ +type varToken struct { + typ varTokenType + val string +} + +type varTokenType byte + +const ( + tokEOF varTokenType = iota + tokError + tokNumber + tokString + tokBool + tokArrayStart + tokArrayEnd + tokDictStart + tokDictEnd + tokVariantStart + tokVariantEnd + tokComma + tokColon + tokType + tokByteString +) + +type varLexer struct { + input string + start int + pos int + width int + tokens []varToken +} + +type lexState func(*varLexer) lexState + +func varLex(s string) []varToken { + l := &varLexer{input: s} + l.run() + return l.tokens +} + +func (l *varLexer) accept(valid string) bool { + if strings.IndexRune(valid, l.next()) >= 0 { + return true + } + l.backup() + return false +} + +func (l *varLexer) backup() { + l.pos -= l.width +} + +func (l *varLexer) emit(t varTokenType) { + l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) + l.start = l.pos +} + +func (l *varLexer) errorf(format string, v ...interface{}) lexState { + l.tokens = append(l.tokens, varToken{ + tokError, + fmt.Sprintf(format, v...), + }) + return nil +} + +func (l *varLexer) ignore() { + l.start = l.pos +} + +func (l *varLexer) next() rune { + var r rune + + if l.pos >= len(l.input) { + l.width = 0 + return -1 + } + r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) + l.pos += l.width + return r +} + +func (l *varLexer) run() { + for state := varLexNormal; state != nil; { + state = state(l) + } +} + +func (l *varLexer) peek() rune { + r := l.next() + l.backup() + return r +} + +func varLexNormal(l *varLexer) lexState { + for { + r := l.next() + switch { + case r == -1: + l.emit(tokEOF) + return nil + case r == '[': + l.emit(tokArrayStart) + case r == ']': + l.emit(tokArrayEnd) + case r == '{': + l.emit(tokDictStart) + case r == '}': + l.emit(tokDictEnd) + case r == '<': + l.emit(tokVariantStart) + case r == '>': + l.emit(tokVariantEnd) + case r == ':': + l.emit(tokColon) + case r == ',': + l.emit(tokComma) + case r == '\'' || r == '"': + l.backup() + return varLexString + case r == '@': + l.backup() + return varLexType + case unicode.IsSpace(r): + l.ignore() + case unicode.IsNumber(r) || r == '+' || r == '-': + l.backup() + return varLexNumber + case r == 'b': + pos := l.start + if n := l.peek(); n == '"' || n == '\'' { + return varLexByteString + } + // not a byte string; try to parse it as a type or bool below + l.pos = pos + 1 + l.width = 1 + fallthrough + default: + // either a bool or a type. Try bools first. + l.backup() + if l.pos+4 <= len(l.input) { + if l.input[l.pos:l.pos+4] == "true" { + l.pos += 4 + l.emit(tokBool) + continue + } + } + if l.pos+5 <= len(l.input) { + if l.input[l.pos:l.pos+5] == "false" { + l.pos += 5 + l.emit(tokBool) + continue + } + } + // must be a type. 
+ return varLexType + } + } +} + +var varTypeMap = map[string]string{ + "boolean": "b", + "byte": "y", + "int16": "n", + "uint16": "q", + "int32": "i", + "uint32": "u", + "int64": "x", + "uint64": "t", + "double": "f", + "string": "s", + "objectpath": "o", + "signature": "g", +} + +func varLexByteString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated bytestring") + case q: + break Loop + } + } + l.emit(tokByteString) + return varLexNormal +} + +func varLexNumber(l *varLexer) lexState { + l.accept("+-") + digits := "0123456789" + if l.accept("0") { + if l.accept("x") { + digits = "0123456789abcdefABCDEF" + } else { + digits = "01234567" + } + } + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + if l.accept(".") { + for strings.IndexRune(digits, l.next()) >= 0 { + } + l.backup() + } + if l.accept("eE") { + l.accept("+-") + for strings.IndexRune("0123456789", l.next()) >= 0 { + } + l.backup() + } + if r := l.peek(); unicode.IsLetter(r) { + l.next() + return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) + } + l.emit(tokNumber) + return varLexNormal +} + +func varLexString(l *varLexer) lexState { + q := l.next() +Loop: + for { + switch l.next() { + case '\\': + if r := l.next(); r != -1 { + break + } + fallthrough + case -1: + return l.errorf("unterminated string") + case q: + break Loop + } + } + l.emit(tokString) + return varLexNormal +} + +func varLexType(l *varLexer) lexState { + at := l.accept("@") + for { + r := l.next() + if r == -1 { + break + } + if unicode.IsSpace(r) { + l.backup() + break + } + } + if at { + if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { + return l.errorf("%s", err) + } + } else { + if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { + l.emit(tokType) + return varLexNormal + } + return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) + } + l.emit(tokType) + return varLexNormal +} diff --git a/vendor/github.com/godbus/dbus/variant_parser.go b/vendor/github.com/godbus/dbus/variant_parser.go new file mode 100644 index 0000000000..d20f5da6dd --- /dev/null +++ b/vendor/github.com/godbus/dbus/variant_parser.go @@ -0,0 +1,817 @@ +package dbus + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type varParser struct { + tokens []varToken + i int +} + +func (p *varParser) backup() { + p.i-- +} + +func (p *varParser) next() varToken { + if p.i < len(p.tokens) { + t := p.tokens[p.i] + p.i++ + return t + } + return varToken{typ: tokEOF} +} + +type varNode interface { + Infer() (Signature, error) + String() string + Sigs() sigSet + Value(Signature) (interface{}, error) +} + +func varMakeNode(p *varParser) (varNode, error) { + var sig Signature + + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokNumber: + return varMakeNumNode(t, sig) + case tokString: + return varMakeStringNode(t, sig) + case tokBool: + if sig.str != "" && sig.str != "b" { + return nil, varTypeError{t.val, sig} + } + b, err := strconv.ParseBool(t.val) + if err != nil { + return nil, err + } + return boolNode(b), nil + case tokArrayStart: + return varMakeArrayNode(p, sig) + case tokVariantStart: + return varMakeVariantNode(p, sig) + case tokDictStart: + return varMakeDictNode(p, sig) + case tokType: + if sig.str != "" { + return nil, errors.New("unexpected type 
annotation") + } + if t.val[0] == '@' { + sig.str = t.val[1:] + } else { + sig.str = varTypeMap[t.val] + } + case tokByteString: + if sig.str != "" && sig.str != "ay" { + return nil, varTypeError{t.val, sig} + } + b, err := varParseByteString(t.val) + if err != nil { + return nil, err + } + return byteStringNode(b), nil + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } +} + +type varTypeError struct { + val string + sig Signature +} + +func (e varTypeError) Error() string { + return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) +} + +type sigSet map[Signature]bool + +func (s sigSet) Empty() bool { + return len(s) == 0 +} + +func (s sigSet) Intersect(s2 sigSet) sigSet { + r := make(sigSet) + for k := range s { + if s2[k] { + r[k] = true + } + } + return r +} + +func (s sigSet) Single() (Signature, bool) { + if len(s) == 1 { + for k := range s { + return k, true + } + } + return Signature{}, false +} + +func (s sigSet) ToArray() sigSet { + r := make(sigSet, len(s)) + for k := range s { + r[Signature{"a" + k.str}] = true + } + return r +} + +type numNode struct { + sig Signature + str string + val interface{} +} + +var numSigSet = sigSet{ + Signature{"y"}: true, + Signature{"n"}: true, + Signature{"q"}: true, + Signature{"i"}: true, + Signature{"u"}: true, + Signature{"x"}: true, + Signature{"t"}: true, + Signature{"d"}: true, +} + +func (n numNode) Infer() (Signature, error) { + if strings.ContainsAny(n.str, ".e") { + return Signature{"d"}, nil + } + return Signature{"i"}, nil +} + +func (n numNode) String() string { + return n.str +} + +func (n numNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + if strings.ContainsAny(n.str, ".e") { + return sigSet{Signature{"d"}: true} + } + return numSigSet +} + +func (n numNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + return varNumAs(n.str, sig) +} + +func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { + if sig.str == "" { + return numNode{str: tok.val}, nil + } + num, err := varNumAs(tok.val, sig) + if err != nil { + return nil, err + } + return numNode{sig: sig, val: num}, nil +} + +func varNumAs(s string, sig Signature) (interface{}, error) { + isUnsigned := false + size := 32 + switch sig.str { + case "n": + size = 16 + case "i": + case "x": + size = 64 + case "y": + size = 8 + isUnsigned = true + case "q": + size = 16 + isUnsigned = true + case "u": + isUnsigned = true + case "t": + size = 64 + isUnsigned = true + case "d": + d, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, err + } + return d, nil + default: + return nil, varTypeError{s, sig} + } + base := 10 + if strings.HasPrefix(s, "0x") { + base = 16 + s = s[2:] + } + if strings.HasPrefix(s, "0") && len(s) != 1 { + base = 8 + s = s[1:] + } + if isUnsigned { + i, err := strconv.ParseUint(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "y": + v = byte(i) + case "q": + v = uint16(i) + case "u": + v = uint32(i) + } + return v, nil + } + i, err := strconv.ParseInt(s, base, size) + if err != nil { + return nil, err + } + var v interface{} = i + switch sig.str { + case "n": + v = int16(i) + case "i": + v = int32(i) + } + return v, nil +} + +type stringNode struct { + sig Signature + str string // parsed + val interface{} // has correct type +} + +var stringSigSet = sigSet{ + Signature{"s"}: true, + Signature{"g"}: 
true, + Signature{"o"}: true, +} + +func (n stringNode) Infer() (Signature, error) { + return Signature{"s"}, nil +} + +func (n stringNode) String() string { + return n.str +} + +func (n stringNode) Sigs() sigSet { + if n.sig.str != "" { + return sigSet{n.sig: true} + } + return stringSigSet +} + +func (n stringNode) Value(sig Signature) (interface{}, error) { + if n.sig.str != "" && n.sig != sig { + return nil, varTypeError{n.str, sig} + } + if n.val != nil { + return n.val, nil + } + switch { + case sig.str == "g": + return Signature{n.str}, nil + case sig.str == "o": + return ObjectPath(n.str), nil + case sig.str == "s": + return n.str, nil + default: + return nil, varTypeError{n.str, sig} + } +} + +func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { + if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { + return nil, fmt.Errorf("invalid type %q for string", sig.str) + } + s, err := varParseString(tok.val) + if err != nil { + return nil, err + } + n := stringNode{str: s} + if sig.str == "" { + return stringNode{str: s}, nil + } + n.sig = sig + switch sig.str { + case "o": + n.val = ObjectPath(s) + case "g": + n.val = Signature{s} + case "s": + n.val = s + } + return n, nil +} + +func varParseString(s string) (string, error) { + // quotes are guaranteed to be there + s = s[1 : len(s)-1] + buf := new(bytes.Buffer) + for len(s) != 0 { + r, size := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + if r != '\\' { + buf.WriteRune(r) + continue + } + r, size = utf8.DecodeRuneInString(s) + if r == utf8.RuneError && size == 1 { + return "", errors.New("invalid UTF-8") + } + s = s[size:] + switch r { + case 'a': + buf.WriteRune(0x7) + case 'b': + buf.WriteRune(0x8) + case 'f': + buf.WriteRune(0xc) + case 'n': + buf.WriteRune('\n') + case 'r': + buf.WriteRune('\r') + case 't': + buf.WriteRune('\t') + case '\n': + case 'u': + if len(s) < 4 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:4], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[4:] + case 'U': + if len(s) < 8 { + return "", errors.New("short unicode escape") + } + r, err := strconv.ParseUint(s[:8], 16, 32) + if err != nil { + return "", err + } + buf.WriteRune(rune(r)) + s = s[8:] + default: + buf.WriteRune(r) + } + } + return buf.String(), nil +} + +var boolSigSet = sigSet{Signature{"b"}: true} + +type boolNode bool + +func (boolNode) Infer() (Signature, error) { + return Signature{"b"}, nil +} + +func (b boolNode) String() string { + if b { + return "true" + } + return "false" +} + +func (boolNode) Sigs() sigSet { + return boolSigSet +} + +func (b boolNode) Value(sig Signature) (interface{}, error) { + if sig.str != "b" { + return nil, varTypeError{b.String(), sig} + } + return bool(b), nil +} + +type arrayNode struct { + set sigSet + children []varNode + val interface{} +} + +func (n arrayNode) Infer() (Signature, error) { + for _, v := range n.children { + csig, err := varInfer(v) + if err != nil { + continue + } + return Signature{"a" + csig.str}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n arrayNode) String() string { + s := "[" + for i, v := range n.children { + s += v.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "]" +} + +func (n arrayNode) Sigs() sigSet { + return n.set +} + +func (n arrayNode) Value(sig Signature) (interface{}, error) { + if n.set.Empty() { + // no 
type information whatsoever, so this must be an empty slice + return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil + } + if !n.set[sig] { + return nil, varTypeError{n.String(), sig} + } + s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) + for i, v := range n.children { + rv, err := v.Value(Signature{sig.str[1:]}) + if err != nil { + return nil, err + } + s.Index(i).Set(reflect.ValueOf(rv)) + } + return s.Interface(), nil +} + +func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { + var n arrayNode + if sig.str != "" { + n.set = sigSet{sig: true} + } + if t := p.next(); t.typ == tokArrayEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + cn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if cset := cn.Sigs(); !cset.Empty() { + if n.set.Empty() { + n.set = cset.ToArray() + } else { + nset := cset.ToArray().Intersect(n.set) + if nset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) + } + n.set = nset + } + } + n.children = append(n.children, cn) + switch t := p.next(); t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokArrayEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type variantNode struct { + n varNode +} + +var variantSet = sigSet{ + Signature{"v"}: true, +} + +func (variantNode) Infer() (Signature, error) { + return Signature{"v"}, nil +} + +func (n variantNode) String() string { + return "<" + n.n.String() + ">" +} + +func (variantNode) Sigs() sigSet { + return variantSet +} + +func (n variantNode) Value(sig Signature) (interface{}, error) { + if sig.str != "v" { + return nil, varTypeError{n.String(), sig} + } + sig, err := varInfer(n.n) + if err != nil { + return nil, err + } + v, err := n.n.Value(sig) + if err != nil { + return nil, err + } + return MakeVariant(v), nil +} + +func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { + n, err := varMakeNode(p) + if err != nil { + return nil, err + } + if t := p.next(); t.typ != tokVariantEnd { + return nil, fmt.Errorf("unexpected %q", t.val) + } + vn := variantNode{n} + if sig.str != "" && sig.str != "v" { + return nil, varTypeError{vn.String(), sig} + } + return variantNode{n}, nil +} + +type dictEntry struct { + key, val varNode +} + +type dictNode struct { + kset, vset sigSet + children []dictEntry + val interface{} +} + +func (n dictNode) Infer() (Signature, error) { + for _, v := range n.children { + ksig, err := varInfer(v.key) + if err != nil { + continue + } + vsig, err := varInfer(v.val) + if err != nil { + continue + } + return Signature{"a{" + ksig.str + vsig.str + "}"}, nil + } + return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) +} + +func (n dictNode) String() string { + s := "{" + for i, v := range n.children { + s += v.key.String() + ": " + v.val.String() + if i != len(n.children)-1 { + s += ", " + } + } + return s + "}" +} + +func (n dictNode) Sigs() sigSet { + r := sigSet{} + for k := range n.kset { + for v := range n.vset { + sig := "a{" + k.str + v.str + "}" + r[Signature{sig}] = true + } + } + return r +} + +func (n dictNode) Value(sig Signature) (interface{}, error) { + set := n.Sigs() + if set.Empty() { + // no type information -> empty dict + return 
reflect.MakeMap(typeFor(sig.str)).Interface(), nil + } + if !set[sig] { + return nil, varTypeError{n.String(), sig} + } + m := reflect.MakeMap(typeFor(sig.str)) + ksig := Signature{sig.str[2:3]} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + for _, v := range n.children { + kv, err := v.key.Value(ksig) + if err != nil { + return nil, err + } + vv, err := v.val.Value(vsig) + if err != nil { + return nil, err + } + m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) + } + return m.Interface(), nil +} + +func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { + var n dictNode + + if sig.str != "" { + if len(sig.str) < 5 { + return nil, fmt.Errorf("invalid signature %q for dict type", sig) + } + ksig := Signature{string(sig.str[2])} + vsig := Signature{sig.str[3 : len(sig.str)-1]} + n.kset = sigSet{ksig: true} + n.vset = sigSet{vsig: true} + } + if t := p.next(); t.typ == tokDictEnd { + return n, nil + } else { + p.backup() + } +Loop: + for { + t := p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + kn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if kset := kn.Sigs(); !kset.Empty() { + if n.kset.Empty() { + n.kset = kset + } else { + n.kset = kset.Intersect(n.kset) + if n.kset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) + } + } + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokColon: + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + } + p.backup() + vn, err := varMakeNode(p) + if err != nil { + return nil, err + } + if vset := vn.Sigs(); !vset.Empty() { + if n.vset.Empty() { + n.vset = vset + } else { + n.vset = n.vset.Intersect(vset) + if n.vset.Empty() { + return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) + } + } + } + n.children = append(n.children, dictEntry{kn, vn}) + t = p.next() + switch t.typ { + case tokEOF: + return nil, io.ErrUnexpectedEOF + case tokError: + return nil, errors.New(t.val) + case tokDictEnd: + break Loop + case tokComma: + continue + default: + return nil, fmt.Errorf("unexpected %q", t.val) + } + } + return n, nil +} + +type byteStringNode []byte + +var byteStringSet = sigSet{ + Signature{"ay"}: true, +} + +func (byteStringNode) Infer() (Signature, error) { + return Signature{"ay"}, nil +} + +func (b byteStringNode) String() string { + return string(b) +} + +func (b byteStringNode) Sigs() sigSet { + return byteStringSet +} + +func (b byteStringNode) Value(sig Signature) (interface{}, error) { + if sig.str != "ay" { + return nil, varTypeError{b.String(), sig} + } + return []byte(b), nil +} + +func varParseByteString(s string) ([]byte, error) { + // quotes and b at start are guaranteed to be there + b := make([]byte, 0, 1) + s = s[2 : len(s)-1] + for len(s) != 0 { + c := s[0] + s = s[1:] + if c != '\\' { + b = append(b, c) + continue + } + c = s[0] + s = s[1:] + switch c { + case 'a': + b = append(b, 0x7) + case 'b': + b = append(b, 0x8) + case 'f': + b = append(b, 0xc) + case 'n': + b = append(b, '\n') + case 'r': + b = append(b, '\r') + case 't': + b = append(b, '\t') + case 'x': + if len(s) < 2 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:2], 16, 8) + if err != nil { + return nil, err + 
} + b = append(b, byte(n)) + s = s[2:] + case '0': + if len(s) < 3 { + return nil, errors.New("short escape") + } + n, err := strconv.ParseUint(s[:3], 8, 8) + if err != nil { + return nil, err + } + b = append(b, byte(n)) + s = s[3:] + default: + b = append(b, c) + } + } + return append(b, 0), nil +} + +func varInfer(n varNode) (Signature, error) { + if sig, ok := n.Sigs().Single(); ok { + return sig, nil + } + return n.Infer() +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 0000000000..147b5ecc62 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. 
All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. + + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available. + +Gogoprotobuf also has some more subtle changes; these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file.
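+
+Returning to messages A and B above, a brief usage sketch (this example is
+not part of the upstream documentation): because A is embedded in B and all
+fields are non-nullable, the generated types behave like plain Go values,
+with A's fields promoted onto B and no nil checks required.
+
+	b := B{A: A{Description: "foo", Number: 64}}
+	b.Number++ // promoted from the embedded A; never a nil pointer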
+ +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix. + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful if you would rather use enum_stringer, or write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather use face. + - goproto_stringer, if false, the message is generated without the default string method, this is useful if you would rather use stringer, or write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension. + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). + +Less Typing and Peace of Mind are explained in the godoc of their specific plugin folders: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions the code that is generated +will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 0000000000..5765acb153 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,804 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +/* +Package gogoproto is a generated protocol buffer package.
+ +It is generated from these files: + gogo.proto + +It has these top-level messages: +*/ +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: 
"varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: 
"varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: 
"varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: 
"gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var 
E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + 
proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } + +var fileDescriptorGogo = []byte{ + // 1220 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45, + 0x10, 0x80, 0x85, 0x48, 0x14, 0x6f, 0xd9, 0x8e, 0xf1, 0xda, 0x98, 0x10, 0x81, 0x08, 0x9c, 0x38, + 0xd9, 0xa7, 0x08, 0xa5, 0xad, 0xc8, 0x72, 0x2c, 0xc7, 0x4a, 0x84, 0xc1, 0x98, 0x38, 0xbc, 0x0e, + 0xab, 0xd9, 0xdd, 0xf6, 0x78, 0x60, 0x66, 0x7a, 0x98, 0xe9, 0x89, 0xe2, 0xdc, 0x50, 0x78, 0x08, + 0x21, 0xde, 0x48, 0x90, 0x90, 0x04, 0x38, 0xf0, 0x7e, 0x86, 0xf7, 0x91, 0x0b, 0x8f, 0x2b, 0xff, + 0x81, 0x0b, 0x60, 0xde, 0xbe, 0xf9, 0x82, 0x6a, 0xb6, 0x6a, 0xb6, 0x67, 0xbd, 0x52, 0xf7, 0xde, + 0xc6, 0xeb, 0xfe, 0xbe, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x59, 0x00, 0x5f, 0xf9, 0x6a, 0x3a, 0x49, + 0x95, 0x56, 0xf5, 0x1a, 0x5e, 0x17, 0x97, 0x07, 0x0f, 0xf9, 0x4a, 0xf9, 0xa1, 0x9c, 0x29, 0xfe, + 0x6a, 0xe6, 0xeb, 0x33, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, 0xc1, + 0x04, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0xd7, 0x83, 0xb3, 0xf5, 0x9b, 0xa6, 0x3b, + 0xe4, 0x34, 0x93, 0xd3, 0x8b, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0x81, 0xab, 0xbf, + 0x5c, 0x7b, 0xe8, 0x9a, 0xdb, 0x87, 0x56, 0xc7, 0x09, 0xc5, 0xff, 0xad, 0x14, 0xa0, 0x58, 0x85, + 0xeb, 0x2b, 0xbe, 0x4c, 0xa7, 0x41, 0xec, 0xcb, 0xd4, 0x62, 0xfc, 0x9e, 0x8c, 0x13, 0x86, 0xf1, + 0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x81, 0x5c, 0x23, 0xd2, 0x94, 0x2c, 0xc1, 0x58, + 0x21, 0x69, 0xe5, 0x99, 0x56, 0x51, 0xec, 0x45, 0xd2, 0xa2, 0xf9, 0xb1, 0xd0, 0xd4, 0x56, 0xf7, + 0x23, 0xb6, 0x50, 0x52, 0x42, 0xc0, 0x10, 0x7e, 0xd2, 0x96, 0xad, 0xd0, 0x62, 0xf8, 0x89, 0x02, + 0x29, 0xd7, 0x8b, 0xd3, 0x30, 0x89, 0xd7, 0x67, 0xbc, 0x30, 0x97, 0x66, 0x24, 0xb7, 0xf6, 0xf5, + 0x9c, 0xc6, 0x65, 0x2c, 0xfb, 0xf9, 0xfc, 0x9e, 0x22, 0x9c, 0x89, 0x52, 0x60, 0xc4, 0x64, 0x54, + 0xd1, 0x97, 0x5a, 0xcb, 0x34, 0x6b, 0x78, 0x61, 0xbf, 0xf0, 0x8e, 0x07, 0x61, 0x69, 0xbc, 0xb0, + 0x55, 0xad, 0xe2, 0x52, 0x87, 0x9c, 0x0f, 0x43, 0xb1, 0x06, 0x37, 0xf4, 0x79, 0x2a, 0x1c, 0x9c, + 0x17, 0xc9, 0x39, 0xb9, 0xeb, 0xc9, 0x40, 0xed, 0x0a, 0xf0, 0xe7, 0x65, 0x2d, 0x1d, 0x9c, 0xaf, + 0x93, 0xb3, 0x4e, 0x2c, 0x97, 0x14, 0x8d, 0x27, 0x61, 0xfc, 0x8c, 0x4c, 0x9b, 0x2a, 0x93, 0x0d, + 0xf9, 0x68, 0xee, 0x85, 0x0e, 0xba, 0x4b, 0xa4, 0x1b, 0x23, 0x70, 0x11, 0x39, 0x74, 0x1d, 0x81, + 0xa1, 0x75, 0xaf, 0x25, 0x1d, 0x14, 0x97, 0x49, 0xb1, 0x0f, 0xd7, 
0x23, 0x3a, 0x0f, 0x23, 0xbe, + 0xea, 0xdc, 0x92, 0x03, 0x7e, 0x85, 0xf0, 0x61, 0x66, 0x48, 0x91, 0xa8, 0x24, 0x0f, 0x3d, 0xed, + 0x12, 0xc1, 0x1b, 0xac, 0x60, 0x86, 0x14, 0x03, 0xa4, 0xf5, 0x4d, 0x56, 0x64, 0x46, 0x3e, 0xe7, + 0x60, 0x58, 0xc5, 0xe1, 0xa6, 0x8a, 0x5d, 0x82, 0x78, 0x8b, 0x0c, 0x40, 0x08, 0x0a, 0x66, 0xa1, + 0xe6, 0x5a, 0x88, 0xb7, 0xb7, 0x78, 0x7b, 0x70, 0x05, 0x96, 0x60, 0x8c, 0x1b, 0x54, 0xa0, 0x62, + 0x07, 0xc5, 0x3b, 0xa4, 0xd8, 0x6f, 0x60, 0x74, 0x1b, 0x5a, 0x66, 0xda, 0x97, 0x2e, 0x92, 0x77, + 0xf9, 0x36, 0x08, 0xa1, 0x54, 0x36, 0x65, 0xdc, 0xda, 0x70, 0x33, 0xbc, 0xc7, 0xa9, 0x64, 0x06, + 0x15, 0x0b, 0x30, 0x1a, 0x79, 0x69, 0xb6, 0xe1, 0x85, 0x4e, 0xe5, 0x78, 0x9f, 0x1c, 0x23, 0x25, + 0x44, 0x19, 0xc9, 0xe3, 0x41, 0x34, 0x1f, 0x70, 0x46, 0x0c, 0x8c, 0xb6, 0x5e, 0xa6, 0xbd, 0x66, + 0x28, 0x1b, 0x83, 0xd8, 0x3e, 0xe4, 0xad, 0xd7, 0x61, 0x97, 0x4d, 0xe3, 0x2c, 0xd4, 0xb2, 0xe0, + 0x9c, 0x93, 0xe6, 0x23, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x00, 0xdc, 0xd8, 0x77, 0x4c, 0x38, 0xc8, + 0x3e, 0x26, 0xd9, 0x54, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0x27, 0xdc, 0x12, 0x64, 0x8f, + 0x6b, 0x05, 0x26, 0xf3, 0x38, 0xf3, 0xd6, 0x07, 0xcb, 0xda, 0xa7, 0x9c, 0xb5, 0x0e, 0x5b, 0xc9, + 0xda, 0x29, 0x98, 0x22, 0xe3, 0x60, 0x75, 0xfd, 0x8c, 0x1b, 0x6b, 0x87, 0x5e, 0xab, 0x56, 0xf7, + 0x21, 0x38, 0x58, 0xa6, 0xf3, 0xac, 0x96, 0x71, 0x86, 0x4c, 0x23, 0xf2, 0x12, 0x07, 0xf3, 0x55, + 0x32, 0x73, 0xc7, 0x5f, 0x2c, 0x05, 0xcb, 0x5e, 0x82, 0xf2, 0xfb, 0xe1, 0x00, 0xcb, 0xf3, 0x38, + 0x95, 0x2d, 0xe5, 0xc7, 0xc1, 0x39, 0xd9, 0x76, 0x50, 0x7f, 0xde, 0x53, 0xaa, 0x35, 0x03, 0x47, + 0xf3, 0x09, 0xb8, 0xae, 0x3c, 0xab, 0x34, 0x82, 0x28, 0x51, 0xa9, 0xb6, 0x18, 0xbf, 0xe0, 0x4a, + 0x95, 0xdc, 0x89, 0x02, 0x13, 0x8b, 0xb0, 0xbf, 0xf8, 0xd3, 0xf5, 0x91, 0xfc, 0x92, 0x44, 0xa3, + 0x5d, 0x8a, 0x1a, 0x47, 0x4b, 0x45, 0x89, 0x97, 0xba, 0xf4, 0xbf, 0xaf, 0xb8, 0x71, 0x10, 0x42, + 0x8d, 0x43, 0x6f, 0x26, 0x12, 0xa7, 0xbd, 0x83, 0xe1, 0x6b, 0x6e, 0x1c, 0xcc, 0x90, 0x82, 0x0f, + 0x0c, 0x0e, 0x8a, 0x6f, 0x58, 0xc1, 0x0c, 0x2a, 0xee, 0xe9, 0x0e, 0xda, 0x54, 0xfa, 0x41, 0xa6, + 0x53, 0x0f, 0x57, 0x5b, 0x54, 0xdf, 0x6e, 0x55, 0x0f, 0x61, 0xab, 0x06, 0x2a, 0x4e, 0xc2, 0x58, + 0xcf, 0x11, 0xa3, 0x7e, 0xcb, 0x2e, 0xdb, 0xb2, 0xcc, 0x32, 0xcf, 0x2f, 0x85, 0x8f, 0x6d, 0x53, + 0x33, 0xaa, 0x9e, 0x30, 0xc4, 0x9d, 0x58, 0xf7, 0xea, 0x39, 0xc0, 0x2e, 0x3b, 0xbf, 0x5d, 0x96, + 0xbe, 0x72, 0x0c, 0x10, 0xc7, 0x61, 0xb4, 0x72, 0x06, 0xb0, 0xab, 0x1e, 0x27, 0xd5, 0x88, 0x79, + 0x04, 0x10, 0x87, 0x61, 0x0f, 0xce, 0x73, 0x3b, 0xfe, 0x04, 0xe1, 0xc5, 0x72, 0x71, 0x14, 0x86, + 0x78, 0x8e, 0xdb, 0xd1, 0x27, 0x09, 0x2d, 0x11, 0xc4, 0x79, 0x86, 0xdb, 0xf1, 0xa7, 0x18, 0x67, + 0x04, 0x71, 0xf7, 0x14, 0x7e, 0xf7, 0xcc, 0x1e, 0xea, 0xc3, 0x9c, 0xbb, 0x59, 0xd8, 0x47, 0xc3, + 0xdb, 0x4e, 0x3f, 0x4d, 0x5f, 0xce, 0x84, 0xb8, 0x03, 0xf6, 0x3a, 0x26, 0xfc, 0x59, 0x42, 0x3b, + 0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x03, 0xdb, 0x8e, 0x3f, 0x47, 0xb8, 0x49, 0x61, 0xe8, 0x34, 0xb0, + 0xed, 0x82, 0xe7, 0x39, 0x74, 0x22, 0x30, 0x6d, 0x3c, 0xab, 0xed, 0xf4, 0x0b, 0x9c, 0x75, 0x46, + 0xc4, 0x1c, 0xd4, 0xca, 0xfe, 0x6b, 0xe7, 0x5f, 0x24, 0xbe, 0xcb, 0x60, 0x06, 0x8c, 0xfe, 0x6f, + 0x57, 0xbc, 0xc4, 0x19, 0x30, 0x28, 0xdc, 0x46, 0xbd, 0x33, 0xdd, 0x6e, 0x7a, 0x99, 0xb7, 0x51, + 0xcf, 0x48, 0xc7, 0x6a, 0x16, 0x6d, 0xd0, 0xae, 0x78, 0x85, 0xab, 0x59, 0xac, 0xc7, 0x30, 0x7a, + 0x87, 0xa4, 0xdd, 0xf1, 0x2a, 0x87, 0xd1, 0x33, 0x23, 0xc5, 0x0a, 0xd4, 0x77, 0x0f, 0x48, 0xbb, + 0xef, 0x35, 0xf2, 0x8d, 0xef, 0x9a, 0x8f, 0xe2, 0x3e, 0x98, 0xea, 0x3f, 0x1c, 0xed, 0xd6, 
0x0b, + 0xdb, 0x3d, 0xaf, 0x33, 0xe6, 0x6c, 0x14, 0xa7, 0xba, 0x5d, 0xd6, 0x1c, 0x8c, 0x76, 0xed, 0xc5, + 0xed, 0x6a, 0xa3, 0x35, 0xe7, 0xa2, 0x98, 0x07, 0xe8, 0xce, 0x24, 0xbb, 0xeb, 0x12, 0xb9, 0x0c, + 0x08, 0xb7, 0x06, 0x8d, 0x24, 0x3b, 0x7f, 0x99, 0xb7, 0x06, 0x11, 0xb8, 0x35, 0x78, 0x1a, 0xd9, + 0xe9, 0x2b, 0xbc, 0x35, 0x18, 0x11, 0xb3, 0x30, 0x14, 0xe7, 0x61, 0x88, 0xcf, 0x56, 0xfd, 0xe6, + 0x3e, 0xe3, 0x46, 0x86, 0x6d, 0x86, 0x7f, 0xdd, 0x21, 0x98, 0x01, 0x71, 0x18, 0xf6, 0xca, 0xa8, + 0x29, 0xdb, 0x36, 0xf2, 0xb7, 0x1d, 0xee, 0x27, 0xb8, 0x5a, 0xcc, 0x01, 0x74, 0x5e, 0xa6, 0x31, + 0x0a, 0x1b, 0xfb, 0xfb, 0x4e, 0xe7, 0xbd, 0xde, 0x40, 0xba, 0x82, 0xe2, 0x6d, 0xdc, 0x22, 0xd8, + 0xaa, 0x0a, 0x8a, 0x17, 0xf0, 0x23, 0xb0, 0xef, 0xe1, 0x4c, 0xc5, 0xda, 0xf3, 0x6d, 0xf4, 0x1f, + 0x44, 0xf3, 0x7a, 0x4c, 0x58, 0xa4, 0x52, 0xa9, 0x3d, 0x3f, 0xb3, 0xb1, 0x7f, 0x12, 0x5b, 0x02, + 0x08, 0xb7, 0xbc, 0x4c, 0xbb, 0xdc, 0xf7, 0x5f, 0x0c, 0x33, 0x80, 0x41, 0xe3, 0xf5, 0x23, 0x72, + 0xd3, 0xc6, 0xfe, 0xcd, 0x41, 0xd3, 0x7a, 0x71, 0x14, 0x6a, 0x78, 0x59, 0xfc, 0x0e, 0x61, 0x83, + 0xff, 0x21, 0xb8, 0x4b, 0xe0, 0x37, 0x67, 0xba, 0xad, 0x03, 0x7b, 0xb2, 0xff, 0xa5, 0x4a, 0xf3, + 0x7a, 0x31, 0x0f, 0xc3, 0x99, 0x6e, 0xb7, 0x73, 0x3a, 0xd1, 0x58, 0xf0, 0xff, 0x76, 0xca, 0x97, + 0xdc, 0x92, 0x39, 0xb6, 0x08, 0x13, 0x2d, 0x15, 0xf5, 0x82, 0xc7, 0x60, 0x49, 0x2d, 0xa9, 0x95, + 0x62, 0x17, 0x3d, 0x78, 0x9b, 0x1f, 0xe8, 0x8d, 0xbc, 0x39, 0xdd, 0x52, 0xd1, 0x0c, 0x1e, 0x35, + 0xbb, 0xbf, 0xa0, 0x95, 0x07, 0xcf, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, 0x5f, 0x6c, 0x20, + 0x74, 0x13, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 0000000000..6b851c5623 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,357 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
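+
+// Illustrative note (not part of the upstream source): the helpers below are
+// predicates and accessors over descriptor options. A protoc-gen-gogo plugin
+// would typically consult them when deciding what code to emit, for example:
+//
+//	if gogoproto.IsCustomType(field) {
+//		// e.g. "github.com/gogo/protobuf/test/custom.Uuid"
+//		typ := gogoproto.GetCustomType(field)
+//		emitField(typ) // emitField is a hypothetical plugin function
+//	}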
+ +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + 
return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message 
*google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + +func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, 
false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + if IsProto3(file) { + return false + } + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 0000000000..a85bf1984c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. 
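+//
+// A hedged usage sketch (not part of the upstream source), where pb.MyMessage
+// stands in for any generated message type:
+//
+//	fd, md := descriptor.ForMessage(&pb.MyMessage{})
+//	fmt.Println(fd.GetName(), md.GetName()) // e.g. "my.proto" "MyMessage"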
+package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? +func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 0000000000..4174cbd9f3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2280 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. + FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + 
FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. + FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. 
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} } + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. 
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. + MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{1} } + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. 
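// Example (an illustrative sketch): descriptor.proto is itself a proto2 file, so every field above is a pointer and presence is tracked explicitly. Fields are set with the proto.String/proto.Bool/proto.Int32 helpers from the gogo proto package, and the generated getters, like those just shown, are nil-safe at every level:
//
//	fdp := &FileDescriptorProto{
//		Name:    proto.String("example.proto"), // hypothetical file name
//		Package: proto.String("example"),
//		Syntax:  proto.String("proto3"),
//	}
//	fmt.Println(fdp.GetName()) // "example.proto"
//	var unset *FileDescriptorProto
//	fmt.Println(unset.GetSyntax() == "") // true: getters tolerate nil receivers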
+type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{2} } + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, 
[]int{2, 0} +} + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. +type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{2, 1} +} + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). 
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. +type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{7} +} + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. +type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; in the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the objective c class prefix which is prepended to all objective c + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m
*FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; in the very least, + // this is a formalization for deprecating messages. 
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // maps field. + // + // For maps fields: + // map map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementions still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. 
Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicitly setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as a JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript number. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. 
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them.
+type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set. + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
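To make the dotted-name encoding just described concrete (the NamePart message itself is declared immediately below), here is a hedged sketch that rebuilds "foo.(bar.baz).qux" from its segments. The render helper is hypothetical, not part of the generated API; the GetNamePart/GetIsExtension getters it uses are the generated ones:

package main

import (
	"fmt"
	"strings"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// render joins NameParts into a dotted option name, parenthesizing
// extension segments — the inverse of the encoding described above.
func render(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	name := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(render(name)) // foo.(bar.baz).qux
}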
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{18, 0} +} + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendent. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. 
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user. + Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. 
Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{19, 0} +} + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() []int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
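Before the Annotation struct closes with its End field below, a hedged sketch of consuming the SourceCodeInfo_Location conventions documented earlier — the [4, 3, 2, 7, 1] path example and the three- versus four-element span rule. The path and span values here are sample data, not anything emitted by this package:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	loc := &descriptor.SourceCodeInfo_Location{
		// [4 3 2 7 1] = message_type(3).field(7).name(), per the path rules above.
		Path: []int32{4, 3, 2, 7, 1},
		// Three elements: start line, start column, end column (end line == start line).
		Span: []int32{11, 8, 19},
	}
	startLine, startCol := loc.Span[0], loc.Span[1]
	endLine, endCol := startLine, loc.Span[2]
	if len(loc.Span) == 4 { // a four-element span carries an explicit end line
		endLine, endCol = loc.Span[2], loc.Span[3]
	}
	// Line and column numbers are zero-based; add 1 before displaying.
	fmt.Printf("path=%v span=%d:%d-%d:%d\n", loc.Path,
		startLine+1, startCol+1, endLine+1, endCol+1)
}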
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, 
FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } + +var fileDescriptorDescriptor = []byte{ + // 2487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, + 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05, + 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90, + 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f, + 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf, + 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33, + 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9, + 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff, + 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8, + 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e, + 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10, + 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82, + 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62, + 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6, + 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79, + 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a, + 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae, + 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69, + 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62, + 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d, + 0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a, + 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea, + 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c, + 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52, + 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 
0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8, + 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a, + 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52, + 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f, + 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26, + 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92, + 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d, + 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18, + 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae, + 0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff, + 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b, + 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f, + 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3, + 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff, + 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1, + 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a, + 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b, + 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05, + 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31, + 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b, + 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0, + 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a, + 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0, + 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34, + 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02, + 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b, + 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c, + 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2, + 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23, + 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15, + 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13, + 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b, + 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3, + 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda, + 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae, + 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27, + 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 0xb3, 0x62, 
0xe1, 0x47, + 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82, + 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35, + 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf, + 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9, + 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01, + 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa, + 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f, + 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0, + 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f, + 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc, + 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86, + 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52, + 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07, + 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51, + 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, + 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c, + 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c, + 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51, + 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2, + 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d, + 0xfd, 0x73, 0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d, + 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53, + 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf, + 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7, + 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec, + 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43, + 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1, + 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce, + 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf, + 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e, + 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, + 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88, + 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f, + 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7, + 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2, + 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42, + 0x31, 0xf5, 
0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27, + 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c, + 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97, + 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94, + 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c, + 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0, + 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a, + 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47, + 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b, + 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e, + 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc, + 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07, + 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39, + 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf, + 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17, + 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a, + 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04, + 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e, + 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7, + 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41, + 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4, + 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b, + 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0, + 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, + 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26, + 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa, + 0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00, + 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7, + 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf, + 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe, + 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03, + 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15, + 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29, + 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 
0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61, + 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d, + 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4, + 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52, + 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3, + 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12, + 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65, + 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9, + 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc, + 0x16, 0xc0, 0xf8, 0x1a, 0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88, + 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99, + 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d, + 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5, + 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f, + 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d, + 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c, + 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16, + 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5, + 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, + 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 0000000000..3b95a77575 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,772 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor + +import fmt "fmt" +import strings "strings" +import proto "github.com/gogo/protobuf/proto" +import sort "sort" +import strconv "strconv" +import reflect "reflect" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
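The GoString methods this new file defines (the blank-identifier import references follow just below) exist so that fmt's %#v verb prints descriptor values as Go source literals, which is useful for debugging and for generated test fixtures. A small usage sketch, again assuming the canonical gogo import paths:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	p := &descriptor.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
	}
	// %#v dispatches to the GoString method when the value implements
	// fmt.GoStringer, printing a &descriptor.FileDescriptorProto{...} literal.
	fmt.Printf("%#v\n", p)
}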
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, 
"}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, 
"&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = 
append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 23) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", 
this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = 
append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name != nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { 
+ s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m proto.Message) string { + e := proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 0000000000..e0846a357d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' 
{ + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range 
msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go 
b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 0000000000..ceadde6a5e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 
0000000000..d83c3ad007 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,138 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// EmptyAny returns a new proto.Message of the type specified in a +// google.protobuf.Any message.
It returns an error if the corresponding message +// type isn't linked in. +func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if the type of the +// contents of the Any message does not match the type of the pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 0000000000..4b5f5705a9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,651 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: any.proto + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + any.proto + + It has these top-level messages: + Any +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// The protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... +// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ...
+// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by the protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": <string>, +// "lastName": <string> +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized protocol buffer message. + // + // For URLs which use the scheme `http`, `https`, or no scheme, the + // following restrictions and interpretations apply: + // + // * If no scheme is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // The name should be in a canonical form (e.g., leading "." is + // not accepted). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type.
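+ // + // For example (an illustrative note, not part of the upstream comment): for a TypeUrl of "type.googleapis.com/google.protobuf.Duration", Value holds the wire-format bytes of a google.protobuf.Duration message.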
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TypeUrl) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := 
make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + return n +} + +func sovAny(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAny + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAny(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("any.proto", fileDescriptorAny) } + +var fileDescriptorAny = []byte{ + // 204 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, + 0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b, + 0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0, + 0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 
0x05, 0x46, 0x0d, + 0x9e, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, + 0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, + 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, + 0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x77, 0xe2, + 0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x36, 0x16, 0x2f, 0x62, 0x62, + 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x51, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, + 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x96, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, + 0x00, 0x00, 0xff, 0xff, 0xb7, 0x39, 0x2f, 0x89, 0xdd, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 0000000000..ff2810af1e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 0000000000..475d61f1db --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration.
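+// +// A minimal round-trip sketch (an illustrative note, not part of the upstream file; it uses only DurationProto and DurationFromProto from this package): +// +// d := 3*time.Second + time.Nanosecond +// p := DurationProto(d) // p == &Duration{Seconds: 3, Nanos: 1} +// back, err := DurationFromProto(p) // err == nil && back == d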
+func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 0000000000..ee9deacfd3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,488 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: duration.proto + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + duration.proto + + It has these top-level messages: + Duration +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time.
Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] 
= uint8(v) + return offset + 1 +} +func (m *Duration) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + return n +} + +func sovDuration(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDuration + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if 
b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDuration(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) } + +var fileDescriptorDuration = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, + 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, + 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, + 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c, + 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, + 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe5, 0x18, + 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x76, 0xe2, 0x85, 0x59, 0x1c, 0x00, 0x12, 0x09, + 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, + 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4b, 0x00, 0x54, 0x8b, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, + 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x2c, 0x63, 0x40, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x9d, 0x5a, 0x25, 0xa5, 0xe6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 0000000000..90e7670e21 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 0000000000..e7018b905d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,432 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: empty.proto + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + empty.proto + +It has these top-level messages: + Empty +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. 
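+// +// As an illustrative sketch (not part of the upstream file): because Empty has no fields, the generated Marshal below always produces a zero-length buffer: +// +// b, err := (&Empty{}).Marshal() // err == nil, len(b) == 0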
+type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = 
encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + var l int + _ = l + return n +} + +func sovEmpty(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := 
skipEmpty(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("empty.proto", fileDescriptorEmpty) } + +var fileDescriptorEmpty = []byte{ + // 169 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, + 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xd8, 0xb9, 0x58, 0x5d, 0x41, 0xf2, 0x4e, 0x2d, 0x8c, 0x17, 0x1e, + 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, 0x8c, 0x0d, + 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, + 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, 0xc8, 0x25, + 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa0, 0x13, 0x17, 0xd8, 0xb8, 0x00, 0x10, 0x37, 0x80, 0x31, + 0x8a, 0xb5, 0xa4, 0xb2, 0x20, 0xb5, 0xf8, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, + 0x55, 0x4c, 0x72, 0xee, 0x10, 0xf5, 0x01, 0x50, 0xf5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, + 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x95, 0x49, 0x6c, 0x60, 0x83, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x7c, 0xa8, 0xf0, 0xc4, 0xb6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 0000000000..22e8b4f0db --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,713 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: field_mask.proto + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + field_mask.proto + +It has these top-level messages: + FieldMask +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. 
For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +type FieldMask struct { + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} } + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + return 0 +} +func (this *FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() 
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + return n +} + +func sovFieldMask(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFieldMask(dAtA[start:]) + if err != nil { 
+ return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("field_mask.proto", fileDescriptorFieldMask) } + +var fileDescriptorFieldMask = []byte{ + // 193 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, + 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x14, 0xb9, 0x38, 0xdd, 0x40, 0x8a, + 0x7c, 0x13, 0x8b, 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x0b, 0x12, 0x4b, 0x32, 0x8a, 0x25, 0x18, 0x15, + 0x98, 0x35, 0x38, 0x83, 0x20, 0x1c, 0xa7, 0x56, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, + 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, + 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, + 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, 0x43, + 0xb3, 0xca, 0x89, 0x0f, 0x6e, 0x51, 0x00, 0x48, 0x28, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20, + 0xb5, 0x78, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0x86, 0x00, 0xa8, + 0x06, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0xb2, 0x24, 0x36, + 0xb0, 0x49, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x31, 0x89, 0xb5, 0xd6, 0x00, 0x00, + 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 0000000000..7d5372b031 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,1813 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: struct.proto + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + struct.proto + + It has these top-level messages: + Struct + Value + ListValue +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strconv "strconv" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. 
+ NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { 
+ return x.NullValue + } + return NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + _ = b.EncodeVarint(1<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + _ = b.EncodeVarint(2<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case *Value_StructValue: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, 
proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", 
this.Kind)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + dAtA[i] = 0xa + i++ + v := m.Fields[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovStruct(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovStruct(uint64(len(k))) + msgSize + i = encodeVarintStruct(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintStruct(dAtA, i, uint64(v.Size())) + n1, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + } + } + return i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Kind != nil { + nn2, err := m.Kind.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn2 + } + return i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x8 + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + return i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) 
(int, error) { + i := 0 + dAtA[i] = 0x11 + i++ + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i += 8 + return i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i += copy(dAtA[i:], m.StringValue) + return i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.StructValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.StructValue.Size())) + n3, err := m.StructValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ListValue != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.ListValue.Size())) + n4, err := m.ListValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for _, msg := range m.Values { + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(10) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = 
bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(10) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) Size() (n int) { + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + var l int + _ = l + n += 2 + return n 
+} +func (m *Value_StructValue) Size() (n int) { + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + return n +} + +func sovStruct(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListValue{`, + `Values:` + strings.Replace(fmt.Sprintf("%v", this.Values), "Value", "Value", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NullValue(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthStruct + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStruct(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + 
case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("struct.proto", fileDescriptorStruct) } + +var fileDescriptorStruct = []byte{ + // 432 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0x55, 0xa9, 0x0e, 0x09, 0xa2, 0x22, 0x1d, 0x51, 0xba, + 0x58, 0x08, 0x79, 0x08, 0x0b, 0x22, 0x2c, 0x58, 0x2a, 0xad, 0x84, 0x55, 0x19, 0x43, 0x8b, 0xc4, + 0x12, 0xe1, 0xd4, 0x8d, 0xac, 0x5e, 0xef, 0x2a, 0xfb, 0x0c, 0xca, 0x06, 0xff, 0x05, 0x33, 0x13, + 0x62, 0xe4, 0xaf, 0x60, 0xec, 0xc8, 0x88, 0x3d, 0x31, 0x76, 0xec, 0x88, 0xee, 0xce, 0x36, 0xa8, + 0x51, 0x36, 0xbf, 0xcf, 0xbf, 0xf7, 0xbd, 0xf7, 0xbd, 0xc3, 0xcd, 0x42, 0xe6, 0xe5, 0x5c, 0xfa, + 0x17, 0xb9, 0x90, 0x82, 0xdc, 0x5e, 0x08, 0xb1, 0x60, 0xa9, 0xa9, 0x92, 0xf2, 0x74, 0xfc, 0x05, + 0xd0, 0x79, 0xad, 0x09, 0x32, 0x45, 0xe7, 0x34, 0x4b, 0xd9, 0x49, 0x31, 0x84, 0x51, 0xcf, 0x73, + 0x27, 0xbb, 0xfe, 0x0d, 0xd8, 0x37, 0xa0, 0xff, 0x42, 0x53, 0x7b, 0x5c, 0xe6, 0xcb, 0xb8, 0x69, + 0xd9, 0x79, 0x85, 0xee, 0x7f, 0x32, 0xd9, 0xc6, 0xde, 0x59, 0xba, 0x1c, 0xc2, 0x08, 0xbc, 0x41, + 0xac, 0x3e, 0xc9, 0x23, 0xec, 0x7f, 0x78, 0xcf, 0xca, 0x74, 0x68, 0x8f, 0xc0, 0x73, 0x27, 0x77, + 0x57, 0xcc, 0x8f, 0xd5, 0xdf, 0xd8, 0x40, 0x4f, 0xed, 0x27, 0x30, 0xfe, 0x61, 0x63, 0x5f, 0x8b, + 0x64, 0x8a, 0xc8, 0x4b, 0xc6, 0x66, 0xc6, 0x40, 0x99, 0x6e, 0x4d, 0x76, 0x56, 0x0c, 0x0e, 0x4b, + 0xc6, 0x34, 0x7f, 0x60, 0xc5, 0x03, 0xde, 0x16, 0x64, 0x17, 0x37, 0x79, 0x79, 0x9e, 0xa4, 0xf9, + 0xec, 0xdf, 0x7c, 0x38, 0xb0, 0x62, 0xd7, 0xa8, 0x1d, 0x54, 0xc8, 0x3c, 0xe3, 0x8b, 0x06, 0xea, + 0xa9, 0xc5, 0x15, 0x64, 0x54, 0x03, 0x3d, 0x40, 0x4c, 0x84, 0x68, 0xd7, 0xd8, 0x18, 0x81, 0x77, + 0x4b, 0x8d, 0x52, 0x9a, 0x01, 0x9e, 0xb5, 0xd7, 0x6e, 0x90, 0xbe, 0x8e, 0x7a, 0x6f, 0xcd, 0x1d, + 0x1b, 0xfb, 0x72, 0x2e, 0xbb, 0x94, 0x2c, 0x2b, 0xda, 0x5e, 0x47, 0xf7, 0xae, 0xa6, 0x0c, 0xb3, + 0x42, 0x76, 0x29, 0x59, 0x5b, 0x04, 0x0e, 0x6e, 0x9c, 0x65, 0xfc, 0x64, 0x3c, 0xc5, 0x41, 0x47, + 0x10, 0x1f, 0x1d, 0x6d, 0xd6, 0xbe, 0xe8, 0xba, 0xa3, 0x37, 0xd4, 0xc3, 0xfb, 0x38, 0xe8, 0x8e, + 0x48, 0xb6, 0x10, 0x0f, 0x8f, 0xc2, 0x70, 0x76, 0xfc, 0x3c, 0x3c, 0xda, 0xdb, 0xb6, 0x82, 0xcf, + 0x70, 0x59, 0x51, 0xeb, 0x57, 0x45, 0xad, 0xab, 0x8a, 0xc2, 0x75, 0x45, 0xe1, 0x53, 0x4d, 0xe1, + 0x5b, 0x4d, 0xe1, 0x67, 0x4d, 0xe1, 0xb2, 0xa6, 0xf0, 0xbb, 0xa6, 0xf0, 0xa7, 0xa6, 0xd6, 0x55, + 0x4d, 0x01, 0xef, 0xcc, 0xc5, 0xf9, 0xcd, 0x71, 0x81, 0x6b, 0x92, 0x47, 0xaa, 0x8e, 0xe0, 0x5d, + 0x5f, 0x2e, 0x2f, 0xd2, 0xe2, 0x1a, 0xe0, 0xab, 0xdd, 0xdb, 0x8f, 0x82, 0xef, 0x36, 0xdd, 0x37, + 0x0d, 0x51, 0xbb, 0xdf, 0xdb, 0x94, 0xb1, 0x97, 0x5c, 0x7c, 0xe4, 0x6f, 0x14, 0x99, 0x38, 0xda, + 0xe9, 0xf1, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75, 0xc5, 0x1c, 0x3b, 0xd5, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 0000000000..7ae54d8b3f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,132 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if TimestampFromProto returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined.
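// ----------------------------------------------------------------------------
// [Editor's sketch, not part of the vendored file] Every generated Unmarshal
// and skip function in these gogo/protobuf files (struct.pb.go above,
// timestamp.pb.go below) decodes protobuf varints with the same inlined 7-bit
// loop. Factored out, the pattern looks like the function below; the name
// decodeVarint is hypothetical, and the two errors stand in for the package's
// ErrIntOverflow* and io.ErrUnexpectedEOF values.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// more than ten 7-bit groups cannot fit in a uint64
			return 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(b) {
			// input ended in the middle of a varint
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= (uint64(c) & 0x7F) << shift // low 7 bits carry the payload
		if c < 0x80 {
			return v, n, nil // high bit clear marks the final byte
		}
	}
}
// ----------------------------------------------------------------------------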
+func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because it corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func TimestampProto(t time.Time) (*Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 0000000000..41b18f941f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,504 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: timestamp.proto + +/* + Package types is a generated protocol buffer package. + + It is generated from these files: + timestamp.proto + + It has these top-level messages: + Timestamp +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive.
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Timestamp) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTimestamp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") +) + +func init() { 
proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) } + +var fileDescriptorTimestamp = []byte{ + // 205 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, + 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, + 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, + 0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, + 0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x81, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, + 0x3e, 0x3c, 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, + 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, + 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0x2c, 0x77, 0xe2, 0x83, 0x5b, 0x1d, 0x00, + 0x12, 0x0a, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, + 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4f, 0x00, 0x54, 0x8f, 0x5e, 0x78, 0x6a, + 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x30, 0x63, 0x40, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xa2, 0x42, 0xda, 0xea, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 0000000000..e03fa13158 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 0000000000..18b384ea35 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2180 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: wrappers.proto + +/* +Package types is a generated protocol buffer package. + +It is generated from these files: + wrappers.proto + +It has these top-level messages: + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. 
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. 
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value 
< that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if 
this.Value != that1.Value { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) 
GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x9 + i++ + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 + } + return i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0xd + i++ + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i += 4 + } + return i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*Int32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) 
*BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) Size() (n int) { + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + 
if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func (m *BytesValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func sovWrappers(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + 
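The Unmarshal loops in this file all begin by decoding one varint key per field. A standalone sketch (the byte values are a worked example, not taken from the diff) of how that key splits into a field number and wire type:

package main

import "fmt"

func main() {
	// key = fieldNumber<<3 | wireType. For BytesValue{Value: []byte("hi")}
	// the wire bytes are 0x0a 0x02 'h' 'i'; the key 0x0a decodes as below.
	key := uint64(0x0a)
	fieldNum := int32(key >> 3) // 1
	wireType := int(key & 0x7)  // 2 = length-delimited
	fmt.Println(fieldNum, wireType)
}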
} + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWrappers(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("wrappers.proto", fileDescriptorWrappers) } + +var fileDescriptorWrappers = []byte{ + // 278 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2b, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, + 0x52, 0xc3, 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, + 0x46, 0x0d, 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 
0x27, 0x3f, 0xb1, 0x04, 0x8b, + 0x1a, 0x26, 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, + 0x71, 0x87, 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, + 0x55, 0xc4, 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, + 0x39, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, + 0xb5, 0x18, 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x76, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, + 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, + 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, + 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, + 0x43, 0x8b, 0x0e, 0x27, 0xde, 0x70, 0x68, 0x7c, 0x05, 0x80, 0x44, 0x02, 0x18, 0xa3, 0x58, 0x4b, + 0x2a, 0x0b, 0x52, 0x8b, 0x7f, 0x30, 0x32, 0x2e, 0x62, 0x62, 0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, + 0xe7, 0x0e, 0xd1, 0x12, 0x00, 0xd5, 0xa2, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, + 0x17, 0x02, 0x52, 0x99, 0xc4, 0x06, 0x36, 0xcb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x23, 0x27, + 0x6c, 0x5f, 0xfa, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575b35..3cd3249f70 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generated Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil.
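As a rough standalone sketch of the pattern the rewritten Clone uses (Message, Msg, and Merge here are stand-ins, not the real proto API): allocate a zero value of the same dynamic type, then let Merge do the copying.

package main

import (
	"fmt"
	"reflect"
)

type Message interface{ Reset() }

type Msg struct{ Value string }

func (m *Msg) Reset() { *m = Msg{} }

// Merge is a stand-in: the real proto.Merge walks fields reflectively
// and dispatches to generated XXX_Merge methods when available.
func Merge(dst, src Message) {
	reflect.ValueOf(dst).Elem().Set(reflect.ValueOf(src).Elem())
}

// Clone mirrors the rewritten vendored Clone: fresh zero value, then Merge.
func Clone(src Message) Message {
	in := reflect.ValueOf(src)
	if in.IsNil() {
		return src
	}
	out := reflect.New(in.Type().Elem())
	dst := out.Interface().(Message)
	Merge(dst, src)
	return dst
}

func main() {
	orig := &Msg{Value: "a"}
	dup := Clone(orig).(*Msg)
	dup.Value = "b"
	fmt.Println(orig.Value, dup.Value) // a b: mutating the copy leaves orig intact
}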
func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f9..d9aa3c42d6 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
-func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent about + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// the EndGroup tag.
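A small standalone illustration of the group framing the new DecodeGroup relies on (the field number is an assumption of the example): groups carry no length prefix, only start and end keys.

package main

import "fmt"

func main() {
	// A proto2 group with field number 1 is framed by two keys:
	// start = 1<<3 | 3 (WireStartGroup), end = 1<<3 | 4 (WireEndGroup).
	// DecodeGroup above assumes the start key was already consumed and
	// scans forward (findEndGroup) for the matching end key.
	const fieldNum = 1
	start := byte(fieldNum<<3 | 3)
	end := byte(fieldNum<<3 | 4)
	fmt.Printf("start=%#x end=%#x\n", start, end) // start=0xb end=0xc
}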
func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? - if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. 
- if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. -func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). 
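The packed decoders being deleted here all follow the same shape: read a byte length, then decode values back to back. A self-contained sketch (decodeVarint is an illustrative helper, not the vendored one):

package main

import "fmt"

// decodeVarint is a minimal varint reader, mirroring Buffer.DecodeVarint.
func decodeVarint(b []byte) (x uint64, n int) {
	for shift := uint(0); n < len(b); shift += 7 {
		c := b[n]
		n++
		x |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return x, n
		}
	}
	return 0, 0
}

func main() {
	// A packed repeated field is one length-delimited record: key, byte
	// length, then values back to back. []bool{true, false, true} under
	// field 1 encodes as 0x0a 0x03 0x01 0x00 0x01; below is the payload
	// after the 0x0a key byte.
	data := []byte{0x03, 0x01, 0x00, 0x01}
	length, n := decodeVarint(data)
	var out []bool
	for _, c := range data[n : n+int(length)] {
		out = append(out, c != 0)
	}
	fmt.Println(out) // [true false true]
}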
-func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. -func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. 
- keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. -func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). 
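dec_new_map above leans on the fact that a map entry is wired exactly like a two-field message, which is why it dispatches on the single-byte tagcodes for tags 1 (key) and 2 (value). A standalone sketch of that layout (the bytes are a worked example, not from the diff):

package main

import "fmt"

func main() {
	// map[string]string{"a": "b"} under field 3 becomes one length-delimited
	// record: key 0x1a (3<<3 | 2), length 0x06, then the entry below.
	entry := []byte{
		0x0a, 0x01, 'a', // field 1 (map key), length-delimited, "a"
		0x12, 0x01, 'b', // field 2 (map value), length-delimited, "b"
	}
	fmt.Printf("% x\n", entry)
}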
-func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto has unfortunately been inconsistent about + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index bd0e3bb4c8..dea2617ced 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -35,8 +35,14 @@ import ( "fmt" "reflect" "strings" + "sync" + "sync/atomic" ) +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -49,9 +55,202 @@ import ( // For proto2 messages, the unknown fields of message extensions are only // discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate an InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. discardLegacy(m) } +// DiscardUnknown recursively discards all unknown fields.
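A minimal sketch of the dispatch pattern DiscardUnknown now uses, with stand-in types rather than the real proto.Message: prefer the generated XXX_DiscardUnknown method when present, otherwise fall back to reflection.

package main

import "fmt"

type generatedDiscarder interface{ XXX_DiscardUnknown() }

// newMsg stands in for a freshly generated message with the fast path.
type newMsg struct{ unknown []byte }

func (m *newMsg) XXX_DiscardUnknown() { m.unknown = nil }

// legacyMsg stands in for an older message without the generated method.
type legacyMsg struct{ unknown []byte }

func discardUnknown(m interface{}) {
	if d, ok := m.(generatedDiscarder); ok {
		d.XXX_DiscardUnknown() // fast path, no reflection
		return
	}
	fmt.Println("would fall back to reflection-based discardLegacy")
}

func main() {
	n := &newMsg{unknown: []byte{0x08, 0x01}}
	discardUnknown(n)
	fmt.Println(len(n.unknown)) // 0
	discardUnknown(&legacyMsg{unknown: []byte{0x08, 0x01}})
}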
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + func discardLegacy(m Message) { v := reflect.ValueOf(m) if v.Kind() != reflect.Ptr || v.IsNil() { @@ -139,7 +338,7 @@ func discardLegacy(m Message) { // For proto2 messages, only discard unknown fields in message extensions // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { + if em, err := extendable(m); err == nil { // Ignore lock since discardLegacy is not concurrency safe. emm, _ := em.extensionsRead() for _, mx := range emm { diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b22d..c27d35f866 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,7 +39,6 @@ import ( "errors" "fmt" "reflect" - "sort" ) // RequiredNotSetError is the error returned if Marshal is called with @@ -82,10 +81,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +165,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. 
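A worked standalone example of the zigzag mapping EncodeZigzag64 implements here (the arithmetic right shift smears the sign bit so small negatives stay small on the wire):

package main

import "fmt"

func main() {
	// Zigzag maps signed ints to unsigned: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3.
	for _, x := range []int64{0, -1, 1, -2, 2} {
		z := uint64(x<<1) ^ uint64(x>>63) // x>>63 is all ones iff x is negative
		fmt.Printf("%d -> %d\n", x, z)
	}
}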
- return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. 
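The proto3 encoders being removed here share one rule worth calling out: fields at their zero value are omitted from the wire entirely. A standalone sketch (encodeProto3Bool is an illustrative helper, not a proto API):

package main

import "fmt"

// encodeProto3Bool mirrors the rule in enc_proto3_bool/size_proto3_bool above:
// a proto3 scalar at its zero value contributes no bytes at all.
func encodeProto3Bool(buf []byte, fieldNum int, v bool) []byte {
	if !v {
		return buf // zero value: emit nothing
	}
	return append(buf, byte(fieldNum<<3), 1) // key (wire type 0) + one value byte
}

func main() {
	fmt.Printf("% x\n", encodeProto3Bool(nil, 1, true))           // 08 01
	fmt.Printf("len=%d\n", len(encodeProto3Bool(nil, 1, false))) // len=0
}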
-func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +219,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. 
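The rewritten EncodeMessage above replaces the properties-walking implementation with two calls: Size to compute the body length, then a varint prefix and Marshal. The result is the same length-delimited framing EncodeRawBytes uses for bytes and embedded messages. A hedged sketch using only exported APIs (msg stands for any generated proto.Message):

    import "github.com/golang/protobuf/proto"

    // frame writes varint(len(body)) followed by the marshaled body,
    // mirroring what the new EncodeMessage does internally.
    func frame(msg proto.Message) ([]byte, error) {
        var buf proto.Buffer
        if err := buf.EncodeMessage(msg); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil
    }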
-func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? 
- if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. -func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. 
- for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. - - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. 
- if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." 
+ reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf5966..d4db5a1c14 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad218312..816a3b9d6c 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. 
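With extendable returning an error instead of a bool (above), a nil receiver or a non-extendable type now surfaces as a descriptive error from the exported helpers rather than a silent no-op. A hedged sketch of the caller-visible API; pb.MyMessage and pb.E_MyExt are hypothetical generated identifiers, not part of this diff:

    import "github.com/golang/protobuf/proto"

    func roundTripExt(msg *pb.MyMessage) (string, error) {
        // After this change, a nil *pb.MyMessage fails here with
        // "proto: nil *pb.MyMessage is not extendable" instead of panicking later.
        if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("hello")); err != nil {
            return "", err
        }
        if !proto.HasExtension(msg, pb.E_MyExt) {
            return "", proto.ErrMissingExtension
        }
        v, err := proto.GetExtension(msg, pb.E_MyExt)
        if err != nil {
            return "", err
        }
        return *v.(*string), nil
    }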
@@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? 
- epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. 
value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504a0..0e2191b8ad 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. 
func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. */ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. 
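The new SetDeterministic method above is scoped to a single Buffer; package-level Marshal is unaffected. A hedged usage sketch (msg is any generated proto.Message), keeping the doc comment's caveat in mind that the output is stable only for one binary, not canonical across languages or builds:

    import "github.com/golang/protobuf/proto"

    func stableBytes(msg proto.Message) ([]byte, error) {
        var buf proto.Buffer
        buf.SetDeterministic(true) // map entries are sorted by key during this Marshal
        if err := buf.Marshal(msg); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil // equal messages yield identical bytes for this binary
    }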
+type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index fd982decd6..3b6ca41d5e 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e16..b6cad90834 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. // It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. 
- if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. 
-func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. 
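getInt32Ptr/setInt32Ptr above exist because of the conversion rule their comments call out: *int32 converts to a pointer-to-enum, but **int32 does not convert to **enum, so the pure-reflect path has to allocate and convert one level of indirection down. A small standalone check of that rule:

    package main

    import (
        "fmt"
        "reflect"
    )

    type enum int32 // stand-in for a generated protobuf enum type

    func main() {
        v := int32(7)
        p := &v

        // *int32 -> *enum is legal: both pointed-to types have underlying type int32.
        e := (*enum)(p)
        fmt.Println(*e) // 7

        // **int32 -> **enum is not: *int32 and *enum are distinct pointed-to types.
        fmt.Println(reflect.TypeOf(&p).ConvertibleTo(reflect.TypeOf((**enum)(nil)))) // false
    }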
-type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. 
-func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. 
-type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } -// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. 
+func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? + return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47c..d55a335d94 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. 
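
The atomicLoad*/atomicStore* helpers just above serialize every load and store through one sync.Mutex, because this reflect-only build cannot use sync/atomic's pointer operations (those require unsafe.Pointer). A minimal sketch of the same pattern for an arbitrary pointer type; the info type and function names here are hypothetical:

    package main

    import (
        "fmt"
        "sync"
    )

    type info struct{ n int }

    var mu sync.Mutex // one lock guards every load and store

    func loadInfo(p **info) *info {
        mu.Lock()
        defer mu.Unlock()
        return *p
    }

    func storeInfo(p **info, v *info) {
        mu.Lock()
        defer mu.Unlock()
        *p = v
    }

    func main() {
        var cached *info
        storeInfo(&cached, &info{n: 42})
        fmt.Println(loadInfo(&cached).n) // 42
    }
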
- -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. -func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. 
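
toPointer and toAddrPointer above lean on the gc runtime's representation of an interface value as two machine words: a type pointer followed by a data pointer. A minimal sketch, assuming that two-word layout, that pulls the data word out of an interface the same way the "super-tricky" code does:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type msg struct{ x int }

    func main() {
        m := &msg{x: 7}
        var i interface{} = m // interface header: [type word, data word]

        // Read the second word of the interface header: the data pointer.
        data := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]
        fmt.Println((*msg)(data).x) // 7
    }
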
-func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. 
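
pointer.offset above is plain address arithmetic: the struct's base address plus the field's byte offset, which is also how the removed structPointer_* accessors worked. A minimal sketch using unsafe.Offsetof for the offset, plus reflect.NewAt for the reverse bridge that asPointerTo provides; the row type is hypothetical:

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    type row struct {
        a int32
        b string
    }

    func main() {
        r := row{a: 1, b: "hi"}
        base := unsafe.Pointer(&r)
        off := unsafe.Offsetof(r.b) // byte offset of field b, like a `field` value

        // Equivalent of ptr.offset(off).toString():
        pb := (*string)(unsafe.Pointer(uintptr(base) + off))
        fmt.Println(*pb) // hi

        // asPointerTo-style bridge back into reflect, without copying:
        rv := reflect.NewAt(reflect.TypeOf(""), unsafe.Pointer(pb))
        fmt.Println(rv.Elem().String()) // hi
    }
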
+func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
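
The comments above call out an intentional behavioral split: this unsafe getInt32Slice returns a slice that aliases the message's storage, while the reflect version returns a fresh copy. A tiny sketch of what that difference means to a caller:

    package main

    import "fmt"

    func main() {
        backing := []int32{1, 2, 3}

        aliased := backing // unsafe path: shares the backing array
        copied := make([]int32, len(backing))
        copy(copied, backing) // reflect path: element-wise copy

        backing[0] = 99
        fmt.Println(aliased[0]) // 99 -- sees the mutation
        fmt.Println(copied[0])  // 1  -- unaffected
    }
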
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c005..f710adab09 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. 
-type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -187,36 +144,19 @@ type Properties struct { Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
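
Properties.Parse above splits the generated tag on commas: wire encoding first, then the tag number, then options, with def= always last because commas inside a default are not escaped (hence the new `break outer`). A minimal sketch of reading such a tag; the example struct and its tag value are hypothetical:

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    type example struct {
        Name *string `protobuf:"bytes,3,opt,name=name,def=hi,there"`
    }

    func main() {
        f, _ := reflect.TypeOf(example{}).FieldByName("Name")
        fields := strings.Split(f.Tag.Get("protobuf"), ",")

        wire, tag := fields[0], fields[1]
        fmt.Println(wire, tag) // bytes 3

        for i := 2; i < len(fields); i++ {
            if strings.HasPrefix(fields[i], "def=") {
                // def is always last; rejoin any commas it contained.
                fmt.Println(strings.Join(fields[i:], ",")[len("def="):]) // hi,there
                break
            }
        }
    }
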
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - 
p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + 
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. 
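
getPropertiesLocked above computes StructProperties once per message type and stores the entry in propertiesMap before building the per-field data, so recursive message types terminate; the lockGetProp flag appears to control only whether a nested init may take the lock again. A minimal sketch of that memoization shape (all names hypothetical):

    package main

    import (
        "fmt"
        "reflect"
        "sync"
    )

    type props struct{ numFields int }

    var (
        mu    sync.Mutex
        cache = map[reflect.Type]*props{}
    )

    func getProps(t reflect.Type) *props {
        mu.Lock()
        defer mu.Unlock()
        if p, ok := cache[t]; ok {
            return p // computed on an earlier call
        }
        p := &props{numFields: t.NumField()}
        cache[t] = p // cache first; the real code then recurses into subfields
        return p
    }

    func main() {
        type msg struct{ A, B int }
        t := reflect.TypeOf(msg{})
        fmt.Println(getProps(t).numFields, getProps(t) == getProps(t)) // 2 true
    }
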
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 0000000000..0f212b3029 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2681 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the size of
+// the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. 
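
The isNil guards in Size and Marshal above handle Go's typed-nil gotcha: a nil *SomeMessage stored in the Message interface makes the interface itself non-nil, so a plain `msg == nil` check never fires. A two-line illustration (the someMessage type is hypothetical):

    package main

    import "fmt"

    type someMessage struct{}

    func main() {
        var p *someMessage     // nil pointer
        var i interface{} = p  // interface now carries a type word, so it is non-nil

        fmt.Println(p == nil) // true
        fmt.Println(i == nil) // false -- why Size/Marshal inspect the data word instead
    }
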
+func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
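
computeMarshalInfo above is one half of a double-checked initialization: callers take the fast path on atomic.LoadInt32(&u.initialized), and the computation re-checks the flag under the struct's lock so racing initializers do the work only once. A minimal sketch of the pattern with a hypothetical table type:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type table struct {
        sync.Mutex
        initialized int32
        fields      []string
    }

    func (t *table) get() []string {
        if atomic.LoadInt32(&t.initialized) == 0 {
            t.compute()
        }
        return t.fields
    }

    func (t *table) compute() {
        t.Lock()
        defer t.Unlock()
        if t.initialized != 0 { // lost the race; another goroutine finished first
            return
        }
        t.fields = []string{"a", "b"} // expensive setup goes here
        atomic.StoreInt32(&t.initialized, 1)
    }

    func main() {
        t := new(table)
        fmt.Println(t.get()) // [a b]
    }
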
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
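
setTag above packs the field number and wire type into the single varint key that precedes every field on the wire: wiretag = tag<<3 | wt. Working the "bytes,49" example from the earlier comment through: 49<<3 | 2 = 394, which encodes as the two varint bytes 0x8a 0x03, and tagsize is likewise 2. A small sketch of the arithmetic, with sizeVarint re-implemented here only to keep the example self-contained:

    package main

    import "fmt"

    const wireBytes = 2 // wire type for length-delimited fields

    // sizeVarint mirrors the usual base-128 varint size computation.
    func sizeVarint(x uint64) int {
        n := 1
        for x >= 0x80 {
            x >>= 7
            n++
        }
        return n
    }

    func main() {
        tag := uint64(49) // field number from "bytes,49,opt,..."
        wiretag := tag<<3 | wireBytes

        fmt.Printf("wiretag=%d (%#x %#x)\n", wiretag, byte(wiretag&0x7f|0x80), byte(wiretag>>7))
        fmt.Println("tagsize =", sizeVarint(tag<<3)) // 2 bytes on the wire
    }
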
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed 
{ + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
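+
+// Worked example (illustrative, not from the upstream file): a present
+// fixed32 field with field number 1 has wiretag = 1<<3|WireFixed32 = 13,
+// which fits in a single varint byte, so tagsize is 1 and sizeFixed32Value
+// below reports 4 + 1 = 5 bytes on the wire.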
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
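+
+// Worked example for appendVarint above (illustrative, not from the
+// upstream file): appendVarint(b, 300) takes the v < 1<<14 branch and
+// appends 0xac, 0x02: the low seven bits 0101100 with the continuation
+// bit set give 0xac, and the remaining bits 10 give 0x02.
+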
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
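+//
+// Worked example (illustrative, not from the upstream file): with
+// makeMessageMarshaler above, an embedded message with field number 4 and
+// a 10-byte encoded body is written as the tag varint 4<<3|WireBytes =
+// 0x22, the length varint 0x0a, then the body: 12 bytes total, matching
+// siz + SizeVarint(uint64(siz)) + tagsize in the sizer. The slice variant
+// below repeats this framing once per element.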
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. 
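+//
+// Worked example for makeMapMarshaler above (illustrative, not from the
+// upstream file): one entry of a map<string, int32> field with field
+// number 5, key "a", and value 7 is framed like a small message:
+//   0x2a 0x05        field 5, wire type bytes; entry body is 5 bytes
+//   0x0a 0x01 0x61   key: tag 1, length 1, "a"
+//   0x10 0x07        value: tag 2, varint 7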
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. 
+// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 0000000000..5525def6a5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src are of the same type and are non-nil.
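+//
+// Illustrative usage sketch (not from the upstream file), assuming a
+// hypothetical generated message type pb.Foo with optional fields:
+//
+//   dst := &pb.Foo{Name: proto.String("alice")}
+//   src := &pb.Foo{Id: proto.Int32(7)}
+//   proto.Merge(dst, src) // dst now has both Name and Id set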
+func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) 
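+						// append can leave dst nil when dst was nil and src is non-nil but empty;
+						// the check below normalizes to a non-nil empty slice so a set-but-empty
+						// src field stays visibly set after the merge.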
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
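+				// The oneof interface holds a generated *Msg_X wrapper. Merging copies the
+				// wrapper's single field, allocating dst's wrapper (and, for message cases,
+				// the inner message) when it is missing.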
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 0000000000..55f0340a3f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,1967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. 
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1 when reqFields is all set; 0 otherwise
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int32]Extension), or invalidField if it does not exist
+	extensionRanges []ExtensionRange              // if non-empty, the extension ranges in this message
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+// unmarshalFieldInfo contains the information for unmarshaling a field.
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64            // bitmask of required fields we've seen
+	var rnse *RequiredNotSetError // an instance of a RequiredNotSetError returned by a submessage
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				rnse = r
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if rnse != nil {
+		// A required field of a submessage/group is missing. Return that error.
+		return rnse
+	}
+	if reqMask != u.reqMask {
+		// A required field of this message is missing.
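+		// Walk reqFields in order: bit i of reqMask corresponds to reqFields[i],
+		// so report the first field whose bit is still clear.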
+ for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
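+	// XXX_OneofFuncs is generated on the message type; its fourth result lists
+	// the concrete wrapper types, one per oneof case, which is all we need here.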
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	if fn.IsValid() {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1]
+			tag, err := strconv.Atoi(tagstr)
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tagstr)
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(tag, of.field, unmarshal, 0)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0)
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+	}
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
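
The unmarshalers that follow all build on two primitives: base-128 varint decoding (decodeVarint, defined near the end of this file) and, for sint32/sint64 fields, the zigzag transform `int64(x>>1) ^ int64(x)<<63>>63`. A minimal standalone sketch of both, separate from the vendored package and with names invented for the example:

package main

import "fmt"

// readVarint plays the role of decodeVarint: each byte carries 7 payload
// bits, least-significant group first; a set high bit means "more bytes".
func readVarint(b []byte) (x uint64, n int) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0 // truncated input
		}
		c := b[n]
		n++
		x |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return x, n
		}
	}
	return 0, 0 // more than 10 bytes: overflow
}

func main() {
	// 300 encodes as 0xAC 0x02: 0xAC = 0x80|0x2C carries the low 7 bits.
	x, n := readVarint([]byte{0xac, 0x02})
	fmt.Println(x, n) // 300 2

	// sint* fields are zigzag-encoded: -3 becomes 5 on the wire. The decode
	// below is the same shift trick unmarshalSint64Value uses: the low bit
	// holds the sign, the remaining bits the magnitude.
	enc := uint64(5)
	fmt.Println(int64(enc>>1) ^ int64(enc)<<63>>63) // -3
}

The two-byte fast path at the top of unmarshal above is this same decoding, unrolled for the common case of small tags.
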
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
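+	// Reassemble the four little-endian payload bytes into the IEEE-754 bit pattern.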
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
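+	// As in unmarshalBytesValue, append copies the bytes out of b so the
+	// stored slice does not alias the caller's buffer.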
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. 
Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf03..2205fdaadf 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. 
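The unrolled decodeVarint above trades a loop for straight-line code. A compact loop version of the same base-128 encoding, as a standalone sketch, shows the round trip (the unrolled original additionally rejects a tenth byte that would overflow 64 bits):

package main

import "fmt"

func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80)) // low 7 bits, continuation bit set
		x >>= 7
	}
	return append(b, byte(x)) // final byte has the high bit clear
}

func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0 // truncated input (the original also checks 64-bit overflow)
}

func main() {
	b := encodeVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02
	v, n := decodeVarint(b)
	fmt.Println(v, n) // 300 2
}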
for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f28..0685bae36d 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
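The rewritten unescape funnels \x, \u and \U escapes through a single strconv.ParseUint call in base 16 and octal escapes through one in base 8, replacing the removed hand-rolled unhex. A standalone sketch of the three cases (\x produces a raw byte, \u a Unicode code point):

package main

import (
	"fmt"
	"strconv"
)

func main() {
	b, _ := strconv.ParseUint("41", 16, 64) // \x41: two hex digits, one byte
	fmt.Println(string([]byte{byte(b)}))    // A

	o, _ := strconv.ParseUint("101", 8, 8) // \101: three octal digits, one byte
	fmt.Println(string([]byte{byte(o)}))   // A

	r, _ := strconv.ParseUint("00e9", 16, 64) // \u00e9: four hex digits, a rune
	fmt.Println(string(rune(r)))              // é
}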
func (p *textParser) back() { p.backed = true } @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index c6a91bcab9..e855b1f5c4 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,36 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/descriptor.proto -/* -Package descriptor is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/descriptor.proto - -It has these top-level messages: - FileDescriptorSet - FileDescriptorProto - DescriptorProto - ExtensionRangeOptions - FieldDescriptorProto - OneofDescriptorProto - EnumDescriptorProto - EnumValueDescriptorProto - ServiceDescriptorProto - MethodDescriptorProto - FileOptions - MessageOptions - FieldOptions - OneofOptions - EnumOptions - EnumValueOptions - ServiceOptions - MethodOptions - UninterpretedOption - SourceCodeInfo - GeneratedCodeInfo -*/ -package descriptor +package descriptor // import "github.com/golang/protobuf/protoc-gen-go/descriptor" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -138,7 +109,9 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 0} +} type FieldDescriptorProto_Label int32 @@ -177,7 +150,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{4, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4, 1} } // Generated classes can be optimized for speed or code size. 
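The simplified UnmarshalText now returns the TextUnmarshaler result directly instead of staging it in a local. A standalone sketch of that fast path; the type and helper here are hypothetical stand-ins, not the proto API:

package main

import (
	"encoding"
	"fmt"
)

type custom struct{ s string }

// custom satisfies encoding.TextUnmarshaler, so a text parser can be
// bypassed entirely.
func (c *custom) UnmarshalText(b []byte) error { c.s = string(b); return nil }

func unmarshalText(s string, v interface{}) error {
	if um, ok := v.(encoding.TextUnmarshaler); ok {
		return um.UnmarshalText([]byte(s)) // fast path, as in the diff
	}
	return fmt.Errorf("no fallback parser in this sketch")
}

func main() {
	var c custom
	if err := unmarshalText("hello", &c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.s) // hello
}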
@@ -217,7 +190,9 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10, 0} +} type FieldOptions_CType int32 @@ -255,7 +230,9 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 0} +} type FieldOptions_JSType int32 @@ -295,7 +272,9 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12, 1} +} // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, // or neither? HTTP based RPC implementation may choose GET verb for safe @@ -336,20 +315,41 @@ func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { return nil } func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor0, []int{17, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_unrecognized []byte `json:"-"` + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(dst, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) } -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -382,14 +382,35 @@ type FileDescriptorProto struct { SourceCodeInfo *SourceCodeInfo 
`protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` // The syntax of the proto file. // The supported values are "proto2" and "proto3". - Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_unrecognized []byte `json:"-"` + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(dst, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) } -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -488,14 +509,35 @@ type DescriptorProto struct { ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` // Reserved field names, which may not be used by fields in the same message. // A given name may only be reserved once. 
- ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_unrecognized []byte `json:"-"` + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(dst, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) } -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -568,19 +610,38 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(dst, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { 
+ xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) } +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + func (m *DescriptorProto_ExtensionRange) GetStart() int32 { if m != nil && m.Start != nil { return *m.Start @@ -606,17 +667,36 @@ func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { // fields or extension ranges in the same message. Reserved ranges may // not overlap. type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{2, 1} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (dst *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(dst, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) } +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo func (m *DescriptorProto_ReservedRange) GetStart() int32 { if m != nil && m.Start != nil { @@ -635,22 +715,43 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { type ExtensionRangeOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{3} +} var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ExtensionRangeOptions } +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (dst *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(dst, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -689,15 +790,36 @@ type FieldDescriptorProto struct { // user has set a "json_name" option on this field, that option's value // will be used. Otherwise, it's deduced from the field's name by converting // it to camelCase. 
- JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(dst, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -771,15 +893,36 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. 
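Every regenerated message in this file gains the same block: an XXX_NoUnkeyedLiteral struct{} field, which makes unkeyed composite literals impractical (the same reason the proto.ExtensionRange literals in this diff pick up Start:/End: keys), plus XXX_* wrappers that forward to a package-level proto.InternalMessageInfo. A schematic of that forwarding shape, not the real proto internals:

package main

import (
	"fmt"
	"sync"
)

// messageInfo stands in for proto.InternalMessageInfo: a per-type cache
// populated once, so repeated calls skip the expensive reflection work.
type messageInfo struct {
	once sync.Once
	name string // stand-in for cached marshal/size tables
}

func (mi *messageInfo) size(m interface{}) int {
	mi.once.Do(func() { mi.name = fmt.Sprintf("%T", m) }) // computed once
	return len(mi.name)
}

type exampleMsg struct{}

var infoExampleMsg messageInfo

// XXX_Size mirrors the generated wrappers above: no logic of its own, it
// only forwards to the shared info value for its type.
func (m *exampleMsg) XXX_Size() int { return infoExampleMsg.size(m) }

func main() {
	fmt.Println((&exampleMsg{}).XXX_Size())
}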
type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(dst, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) } -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -797,16 +940,44 @@ func (m *OneofDescriptorProto) GetOptions() *OneofOptions { // Describes an enum type. type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(dst, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) } -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -829,18 +1000,105 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (dst *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(dst, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a value within an enum. 
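The doc comment above is the crux of EnumReservedRange: its End is inclusive, unlike the exclusive End of DescriptorProto.ReservedRange, so an enum range can name math.MaxInt32 itself. A small standalone check of the difference:

package main

import (
	"fmt"
	"math"
)

func inMessageRange(n, start, end int32) bool { return n >= start && n < end }  // exclusive end
func inEnumRange(n, start, end int32) bool    { return n >= start && n <= end } // inclusive end

func main() {
	max := int32(math.MaxInt32)
	fmt.Println(inMessageRange(max, 1, max)) // false: End itself is excluded
	fmt.Println(inEnumRange(max, 1, max))    // true: the full int32 domain is reachable
}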
type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(dst, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -865,16 +1123,37 @@ func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { // Describes a service. 
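The GetName/GetNumber getters repeated for each message are nil-safe twice over: they tolerate both a nil receiver and an unset (nil-pointer) field, returning the zero value in either case. A standalone reduction of the pattern:

package main

import "fmt"

type enumValue struct {
	Name *string // optional proto2 field: a nil pointer means "unset"
}

func (m *enumValue) GetName() string {
	if m != nil && m.Name != nil {
		return *m.Name
	}
	return "" // zero value for nil receiver or unset field
}

func main() {
	var m *enumValue                // nil receiver is fine
	fmt.Printf("%q\n", m.GetName()) // ""
	n := "FOO"
	fmt.Printf("%q\n", (&enumValue{Name: &n}).GetName()) // "FOO"
}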
type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (dst *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(dst, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -908,14 +1187,35 @@ type MethodDescriptorProto struct { // Identifies if client streams multiple client messages ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_unrecognized []byte `json:"-"` + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (dst 
*MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(dst, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) } -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -982,7 +1282,7 @@ type FileOptions struct { // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -1036,24 +1336,46 @@ type FileOptions struct { // is empty. When this option is empty, the package name will be used for // determining the namespace. PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{10} +} var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FileOptions } +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (dst *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(dst, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo const Default_FileOptions_JavaMultipleFiles bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false @@ -1086,6 +1408,7 @@ func (m *FileOptions) GetJavaMultipleFiles() bool { return Default_FileOptions_JavaMultipleFiles } +// Deprecated: Do not use. func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash @@ -1251,22 +1574,43 @@ type MessageOptions struct { MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{11} +} var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MessageOptions } +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (dst *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(dst, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo const Default_MessageOptions_MessageSetWireFormat bool = false const Default_MessageOptions_NoStandardDescriptorAccessor bool = false @@ -1369,22 +1713,43 @@ type FieldOptions struct { Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{12} +} var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_FieldOptions } +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (dst *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(dst, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL @@ -1444,22 +1809,43 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{13} +} var extRange_OneofOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_OneofOptions } +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (dst *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(dst, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { @@ -1479,22 +1865,43 @@ type EnumOptions struct { Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{14} +} var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumOptions } +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (dst *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(dst, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo const Default_EnumOptions_Deprecated bool = false @@ -1527,22 +1934,43 @@ type EnumValueOptions struct { Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{15} +} var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_EnumValueOptions } +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (dst *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(dst, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo const Default_EnumValueOptions_Deprecated bool = false @@ -1568,22 +1996,43 @@ type ServiceOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{16} +} var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_ServiceOptions } +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (dst *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(dst, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo const Default_ServiceOptions_Deprecated bool = false @@ -1610,22 +2059,43 @@ type MethodOptions struct { IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{17} +} var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { return extRange_MethodOptions } +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (dst *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(dst, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo const Default_MethodOptions_Deprecated bool = false const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN @@ -1661,19 +2131,40 @@ type UninterpretedOption struct { Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` // The value of the uninterpreted option, in whatever type the tokenizer // identified it as during parsing. Exactly one of these should be set. 
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_unrecognized []byte `json:"-"` + IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(dst, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1730,18 +2221,37 @@ func (m *UninterpretedOption) GetAggregateValue() string { // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents // "foo.(bar.baz).qux". 
type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_unrecognized []byte `json:"-"` + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{18, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (dst *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(dst, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) } +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + func (m *UninterpretedOption_NamePart) GetNamePart() string { if m != nil && m.NamePart != nil { return *m.NamePart @@ -1802,14 +2312,35 @@ type SourceCodeInfo struct { // - Code which tries to interpret locations should probably be designed to // ignore those that it doesn't understand, as more types of locations could // be recorded in the future. 
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_unrecognized []byte `json:"-"` + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(dst, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) } -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1899,13 +2430,34 @@ type SourceCodeInfo_Location struct { LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (dst *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(dst, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) } -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, 
[]int) { return fileDescriptor0, []int{19, 0} } +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1948,14 +2500,35 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { type GeneratedCodeInfo struct { // An Annotation connects some span of text in generated code to an element // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_unrecognized []byte `json:"-"` + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(dst, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1976,17 +2549,36 @@ type GeneratedCodeInfo_Annotation struct { // Identifies the ending offset in bytes in the generated code that // relates to the identified offset. The end offset should be one past // the last relevant byte (so the length of the text = end - begin). 
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor0, []int{20, 0} + return fileDescriptor_descriptor_4df4cb5f42392df6, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (dst *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(dst, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) } +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { if m != nil { return m.Path @@ -2025,6 +2617,7 @@ func init() { proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") @@ -2050,166 +2643,170 @@ func init() { proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } -func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_descriptor_4df4cb5f42392df6) +} -var fileDescriptor0 = []byte{ - // 2519 bytes of a gzipped FileDescriptorProto +var fileDescriptor_descriptor_4df4cb5f42392df6 = []byte{ + // 2555 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63, - 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec, - 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad, - 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50, - 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb, - 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 
0x33, - 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d, - 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90, - 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43, - 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4, - 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61, - 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a, - 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76, - 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68, - 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3, - 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55, - 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d, - 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6, - 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7, - 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa, - 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13, - 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2, - 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35, - 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e, - 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2, - 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec, - 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07, - 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94, - 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2, - 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e, - 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16, - 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91, - 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda, - 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79, - 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1, - 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9, - 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67, - 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b, - 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65, - 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba, - 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e, - 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48, - 0x39, 0x25, 0x68, 
0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c, - 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1, - 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91, - 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78, - 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c, - 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e, - 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61, - 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73, - 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76, - 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47, - 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f, - 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc, - 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f, - 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54, - 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e, - 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d, - 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7, - 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda, - 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4, - 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f, - 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82, - 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4, - 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13, - 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3, - 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9, - 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78, - 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e, - 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10, - 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, - 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03, - 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, - 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37, - 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f, - 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e, - 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0, - 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4, - 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 
0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80, - 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f, - 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96, - 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1, - 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa, - 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc, - 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59, - 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96, - 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50, - 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27, - 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58, - 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a, - 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67, - 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e, - 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27, - 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f, - 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0, - 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2, - 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4, - 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4, - 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c, - 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43, - 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76, - 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b, - 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28, - 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e, - 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55, - 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2, - 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd, - 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59, - 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27, - 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64, - 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d, - 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18, - 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1, - 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5, - 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 
0x50, 0xf2, 0x28, 0x79, 0x24, - 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8, - 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94, - 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91, - 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72, - 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c, - 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75, - 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4, - 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee, - 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e, - 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f, - 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20, - 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e, - 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8, - 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb, - 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16, - 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc, - 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1, - 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a, - 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd, - 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0, - 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25, - 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12, - 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e, - 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4, - 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09, - 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd, - 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43, - 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14, - 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7, - 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59, - 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e, - 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8, - 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a, - 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed, - 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f, - 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 
0xe4, - 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f, - 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24, - 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66, - 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57, - 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c, - 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00, + 0xf5, 0xcf, 0xf2, 0x4b, 0xe4, 0x21, 0x45, 0x8d, 0x46, 0x8a, 0xbd, 0x56, 0x3e, 0x2c, 0x33, 0x1f, + 0x96, 0x9d, 0x7f, 0xa8, 0xc0, 0xb1, 0x1d, 0x47, 0xfe, 0x23, 0x2d, 0x45, 0xae, 0x15, 0xaa, 0x12, + 0xc9, 0x2e, 0xa9, 0xe6, 0x03, 0x28, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, + 0xb4, 0xad, 0xa0, 0x17, 0x06, 0x7a, 0xd5, 0xab, 0xde, 0x16, 0x45, 0xd1, 0x8b, 0xde, 0x04, 0xe8, + 0x03, 0x14, 0xc8, 0x5d, 0x9f, 0xa0, 0x40, 0xde, 0xa0, 0x68, 0x0b, 0xb4, 0x8f, 0xd0, 0xcb, 0x62, + 0x66, 0x76, 0x97, 0xbb, 0x24, 0x15, 0x2b, 0x01, 0xe2, 0x5c, 0x91, 0xf3, 0x9b, 0xdf, 0x39, 0x73, + 0xe6, 0xcc, 0x99, 0x33, 0x67, 0x66, 0x61, 0x7b, 0xe4, 0x38, 0x23, 0x8b, 0xee, 0xba, 0x9e, 0x13, + 0x38, 0xa7, 0xd3, 0xe1, 0xae, 0x41, 0x7d, 0xdd, 0x33, 0xdd, 0xc0, 0xf1, 0xea, 0x1c, 0xc3, 0x6b, + 0x82, 0x51, 0x8f, 0x18, 0xb5, 0x63, 0x58, 0x7f, 0x60, 0x5a, 0xb4, 0x15, 0x13, 0xfb, 0x34, 0xc0, + 0xf7, 0x20, 0x37, 0x34, 0x2d, 0x2a, 0x4b, 0xdb, 0xd9, 0x9d, 0xf2, 0xad, 0x37, 0xeb, 0x73, 0x42, + 0xf5, 0xb4, 0x44, 0x8f, 0xc1, 0x2a, 0x97, 0xa8, 0xfd, 0x2b, 0x07, 0x1b, 0x4b, 0x7a, 0x31, 0x86, + 0x9c, 0x4d, 0x26, 0x4c, 0xa3, 0xb4, 0x53, 0x52, 0xf9, 0x7f, 0x2c, 0xc3, 0x8a, 0x4b, 0xf4, 0x47, + 0x64, 0x44, 0xe5, 0x0c, 0x87, 0xa3, 0x26, 0x7e, 0x1d, 0xc0, 0xa0, 0x2e, 0xb5, 0x0d, 0x6a, 0xeb, + 0x67, 0x72, 0x76, 0x3b, 0xbb, 0x53, 0x52, 0x13, 0x08, 0x7e, 0x07, 0xd6, 0xdd, 0xe9, 0xa9, 0x65, + 0xea, 0x5a, 0x82, 0x06, 0xdb, 0xd9, 0x9d, 0xbc, 0x8a, 0x44, 0x47, 0x6b, 0x46, 0xbe, 0x0e, 0x6b, + 0x4f, 0x28, 0x79, 0x94, 0xa4, 0x96, 0x39, 0xb5, 0xca, 0xe0, 0x04, 0xb1, 0x09, 0x95, 0x09, 0xf5, + 0x7d, 0x32, 0xa2, 0x5a, 0x70, 0xe6, 0x52, 0x39, 0xc7, 0x67, 0xbf, 0xbd, 0x30, 0xfb, 0xf9, 0x99, + 0x97, 0x43, 0xa9, 0xc1, 0x99, 0x4b, 0x71, 0x03, 0x4a, 0xd4, 0x9e, 0x4e, 0x84, 0x86, 0xfc, 0x39, + 0xfe, 0x53, 0xec, 0xe9, 0x64, 0x5e, 0x4b, 0x91, 0x89, 0x85, 0x2a, 0x56, 0x7c, 0xea, 0x3d, 0x36, + 0x75, 0x2a, 0x17, 0xb8, 0x82, 0xeb, 0x0b, 0x0a, 0xfa, 0xa2, 0x7f, 0x5e, 0x47, 0x24, 0x87, 0x9b, + 0x50, 0xa2, 0x4f, 0x03, 0x6a, 0xfb, 0xa6, 0x63, 0xcb, 0x2b, 0x5c, 0xc9, 0x5b, 0x4b, 0x56, 0x91, + 0x5a, 0xc6, 0xbc, 0x8a, 0x99, 0x1c, 0xbe, 0x0b, 0x2b, 0x8e, 0x1b, 0x98, 0x8e, 0xed, 0xcb, 0xc5, + 0x6d, 0x69, 0xa7, 0x7c, 0xeb, 0xd5, 0xa5, 0x81, 0xd0, 0x15, 0x1c, 0x35, 0x22, 0xe3, 0x36, 0x20, + 0xdf, 0x99, 0x7a, 0x3a, 0xd5, 0x74, 0xc7, 0xa0, 0x9a, 0x69, 0x0f, 0x1d, 0xb9, 0xc4, 0x15, 0x5c, + 0x5d, 0x9c, 0x08, 0x27, 0x36, 0x1d, 0x83, 0xb6, 0xed, 0xa1, 0xa3, 0x56, 0xfd, 0x54, 0x1b, 0x5f, + 0x82, 0x82, 0x7f, 0x66, 0x07, 0xe4, 0xa9, 0x5c, 0xe1, 0x11, 0x12, 0xb6, 0x6a, 0x5f, 0x17, 0x60, + 0xed, 0x22, 0x21, 0x76, 0x1f, 0xf2, 0x43, 0x36, 0x4b, 0x39, 0xf3, 0x5d, 0x7c, 0x20, 0x64, 0xd2, + 0x4e, 0x2c, 0x7c, 0x4f, 0x27, 0x36, 0xa0, 0x6c, 0x53, 0x3f, 0xa0, 0x86, 0x88, 0x88, 0xec, 0x05, + 0x63, 0x0a, 0x84, 0xd0, 0x62, 0x48, 0xe5, 0xbe, 0x57, 0x48, 0x7d, 0x0a, 0x6b, 0xb1, 0x49, 0x9a, + 0x47, 0xec, 0x51, 0x14, 0x9b, 0xbb, 0xcf, 0xb3, 0xa4, 0xae, 0x44, 0x72, 0x2a, 0x13, 0x53, 0xab, + 0x34, 0xd5, 0xc6, 0x2d, 0x00, 0xc7, 0xa6, 0xce, 0x50, 0x33, 0xa8, 0x6e, 
0xc9, 0xc5, 0x73, 0xbc, + 0xd4, 0x65, 0x94, 0x05, 0x2f, 0x39, 0x02, 0xd5, 0x2d, 0xfc, 0xe1, 0x2c, 0xd4, 0x56, 0xce, 0x89, + 0x94, 0x63, 0xb1, 0xc9, 0x16, 0xa2, 0xed, 0x04, 0xaa, 0x1e, 0x65, 0x71, 0x4f, 0x8d, 0x70, 0x66, + 0x25, 0x6e, 0x44, 0xfd, 0xb9, 0x33, 0x53, 0x43, 0x31, 0x31, 0xb1, 0x55, 0x2f, 0xd9, 0xc4, 0x6f, + 0x40, 0x0c, 0x68, 0x3c, 0xac, 0x80, 0x67, 0xa1, 0x4a, 0x04, 0x76, 0xc8, 0x84, 0x6e, 0x7d, 0x09, + 0xd5, 0xb4, 0x7b, 0xf0, 0x26, 0xe4, 0xfd, 0x80, 0x78, 0x01, 0x8f, 0xc2, 0xbc, 0x2a, 0x1a, 0x18, + 0x41, 0x96, 0xda, 0x06, 0xcf, 0x72, 0x79, 0x95, 0xfd, 0xc5, 0x3f, 0x9d, 0x4d, 0x38, 0xcb, 0x27, + 0xfc, 0xf6, 0xe2, 0x8a, 0xa6, 0x34, 0xcf, 0xcf, 0x7b, 0xeb, 0x03, 0x58, 0x4d, 0x4d, 0xe0, 0xa2, + 0x43, 0xd7, 0x7e, 0x05, 0x2f, 0x2f, 0x55, 0x8d, 0x3f, 0x85, 0xcd, 0xa9, 0x6d, 0xda, 0x01, 0xf5, + 0x5c, 0x8f, 0xb2, 0x88, 0x15, 0x43, 0xc9, 0xff, 0x5e, 0x39, 0x27, 0xe6, 0x4e, 0x92, 0x6c, 0xa1, + 0x45, 0xdd, 0x98, 0x2e, 0x82, 0x37, 0x4b, 0xc5, 0xff, 0xac, 0xa0, 0x67, 0xcf, 0x9e, 0x3d, 0xcb, + 0xd4, 0x7e, 0x57, 0x80, 0xcd, 0x65, 0x7b, 0x66, 0xe9, 0xf6, 0xbd, 0x04, 0x05, 0x7b, 0x3a, 0x39, + 0xa5, 0x1e, 0x77, 0x52, 0x5e, 0x0d, 0x5b, 0xb8, 0x01, 0x79, 0x8b, 0x9c, 0x52, 0x4b, 0xce, 0x6d, + 0x4b, 0x3b, 0xd5, 0x5b, 0xef, 0x5c, 0x68, 0x57, 0xd6, 0x8f, 0x98, 0x88, 0x2a, 0x24, 0xf1, 0x47, + 0x90, 0x0b, 0x53, 0x34, 0xd3, 0x70, 0xf3, 0x62, 0x1a, 0xd8, 0x5e, 0x52, 0xb9, 0x1c, 0x7e, 0x05, + 0x4a, 0xec, 0x57, 0xc4, 0x46, 0x81, 0xdb, 0x5c, 0x64, 0x00, 0x8b, 0x0b, 0xbc, 0x05, 0x45, 0xbe, + 0x4d, 0x0c, 0x1a, 0x1d, 0x6d, 0x71, 0x9b, 0x05, 0x96, 0x41, 0x87, 0x64, 0x6a, 0x05, 0xda, 0x63, + 0x62, 0x4d, 0x29, 0x0f, 0xf8, 0x92, 0x5a, 0x09, 0xc1, 0x5f, 0x30, 0x0c, 0x5f, 0x85, 0xb2, 0xd8, + 0x55, 0xa6, 0x6d, 0xd0, 0xa7, 0x3c, 0x7b, 0xe6, 0x55, 0xb1, 0xd1, 0xda, 0x0c, 0x61, 0xc3, 0x3f, + 0xf4, 0x1d, 0x3b, 0x0a, 0x4d, 0x3e, 0x04, 0x03, 0xf8, 0xf0, 0x1f, 0xcc, 0x27, 0xee, 0xd7, 0x96, + 0x4f, 0x6f, 0x3e, 0xa6, 0x6a, 0x7f, 0xc9, 0x40, 0x8e, 0xe7, 0x8b, 0x35, 0x28, 0x0f, 0x3e, 0xeb, + 0x29, 0x5a, 0xab, 0x7b, 0xb2, 0x7f, 0xa4, 0x20, 0x09, 0x57, 0x01, 0x38, 0xf0, 0xe0, 0xa8, 0xdb, + 0x18, 0xa0, 0x4c, 0xdc, 0x6e, 0x77, 0x06, 0x77, 0x6f, 0xa3, 0x6c, 0x2c, 0x70, 0x22, 0x80, 0x5c, + 0x92, 0xf0, 0xfe, 0x2d, 0x94, 0xc7, 0x08, 0x2a, 0x42, 0x41, 0xfb, 0x53, 0xa5, 0x75, 0xf7, 0x36, + 0x2a, 0xa4, 0x91, 0xf7, 0x6f, 0xa1, 0x15, 0xbc, 0x0a, 0x25, 0x8e, 0xec, 0x77, 0xbb, 0x47, 0xa8, + 0x18, 0xeb, 0xec, 0x0f, 0xd4, 0x76, 0xe7, 0x00, 0x95, 0x62, 0x9d, 0x07, 0x6a, 0xf7, 0xa4, 0x87, + 0x20, 0xd6, 0x70, 0xac, 0xf4, 0xfb, 0x8d, 0x03, 0x05, 0x95, 0x63, 0xc6, 0xfe, 0x67, 0x03, 0xa5, + 0x8f, 0x2a, 0x29, 0xb3, 0xde, 0xbf, 0x85, 0x56, 0xe3, 0x21, 0x94, 0xce, 0xc9, 0x31, 0xaa, 0xe2, + 0x75, 0x58, 0x15, 0x43, 0x44, 0x46, 0xac, 0xcd, 0x41, 0x77, 0x6f, 0x23, 0x34, 0x33, 0x44, 0x68, + 0x59, 0x4f, 0x01, 0x77, 0x6f, 0x23, 0x5c, 0x6b, 0x42, 0x9e, 0x47, 0x17, 0xc6, 0x50, 0x3d, 0x6a, + 0xec, 0x2b, 0x47, 0x5a, 0xb7, 0x37, 0x68, 0x77, 0x3b, 0x8d, 0x23, 0x24, 0xcd, 0x30, 0x55, 0xf9, + 0xf9, 0x49, 0x5b, 0x55, 0x5a, 0x28, 0x93, 0xc4, 0x7a, 0x4a, 0x63, 0xa0, 0xb4, 0x50, 0xb6, 0xa6, + 0xc3, 0xe6, 0xb2, 0x3c, 0xb9, 0x74, 0x67, 0x24, 0x96, 0x38, 0x73, 0xce, 0x12, 0x73, 0x5d, 0x0b, + 0x4b, 0xfc, 0xcf, 0x0c, 0x6c, 0x2c, 0x39, 0x2b, 0x96, 0x0e, 0xf2, 0x13, 0xc8, 0x8b, 0x10, 0x15, + 0xa7, 0xe7, 0x8d, 0xa5, 0x87, 0x0e, 0x0f, 0xd8, 0x85, 0x13, 0x94, 0xcb, 0x25, 0x2b, 0x88, 0xec, + 0x39, 0x15, 0x04, 0x53, 0xb1, 0x90, 0xd3, 0x7f, 0xb9, 0x90, 0xd3, 0xc5, 0xb1, 0x77, 0xf7, 0x22, + 0xc7, 0x1e, 0xc7, 0xbe, 0x5b, 0x6e, 0xcf, 0x2f, 0xc9, 0xed, 0xf7, 0x61, 0x7d, 0x41, 0xd1, 0x85, + 
0x73, 0xec, 0xaf, 0x25, 0x90, 0xcf, 0x73, 0xce, 0x73, 0x32, 0x5d, 0x26, 0x95, 0xe9, 0xee, 0xcf, + 0x7b, 0xf0, 0xda, 0xf9, 0x8b, 0xb0, 0xb0, 0xd6, 0x5f, 0x49, 0x70, 0x69, 0x79, 0xa5, 0xb8, 0xd4, + 0x86, 0x8f, 0xa0, 0x30, 0xa1, 0xc1, 0xd8, 0x89, 0xaa, 0xa5, 0xb7, 0x97, 0x9c, 0xc1, 0xac, 0x7b, + 0x7e, 0xb1, 0x43, 0xa9, 0xe4, 0x21, 0x9e, 0x3d, 0xaf, 0xdc, 0x13, 0xd6, 0x2c, 0x58, 0xfa, 0x9b, + 0x0c, 0xbc, 0xbc, 0x54, 0xf9, 0x52, 0x43, 0x5f, 0x03, 0x30, 0x6d, 0x77, 0x1a, 0x88, 0x8a, 0x48, + 0x24, 0xd8, 0x12, 0x47, 0x78, 0xf2, 0x62, 0xc9, 0x73, 0x1a, 0xc4, 0xfd, 0x59, 0xde, 0x0f, 0x02, + 0xe2, 0x84, 0x7b, 0x33, 0x43, 0x73, 0xdc, 0xd0, 0xd7, 0xcf, 0x99, 0xe9, 0x42, 0x60, 0xbe, 0x07, + 0x48, 0xb7, 0x4c, 0x6a, 0x07, 0x9a, 0x1f, 0x78, 0x94, 0x4c, 0x4c, 0x7b, 0xc4, 0x4f, 0x90, 0xe2, + 0x5e, 0x7e, 0x48, 0x2c, 0x9f, 0xaa, 0x6b, 0xa2, 0xbb, 0x1f, 0xf5, 0x32, 0x09, 0x1e, 0x40, 0x5e, + 0x42, 0xa2, 0x90, 0x92, 0x10, 0xdd, 0xb1, 0x44, 0xed, 0xeb, 0x22, 0x94, 0x13, 0x75, 0x35, 0xbe, + 0x06, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0xf0, 0x44, 0x99, 0x61, 0xbd, 0xf0, 0xbe, + 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0xee, 0xb4, 0x22, + 0xa7, 0x62, 0xd6, 0xd7, 0x65, 0x5d, 0xcd, 0xa8, 0x07, 0xdf, 0x81, 0x0d, 0x2e, 0x31, 0x99, 0x5a, + 0x81, 0xe9, 0x5a, 0x54, 0x63, 0xb7, 0x37, 0x9f, 0x9f, 0x24, 0xb1, 0x65, 0xeb, 0x8c, 0x71, 0x1c, + 0x12, 0x98, 0x45, 0x3e, 0x6e, 0xc1, 0x6b, 0x5c, 0x6c, 0x44, 0x6d, 0xea, 0x91, 0x80, 0x6a, 0xf4, + 0x8b, 0x29, 0xb1, 0x7c, 0x8d, 0xd8, 0x86, 0x36, 0x26, 0xfe, 0x58, 0xde, 0x64, 0x0a, 0xf6, 0x33, + 0xb2, 0xa4, 0x5e, 0x61, 0xc4, 0x83, 0x90, 0xa7, 0x70, 0x5a, 0xc3, 0x36, 0x3e, 0x26, 0xfe, 0x18, + 0xef, 0xc1, 0x25, 0xae, 0xc5, 0x0f, 0x3c, 0xd3, 0x1e, 0x69, 0xfa, 0x98, 0xea, 0x8f, 0xb4, 0x69, + 0x30, 0xbc, 0x27, 0xbf, 0x92, 0x1c, 0x9f, 0x5b, 0xd8, 0xe7, 0x9c, 0x26, 0xa3, 0x9c, 0x04, 0xc3, + 0x7b, 0xb8, 0x0f, 0x15, 0xb6, 0x18, 0x13, 0xf3, 0x4b, 0xaa, 0x0d, 0x1d, 0x8f, 0x1f, 0x8d, 0xd5, + 0x25, 0xa9, 0x29, 0xe1, 0xc1, 0x7a, 0x37, 0x14, 0x38, 0x76, 0x0c, 0xba, 0x97, 0xef, 0xf7, 0x14, + 0xa5, 0xa5, 0x96, 0x23, 0x2d, 0x0f, 0x1c, 0x8f, 0x05, 0xd4, 0xc8, 0x89, 0x1d, 0x5c, 0x16, 0x01, + 0x35, 0x72, 0x22, 0xf7, 0xde, 0x81, 0x0d, 0x5d, 0x17, 0x73, 0x36, 0x75, 0x2d, 0xbc, 0x63, 0xf9, + 0x32, 0x4a, 0x39, 0x4b, 0xd7, 0x0f, 0x04, 0x21, 0x8c, 0x71, 0x1f, 0x7f, 0x08, 0x2f, 0xcf, 0x9c, + 0x95, 0x14, 0x5c, 0x5f, 0x98, 0xe5, 0xbc, 0xe8, 0x1d, 0xd8, 0x70, 0xcf, 0x16, 0x05, 0x71, 0x6a, + 0x44, 0xf7, 0x6c, 0x5e, 0xec, 0x03, 0xd8, 0x74, 0xc7, 0xee, 0xa2, 0xdc, 0xcd, 0xa4, 0x1c, 0x76, + 0xc7, 0xee, 0xbc, 0xe0, 0x5b, 0xfc, 0xc2, 0xed, 0x51, 0x9d, 0x04, 0xd4, 0x90, 0x2f, 0x27, 0xe9, + 0x89, 0x0e, 0xbc, 0x0b, 0x48, 0xd7, 0x35, 0x6a, 0x93, 0x53, 0x8b, 0x6a, 0xc4, 0xa3, 0x36, 0xf1, + 0xe5, 0xab, 0x49, 0x72, 0x55, 0xd7, 0x15, 0xde, 0xdb, 0xe0, 0x9d, 0xf8, 0x26, 0xac, 0x3b, 0xa7, + 0x0f, 0x75, 0x11, 0x92, 0x9a, 0xeb, 0xd1, 0xa1, 0xf9, 0x54, 0x7e, 0x93, 0xfb, 0x77, 0x8d, 0x75, + 0xf0, 0x80, 0xec, 0x71, 0x18, 0xdf, 0x00, 0xa4, 0xfb, 0x63, 0xe2, 0xb9, 0x3c, 0x27, 0xfb, 0x2e, + 0xd1, 0xa9, 0xfc, 0x96, 0xa0, 0x0a, 0xbc, 0x13, 0xc1, 0x6c, 0x4b, 0xf8, 0x4f, 0xcc, 0x61, 0x10, + 0x69, 0xbc, 0x2e, 0xb6, 0x04, 0xc7, 0x42, 0x6d, 0x3b, 0x80, 0x98, 0x2b, 0x52, 0x03, 0xef, 0x70, + 0x5a, 0xd5, 0x1d, 0xbb, 0xc9, 0x71, 0xdf, 0x80, 0x55, 0xc6, 0x9c, 0x0d, 0x7a, 0x43, 0x14, 0x64, + 0xee, 0x38, 0x31, 0xe2, 0x0f, 0x56, 0x1b, 0xd7, 0xf6, 0xa0, 0x92, 0x8c, 0x4f, 0x5c, 0x02, 0x11, + 0xa1, 0x48, 0x62, 0xc5, 0x4a, 0xb3, 0xdb, 0x62, 0x65, 0xc6, 0xe7, 0x0a, 0xca, 0xb0, 0x72, 0xe7, + 0xa8, 0x3d, 0x50, 0x34, 
0xf5, 0xa4, 0x33, 0x68, 0x1f, 0x2b, 0x28, 0x9b, 0xa8, 0xab, 0x0f, 0x73, + 0xc5, 0xb7, 0xd1, 0xf5, 0xda, 0x37, 0x19, 0xa8, 0xa6, 0x2f, 0x4a, 0xf8, 0xff, 0xe1, 0x72, 0xf4, + 0xaa, 0xe1, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0xe3, 0x4c, 0x88, 0x38, 0xc4, 0xe2, 0xa5, 0xdb, + 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0x5b, 0x4c, 0x48, 0x80, 0x8f, 0xe0, 0xaa, 0xed, + 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0x4f, 0xd2, 0x88, 0xae, 0x53, 0xdf, 0x77, + 0xc4, 0x81, 0x15, 0x6b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x96, 0xc9, 0x1b, 0x21, 0x75, 0x2e, + 0xcc, 0xb2, 0xe7, 0x85, 0xd9, 0x2b, 0x50, 0x9a, 0x10, 0x57, 0xa3, 0x76, 0xe0, 0x9d, 0xf1, 0xf2, + 0xb8, 0xa8, 0x16, 0x27, 0xc4, 0x55, 0x58, 0xfb, 0x85, 0xdc, 0x52, 0x0e, 0x73, 0xc5, 0x22, 0x2a, + 0x1d, 0xe6, 0x8a, 0x25, 0x04, 0xb5, 0x7f, 0x64, 0xa1, 0x92, 0x2c, 0x97, 0xd9, 0xed, 0x43, 0xe7, + 0x27, 0x8b, 0xc4, 0x73, 0xcf, 0x1b, 0xdf, 0x5a, 0x5c, 0xd7, 0x9b, 0xec, 0xc8, 0xd9, 0x2b, 0x88, + 0x22, 0x56, 0x15, 0x92, 0xec, 0xb8, 0x67, 0xd9, 0x86, 0x8a, 0xa2, 0xa1, 0xa8, 0x86, 0x2d, 0x7c, + 0x00, 0x85, 0x87, 0x3e, 0xd7, 0x5d, 0xe0, 0xba, 0xdf, 0xfc, 0x76, 0xdd, 0x87, 0x7d, 0xae, 0xbc, + 0x74, 0xd8, 0xd7, 0x3a, 0x5d, 0xf5, 0xb8, 0x71, 0xa4, 0x86, 0xe2, 0xf8, 0x0a, 0xe4, 0x2c, 0xf2, + 0xe5, 0x59, 0xfa, 0x70, 0xe2, 0xd0, 0x45, 0x17, 0xe1, 0x0a, 0xe4, 0x9e, 0x50, 0xf2, 0x28, 0x7d, + 0x24, 0x70, 0xe8, 0x07, 0xdc, 0x0c, 0xbb, 0x90, 0xe7, 0xfe, 0xc2, 0x00, 0xa1, 0xc7, 0xd0, 0x4b, + 0xb8, 0x08, 0xb9, 0x66, 0x57, 0x65, 0x1b, 0x02, 0x41, 0x45, 0xa0, 0x5a, 0xaf, 0xad, 0x34, 0x15, + 0x94, 0xa9, 0xdd, 0x81, 0x82, 0x70, 0x02, 0xdb, 0x2c, 0xb1, 0x1b, 0xd0, 0x4b, 0x61, 0x33, 0xd4, + 0x21, 0x45, 0xbd, 0x27, 0xc7, 0xfb, 0x8a, 0x8a, 0x32, 0xe9, 0xa5, 0xce, 0xa1, 0x7c, 0xcd, 0x87, + 0x4a, 0xb2, 0x5e, 0x7e, 0x31, 0x77, 0xe1, 0xbf, 0x4a, 0x50, 0x4e, 0xd4, 0xbf, 0xac, 0x70, 0x21, + 0x96, 0xe5, 0x3c, 0xd1, 0x88, 0x65, 0x12, 0x3f, 0x0c, 0x0d, 0xe0, 0x50, 0x83, 0x21, 0x17, 0x5d, + 0xba, 0x17, 0xb4, 0x45, 0xf2, 0xa8, 0x50, 0xfb, 0xa3, 0x04, 0x68, 0xbe, 0x00, 0x9d, 0x33, 0x53, + 0xfa, 0x31, 0xcd, 0xac, 0xfd, 0x41, 0x82, 0x6a, 0xba, 0xea, 0x9c, 0x33, 0xef, 0xda, 0x8f, 0x6a, + 0xde, 0xdf, 0x33, 0xb0, 0x9a, 0xaa, 0x35, 0x2f, 0x6a, 0xdd, 0x17, 0xb0, 0x6e, 0x1a, 0x74, 0xe2, + 0x3a, 0x01, 0xb5, 0xf5, 0x33, 0xcd, 0xa2, 0x8f, 0xa9, 0x25, 0xd7, 0x78, 0xd2, 0xd8, 0xfd, 0xf6, + 0x6a, 0xb6, 0xde, 0x9e, 0xc9, 0x1d, 0x31, 0xb1, 0xbd, 0x8d, 0x76, 0x4b, 0x39, 0xee, 0x75, 0x07, + 0x4a, 0xa7, 0xf9, 0x99, 0x76, 0xd2, 0xf9, 0x59, 0xa7, 0xfb, 0x49, 0x47, 0x45, 0xe6, 0x1c, 0xed, + 0x07, 0xdc, 0xf6, 0x3d, 0x40, 0xf3, 0x46, 0xe1, 0xcb, 0xb0, 0xcc, 0x2c, 0xf4, 0x12, 0xde, 0x80, + 0xb5, 0x4e, 0x57, 0xeb, 0xb7, 0x5b, 0x8a, 0xa6, 0x3c, 0x78, 0xa0, 0x34, 0x07, 0x7d, 0xf1, 0x3e, + 0x11, 0xb3, 0x07, 0xa9, 0x0d, 0x5e, 0xfb, 0x7d, 0x16, 0x36, 0x96, 0x58, 0x82, 0x1b, 0xe1, 0xcd, + 0x42, 0x5c, 0x76, 0xde, 0xbd, 0x88, 0xf5, 0x75, 0x56, 0x10, 0xf4, 0x88, 0x17, 0x84, 0x17, 0x91, + 0x1b, 0xc0, 0xbc, 0x64, 0x07, 0xe6, 0xd0, 0xa4, 0x5e, 0xf8, 0x9c, 0x23, 0xae, 0x1b, 0x6b, 0x33, + 0x5c, 0xbc, 0xe8, 0xfc, 0x1f, 0x60, 0xd7, 0xf1, 0xcd, 0xc0, 0x7c, 0x4c, 0x35, 0xd3, 0x8e, 0xde, + 0x7e, 0xd8, 0xf5, 0x23, 0xa7, 0xa2, 0xa8, 0xa7, 0x6d, 0x07, 0x31, 0xdb, 0xa6, 0x23, 0x32, 0xc7, + 0x66, 0xc9, 0x3c, 0xab, 0xa2, 0xa8, 0x27, 0x66, 0x5f, 0x83, 0x8a, 0xe1, 0x4c, 0x59, 0x4d, 0x26, + 0x78, 0xec, 0xec, 0x90, 0xd4, 0xb2, 0xc0, 0x62, 0x4a, 0x58, 0x6d, 0xcf, 0x1e, 0x9d, 0x2a, 0x6a, + 0x59, 0x60, 0x82, 0x72, 0x1d, 0xd6, 0xc8, 0x68, 0xe4, 0x31, 0xe5, 0x91, 0x22, 0x71, 0x7f, 0xa8, + 0xc6, 0x30, 0x27, 0x6e, 0x1d, 0x42, 0x31, 0xf2, 
0x03, 0x3b, 0xaa, 0x99, 0x27, 0x34, 0x57, 0x5c, + 0x8a, 0x33, 0x3b, 0x25, 0xb5, 0x68, 0x47, 0x9d, 0xd7, 0xa0, 0x62, 0xfa, 0xda, 0xec, 0x0d, 0x3d, + 0xb3, 0x9d, 0xd9, 0x29, 0xaa, 0x65, 0xd3, 0x8f, 0xdf, 0x1f, 0x6b, 0x5f, 0x65, 0xa0, 0x9a, 0xfe, + 0x06, 0x80, 0x5b, 0x50, 0xb4, 0x1c, 0x9d, 0xf0, 0xd0, 0x12, 0x1f, 0xa0, 0x76, 0x9e, 0xf3, 0xd9, + 0xa0, 0x7e, 0x14, 0xf2, 0xd5, 0x58, 0x72, 0xeb, 0x6f, 0x12, 0x14, 0x23, 0x18, 0x5f, 0x82, 0x9c, + 0x4b, 0x82, 0x31, 0x57, 0x97, 0xdf, 0xcf, 0x20, 0x49, 0xe5, 0x6d, 0x86, 0xfb, 0x2e, 0xb1, 0x79, + 0x08, 0x84, 0x38, 0x6b, 0xb3, 0x75, 0xb5, 0x28, 0x31, 0xf8, 0xe5, 0xc4, 0x99, 0x4c, 0xa8, 0x1d, + 0xf8, 0xd1, 0xba, 0x86, 0x78, 0x33, 0x84, 0xf1, 0x3b, 0xb0, 0x1e, 0x78, 0xc4, 0xb4, 0x52, 0xdc, + 0x1c, 0xe7, 0xa2, 0xa8, 0x23, 0x26, 0xef, 0xc1, 0x95, 0x48, 0xaf, 0x41, 0x03, 0xa2, 0x8f, 0xa9, + 0x31, 0x13, 0x2a, 0xf0, 0x47, 0x88, 0xcb, 0x21, 0xa1, 0x15, 0xf6, 0x47, 0xb2, 0xb5, 0x6f, 0x24, + 0x58, 0x8f, 0xae, 0x53, 0x46, 0xec, 0xac, 0x63, 0x00, 0x62, 0xdb, 0x4e, 0x90, 0x74, 0xd7, 0x62, + 0x28, 0x2f, 0xc8, 0xd5, 0x1b, 0xb1, 0x90, 0x9a, 0x50, 0xb0, 0x35, 0x01, 0x98, 0xf5, 0x9c, 0xeb, + 0xb6, 0xab, 0x50, 0x0e, 0x3f, 0xf0, 0xf0, 0xaf, 0x84, 0xe2, 0x02, 0x0e, 0x02, 0x62, 0xf7, 0x2e, + 0xbc, 0x09, 0xf9, 0x53, 0x3a, 0x32, 0xed, 0xf0, 0xd9, 0x56, 0x34, 0xa2, 0x67, 0x92, 0x5c, 0xfc, + 0x4c, 0xb2, 0xff, 0x5b, 0x09, 0x36, 0x74, 0x67, 0x32, 0x6f, 0xef, 0x3e, 0x9a, 0x7b, 0x05, 0xf0, + 0x3f, 0x96, 0x3e, 0xff, 0x68, 0x64, 0x06, 0xe3, 0xe9, 0x69, 0x5d, 0x77, 0x26, 0xbb, 0x23, 0xc7, + 0x22, 0xf6, 0x68, 0xf6, 0x99, 0x93, 0xff, 0xd1, 0xdf, 0x1d, 0x51, 0xfb, 0xdd, 0x91, 0x93, 0xf8, + 0xe8, 0x79, 0x7f, 0xf6, 0xf7, 0xbf, 0x92, 0xf4, 0xa7, 0x4c, 0xf6, 0xa0, 0xb7, 0xff, 0xe7, 0xcc, + 0xd6, 0x81, 0x18, 0xae, 0x17, 0xb9, 0x47, 0xa5, 0x43, 0x8b, 0xea, 0x6c, 0xca, 0xff, 0x0b, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x28, 0x25, 0x79, 0x42, 0x1d, 0x00, 0x00, } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f34601723d..f67edc7dc2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -132,14 +123,36 @@ type Any struct { // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a098e..4d75473b8b 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -98,14 +89,36 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e23e4a25da..e9c2222821 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. 
Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // @@ -114,14 +105,36 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/google/go-containerregistry/LICENSE b/vendor/github.com/google/go-containerregistry/LICENSE new file mode 100644 index 0000000000..7a4a3ea242 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/google/go-containerregistry/authn/anon.go b/vendor/github.com/google/go-containerregistry/authn/anon.go new file mode 100644 index 0000000000..c9c08ec737 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/anon.go @@ -0,0 +1,26 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// anonymous implements Authenticator for anonymous authentication. +type anonymous struct{} + +// Authorization implements Authenticator. +func (a *anonymous) Authorization() (string, error) { + return "", nil +} + +// Anonymous is a singleton Authenticator for providing anonymous auth. +var Anonymous Authenticator = &anonymous{} diff --git a/vendor/github.com/google/go-containerregistry/authn/auth.go b/vendor/github.com/google/go-containerregistry/authn/auth.go new file mode 100644 index 0000000000..c39ee5a9f9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/auth.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "fmt" +) + +// auth implements Authenticator for an "auth" entry of the docker config. +type auth struct { + token string +} + +// Authorization implements Authenticator. +func (a *auth) Authorization() (string, error) { + return fmt.Sprintf("Basic %s", a.token), nil +} diff --git a/vendor/github.com/google/go-containerregistry/authn/authn.go b/vendor/github.com/google/go-containerregistry/authn/authn.go new file mode 100644 index 0000000000..30e935cbc9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/authn.go @@ -0,0 +1,21 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +// Authenticator is used to authenticate Docker transports. 
+type Authenticator interface { + // Authorization returns the value to use in an http transport's Authorization header. + Authorization() (string, error) +} diff --git a/vendor/github.com/google/go-containerregistry/authn/basic.go b/vendor/github.com/google/go-containerregistry/authn/basic.go new file mode 100644 index 0000000000..7cd4984069 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/basic.go @@ -0,0 +1,33 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "encoding/base64" + "fmt" +) + +// Basic implements Authenticator for basic authentication. +type Basic struct { + Username string + Password string +} + +// Authorization implements Authenticator. +func (b *Basic) Authorization() (string, error) { + delimited := fmt.Sprintf("%s:%s", b.Username, b.Password) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + return fmt.Sprintf("Basic %s", encoded), nil +} diff --git a/vendor/github.com/google/go-containerregistry/authn/bearer.go b/vendor/github.com/google/go-containerregistry/authn/bearer.go new file mode 100644 index 0000000000..cb1ae5845a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/bearer.go @@ -0,0 +1,29 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "fmt" +) + +// Bearer implements Authenticator for bearer authentication. +type Bearer struct { + Token string `json:"token"` +} + +// Authorization implements Authenticator. +func (b *Bearer) Authorization() (string, error) { + return fmt.Sprintf("Bearer %s", b.Token), nil +} diff --git a/vendor/github.com/google/go-containerregistry/authn/doc.go b/vendor/github.com/google/go-containerregistry/authn/doc.go new file mode 100644 index 0000000000..c2a5fc0267 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package authn defines different methods of authentication for +// talking to a container registry. +package authn diff --git a/vendor/github.com/google/go-containerregistry/authn/helper.go b/vendor/github.com/google/go-containerregistry/authn/helper.go new file mode 100644 index 0000000000..5b2467ae83 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/authn/helper.go @@ -0,0 +1,94 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package authn + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" + + "github.com/google/go-containerregistry/name" +) + +// magicNotFoundMessage is the string that the CLI special cases to mean +// that a given registry domain wasn't found. +const ( + magicNotFoundMessage = "credentials not found in native keychain" +) + +// runner allows us to swap out how we "Run" os/exec commands. +type runner interface { + Run(*exec.Cmd) error +} + +// defaultRunner implements runner by just calling Run(). +type defaultRunner struct{} + +// Run implements runner. +func (dr *defaultRunner) Run(cmd *exec.Cmd) error { + return cmd.Run() +} + +// helper executes the named credential helper against the given domain. +type helper struct { + name string + domain name.Registry + + // We add this layer of indirection to facilitate unit testing. + r runner +} + +// helperOutput is the expected JSON output form of a credential helper +// (or at least these are the fields that we care about). +type helperOutput struct { + Username string + Secret string +} + +// Authorization implements Authenticator. +func (h *helper) Authorization() (string, error) { + helperName := fmt.Sprintf("docker-credential-%s", h.name) + // We want to execute: + // echo -n {domain} | docker-credential-{name} get + cmd := exec.Command(helperName, "get") + + // Some keychains expect a scheme: + // https://github.com/bazelbuild/rules_docker/issues/111 + cmd.Stdin = strings.NewReader(fmt.Sprintf("https://%v", h.domain)) + + var out bytes.Buffer + cmd.Stdout = &out + if err := h.r.Run(cmd); err != nil { + return "", err + } + output := out.String() + + // If we see this specific message, it means the domain wasn't found + // and we should fall back on anonymous auth. + if output == magicNotFoundMessage { + return Anonymous.Authorization() + } + + // Any other output should be parsed as JSON and the Username / Secret + // fields used for Basic authentication. 
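As a minimal, hedged sketch of the exchange described in the comments above (assuming some docker-credential-<name> helper, here docker-credential-gcr, is installed on PATH; the registry domain and reply shape are illustrative):

    package main

    import (
    	"bytes"
    	"fmt"
    	"log"
    	"os/exec"
    	"strings"
    )

    func main() {
    	// Pipe the scheme-prefixed domain into `docker-credential-<name> get`,
    	// which is the same protocol helper.Authorization implements.
    	cmd := exec.Command("docker-credential-gcr", "get") // assumed helper binary
    	cmd.Stdin = strings.NewReader("https://gcr.io")     // illustrative domain
    	var out bytes.Buffer
    	cmd.Stdout = &out
    	if err := cmd.Run(); err != nil {
    		log.Fatalf("helper failed: %v", err)
    	}
    	// On success the helper prints JSON such as {"Username":"...","Secret":"..."};
    	// on a miss it prints the magicNotFoundMessage handled above.
    	fmt.Println(out.String())
    }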
+	ho := helperOutput{}
+	if err := json.Unmarshal([]byte(output), &ho); err != nil {
+		return "", err
+	}
+	b := Basic{Username: ho.Username, Password: ho.Secret}
+	return b.Authorization()
+}
diff --git a/vendor/github.com/google/go-containerregistry/authn/keychain.go b/vendor/github.com/google/go-containerregistry/authn/keychain.go
new file mode 100644
index 0000000000..887847a270
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/authn/keychain.go
@@ -0,0 +1,152 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package authn
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"path"
+	"runtime"
+
+	"github.com/google/go-containerregistry/name"
+)
+
+// Keychain is an interface for resolving an image reference to a credential.
+type Keychain interface {
+	// Resolve looks up the most appropriate credential for the specified registry.
+	Resolve(name.Registry) (Authenticator, error)
+}
+
+// defaultKeychain implements Keychain with the semantics of the standard Docker
+// credential keychain.
+type defaultKeychain struct{}
+
+// configDir returns the directory containing Docker's config.json.
+func configDir() (string, error) {
+	if dc := os.Getenv("DOCKER_CONFIG"); dc != "" {
+		return dc, nil
+	}
+	if h := dockerUserHomeDir(); h != "" {
+		return path.Join(h, ".docker"), nil
+	}
+	return "", errNoHomeDir
+}
+
+var errNoHomeDir = errors.New("could not determine home directory")
+
+// dockerUserHomeDir returns the current user's home directory, as interpreted by Docker.
+func dockerUserHomeDir() string {
+	if runtime.GOOS == "windows" {
+		// Docker specifically expands "%USERPROFILE%" on Windows.
+		return os.Getenv("USERPROFILE")
+	}
+	// Docker defaults to "$HOME" on Linux and OSX.
+	return os.Getenv("HOME")
+}
+
+// authEntry is a helper for JSON parsing an "auth" entry of config.json.
+// This is not meant for direct consumption.
+type authEntry struct {
+	Auth     string `json:"auth"`
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+// cfg is a helper for JSON parsing Docker's config.json.
+// This is not meant for direct consumption.
+type cfg struct {
+	CredHelper map[string]string    `json:"credHelpers,omitempty"`
+	CredStore  string               `json:"credsStore,omitempty"`
+	Auths      map[string]authEntry `json:"auths,omitempty"`
+}
+
+// There are a variety of ways a domain may get qualified within the Docker credential file.
+// We enumerate them here as format strings.
+var (
+	domainForms = []string{
+		// Allow naked domains
+		"%s",
+		// Allow scheme-prefixed.
+		"https://%s",
+		"http://%s",
+		// Allow scheme-prefixes with version in url path.
+		"https://%s/v1/",
+		"http://%s/v1/",
+		"https://%s/v2/",
+		"http://%s/v2/",
+	}
+
+	// Export an instance of the default keychain.
+	DefaultKeychain Keychain = &defaultKeychain{}
+)
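A minimal usage sketch for the keychain exported above, using only APIs present in this vendored revision (the registry name is chosen for illustration):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/google/go-containerregistry/authn"
    	"github.com/google/go-containerregistry/name"
    )

    func main() {
    	// WeakValidation tolerates the shorthand forms users typically type.
    	reg, err := name.NewRegistry("gcr.io", name.WeakValidation)
    	if err != nil {
    		log.Fatal(err)
    	}
    	// Walks config.json as described below: per-registry credHelpers,
    	// then credsStore, then "auths", falling back on authn.Anonymous.
    	auth, err := authn.DefaultKeychain.Resolve(reg)
    	if err != nil {
    		log.Fatal(err)
    	}
    	header, err := auth.Authorization()
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("Authorization: %s\n", header)
    }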
+// Resolve implements Keychain.
+func (dk *defaultKeychain) Resolve(reg name.Registry) (Authenticator, error) {
+	dir, err := configDir()
+	if err != nil {
+		log.Printf("Unable to determine config dir, falling back on anonymous: %v", err)
+		return Anonymous, nil
+	}
+	file := path.Join(dir, "config.json")
+	content, err := ioutil.ReadFile(file)
+	if err != nil {
+		log.Printf("Unable to read %q, falling back on anonymous: %v", file, err)
+		return Anonymous, nil
+	}
+
+	var cf cfg
+	if err := json.Unmarshal(content, &cf); err != nil {
+		log.Printf("Unable to parse %q, falling back on anonymous: %v", file, err)
+		return Anonymous, nil
+	}
+
+	// Per-registry credential helpers take precedence.
+	if cf.CredHelper != nil {
+		for _, form := range domainForms {
+			if entry, ok := cf.CredHelper[fmt.Sprintf(form, reg.Name())]; ok {
+				return &helper{name: entry, domain: reg, r: &defaultRunner{}}, nil
+			}
+		}
+	}
+
+	// A global credential helper is next in precedence.
+	if cf.CredStore != "" {
+		return &helper{name: cf.CredStore, domain: reg, r: &defaultRunner{}}, nil
+	}
+
+	// Lastly, the 'auths' section directly contains basic auth entries.
+	if cf.Auths != nil {
+		for _, form := range domainForms {
+			if entry, ok := cf.Auths[fmt.Sprintf(form, reg.Name())]; ok {
+				if entry.Auth != "" {
+					return &auth{entry.Auth}, nil
+				} else if entry.Username != "" {
+					return &Basic{Username: entry.Username, Password: entry.Password}, nil
+				} else {
+					// TODO(mattmoor): Support identitytoken
+					// TODO(mattmoor): Support registrytoken
+					return nil, fmt.Errorf("unsupported entry in \"auths\" section of %q", file)
+				}
+			}
+		}
+	}
+
+	log.Printf("No matching credentials found for %v, falling back on anonymous", reg)
+	return Anonymous, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/name/check.go b/vendor/github.com/google/go-containerregistry/name/check.go
new file mode 100644
index 0000000000..01a25d554a
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/name/check.go
@@ -0,0 +1,52 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+// Strictness defines the level of strictness for name validation.
+type Strictness int
+
+// Enums for the level of strictness in name validation.
+const (
+	StrictValidation Strictness = iota
+	WeakValidation
+)
+
+// stripRunesFn returns a function which returns -1 (i.e. a value which
+// signals deletion in strings.Map) for runes in 'runes', and the rune otherwise.
+func stripRunesFn(runes string) func(rune) rune {
+	return func(r rune) rune {
+		if strings.ContainsRune(runes, r) {
+			return -1
+		}
+		return r
+	}
+}
+// checkElement checks that a given named element matches character and length restrictions.
+// Returns nil if the given element adheres to the given restrictions; an error otherwise.
+func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
+	numRunes := utf8.RuneCountInString(element)
+	if (numRunes < minRunes) || (maxRunes < numRunes) {
+		return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
+	} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
+		return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/name/digest.go b/vendor/github.com/google/go-containerregistry/name/digest.go
new file mode 100644
index 0000000000..ea6287a847
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/name/digest.go
@@ -0,0 +1,91 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package name defines structured types for representing image references.
+package name
+
+import (
+	"strings"
+)
+
+const (
+	// These have the form: sha256:<hex string>
+	// TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation.
+	digestChars = "sh:0123456789abcdef"
+	digestDelim = "@"
+)
+
+// Digest stores a digest name in a structured form.
+type Digest struct {
+	Repository
+	digest string
+}
+
+// Ensure Digest implements Reference
+var _ Reference = (*Digest)(nil)
+
+// Context implements Reference.
+func (d Digest) Context() Repository {
+	return d.Repository
+}
+
+// Identifier implements Reference.
+func (d Digest) Identifier() string {
+	return d.DigestStr()
+}
+
+// DigestStr returns the digest component of the Digest.
+func (d Digest) DigestStr() string {
+	return d.digest
+}
+
+// Name returns the name from which the Digest was derived.
+func (d Digest) Name() string {
+	return d.Repository.Name() + digestDelim + d.DigestStr()
+}
+
+func (d Digest) String() string {
+	return d.Name()
+}
+
+func checkDigest(name string) error {
+	return checkElement("digest", name, digestChars, 7+64, 7+64)
+}
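A minimal sketch of how these validators compose in practice (ParseReference and NewTag appear later in this diff; both reference strings below are invented for illustration):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/google/go-containerregistry/name"
    )

    func main() {
    	for _, s := range []string{
    		"ubuntu:18.04", // resolves as a Tag
    		"gcr.io/foo/bar@sha256:" + // resolves as a Digest (64 hex runes)
    			"0000000000000000000000000000000000000000000000000000000000000000",
    	} {
    		// ParseReference tries NewTag first, then NewDigest.
    		ref, err := name.ParseReference(s, name.WeakValidation)
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("%s -> repo=%s id=%s\n", s, ref.Context(), ref.Identifier())
    	}
    }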
+// NewDigest returns a new Digest representing the given name, according to the given strictness.
+func NewDigest(name string, strict Strictness) (Digest, error) {
+	// Split on "@"
+	parts := strings.Split(name, digestDelim)
+	if len(parts) != 2 {
+		return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
+	}
+	base := parts[0]
+	digest := parts[1]
+
+	// We don't require a digest, but if we get one check it's valid,
+	// even when not being strict.
+	// If we are being strict, we want to validate the digest regardless in case
+	// it's empty.
+	if digest != "" || strict == StrictValidation {
+		if err := checkDigest(digest); err != nil {
+			return Digest{}, err
+		}
+	}
+
+	repo, err := NewRepository(base, strict)
+	if err != nil {
+		return Digest{}, err
+	}
+	return Digest{repo, digest}, nil
+}
diff --git a/vendor/github.com/google/go-containerregistry/name/errors.go b/vendor/github.com/google/go-containerregistry/name/errors.go
new file mode 100644
index 0000000000..7847cc5d1e
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/name/errors.go
@@ -0,0 +1,37 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import "fmt"
+
+// ErrBadName is an error for when a bad docker name is supplied.
+type ErrBadName struct {
+	info string
+}
+
+func (e *ErrBadName) Error() string {
+	return e.info
+}
+
+// NewErrBadName returns an ErrBadName which returns the given formatted string from Error().
+func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
+	return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
+}
+
+// IsErrBadName returns true if the given error is an ErrBadName.
+func IsErrBadName(err error) bool {
+	_, ok := err.(*ErrBadName)
+	return ok
+}
diff --git a/vendor/github.com/google/go-containerregistry/name/ref.go b/vendor/github.com/google/go-containerregistry/name/ref.go
new file mode 100644
index 0000000000..58775daa30
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/name/ref.go
@@ -0,0 +1,50 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package name
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Reference defines the interface that consumers use when they can
+// take either a tag or a digest.
+type Reference interface {
+	fmt.Stringer
+
+	// Context accesses the Repository context of the reference.
+	Context() Repository
+
+	// Identifier accesses the type-specific portion of the reference.
+	Identifier() string
+
+	// Name is the fully-qualified reference name.
+	Name() string
+
+	// Scope is the scope needed to access this reference.
+	Scope(string) string
+}
+
+// ParseReference parses the string as a reference, either by tag or digest.
+func ParseReference(s string, strict Strictness) (Reference, error) {
+	if t, err := NewTag(s, strict); err == nil {
+		return t, nil
+	}
+	if d, err := NewDigest(s, strict); err == nil {
+		return d, nil
+	}
+	// TODO: Combine above errors into something more useful?
+ return nil, errors.New("could not parse reference") +} diff --git a/vendor/github.com/google/go-containerregistry/name/registry.go b/vendor/github.com/google/go-containerregistry/name/registry.go new file mode 100644 index 0000000000..6d06cc5fed --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/name/registry.go @@ -0,0 +1,79 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import "net/url" + +const ( + DefaultRegistry = "index.docker.io" + defaultRegistryAlias = "docker.io" +) + +// Registry stores a docker registry name in a structured form. +type Registry struct { + registry string +} + +// RegistryStr returns the registry component of the Registry. +func (r Registry) RegistryStr() string { + if r.registry != "" { + return r.registry + } + return DefaultRegistry +} + +// Name returns the name from which the Registry was derived. +func (r Registry) Name() string { + return r.RegistryStr() +} + +func (r Registry) String() string { + return r.Name() +} + +// Scope returns the scope required to access the registry. +func (r Registry) Scope(string) string { + // The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z + return "registry:catalog:*" +} + +func checkRegistry(name string) error { + // Per RFC 3986, registries (authorities) are required to be prefixed with "//" + // url.Host == hostname[:port] == authority + if url, err := url.Parse("//" + name); err != nil || url.Host != name { + return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name) + } + return nil +} + +// NewRegistry returns a Registry based on the given name. +// Strict validation requires explicit, valid RFC 3986 URI authorities to be given. +func NewRegistry(name string, strict Strictness) (Registry, error) { + if strict == StrictValidation && len(name) == 0 { + return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined") + } + + if err := checkRegistry(name); err != nil { + return Registry{}, err + } + + // Rewrite "docker.io" to "index.docker.io". + // See: https://github.com/google/go-containerregistry/issues/68 + if name == defaultRegistryAlias { + name = DefaultRegistry + } + + return Registry{registry: name}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/name/repository.go b/vendor/github.com/google/go-containerregistry/name/repository.go new file mode 100644 index 0000000000..7c296e394a --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/name/repository.go @@ -0,0 +1,99 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "fmt" + "strings" +) + +const ( + defaultNamespace = "library" + repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./" + regRepoDelimiter = "/" +) + +// Repository stores a docker repository name in a structured form. +type Repository struct { + Registry + repository string +} + +// See https://docs.docker.com/docker-hub/official_repos +func hasImplicitNamespace(repo string, reg Registry) bool { + return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry +} + +// RepositoryStr returns the repository component of the Repository. +func (r Repository) RepositoryStr() string { + if hasImplicitNamespace(r.repository, r.Registry) { + return fmt.Sprintf("%s/%s", defaultNamespace, r.repository) + } + return r.repository +} + +// Name returns the name from which the Repository was derived. +func (r Repository) Name() string { + regName := r.Registry.Name() + if regName != "" { + return regName + regRepoDelimiter + r.RepositoryStr() + } + return r.RepositoryStr() +} + +func (r Repository) String() string { + return r.Name() +} + +// Scope returns the scope required to perform the given action on the registry. +// TODO(jonjohnsonjr): consider moving scopes to a separate package. +func (r Repository) Scope(action string) string { + return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action) +} + +func checkRepository(repository string) error { + return checkElement("repository", repository, repositoryChars, 2, 255) +} + +// NewRepository returns a new Repository representing the given name, according to the given strictness. +func NewRepository(name string, strict Strictness) (Repository, error) { + if len(name) == 0 { + return Repository{}, NewErrBadName("a repository name must be specified") + } + + var registry string + repo := name + parts := strings.SplitN(name, regRepoDelimiter, 2) + if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) { + // The first part of the repository is treated as the registry domain + // iff it contains a '.' or ':' character, otherwise it is all repository + // and the domain defaults to DockerHub. + registry = parts[0] + repo = parts[1] + } + + if err := checkRepository(repo); err != nil { + return Repository{}, err + } + + reg, err := NewRegistry(registry, strict) + if err != nil { + return Repository{}, err + } + if hasImplicitNamespace(repo, reg) && strict == StrictValidation { + return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')") + } + return Repository{reg, repo}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/name/tag.go b/vendor/github.com/google/go-containerregistry/name/tag.go new file mode 100644 index 0000000000..b8375e1f9b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/name/tag.go @@ -0,0 +1,101 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package name + +import ( + "strings" +) + +const ( + defaultTag = "latest" + // TODO(dekkagaijin): use the docker/distribution regexes for validation. + tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ" + tagDelim = ":" +) + +// Tag stores a docker tag name in a structured form. +type Tag struct { + Repository + tag string +} + +// Ensure Tag implements Reference +var _ Reference = (*Tag)(nil) + +// Context implements Reference. +func (t Tag) Context() Repository { + return t.Repository +} + +// Identifier implements Reference. +func (t Tag) Identifier() string { + return t.TagStr() +} + +// TagStr returns the tag component of the Tag. +func (t Tag) TagStr() string { + if t.tag != "" { + return t.tag + } + return defaultTag +} + +// Name returns the name from which the Tag was derived. +func (t Tag) Name() string { + return t.Repository.Name() + tagDelim + t.TagStr() +} + +func (t Tag) String() string { + return t.Name() +} + +// Scope returns the scope required to perform the given action on the tag. +func (t Tag) Scope(action string) string { + return t.Repository.Scope(action) +} + +func checkTag(name string) error { + return checkElement("tag", name, tagChars, 1, 127) +} + +// NewTag returns a new Tag representing the given name, according to the given strictness. +func NewTag(name string, strict Strictness) (Tag, error) { + base := name + tag := "" + + // Split on ":" + parts := strings.Split(name, tagDelim) + // Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation. + if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) { + base = strings.Join(parts[:len(parts)-1], tagDelim) + tag = parts[len(parts)-1] + } + + // We don't require a tag, but if we get one check it's valid, + // even when not being strict. + // If we are being strict, we want to validate the tag regardless in case + // it's empty. + if tag != "" || strict == StrictValidation { + if err := checkTag(tag); err != nil { + return Tag{}, err + } + } + + repo, err := NewRepository(base, strict) + if err != nil { + return Tag{}, err + } + return Tag{repo, tag}, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/config.go b/vendor/github.com/google/go-containerregistry/v1/config.go new file mode 100644 index 0000000000..d1d809d911 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/config.go @@ -0,0 +1,130 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package v1
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// ConfigFile is the configuration file that holds the metadata describing
+// how to launch a container. The names of the fields are chosen to reflect
+// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY
+type ConfigFile struct {
+	Architecture    string    `json:"architecture"`
+	Container       string    `json:"container"`
+	Created         Time      `json:"created"`
+	DockerVersion   string    `json:"docker_version"`
+	History         []History `json:"history"`
+	OS              string    `json:"os"`
+	RootFS          RootFS    `json:"rootfs"`
+	Config          Config    `json:"config"`
+	ContainerConfig Config    `json:"container_config"`
+	OSVersion       string    `json:"osversion"`
+}
+
+// History is one entry of a list recording how this container image was built.
+type History struct {
+	Author     string `json:"author"`
+	Created    Time   `json:"created"`
+	CreatedBy  string `json:"created_by"`
+	Comment    string `json:"comment"`
+	EmptyLayer bool   `json:"empty_layer,omitempty"`
+}
+
+// Time is a wrapper around time.Time to help with deep copying.
+type Time struct {
+	time.Time
+}
+
+// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time
+// type is effectively immutable in the time API, so it is safe to
+// copy-by-assign, despite the presence of (unexported) Pointer fields.
+func (t *Time) DeepCopyInto(out *Time) {
+	*out = *t
+}
+
+// RootFS holds the ordered list of file system deltas that comprise the
+// container image's root filesystem.
+type RootFS struct {
+	Type    string `json:"type"`
+	DiffIDs []Hash `json:"diff_ids"`
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Config is a submessage of the config file described as:
+// The execution parameters which SHOULD be used as a base when running
+// a container using the image.
+// The names of the fields in this message are chosen to reflect the JSON +// payload of the Config as defined here: +// https://git.io/vrAET +// and +// https://github.com/opencontainers/image-spec/blob/master/config.md +type Config struct { + AttachStderr bool + AttachStdin bool + AttachStdout bool + Cmd []string + Healthcheck *HealthConfig + Domainname string + Entrypoint []string + Env []string + Hostname string + Image string + Labels map[string]string + OnBuild []string + OpenStdin bool + StdinOnce bool + Tty bool + User string + Volumes map[string]struct{} + WorkingDir string + ExposedPorts map[string]struct{} + ArgsEscaped bool + NetworkDisabled bool + MacAddress string + StopSignal string + Shell []string +} + +// ParseConfigFile parses the io.Reader's contents into a ConfigFile. +func ParseConfigFile(r io.Reader) (*ConfigFile, error) { + cf := ConfigFile{} + if err := json.NewDecoder(r).Decode(&cf); err != nil { + return nil, err + } + return &cf, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/doc.go b/vendor/github.com/google/go-containerregistry/v1/doc.go new file mode 100644 index 0000000000..c9b203173e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package v1 defines structured types for OCI v1 images +// +k8s:deepcopy-gen=package + +//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . +package v1 diff --git a/vendor/github.com/google/go-containerregistry/v1/empty/doc.go b/vendor/github.com/google/go-containerregistry/v1/empty/doc.go new file mode 100644 index 0000000000..1a521e9a74 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/empty/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package empty provides an implementation of v1.Image equivalent to "FROM scratch". +package empty diff --git a/vendor/github.com/google/go-containerregistry/v1/empty/image.go b/vendor/github.com/google/go-containerregistry/v1/empty/image.go new file mode 100644 index 0000000000..78527b8480 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/empty/image.go @@ -0,0 +1,22 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package empty
+
+import (
+	"github.com/google/go-containerregistry/v1/random"
+)
+
+// Image is a singleton empty image, think: FROM scratch.
+var Image, _ = random.Image(0, 0)
diff --git a/vendor/github.com/google/go-containerregistry/v1/hash.go b/vendor/github.com/google/go-containerregistry/v1/hash.go
new file mode 100644
index 0000000000..f0db0d51cf
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/v1/hash.go
@@ -0,0 +1,111 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"fmt"
+	"hash"
+	"io"
+	"strconv"
+	"strings"
+)
+
+// Hash is an unqualified digest of some content, e.g. sha256:deadbeef
+type Hash struct {
+	// Algorithm holds the algorithm used to compute the hash.
+	Algorithm string
+
+	// Hex holds the hex portion of the content hash.
+	Hex string
+}
+
+// String reverses NewHash, returning the string form of the hash.
+func (h Hash) String() string {
+	return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex)
+}
+
+// NewHash validates that the input string is a hash and returns a strongly typed Hash object.
+func NewHash(s string) (Hash, error) {
+	h := Hash{}
+	if err := h.parse(s); err != nil {
+		return Hash{}, err
+	}
+	return h, nil
+}
+
+// MarshalJSON implements json.Marshaler
+func (h *Hash) MarshalJSON() ([]byte, error) {
+	return json.Marshal(h.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (h *Hash) UnmarshalJSON(data []byte) error {
+	s, err := strconv.Unquote(string(data))
+	if err != nil {
+		return err
+	}
+	return h.parse(s)
+}
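A minimal sketch exercising NewHash above and the SHA256 helper defined just below (the input string is arbitrary):

    package main

    import (
    	"fmt"
    	"log"
    	"strings"

    	"github.com/google/go-containerregistry/v1"
    )

    func main() {
    	// Hash some content and report how many bytes were digested.
    	h, n, err := v1.SHA256(strings.NewReader("hello"))
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Printf("digested %d bytes: %s\n", n, h)

    	// Round-trip through NewHash, which validates the algorithm
    	// and the hex length (2 hex characters per digest byte).
    	parsed, err := v1.NewHash(h.String())
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(parsed.Algorithm, parsed.Hex)
    }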
"sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return sha256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("too many parts in hash: %s", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content. +func SHA256(r io.Reader) (Hash, int64, error) { + hasher := sha256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/image.go b/vendor/github.com/google/go-containerregistry/v1/image.go new file mode 100644 index 0000000000..89d1439637 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/image.go @@ -0,0 +1,58 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // BlobSet returns an unordered collection of all the blobs in the image. + BlobSet() (map[Hash]struct{}, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // ConfigName returns the hash of the image's config file. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile() + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). 
+	LayerByDiffID(Hash) (Layer, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/v1/layer.go b/vendor/github.com/google/go-containerregistry/v1/layer.go
new file mode 100644
index 0000000000..8b5091e45b
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/v1/layer.go
@@ -0,0 +1,37 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"io"
+)
+
+// Layer is an interface for accessing the properties of a particular layer of a v1.Image.
+type Layer interface {
+	// Digest returns the Hash of the compressed layer.
+	Digest() (Hash, error)
+
+	// DiffID returns the Hash of the uncompressed layer.
+	DiffID() (Hash, error)
+
+	// Compressed returns an io.ReadCloser for the compressed layer contents.
+	Compressed() (io.ReadCloser, error)
+
+	// Uncompressed returns an io.ReadCloser for the uncompressed layer contents.
+	Uncompressed() (io.ReadCloser, error)
+
+	// Size returns the compressed size of the Layer.
+	Size() (int64, error)
+}
diff --git a/vendor/github.com/google/go-containerregistry/v1/manifest.go b/vendor/github.com/google/go-containerregistry/v1/manifest.go
new file mode 100644
index 0000000000..79c9b05291
--- /dev/null
+++ b/vendor/github.com/google/go-containerregistry/v1/manifest.go
@@ -0,0 +1,49 @@
+// Copyright 2018 Google LLC All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"encoding/json"
+	"io"
+
+	"github.com/google/go-containerregistry/v1/types"
+)
+
+// Manifest represents the OCI image manifest in a structured way.
+type Manifest struct {
+	SchemaVersion int64             `json:"schemaVersion"`
+	MediaType     types.MediaType   `json:"mediaType"`
+	Config        Descriptor        `json:"config"`
+	Layers        []Descriptor      `json:"layers"`
+	Annotations   map[string]string `json:"annotations,omitempty"`
+}
+
+// Descriptor holds a reference from the manifest to one of its constituent elements.
+type Descriptor struct {
+	MediaType   types.MediaType   `json:"mediaType"`
+	Size        int64             `json:"size"`
+	Digest      Hash              `json:"digest"`
+	URLs        []string          `json:"urls,omitempty"`
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
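A minimal sketch decoding a manifest with ParseManifest, defined just below (the JSON, including its digest value, is an illustrative fragment rather than real image data):

    package main

    import (
    	"fmt"
    	"log"
    	"strings"

    	"github.com/google/go-containerregistry/v1"
    )

    const manifestJSON = `{
      "schemaVersion": 2,
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "config": {
        "mediaType": "application/vnd.docker.container.image.v1+json",
        "size": 7023,
        "digest": "sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7"
      },
      "layers": []
    }`

    func main() {
    	// The digest field exercises Hash.UnmarshalJSON from hash.go above.
    	m, err := v1.ParseManifest(strings.NewReader(manifestJSON))
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(m.SchemaVersion, m.Config.Digest)
    }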
+func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/mutate/doc.go b/vendor/github.com/google/go-containerregistry/v1/mutate/doc.go new file mode 100644 index 0000000000..dfbd9951e0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/mutate/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package mutate provides facilities for mutating v1.Images of any kind. +package mutate diff --git a/vendor/github.com/google/go-containerregistry/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/v1/mutate/mutate.go new file mode 100644 index 0000000000..55e4805909 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/mutate/mutate.go @@ -0,0 +1,351 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "archive/tar" + "encoding/json" + "errors" + "fmt" + "io" + "path/filepath" + "strings" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/partial" + "github.com/google/go-containerregistry/v1/types" +) + +const whiteoutPrefix = ".wh." + +// Addendum contains layers and history to be appended +// to a base image +type Addendum struct { + Layer v1.Layer + History v1.History +} + +// AppendLayers applies layers to a base image +func AppendLayers(base v1.Image, layers ...v1.Layer) (v1.Image, error) { + additions := make([]Addendum, 0, len(layers)) + for _, layer := range layers { + additions = append(additions, Addendum{Layer: layer}) + } + + return Append(base, additions...) 
+} + +// Append will apply the list of addendums to the base image +func Append(base v1.Image, adds ...Addendum) (v1.Image, error) { + if len(adds) == 0 { + return base, nil + } + + if err := validate(adds); err != nil { + return nil, err + } + + m, err := base.Manifest() + if err != nil { + return nil, err + } + + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + image := &image{ + Image: base, + manifest: m.DeepCopy(), + configFile: cf.DeepCopy(), + diffIDMap: make(map[v1.Hash]v1.Layer), + digestMap: make(map[v1.Hash]v1.Layer), + } + + diffIDs := image.configFile.RootFS.DiffIDs + history := image.configFile.History + + for _, add := range adds { + diffID, err := add.Layer.DiffID() + if err != nil { + return nil, err + } + diffIDs = append(diffIDs, diffID) + history = append(history, add.History) + image.diffIDMap[diffID] = add.Layer + } + + manifestLayers := image.manifest.Layers + + for _, add := range adds { + d := v1.Descriptor{ + MediaType: types.DockerLayer, + } + + if d.Size, err = add.Layer.Size(); err != nil { + return nil, err + } + + if d.Digest, err = add.Layer.Digest(); err != nil { + return nil, err + } + + manifestLayers = append(manifestLayers, d) + image.digestMap[d.Digest] = add.Layer + } + + image.configFile.RootFS.DiffIDs = diffIDs + image.configFile.History = history + image.manifest.Layers = manifestLayers + image.manifest.Config.Digest, err = image.ConfigName() + if err != nil { + return nil, err + } + + return image, nil +} + +// Config mutates the provided v1.Image to have the provided v1.Config +func Config(base v1.Image, cfg v1.Config) (v1.Image, error) { + m, err := base.Manifest() + if err != nil { + return nil, err + } + + cf, err := base.ConfigFile() + if err != nil { + return nil, err + } + + cf.Config = cfg + + image := &image{ + Image: base, + manifest: m.DeepCopy(), + configFile: cf.DeepCopy(), + diffIDMap: make(map[v1.Hash]v1.Layer), + digestMap: make(map[v1.Hash]v1.Layer), + } + image.manifest.Config.Digest, err = image.ConfigName() + if err != nil { + return nil, err + } + return image, nil +} + +type image struct { + v1.Image + configFile *v1.ConfigFile + manifest *v1.Manifest + diffIDMap map[v1.Hash]v1.Layer + digestMap map[v1.Hash]v1.Layer +} + +// Layers returns the ordered collection of filesystem layers that comprise this image. +// The order of the list is oldest/base layer first, and most-recent/top layer last. +func (i *image) Layers() ([]v1.Layer, error) { + diffIDs, err := partial.DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// BlobSet returns an unordered collection of all the blobs in the image. +func (i *image) BlobSet() (map[v1.Hash]struct{}, error) { + return partial.BlobSet(i) +} + +// ConfigName returns the hash of the image's config file. +func (i *image) ConfigName() (v1.Hash, error) { + return partial.ConfigName(i) +} + +// ConfigFile returns this image's config file. +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + return i.configFile, nil +} + +// RawConfigFile returns the serialized bytes of ConfigFile() +func (i *image) RawConfigFile() ([]byte, error) { + return json.Marshal(i.configFile) +} + +// Digest returns the sha256 of this image's manifest. +func (i *image) Digest() (v1.Hash, error) { + return partial.Digest(i) +} + +// Manifest returns this image's Manifest object. 
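+// As a hedged sketch (base and layer are assumed to exist), descriptors for
+// layers appended via Append above surface here:
+//
+//	img, _ := AppendLayers(base, layer)
+//	m, _ := img.Manifest() // m.Layers now ends with layer's descriptor
+//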
+func (i *image) Manifest() (*v1.Manifest, error) { + return i.manifest, nil +} + +// RawManifest returns the serialized bytes of Manifest() +func (i *image) RawManifest() ([]byte, error) { + return json.Marshal(i.manifest) +} + +// LayerByDigest returns a Layer for interacting with a particular layer of +// the image, looking it up by "digest" (the compressed hash). +func (i *image) LayerByDigest(h v1.Hash) (v1.Layer, error) { + if cn, err := i.ConfigName(); err != nil { + return nil, err + } else if h == cn { + return partial.ConfigLayer(i) + } + if layer, ok := i.digestMap[h]; ok { + return layer, nil + } + return i.Image.LayerByDigest(h) +} + +// LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" +// (the uncompressed hash). +func (i *image) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + if layer, ok := i.diffIDMap[h]; ok { + return layer, nil + } + return i.Image.LayerByDiffID(h) +} + +func validate(adds []Addendum) error { + for _, add := range adds { + if add.Layer == nil { + return errors.New("Unable to add a nil layer to the image") + } + } + return nil +} + +// Extract takes an image and returns an io.ReadCloser containing the image's +// flattened filesystem. +// +// Callers can read the filesystem contents by passing the reader to +// tar.NewReader, or io.Copy it directly to some output. +// +// If a caller doesn't read the full contents, they should Close it to free up +// resources used during extraction. +// +// Adapted from https://github.com/google/containerregistry/blob/master/client/v2_2/docker_image_.py#L731 +func Extract(img v1.Image) io.ReadCloser { + pr, pw := io.Pipe() + + go func() { + // Close the writer with any errors encountered during + // extraction. These errors will be returned by the reader end + // on subsequent reads. If err == nil, the reader will return + // EOF. + pw.CloseWithError(extract(img, pw)) + }() + + return pr +} + +func extract(img v1.Image, w io.Writer) error { + tarWriter := tar.NewWriter(w) + defer tarWriter.Close() + + fileMap := map[string]bool{} + + layers, err := img.Layers() + if err != nil { + return fmt.Errorf("retrieving image layers: %v", err) + } + // we iterate through the layers in reverse order because it makes handling + // whiteout layers more efficient, since we can just keep track of the removed + // files as we see .wh. layers and ignore those in previous layers. + for i := len(layers) - 1; i >= 0; i-- { + layer := layers[i] + layerReader, err := layer.Uncompressed() + if err != nil { + return fmt.Errorf("reading layer contents: %v", err) + } + tarReader := tar.NewReader(layerReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return fmt.Errorf("reading tar: %v", err) + } + + basename := filepath.Base(header.Name) + dirname := filepath.Dir(header.Name) + tombstone := strings.HasPrefix(basename, whiteoutPrefix) + if tombstone { + basename = basename[len(whiteoutPrefix):] + } + + // check if we have seen value before + // if we're checking a directory, don't filepath.Join names + var name string + if header.Typeflag == tar.TypeDir { + name = header.Name + } else { + name = filepath.Join(dirname, basename) + } + + if _, ok := fileMap[name]; ok { + continue + } + + // check for a whited out parent directory + if inWhiteoutDir(fileMap, name) { + continue + } + + // mark file as handled. 
non-directory implicitly tombstones + // any entries with a matching (or child) name + fileMap[name] = tombstone || !(header.Typeflag == tar.TypeDir) + if !tombstone { + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if header.Size > 0 { + if _, err := io.Copy(tarWriter, tarReader); err != nil { + return err + } + } + } + } + } + return nil +} + +func inWhiteoutDir(fileMap map[string]bool, file string) bool { + for { + if file == "" { + break + } + dirname := filepath.Dir(file) + if file == dirname { + break + } + if val, ok := fileMap[dirname]; ok && val { + return true + } + file = dirname + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/v1/mutate/rebase.go b/vendor/github.com/google/go-containerregistry/v1/mutate/rebase.go new file mode 100644 index 0000000000..7365fa57ab --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/mutate/rebase.go @@ -0,0 +1,100 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mutate + +import ( + "fmt" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/empty" +) + +// RebaseOptions holds optional arguments for Rebase. +type RebaseOptions struct { + // TODO(jasonhall): Rebase seam hint. +} + +// Rebase verifies that orig is based on oldBase, then returns an image that replays orig's layers and history above oldBase on top of newBase, keeping orig's config. +func Rebase(orig, oldBase, newBase v1.Image, opts *RebaseOptions) (v1.Image, error) { + // Verify that oldBase's layers are present in orig, otherwise orig is + // not based on oldBase at all. + origLayers, err := orig.Layers() + if err != nil { + return nil, fmt.Errorf("failed to get layers for original: %v", err) + } + oldBaseLayers, err := oldBase.Layers() + if err != nil { + return nil, err + } + if len(oldBaseLayers) > len(origLayers) { + return nil, fmt.Errorf("image %q is not based on %q (too few layers)", orig, oldBase) + } + for i, l := range oldBaseLayers { + oldLayerDigest, err := l.Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %v", i, oldBase, err) + } + origLayerDigest, err := origLayers[i].Digest() + if err != nil { + return nil, fmt.Errorf("failed to get digest of layer %d of %q: %v", i, orig, err) + } + if oldLayerDigest != origLayerDigest { + return nil, fmt.Errorf("image %q is not based on %q (layer %d mismatch)", orig, oldBase, i) + } + } + + origConfig, err := orig.ConfigFile() + if err != nil { + return nil, fmt.Errorf("failed to get config for original: %v", err) + } + + // Stitch together an image that contains: + // - original image's config + // - new base image's layers + top of original image's layers + // - new base image's history + top of original image's history + rebasedImage, err := Config(empty.Image, *origConfig.Config.DeepCopy()) + if err != nil { + return nil, fmt.Errorf("failed to create empty image with original config: %v", err) + } + // Get new base layers and config for history.
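+ // (A hedged usage aside, with all three images assumed available:
+ //
+ //	rebased, err := Rebase(orig, oldBase, newBase, nil)
+ //
+ // A nil *RebaseOptions is accepted, since the options are currently unused.)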
+ newBaseLayers, err := newBase.Layers() + if err != nil { + return nil, fmt.Errorf("could not get layers for new base: %v", err) + } + newConfig, err := newBase.ConfigFile() + if err != nil { + return nil, fmt.Errorf("could not get config for new base: %v", err) + } + // Add new base layers. + for i := range newBaseLayers { + rebasedImage, err = Append(rebasedImage, Addendum{ + Layer: newBaseLayers[i], + History: newConfig.History[i], + }) + if err != nil { + return nil, fmt.Errorf("failed to append layer %d of new base layers: %v", i, err) + } + } + // Add original layers above the old base. + start := len(oldBaseLayers) + for i := range origLayers[start:] { + rebasedImage, err = Append(rebasedImage, Addendum{ + Layer: origLayers[start+i], + History: origConfig.History[start+i], + }) + if err != nil { + return nil, fmt.Errorf("failed to append layer %d of original layers: %v", i, err) + } + } + return rebasedImage, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/v1/partial/compressed.go new file mode 100644 index 0000000000..1c810ff043 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/partial/compressed.go @@ -0,0 +1,154 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "io" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/v1util" +) + +// CompressedLayer represents the bare minimum interface a natively +// compressed layer must implement for us to produce a v1.Layer +type CompressedLayer interface { + // Digest returns the Hash of the compressed layer. + Digest() (v1.Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) +} + +// compressedLayerExtender implements v1.Layer using the compressed base properties. +type compressedLayerExtender struct { + CompressedLayer +} + +// Uncompressed implements v1.Layer +func (ule *compressedLayerExtender) Uncompressed() (io.ReadCloser, error) { + u, err := ule.Compressed() + if err != nil { + return nil, err + } + return v1util.GunzipReadCloser(u) +} + +// DiffID implements v1.Layer +func (ule *compressedLayerExtender) DiffID() (v1.Hash, error) { + r, err := ule.Uncompressed() + if err != nil { + return v1.Hash{}, err + } + defer r.Close() + h, _, err := v1.SHA256(r) + return h, err +} + +// CompressedToLayer fills in the missing methods from a CompressedLayer so that it implements v1.Layer +func CompressedToLayer(ul CompressedLayer) (v1.Layer, error) { + return &compressedLayerExtender{ul}, nil +} + +// CompressedImageCore represents the bare minimum interface a natively +// compressed image must implement for us to produce a v1.Image. +type CompressedImageCore interface { + imageCore + + // RawManifest returns the serialized bytes of the manifest.
+ RawManifest() ([]byte, error) + + // LayerByDigest is a variation on the v1.Image method, which returns + // a CompressedLayer instead. + LayerByDigest(v1.Hash) (CompressedLayer, error) +} + +// compressedImageExtender implements v1.Image by extending CompressedImageCore with the +// appropriate methods computed from the minimal core. +type compressedImageExtender struct { + CompressedImageCore +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*compressedImageExtender)(nil) + +// BlobSet implements v1.Image +func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { + return BlobSet(i) +} + +// Digest implements v1.Image +func (i *compressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// ConfigName implements v1.Image +func (i *compressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// Layers implements v1.Image +func (i *compressedImageExtender) Layers() ([]v1.Layer, error) { + hs, err := FSLayers(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(hs)) + for _, h := range hs { + l, err := i.LayerByDigest(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDigest implements v1.Image +func (i *compressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + cl, err := i.CompressedImageCore.LayerByDigest(h) + if err != nil { + return nil, err + } + return CompressedToLayer(cl) +} + +// LayerByDiffID implements v1.Image +func (i *compressedImageExtender) LayerByDiffID(h v1.Hash) (v1.Layer, error) { + h, err := DiffIDToBlob(i, h) + if err != nil { + return nil, err + } + return i.LayerByDigest(h) +} + +// ConfigFile implements v1.Image +func (i *compressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return ConfigFile(i) +} + +// Manifest implements v1.Image +func (i *compressedImageExtender) Manifest() (*v1.Manifest, error) { + return Manifest(i) +} + +// CompressedToImage fills in the missing methods from a CompressedImageCore so that it implements v1.Image +func CompressedToImage(cic CompressedImageCore) (v1.Image, error) { + return &compressedImageExtender{ + CompressedImageCore: cic, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/partial/doc.go b/vendor/github.com/google/go-containerregistry/v1/partial/doc.go new file mode 100644 index 0000000000..153dfe4d53 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/partial/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package partial defines methods for building up a v1.Image from +// minimal subsets that are sufficient for defining a v1.Image. 
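+//
+// A hedged sketch of the intended pattern (myCore is an assumed type that
+// satisfies UncompressedImageCore):
+//
+//	img, err := UncompressedToImage(myCore)
+//	if err != nil { /* handle */ }
+//	// img now satisfies all of v1.Image.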
+package partial diff --git a/vendor/github.com/google/go-containerregistry/v1/partial/image.go b/vendor/github.com/google/go-containerregistry/v1/partial/image.go new file mode 100644 index 0000000000..b82ea36b69 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/partial/image.go @@ -0,0 +1,28 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "github.com/google/go-containerregistry/v1/types" +) + +// imageCore is the core set of properties without which we cannot build a v1.Image +type imageCore interface { + // RawConfigFile returns the serialized bytes of this image's config file. + RawConfigFile() ([]byte, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) +} diff --git a/vendor/github.com/google/go-containerregistry/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/v1/partial/uncompressed.go new file mode 100644 index 0000000000..fb8c38bd02 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/partial/uncompressed.go @@ -0,0 +1,229 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "bytes" + "io" + "sync" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/types" + "github.com/google/go-containerregistry/v1/v1util" +) + +// UncompressedLayer represents the bare minimum interface a natively +// uncompressed layer must implement for us to produce a v1.Layer +type UncompressedLayer interface { + // DiffID returns the Hash of the uncompressed layer. + DiffID() (v1.Hash, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) +} + +// uncompressedLayerExtender implements v1.Layer using the uncompressed base properties. +type uncompressedLayerExtender struct { + UncompressedLayer + // TODO(mattmoor): Memoize size/hash so that the methods aren't twice as + // expensive as doing this manually.
+} + +// Compressed implements v1.Layer +func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { + u, err := ule.Uncompressed() + if err != nil { + return nil, err + } + return v1util.GzipReadCloser(u) +} + +// Digest implements v1.Layer +func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { + r, err := ule.Compressed() + if err != nil { + return v1.Hash{}, err + } + defer r.Close() + h, _, err := v1.SHA256(r) + return h, err +} + +// Size implements v1.Layer +func (ule *uncompressedLayerExtender) Size() (int64, error) { + r, err := ule.Compressed() + if err != nil { + return -1, err + } + defer r.Close() + _, i, err := v1.SHA256(r) + return i, err +} + +// UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer +func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { + return &uncompressedLayerExtender{ul}, nil +} + +// UncompressedImageCore represents the bare minimum interface a natively +// uncompressed image must implement for us to produce a v1.Image +type UncompressedImageCore interface { + imageCore + + // LayerByDiffID is a variation on the v1.Image method, which returns + // an UncompressedLayer instead. + LayerByDiffID(v1.Hash) (UncompressedLayer, error) +} + +// UncompressedToImage fills in the missing methods from an UncompressedImageCore so that it implements v1.Image. +func UncompressedToImage(uic UncompressedImageCore) (v1.Image, error) { + return &uncompressedImageExtender{ + UncompressedImageCore: uic, + }, nil +} + +// uncompressedImageExtender implements v1.Image by extending UncompressedImageCore with the +// appropriate methods computed from the minimal core. +type uncompressedImageExtender struct { + UncompressedImageCore + + lock sync.Mutex + manifest *v1.Manifest +} + +// Assert that our extender type completes the v1.Image interface +var _ v1.Image = (*uncompressedImageExtender)(nil) + +// BlobSet implements v1.Image +func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { + return BlobSet(i) +} + +// Digest implements v1.Image +func (i *uncompressedImageExtender) Digest() (v1.Hash, error) { + return Digest(i) +} + +// Manifest implements v1.Image +func (i *uncompressedImageExtender) Manifest() (*v1.Manifest, error) { + i.lock.Lock() + defer i.lock.Unlock() + if i.manifest != nil { + return i.manifest, nil + } + + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + m := &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + ls, err := i.Layers() + if err != nil { + return nil, err + } + + m.Layers = make([]v1.Descriptor, len(ls)) + for i, l := range ls { + sz, err := l.Size() + if err != nil { + return nil, err + } + h, err := l.Digest() + if err != nil { + return nil, err + } + + m.Layers[i] = v1.Descriptor{ + MediaType: types.DockerLayer, + Size: sz, + Digest: h, + } + } + + i.manifest = m + return i.manifest, nil +} + +// RawManifest implements v1.Image +func (i *uncompressedImageExtender) RawManifest() ([]byte, error) { + return RawManifest(i) +} + +// ConfigName implements v1.Image +func (i *uncompressedImageExtender) ConfigName() (v1.Hash, error) { + return ConfigName(i) +} + +// ConfigFile implements v1.Image +func (i *uncompressedImageExtender) ConfigFile() (*v1.ConfigFile, error) { + return 
ConfigFile(i) +} + +// Layers implements v1.Image +func (i *uncompressedImageExtender) Layers() ([]v1.Layer, error) { + diffIDs, err := DiffIDs(i) + if err != nil { + return nil, err + } + ls := make([]v1.Layer, 0, len(diffIDs)) + for _, h := range diffIDs { + l, err := i.LayerByDiffID(h) + if err != nil { + return nil, err + } + ls = append(ls, l) + } + return ls, nil +} + +// LayerByDiffID implements v1.Image +func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, error) { + ul, err := i.UncompressedImageCore.LayerByDiffID(diffID) + if err != nil { + return nil, err + } + return UncompressedToLayer(ul) +} + +// LayerByDigest implements v1.Image +func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { + // Support returning the ConfigFile when asked for its hash. + if cfgName, err := i.ConfigName(); err != nil { + return nil, err + } else if cfgName == h { + return ConfigLayer(i) + } + + diffID, err := BlobToDiffID(i, h) + if err != nil { + return nil, err + } + return i.LayerByDiffID(diffID) +} diff --git a/vendor/github.com/google/go-containerregistry/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/v1/partial/with.go new file mode 100644 index 0000000000..f97dedbb59 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/partial/with.go @@ -0,0 +1,287 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package partial + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/v1util" +) + +// WithRawConfigFile defines the subset of v1.Image used by these helper methods +type WithRawConfigFile interface { + // RawConfigFile returns the serialized bytes of this image's config file. + RawConfigFile() ([]byte, error) +} + +// ConfigFile is a helper for implementing v1.Image +func ConfigFile(i WithRawConfigFile) (*v1.ConfigFile, error) { + b, err := i.RawConfigFile() + if err != nil { + return nil, err + } + return v1.ParseConfigFile(bytes.NewReader(b)) +} + +// ConfigName is a helper for implementing v1.Image +func ConfigName(i WithRawConfigFile) (v1.Hash, error) { + b, err := i.RawConfigFile() + if err != nil { + return v1.Hash{}, err + } + h, _, err := v1.SHA256(bytes.NewReader(b)) + return h, err +} + +// configLayer implements v1.Layer from the raw config bytes. +// This is so that clients (e.g. remote) can access the config as a blob. 
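+// Note that its Digest and DiffID coincide, and Compressed and Uncompressed
+// both return the raw bytes, because the config blob is never gzipped.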
+type configLayer struct { + hash v1.Hash + content []byte +} + +// Digest implements v1.Layer +func (cl *configLayer) Digest() (v1.Hash, error) { + return cl.hash, nil +} + +// DiffID implements v1.Layer +func (cl *configLayer) DiffID() (v1.Hash, error) { + return cl.hash, nil +} + +// Uncompressed implements v1.Layer +func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { + return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil +} + +// Compressed implements v1.Layer +func (cl *configLayer) Compressed() (io.ReadCloser, error) { + return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil +} + +// Size implements v1.Layer +func (cl *configLayer) Size() (int64, error) { + return int64(len(cl.content)), nil +} + +var _ v1.Layer = (*configLayer)(nil) + +// ConfigLayer creates a v1.Layer from an image's raw config bytes, so the config can be handled like any other blob. +func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { + h, err := ConfigName(i) + if err != nil { + return nil, err + } + rcfg, err := i.RawConfigFile() + if err != nil { + return nil, err + } + return &configLayer{ + hash: h, + content: rcfg, + }, nil +} + +// WithConfigFile defines the subset of v1.Image used by these helper methods +type WithConfigFile interface { + // ConfigFile returns this image's config file. + ConfigFile() (*v1.ConfigFile, error) +} + +// DiffIDs is a helper for implementing v1.Image +func DiffIDs(i WithConfigFile) ([]v1.Hash, error) { + cfg, err := i.ConfigFile() + if err != nil { + return nil, err + } + return cfg.RootFS.DiffIDs, nil +} + +// RawConfigFile is a helper for implementing v1.Image +func RawConfigFile(i WithConfigFile) ([]byte, error) { + cfg, err := i.ConfigFile() + if err != nil { + return nil, err + } + return json.Marshal(cfg) +} + +// WithUncompressedLayer defines the subset of v1.Image used by these helper methods +type WithUncompressedLayer interface { + // UncompressedLayer is like UncompressedBlob, but takes the "diff id". + UncompressedLayer(v1.Hash) (io.ReadCloser, error) +} + +// Layer is the same as Blob, but takes the "diff id". +func Layer(wul WithUncompressedLayer, h v1.Hash) (io.ReadCloser, error) { + rc, err := wul.UncompressedLayer(h) + if err != nil { + return nil, err + } + return v1util.GzipReadCloser(rc) +} + +// WithRawManifest defines the subset of v1.Image used by these helper methods +type WithRawManifest interface { + // RawManifest returns the serialized bytes of this image's manifest. + RawManifest() ([]byte, error) +} + +// Digest is a helper for implementing v1.Image +func Digest(i WithRawManifest) (v1.Hash, error) { + mb, err := i.RawManifest() + if err != nil { + return v1.Hash{}, err + } + digest, _, err := v1.SHA256(bytes.NewReader(mb)) + return digest, err +} + +// Manifest is a helper for implementing v1.Image +func Manifest(i WithRawManifest) (*v1.Manifest, error) { + b, err := i.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseManifest(bytes.NewReader(b)) +} + +// WithManifest defines the subset of v1.Image used by these helper methods +type WithManifest interface { + // Manifest returns this image's Manifest object.
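+ //
+ // Given just this method, the helpers below (RawManifest, FSLayers,
+ // BlobSet, BlobSize) can derive the rest; a hedged sketch, with m any
+ // WithManifest value:
+ //
+ //	digests, err := FSLayers(m)
+ //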
+ Manifest() (*v1.Manifest, error) +} + +// RawManifest is a helper for implementing v1.Image +func RawManifest(i WithManifest) ([]byte, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + return json.Marshal(m) +} + +// FSLayers is a helper for implementing v1.Image +func FSLayers(i WithManifest) ([]v1.Hash, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + fsl := make([]v1.Hash, len(m.Layers)) + for i, l := range m.Layers { + fsl[i] = l.Digest + } + return fsl, nil +} + +// BlobSet is a helper for implementing v1.Image +func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) { + m, err := i.Manifest() + if err != nil { + return nil, err + } + bs := make(map[v1.Hash]struct{}) + for _, l := range m.Layers { + bs[l.Digest] = struct{}{} + } + bs[m.Config.Digest] = struct{}{} + return bs, nil +} + +// BlobSize is a helper for implementing v1.Image +func BlobSize(i WithManifest, h v1.Hash) (int64, error) { + m, err := i.Manifest() + if err != nil { + return -1, err + } + for _, l := range m.Layers { + if l.Digest == h { + return l.Size, nil + } + } + return -1, fmt.Errorf("blob %v not found", h) +} + +// WithManifestAndConfigFile defines the subset of v1.Image used by these helper methods +type WithManifestAndConfigFile interface { + WithConfigFile + + // Manifest returns this image's Manifest object. + Manifest() (*v1.Manifest, error) +} + +// BlobToDiffID is a helper for mapping between compressed +// and uncompressed blob hashes. +func BlobToDiffID(i WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(i) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(i) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, blob := range blobs { + if blob == h { + return diffIDs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown blob %v", h) +} + +// DiffIDToBlob is a helper for mapping between uncompressed +// and compressed blob hashes. +func DiffIDToBlob(wm WithManifestAndConfigFile, h v1.Hash) (v1.Hash, error) { + blobs, err := FSLayers(wm) + if err != nil { + return v1.Hash{}, err + } + diffIDs, err := DiffIDs(wm) + if err != nil { + return v1.Hash{}, err + } + if len(blobs) != len(diffIDs) { + return v1.Hash{}, fmt.Errorf("mismatched fs layers (%d) and diff ids (%d)", len(blobs), len(diffIDs)) + } + for i, diffID := range diffIDs { + if diffID == h { + return blobs[i], nil + } + } + return v1.Hash{}, fmt.Errorf("unknown diffID %v", h) + +} + +// WithBlob defines the subset of v1.Image used by these helper methods +type WithBlob interface { + // Blob returns a ReadCloser for streaming the blob's content. + Blob(v1.Hash) (io.ReadCloser, error) +} + +// UncompressedBlob returns a ReadCloser for streaming the blob's content uncompressed. +func UncompressedBlob(b WithBlob, h v1.Hash) (io.ReadCloser, error) { + rc, err := b.Blob(h) + if err != nil { + return nil, err + } + return v1util.GunzipReadCloser(rc) +} diff --git a/vendor/github.com/google/go-containerregistry/v1/random/doc.go b/vendor/github.com/google/go-containerregistry/v1/random/doc.go new file mode 100644 index 0000000000..d3712767d2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/random/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package random provides a facility for synthesizing pseudo-random images. +package random diff --git a/vendor/github.com/google/go-containerregistry/v1/random/image.go b/vendor/github.com/google/go-containerregistry/v1/random/image.go new file mode 100644 index 0000000000..5f5bc903e7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/random/image.go @@ -0,0 +1,128 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package random + +import ( + "archive/tar" + "bytes" + "crypto/rand" + "fmt" + "io" + "time" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/partial" + "github.com/google/go-containerregistry/v1/types" + "github.com/google/go-containerregistry/v1/v1util" +) + +// uncompressedLayer implements partial.UncompressedLayer from raw bytes. +// TODO(mattmoor): Consider moving this into a library. +type uncompressedLayer struct { + diffID v1.Hash + content []byte +} + +// DiffID implements partial.UncompressedLayer +func (ul *uncompressedLayer) DiffID() (v1.Hash, error) { + return ul.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ul *uncompressedLayer) Uncompressed() (io.ReadCloser, error) { + return v1util.NopReadCloser(bytes.NewBuffer(ul.content)), nil +} + +var _ partial.UncompressedLayer = (*uncompressedLayer)(nil) + +// Image returns a pseudo-randomly generated Image. +func Image(byteSize, layers int64) (v1.Image, error) { + layerz := make(map[v1.Hash]partial.UncompressedLayer) + for i := int64(0); i < layers; i++ { + var b bytes.Buffer + tw := tar.NewWriter(&b) + if err := tw.WriteHeader(&tar.Header{ + Name: fmt.Sprintf("random_file_%d.txt", i), + Size: byteSize, + }); err != nil { + return nil, err + } + if _, err := io.CopyN(tw, rand.Reader, byteSize); err != nil { + return nil, err + } + bts := b.Bytes() + h, _, err := v1.SHA256(bytes.NewReader(bts)) + if err != nil { + return nil, err + } + layerz[h] = &uncompressedLayer{ + diffID: h, + content: bts, + } + } + + cfg := &v1.ConfigFile{} + + // It is ok that iteration order is random in Go, because this is the random image anyways. 
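+ // (A hedged usage aside: tests typically construct one of these with
+ // something like
+ //
+ //	img, err := Image(1024, 3) // three layers of 1024 random bytes each
+ //
+ // where both argument values are illustrative.)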
+ for k := range layerz { + cfg.RootFS.DiffIDs = append(cfg.RootFS.DiffIDs, k) + } + + for i := int64(0); i < layers; i++ { + cfg.History = append(cfg.History, v1.History{ + Author: "random.Image", + Comment: fmt.Sprintf("this is a random history %d", i), + CreatedBy: "random", + Created: v1.Time{Time: time.Now()}, + }) + } + + return partial.UncompressedToImage(&image{ + config: cfg, + layers: layerz, + }) +} + +// image is pseudo-randomly generated. +type image struct { + config *v1.ConfigFile + layers map[v1.Hash]partial.UncompressedLayer +} + +var _ partial.UncompressedImageCore = (*image)(nil) + +// RawConfigFile implements partial.UncompressedImageCore +func (i *image) RawConfigFile() ([]byte, error) { + return partial.RawConfigFile(i) +} + +// ConfigFile implements v1.Image +func (i *image) ConfigFile() (*v1.ConfigFile, error) { + return i.config, nil +} + +// MediaType implements partial.UncompressedImageCore +func (i *image) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// LayerByDiffID implements partial.UncompressedImageCore +func (i *image) LayerByDiffID(diffID v1.Hash) (partial.UncompressedLayer, error) { + l, ok := i.layers[diffID] + if !ok { + return nil, fmt.Errorf("unknown diff_id: %v", diffID) + } + return l, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/v1/remote/delete.go new file mode 100644 index 0000000000..ce7d62b9ef --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/delete.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1/remote/transport" +) + +// DeleteOptions are used to expose optional information to guide or +// control the image deletion. +type DeleteOptions struct { + // TODO(mattmoor): Fail on not found? + // TODO(mattmoor): Delete tag and manifest? +} + +// Delete removes the specified image reference from the remote registry.
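+// A hedged usage sketch; the reference string, the use of authn.Anonymous,
+// and the default transport are illustrative assumptions:
+//
+//	tag, err := name.NewTag("gcr.io/foo/bar:latest", name.WeakValidation)
+//	if err != nil { /* handle */ }
+//	err = Delete(tag, authn.Anonymous, http.DefaultTransport, DeleteOptions{})
+//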
+func Delete(ref name.Reference, auth authn.Authenticator, t http.RoundTripper, do DeleteOptions) error { + scopes := []string{ref.Scope(transport.DeleteScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + c := &http.Client{Transport: tr} + + u := url.URL{ + Scheme: transport.Scheme(ref.Context().Registry), + Host: ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/manifests/%s", ref.Context().RepositoryStr(), ref.Identifier()), + } + + req, err := http.NewRequest(http.MethodDelete, u.String(), nil) + if err != nil { + return err + } + + resp, err := c.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + return nil + default: + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + return fmt.Errorf("unrecognized status code during DELETE: %v; %v", resp.Status, string(b)) + } +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/doc.go b/vendor/github.com/google/go-containerregistry/v1/remote/doc.go new file mode 100644 index 0000000000..846ba07cda --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package remote provides facilities for reading/writing v1.Images from/to +// a remote image registry. +package remote diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/error.go b/vendor/github.com/google/go-containerregistry/v1/remote/error.go new file mode 100644 index 0000000000..8c4b814e24 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/error.go @@ -0,0 +1,106 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" +) + +// Error implements error to support the following error specification: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors +type Error struct { + Errors []Diagnostic `json:"errors,omitempty"` +} + +// Check that Error implements error +var _ error = (*Error)(nil) + +// Error implements error +func (e *Error) Error() string { + switch len(e.Errors) { + case 0: + return "" + case 1: + return e.Errors[0].String() + default: + var errors []string + for _, d := range e.Errors { + errors = append(errors, d.String()) + } + return fmt.Sprintf("multiple errors returned: %s", + strings.Join(errors, ";")) + } +} + +// Diagnostic represents a single error returned by a Docker registry interaction. +type Diagnostic struct { + Code ErrorCode `json:"code"` + Message string `json:"message,omitempty"` + Detail interface{} `json:"detail,omitempty"` +} + +// String stringifies the Diagnostic +func (d Diagnostic) String() string { + return fmt.Sprintf("%s: %q", d.Code, d.Message) +} + +// ErrorCode is an enumeration of supported error codes. +type ErrorCode string + +// The set of error conditions a registry may return: +// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 +const ( + BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" + BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" + BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN" + DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID" + ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN" + ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID" + ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN" + ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED" + NameInvalidErrorCode ErrorCode = "NAME_INVALID" + NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN" + SizeInvalidErrorCode ErrorCode = "SIZE_INVALID" + TagInvalidErrorCode ErrorCode = "TAG_INVALID" + UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED" + DeniedErrorCode ErrorCode = "DENIED" + UnsupportedErrorCode ErrorCode = "UNSUPPORTED" +) + +func checkError(resp *http.Response, codes ...int) error { + for _, code := range codes { + if resp.StatusCode == code { + // This is one of the supported status codes. + return nil + } + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors + var structuredError Error + if err := json.Unmarshal(b, &structuredError); err != nil { + // If the response isn't a structured error, then return some + // reasonable error response containing the response body. + return fmt.Errorf("unsupported status code %d; body: %s", resp.StatusCode, string(b)) + } + return &structuredError +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/v1/remote/image.go new file mode 100644 index 0000000000..d4c6d74abe --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/image.go @@ -0,0 +1,200 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sync" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/partial" + "github.com/google/go-containerregistry/v1/remote/transport" + "github.com/google/go-containerregistry/v1/types" + "github.com/google/go-containerregistry/v1/v1util" +) + +// remoteImage accesses an image from a remote registry +type remoteImage struct { + ref name.Reference + client *http.Client + manifestLock sync.Mutex // Protects manifest + manifest []byte + configLock sync.Mutex // Protects config + config []byte +} + +var _ partial.CompressedImageCore = (*remoteImage)(nil) + +// Image accesses a given image reference over the provided transport, with the provided authentication. +func Image(ref name.Reference, auth authn.Authenticator, t http.RoundTripper) (v1.Image, error) { + scopes := []string{ref.Scope(transport.PullScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return nil, err + } + return partial.CompressedToImage(&remoteImage{ + ref: ref, + client: &http.Client{Transport: tr}, + }) +} + +func (r *remoteImage) url(resource, identifier string) url.URL { + return url.URL{ + Scheme: transport.Scheme(r.ref.Context().Registry), + Host: r.ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier), + } +} + +func (r *remoteImage) MediaType() (types.MediaType, error) { + // TODO(jonjohnsonjr): Determine this based on response. + return types.DockerManifestSchema2, nil +} + +// TODO(jonjohnsonjr): Handle manifest lists. +func (r *remoteImage) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + u := r.url("manifests", r.ref.Identifier()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, err + } + // TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index. + req.Header.Set("Accept", string(types.DockerManifestSchema2)) + resp, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkError(resp, http.StatusOK); err != nil { + return nil, err + } + + manifest, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + digest, _, err := v1.SHA256(bytes.NewReader(manifest)) + if err != nil { + return nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. 
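+ // (Hedged clarification: for a by-digest reference the computed digest
+ // must equal the requested one exactly; for a by-tag pull the only
+ // available cross-check is the registry's Docker-Content-Digest header,
+ // handled just below.)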
+ if dgst, ok := r.ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref) + } + } else if checksum := resp.Header.Get("Docker-Content-Digest"); checksum != "" && checksum != digest.String() { + err := fmt.Errorf("manifest digest: %q does not match Docker-Content-Digest: %q for %q", digest, checksum, r.ref) + if r.ref.Context().RegistryStr() == name.DefaultRegistry { + // TODO(docker/distribution#2395): Remove this check. + } else { + // When pulling by tag, we can only validate that the digest matches what the registry told us it should be. + return nil, err + } + } + + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteImage) RawConfigFile() ([]byte, error) { + r.configLock.Lock() + defer r.configLock.Unlock() + if r.config != nil { + return r.config, nil + } + + m, err := partial.Manifest(r) + if err != nil { + return nil, err + } + + cl, err := r.LayerByDigest(m.Config.Digest) + if err != nil { + return nil, err + } + body, err := cl.Compressed() + if err != nil { + return nil, err + } + defer body.Close() + + r.config, err = ioutil.ReadAll(body) + if err != nil { + return nil, err + } + return r.config, nil +} + +// remoteLayer implements partial.CompressedLayer +type remoteLayer struct { + ri *remoteImage + digest v1.Hash +} + +// Digest implements partial.CompressedLayer +func (rl *remoteLayer) Digest() (v1.Hash, error) { + return rl.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { + u := rl.ri.url("blobs", rl.digest.String()) + resp, err := rl.ri.client.Get(u.String()) + if err != nil { + return nil, err + } + + if err := checkError(resp, http.StatusOK); err != nil { + resp.Body.Close() + return nil, err + } + + return v1util.VerifyReadCloser(resp.Body, rl.digest) +} + +// Manifest implements partial.WithManifest so that we can use partial.BlobSize below. +func (rl *remoteLayer) Manifest() (*v1.Manifest, error) { + return partial.Manifest(rl.ri) +} + +// Size implements partial.CompressedLayer +func (rl *remoteLayer) Size() (int64, error) { + // Look up the size of this digest in the manifest to avoid a request. + return partial.BlobSize(rl, rl.digest) +} + +// LayerByDigest implements partial.CompressedLayer +func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + return &remoteLayer{ + ri: r, + digest: h, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/v1/remote/list.go new file mode 100644 index 0000000000..f1e206dddd --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/list.go @@ -0,0 +1,64 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package remote + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1/remote/transport" +) + +type Tags struct { + Name string `json:"name"` + Tags []string `json:"tags"` +} + +// TODO(jonjohnsonjr): return []name.Tag? +func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) { + scopes := []string{repo.Scope(transport.PullScope)} + tr, err := transport.New(repo.Registry, auth, t, scopes) + if err != nil { + return nil, err + } + + uri := url.URL{ + Scheme: transport.Scheme(repo.Registry), + Host: repo.Registry.RegistryStr(), + Path: fmt.Sprintf("/v2/%s/tags/list", repo.RepositoryStr()), + } + + client := http.Client{Transport: tr} + resp, err := client.Get(uri.String()) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkError(resp, http.StatusOK); err != nil { + return nil, err + } + + tags := Tags{} + if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + return nil, err + } + + return tags.Tags, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/basic.go new file mode 100644 index 0000000000..a9ab46beb3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/basic.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "net/http" + + "github.com/google/go-containerregistry/authn" +) + +type basicTransport struct { + inner http.RoundTripper + auth authn.Authenticator + target string +} + +var _ http.RoundTripper = (*basicTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { + hdr, err := bt.auth.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the host with which we are interacting. + if in.Host == bt.target { + in.Header.Set("Authorization", hdr) + } + in.Header.Set("User-Agent", transportName) + return bt.inner.RoundTrip(in) +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/bearer.go new file mode 100644 index 0000000000..f69dd9a7b3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/bearer.go @@ -0,0 +1,119 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" +) + +type bearerTransport struct { + // Wrapped by bearerTransport. + inner http.RoundTripper + // Basic credentials that we exchange for bearer tokens. + basic authn.Authenticator + // Holds the bearer response from the token service. + bearer *authn.Bearer + // Registry to which we send bearer tokens. + registry name.Registry + // See https://tools.ietf.org/html/rfc6750#section-3 + realm string + // See https://docs.docker.com/registry/spec/auth/token/ + service string + scopes []string +} + +var _ http.RoundTripper = (*bearerTransport)(nil) + +// RoundTrip implements http.RoundTripper +func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { + hdr, err := bt.bearer.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the registry with which we are interacting. + if in.Host == bt.registry.RegistryStr() { + in.Header.Set("Authorization", hdr) + } + in.Header.Set("User-Agent", transportName) + + // TODO(mattmoor): On 401s perform a single refresh() and retry. + return bt.inner.RoundTrip(in) +} + +func (bt *bearerTransport) refresh() error { + u, err := url.Parse(bt.realm) + if err != nil { + return err + } + b := &basicTransport{ + inner: bt.inner, + auth: bt.basic, + target: u.Host, + } + client := http.Client{Transport: b} + + u.RawQuery = url.Values{ + "scope": bt.scopes, + "service": []string{bt.service}, + }.Encode() + + resp, err := client.Get(u.String()) + if err != nil { + return err + } + defer resp.Body.Close() + + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + + // Some registries don't have "token" in the response. See #54. + type tokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + } + + var response tokenResponse + if err := json.Unmarshal(content, &response); err != nil { + return err + } + + // Find a token to turn into a Bearer authenticator + var bearer authn.Bearer + if response.Token != "" { + bearer = authn.Bearer{Token: response.Token} + } else if response.AccessToken != "" { + bearer = authn.Bearer{Token: response.AccessToken} + } else { + return fmt.Errorf("no token in bearer response:\n%s", content) + } + + // Replace our old bearer authenticator (if we had one) with our newly refreshed authenticator. + bt.bearer = &bearer + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/doc.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/doc.go new file mode 100644 index 0000000000..ff7025b5c0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides facilities for setting up an authenticated +// http.RoundTripper given an Authenticator and base RoundTripper. See +// transport.New for more information. +package transport diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/ping.go new file mode 100644 index 0000000000..82d7546490 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/ping.go @@ -0,0 +1,89 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + "strings" + + "github.com/google/go-containerregistry/name" +) + +type challenge string + +const ( + anonymous challenge = "Anonymous" + basic challenge = "Basic" + bearer challenge = "Bearer" +) + +type pingResp struct { + challenge challenge + + // Following the challenge there are often key/value pairs + // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" + parameters map[string]string +} + +func parseChallenge(suffix string) map[string]string { + kv := make(map[string]string) + for _, token := range strings.Split(suffix, ",") { + // Trim any whitespace around each token. + token = strings.Trim(token, " ") + + // Break the token into a key/value pair + if parts := strings.SplitN(token, "=", 2); len(parts) == 2 { + // Unquote the value, if it is quoted. + kv[parts[0]] = strings.Trim(parts[1], `"`) + } else { + // If there was only one part, treat it as a key with an empty value + kv[token] = "" + } + } + return kv +} + +func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) { + client := http.Client{Transport: t} + + url := fmt.Sprintf("%s://%s/v2/", Scheme(reg), reg.Name()) + resp, err := client.Get(url) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{challenge: anonymous}, nil + case http.StatusUnauthorized: + wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) + if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { + // If there are two parts, then parse the challenge parameters.
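+ // For example (values are illustrative), the header Bearer realm="https://auth.example.com/token",service="registry.example.com" yields the "Bearer" challenge with "realm" and "service" parameters.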
+ return &pingResp{ + challenge: challenge(strings.Title(parts[0])), + parameters: parseChallenge(parts[1]), + }, nil + } + // Otherwise, just return the challenge without parameters. + return &pingResp{ + challenge: challenge(strings.Title(wac)), + }, nil + default: + return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) + } +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/scheme.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/scheme.go new file mode 100644 index 0000000000..b9d1dd3bf0 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/scheme.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "regexp" + "strings" + + "github.com/google/go-containerregistry/name" +) + +// Detect more complex forms of local references. +var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`) + +// Detect the loopback IP (127.0.0.1) +var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1")) + +// Scheme returns the https scheme for all endpoints, except local ones (localhost, *.local(host), and the 127.0.0.1 loopback), which are assumed to serve plain http. +func Scheme(reg name.Registry) string { + if strings.HasPrefix(reg.Name(), "localhost:") { + return "http" + } + if reLocal.MatchString(reg.Name()) { + return "http" + } + if reLoopback.MatchString(reg.Name()) { + return "http" + } + return "https" +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/scope.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/scope.go new file mode 100644 index 0000000000..c3b56f7a41 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/scope.go @@ -0,0 +1,24 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +// Scopes suitable to qualify each Repository +const ( + PullScope string = "pull" + PushScope string = "push,pull" + // For now DELETE is PUSH, which is the read/write ACL.
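+ // Scopes are passed to the token service when a Bearer token is requested; a repository scope is rendered like "repository:library/ubuntu:pull" (example value, see name.Repository.Scope).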
+ DeleteScope string = PushScope + CatalogScope string = "catalog" +) diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/v1/remote/transport/transport.go new file mode 100644 index 0000000000..fd1bc5065d --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/transport/transport.go @@ -0,0 +1,84 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package transport + +import ( + "fmt" + "net/http" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" +) + +const ( + transportName = "go-containerregistry" +) + +// New returns a new RoundTripper based on the provided RoundTripper that has been +// set up to authenticate with the remote registry "reg", in the capacity +// laid out by the specified scopes. +func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) { + // The handshake: + // 1. Use "t" to ping() the registry for the authentication challenge. + // + // 2a. If we get back a 200, then simply use "t". + // + // 2b. If we get back a 401 with a Basic challenge, then use a transport + // that just attaches auth to each round trip. + // + // 2c. If we get back a 401 with a Bearer challenge, then use a transport + // that attaches a bearer token to each request, and refreshes it on 401s. + // Perform an initial refresh to seed the bearer token. + + // First we ping the registry to determine the parameters of the authentication handshake + // (if one is even necessary). + pr, err := ping(reg, t) + if err != nil { + return nil, err + } + + switch pr.challenge { + case anonymous: + return t, nil + case basic: + return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil + case bearer: + // We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth. + realm, ok := pr.parameters["realm"] + if !ok { + return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters) + } + service, ok := pr.parameters["service"] + if !ok { + // If the service parameter is not specified, then default it to the registry + // with which we are talking. + service = reg.String() + } + bt := &bearerTransport{ + inner: t, + basic: auth, + realm: realm, + registry: reg, + service: service, + scopes: scopes, + } + if err := bt.refresh(); err != nil { + return nil, err + } + return bt, nil + default: + return nil, fmt.Errorf("Unrecognized challenge: %s", pr.challenge) + } +} diff --git a/vendor/github.com/google/go-containerregistry/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/v1/remote/write.go new file mode 100644 index 0000000000..966271fac1 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/remote/write.go @@ -0,0 +1,293 @@ +// Copyright 2018 Google LLC All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "errors" + "fmt" + "log" + "net/http" + "net/url" + + "github.com/google/go-containerregistry/authn" + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/remote/transport" +) + +// WriteOptions are used to expose optional information to guide or +// control the image write. +type WriteOptions struct { + // The set of paths from which to attempt to mount blobs. + MountPaths []name.Repository + // TODO(mattmoor): Expose "threads" to limit parallelism? +} + +// Write pushes the provided img to the specified image reference. +func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper, + wo WriteOptions) error { + + scopes := []string{ref.Scope(transport.PushScope)} + for _, mp := range wo.MountPaths { + scopes = append(scopes, mp.Scope(transport.PullScope)) + } + + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + w := writer{ + ref: ref, + client: &http.Client{Transport: tr}, + img: img, + options: wo, + } + + bs, err := img.BlobSet() + if err != nil { + return err + } + + // Spin up goroutines to publish each of the members of BlobSet(), + // and use an error channel to collect their results. + errCh := make(chan error) + defer close(errCh) + for h := range bs { + go func(h v1.Hash) { + errCh <- w.uploadOne(h) + }(h) + } + + // Now wait for all of the blob uploads to complete. + var errs []error + for range bs { + if err := <-errCh; err != nil { + errs = append(errs, err) + } + } + if len(errs) > 0 { + // Return the first error we encountered. + return errs[0] + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitImage() +} + +// writer writes the elements of an image to a remote image reference. +type writer struct { + ref name.Reference + client *http.Client + img v1.Image + options WriteOptions +} + +// url returns a url.URL for the specified path in the context of this remote image reference. +func (w *writer) url(path string) url.URL { + return url.URL{ + Scheme: transport.Scheme(w.ref.Context().Registry), + Host: w.ref.Context().RegistryStr(), + Path: path, + } +} + +// nextLocation extracts the fully-qualified URL to which we should send the next request in an upload sequence. +func (w *writer) nextLocation(resp *http.Response) (string, error) { + loc := resp.Header.Get("Location") + if len(loc) == 0 { + return "", errors.New("missing Location header") + } + u, err := url.Parse(loc) + if err != nil { + return "", err + } + + // If the location header returned is just a url path, then fully qualify it. + // We cannot simply call w.url, since there might be an embedded query string.
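+ // ResolveReference covers both cases: an absolute URL is returned as-is, while a bare path (possibly carrying a query string) is resolved against the URL of the request we just made.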
+ return resp.Request.URL.ResolveReference(u).String(), nil +} + +// initiateUpload initiates the blob upload, which starts with a POST that can +// optionally include the hash of the layer and a list of repositories from +// which that layer might be read. On failure, an error is returned. +// On success, the layer was either mounted (nothing more to do) or a blob +// upload was initiated and the body of that blob should be sent to the returned +// location. +func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err error) { + u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) + uv := url.Values{ + "mount": []string{h.String()}, + } + var from []string + for _, m := range w.options.MountPaths { + from = append(from, m.RepositoryStr()) + } + // We currently avoid HEAD because it's semi-redundant with the mount that is part + // of initiating the blob upload. GCR will perform an existence check on the initiation + // if "mount" is specified, even if no "from" sources are specified. If this turns out + // to not be broadly applicable then we should replace mounts without "from"s with a HEAD. + if len(from) > 0 { + uv["from"] = from + } + u.RawQuery = uv.Encode() + + // Make the request to initiate the blob upload. + resp, err := w.client.Post(u.String(), "application/json", nil) + if err != nil { + return "", false, err + } + defer resp.Body.Close() + + if err := checkError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + return "", false, err + } + + // Check the response code to determine the result. + switch resp.StatusCode { + case http.StatusCreated: + // We're done, we were able to fast-path. + return "", true, nil + case http.StatusAccepted: + // Proceed to PATCH, upload has begun. + loc, err := w.nextLocation(resp) + return loc, false, err + default: + panic("Unreachable: initiateUpload") + } +} + +// streamBlob streams the contents of the blob to the specified location. +// On failure, this will return an error. On success, this will return the location +// header indicating how to commit the streamed blob. +func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation string, err error) { + l, err := w.img.LayerByDigest(h) + if err != nil { + return "", err + } + blob, err := l.Compressed() + if err != nil { + return "", err + } + defer blob.Close() + + req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) + if err != nil { + return "", err + } + + resp, err := w.client.Do(req) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if err := checkError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + return "", err + } + + // The blob has been uploaded, return the location header indicating + // how to commit this layer. + return w.nextLocation(resp) +} + +// commitBlob commits this blob by sending a PUT to the location returned from streaming the blob. +func (w *writer) commitBlob(h v1.Hash, location string) (err error) { + u, err := url.Parse(location) + if err != nil { + return err + } + v := u.Query() + v.Set("digest", h.String()) + u.RawQuery = v.Encode() + + req, err := http.NewRequest(http.MethodPut, u.String(), nil) + if err != nil { + return err + } + + resp, err := w.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return checkError(resp, http.StatusCreated) +} + +// uploadOne performs a complete upload of a single layer. 
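+// The flow mirrors the registry blob-upload protocol: POST to initiate (possibly cross-repo mounting the blob), PATCH to stream the contents, then PUT to commit the blob by digest.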
+func (w *writer) uploadOne(h v1.Hash) error { + location, mounted, err := w.initiateUpload(h) + if err != nil { + return err + } else if mounted { + log.Printf("mounted blob: %v", h) + return nil + } + + location, err = w.streamBlob(h, location) + if err != nil { + return err + } + + if err := w.commitBlob(h, location); err != nil { + return err + } + log.Printf("pushed blob %v", h) + return nil +} + +// commitImage does a PUT of the image's manifest. +func (w *writer) commitImage() error { + raw, err := w.img.RawManifest() + if err != nil { + return err + } + mt, err := w.img.MediaType() + if err != nil { + return err + } + + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), w.ref.Identifier())) + + // Make the request to PUT the serialized manifest + req, err := http.NewRequest(http.MethodPut, u.String(), bytes.NewBuffer(raw)) + if err != nil { + return err + } + req.Header.Set("Content-Type", string(mt)) + + resp, err := w.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if err := checkError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + return err + } + + digest, err := w.img.Digest() + if err != nil { + return err + } + + // The image was successfully pushed! + fmt.Printf("%v: digest: %v size: %d\n", w.ref, digest, len(raw)) + return nil +} + +// TODO(mattmoor): WriteIndex diff --git a/vendor/github.com/google/go-containerregistry/v1/tarball/doc.go b/vendor/github.com/google/go-containerregistry/v1/tarball/doc.go new file mode 100644 index 0000000000..4eb79bb4e5 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/tarball/doc.go @@ -0,0 +1,17 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tarball provides facilities for reading/writing v1.Images from/to +// a tarball on-disk. +package tarball diff --git a/vendor/github.com/google/go-containerregistry/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/v1/tarball/image.go new file mode 100644 index 0000000000..7346dbcef2 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/tarball/image.go @@ -0,0 +1,338 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "sync" + + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/partial" + "github.com/google/go-containerregistry/v1/types" + "github.com/google/go-containerregistry/v1/v1util" +) + +type image struct { + opener Opener + td *tarDescriptor + config []byte + imgDescriptor *singleImageTarDescriptor + + tag *name.Tag +} + +type uncompressedImage struct { + *image +} + +type compressedImage struct { + *image + manifestLock sync.Mutex // Protects manifest + manifest *v1.Manifest +} + +var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) +var _ partial.CompressedImageCore = (*compressedImage)(nil) + +type Opener func() (io.ReadCloser, error) + +func pathOpener(path string) Opener { + return func() (io.ReadCloser, error) { + return os.Open(path) + } +} + +func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { + return Image(pathOpener(path), tag) +} + +// Image exposes an image from the tarball at the provided path. +func Image(opener Opener, tag *name.Tag) (v1.Image, error) { + img := &image{ + opener: opener, + tag: tag, + } + if err := img.loadTarDescriptorAndConfig(); err != nil { + return nil, err + } + + // Peek at the first layer and see if it's compressed. + compressed, err := img.areLayersCompressed() + if err != nil { + return nil, err + } + if compressed { + c := compressedImage{ + image: img, + } + return partial.CompressedToImage(&c) + } + + uc := uncompressedImage{ + image: img, + } + return partial.UncompressedToImage(&uc) +} + +func (i *image) MediaType() (types.MediaType, error) { + return types.DockerManifestSchema2, nil +} + +// singleImageTarDescriptor is the struct used to represent a single image inside a `docker save` tarball. +type singleImageTarDescriptor struct { + Config string + RepoTags []string + Layers []string +} + +// tarDescriptor is the struct used inside the `manifest.json` file of a `docker save` tarball. +type tarDescriptor []singleImageTarDescriptor + +func (td tarDescriptor) findSpecifiedImageDescriptor(tag *name.Tag) (*singleImageTarDescriptor, error) { + if tag == nil { + if len(td) != 1 { + return nil, errors.New("tarball must contain only a single image to be used with tarball.Image") + } + return &(td)[0], nil + } + for _, img := range td { + for _, tagStr := range img.RepoTags { + repoTag, err := name.NewTag(tagStr, name.WeakValidation) + if err != nil { + return nil, err + } + + // Compare the resolved names, since there are several ways to specify the same tag. 
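+ // e.g. "ubuntu" and "index.docker.io/library/ubuntu:latest" resolve to the same fully-qualified name.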
+ if repoTag.Name() == tag.Name() { + return &img, nil + } + } + } + return nil, fmt.Errorf("tag %s not found in tarball", tag) +} + +func (i *image) areLayersCompressed() (bool, error) { + if len(i.imgDescriptor.Layers) == 0 { + return false, errors.New("0 layers found in image") + } + layer := i.imgDescriptor.Layers[0] + blob, err := extractFileFromTar(i.opener, layer) + if err != nil { + return false, err + } + defer blob.Close() + return v1util.IsGzipped(blob) +} + +func (i *image) loadTarDescriptorAndConfig() error { + td, err := extractFileFromTar(i.opener, "manifest.json") + if err != nil { + return err + } + defer td.Close() + + if err := json.NewDecoder(td).Decode(&i.td); err != nil { + return err + } + + i.imgDescriptor, err = i.td.findSpecifiedImageDescriptor(i.tag) + if err != nil { + return err + } + + cfg, err := extractFileFromTar(i.opener, i.imgDescriptor.Config) + if err != nil { + return err + } + defer cfg.Close() + + i.config, err = ioutil.ReadAll(cfg) + if err != nil { + return err + } + return nil +} + +func (i *image) RawConfigFile() ([]byte, error) { + return i.config, nil +} + +// tarFile represents a single file inside a tar. Closing it closes the tar itself. +type tarFile struct { + io.Reader + io.Closer +} + +func extractFileFromTar(opener Opener, filePath string) (io.ReadCloser, error) { + f, err := opener() + if err != nil { + return nil, err + } + tf := tar.NewReader(f) + for { + hdr, err := tf.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if hdr.Name == filePath { + return tarFile{ + Reader: tf, + Closer: f, + }, nil + } + } + return nil, fmt.Errorf("file %s not found in tar", filePath) +} + +// uncompressedLayerFromTarball implements partial.UncompressedLayer +type uncompressedLayerFromTarball struct { + diffID v1.Hash + opener Opener + filePath string +} + +// DiffID implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) DiffID() (v1.Hash, error) { + return ulft.diffID, nil +} + +// Uncompressed implements partial.UncompressedLayer +func (ulft *uncompressedLayerFromTarball) Uncompressed() (io.ReadCloser, error) { + return extractFileFromTar(ulft.opener, ulft.filePath) +} + +func (i *uncompressedImage) LayerByDiffID(h v1.Hash) (partial.UncompressedLayer, error) { + cfg, err := partial.ConfigFile(i) + if err != nil { + return nil, err + } + for idx, diffID := range cfg.RootFS.DiffIDs { + if diffID == h { + return &uncompressedLayerFromTarball{ + diffID: diffID, + opener: i.opener, + filePath: i.imgDescriptor.Layers[idx], + }, nil + } + } + return nil, fmt.Errorf("diff id %q not found", h) +} + +func (c *compressedImage) Manifest() (*v1.Manifest, error) { + c.manifestLock.Lock() + defer c.manifestLock.Unlock() + if c.manifest != nil { + return c.manifest, nil + } + + b, err := c.RawConfigFile() + if err != nil { + return nil, err + } + + cfgHash, cfgSize, err := v1.SHA256(bytes.NewReader(b)) + if err != nil { + return nil, err + } + + c.manifest = &v1.Manifest{ + SchemaVersion: 2, + MediaType: types.DockerManifestSchema2, + Config: v1.Descriptor{ + MediaType: types.DockerConfigJSON, + Size: cfgSize, + Digest: cfgHash, + }, + } + + for _, p := range c.imgDescriptor.Layers { + l, err := extractFileFromTar(c.opener, p) + if err != nil { + return nil, err + } + defer l.Close() + sha, size, err := v1.SHA256(l) + if err != nil { + return nil, err + } + c.manifest.Layers = append(c.manifest.Layers, v1.Descriptor{ + MediaType: types.DockerLayer, + Size: size, + Digest: sha, + }) + } + return c.manifest, nil 
+} + +func (c *compressedImage) RawManifest() ([]byte, error) { + return partial.RawManifest(c) +} + +// compressedLayerFromTarball implements partial.CompressedLayer +type compressedLayerFromTarball struct { + digest v1.Hash + opener Opener + filePath string +} + +// Digest implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Digest() (v1.Hash, error) { + return clft.digest, nil +} + +// Compressed implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Compressed() (io.ReadCloser, error) { + return extractFileFromTar(clft.opener, clft.filePath) +} + +// Size implements partial.CompressedLayer +func (clft *compressedLayerFromTarball) Size() (int64, error) { + r, err := clft.Compressed() + if err != nil { + return -1, err + } + defer r.Close() + _, i, err := v1.SHA256(r) + return i, err +} + +func (c *compressedImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) { + m, err := c.Manifest() + if err != nil { + return nil, err + } + for i, l := range m.Layers { + if l.Digest == h { + fp := c.imgDescriptor.Layers[i] + return &compressedLayerFromTarball{ + digest: h, + opener: c.opener, + filePath: fp, + }, nil + } + } + return nil, fmt.Errorf("blob %v not found", h) +} diff --git a/vendor/github.com/google/go-containerregistry/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/v1/tarball/layer.go new file mode 100644 index 0000000000..3657dd9604 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/tarball/layer.go @@ -0,0 +1,144 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package tarball + +import ( + "compress/gzip" + "io" + "io/ioutil" + "os" + + "github.com/google/go-containerregistry/v1" + "github.com/google/go-containerregistry/v1/v1util" +) + +type layer struct { + digest v1.Hash + diffID v1.Hash + size int64 + opener Opener + compressed bool +} + +func (l *layer) Digest() (v1.Hash, error) { + return l.digest, nil +} + +func (l *layer) DiffID() (v1.Hash, error) { + return l.diffID, nil +} + +func (l *layer) Compressed() (io.ReadCloser, error) { + rc, err := l.opener() + if err == nil && !l.compressed { + return v1util.GzipReadCloser(rc) + } + + return rc, err +} + +func (l *layer) Uncompressed() (io.ReadCloser, error) { + rc, err := l.opener() + if err == nil && l.compressed { + return v1util.GunzipReadCloser(rc) + } + + return rc, err +} + +func (l *layer) Size() (int64, error) { + return l.size, nil +} + +// LayerFromFile returns a v1.Layer given a tarball +func LayerFromFile(path string) (v1.Layer, error) { + opener := func() (io.ReadCloser, error) { + return os.Open(path) + } + return LayerFromOpener(opener) +} + +// LayerFromOpener returns a v1.Layer given an Opener function +func LayerFromOpener(opener Opener) (v1.Layer, error) { + rc, err := opener() + if err != nil { + return nil, err + } + defer rc.Close() + + compressed, err := v1util.IsGzipped(rc) + if err != nil { + return nil, err + } + + var digest v1.Hash + var size int64 + if digest, size, err = computeDigest(opener, compressed); err != nil { + return nil, err + } + + diffID, err := computeDiffID(opener, compressed) + if err != nil { + return nil, err + } + + return &layer{ + digest: digest, + diffID: diffID, + size: size, + compressed: compressed, + opener: opener, + }, nil +} + +func computeDigest(opener Opener, compressed bool) (v1.Hash, int64, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, 0, err + } + defer rc.Close() + + if compressed { + return v1.SHA256(rc) + } + + reader, err := v1util.GzipReadCloser(ioutil.NopCloser(rc)) + if err != nil { + return v1.Hash{}, 0, err + } + + return v1.SHA256(reader) +} + +func computeDiffID(opener Opener, compressed bool) (v1.Hash, error) { + rc, err := opener() + if err != nil { + return v1.Hash{}, err + } + defer rc.Close() + + if !compressed { + digest, _, err := v1.SHA256(rc) + return digest, err + } + + reader, err := gzip.NewReader(rc) + if err != nil { + return v1.Hash{}, err + } + + diffID, _, err := v1.SHA256(reader) + return diffID, err +} diff --git a/vendor/github.com/google/go-containerregistry/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/v1/tarball/write.go new file mode 100644 index 0000000000..73a3f73d35 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/tarball/write.go @@ -0,0 +1,134 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tarball + +import ( + "archive/tar" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + + "github.com/google/go-containerregistry/name" + "github.com/google/go-containerregistry/v1" +) + +// WriteOptions are used to expose optional information to guide or +// control the image write. +type WriteOptions struct { + // TODO(mattmoor): Whether to store things compressed? +} + +// WriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.Write with a new file. +func WriteToFile(p string, tag name.Tag, img v1.Image, wo *WriteOptions) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return Write(tag, img, wo, w) +} + +// Write the contents of the image to the provided writer, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. +func Write(tag name.Tag, img v1.Image, wo *WriteOptions, w io.Writer) error { + tf := tar.NewWriter(w) + defer tf.Close() + + // Write the config. + cfgName, err := img.ConfigName() + if err != nil { + return err + } + cfgBlob, err := img.RawConfigFile() + if err != nil { + return err + } + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return err + } + + // Write the layers. + layers, err := img.Layers() + if err != nil { + return err + } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return err + } + + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + r, err := l.Compressed() + if err != nil { + return err + } + blobSize, err := l.Size() + if err != nil { + return err + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return err + } + } + + // Generate the tar descriptor and write it. + td := tarDescriptor{ + singleImageTarDescriptor{ + Config: cfgName.String(), + RepoTags: []string{tag.String()}, + Layers: layerFiles, + }, + } + tdBytes, err := json.Marshal(td) + if err != nil { + return err + } + return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes))) +} + +// write a file to the provided writer with a corresponding tar header +func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { + hdr := &tar.Header{ + Mode: 0644, + Typeflag: tar.TypeReg, + Size: size, + Name: path, + } + if err := tf.WriteHeader(hdr); err != nil { + return err + } + _, err := io.Copy(tf, r) + return err +} diff --git a/vendor/github.com/google/go-containerregistry/v1/types/types.go b/vendor/github.com/google/go-containerregistry/v1/types/types.go new file mode 100644 index 0000000000..ddaf71962e --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/types/types.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. +const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" +) diff --git a/vendor/github.com/google/go-containerregistry/v1/v1util/and_closer.go b/vendor/github.com/google/go-containerregistry/v1/v1util/and_closer.go new file mode 100644 index 0000000000..0925f13d54 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/v1util/and_closer.go @@ -0,0 +1,47 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "io" +) + +// readAndCloser implements io.ReadCloser by reading from a particular io.Reader +// and then calling the provided "Close()" method. +type readAndCloser struct { + io.Reader + CloseFunc func() error +} + +var _ io.ReadCloser = (*readAndCloser)(nil) + +// Close implements io.ReadCloser +func (rac *readAndCloser) Close() error { + return rac.CloseFunc() +} + +// writeAndCloser implements io.WriteCloser by writing to a particular io.Writer +// and then calling the provided "Close()" method.
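+// This mirrors readAndCloser above and lets wrappers chain Close calls, e.g. closing a gzip.Writer before closing the writer beneath it (as GzipWriteCloser in zip.go does).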
+type writeAndCloser struct { + io.Writer + CloseFunc func() error +} + +var _ io.WriteCloser = (*writeAndCloser)(nil) + +// Close implements io.WriteCloser +func (wac *writeAndCloser) Close() error { + return wac.CloseFunc() +} diff --git a/vendor/github.com/google/go-containerregistry/v1/v1util/nop.go b/vendor/github.com/google/go-containerregistry/v1/v1util/nop.go new file mode 100644 index 0000000000..8ff288d978 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/v1util/nop.go @@ -0,0 +1,40 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "io" +) + +func nop() error { + return nil +} + +// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &writeAndCloser{ + Writer: w, + CloseFunc: nop, + } +} + +// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing. +// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity. +func NopReadCloser(r io.Reader) io.ReadCloser { + return &readAndCloser{ + Reader: r, + CloseFunc: nop, + } +} diff --git a/vendor/github.com/google/go-containerregistry/v1/v1util/verify.go b/vendor/github.com/google/go-containerregistry/v1/v1util/verify.go new file mode 100644 index 0000000000..610ca5c01b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/v1util/verify.go @@ -0,0 +1,61 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "encoding/hex" + "fmt" + "hash" + "io" + + "github.com/google/go-containerregistry/v1" +) + +type verifyReader struct { + inner io.Reader + hasher hash.Hash + expected v1.Hash +} + +// Read implements io.Reader +func (vc *verifyReader) Read(b []byte) (int, error) { + n, err := vc.inner.Read(b) + if err == io.EOF { + got := hex.EncodeToString(vc.hasher.Sum(make([]byte, 0, vc.hasher.Size()))) + if want := vc.expected.Hex; got != want { + return n, fmt.Errorf("error verifying %s checksum; got %q, want %q", + vc.expected.Algorithm, got, want) + } + } + return n, err +} + +// VerifyReadCloser wraps the given io.ReadCloser to verify that its contents match +// the provided v1.Hash before io.EOF is returned. 
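+// The check only fires when the wrapped reader returns io.EOF, so callers must read the stream to completion for verification to take effect.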
+func VerifyReadCloser(r io.ReadCloser, h v1.Hash) (io.ReadCloser, error) { + w, err := v1.Hasher(h.Algorithm) + if err != nil { + return nil, err + } + r2 := io.TeeReader(r, w) + return &readAndCloser{ + Reader: &verifyReader{ + inner: r2, + hasher: w, + expected: h, + }, + CloseFunc: r.Close, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/v1util/zip.go b/vendor/github.com/google/go-containerregistry/v1/v1util/zip.go new file mode 100644 index 0000000000..469d29146f --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/v1util/zip.go @@ -0,0 +1,117 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1util + +import ( + "bytes" + "compress/gzip" + "io" +) + +var gzipMagicHeader = []byte{'\x1f', '\x8b'} + +// GzipReadCloser reads uncompressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which compressed data may be read. +func GzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + pr, pw := io.Pipe() + + go func() { + defer pw.Close() + defer r.Close() + + gw, _ := gzip.NewWriterLevel(pw, gzip.BestCompression) + defer gw.Close() + + _, err := io.Copy(gw, r) + if err != nil { + pr.CloseWithError(err) + } + }() + + return pr, nil +} + +// GunzipReadCloser reads compressed input data from the io.ReadCloser and +// returns an io.ReadCloser from which uncompressed data may be read. +func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + return &readAndCloser{ + Reader: gr, + CloseFunc: func() error { + if err := gr.Close(); err != nil { + return err + } + return r.Close() + }, + }, nil +} + +// GzipWriteCloser returns an io.WriteCloser to which uncompressed data may be +// written, and the compressed data is then written to the provided +// io.WriteCloser. +func GzipWriteCloser(w io.WriteCloser) io.WriteCloser { + gw := gzip.NewWriter(w) + return &writeAndCloser{ + Writer: gw, + CloseFunc: func() error { + if err := gw.Close(); err != nil { + return err + } + return w.Close() + }, + } +} + +// gunzipWriteCloser implements io.WriteCloser. +// It is used to implement GunzipWriteCloser. +type gunzipWriteCloser struct { + *bytes.Buffer + writer io.WriteCloser +} + +// Close implements io.WriteCloser +func (gwc *gunzipWriteCloser) Close() error { + // TODO(mattmoor): How to avoid buffering this whole thing into memory? + gr, err := gzip.NewReader(gwc.Buffer) + if err != nil { + return err + } + if _, err := io.Copy(gwc.writer, gr); err != nil { + return err + } + return gwc.writer.Close() +} + +// GunzipWriteCloser returns an io.WriteCloser to which compressed data may be +// written, and the uncompressed data is then written to the provided +// io.WriteCloser. +func GunzipWriteCloser(w io.WriteCloser) (io.WriteCloser, error) { + return &gunzipWriteCloser{ + Buffer: bytes.NewBuffer(nil), + writer: w, + }, nil +} + +// IsGzipped detects whether the input stream is compressed.
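+// It consumes the first two bytes of r, so callers that need the whole stream re-open it afterwards (as tarball.LayerFromOpener does via its Opener).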
+func IsGzipped(r io.Reader) (bool, error) { + magicHeader := make([]byte, 2) + if _, err := r.Read(magicHeader); err != nil { + return false, err + } + return bytes.Equal(magicHeader, gzipMagicHeader), nil +} diff --git a/vendor/github.com/google/go-containerregistry/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/v1/zz_deepcopy_generated.go new file mode 100644 index 0000000000..530e048fbd --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/v1/zz_deepcopy_generated.go @@ -0,0 +1,232 @@ +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key := range *in { + (*out)[key] = struct{}{} + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key := range *in { + (*out)[key] = struct{}{} + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. 
+func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. 
+func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/gorilla/context/LICENSE b/vendor/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb87280..0000000000 --- a/vendor/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/context/context.go b/vendor/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b19..0000000000 --- a/vendor/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. 
-func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the number of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used as a sanity check: in case context cleaning was not -// properly set up, some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c7400311..0000000000 --- a/vendor/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -session values to be saved at the end of a request. There are several -other common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality.
-Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb87280..0000000000 --- a/vendor/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
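For orientation, the gorilla/context package removed above was normally consumed through a private key type plus ClearHandler, as in this minimal sketch (the key and handler names are illustrative; the API calls are the ones visible in the deleted source):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/context"
    )

    // A private key type avoids collisions with other packages' keys.
    type key int

    const userKey key = 0

    func handler(w http.ResponseWriter, r *http.Request) {
        // Values are bound to this specific *http.Request.
        context.Set(r, userKey, "alice")
        if v, ok := context.GetOk(r, userKey); ok {
            fmt.Fprintf(w, "user=%s", v.(string))
        }
    }

    func main() {
        // ClearHandler drops the request's entry from the global map once the
        // handler returns, preventing the leak that Purge() guards against.
        http.Handle("/", context.ClearHandler(http.HandlerFunc(handler)))
        http.ListenAndServe(":8080", nil)
    }

The mux shims deleted next show why the dependency could be dropped: from Go 1.7 onward, the same per-request storage lives on *http.Request itself via r.Context().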
diff --git a/vendor/github.com/gorilla/mux/context_gorilla.go b/vendor/github.com/gorilla/mux/context_gorilla.go deleted file mode 100644 index d7adaa8fad..0000000000 --- a/vendor/github.com/gorilla/mux/context_gorilla.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !go1.7 - -package mux - -import ( - "net/http" - - "github.com/gorilla/context" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return context.Get(r, key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - context.Set(r, key, val) - return r -} - -func contextClear(r *http.Request) { - context.Clear(r) -} diff --git a/vendor/github.com/gorilla/mux/context_native.go b/vendor/github.com/gorilla/mux/context_native.go deleted file mode 100644 index 209cbea7d6..0000000000 --- a/vendor/github.com/gorilla/mux/context_native.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build go1.7 - -package mux - -import ( - "context" - "net/http" -) - -func contextGet(r *http.Request, key interface{}) interface{} { - return r.Context().Value(key) -} - -func contextSet(r *http.Request, key, val interface{}) *http.Request { - if val == nil { - return r - } - - return r.WithContext(context.WithValue(r.Context(), key, val)) -} - -func contextClear(r *http.Request) { - return -} diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go deleted file mode 100644 index 013f088985..0000000000 --- a/vendor/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts, paths and query values can have variables with an optional - regular expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. 
For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: - - r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) - -The names are used to create a map of route variables which can be retrieved by -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -Note that if any capturing groups are present, mux will panic() during parsing. To prevent -this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to -"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably -when capturing groups were present. - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be tedious, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register their -paths relative to a given subrouter.
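A hedged sketch of that "namespace" idea (the handler names are hypothetical): the router and subrouter are created centrally, and feature packages register only against the subrouter, so none of their routes are even tested unless the host has already matched:

    r := mux.NewRouter()
    api := r.Host("api.example.com").Subrouter()

    // products package registers against the subrouter only...
    api.HandleFunc("/products/{key}", ProductHandler)

    // ...and so does the articles package.
    api.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)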
- -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as a base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Note that the path provided to PathPrefix() represents a "wildcard": calling -PathPrefix("/static/").Handler(...) means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: - - func main() { - var dir string - - flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") - flag.Parse() - r := mux.NewRouter() - - // This will serve files under http://localhost:8000/static/ - r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) - - srv := &http.Server{ - Handler: r, - Addr: "127.0.0.1:8000", - // Good practice: enforce timeouts for servers you create! - WriteTimeout: 15 * time.Second, - ReadTimeout: 15 * time.Second, - } - - log.Fatal(srv.ListenAndServe()) - } - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name by calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host and query value variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - Queries("filter", "{filter}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42", - "filter", "gorilla") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text`. - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler).
- Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -Since **vX.Y.Z**, mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed if a -match is found (including subrouters). Middlewares are defined using the de facto standard type: - - type MiddlewareFunc func(http.Handler) http.Handler - -Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). - -A very basic middleware which logs the URI of the request being handled could be written as: - - func simpleMw(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Do stuff here - log.Println(r.RequestURI) - // Call the next handler, which can be another middleware in the chain, or the final handler. - next.ServeHTTP(w, r) - }) - } - -Middlewares can be added to a router using `Router.Use()`: - - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.AddMiddleware(simpleMw) - -A more complex authentication middleware, which maps session token to users, could be written as: - - // Define our struct - type authenticationMiddleware struct { - tokenUsers map[string]string - } - - // Initialize it somewhere - func (amw *authenticationMiddleware) Populate() { - amw.tokenUsers["00000000"] = "user0" - amw.tokenUsers["aaaaaaaa"] = "userA" - amw.tokenUsers["05f717e5"] = "randomUser" - amw.tokenUsers["deadbeef"] = "user0" - } - - // Middleware function, which will be called for each request - func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - token := r.Header.Get("X-Session-Token") - - if user, found := amw.tokenUsers[token]; found { - // We found the token in our map - log.Printf("Authenticated user %s\n", user) - next.ServeHTTP(w, r) - } else { - http.Error(w, "Forbidden", 403) - } - }) - } - - r := mux.NewRouter() - r.HandleFunc("/", handler) - - amw := authenticationMiddleware{} - amw.Populate() - - r.Use(amw.Middleware) - -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. - -*/ -package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index 8f898675ea..0000000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,28 +0,0 @@ -package mux - -import "net/http" - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// MiddlewareFunc also implements the middleware interface. 
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go deleted file mode 100644 index 8f898675ea..0000000000 --- a/vendor/github.com/gorilla/mux/middleware.go +++ /dev/null @@ -1,28 +0,0 @@ -package mux - -import "net/http" - -// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. -// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed -// to it, and then calls the handler passed as parameter to the MiddlewareFunc. -type MiddlewareFunc func(http.Handler) http.Handler - -// middleware interface is anything which implements a MiddlewareFunc named Middleware. -type middleware interface { - Middleware(handler http.Handler) http.Handler -} - -// MiddlewareFunc also implements the middleware interface. -func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { - return mw(handler) -} - -// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf MiddlewareFunc) { - r.middlewares = append(r.middlewares, mwf) -} - -// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) useInterface(mw middleware) { - r.middlewares = append(r.middlewares, mw) -} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go deleted file mode 100644 index efabd24175..0000000000 --- a/vendor/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,585 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" -) - -var ( - ErrMethodMismatch = errors.New("method is not allowed") - ErrNotFound = errors.New("no matching route was found") -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in an init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - - // Configurable Handler to be used when the request method does not match the route. - MethodNotAllowedHandler http.Handler - - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // See Router.SkipClean(). This defines the flag for new routes. - skipClean bool - // If true, do not clear the request context after handling the request. - // This has no effect when go1.7+ is used, since the context is stored - // on the request itself. - KeepContext bool - // see Router.UseEncodedPath(). This defines a flag for all routes. - useEncodedPath bool - // Slice of middlewares to be called after a match is found - middlewares []middleware -} - -// Match attempts to match the given request against the router's registered routes. -// -// If the request matches a route of this router or one of its subrouters the Route, -// Handler, and Vars fields of the match argument are filled and this function -// returns true. -// -// If the request does not match any of this router's or its subrouters' routes -// then this function returns false. If available, a reason for the match failure -// will be filled in the match argument's MatchErr field. If the match failure type -// (eg: not found) has a registered handler, the handler is assigned to the Handler -// field of the match argument.
-func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - // Build middleware chain if no error was found - if match.MatchErr == nil { - for i := len(r.middlewares) - 1; i >= 0; i-- { - match.Handler = r.middlewares[i].Middleware(match.Handler) - } - } - return true - } - } - - if match.MatchErr == ErrMethodMismatch { - if r.MethodNotAllowedHandler != nil { - match.Handler = r.MethodNotAllowedHandler - return true - } else { - return false - } - } - - // Closest match for a router (includes sub-routers) - if r.NotFoundHandler != nil { - match.Handler = r.NotFoundHandler - match.MatchErr = ErrNotFound - return true - } - - match.MatchErr = ErrNotFound - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved by calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if !r.skipClean { - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Clean path to canonical form and redirect. - if p := cleanPath(path); p != path { - - // Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - req = setVars(req, match.Vars) - req = setCurrentRoute(req, match.Route) - } - - if handler == nil && match.MatchErr == ErrMethodMismatch { - handler = methodNotAllowedHandler() - } - - if handler == nil { - handler = http.NotFoundHandler() - } - - if !r.KeepContext { - defer contextClear(req) - } - - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will perform a redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for -// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected -// request will be made as a GET by most clients. Use middleware or client settings -// to modify this behaviour as needed. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting.
-func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// SkipClean defines the path cleaning behaviour for new routes. The initial -// value is false. Users should be careful about which routes are not cleaned -// -// When true, if the route path is "/path//to", it will remain with the double -// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ -// -// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will -// become /fetch/http/xkcd.com/534 -func (r *Router) SkipClean(value bool) *Router { - r.skipClean = value - return r -} - -// UseEncodedPath tells the router to match the encoded original path -// to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". -// -// If not called, the router will match the unencoded path to the routes. -// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" -func (r *Router) UseEncodedPath() *Router { - r.useEncodedPath = true - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -func (r *Router) getBuildScheme() string { - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. 
-// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVarsFunc registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - if err != nil { - return err - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string - - // MatchErr is set to appropriate matching error - // It is set to ErrMethodMismatch if there is a mismatch in - // the request method and route method - MatchErr error -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := contextGet(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. 
-// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. -func CurrentRoute(r *http.Request) *Route { - if rv := contextGet(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) *http.Request { - return contextSet(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) *http.Request { - return contextSet(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -// checkPairs returns the count of strings passed in, and an error if -// the count is not an even number. -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairsToString converts variadic string parameters to a -// string to string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -// mapFromPairsToRegex converts variadic string parameters to a -// string to regex map. -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. 
- valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// methodNotAllowed replies to the request with an HTTP status code 405. -func methodNotAllowed(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusMethodNotAllowed) -} - -// methodNotAllowedHandler returns a simple request handler -// that replies to each request with a status code 405. -func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 2b57e5627d..0000000000 --- a/vendor/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,332 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -type routeRegexpOptions struct { - strictSlash bool - useEncodedPath bool -} - -type regexpType int - -const ( - regexpTypePath regexpType = 0 - regexpTypeHost regexpType = 1 - regexpTypePrefix regexpType = 2 - regexpTypeQuery regexpType = 3 -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if typ == regexpTypeQuery { - defaultPattern = ".*" - } else if typ == regexpTypeHost { - defaultPattern = "[^.]+" - } - // Only match strict slash if not matching - if typ != regexpTypePath { - options.strictSlash = false - } - // Set a flag for strictSlash. 
- endSlash := false - if options.strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) - - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if options.strictSlash { - pattern.WriteString("[/]?") - } - if typ == regexpTypeQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if typ != regexpTypePrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - - // Check for capturing groups which used to work in older versions - if reg.NumSubexp() != len(idxs)/2 { - panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + - "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") - } - - // Done! - return &routeRegexp{ - template: template, - regexpType: typ, - options: options, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // The type of match - regexpType regexpType - // Options for matching - options routeRegexpOptions - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if r.regexpType != regexpTypeHost { - if r.regexpType == regexpTypeQuery { - return r.matchQueryString(req) - } - path := req.URL.Path - if r.options.useEncodedPath { - path = req.URL.EscapedPath() - } - return r.regexp.MatchString(path) - } - - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. 
-func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - if r.regexpType == regexpTypeQuery { - value = url.QueryEscape(value) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getURLQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getURLQuery(req *http.Request) string { - if r.regexpType != regexpTypeQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getURLQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - var idxs []int - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - host := getHost(req) - matches := v.host.regexp.FindStringSubmatchIndex(host) - if len(matches) > 0 { - extractVars(host, matches, v.host.varsN, m.Vars) - } - } - path := req.URL.Path - if r.useEncodedPath { - path = req.URL.EscapedPath() - } - // Store path variables. - if v.path != nil { - matches := v.path.regexp.FindStringSubmatchIndex(path) - if len(matches) > 0 { - extractVars(path, matches, v.path.varsN, m.Vars) - // Check if we should redirect. 
- if v.path.options.strictSlash { - p1 := strings.HasSuffix(path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryURL := q.getURLQuery(req) - matches := q.regexp.FindStringSubmatchIndex(queryURL) - if len(matches) > 0 { - extractVars(queryURL, matches, q.varsN, m.Vars) - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} - -func extractVars(input string, matches []int, names []string, output map[string]string) { - for i, name := range names { - output[name] = input[matches[2*i+2]:matches[2*i+3]] - } -} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go deleted file mode 100644 index 4ce098d4fb..0000000000 --- a/vendor/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,761 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, when the path pattern is "/path//to", accessing "/path//to" - // will not redirect - skipClean bool - // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" - useEncodedPath bool - // The scheme used when building URLs. - buildScheme string - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - buildVarsFunc BuildVarsFunc -} - -func (r *Route) SkipClean() bool { - return r.skipClean -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - - var matchErr error - - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - if _, ok := m.(methodMatcher); ok { - matchErr = ErrMethodMismatch - continue - } - matchErr = nil - return false - } - } - - if matchErr != nil { - match.MatchErr = matchErr - return false - } - - if match.MatchErr == ErrMethodMismatch { - // We found a route which matches request method, clear MatchErr - match.MatchErr = nil - // Then override the mis-matched handler - match.Handler = r.handler - } - - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - - // Set variables. 
- if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if typ == regexpTypePath || typ == regexpTypePrefix { - if len(tpl) > 0 && tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ - strictSlash: r.strictSlash, - useEncodedPath: r.useEncodedPath, - }) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if typ == regexpTypeHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if typ == regexpTypeQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. 
-type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex -// support. For example: -// -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request headers match their regular expressions. -// If the value is an empty string, it will match any value if the key is set. -// Use the start and end of string anchors (^ and $) to match an exact value. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved by -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypeHost) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -// Match returns the match for a given request. -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT".
-func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePath) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// It the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". 
-func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - if r.buildScheme == "" && len(schemes) > 0 { - r.buildScheme = schemes[0] - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - values, err := r.prepareVars(pairs...) 
- if err != nil { - return nil, err - } - var scheme, host, path string - queries := make([]string, 0, len(r.regexp.queries)) - if r.regexp.host != nil { - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - scheme = "http" - if s := r.getBuildScheme(); s != "" { - scheme = s - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - for _, q := range r.regexp.queries { - var query string - if query, err = q.url(values); err != nil { - return nil, err - } - queries = append(queries, query) - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - RawQuery: strings.Join(queries, "&"), - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - u := &url.URL{ - Scheme: "http", - Host: host, - } - if s := r.getBuildScheme(); s != "" { - u.Scheme = s - } - return u, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// GetPathTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp == nil || r.regexp.path == nil { - return "", errors.New("mux: route doesn't have a path") - } - return r.regexp.path.template, nil -} - -// GetPathRegexp returns the expanded regular expression used to match route path. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a path. -func (r *Route) GetPathRegexp() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp == nil || r.regexp.path == nil { - return "", errors.New("mux: route does not have a path") - } - return r.regexp.path.regexp.String(), nil -} - -// GetQueriesRegexp returns the expanded regular expressions used to match the -// route queries. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An empty list will be returned if the route does not have queries. 
-func (r *Route) GetQueriesRegexp() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.regexp.String()) - } - return queries, nil -} - -// GetQueriesTemplates returns the templates used to build the -// query matching. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An empty list will be returned if the route does not define queries. -func (r *Route) GetQueriesTemplates() ([]string, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.queries == nil { - return nil, errors.New("mux: route doesn't have queries") - } - var queries []string - for _, query := range r.regexp.queries { - queries = append(queries, query.template) - } - return queries, nil -} - -// GetMethods returns the methods the route matches against -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An empty list will be returned if route does not have methods. -func (r *Route) GetMethods() ([]string, error) { - if r.err != nil { - return nil, r.err - } - for _, m := range r.matchers { - if methods, ok := m.(methodMatcher); ok { - return []string(methods), nil - } - } - return nil, nil -} - -// GetHostTemplate returns the template used to build the -// route match. -// This is useful for building simple REST API documentation and for instrumentation -// against third-party services. -// An error will be returned if the route does not define a host. -func (r *Route) GetHostTemplate() (string, error) { - if r.err != nil { - return "", r.err - } - if r.regexp == nil || r.regexp.host == nil { - return "", errors.New("mux: route doesn't have a host") - } - return r.regexp.host.template, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getBuildScheme() string - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -func (r *Route) getBuildScheme() string { - if r.buildScheme != "" { - return r.buildScheme - } - if r.parent != nil { - return r.parent.getBuildScheme() - } - return "" -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. 
-func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go deleted file mode 100644 index 8b2c4a4c58..0000000000 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import "net/http" - -// SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. -// -// This API should only be used for testing purposes; it provides a way to -// inject variables into the request context. Alternatively, URL variables -// can be set by making a route that captures the required variables, -// starting a server and sending the request to that server. -func SetURLVars(r *http.Request, val map[string]string) *http.Request { - return setVars(r, val) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE new file mode 100644 index 0000000000..abe5fe170b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2016, gRPC Ecosystem +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of grpc-opentracing nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
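The route.go hunks above trim gorilla/mux's matcher and URL-building internals, and test_helpers.go (with its SetURLVars test hook) drops out of the vendored copy entirely. For orientation, here is a minimal sketch of the route API those hunks implement, using only the methods documented above; the hostnames, handler body, and variable values are illustrative assumptions:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// Host, path, method, and query matchers all stack on one route; Name
	// registers it for URL reversal.
	r.Host("{subdomain:[a-z]+}.example.com").
		Path("/articles/{category}/{id:[0-9]+}").
		Queries("ref", "{ref}").
		Methods("GET").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			vars := mux.Vars(req) // route variables extracted by the matchers
			fmt.Fprintf(w, "article %s in %s", vars["id"], vars["category"])
		}).
		Name("article")

	// URL() reverses the named route from key/value pairs; every variable
	// defined on the route must be supplied and must match its pattern.
	u, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "tech",
		"id", "42",
		"ref", "home")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String()) // http://news.example.com/articles/tech/42?ref=home
}
```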
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS new file mode 100644 index 0000000000..5cfe0175ee --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS @@ -0,0 +1,23 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the GRPC project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of GRPC, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of GRPC. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of GRPC or any code incorporated within this +implementation of GRPC constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of GRPC +shall terminate as of the date such litigation is filed. +Status API Training Shop Blog About diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go new file mode 100644 index 0000000000..3414e55cb1 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go @@ -0,0 +1,239 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "io" + "runtime" + "sync/atomic" +) + +// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.Dial call. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + method string, + req, resp interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + opts ...grpc.CallOption, + ) error { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { + return invoker(ctx, method, req, resp, cc, opts...) 
+ } + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + defer clientSpan.Finish() + ctx = injectSpanContext(ctx, tracer, clientSpan) + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC request", req)) + } + err = invoker(ctx, method, req, resp, cc, opts...) + if err == nil { + if otgrpcOpts.logPayloads { + clientSpan.LogFields(log.Object("gRPC response", resp)) + } + } else { + SetSpanTags(clientSpan, err, true) + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, req, resp, err) + } + return err + } +} + +// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating +// a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// conn, err := grpc.Dial( +// address, +// ..., // (existing DialOptions) +// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) +// +// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC +// metadata; they will also look in the context.Context for an active +// in-process parent Span and establish a ChildOf reference if such a parent +// Span could be found. +func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + opts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + var err error + var parentCtx opentracing.SpanContext + if parent := opentracing.SpanFromContext(ctx); parent != nil { + parentCtx = parent.Context() + } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { + return streamer(ctx, desc, cc, method, opts...) + } + + clientSpan := tracer.StartSpan( + method, + opentracing.ChildOf(parentCtx), + ext.SpanKindRPCClient, + gRPCComponentTag, + ) + ctx = injectSpanContext(ctx, tracer, clientSpan) + cs, err := streamer(ctx, desc, cc, method, opts...) + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + clientSpan.Finish() + return cs, err + } + return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil + } +} + +func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { + finishChan := make(chan struct{}) + + isFinished := new(int32) + *isFinished = 0 + finishFunc := func(err error) { + // The current OpenTracing specification forbids finishing a span more than + // once. Since we have multiple code paths that could concurrently call + // `finishFunc`, we need to add some sort of synchronization to guard against + // multiple finishing. 
+ if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { + return + } + close(finishChan) + defer clientSpan.Finish() + if err != nil { + clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + SetSpanTags(clientSpan, err, true) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(clientSpan, method, nil, nil, err) + } + } + go func() { + select { + case <-finishChan: + // The client span is being finished by another code path; hence, no + // action is necessary. + case <-cs.Context().Done(): + finishFunc(cs.Context().Err()) + } + }() + otcs := &openTracingClientStream{ + ClientStream: cs, + desc: desc, + finishFunc: finishFunc, + } + + // The `ClientStream` interface allows one to omit calling `Recv` if it's + // known that the result will be `io.EOF`. See + // http://stackoverflow.com/q/42915337 + // In such cases, there's nothing that triggers the span to finish. We, + // therefore, set a finalizer so that the span and the context goroutine will + // at least be cleaned up when the garbage collector is run. + runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { + otcs.finishFunc(nil) + }) + return otcs +} + +type openTracingClientStream struct { + grpc.ClientStream + desc *grpc.StreamDesc + finishFunc func(error) +} + +func (cs *openTracingClientStream) Header() (metadata.MD, error) { + md, err := cs.ClientStream.Header() + if err != nil { + cs.finishFunc(err) + } + return md, err +} + +func (cs *openTracingClientStream) SendMsg(m interface{}) error { + err := cs.ClientStream.SendMsg(m) + if err != nil { + cs.finishFunc(err) + } + return err +} + +func (cs *openTracingClientStream) RecvMsg(m interface{}) error { + err := cs.ClientStream.RecvMsg(m) + if err == io.EOF { + cs.finishFunc(nil) + return err + } else if err != nil { + cs.finishFunc(err) + return err + } + if !cs.desc.ServerStreams { + cs.finishFunc(nil) + } + return err +} + +func (cs *openTracingClientStream) CloseSend() error { + err := cs.ClientStream.CloseSend() + if err != nil { + cs.finishFunc(err) + } + return err +} + +func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.New(nil) + } else { + md = md.Copy() + } + mdWriter := metadataReaderWriter{md} + err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) + // We have no better place to record an error than the Span itself :-/ + if err != nil { + clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) + } + return metadata.NewOutgoingContext(ctx, md) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go new file mode 100644 index 0000000000..41a6346f25 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go @@ -0,0 +1,69 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// A Class is a set of types of outcomes (including errors) that will often +// be handled in the same way. +type Class string + +const ( + Unknown Class = "0xx" + // Success represents outcomes that achieved the desired results. + Success Class = "2xx" + // ClientError represents errors that were the client's fault. 
+ ClientError Class = "4xx"
+ // ServerError represents errors that were the server's fault.
+ ServerError Class = "5xx"
+)
+
+// ErrorClass returns the class of the given error.
+func ErrorClass(err error) Class {
+ if s, ok := status.FromError(err); ok {
+ switch s.Code() {
+ // Success or "success"
+ case codes.OK, codes.Canceled:
+ return Success
+
+ // Client errors
+ case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists,
+ codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition,
+ codes.OutOfRange:
+ return ClientError
+
+ // Server errors
+ case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted,
+ codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss:
+ return ServerError
+
+ // Not sure
+ case codes.Unknown:
+ fallthrough
+ default:
+ return Unknown
+ }
+ }
+ return Unknown
+}
+
+// SetSpanTags sets one or more tags on the given span according to the
+// error.
+func SetSpanTags(span opentracing.Span, err error, client bool) {
+ c := ErrorClass(err)
+ code := codes.Unknown
+ if s, ok := status.FromError(err); ok {
+ code = s.Code()
+ }
+ span.SetTag("response_code", code)
+ span.SetTag("response_class", c)
+ if err == nil {
+ return
+ }
+ if client || c == ServerError {
+ ext.Error.Set(span, true)
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go
new file mode 100644
index 0000000000..903e8382e3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go
@@ -0,0 +1,76 @@
+package otgrpc
+
+import "github.com/opentracing/opentracing-go"
+
+// Option instances may be used in OpenTracing(Server|Client)Interceptor
+// initialization.
+//
+// See this post about the "functional options" pattern:
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type Option func(o *options)
+
+// LogPayloads returns an Option that tells the OpenTracing instrumentation to
+// try to log application payloads in both directions.
+func LogPayloads() Option {
+ return func(o *options) {
+ o.logPayloads = true
+ }
+}
+
+// SpanInclusionFunc provides an optional mechanism to decide whether or not
+// to trace a given gRPC call. Return true to create a Span and initiate
+// tracing, false to not create a Span and not trace.
+//
+// parentSpanCtx may be nil if no parent could be extracted from either the Go
+// context.Context (on the client) or the RPC (on the server).
+type SpanInclusionFunc func(
+ parentSpanCtx opentracing.SpanContext,
+ method string,
+ req, resp interface{}) bool
+
+// IncludingSpans binds a SpanInclusionFunc to the options
+func IncludingSpans(inclusionFunc SpanInclusionFunc) Option {
+ return func(o *options) {
+ o.inclusionFunc = inclusionFunc
+ }
+}
+
+// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add
+// arbitrary tags/logs/etc to the opentracing.Span associated with client
+// and/or server RPCs.
+type SpanDecoratorFunc func(
+ span opentracing.Span,
+ method string,
+ req, resp interface{},
+ grpcError error)
+
+// SpanDecorator binds a function that decorates gRPC Spans.
+func SpanDecorator(decorator SpanDecoratorFunc) Option {
+ return func(o *options) {
+ o.decorator = decorator
+ }
+}
+
+// The internal-only options struct. Obviously overkill at the moment; but will
+// scale well as production use dictates other configuration and tuning
+// parameters.
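Taken together, errors.go maps gRPC status codes onto coarse outcome classes for span tagging, and options.go exposes the functional options that both interceptor constructors consume (the internal options struct that the comment immediately above introduces continues right after this aside). A minimal sketch of wiring the vendored interceptors into a gRPC client and server; the target address, the "env" tag, and the global-tracer registration are illustrative assumptions, not part of this change:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
	opentracing "github.com/opentracing/opentracing-go"
	"google.golang.org/grpc"
)

func main() {
	// Assumes a concrete tracer was installed elsewhere, e.g. with
	// opentracing.SetGlobalTracer(...) at process startup.
	tracer := opentracing.GlobalTracer()

	// Client side: trace unary and streaming calls, logging payloads.
	conn, err := grpc.Dial(
		"localhost:9090", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(
			tracer, otgrpc.LogPayloads())),
		grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Server side: the mirror-image interceptors, plus a SpanDecorator that
	// stamps an extra tag on every server span.
	_ = grpc.NewServer(
		grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(
			tracer,
			otgrpc.SpanDecorator(func(span opentracing.Span, method string,
				req, resp interface{}, grpcErr error) {
				span.SetTag("env", "dev") // hypothetical tag
			}))),
		grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer)),
	)
}
```

On error, these interceptors call SetSpanTags, so each span carries the response_code and response_class tags derived from ErrorClass.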
+type options struct { + logPayloads bool + decorator SpanDecoratorFunc + // May be nil. + inclusionFunc SpanInclusionFunc +} + +// newOptions returns the default options. +func newOptions() *options { + return &options{ + logPayloads: false, + inclusionFunc: nil, + } +} + +func (o *options) apply(opts ...Option) { + for _, opt := range opts { + opt(o) + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go new file mode 100644 index 0000000000..4ff3d19978 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go @@ -0,0 +1,5 @@ +// Package otgrpc provides OpenTracing support for any gRPC client or server. +// +// See the README for simple usage examples: +// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md +package otgrpc diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go new file mode 100644 index 0000000000..62cf54d221 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go @@ -0,0 +1,141 @@ +package otgrpc + +import ( + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +// +// For example: +// +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) +// +// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC +// metadata; if found, the server span will act as the ChildOf that RPC +// SpanContext. +// +// Root or not, the server Span will be embedded in the context.Context for the +// application-specific gRPC handler(s) to access. +func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (resp interface{}, err error) { + spanContext, err := extractSpanContext(ctx, tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. 
+ } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { + return handler(ctx, req) + } + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + + ctx = opentracing.ContextWithSpan(ctx, serverSpan) + if otgrpcOpts.logPayloads { + serverSpan.LogFields(log.Object("gRPC request", req)) + } + resp, err = handler(ctx, req) + if err == nil { + if otgrpcOpts.logPayloads { + serverSpan.LogFields(log.Object("gRPC response", resp)) + } + } else { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err) + } + return resp, err + } +} + +// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by +// creating a single span to correspond to the lifetime of the RPC's stream. +// +// For example: +// +// s := grpc.NewServer( +// ..., // (existing ServerOptions) +// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) +// +// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC +// metadata; if found, the server span will act as the ChildOf that RPC +// SpanContext. +// +// Root or not, the server Span will be embedded in the context.Context for the +// application-specific gRPC handler(s) to access. +func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { + otgrpcOpts := newOptions() + otgrpcOpts.apply(optFuncs...) + return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + spanContext, err := extractSpanContext(ss.Context(), tracer) + if err != nil && err != opentracing.ErrSpanContextNotFound { + // TODO: establish some sort of error reporting mechanism here. We + // don't know where to put such an error and must rely on Tracer + // implementations to do something appropriate for the time being. 
+ } + if otgrpcOpts.inclusionFunc != nil && + !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { + return handler(srv, ss) + } + + serverSpan := tracer.StartSpan( + info.FullMethod, + ext.RPCServerOption(spanContext), + gRPCComponentTag, + ) + defer serverSpan.Finish() + ss = &openTracingServerStream{ + ServerStream: ss, + ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), + } + err = handler(srv, ss) + if err != nil { + SetSpanTags(serverSpan, err, false) + serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) + } + if otgrpcOpts.decorator != nil { + otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) + } + return err + } +} + +type openTracingServerStream struct { + grpc.ServerStream + ctx context.Context +} + +func (ss *openTracingServerStream) Context() context.Context { + return ss.ctx +} + +func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.New(nil) + } + return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go new file mode 100644 index 0000000000..9abd5eaa62 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go @@ -0,0 +1,42 @@ +package otgrpc + +import ( + "strings" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "google.golang.org/grpc/metadata" +) + +var ( + // Morally a const: + gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} +) + +// metadataReaderWriter satisfies both the opentracing.TextMapReader and +// opentracing.TextMapWriter interfaces. +type metadataReaderWriter struct { + metadata.MD +} + +func (w metadataReaderWriter) Set(key, val string) { + // The GRPC HPACK implementation rejects any uppercase keys here. + // + // As such, since the HTTP_HEADERS format is case-insensitive anyway, we + // blindly lowercase the key (which is guaranteed to work in the + // Inject/Extract sense per the OpenTracing spec). + key = strings.ToLower(key) + w.MD[key] = append(w.MD[key], val) +} + +func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { + for k, vals := range w.MD { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go new file mode 100644 index 0000000000..8ec9aa5855 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/acl.go @@ -0,0 +1,193 @@ +package api + +import ( + "time" +) + +const ( + // ACLClientType is the client type token + ACLClientType = "client" + + // ACLManagementType is the management type token + ACLManagementType = "management" +) + +// ACLEntry is used to represent an ACL entry +type ACLEntry struct { + CreateIndex uint64 + ModifyIndex uint64 + ID string + Name string + Type string + Rules string +} + +// ACLReplicationStatus is used to represent the status of ACL replication. +type ACLReplicationStatus struct { + Enabled bool + Running bool + SourceDatacenter string + ReplicatedIndex uint64 + LastSuccess time.Time + LastError time.Time +} + +// ACL can be used to query the ACL endpoints +type ACL struct { + c *Client +} + +// ACL returns a handle to the ACL endpoints +func (c *Client) ACL() *ACL { + return &ACL{c} +} + +// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster +// to get the first management token. 
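The ACL methods above all follow one shape: build a request against a /v1/acl endpoint, round-trip it, and decode either a token ID or a list of entries. Before the Bootstrap implementation that the comment above introduces, a hedged usage sketch; api.NewClient and api.DefaultConfig come from this package's client plumbing outside this hunk, and the token name and rule string are illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}
	acl := client.ACL()

	// Create a client-type token with a hypothetical read-only KV rule.
	id, _, err := acl.Create(&api.ACLEntry{
		Name:  "read-only-kv", // hypothetical name
		Type:  api.ACLClientType,
		Rules: `key "" { policy = "read" }`,
	}, nil)
	if err != nil {
		panic(err)
	}

	// Read the token back, then destroy it.
	if entry, _, err := acl.Info(id, nil); err == nil && entry != nil {
		fmt.Println("created token:", entry.ID, entry.Name)
	}
	if _, err := acl.Destroy(id, nil); err != nil {
		panic(err)
	}
}
```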
+func (a *ACL) Bootstrap() (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/bootstrap") + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Create is used to generate a new token with the given parameters +func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/create") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update is used to update the rules of an existing token +func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/update") + r.setWriteOptions(q) + r.obj = acl + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Destroy is used to destroy a given ACL token ID +func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} + +// Clone is used to return a new token cloned from an existing one +func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { + r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) + r.setWriteOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Info is used to query for information about an ACL token +func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/info/"+id) + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// List is used to get all the ACL tokens +func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/list") + r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*ACLEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// Replication returns the status of the ACL replication process in the datacenter +func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) { + r := a.c.newRequest("GET", "/v1/acl/replication") + 
r.setQueryOptions(q) + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries *ACLReplicationStatus + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go new file mode 100644 index 0000000000..b42baed41d --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -0,0 +1,627 @@ +package api + +import ( + "bufio" + "fmt" +) + +// AgentCheck represents a check known to the agent +type AgentCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + Definition HealthCheckDefinition +} + +// AgentService represents a service known to the agent +type AgentService struct { + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +// AgentMember represents a cluster member known to the agent +type AgentMember struct { + Name string + Addr string + Port uint16 + Tags map[string]string + Status int + ProtocolMin uint8 + ProtocolMax uint8 + ProtocolCur uint8 + DelegateMin uint8 + DelegateMax uint8 + DelegateCur uint8 +} + +// AllSegments is used to select for all segments in MembersOpts. +const AllSegments = "_all" + +// MembersOpts is used for querying member information. +type MembersOpts struct { + // WAN is whether to show members from the WAN. + WAN bool + + // Segment is the LAN segment to show members for. Setting this to the + // AllSegments value above will show members in all segments. + Segment string +} + +// AgentServiceRegistration is used to register a new service +type AgentServiceRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Meta map[string]string `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks +} + +// AgentCheckRegistration is used to register a new check +type AgentCheckRegistration struct { + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Notes string `json:",omitempty"` + ServiceID string `json:",omitempty"` + AgentServiceCheck +} + +// AgentServiceCheck is used to define a node or service level check +type AgentServiceCheck struct { + CheckID string `json:",omitempty"` + Name string `json:",omitempty"` + Args []string `json:"ScriptArgs,omitempty"` + Script string `json:",omitempty"` // Deprecated, use Args. + DockerContainerID string `json:",omitempty"` + Shell string `json:",omitempty"` // Only supported for Docker. 
+ Interval string `json:",omitempty"` + Timeout string `json:",omitempty"` + TTL string `json:",omitempty"` + HTTP string `json:",omitempty"` + Header map[string][]string `json:",omitempty"` + Method string `json:",omitempty"` + TCP string `json:",omitempty"` + Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` + GRPC string `json:",omitempty"` + GRPCUseTLS bool `json:",omitempty"` + + // In Consul 0.7 and later, checks that are associated with a service + // may also contain this optional DeregisterCriticalServiceAfter field, + // which is a timeout in the same Go time format as Interval and TTL. If + // a check is in the critical state for more than this configured value, + // then its associated service (and all of its associated checks) will + // automatically be deregistered. + DeregisterCriticalServiceAfter string `json:",omitempty"` +} +type AgentServiceChecks []*AgentServiceCheck + +// AgentToken is used when updating ACL tokens for an agent. +type AgentToken struct { + Token string +} + +// Metrics info is used to store different types of metric values from the agent. +type MetricsInfo struct { + Timestamp string + Gauges []GaugeValue + Points []PointValue + Counters []SampledValue + Samples []SampledValue +} + +// GaugeValue stores one value that is updated as time goes on, such as +// the amount of memory allocated. +type GaugeValue struct { + Name string + Value float32 + Labels map[string]string +} + +// PointValue holds a series of points for a metric. +type PointValue struct { + Name string + Points []float32 +} + +// SampledValue stores info about a metric that is incremented over time, +// such as the number of requests to an HTTP endpoint. +type SampledValue struct { + Name string + Count int + Sum float64 + Min float64 + Max float64 + Mean float64 + Stddev float64 + Labels map[string]string +} + +// Agent can be used to query the Agent endpoints +type Agent struct { + c *Client + + // cache the node name + nodeName string +} + +// Agent returns a handle to the agent endpoints +func (c *Client) Agent() *Agent { + return &Agent{c: c} +} + +// Self is used to query the agent we are speaking to for +// information about itself +func (a *Agent) Self() (map[string]map[string]interface{}, error) { + r := a.c.newRequest("GET", "/v1/agent/self") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]map[string]interface{} + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Metrics is used to query the agent we are speaking to for +// its current internal metric data +func (a *Agent) Metrics() (*MetricsInfo, error) { + r := a.c.newRequest("GET", "/v1/agent/metrics") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out *MetricsInfo + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Reload triggers a configuration reload for the agent we are connected to. 
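+// For example (illustrative only, client construction assumed): +// +//	if err := client.Agent().Reload(); err != nil { +//		log.Printf("reload failed: %v", err) +//	}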
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// NodeName is used to get the node name of the agent +func (a *Agent) NodeName() (string, error) { + if a.nodeName != "" { + return a.nodeName, nil + } + info, err := a.Self() + if err != nil { + return "", err + } + name := info["Config"]["NodeName"].(string) + a.nodeName = name + return name, nil +} + +// Checks returns the locally registered checks +func (a *Agent) Checks() (map[string]*AgentCheck, error) { + r := a.c.newRequest("GET", "/v1/agent/checks") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentCheck + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Services returns the locally registered services +func (a *Agent) Services() (map[string]*AgentService, error) { + r := a.c.newRequest("GET", "/v1/agent/services") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out map[string]*AgentService + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Members returns the known gossip members. The WAN +// flag can be used to query a server for WAN members. +func (a *Agent) Members(wan bool) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// MembersOpts returns the known gossip members and can be passed +// additional options for WAN/segment filtering. +func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) { + r := a.c.newRequest("GET", "/v1/agent/members") + r.params.Set("segment", opts.Segment) + if opts.WAN { + r.params.Set("wan", "1") + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*AgentMember + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// ServiceRegister is used to register a new service with +// the local agent +func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/service/register") + r.obj = service + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ServiceDeregister is used to deregister a service with +// the local agent +func (a *Agent) ServiceDeregister(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) PassTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "pass") +} + +// WarnTTL is used to set a TTL check to the warning state. 
+// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) WarnTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "warn") +} + +// FailTTL is used to set a TTL check to the failing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. +func (a *Agent) FailTTL(checkID, note string) error { + return a.updateTTL(checkID, note, "fail") +} + +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { + switch status { + case "pass": + case "warn": + case "fail": + default: + return fmt.Errorf("Invalid status: %s", status) + } + endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) + r := a.c.newRequest("PUT", endpoint) + r.params.Set("note", note) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). + Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). 
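+// An illustrative sketch (check ID hypothetical, client assumed): +// +//	agent := client.Agent() +//	if err := agent.UpdateTTL("service:web", "disk usage nominal", api.HealthPassing); err != nil { +//		log.Printf("ttl update failed: %v", err) +//	}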
+func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckRegister is used to register a new check with +// the local agent +func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { + r := a.c.newRequest("PUT", "/v1/agent/check/register") + r.obj = check + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// CheckDeregister is used to deregister a check with +// the local agent +func (a *Agent) CheckDeregister(checkID string) error { + r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Join is used to instruct the agent to attempt a join to +// another cluster member +func (a *Agent) Join(addr string, wan bool) error { + r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) + if wan { + r.params.Set("wan", "1") + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// ForceLeave is used to have the agent eject a failed node +func (a *Agent) ForceLeave(node string) error { + r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableServiceMaintenance toggles service maintenance mode on +// for the given service ID. +func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableServiceMaintenance toggles service maintenance mode off +// for the given service ID. +func (a *Agent) DisableServiceMaintenance(serviceID string) error { + r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// EnableNodeMaintenance toggles node maintenance mode on for the +// agent we are connected to. +func (a *Agent) EnableNodeMaintenance(reason string) error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "true") + r.params.Set("reason", reason) + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// DisableNodeMaintenance toggles node maintenance mode off for the +// agent we are connected to. 
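+// Illustrative usage of the maintenance pair (reason string is free-form, +// client assumed): +// +//	_ = client.Agent().EnableNodeMaintenance("planned kernel upgrade") +//	// ... do the disruptive work ... +//	_ = client.Agent().DisableNodeMaintenance()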
+func (a *Agent) DisableNodeMaintenance() error { + r := a.c.newRequest("PUT", "/v1/agent/maintenance") + r.params.Set("enable", "false") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream. An empty string will be sent down the given channel when there's +// nothing left to stream, after which the caller should close the stopCh. +func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + // An empty string signals to the caller that + // the scan is done, so make sure we only emit + // that when the scanner says it's done, not if + // we happen to ingest an empty line. + if text := scanner.Text(); text != "" { + logCh <- text + } else { + logCh <- " " + } + } else { + logCh <- "" + } + } + }() + + return logCh, nil +} + +// UpdateACLToken updates the agent's "acl_token". See updateToken for more +// details. +func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_token", token, q) +} + +// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken +// for more details. +func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_token", token, q) +} + +// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See +// updateToken for more details. +func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_agent_master_token", token, q) +} + +// UpdateACLReplicationToken updates the agent's "acl_replication_token". See +// updateToken for more details. +func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) { + return a.updateToken("acl_replication_token", token, q) +} + +// updateToken can be used to update an agent's ACL token after the agent has +// started. The tokens are not persisted, so will need to be updated again if +// the agent is restarted. 
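+// Illustrative usage via the exported wrappers above (token value +// hypothetical, client assumed): +// +//	if _, err := client.Agent().UpdateACLToken("b1gs33cr3t", nil); err != nil { +//		log.Printf("token update failed: %v", err) +//	}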
+func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) { + r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target)) + r.setWriteOptions(q) + r.obj = &AgentToken{Token: token} + rtt, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + return wm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go new file mode 100644 index 0000000000..1cdc21e331 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -0,0 +1,791 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-rootcerts" +) + +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPCAFile defines an environment variable name which sets the + // CA file to use for talking to Consul over TLS. + HTTPCAFile = "CONSUL_CACERT" + + // HTTPCAPath defines an environment variable name which sets the + // path to a directory of CA certs to use for talking to Consul over TLS. + HTTPCAPath = "CONSUL_CAPATH" + + // HTTPClientCert defines an environment variable name which sets the + // client cert file to use for talking to Consul over TLS. + HTTPClientCert = "CONSUL_CLIENT_CERT" + + // HTTPClientKey defines an environment variable name which sets the + // client key file to use for talking to Consul over TLS. + HTTPClientKey = "CONSUL_CLIENT_KEY" + + // HTTPTLSServerName defines an environment variable name which sets the + // server name to use as the SNI host when connecting via TLS + HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + +// QueryOptions are used to parameterize a query +type QueryOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // AllowStale allows any Consul server (non-leader) to service + // a read. This allows for lower latency and higher throughput + AllowStale bool + + // RequireConsistent forces the read to be fully consistent. + // This is more expensive but prevents ever performing a stale + // read. + RequireConsistent bool + + // WaitIndex is used to enable a blocking query. Waits + // until the timeout or the next index is reached + WaitIndex uint64 + + // WaitTime is used to bound the duration of a wait. + // Defaults to that of the Config, but can be overridden. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. 
+ Token string + + // Near is used to provide a node name that will sort the results + // in ascending order based on the estimated round trip time from + // that node. Setting this to "_agent" will use the agent's node + // for the sort. + Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. Currently, only one key/value pair can + // be provided for filtering. + NodeMeta map[string]string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *QueryOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions { + o2 := new(QueryOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// WriteOptions are used to parameterize a write +type WriteOptions struct { + // Providing a datacenter overwrites the DC provided + // by the Config + Datacenter string + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + // RelayFactor is used in keyring operations to cause responses to be + // relayed back to the sender through N other random nodes. Must be + // a value from 0 to 5 (inclusive). + RelayFactor uint8 + + // ctx is an optional context pass through to the underlying HTTP + // request layer. Use Context() and WithContext() to manage this. + ctx context.Context +} + +func (o *WriteOptions) Context() context.Context { + if o != nil && o.ctx != nil { + return o.ctx + } + return context.Background() +} + +func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions { + o2 := new(WriteOptions) + if o != nil { + *o2 = *o + } + o2.ctx = ctx + return o2 +} + +// QueryMeta is used to return meta data about a query +type QueryMeta struct { + // LastIndex. This can be used as a WaitIndex to perform + // a blocking query + LastIndex uint64 + + // Time of last contact from the leader for the + // server servicing the request + LastContact time.Duration + + // Is there a known leader + KnownLeader bool + + // How long did the request take + RequestTime time.Duration + + // Is address translation enabled for HTTP responses on this agent + AddressTranslationEnabled bool +} + +// WriteMeta is used to return meta data about a write +type WriteMeta struct { + // How long did the request take + RequestTime time.Duration +} + +// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication +type HttpBasicAuth struct { + // Username to use for HTTP Basic Authentication + Username string + + // Password to use for HTTP Basic Authentication + Password string +} + +// Config is used to configure the creation of a client +type Config struct { + // Address is the address of the Consul server + Address string + + // Scheme is the URI scheme for the Consul server + Scheme string + + // Datacenter to use. If not provided, the default agent datacenter is used. + Datacenter string + + // Transport is the Transport to use for the http client. + Transport *http.Transport + + // HttpClient is the client to use. Default will be + // used if not provided. 
+ HttpClient *http.Client + + // HttpAuth is the auth info to use for http access. + HttpAuth *HttpBasicAuth + + // WaitTime limits how long a Watch will block. If not provided, + // the agent default values will be used. + WaitTime time.Duration + + // Token is used to provide a per-request ACL token + // which overrides the agent's default token. + Token string + + TLSConfig TLSConfig +} + +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. The port, if any, + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CAPath is the optional path to a directory of CA certificates to use for + // Consul communication, defaults to the system bundle if not specified. + CAPath string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object, which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. +func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. +func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport.
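+// Illustrative usage (environment value hypothetical): with +// CONSUL_HTTP_ADDR=10.0.0.5:8500 exported, DefaultConfig picks the address +// up from the environment and NewClient fills in anything still unset: +// +//	client, err := api.NewClient(api.DefaultConfig())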
+func defaultConfig(transportFn func() *http.Transport) *Config { + config := &Config{ + Address: "127.0.0.1:8500", + Scheme: "http", + Transport: transportFn(), + } + + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { + config.Address = addr + } + + if token := os.Getenv(HTTPTokenEnvName); token != "" { + config.Token = token + } + + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { + var username, password string + if strings.Contains(auth, ":") { + split := strings.SplitN(auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = auth + } + + config.HttpAuth = &HttpBasicAuth{ + Username: username, + Password: password, + } + } + + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { + enabled, err := strconv.ParseBool(ssl) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) + } + + if enabled { + config.Scheme = "https" + } + } + + if v := os.Getenv(HTTPTLSServerName); v != "" { + config.TLSConfig.Address = v + } + if v := os.Getenv(HTTPCAFile); v != "" { + config.TLSConfig.CAFile = v + } + if v := os.Getenv(HTTPCAPath); v != "" { + config.TLSConfig.CAPath = v + } + if v := os.Getenv(HTTPClientCert); v != "" { + config.TLSConfig.CertFile = v + } + if v := os.Getenv(HTTPClientKey); v != "" { + config.TLSConfig.KeyFile = v + } + if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" { + doVerify, err := strconv.ParseBool(v) + if err != nil { + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) + } + if !doVerify { + config.TLSConfig.InsecureSkipVerify = true + } + } + + return config +} + +// SetupTLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" { + rootConfig := &rootcerts.Config{ + CAFile: tlsConfig.CAFile, + CAPath: tlsConfig.CAPath, + } + if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil { + return nil, err + } + } + + return tlsClientConfig, nil +} + +// Client provides a client to the Consul API +type Client struct { + config Config +} + +// NewClient returns a new client +func NewClient(config *Config) (*Client, error) { + // bootstrap the config + defConfig := DefaultConfig() + + if len(config.Address) == 0 { + config.Address = defConfig.Address + } + + if len(config.Scheme) == 0 { + config.Scheme = defConfig.Scheme + } + + if config.Transport == nil { + config.Transport = defConfig.Transport + } + + if config.TLSConfig.Address == "" { + config.TLSConfig.Address = defConfig.TLSConfig.Address + } + + if config.TLSConfig.CAFile == "" { + config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile + } + + if config.TLSConfig.CAPath == "" { + config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath + } + + if config.TLSConfig.CertFile == "" { + config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
+ } + + if config.TLSConfig.KeyFile == "" { + config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile + } + + if !config.TLSConfig.InsecureSkipVerify { + config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify + } + + if config.HttpClient == nil { + var err error + config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig) + if err != nil { + return nil, err + } + } + + parts := strings.SplitN(config.Address, "://", 2) + if len(parts) == 2 { + switch parts[0] { + case "http": + config.Scheme = "http" + case "https": + config.Scheme = "https" + case "unix": + trans := cleanhttp.DefaultTransport() + trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", parts[1]) + } + config.HttpClient = &http.Client{ + Transport: trans, + } + default: + return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0]) + } + config.Address = parts[1] + } + + if config.Token == "" { + config.Token = defConfig.Token + } + + return &Client{config: *config}, nil +} + +// NewHttpClient returns an http client configured with the given Transport and TLS +// config. +func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) { + client := &http.Client{ + Transport: transport, + } + + // TODO (slackpad) - Once we get some run time on the HTTP/2 support we + // should turn it on by default if TLS is enabled. We would basically + // just need to call http2.ConfigureTransport(transport) here. We also + // don't want to introduce another external dependency on + // golang.org/x/net/http2 at this time. For a complete recipe for how + // to enable HTTP/2 support on a transport suitable for the API client + // library see agent/http_test.go:TestHTTPServer_H2. + + if transport.TLSClientConfig == nil { + tlsClientConfig, err := SetupTLSConfig(&tlsConf) + + if err != nil { + return nil, err + } + + transport.TLSClientConfig = tlsClientConfig + } + + return client, nil +} + +// request is used to help build up a request +type request struct { + config *Config + method string + url *url.URL + params url.Values + body io.Reader + header http.Header + obj interface{} + ctx context.Context +} + +// setQueryOptions is used to annotate the request with +// additional query options +func (r *request) setQueryOptions(q *QueryOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.AllowStale { + r.params.Set("stale", "") + } + if q.RequireConsistent { + r.params.Set("consistent", "") + } + if q.WaitIndex != 0 { + r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) + } + if q.WaitTime != 0 { + r.params.Set("wait", durToMsec(q.WaitTime)) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.Near != "" { + r.params.Set("near", q.Near) + } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. +func durToMsec(dur time.Duration) string { + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. 
+const serverError = "Unexpected response code: 500" + +// IsRetryableError returns true for 500 errors from the Consul servers, and +// network connection errors. These are usually retryable at a later time. +// This applies to reads but NOT to writes. This may return true for errors +// on writes that may have still gone through, so do not use this to retry +// any write operations. +func IsRetryableError(err error) bool { + if err == nil { + return false + } + + if _, ok := err.(net.Error); ok { + return true + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) +} + +// setWriteOptions is used to annotate the request with +// additional write options +func (r *request) setWriteOptions(q *WriteOptions) { + if q == nil { + return + } + if q.Datacenter != "" { + r.params.Set("dc", q.Datacenter) + } + if q.Token != "" { + r.header.Set("X-Consul-Token", q.Token) + } + if q.RelayFactor != 0 { + r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor))) + } + r.ctx = q.ctx +} + +// toHTTP converts the request to an HTTP request +func (r *request) toHTTP() (*http.Request, error) { + // Encode the query parameters + r.url.RawQuery = r.params.Encode() + + // Check if we should encode the body + if r.body == nil && r.obj != nil { + b, err := encodeBody(r.obj) + if err != nil { + return nil, err + } + r.body = b + } + + // Create the HTTP request + req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) + if err != nil { + return nil, err + } + + req.URL.Host = r.url.Host + req.URL.Scheme = r.url.Scheme + req.Host = r.url.Host + req.Header = r.header + + // Setup auth + if r.config.HttpAuth != nil { + req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) + } + if r.ctx != nil { + return req.WithContext(r.ctx), nil + } + + return req, nil +} + +// newRequest is used to create a new request +func (c *Client) newRequest(method, path string) *request { + r := &request{ + config: &c.config, + method: method, + url: &url.URL{ + Scheme: c.config.Scheme, + Host: c.config.Address, + Path: path, + }, + params: make(map[string][]string), + header: make(http.Header), + } + if c.config.Datacenter != "" { + r.params.Set("dc", c.config.Datacenter) + } + if c.config.WaitTime != 0 { + r.params.Set("wait", durToMsec(r.config.WaitTime)) + } + if c.config.Token != "" { + r.header.Set("X-Consul-Token", r.config.Token) + } + return r +} + +// doRequest runs a request with our client +func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { + req, err := r.toHTTP() + if err != nil { + return 0, nil, err + } + start := time.Now() + resp, err := c.config.HttpClient.Do(req) + diff := time.Since(start) + return diff, resp, err +} + +// query is used to do a GET request against an endpoint +// and deserialize the response into an interface using +// standard Consul conventions. +func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { + r := c.newRequest("GET", endpoint) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if err := decodeBody(resp, out); err != nil { + return nil, err + } + return qm, nil +} + +// write is used to do a PUT request against an endpoint +// and serialize/deserialize using the standard Consul conventions.
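+// An illustrative internal call pattern (endpoint and payload hypothetical; +// several endpoint wrappers in this package take this shape): +// +//	var out struct{ ID string } +//	wm, err := c.write("/v1/session/create", nil, &out, q)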
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { + r := c.newRequest("PUT", endpoint) + r.setWriteOptions(q) + r.obj = in + rtt, resp, err := requireOK(c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + if out != nil { + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + } else if _, err := ioutil.ReadAll(resp.Body); err != nil { + return nil, err + } + return wm, nil +} + +// parseQueryMeta is used to help parse query meta-data +func parseQueryMeta(resp *http.Response, q *QueryMeta) error { + header := resp.Header + + // Parse the X-Consul-Index + index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) + } + q.LastIndex = index + + // Parse the X-Consul-LastContact + last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) + if err != nil { + return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) + } + q.LastContact = time.Duration(last) * time.Millisecond + + // Parse the X-Consul-KnownLeader + switch header.Get("X-Consul-KnownLeader") { + case "true": + q.KnownLeader = true + default: + q.KnownLeader = false + } + + // Parse X-Consul-Translate-Addresses + switch header.Get("X-Consul-Translate-Addresses") { + case "true": + q.AddressTranslationEnabled = true + default: + q.AddressTranslationEnabled = false + } + + return nil +} + +// decodeBody is used to JSON decode a body +func decodeBody(resp *http.Response, out interface{}) error { + dec := json.NewDecoder(resp.Body) + return dec.Decode(out) +} + +// encodeBody is used to encode a request body +func encodeBody(obj interface{}) (io.Reader, error) { + buf := bytes.NewBuffer(nil) + enc := json.NewEncoder(buf) + if err := enc.Encode(obj); err != nil { + return nil, err + } + return buf, nil +} + +// requireOK is used to wrap doRequest and check for a 200 +func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { + if e != nil { + if resp != nil { + resp.Body.Close() + } + return d, nil, e + } + if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + resp.Body.Close() + return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) + } + return d, resp, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go new file mode 100644 index 0000000000..80ce1bc815 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -0,0 +1,200 @@ +package api + +type Node struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + Meta map[string]string + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogService struct { + ID string + Node string + Address string + Datacenter string + TaggedAddresses map[string]string + NodeMeta map[string]string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServiceMeta map[string]string + ServicePort int + ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 +} + +type CatalogNode struct { + Node *Node + Services map[string]*AgentService +} + +type CatalogRegistration struct { + ID string + Node string + Address string + TaggedAddresses map[string]string + NodeMeta map[string]string + Datacenter string + Service *AgentService + Check *AgentCheck + SkipNodeUpdate 
bool +} + +type CatalogDeregistration struct { + Node string + Address string // Obsolete. + Datacenter string + ServiceID string + CheckID string +} + +// Catalog can be used to query the Catalog endpoints +type Catalog struct { + c *Client +} + +// Catalog returns a handle to the catalog endpoints +func (c *Client) Catalog() *Catalog { + return &Catalog{c} +} + +func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/register") + r.setWriteOptions(q) + r.obj = reg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/catalog/deregister") + r.setWriteOptions(q) + r.obj = dereg + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Datacenters is used to query for all the known datacenters +func (c *Catalog) Datacenters() ([]string, error) { + r := c.c.newRequest("GET", "/v1/catalog/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []string + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to query all the known nodes +func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*Node + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Services is used to query for all known services +func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/services") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out map[string][]string + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query catalog entries for a given service +func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CatalogService + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Node is used to query for service information about a single node +func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/catalog/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := 
&QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out *CatalogNode + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go new file mode 100644 index 0000000000..53318f11dd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/coordinate.go @@ -0,0 +1,106 @@ +package api + +import ( + "github.com/hashicorp/serf/coordinate" +) + +// CoordinateEntry represents a node and its associated network coordinate. +type CoordinateEntry struct { + Node string + Segment string + Coord *coordinate.Coordinate +} + +// CoordinateDatacenterMap has the coordinates for servers in a given datacenter +// and area. Network coordinates are only compatible within the same area. +type CoordinateDatacenterMap struct { + Datacenter string + AreaID string + Coordinates []CoordinateEntry +} + +// Coordinate can be used to query the coordinate endpoints +type Coordinate struct { + c *Client +} + +// Coordinate returns a handle to the coordinate endpoints +func (c *Client) Coordinate() *Coordinate { + return &Coordinate{c} +} + +// Datacenters is used to return the coordinates of all the servers in the WAN +// pool. +func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) { + r := c.c.newRequest("GET", "/v1/coordinate/datacenters") + _, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*CoordinateDatacenterMap + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// Nodes is used to return the coordinates of all the nodes in the LAN pool. +func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/nodes") + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Update inserts or updates the LAN coordinate of a node. +func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/coordinate/update") + r.setWriteOptions(q) + r.obj = coord + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + return wm, nil +} + +// Node is used to return the coordinates of a single node in the LAN pool.
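+// Illustrative usage (node name hypothetical, client assumed): +// +//	entries, _, err := client.Coordinate().Node("node-1", nil)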
+func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) { + r := c.c.newRequest("GET", "/v1/coordinate/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*CoordinateEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go new file mode 100644 index 0000000000..85b5b069b0 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/event.go @@ -0,0 +1,104 @@ +package api + +import ( + "bytes" + "strconv" +) + +// Event can be used to query the Event endpoints +type Event struct { + c *Client +} + +// UserEvent represents an event that was fired by the user +type UserEvent struct { + ID string + Name string + Payload []byte + NodeFilter string + ServiceFilter string + TagFilter string + Version int + LTime uint64 +} + +// Event returns a handle to the event endpoints +func (c *Client) Event() *Event { + return &Event{c} +} + +// Fire is used to fire a new user event. Only the Name, Payload and Filters +// are respected. This returns the ID or an associated error. Cross DC requests +// are supported. +func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { + r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) + r.setWriteOptions(q) + if params.NodeFilter != "" { + r.params.Set("node", params.NodeFilter) + } + if params.ServiceFilter != "" { + r.params.Set("service", params.ServiceFilter) + } + if params.TagFilter != "" { + r.params.Set("tag", params.TagFilter) + } + if params.Payload != nil { + r.body = bytes.NewReader(params.Payload) + } + + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + var out UserEvent + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// List is used to get the most recent events an agent has received. +// This list can be optionally filtered by the name. This endpoint supports +// quasi-blocking queries. The index is not monotonic, nor does it provide +// LastContact or KnownLeader. +func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { + r := e.c.newRequest("GET", "/v1/event/list") + r.setQueryOptions(q) + if name != "" { + r.params.Set("name", name) + } + rtt, resp, err := requireOK(e.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var entries []*UserEvent + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// IDToIndex is a bit of a hack. This simulates the index generation to +// convert an event ID into a WaitIndex.
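+// Illustrative usage (UUID hypothetical): the derived index can seed a +// blocking Event.List call. +// +//	idx := client.Event().IDToIndex("b54fe110-7af5-cafc-d1fb-afc8ba432b1c") +//	events, _, _ := client.Event().List("", &api.QueryOptions{WaitIndex: idx})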
+func (e *Event) IDToIndex(uuid string) uint64 { + lower := uuid[0:8] + uuid[9:13] + uuid[14:18] + upper := uuid[19:23] + uuid[24:36] + lowVal, err := strconv.ParseUint(lower, 16, 64) + if err != nil { + panic("Failed to convert " + lower) + } + highVal, err := strconv.ParseUint(upper, 16, 64) + if err != nil { + panic("Failed to convert " + upper) + } + return lowVal ^ highVal +} diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go new file mode 100644 index 0000000000..53f3de4f79 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -0,0 +1,215 @@ +package api + +import ( + "fmt" + "strings" +) + +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" +) + +// HealthCheck is used to represent a single check +type HealthCheck struct { + Node string + CheckID string + Name string + Status string + Notes string + Output string + ServiceID string + ServiceName string + ServiceTags []string + + Definition HealthCheckDefinition +} + +// HealthCheckDefinition is used to store the details about +// a health check's execution. +type HealthCheckDefinition struct { + HTTP string + Header map[string][]string + Method string + TLSSkipVerify bool + TCP string + Interval ReadableDuration + Timeout ReadableDuration + DeregisterCriticalServiceAfter ReadableDuration +} + +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks +// attached, this function determines the best representative of the status as +// a single string using the following heuristic: +// +// maintenance > critical > warning > passing +// +func (c HealthChecks) AggregatedStatus() string { + var passing, warning, critical, maintenance bool + for _, check := range c { + id := string(check.CheckID) + if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { + maintenance = true + continue + } + + switch check.Status { + case HealthPassing: + passing = true + case HealthWarning: + warning = true + case HealthCritical: + critical = true + default: + return "" + } + } + + switch { + case maintenance: + return HealthMaint + case critical: + return HealthCritical + case warning: + return HealthWarning + case passing: + return HealthPassing + default: + return HealthPassing + } +} + +// ServiceEntry is used for the health service endpoint +type ServiceEntry struct { + Node *Node + Service *AgentService + Checks HealthChecks +} + +// Health can be used to query the Health endpoints +type Health struct { + c *Client +} + +// Health returns a handle to the health endpoints +func (c *Client) Health() *Health { + return &Health{c} +} + +// Node is used to query for checks belonging to a given node +func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/node/"+node) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Checks is used to return the checks associated with a service +func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/checks/"+service) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Service is used to query health information along with service info +// for a given service. It can optionally do server-side filtering on a tag +// or nodes with passing health checks only. +func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/health/service/"+service) + r.setQueryOptions(q) + if tag != "" { + r.params.Set("tag", tag) + } + if passingOnly { + r.params.Set(HealthPassing, "1") + } + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out []*ServiceEntry + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// State is used to retrieve all the checks in a given state. +// The wildcard "any" state can also be used for all checks.
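+// Illustrative usage (client assumed): list everything currently failing: +// +//	checks, _, err := client.Health().State(api.HealthCritical, nil)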
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { + switch state { + case HealthAny: + case HealthWarning: + case HealthCritical: + case HealthPassing: + default: + return nil, nil, fmt.Errorf("Unsupported state: %v", state) + } + r := h.c.newRequest("GET", "/v1/health/state/"+state) + r.setQueryOptions(q) + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + var out HealthChecks + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go new file mode 100644 index 0000000000..97f5156855 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -0,0 +1,420 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strconv" + "strings" +) + +// KVPair is used to represent a single K/V entry +type KVPair struct { + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. + CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. + ModifyIndex uint64 + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. + Session string +} + +// KVPairs is a list of KVPair objects +type KVPairs []*KVPair + +// KVOp constants give possible operations available in a KVTxn. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" + KVCheckNotExists KVOp = "check-not-exists" +) + +// KVTxnOp defines a single operation inside a transaction. +type KVTxnOp struct { + Verb KVOp + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + +// KV is used to manipulate the K/V API +type KV struct { + c *Client +} + +// KV is used to return a handle to the K/V apis +func (c *Client) KV() *KV { + return &KV{c} +} + +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. 
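+// Illustrative usage (key hypothetical, client assumed): +// +//	pair, _, err := client.KV().Get("app/config/feature-flag", nil) +//	if err == nil && pair != nil { +//		fmt.Printf("%s => %s\n", pair.Key, pair.Value) +//	}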
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to lookup all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
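+//
+// Acquire and Release are the low-level lock primitive; a hedged sketch of
+// the round trip (the session setup is an assumption, see the Session API
+// in this package):
+//
+//	sessionID, _, _ := client.Session().Create(&SessionEntry{TTL: "15s"}, nil)
+//	p := &KVPair{Key: "locks/leader", Session: sessionID}
+//	held, _, _ := kv.Acquire(p, nil) // true if we now hold the lock
+//	if held {
+//		// ... critical section ...
+//		kv.Release(p, nil)
+//	}
+//
+// The higher-level Lock type in this package wraps this pattern.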
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := make(map[string]string, 2) + if p.Flags != 0 { + params["flags"] = strconv.FormatUint(p.Flags, 10) + } + params["release"] = p.Session + return k.put(p.Key, params, p.Value, q) +} + +func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { + if len(key) > 0 && key[0] == '/' { + return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key) + } + + r := k.c.newRequest("PUT", "/v1/kv/"+key) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + r.body = bytes.NewReader(body) + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// Delete is used to delete a single key +func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(key, nil, w) + return qm, err +} + +// DeleteCAS is used for a Delete Check-And-Set operation. The Key +// and ModifyIndex are respected. Returns true on success or false on failures. +func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { + params := map[string]string{ + "cas": strconv.FormatUint(p.ModifyIndex, 10), + } + return k.deleteInternal(p.Key, params, q) +} + +// DeleteTree is used to delete all keys under a prefix +func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { + _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) + return qm, err +} + +func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) + r.setWriteOptions(q) + for param, val := range params { + r.params.Set(param, val) + } + rtt, resp, err := requireOK(k.c.doRequest(r)) + if err != nil { + return false, nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, fmt.Errorf("Failed to read response: %v", err) + } + res := strings.Contains(buf.String(), "true") + return res, qm, nil +} + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. 
Transactions are defined as a
+// list of operations to perform, using the KVOp constants and KVTxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store. Note that this hides the internal raw transaction interface
+// and munges the input and output types into KV-specific ones for ease of use.
+// If there are more non-KV operations in the future we may break out a new
+// transaction API client, but it will be easy to keep this KV-specific variant
+// supported.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+//	ops := KVTxnOps{
+//		&KVTxnOp{
+//			Verb:    KVLock,
+//			Key:     "test/lock",
+//			Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//			Value:   []byte("hello"),
+//		},
+//		&KVTxnOp{
+//			Verb: KVGet,
+//			Key:  "another/key",
+//		},
+//	}
+//	ok, response, _, err := kv.Txn(ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. Deleted keys will have a nil entry in the results, and to
+// save space, the Value of each key in the Results will be nil unless the
+// operation is a KVGet. If the transaction was rolled back, the Errors member
+// will have entries referencing the index of the operation that failed along
+// with an error message.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	// Convert into the internal format since this is an all-KV txn.
+	ops := make(TxnOps, 0, len(txn))
+	for _, kvOp := range txn {
+		ops = append(ops, &TxnOp{KV: kvOp})
+	}
+	r.obj = ops
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		// Convert from the internal format.
+		kvResp := KVTxnResponse{
+			Errors: txnResp.Errors,
+		}
+		for _, result := range txnResp.Results {
+			kvResp.Results = append(kvResp.Results, result.KV)
+		}
+		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000000..41f72e7d23
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,385 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
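+//
+// A typical flow, sketched with an illustrative key name:
+//
+//	lock, _ := client.LockOpts(&LockOptions{Key: "service/leader"})
+//	lostCh, err := lock.Lock(nil) // blocks; a nil channel means we gave up
+//	if err == nil && lostCh != nil {
+//		defer lock.Unlock()
+//		// we hold the lock until lostCh is closed
+//	}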
+type LockOptions struct { + Key string // Must be set and have write permissions + Value []byte // Optional, value to associate with the lock + Session string // Optional, created if not specified + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever +} + +// LockKey returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockKey(key string) (*Lock, error) { + opts := &LockOptions{ + Key: key, + } + return c.LockOpts(opts) +} + +// LockOpts returns a handle to a lock struct which can be used +// to acquire and release the mutex. The key used must have +// write permissions. +func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { + if opts.Key == "" { + return nil, fmt.Errorf("missing key") + } + if opts.SessionName == "" { + opts.SessionName = DefaultLockSessionName + } + if opts.SessionTTL == "" { + opts.SessionTTL = DefaultLockSessionTTL + } else { + if _, err := time.ParseDuration(opts.SessionTTL); err != nil { + return nil, fmt.Errorf("invalid SessionTTL: %v", err) + } + } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } + l := &Lock{ + c: c, + opts: opts, + } + return l, nil +} + +// Lock attempts to acquire the lock and blocks while doing so. +// Providing a non-nil stopCh can be used to abort the lock attempt. +// Returns a channel that is closed if our lock is lost or an error. +// This channel could be closed at any time due to session invalidation, +// communication errors, operator intervention, etc. It is NOT safe to +// assume that the lock is held until Unlock() unless the Session is specifically +// created without any associated health checks. By default Consul sessions +// prefer liveness over safety and an application must be able to handle +// the lock being lost. +func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return nil, ErrLockHeld + } + + // Check if we need to create a session first + l.lockSession = l.opts.Session + if l.lockSession == "" { + s, err := l.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + l.sessionRenew = make(chan struct{}) + l.lockSession = s + session := l.c.Session() + go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !l.isHeld { + close(l.sessionRenew) + l.sessionRenew = nil + } + }() + } + + // Setup the query options + kv := l.c.KV() + qOpts := &QueryOptions{ + WaitTime: l.opts.LockWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. 
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > qOpts.WaitTime {
+			return nil, nil
+		}
+
+		qOpts.WaitTime -= elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if pair != nil && pair.Session != "" {
+			// If the session is not null, this means that a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to clean up the lock entry. It is not necessary
+// to invoke it. It will fail if the lock is in use.
+func (l *Lock) Destroy() error { + // Hold the lock as we try to release + l.l.Lock() + defer l.l.Unlock() + + // Check if we already hold the lock + if l.isHeld { + return ErrLockHeld + } + + // Look for an existing lock + kv := l.c.KV() + pair, _, err := kv.Get(l.opts.Key, nil) + if err != nil { + return fmt.Errorf("failed to read lock: %v", err) + } + + // Nothing to do if the lock does not exist + if pair == nil { + return nil + } + + // Check for possible flag conflict + if pair.Flags != LockFlagValue { + return ErrLockConflict + } + + // Check if it is in use + if pair.Session != "" { + return ErrLockInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(pair, nil) + if err != nil { + return fmt.Errorf("failed to remove lock: %v", err) + } + if !didRemove { + return ErrLockInUse + } + return nil +} + +// createSession is used to create a new managed session +func (l *Lock) createSession() (string, error) { + session := l.c.Session() + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// lockEntry returns a formatted KVPair for the lock +func (l *Lock) lockEntry(session string) *KVPair { + return &KVPair{ + Key: l.opts.Key, + Value: l.opts.Value, + Session: session, + Flags: LockFlagValue, + } +} + +// monitorLock is a long running routine to monitor a lock ownership +// It closes the stopCh if we lose our leadership. +func (l *Lock) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := l.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := l.opts.MonitorRetries +RETRY: + pair, meta, err := kv.Get(l.opts.Key, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(l.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + if pair != nil && pair.Session == session { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go new file mode 100644 index 0000000000..079e224866 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -0,0 +1,11 @@ +package api + +// Operator can be used to perform low-level operator tasks for Consul. +type Operator struct { + c *Client +} + +// Operator returns a handle to the operator endpoints. +func (c *Client) Operator() *Operator { + return &Operator{c} +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go new file mode 100644 index 0000000000..a630b694cd --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_area.go @@ -0,0 +1,193 @@ +// The /v1/operator/area endpoints are available only in Consul Enterprise and +// interact with its network area subsystem. Network areas are used to link +// together Consul servers in different Consul datacenters. With network areas, +// Consul datacenters can be linked together in ways other than a fully-connected +// mesh, as is required for Consul's WAN. +package api + +import ( + "net" + "time" +) + +// Area defines a network area. 
+type Area struct {
+	// ID is the identifier for an area (a UUID). This must be left empty
+	// when creating a new area.
+	ID string
+
+	// PeerDatacenter is the peer Consul datacenter that will make up the
+	// other side of this network area. Network areas always involve a pair
+	// of datacenters: the datacenter where the area was created, and the
+	// peer datacenter. This is required.
+	PeerDatacenter string
+
+	// RetryJoin specifies the addresses of Consul servers to join, such as
+	// IPs or hostnames with an optional port number. This is optional.
+	RetryJoin []string
+
+	// UseTLS specifies whether gossip over this area should be encrypted with TLS
+	// if possible.
+	UseTLS bool
+}
+
+// AreaJoinResponse is returned when a join occurs and gives the result for each
+// address.
+type AreaJoinResponse struct {
+	// The address that was joined.
+	Address string
+
+	// Whether or not the join was a success.
+	Joined bool
+
+	// If we couldn't join, this is the message with information.
+	Error string
+}
+
+// SerfMember is a generic structure for reporting information about members in
+// a Serf cluster. This is only used by the area endpoints right now, but this
+// could be expanded to other endpoints in the future.
+type SerfMember struct {
+	// ID is the node identifier (a UUID).
+	ID string
+
+	// Name is the node name.
+	Name string
+
+	// Addr has the IP address.
+	Addr net.IP
+
+	// Port is the RPC port.
+	Port uint16
+
+	// Datacenter is the DC name.
+	Datacenter string
+
+	// Role is "client", "server", or "unknown".
+	Role string
+
+	// Build has the version of the Consul agent.
+	Build string
+
+	// Protocol is the protocol of the Consul agent.
+	Protocol int
+
+	// Status is the Serf health status "none", "alive", "leaving", "left",
+	// or "failed".
+	Status string
+
+	// RTT is the estimated round trip time from the server handling the
+	// request to this member. This will be negative if no RTT estimate
+	// is available.
+	RTT time.Duration
+}
+
+// AreaCreate will create a new network area. The ID in the given structure must
+// be empty and a generated ID will be returned on success.
+func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("POST", "/v1/operator/area")
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaUpdate will update the configuration of the network area with the given ID.
+func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
+	r.setWriteOptions(q)
+	r.obj = area
+	rtt, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// AreaGet returns a single network area.
+func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaList returns all the available network areas. +func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) { + var out []*Area + qm, err := op.c.query("/v1/operator/area", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// AreaDelete deletes the given network area. +func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) { + r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// AreaJoin attempts to join the given set of join addresses to the given +// network area. See the Area structure for details about join addresses. +func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) { + r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join") + r.setWriteOptions(q) + r.obj = addresses + rtt, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out []*AreaJoinResponse + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return out, wm, nil +} + +// AreaMembers lists the Serf information about the members in the given area. +func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) { + var out []*SerfMember + qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go new file mode 100644 index 0000000000..b179406dc1 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -0,0 +1,219 @@ +package api + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// AutopilotConfiguration is used for querying/setting the Autopilot configuration. +// Autopilot helps manage operator tasks related to Consul servers like removing +// failed servers from the Raft quorum. +type AutopilotConfiguration struct { + // CleanupDeadServers controls whether to remove dead servers from the Raft + // peer list when a new server joins + CleanupDeadServers bool + + // LastContactThreshold is the limit on the amount of time a server can go + // without leader contact before being considered unhealthy. + LastContactThreshold *ReadableDuration + + // MaxTrailingLogs is the amount of entries in the Raft Log that a server can + // be behind before being considered unhealthy. + MaxTrailingLogs uint64 + + // ServerStabilizationTime is the minimum amount of time a server must be + // in a stable, healthy state before it can be added to the cluster. Only + // applicable with Raft protocol version 3 or higher. + ServerStabilizationTime *ReadableDuration + + // (Enterprise-only) RedundancyZoneTag is the node tag to use for separating + // servers into zones for redundancy. If left blank, this feature will be disabled. 
+	RedundancyZoneTag string
+
+	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
+	// strategy of waiting until enough newer-versioned servers have been added to the
+	// cluster before promoting them to voters.
+	DisableUpgradeMigration bool
+
+	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
+	// performing upgrade migrations. If left blank, the Consul version will be used.
+	UpgradeVersionTag string
+
+	// CreateIndex holds the index corresponding to the creation of this configuration.
+	// This is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex will be set to the index of the last update when retrieving the
+	// Autopilot configuration. Resubmitting a configuration with
+	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
+	// there hasn't been a subsequent update since the configuration was retrieved.
+	ModifyIndex uint64
+}
+
+// ServerHealth is the health (from the leader's point of view) of a server.
+type ServerHealth struct {
+	// ID is the raft ID of the server.
+	ID string
+
+	// Name is the node name of the server.
+	Name string
+
+	// Address is the address of the server.
+	Address string
+
+	// The status of the SerfHealth check for the server.
+	SerfStatus string
+
+	// Version is the Consul version of the server.
+	Version string
+
+	// Leader is whether this server is currently the leader.
+	Leader bool
+
+	// LastContact is the time since this node's last contact with the leader.
+	LastContact *ReadableDuration
+
+	// LastTerm is the highest leader term this server has a record of in its Raft log.
+	LastTerm uint64
+
+	// LastIndex is the last log index this server has a record of in its Raft log.
+	LastIndex uint64
+
+	// Healthy is whether or not the server is healthy according to the current
+	// Autopilot config.
+	Healthy bool
+
+	// Voter is whether this is a voting server.
+	Voter bool
+
+	// StableSince is the last time this server's Healthy value changed.
+	StableSince time.Time
+}
+
+// OperatorHealthReply is a representation of the overall health of the cluster
+type OperatorHealthReply struct {
+	// Healthy is true if all the servers in the cluster are healthy.
+	Healthy bool
+
+	// FailureTolerance is the number of healthy servers that could be lost without
+	// an outage occurring.
+	FailureTolerance int
+
+	// Servers holds the health of each server.
+	Servers []ServerHealth
+}
+
+// ReadableDuration is a duration type that is serialized to JSON in human readable format.
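+//
+// For example, a JSON round trip (a sketch, assuming encoding/json):
+//
+//	d := NewReadableDuration(90 * time.Second)
+//	b, _ := json.Marshal(d) // `"1m30s"`
+//	var out ReadableDuration
+//	_ = json.Unmarshal(b, &out) // out.Duration() == 90 * time.Second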
+type ReadableDuration time.Duration + +func NewReadableDuration(dur time.Duration) *ReadableDuration { + d := ReadableDuration(dur) + return &d +} + +func (d *ReadableDuration) String() string { + return d.Duration().String() +} + +func (d *ReadableDuration) Duration() time.Duration { + if d == nil { + return time.Duration(0) + } + return time.Duration(*d) +} + +func (d *ReadableDuration) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil +} + +func (d *ReadableDuration) UnmarshalJSON(raw []byte) error { + if d == nil { + return fmt.Errorf("cannot unmarshal to nil pointer") + } + + str := string(raw) + if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' { + return fmt.Errorf("must be enclosed with quotes: %s", str) + } + dur, err := time.ParseDuration(str[1 : len(str)-1]) + if err != nil { + return err + } + *d = ReadableDuration(dur) + return nil +} + +// AutopilotGetConfiguration is used to query the current Autopilot configuration. +func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotConfiguration + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} + +// AutopilotSetConfiguration is used to set the current Autopilot configuration. +func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration") + r.setWriteOptions(q) + r.obj = conf + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// AutopilotCASConfiguration is used to perform a Check-And-Set update on the +// Autopilot configuration. The ModifyIndex value will be respected. Returns +// true on success or false on failures. 
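+//
+// A hedged sketch of the read-modify-write cycle this enables:
+//
+//	op := client.Operator()
+//	conf, _ := op.AutopilotGetConfiguration(nil)
+//	conf.CleanupDeadServers = false
+//	ok, _ := op.AutopilotCASConfiguration(conf, nil)
+//	// ok is false if another writer raced us since the read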
+func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
+	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
+	r.setWriteOptions(q)
+	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
+	r.obj = conf
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return false, err
+	}
+	defer resp.Body.Close()
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+
+	return res, nil
+}
+
+// AutopilotServerHealth is used to query the health of the servers in the
+// cluster, as seen by Autopilot.
+func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
+	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out OperatorHealthReply
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
new file mode 100644
index 0000000000..6b614296ce
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
@@ -0,0 +1,86 @@
+package api
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// Segment has the network segment this request corresponds to.
+	Segment string
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
new file mode 100644
index 0000000000..a9844df2dd
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_raft.go
@@ -0,0 +1,89 @@
+package api
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// ProtocolVersion is the Raft protocol version used by the server.
+	ProtocolVersion string
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by ID.
+func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	r.params.Set("id", string(id))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
new file mode 100644
index 0000000000..92b05d3c03
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/operator_segment.go
@@ -0,0 +1,11 @@
+package api
+
+// SegmentList returns all the available LAN segments.
+func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
+	var out []string
+	qm, err := op.c.query("/v1/operator/segment", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 0000000000..d322dd8679
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,204 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent which initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// IgnoreCheckIDs is an optional list of health check IDs to ignore when
+	// considering which nodes are healthy. It is useful as an emergency measure
+	// to temporarily override some health check that is producing false negatives,
+	// for example.
+	IgnoreCheckIDs []string
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded).
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+
+	// NodeMeta is a map of required node metadata fields. If a key/value
+	// pair is in this map it must be present on the node in order for the
+	// service entry to be returned.
+	NodeMeta map[string]string
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+ Session string + + // Token is the ACL token used when the query was created, and it is + // used when a query is subsequently executed. This token, or a token + // with management privileges, must be used to change the query later. + Token string + + // Service defines a service query (leaving things open for other types + // later). + Service ServiceQuery + + // DNS has options that control how the results of this query are + // served over DNS. + DNS QueryDNSOptions + + // Template is used to pass through the arguments for creating a + // prepared query with an attached template. If a template is given, + // interpolations are possible in other struct fields. + Template QueryTemplate +} + +// PreparedQueryExecuteResponse has the results of executing a query. +type PreparedQueryExecuteResponse struct { + // Service is the service that was queried. + Service string + + // Nodes has the nodes that were output by the query. + Nodes []ServiceEntry + + // DNS has the options for serving these results over DNS. + DNS QueryDNSOptions + + // Datacenter is the datacenter that these results came from. + Datacenter string + + // Failovers is a count of how many times we had to query a remote + // datacenter. + Failovers int +} + +// PreparedQuery can be used to query the prepared query endpoints. +type PreparedQuery struct { + c *Client +} + +// PreparedQuery returns a handle to the prepared query endpoints. +func (c *Client) PreparedQuery() *PreparedQuery { + return &PreparedQuery{c} +} + +// Create makes a new prepared query. The ID of the new query is returned. +func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) { + r := c.c.newRequest("POST", "/v1/query") + r.setWriteOptions(q) + r.obj = query + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return "", nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + + var out struct{ ID string } + if err := decodeBody(resp, &out); err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Update makes updates to an existing prepared query. +func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) { + return c.c.write("/v1/query/"+query.ID, query, nil, q) +} + +// List is used to fetch all the prepared queries (always requires a management +// token). +func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query", &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Get is used to fetch a specific prepared query. +func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) { + var out []*PreparedQueryDefinition + qm, err := c.c.query("/v1/query/"+queryID, &out, q) + if err != nil { + return nil, nil, err + } + return out, qm, nil +} + +// Delete is used to delete a specific prepared query. +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("DELETE", "/v1/query/"+queryID) + r.setWriteOptions(q) + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +// Execute is used to execute a specific prepared query. You can execute using +// a query ID or name. 
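+//
+// For example (a sketch; the query name is an assumption):
+//
+//	pq := client.PreparedQuery()
+//	resp, _, err := pq.Execute("nearest-redis", nil)
+//	if err == nil {
+//		for _, entry := range resp.Nodes {
+//			// entry.Node, entry.Service, entry.Checks
+//		}
+//	}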
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+	var out *PreparedQueryExecuteResponse
+	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 0000000000..745a208c99
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialize using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000000..d0c5741778
--- /dev/null
+++ b/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,513 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix for
+	// coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true. It is effectively used as a set.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until it
+// succeeds, the stopCh is triggered, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
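+//
+// A hedged usage sketch (the prefix and limit are illustrative):
+//
+//	sem, _ := client.SemaphorePrefix("service/semaphore", 2)
+//	slotCh, err := sem.Acquire(nil)
+//	if err == nil && slotCh != nil {
+//		defer sem.Release()
+//		// slot held until slotCh is closed
+//	}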
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { + // Hold the lock as we try to acquire + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return nil, ErrSemaphoreHeld + } + + // Check if we need to create a session first + s.lockSession = s.opts.Session + if s.lockSession == "" { + sess, err := s.createSession() + if err != nil { + return nil, fmt.Errorf("failed to create session: %v", err) + } + + s.sessionRenew = make(chan struct{}) + s.lockSession = sess + session := s.c.Session() + go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) + + // If we fail to acquire the lock, cleanup the session + defer func() { + if !s.isHeld { + close(s.sessionRenew) + s.sessionRenew = nil + } + }() + } + + // Create the contender entry + kv := s.c.KV() + made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) + if err != nil || !made { + return nil, fmt.Errorf("failed to make contender entry: %v", err) + } + + // Setup the query options + qOpts := &QueryOptions{ + WaitTime: s.opts.SemaphoreWaitTime, + } + + start := time.Now() + attempts := 0 +WAIT: + // Check if we should quit + select { + case <-stopCh: + return nil, nil + default: + } + + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Since(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + + // Read the prefix + pairs, meta, err := kv.List(s.opts.Prefix, qOpts) + if err != nil { + return nil, fmt.Errorf("failed to read prefix: %v", err) + } + + // Decode the lock + lockPair := s.findLock(pairs) + if lockPair.Flags != SemaphoreFlagValue { + return nil, ErrSemaphoreConflict + } + lock, err := s.decodeLock(lockPair) + if err != nil { + return nil, err + } + + // Verify we agree with the limit + if lock.Limit != s.opts.Limit { + return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", + lock.Limit, s.opts.Limit) + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if the lock is held + if len(lock.Holders) >= lock.Limit { + qOpts.WaitIndex = meta.LastIndex + goto WAIT + } + + // Create a new lock with us as a holder + lock.Holders[s.lockSession] = true + newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) + if err != nil { + return nil, err + } + + // Attempt the acquisition + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return nil, fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + // Update failed, could have been a race with another contender, + // retry the operation + goto WAIT + } + + // Watch to ensure we maintain ownership of the slot + lockCh := make(chan struct{}) + go s.monitorLock(s.lockSession, lockCh) + + // Set that we own the lock + s.isHeld = true + + // Acquired! All done + return lockCh, nil +} + +// Release is used to voluntarily give up our semaphore slot. It is +// an error to call this if the semaphore has not been acquired. 
+func (s *Semaphore) Release() error { + // Hold the lock as we try to release + s.l.Lock() + defer s.l.Unlock() + + // Ensure the lock is actually held + if !s.isHeld { + return ErrSemaphoreNotHeld + } + + // Set that we no longer own the lock + s.isHeld = false + + // Stop the session renew + if s.sessionRenew != nil { + defer func() { + close(s.sessionRenew) + s.sessionRenew = nil + }() + } + + // Get and clear the lock session + lockSession := s.lockSession + s.lockSession = "" + + // Remove ourselves as a lock holder + kv := s.c.KV() + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) +READ: + pair, _, err := kv.Get(key, nil) + if err != nil { + return err + } + if pair == nil { + pair = &KVPair{} + } + lock, err := s.decodeLock(pair) + if err != nil { + return err + } + + // Create a new lock without us as a holder + if _, ok := lock.Holders[lockSession]; ok { + delete(lock.Holders, lockSession) + newLock, err := s.encodeLock(lock, pair.ModifyIndex) + if err != nil { + return err + } + + // Swap the locks + didSet, _, err := kv.CAS(newLock, nil) + if err != nil { + return fmt.Errorf("failed to update lock: %v", err) + } + if !didSet { + goto READ + } + } + + // Destroy the contender entry + contenderKey := path.Join(s.opts.Prefix, lockSession) + if _, err := kv.Delete(contenderKey, nil); err != nil { + return err + } + return nil +} + +// Destroy is used to clean up the semaphore entry. It is not necessary +// to invoke it, and it will fail if the semaphore is in use. +func (s *Semaphore) Destroy() error { + // Hold the lock as we try to destroy + s.l.Lock() + defer s.l.Unlock() + + // Check if we already hold the semaphore + if s.isHeld { + return ErrSemaphoreHeld + } + + // List the entries under the semaphore prefix + kv := s.c.KV() + pairs, _, err := kv.List(s.opts.Prefix, nil) + if err != nil { + return fmt.Errorf("failed to read prefix: %v", err) + } + + // Find the lock pair, bail if it doesn't exist + lockPair := s.findLock(pairs) + if lockPair.ModifyIndex == 0 { + return nil + } + if lockPair.Flags != SemaphoreFlagValue { + return ErrSemaphoreConflict + } + + // Decode the lock + lock, err := s.decodeLock(lockPair) + if err != nil { + return err + } + + // Prune the dead holders + s.pruneDeadHolders(lock, pairs) + + // Check if there are any holders + if len(lock.Holders) > 0 { + return ErrSemaphoreInUse + } + + // Attempt the delete + didRemove, _, err := kv.DeleteCAS(lockPair, nil) + if err != nil { + return fmt.Errorf("failed to remove semaphore: %v", err) + } + if !didRemove { + return ErrSemaphoreInUse + } + return nil +} + +// createSession is used to create a new managed session +func (s *Semaphore) createSession() (string, error) { + session := s.c.Session() + se := &SessionEntry{ + Name: s.opts.SessionName, + TTL: s.opts.SessionTTL, + Behavior: SessionBehaviorDelete, + } + id, _, err := session.Create(se, nil) + if err != nil { + return "", err + } + return id, nil +} + +// contenderEntry returns a formatted KVPair for the contender +func (s *Semaphore) contenderEntry(session string) *KVPair { + return &KVPair{ + Key: path.Join(s.opts.Prefix, session), + Value: s.opts.Value, + Session: session, + Flags: SemaphoreFlagValue, + } +} + +// findLock is used to find the KV Pair which is used for coordination +func (s *Semaphore) findLock(pairs KVPairs) *KVPair { + key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) + for _, pair := range pairs { + if pair.Key == key { + return pair + } + } + return &KVPair{Flags: SemaphoreFlagValue} +} + +// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul +func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { + // Handle if there is no lock + if pair == nil || pair.Value == nil { + return &semaphoreLock{ + Limit: s.opts.Limit, + Holders: make(map[string]bool), + }, nil + } + + l := &semaphoreLock{} + if err := json.Unmarshal(pair.Value, l); err != nil { + return nil, fmt.Errorf("lock decoding failed: %v", err) + } + return l, nil +} + +// encodeLock is used to encode a semaphoreLock into a KVPair +// that can be PUT +func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { + enc, err := json.Marshal(l) + if err != nil { + return nil, fmt.Errorf("lock encoding failed: %v", err) + } + pair := &KVPair{ + Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), + Value: enc, + Flags: SemaphoreFlagValue, + ModifyIndex: oldIndex, + } + return pair, nil +} + +// pruneDeadHolders is used to remove all the dead lock holders +func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { + // Gather all the live holders + alive := make(map[string]struct{}, len(pairs)) + for _, pair := range pairs { + if pair.Session != "" { + alive[pair.Session] = struct{}{} + } + } + + // Remove any holders that are dead + for holder := range lock.Holders { + if _, ok := alive[holder]; !ok { + delete(lock.Holders, holder) + } + } +} + +// monitorLock is a long-running routine to monitor semaphore ownership. +// It closes the stopCh if we lose our slot. +func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { + defer close(stopCh) + kv := s.c.KV() + opts := &QueryOptions{RequireConsistent: true} +WAIT: + retries := s.opts.MonitorRetries +RETRY: + pairs, meta, err := kv.List(s.opts.Prefix, opts) + if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. + if retries > 0 && IsRetryableError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } + return + } + lockPair := s.findLock(pairs) + lock, err := s.decodeLock(lockPair) + if err != nil { + return + } + s.pruneDeadHolders(lock, pairs) + if _, ok := lock.Holders[session]; ok { + opts.WaitIndex = meta.LastIndex + goto WAIT + } +} diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go new file mode 100644 index 0000000000..1613f11a60 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/session.go @@ -0,0 +1,224 @@ +package api + +import ( + "errors" + "fmt" + "time" +) + +const ( + // SessionBehaviorRelease is the default behavior and causes + // all associated locks to be released on session invalidation. + SessionBehaviorRelease = "release" + + // SessionBehaviorDelete is new in Consul 0.5 and changes the + // behavior to delete all associated locks on session invalidation. + // It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+ SessionBehaviorDelete = "delete" +) + +var ErrSessionExpired = errors.New("session expired") + +// SessionEntry represents a session in consul +type SessionEntry struct { + CreateIndex uint64 + ID string + Name string + Node string + Checks []string + LockDelay time.Duration + Behavior string + TTL string +} + +// Session can be used to query the Session endpoints +type Session struct { + c *Client +} + +// Session returns a handle to the session endpoints +func (c *Client) Session() *Session { + return &Session{c} +} + +// CreateNoChecks is like Create but is used specifically to create +// a session with no associated health checks. +func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + body := make(map[string]interface{}) + body["Checks"] = []string{} + if se != nil { + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(body, q) + +} + +// Create makes a new session. Providing a session entry can +// customize the session. It can also be nil to use defaults. +func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { + var obj interface{} + if se != nil { + body := make(map[string]interface{}) + obj = body + if se.Name != "" { + body["Name"] = se.Name + } + if se.Node != "" { + body["Node"] = se.Node + } + if se.LockDelay != 0 { + body["LockDelay"] = durToMsec(se.LockDelay) + } + if len(se.Checks) > 0 { + body["Checks"] = se.Checks + } + if se.Behavior != "" { + body["Behavior"] = se.Behavior + } + if se.TTL != "" { + body["TTL"] = se.TTL + } + } + return s.create(obj, q) +} + +func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { + var out struct{ ID string } + wm, err := s.c.write("/v1/session/create", obj, &out, q) + if err != nil { + return "", nil, err + } + return out.ID, wm, nil +} + +// Destroy invalidates a given session +func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { + wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) + if err != nil { + return nil, err + } + return wm, nil +} + +// Renew renews the TTL on a given session +func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { + r := s.c.newRequest("PUT", "/v1/session/renew/"+id) + r.setWriteOptions(q) + rtt, resp, err := s.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{RequestTime: rtt} + + if resp.StatusCode == 404 { + return nil, wm, nil + } else if resp.StatusCode != 200 { + return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) + } + + var entries []*SessionEntry + if err := decodeBody(resp, &entries); err != nil { + return nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + if len(entries) > 0 { + return entries[0], wm, nil + } + return nil, wm, nil +} + +// RenewPeriodic is used to periodically invoke Session.Renew on a +// session until a doneCh is closed. This is meant to be used in a long running +// goroutine to ensure a session stays valid. 
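For illustration (an editor's sketch, not vendored code), a TTL session is typically created with SessionBehaviorDelete and kept alive with RenewPeriodic until the work is done. The session name and the "15s" TTL below are assumptions; passing a nil *WriteOptions mirrors how the semaphore's Acquire invokes RenewPeriodic.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	session := client.Session()

	// Create a TTL session whose locks are deleted on invalidation.
	id, _, err := session.Create(&api.SessionEntry{
		Name:     "example-worker", // hypothetical name
		TTL:      "15s",
		Behavior: api.SessionBehaviorDelete,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Renew at TTL/2 until doneCh closes; RenewPeriodic destroys the
	// session on its way out.
	doneCh := make(chan struct{})
	renewed := make(chan struct{})
	go func() {
		defer close(renewed)
		if err := session.RenewPeriodic("15s", id, nil, doneCh); err != nil {
			log.Println("session lost:", err)
		}
	}()

	// ... do work tied to the session ...

	close(doneCh)
	<-renewed // wait for the renewal loop to destroy the session
}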
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error { + ctx := q.Context() + + ttl, err := time.ParseDuration(initialTTL) + if err != nil { + return err + } + + waitDur := ttl / 2 + lastRenewTime := time.Now() + var lastErr error + for { + if time.Since(lastRenewTime) > ttl { + return lastErr + } + select { + case <-time.After(waitDur): + entry, _, err := s.Renew(id, q) + if err != nil { + waitDur = time.Second + lastErr = err + continue + } + if entry == nil { + return ErrSessionExpired + } + + // Handle the server updating the TTL + ttl, _ = time.ParseDuration(entry.TTL) + waitDur = ttl / 2 + lastRenewTime = time.Now() + + case <-doneCh: + // Attempt a session destroy + s.Destroy(id, q) + return nil + + case <-ctx.Done(): + // Bail immediately since attempting the destroy would + // use the canceled context in q, which would just bail. + return ctx.Err() + } + } +} + +// Info looks up a single session +func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/info/"+id, &entries, q) + if err != nil { + return nil, nil, err + } + if len(entries) > 0 { + return entries[0], qm, nil + } + return nil, qm, nil +} + +// Node gets sessions for a node +func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/node/"+node, &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} + +// List gets all active sessions +func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { + var entries []*SessionEntry + qm, err := s.c.query("/v1/session/list", &entries, q) + if err != nil { + return nil, nil, err + } + return entries, qm, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 0000000000..e902377dd5 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. +func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. +func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it.
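As an illustrative sketch (not part of the vendored file), Save and Restore pair naturally with a file on disk; the "backup.snap" path is an assumption and error handling is abbreviated:

package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Save streams the snapshot; closing the reader is the caller's job.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	out, err := os.Create("backup.snap") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}

	// Restoring later streams the same bytes back.
	in, err := os.Open("backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()
	if err := snap.Restore(nil, in); err != nil {
		log.Fatal(err)
	}
}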
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go new file mode 100644 index 0000000000..74ef61a678 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -0,0 +1,43 @@ +package api + +// Status can be used to query the Status endpoints +type Status struct { + c *Client +} + +// Status returns a handle to the status endpoints +func (c *Client) Status() *Status { + return &Status{c} +} + +// Leader is used to query for a known leader +func (s *Status) Leader() (string, error) { + r := s.c.newRequest("GET", "/v1/status/leader") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + var leader string + if err := decodeBody(resp, &leader); err != nil { + return "", err + } + return leader, nil +} + +// Peers is used to query for a known raft peers +func (s *Status) Peers() ([]string, error) { + r := s.c.newRequest("GET", "/v1/status/peers") + _, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var peers []string + if err := decodeBody(resp, &peers); err != nil { + return nil, err + } + return peers, nil +} diff --git a/vendor/github.com/hashicorp/consul/website/LICENSE.md b/vendor/github.com/hashicorp/consul/website/LICENSE.md new file mode 100644 index 0000000000..3189f43a65 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/website/LICENSE.md @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +The text contents of this website are MPL 2.0 licensed. + +The design contents of this website are proprietary and may not be reproduced +or reused in any way other than to run the website locally. The license for +the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/hashicorp/errwrap/LICENSE b/vendor/github.com/hashicorp/errwrap/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/errwrap/errwrap.go b/vendor/github.com/hashicorp/errwrap/errwrap.go new file mode 100644 index 0000000000..a733bef18c --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap.go @@ -0,0 +1,169 @@ +// Package errwrap implements methods to formalize error wrapping in Go. +// +// All of the top-level functions that take an `error` are built to be able +// to take any error, not just wrapped errors. This allows you to use errwrap +// without having to type-check and type-cast everywhere. +package errwrap + +import ( + "errors" + "reflect" + "strings" +) + +// WalkFunc is the callback called for Walk. +type WalkFunc func(error) + +// Wrapper is an interface that can be implemented by custom types to +// have all the Contains, Get, etc. functions in errwrap work. +// +// When Walk reaches a Wrapper, it will call the callback for every +// wrapped error in addition to the wrapper itself. Since all the top-level +// functions in errwrap use Walk, this means that all those functions work +// with your custom type. +type Wrapper interface { + WrappedErrors() []error +} + +// Wrap defines that outer wraps inner, returning an error type that +// can be cleanly used with the other methods in this package, such as +// Contains, GetAll, etc. +// +// This function won't modify the error message at all (the outer message +// will be used). +func Wrap(outer, inner error) error { + return &wrappedError{ + Outer: outer, + Inner: inner, + } +} + +// Wrapf wraps an error with a formatting message. This is similar to using +// `fmt.Errorf` to wrap an error. If you're using `fmt.Errorf` to wrap +// errors, you should replace it with this. +// +// format is the format of the error message. The string '{{err}}' will +// be replaced with the original error message. +func Wrapf(format string, err error) error { + outerMsg := "" + if err != nil { + outerMsg = err.Error() + } + + outer := errors.New(strings.Replace( + format, "{{err}}", outerMsg, -1)) + + return Wrap(outer, err) +} + +// Contains checks if the given error contains an error with the +// message msg. If err is not a wrapped error, this will always return +// false unless the error itself happens to match this msg. +func Contains(err error, msg string) bool { + return len(GetAll(err, msg)) > 0 +} + +// ContainsType checks if the given error contains an error with +// the same concrete type as v. If err is not a wrapped error, this will +// check the err itself. +func ContainsType(err error, v interface{}) bool { + return len(GetAllType(err, v)) > 0 +} + +// Get is the same as GetAll but returns the deepest matching error. +func Get(err error, msg string) error { + es := GetAll(err, msg) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetType is the same as GetAllType but returns the deepest matching error. +func GetType(err error, v interface{}) error { + es := GetAllType(err, v) + if len(es) > 0 { + return es[len(es)-1] + } + + return nil +} + +// GetAll gets all the errors that might be wrapped in err with the +// given message. The order of the errors is such that the outermost +// matching error (the most recent wrap) is index zero, and so on. 
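As a quick editor's sketch of the package in use (the *os.PathError and the messages are illustrative assumptions), wrapping keeps the original error reachable both by message and by concrete type:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/hashicorp/errwrap"
)

func main() {
	base := &os.PathError{Op: "open", Path: "config.json", Err: errors.New("permission denied")}

	// Wrapf substitutes {{err}} with the wrapped error's message.
	err := errwrap.Wrapf("failed to load config: {{err}}", base)
	fmt.Println(err) // failed to load config: open config.json: permission denied

	// The original error is still reachable by message and by type.
	fmt.Println(errwrap.Contains(err, base.Error()))        // true
	fmt.Println(errwrap.ContainsType(err, &os.PathError{})) // true
	if inner := errwrap.GetType(err, &os.PathError{}); inner != nil {
		fmt.Println(inner.(*os.PathError).Path) // config.json
	}
}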
+func GetAll(err error, msg string) []error { + var result []error + + Walk(err, func(err error) { + if err.Error() == msg { + result = append(result, err) + } + }) + + return result +} + +// GetAllType gets all the errors that are the same type as v. +// +// The order of the return value is the same as described in GetAll. +func GetAllType(err error, v interface{}) []error { + var result []error + + var search string + if v != nil { + search = reflect.TypeOf(v).String() + } + Walk(err, func(err error) { + var needle string + if err != nil { + needle = reflect.TypeOf(err).String() + } + + if needle == search { + result = append(result, err) + } + }) + + return result +} + +// Walk walks all the wrapped errors in err and calls the callback. If +// err isn't a wrapped error, this will be called once for err. If err +// is a wrapped error, the callback will be called for both the wrapper +// that implements error as well as the wrapped error itself. +func Walk(err error, cb WalkFunc) { + if err == nil { + return + } + + switch e := err.(type) { + case *wrappedError: + cb(e.Outer) + Walk(e.Inner, cb) + case Wrapper: + cb(err) + + for _, err := range e.WrappedErrors() { + Walk(err, cb) + } + default: + cb(err) + } +} + +// wrappedError is an implementation of error that has both the +// outer and inner errors. +type wrappedError struct { + Outer error + Inner error +} + +func (w *wrappedError) Error() string { + return w.Outer.Error() +} + +func (w *wrappedError) WrappedErrors() []error { + return []error{w.Outer, w.Inner} +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go new file mode 100644 index 0000000000..8d306bf513 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go @@ -0,0 +1,57 @@ +package cleanhttp + +import ( + "net" + "net/http" + "runtime" + "time" +) + +// DefaultTransport returns a new http.Transport with similar default values to +// http.DefaultTransport, but with idle connections and keepalives disabled. +func DefaultTransport() *http.Transport { + transport := DefaultPooledTransport() + transport.DisableKeepAlives = true + transport.MaxIdleConnsPerHost = -1 + return transport +} + +// DefaultPooledTransport returns a new http.Transport with similar default +// values to http.DefaultTransport. Do not use this for transient transports as +// it can leak file descriptors over time. Only use this for transports that +// will be re-used for the same host(s). +func DefaultPooledTransport() *http.Transport { + transport := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + MaxIdleConnsPerHost: runtime.GOMAXPROCS(0) + 1, + } + return transport +} + +// DefaultClient returns a new http.Client with similar default values to +// http.Client, but with a non-shared Transport, idle connections disabled, and +// keepalives disabled. +func DefaultClient() *http.Client { + return &http.Client{ + Transport: DefaultTransport(), + } +} + +// DefaultPooledClient returns a new http.Client with similar default values to +// http.Client, but with a shared Transport. Do not use this function for +// transient clients as it can leak file descriptors over time. Only use this +// for clients that will be re-used for the same host(s). +func DefaultPooledClient() *http.Client { + return &http.Client{ + Transport: DefaultPooledTransport(), + } +} diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go new file mode 100644 index 0000000000..05841092a7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/doc.go @@ -0,0 +1,20 @@ +// Package cleanhttp offers convenience utilities for acquiring "clean" +// http.Transport and http.Client structs. +// +// Values set on http.DefaultClient and http.DefaultTransport affect all +// callers. This can have detrimental effects, especially in TLS contexts, +// where client or root certificates set to talk to multiple endpoints can end +// up displacing each other, leading to hard-to-debug issues. This package +// provides non-shared http.Client and http.Transport structs to ensure that +// the configuration will not be overwritten by other parts of the application +// or dependencies. +// +// The DefaultClient and DefaultTransport functions disable idle connections +// and keepalives. Without ensuring that idle connections are closed before +// garbage collection, short-term clients/transports can leak file descriptors, +// eventually leading to "too many open files" errors.
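To make the trade-off concrete, an editor's sketch (hypothetical URL, not part of the vendored file): DefaultClient suits one-off requests, while DefaultPooledClient suits repeated requests to the same hosts.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	// One-off request: keepalives disabled, nothing lingers afterwards.
	oneShot := cleanhttp.DefaultClient()
	resp, err := oneShot.Get("https://example.com/") // hypothetical URL
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
	fmt.Println(resp.Status)

	// Many requests to the same host(s): the pooled variant keeps idle
	// connections around for reuse.
	pooled := cleanhttp.DefaultPooledClient()
	resp2, err := pooled.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp2.Body.Close()
}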
If you will be +// connecting to the same hosts repeatedly from the same client, you can use +// DefaultPooledClient to receive a client that has connection pooling +// semantics similar to http.DefaultClient. +// +package cleanhttp diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go new file mode 100644 index 0000000000..7eda3777f3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go @@ -0,0 +1,43 @@ +package cleanhttp + +import ( + "net/http" + "strings" + "unicode" +) + +// HandlerInput provides input options to cleanhttp's handlers +type HandlerInput struct { + ErrStatus int +} + +// PrintablePathCheckHandler is a middleware that ensures the request path +// contains only printable runes. +func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler { + // Nil-check on input to make it optional + if input == nil { + input = &HandlerInput{ + ErrStatus: http.StatusBadRequest, + } + } + + // Default to http.StatusBadRequest on error + if input.ErrStatus == 0 { + input.ErrStatus = http.StatusBadRequest + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check URL path for non-printable characters + idx := strings.IndexFunc(r.URL.Path, func(c rune) bool { + return !unicode.IsPrint(c) + }) + + if idx != -1 { + w.WriteHeader(input.ErrStatus) + return + } + + next.ServeHTTP(w, r) + return + }) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go new file mode 100644 index 0000000000..a63674775f --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/edges.go @@ -0,0 +1,21 @@ +package iradix + +import "sort" + +type edges []edge + +func (e edges) Len() int { + return len(e) +} + +func (e edges) Less(i, j int) bool { + return e[i].label < e[j].label +} + +func (e edges) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e edges) Sort() { + sort.Sort(e) +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 0000000000..e5e6e57f26 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,662 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. 
+ trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. This limits the amount of +// state that will accumulate during a transaction and we have a slower algorithm +// to switch to if we overflow. +func (t *Txn) trackChannel(ch chan struct{}) { + // In overflow, make sure we don't store any more objects. + if t.trackOverflow { + return + } + + // If this would overflow the state we reject it and set the flag (since + // we aren't tracking everything that's required any longer). + if len(t.trackChannels) >= defaultModifiedCache { + // Mark that we are in the overflow state + t.trackOverflow = true + + // Clear the map so that the channels can be garbage collected. It is + // safe to do this since we have already overflowed and will be using + // the slow notify algorithm. + t.trackChannels = nil + return + } + + // Create the map on the fly when we need it. + if t.trackChannels == nil { + t.trackChannels = make(map[chan struct{}]struct{}) + } + + // Otherwise we are good to track it. + t.trackChannels[ch] = struct{}{} +} + +// writeNode returns a node to be modified, if the current node has already been +// modified during the course of the transaction, it is used in-place. Set +// forLeafUpdate to true if you are getting a write node to update the leaf, +// which will set leaf mutation tracking appropriately as well. +func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node { + // Ensure the writable set exists. + if t.writable == nil { + lru, err := simplelru.NewLRU(defaultModifiedCache, nil) + if err != nil { + panic(err) + } + t.writable = lru + } + + // If this node has already been modified, we can continue to use it + // during this transaction. We know that we don't need to track it for + // a node update since the node is writable, but if this is for a leaf + // update we track it, in case the initial write to this node didn't + // update the leaf. + if _, ok := t.writable.Get(n); ok { + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + return n + } + + // Mark this node as being mutated. + if t.trackMutate { + t.trackChannel(n.mutateCh) + } + + // Mark its leaf as being mutated, if appropriate. + if t.trackMutate && forLeafUpdate && n.leaf != nil { + t.trackChannel(n.leaf.mutateCh) + } + + // Copy the existing node. If you have set forLeafUpdate it will be + // safe to replace this leaf with another after you get your node for + // writing. You MUST replace it, because the channel associated with + // this leaf will be closed when this transaction is committed. + nc := &Node{ + mutateCh: make(chan struct{}), + leaf: n.leaf, + } + if n.prefix != nil { + nc.prefix = make([]byte, len(n.prefix)) + copy(nc.prefix, n.prefix) + } + if len(n.edges) != 0 { + nc.edges = make([]edge, len(n.edges)) + copy(nc.edges, n.edges) + } + + // Mark this node as writable. 
+	t.writable.Add(nc, nil)
+	return nc
+}
+
+// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
+// Returns the size of the subtree visited
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+	// Count only leaf nodes
+	leaves := 0
+	if n.leaf != nil {
+		leaves = 1
+	}
+	// Mark this node as being mutated.
+	if t.trackMutate {
+		t.trackChannel(n.mutateCh)
+	}
+
+	// Mark its leaf as being mutated, if appropriate.
+	if t.trackMutate && n.leaf != nil {
+		t.trackChannel(n.leaf.mutateCh)
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		leaves += t.trackChannelsAndCount(e.node)
+	}
+	return leaves
+}
+
+// mergeChild is called to collapse the given node with its child. This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+	// Mark the child node as being mutated since we are about to abandon
+	// it. We don't need to mark the leaf since we are retaining it if it
+	// is there.
+	e := n.edges[0]
+	child := e.node
+	if t.trackMutate {
+		t.trackChannel(child.mutateCh)
+	}
+
+	// Merge the nodes.
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
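
As a point of reference before the Delete implementation below, a minimal sketch of the transaction API defined in this file; the keys and values are invented for illustration:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()

	// Batch several writes in one transaction; the tree is immutable,
	// so r itself is untouched until Commit returns a new root.
	txn := r.Txn()
	txn.Insert([]byte("foo"), 1)
	txn.Insert([]byte("foobar"), 2)
	txn.Delete([]byte("foo"))
	r2 := txn.Commit()

	fmt.Println(r.Len(), r2.Len()) // prints: 0 1 (the old tree is unchanged)
}

Because nodes are copied on write, the pre-transaction tree keeps serving reads while the transaction is built up.
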
+func (t *Txn) Delete(k []byte) (interface{}, bool) { + newRoot, leaf := t.delete(nil, t.root, k) + if newRoot != nil { + t.root = newRoot + } + if leaf != nil { + t.size-- + return leaf.val, true + } + return nil, false +} + +// DeletePrefix is used to delete an entire subtree that matches the prefix +// This will delete all nodes under that prefix +func (t *Txn) DeletePrefix(prefix []byte) bool { + newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix) + if newRoot != nil { + t.root = newRoot + t.size = t.size - numDeletions + return true + } + return false + +} + +// Root returns the current root of the radix tree within this +// transaction. The root is not safe across insert and delete operations, +// but can be used to read the current state during a transaction. +func (t *Txn) Root() *Node { + return t.root +} + +// Get is used to lookup a specific key, returning +// the value and if it was found +func (t *Txn) Get(k []byte) (interface{}, bool) { + return t.root.Get(k) +} + +// GetWatch is used to lookup a specific key, returning +// the watch channel, value and if it was found +func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + return t.root.GetWatch(k) +} + +// Commit is used to finalize the transaction and return a new tree. If mutation +// tracking is turned on then notifications will also be issued. +func (t *Txn) Commit() *Tree { + nt := t.CommitOnly() + if t.trackMutate { + t.Notify() + } + return nt +} + +// CommitOnly is used to finalize the transaction and return a new tree, but +// does not issue any notifications until Notify is called. +func (t *Txn) CommitOnly() *Tree { + nt := &Tree{t.root, t.size} + t.writable = nil + return nt +} + +// slowNotify does a complete comparison of the before and after trees in order +// to trigger notifications. This doesn't require any additional state but it +// is very expensive to compute. +func (t *Txn) slowNotify() { + snapIter := t.snap.rawIterator() + rootIter := t.root.rawIterator() + for snapIter.Front() != nil || rootIter.Front() != nil { + // If we've exhausted the nodes in the old snapshot, we know + // there's nothing remaining to notify. + if snapIter.Front() == nil { + return + } + snapElem := snapIter.Front() + + // If we've exhausted the nodes in the new root, we know we need + // to invalidate everything that remains in the old snapshot. We + // know from the loop condition there's something in the old + // snapshot. + if rootIter.Front() == nil { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // Do one string compare so we can check the various conditions + // below without repeating the compare. + cmp := strings.Compare(snapIter.Path(), rootIter.Path()) + + // If the snapshot is behind the root, then we must have deleted + // this node during the transaction. + if cmp < 0 { + close(snapElem.mutateCh) + if snapElem.isLeaf() { + close(snapElem.leaf.mutateCh) + } + snapIter.Next() + continue + } + + // If the snapshot is ahead of the root, then we must have added + // this node during the transaction. + if cmp > 0 { + rootIter.Next() + continue + } + + // If we have the same path, then we need to see if we mutated a + // node and possibly the leaf. 
+ rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. +func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. 
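
The Root accessor implemented just below returns that node; a small sketch of the root-based query pattern, assuming the Get and Minimum helpers defined later in this diff (keys are illustrative):

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("alpha"), 1)
	r, _, _ = r.Insert([]byte("beta"), 2)

	// The root node exposes the read-only query API defined in node.go.
	root := r.Root()
	if k, v, ok := root.Minimum(); ok {
		fmt.Printf("min: %s=%v\n", k, v) // min: alpha=1
	}
	if v, ok := root.Get([]byte("beta")); ok {
		fmt.Println("beta =", v) // beta = 2
	}
}
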
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 0000000000..9815e02538
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,91 @@
+package iradix
+
+import "bytes"
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+	// Initialize our stack if needed
+	if i.stack == nil && i.node != nil {
+		i.stack = []edges{
+			edges{
+				edge{node: i.node},
+			},
+		}
+	}
+
+	for len(i.stack) > 0 {
+		// Inspect the last element of the stack
+		n := len(i.stack)
+		last := i.stack[n-1]
+		elem := last[0].node
+
+		// Update the stack
+		if len(last) > 1 {
+			i.stack[n-1] = last[1:]
+		} else {
+			i.stack = i.stack[:n-1]
+		}
+
+		// Push the edges onto the frontier
+		if len(elem.edges) > 0 {
+			i.stack = append(i.stack, elem.edges)
+		}
+
+		// Return the leaf values if any
+		if elem.leaf != nil {
+			return elem.leaf.key, elem.leaf.val, true
+		}
+	}
+	return nil, nil, false
+}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 0000000000..7a065e7a09
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,292 @@
+package iradix
+
+import (
+	"bytes"
+	"sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning if iteration should
+// be terminated.
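
Before the WalkFn type itself, a short sketch of how such a callback is typically passed to the prefix walk defined later in node.go (keys are invented):

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"api/v1", "api/v2", "web"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// Visit only the entries under "api/"; returning true from the
	// callback would terminate the walk early.
	r.Root().WalkPrefix([]byte("api/"), func(k []byte, v interface{}) bool {
		fmt.Printf("%s = %v\n", k, v)
		return false
	})
}
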
+type WalkFn func(k []byte, v interface{}) bool + +// leafNode is used to represent a value +type leafNode struct { + mutateCh chan struct{} + key []byte + val interface{} +} + +// edge is used to represent an edge node +type edge struct { + label byte + node *Node +} + +// Node is an immutable node in the radix tree +type Node struct { + // mutateCh is closed if this node is modified + mutateCh chan struct{} + + // leaf is used to store possible leaf + leaf *leafNode + + // prefix is the common prefix we ignore + prefix []byte + + // Edges should be stored in-order for iteration. + // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
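
A brief sketch of the longest-prefix semantics just described, in the style of a route-table lookup; the paths and values are illustrative:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("/"), "root handler")
	r, _, _ = r.Insert([]byte("/api/"), "api handler")

	// The longest inserted key that is a prefix of the lookup key wins.
	k, v, ok := r.Root().LongestPrefix([]byte("/api/users"))
	fmt.Println(string(k), v, ok) // prints: /api/ api handler true
}
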
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+	var last *leafNode
+	search := k
+	for {
+		// Look for a leaf node
+		if n.isLeaf() {
+			last = n.leaf
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			break
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+	if last != nil {
+		return last.key, last.val, true
+	}
+	return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+	for {
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		}
+		if len(n.edges) > 0 {
+			n = n.edges[0].node
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+	for {
+		if num := len(n.edges); num > 0 {
+			n = n.edges[num-1].node
+			continue
+		}
+		if n.isLeaf() {
+			return n.leaf.key, n.leaf.val, true
+		} else {
+			break
+		}
+	}
+	return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+	return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+	iter := &rawIterator{node: n}
+	iter.Next()
+	return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+	recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			recursiveWalk(n, fn)
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			break
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			// Child may be under our search prefix
+			recursiveWalk(n, fn)
+			return
+		} else {
+			break
+		}
+	}
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+	search := path
+	for {
+		// Visit the leaf values if any
+		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+			return
+		}
+
+		// Check for key exhaustion
+		if len(search) == 0 {
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			return
+		}
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+		} else {
+			break
+		}
+	}
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 0000000000..04814c1323 --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. 
Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/hashicorp/go-memdb/filter.go new file mode 100644 index 0000000000..2e3a9b3f7b --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/filter.go @@ -0,0 +1,33 @@ +package memdb + +// FilterFunc is a function that takes the results of an iterator and returns +// whether the result should be filtered out. +type FilterFunc func(interface{}) bool + +// FilterIterator is used to wrap a ResultIterator and apply a filter over it. +type FilterIterator struct { + // filter is the filter function applied over the base iterator. + filter FilterFunc + + // iter is the iterator that is being wrapped. + iter ResultIterator +} + +func NewFilterIterator(wrap ResultIterator, filter FilterFunc) *FilterIterator { + return &FilterIterator{ + filter: filter, + iter: wrap, + } +} + +// WatchCh returns the watch channel of the wrapped iterator. +func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() } + +// Next returns the next non-filtered result from the wrapped iterator +func (f *FilterIterator) Next() interface{} { + for { + if value := f.iter.Next(); value == nil || !f.filter(value) { + return value + } + } +} diff --git a/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/hashicorp/go-memdb/index.go new file mode 100644 index 0000000000..cca853c5ab --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/index.go @@ -0,0 +1,581 @@ +package memdb + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "reflect" + "strings" +) + +// Indexer is an interface used for defining indexes. Indexes are used +// for efficient lookup of objects in a MemDB table. An Indexer must also +// implement one of SingleIndexer or MultiIndexer. +// +// Indexers are primarily responsible for returning the lookup key as +// a byte slice. The byte slice is the key data in the underlying data storage. +type Indexer interface { + // FromArgs is called to build the exact index key from a list of arguments. + FromArgs(args ...interface{}) ([]byte, error) +} + +// SingleIndexer is an interface used for defining indexes that generate a +// single value per object +type SingleIndexer interface { + // FromObject extracts the index value from an object. The return values + // are whether the index value was found, the index value, and any error + // while extracting the index value, respectively. 
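+	//
+	// For example, the StringFieldIndex below reports (true, []byte("v\x00"), nil)
+	// for a populated field holding "v"; the trailing null byte is the key
+	// terminator.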
+	FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+	// FromObject extracts index values from an object. The return values
+	// are the same as a SingleIndexer except there can be multiple index
+	// values.
+	FromObject(raw interface{}) (bool, [][]byte, error)
+}
+
+// PrefixIndexer is an optional interface on top of an Indexer that allows
+// indexes to support prefix-based iteration.
+type PrefixIndexer interface {
+	// PrefixFromArgs is the same as FromArgs for an Indexer except that
+	// the index value returned should match all prefix-matched values.
+	PrefixFromArgs(args ...interface{}) ([]byte, error)
+}
+
+// StringFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field.
+type StringFieldIndex struct {
+	Field     string
+	Lowercase bool
+}
+
+func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(s.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj)
+	}
+
+	val := fv.String()
+	if val == "" {
+		return false, nil, nil
+	}
+
+	if s.Lowercase {
+		val = strings.ToLower(val)
+	}
+
+	// Add the null character as a terminator
+	val += "\x00"
+	return true, []byte(val), nil
+}
+
+func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	arg, ok := args[0].(string)
+	if !ok {
+		return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+	}
+	if s.Lowercase {
+		arg = strings.ToLower(arg)
+	}
+	// Add the null character as a terminator
+	arg += "\x00"
+	return []byte(arg), nil
+}
+
+func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+	val, err := s.FromArgs(args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Strip the null terminator, the rest is a prefix
+	n := len(val)
+	if n > 0 {
+		return val[:n-1], nil
+	}
+	return val, nil
+}
+
+// StringSliceFieldIndex builds an index from a field on an object that is a
+// string slice ([]string). Each value within the string slice can be used for
+// lookup.
+type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. 
+type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) > 2 || len(args) == 0 { + return nil, fmt.Errorf("must provide one or two arguments") + } + key, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + key = strings.ToLower(key) + } + // Add the null character as a terminator + key += "\x00" + + if len(args) == 2 { + val, ok := args[1].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[1]) + } + if s.Lowercase { + val = strings.ToLower(val) + } + // Add the null character as a terminator + key += val + "\x00" + } + + return []byte(key), nil +} + +// UintFieldIndex is used to extract a uint field from an object using +// reflection and builds an index on that field. +type UintFieldIndex struct { + Field string +} + +func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsUintType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k) + } + + // Get the value and encode it + val := fv.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return true, buf, nil +} + +func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsUintType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a uint", k) + } + + val := v.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return buf, nil +} + +// IsUintType returns whether the passed type is a type of uint and the number +// of bytes needed to encode the type. 
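+//
+// For example, reflect.Uint32 reports binary.MaxVarintLen32 (5 bytes), the
+// worst-case uvarint size for a 32-bit value.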
+func IsUintType(k reflect.Kind) (size int, okay bool) {
+	switch k {
+	case reflect.Uint:
+		return binary.MaxVarintLen64, true
+	case reflect.Uint8:
+		return 2, true
+	case reflect.Uint16:
+		return binary.MaxVarintLen16, true
+	case reflect.Uint32:
+		return binary.MaxVarintLen32, true
+	case reflect.Uint64:
+		return binary.MaxVarintLen64, true
+	default:
+		return 0, false
+	}
+}
+
+// UUIDFieldIndex is used to extract a field from an object
+// using reflection and builds an index on that field by treating
+// it as a UUID. This is an optimization over using a StringFieldIndex
+// as the UUID can be more compactly represented in byte form.
+type UUIDFieldIndex struct {
+	Field string
+}
+
+func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+	v := reflect.ValueOf(obj)
+	v = reflect.Indirect(v) // Dereference the pointer if any
+
+	fv := v.FieldByName(u.Field)
+	if !fv.IsValid() {
+		return false, nil,
+			fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj)
+	}
+
+	val := fv.String()
+	if val == "" {
+		return false, nil, nil
+	}
+
+	buf, err := u.parseString(val, true)
+	return true, buf, err
+}
+
+func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	switch arg := args[0].(type) {
+	case string:
+		return u.parseString(arg, true)
+	case []byte:
+		if len(arg) != 16 {
+			return nil, fmt.Errorf("byte slice must be 16 bytes")
+		}
+		return arg, nil
+	default:
+		return nil,
+			fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+	}
+}
+
+func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+	if len(args) != 1 {
+		return nil, fmt.Errorf("must provide only a single argument")
+	}
+	switch arg := args[0].(type) {
+	case string:
+		return u.parseString(arg, false)
+	case []byte:
+		return arg, nil
+	default:
+		return nil,
+			fmt.Errorf("argument must be a string or byte slice: %#v", args[0])
+	}
+}
+
+// parseString parses a UUID from the string. If enforceLength is false, it will
+// parse a partial UUID. An error is returned if the input, stripped of hyphens,
+// is not even length.
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+	// Verify the length
+	l := len(s)
+	if enforceLength && l != 36 {
+		return nil, fmt.Errorf("UUID must be 36 characters")
+	} else if l > 36 {
+		return nil, fmt.Errorf("Invalid UUID length. UUIDs have 36 characters; got %d", l)
+	}
+
+	hyphens := strings.Count(s, "-")
+	if hyphens > 4 {
+		return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
+	}
+
+	// The sanitized length is the length of the original string without the "-".
+	sanitized := strings.Replace(s, "-", "", -1)
+	sanitizedLength := len(sanitized)
+	if sanitizedLength%2 != 0 {
+		return nil, fmt.Errorf("Input (without hyphens) must be even length")
+	}
+
+	dec, err := hex.DecodeString(sanitized)
+	if err != nil {
+		return nil, fmt.Errorf("Invalid UUID: %v", err)
+	}
+
+	return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
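+//
+// For example, a nil pointer field or zero-valued struct field indexes as
+// byte 0, and any other value as byte 1.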
+type FieldSetIndex struct { + Field string +} + +func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(f.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj) + } + + if fv.Interface() == reflect.Zero(fv.Type()).Interface() { + return true, []byte{0}, nil + } + + return true, []byte{1}, nil +} + +func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// ConditionalIndex builds an index based on a condition specified by a passed +// user function. This function may examine the passed object and return a +// boolean to encapsulate an arbitrarily complex conditional. +type ConditionalIndex struct { + Conditional ConditionalIndexFunc +} + +// ConditionalIndexFunc is the required function interface for a +// ConditionalIndex. +type ConditionalIndexFunc func(obj interface{}) (bool, error) + +func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) { + // Call the user's function + res, err := c.Conditional(obj) + if err != nil { + return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err) + } + + if res { + return true, []byte{1}, nil + } + + return true, []byte{0}, nil +} + +func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) { + return fromBoolArgs(args) +} + +// fromBoolArgs is a helper that expects only a single boolean argument and +// returns a single length byte array containing either a one or zero depending +// on whether the passed input is true or false respectively. +func fromBoolArgs(args []interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + if val, ok := args[0].(bool); !ok { + return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0]) + } else if val { + return []byte{1}, nil + } + + return []byte{0}, nil +} + +// CompoundIndex is used to build an index using multiple sub-indexes +// Prefix based iteration is supported as long as the appropriate prefix +// of indexers support it. All sub-indexers are only assumed to expect +// a single argument. +type CompoundIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // the CompoundIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) { + var out []byte + for i, idxRaw := range c.Indexes { + idx, ok := idxRaw.(SingleIndexer) + if !ok { + return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer") + } + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break + } else { + return false, nil, nil + } + } + out = append(out, val...) + } + return true, out, nil +} + +func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != len(c.Indexes) { + return nil, fmt.Errorf("less arguments than index fields") + } + var out []byte + for i, arg := range args { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) 
+ } + return out, nil +} + +func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) > len(c.Indexes) { + return nil, fmt.Errorf("more arguments than index fields") + } + var out []byte + for i, arg := range args { + if i+1 < len(args) { + val, err := c.Indexes[i].FromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } else { + prefixIndexer, ok := c.Indexes[i].(PrefixIndexer) + if !ok { + return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i) + } + val, err := prefixIndexer.PrefixFromArgs(arg) + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + } + return out, nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 0000000000..65c9207310 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,97 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. +package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction, in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot +// of the database that will not be affected by any write +// operations to the existing DB. +func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." 
+ index)
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/hashicorp/go-memdb/schema.go
new file mode 100644
index 0000000000..e6a9b526bc
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/schema.go
@@ -0,0 +1,114 @@
+package memdb
+
+import "fmt"
+
+// DBSchema is the schema to use for the full database with a MemDB instance.
+//
+// MemDB will require a valid schema. Schema validation can be tested using
+// the Validate function. Calling this function is recommended in unit tests.
+type DBSchema struct {
+	// Tables is the set of tables within this database. The key is the
+	// table name and must match the Name in TableSchema.
+	Tables map[string]*TableSchema
+}
+
+// Validate validates the schema.
+func (s *DBSchema) Validate() error {
+	if s == nil {
+		return fmt.Errorf("schema is nil")
+	}
+
+	if len(s.Tables) == 0 {
+		return fmt.Errorf("schema has no tables defined")
+	}
+
+	for name, table := range s.Tables {
+		if name != table.Name {
+			return fmt.Errorf("table name mismatch for '%s'", name)
+		}
+
+		if err := table.Validate(); err != nil {
+			return fmt.Errorf("table %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// TableSchema is the schema for a single table.
+type TableSchema struct {
+	// Name of the table. This must match the key in the Tables map in DBSchema.
+	Name string
+
+	// Indexes is the set of indexes for querying this table. The key
+	// is a unique name for the index and must match the Name in the
+	// IndexSchema.
+	Indexes map[string]*IndexSchema
+}
+
+// Validate is used to validate the table schema.
+func (s *TableSchema) Validate() error {
+	if s.Name == "" {
+		return fmt.Errorf("missing table name")
+	}
+
+	if len(s.Indexes) == 0 {
+		return fmt.Errorf("missing table indexes for '%s'", s.Name)
+	}
+
+	if _, ok := s.Indexes["id"]; !ok {
+		return fmt.Errorf("must have id index")
+	}
+
+	if !s.Indexes["id"].Unique {
+		return fmt.Errorf("id index must be unique")
+	}
+
+	if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok {
+		return fmt.Errorf("id index must be a SingleIndexer")
+	}
+
+	for name, index := range s.Indexes {
+		if name != index.Name {
+			return fmt.Errorf("index name mismatch for '%s'", name)
+		}
+
+		if err := index.Validate(); err != nil {
+			return fmt.Errorf("index %q: %s", name, err)
+		}
+	}
+
+	return nil
+}
+
+// IndexSchema is the schema for an index. An index defines how a table is
+// queried.
+type IndexSchema struct {
+	// Name of the index. This must be unique among a table's set of indexes.
+	// This must match the key in the map of Indexes for a TableSchema.
+	Name string
+
+	// AllowMissing if true ignores this index if it doesn't produce a
+	// value. For example, an index that extracts a field that doesn't
+	// exist from a structure.
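+	// Such objects are simply left out of this index rather than causing
+	// the insert to fail.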
+ AllowMissing bool + + Unique bool + Indexer Indexer +} + +func (s *IndexSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing index name") + } + if s.Indexer == nil { + return fmt.Errorf("missing index function for '%s'", s.Name) + } + switch s.Indexer.(type) { + case SingleIndexer: + case MultiIndexer: + default: + return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name) + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/hashicorp/go-memdb/txn.go new file mode 100644 index 0000000000..2b85087ea3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/txn.go @@ -0,0 +1,644 @@ +package memdb + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +const ( + id = "id" +) + +var ( + // ErrNotFound is returned when the requested item is not found + ErrNotFound = fmt.Errorf("not found") +) + +// tableIndex is a tuple of (Table, Index) used for lookups +type tableIndex struct { + Table string + Index string +} + +// Txn is a transaction against a MemDB. +// This can be a read or write transaction. +type Txn struct { + db *MemDB + write bool + rootTxn *iradix.Txn + after []func() + + modified map[tableIndex]*iradix.Txn +} + +// readableIndex returns a transaction usable for reading the given +// index in a table. If a write transaction is in progress, we may need +// to use an existing modified txn. +func (txn *Txn) readableIndex(table, index string) *iradix.Txn { + // Look for existing transaction + if txn.write && txn.modified != nil { + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + } + + // Create a read transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + return indexTxn +} + +// writableIndex returns a transaction usable for modifying the +// given index in a table. +func (txn *Txn) writableIndex(table, index string) *iradix.Txn { + if txn.modified == nil { + txn.modified = make(map[tableIndex]*iradix.Txn) + } + + // Look for existing transaction + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + + // Start a new transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + + // If we are the primary DB, enable mutation tracking. Snapshots should + // not notify, otherwise we will trigger watches on the primary DB when + // the writes will not be visible. + indexTxn.TrackMutate(txn.db.primary) + + // Keep this open for the duration of the txn + txn.modified[key] = indexTxn + return indexTxn +} + +// Abort is used to cancel this transaction. +// This is a noop for read transactions. +func (txn *Txn) Abort() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() +} + +// Commit is used to finalize this transaction. +// This is a noop for read transactions. 
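+//
+// A typical write flow, for illustration (table name assumed):
+//
+//	txn := db.Txn(true)
+//	if err := txn.Insert("people", p); err != nil {
+//		txn.Abort()
+//		return err
+//	}
+//	txn.Commit()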
+func (txn *Txn) Commit() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Commit each sub-transaction scoped to (table, index) + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + txn.rootTxn.Insert(path, final) + } + + // Update the root of the DB + newRoot := txn.rootTxn.CommitOnly() + atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot)) + + // Now issue all of the mutation updates (this is safe to call + // even if mutation tracking isn't enabled); we do this after + // the root pointer is swapped so that waking responders will + // see the new state. + for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) 
+				}
+
+					// If we are writing to the same index with the same value,
+					// we can avoid the delete as the insert will overwrite the
+					// value anyways.
+					if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+						indexTxn.Delete(valExist)
+					}
+				}
+			}
+		}
+
+		// If there is no index value, either this is an error or an expected
+		// case and we can skip updating
+		if !ok {
+			if indexSchema.AllowMissing {
+				continue
+			} else {
+				return fmt.Errorf("missing value for index '%s'", name)
+			}
+		}
+
+		// Update the value of the index
+		for _, val := range vals {
+			indexTxn.Insert(val, obj)
+		}
+	}
+	return nil
+}
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+	if !txn.write {
+		return fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Get the primary ID of the object
+	idSchema := tableSchema.Indexes[id]
+	idIndexer := idSchema.Indexer.(SingleIndexer)
+	ok, idVal, err := idIndexer.FromObject(obj)
+	if err != nil {
+		return fmt.Errorf("failed to build primary index: %v", err)
+	}
+	if !ok {
+		return fmt.Errorf("object missing primary index")
+	}
+
+	// Lookup the object by ID first, check if we should continue
+	idTxn := txn.writableIndex(table, id)
+	existing, ok := idTxn.Get(idVal)
+	if !ok {
+		return ErrNotFound
+	}
+
+	// Remove the object from all the indexes
+	for name, indexSchema := range tableSchema.Indexes {
+		indexTxn := txn.writableIndex(table, name)
+
+		// Handle the update by deleting from the index first
+		var (
+			ok   bool
+			vals [][]byte
+			err  error
+		)
+		switch indexer := indexSchema.Indexer.(type) {
+		case SingleIndexer:
+			var val []byte
+			ok, val, err = indexer.FromObject(existing)
+			vals = [][]byte{val}
+		case MultiIndexer:
+			ok, vals, err = indexer.FromObject(existing)
+		}
+		if err != nil {
+			return fmt.Errorf("failed to build index '%s': %v", name, err)
+		}
+		if ok {
+			// Handle non-unique index by computing a unique index.
+			// This is done by appending the primary key which must
+			// be unique anyways.
+			for _, val := range vals {
+				if !indexSchema.Unique {
+					val = append(val, idVal...)
+				}
+				indexTxn.Delete(val)
+			}
+		}
+	}
+	return nil
+}
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform a
+// scan and enumerate the set of objects to delete. These will be removed
+// from all other indexes, and then a special prefix operation will delete
+// the objects from the given index in an efficient subtree delete operation.
+// This is useful when you have a very large number of objects indexed by
+// the given index, along with a much smaller number of entries in the
+// other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) {
+	if !txn.write {
+		return false, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	if !strings.HasSuffix(prefix_index, "_prefix") {
+		return false, fmt.Errorf("index name for DeletePrefix must be a prefix index, got %v", prefix_index)
+	}
+
+	deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix")
+
+	// Get an iterator over all of the keys with the given prefix.
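+	// For illustration (names assumed), a table with a "name" index could be
+	// pruned with txn.DeletePrefix("people", "name_prefix", "ab").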
+ entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) + if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) 
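+	// (val is the encoded key to seek; it is nil when no arguments were given.)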
+ if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. + indexTxn := txn.readableIndex(table, indexSchema.Name) + if _, value, ok := indexTxn.Root().LongestPrefix(val); ok { + return value, nil + } + return nil, nil +} + +// getIndexValue is used to get the IndexSchema and the value +// used to scan the index given the parameters. This handles prefix based +// scans when the index has the "_prefix" suffix. The index must support +// prefix iteration. +func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) { + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return nil, nil, fmt.Errorf("invalid table '%s'", table) + } + + // Check for a prefix scan + prefixScan := false + if strings.HasSuffix(index, "_prefix") { + index = strings.TrimSuffix(index, "_prefix") + prefixScan = true + } + + // Get the index schema + indexSchema, ok := tableSchema.Indexes[index] + if !ok { + return nil, nil, fmt.Errorf("invalid index '%s'", index) + } + + // Hot-path for when there are no arguments + if len(args) == 0 { + return indexSchema, nil, nil + } + + // Special case the prefix scanning + if prefixScan { + prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer) + if !ok { + return indexSchema, nil, + fmt.Errorf("index '%s' does not support prefix scanning", index) + } + + val, err := prefixIndexer.PrefixFromArgs(args...) 
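+		// (For the string indexers above, this drops the trailing null
+		// terminator so the key matches any entry with this prefix.)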
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results
+// from a Get query on a table.
+type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the
+// rows that match the given constraints on an index.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
+
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+	iter    *iradix.Iterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+	_, value, ok := r.iter.Next()
+	if !ok {
+		return nil
+	}
+	return value
+}
diff --git a/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 0000000000..a6f01213be
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,129 @@
+package memdb
+
+import (
+	"context"
+	"time"
+)
+
+// WatchSet is a collection of watch channels.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+	return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends a watchCh to the WatchSet if non-nil.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+
+	if _, ok := w[watchCh]; !ok {
+		w[watchCh] = struct{}{}
+	}
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. It's expected that the altCh will be the same on many calls to this
+// function, so you will exceed the soft limit a little bit if you hit this, but
+// not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from the start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// This is safe for a nil WatchSet so we don't need to check that here.
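+	// Below the soft limit we track the fine-grained channel; past it we fall
+	// back to the coarser alternate channel.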
+ if len(w) < softLimit { + w.Add(watchCh) + } else { + w.Add(altCh) + } +} + +// Watch is used to wait for either the watch set to trigger or a timeout. +// Returns true on timeout. +func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool { + if w == nil { + return false + } + + // Create a context that gets cancelled when the timeout is triggered + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + select { + case <-timeoutCh: + cancel() + case <-ctx.Done(): + } + }() + + return w.WatchCtx(ctx) == context.Canceled +} + +// WatchCtx is used to wait for either the watch set to trigger or for the +// context to be cancelled. Watch with a timeout channel can be mimicked by +// creating a context with a deadline. WatchCtx should be preferred over Watch. +func (w WatchSet) WatchCtx(ctx context.Context) error { + if w == nil { + return nil + } + + if n := len(w); n <= aFew { + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + chunk[idx] = watchCh + idx++ + } + return watchFew(ctx, chunk) + } + + return w.watchMany(ctx) +} + +// watchMany is used if there are many watchers. +func (w WatchSet) watchMany(ctx context.Context) error { + // Set up a goroutine for each watcher. + triggerCh := make(chan struct{}, 1) + watcher := func(chunk []<-chan struct{}) { + if err := watchFew(ctx, chunk); err == nil { + select { + case triggerCh <- struct{}{}: + default: + } + } + } + + // Apportion the watch channels into chunks we can feed into the + // watchFew helper. + idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + subIdx := idx % aFew + chunk[subIdx] = watchCh + idx++ + + // Fire off this chunk and start a fresh one. + if idx%aFew == 0 { + go watcher(chunk) + chunk = make([]<-chan struct{}, aFew) + } + } + + // Make sure to watch any residual channels in the last chunk. + if idx%aFew != 0 { + go watcher(chunk) + } + + // Wait for a channel to trigger or timeout. + select { + case <-triggerCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/hashicorp/go-memdb/watch_few.go new file mode 100644 index 0000000000..880f098b77 --- /dev/null +++ b/vendor/github.com/hashicorp/go-memdb/watch_few.go @@ -0,0 +1,117 @@ +package memdb + +//go:generate sh -c "go run watch-gen/main.go >watch_few.go" + +import( + "context" +) + +// aFew gives how many watchers this function is wired to support. You must +// always pass a full slice of this length, but unused channels can be nil. +const aFew = 32 + +// watchFew is used if there are only a few watchers as a performance +// optimization. 
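+// The slice must always have length aFew; unused entries may be left nil,
+// since receiving from a nil channel blocks forever and so never fires.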
+func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/LICENSE b/vendor/github.com/hashicorp/go-msgpack/LICENSE new file mode 100644 index 0000000000..ccae99f6a9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2012, 2013 Ugorji Nwoke. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the author nor the names of its contributors may be used + to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go new file mode 100644 index 0000000000..c14d810a73 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/0doc.go @@ -0,0 +1,143 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc . 
+ +Supported Serialization formats are: + + - msgpack: [https://github.com/msgpack/msgpack] + - binc: [http://github.com/ugorji/binc] + +To install: + + go get github.com/ugorji/go/codec + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X. + This was achieved by taking extreme care on: + - managing allocation + - function frame size (important due to Go's use of split stacks), + - reflection use (and by-passing reflection for common types) + - recursion implications + - zero-copy mode (encoding/decoding to byte slice without using temp buffers) + - Correct. + Care was taken to precisely handle corner cases like: + overflows, nil maps and slices, nil value in stream, etc. + - Efficient zero-copying into temporary byte buffers + when encoding into or decoding from a byte slice. + - Standard field renaming via tags + - Encoding from any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Decoding into pointer to any non-nil typed value + (struct, slice, map, int, float32, bool, string, reflect.Value, etc) + - Supports extension functions to handle the encode/decode of custom types + - Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler + - Schema-less decoding + (decode into a pointer to a nil interface{} as opposed to a typed non-nil value). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Msgpack Specific: + - Provides extension functions to handle spec-defined extensions (binary, timestamp) + - Options to resolve ambiguities in handling raw bytes (as string or []byte) + during schema-less decoding (decoding into a nil interface{}) + - RPC Server/Client Codec for msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + - Fast Paths for some container types: + For some container types, we circumvent reflection and its associated overhead + and allocation costs, and encode/decode directly. These types are: + []interface{} + []int + []string + map[interface{}]interface{} + map[int]interface{} + map[string]interface{} + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +Typical usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Representative Benchmark Results + +Run the benchmark suite using: + go test -bi -bench=. -benchmem + +To run full benchmark suite (including against vmsgpack and bson), +see notes in ext_dep_test.go + +*/ +package codec diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/binc.go b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go new file mode 100644 index 0000000000..2bb5e8fee8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/binc.go @@ -0,0 +1,786 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "math" + // "reflect" + // "sync/atomic" + "time" + //"fmt" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. + +//var _ = fmt.Printf + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +type bincEncDriver struct { + w encWriter + m map[string]uint16 // symbols + s uint32 // symbols sequencer + b [8]byte +} + +func (e *bincEncDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + bs := encodeTime(v.(time.Time)) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) encodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) encodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) encodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:], math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- 
{ + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) encodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + switch { + case v >= 0: + e.encUint(bincVdPosInt<<4, true, uint64(v)) + case v == -1: + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + default: + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) encodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + switch { + case v == 0: + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + case pos && v >= 1 && v <= 16: + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + case v <= math.MaxUint8: + e.w.writen2(bd|0x0, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.encIntegerPrune(bd, pos, v, 4) + default: + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) encodeArrayPreamble(length int) { + e.encLen(bincVdArray<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeMapPreamble(length int) { + e.encLen(bincVdMap<<4, uint64(length)) +} + +func (e *bincEncDriver) encodeString(c charEncoding, v string) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(c_UTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). 
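+	//Descriptor layout used below (restating what the code that follows does):
+	//the byte is bincVdSymbol<<4, OR'ed with 0x8 when the symbol id needs 2
+	//bytes (else 1), 0x4 when the string value follows (first occurrence),
+	//and a 2-bit precision for that string's length.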
+ + l := len(v) + switch l { + case 0: + e.encBytesLen(c_UTF8, 0) + return + case 1: + e.encBytesLen(c_UTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + e.w.writeUint16(ui) + } + } else { + e.s++ + ui = uint16(e.s) + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + switch { + case l <= math.MaxUint8: + // lenprec = 0 + case l <= math.MaxUint16: + lenprec = 1 + case int64(l) <= math.MaxUint32: + lenprec = 2 + default: + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + e.w.writeUint16(ui) + } + switch lenprec { + case 0: + e.w.writen1(byte(l)) + case 1: + e.w.writeUint16(uint16(l)) + case 2: + e.w.writeUint32(uint32(l)) + default: + e.w.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) { + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == c_RAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, byte(v)) + case v <= math.MaxUint16: + e.w.writen1(bd | 0x01) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd | 0x02) + e.w.writeUint32(uint32(v)) + default: + e.w.writen1(bd | 0x03) + e.w.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecDriver struct { + r decReader + bdRead bool + bdType valueType + bd byte + vd byte + vs byte + b [8]byte + m map[uint32]string // symbols (use uint32 as key, as map optimizes for it) +} + +func (d *bincDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *bincDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + d.bdType = valueTypeNil + case bincSpFalse, bincSpTrue: + d.bdType = valueTypeBool + case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat: + d.bdType = valueTypeFloat + case bincSpZero: + d.bdType = valueTypeUint + case bincSpNegOne: + d.bdType = valueTypeInt + default: + decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + d.bdType = valueTypeUint + case bincVdPosInt: + d.bdType = valueTypeUint + case bincVdNegInt: + d.bdType = valueTypeInt + case bincVdFloat: + d.bdType = valueTypeFloat + case bincVdString: + d.bdType = valueTypeString + case bincVdSymbol: + d.bdType = valueTypeSymbol + case bincVdByteArray: + d.bdType = valueTypeBytes + case bincVdTimestamp: + d.bdType = valueTypeTimestamp + case bincVdCustomExt: + d.bdType = valueTypeExt + case bincVdArray: + d.bdType = valueTypeArray + case bincVdMap: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd) + } + } + return d.bdType +} + +func (d 
*bincDecDriver) tryDecodeAsNil() bool { + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) isBuiltinType(rt uintptr) bool { + return rt == timeTypId +} + +func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) { + switch rt { + case timeTypId: + if d.vd != bincVdTimestamp { + decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd) + } + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + var vt *time.Time = v.(*time.Time) + *vt = tt + d.bdRead = false + } +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + decErr("At most 8 bytes used to represent float. Received: %v bytes", l) + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(d.r.readUint64()); break; } + switch vs := d.vs; vs & 0x7 { + case bincFlBin32: + d.decFloatPre(vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + case bincFlBin64: + d.decFloatPre(vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + default: + decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs) + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:]) + v = uint64(bigen.Uint16(d.b[6:])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 3: + d.r.readb(d.b[4:]) + v = uint64(bigen.Uint32(d.b[4:])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:])) + case 7: + d.r.readb(d.b[:]) + v = uint64(bigen.Uint64(d.b[:])) + default: + decErr("unsigned integers with greater than 64 bits of precision not supported") + } + return +} + +func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.vd { + case bincVdPosInt: + ui = d.decUint() + i = int64(ui) + case bincVdNegInt: + ui = d.decUint() + i = -(int64(ui)) + neg = true + case bincVdSmallInt: + i = int64(d.vs) + 1 + ui = uint64(d.vs) + 1 + case bincVdSpecial: + switch d.vs { + case bincSpZero: + //i = 0 + case bincSpNegOne: + neg = true + ui = 1 + i = -1 + default: + decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs) + } + default: + decErr("number can only be decoded from uint or int values. 
d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + } + return +} + +func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.vd { + case bincVdSpecial: + d.bdRead = false + switch d.vs { + case bincSpNan: + return math.NaN() + case bincSpPosInf: + return math.Inf(1) + case bincSpZeroFloat, bincSpZero: + return + case bincSpNegInf: + return math.Inf(-1) + default: + decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs) + } + case bincVdFloat: + f = d.decFloat() + default: + _, i, _ := d.decIntAny() + f = float64(i) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) decodeBool() (b bool) { + switch d.bd { + case (bincVdSpecial | bincSpFalse): + // b = false + case (bincVdSpecial | bincSpTrue): + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) readMapLen() (length int) { + if d.vd != bincVdMap { + decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) readArrayLen() (length int) { + if d.vd != bincVdArray { + decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd) + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs <= 3 { + return int(d.decUint()) + } + return int(d.vs - 4) +} + +func (d *bincDecDriver) decodeString() (s string) { + switch d.vd { + case bincVdString, bincVdByteArray: + if length := d.decLen(); length > 0 { + s = string(d.r.readn(length)) + } + case bincVdSymbol: + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint32 + vs := d.vs + //fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4) + if vs&0x8 == 0 { + symbol = uint32(d.r.readn1()) + } else { + symbol = uint32(d.r.readUint16()) + } + if d.m == nil { + d.m = make(map[uint32]string, 16) + } + + if vs&0x4 == 0 { + s = d.m[symbol] + } else { + var slen int + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(d.r.readUint16()) + case 2: + slen = int(d.r.readUint32()) + case 3: + slen = int(d.r.readUint64()) + } + s = string(d.r.readn(slen)) + d.m[symbol] = s + } + default: + decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x", + bincVdString, bincVdByteArray, bincVdSymbol, d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + var clen int + switch d.vd { + case bincVdString, bincVdByteArray: + clen = d.decLen() + default: + decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. 
Got: 0x%x", + bincVdString, bincVdByteArray, d.vd) + } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.vd { + case bincVdCustomExt: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case bincVdByteArray: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + vt = valueTypeNil + case bincSpFalse: + vt = valueTypeBool + v = false + case bincSpTrue: + vt = valueTypeBool + v = true + case bincSpNan: + vt = valueTypeFloat + v = math.NaN() + case bincSpPosInf: + vt = valueTypeFloat + v = math.Inf(1) + case bincSpNegInf: + vt = valueTypeFloat + v = math.Inf(-1) + case bincSpZeroFloat: + vt = valueTypeFloat + v = float64(0) + case bincSpZero: + vt = valueTypeUint + v = int64(0) // int8(0) + case bincSpNegOne: + vt = valueTypeInt + v = int64(-1) // int8(-1) + default: + decErr("decodeNaked: Unrecognized special value 0x%x", d.vs) + } + case bincVdSmallInt: + vt = valueTypeUint + v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + vt = valueTypeUint + v = d.decUint() + case bincVdNegInt: + vt = valueTypeInt + v = -(int64(d.decUint())) + case bincVdFloat: + vt = valueTypeFloat + v = d.decFloat() + case bincVdSymbol: + vt = valueTypeSymbol + v = d.decodeString() + case bincVdString: + vt = valueTypeString + v = d.decodeString() + case bincVdByteArray: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case bincVdTimestamp: + vt = valueTypeTimestamp + tt, err := decodeTime(d.r.readn(int(d.vs))) + if err != nil { + panic(err) + } + v = tt + case bincVdCustomExt: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case bincVdArray: + vt = valueTypeArray + decodeFurther = true + case bincVdMap: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. 
+type BincHandle struct { + BasicHandle +} + +func (h *BincHandle) newEncDriver(w encWriter) encDriver { + return &bincEncDriver{w: w} +} + +func (h *BincHandle) newDecDriver(r decReader) decDriver { + return &bincDecDriver{r: r} +} + +func (_ *BincHandle) writeExt() bool { + return true +} + +func (h *BincHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/decode.go b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go new file mode 100644 index 0000000000..87bef2b935 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/decode.go @@ -0,0 +1,1048 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" + // "runtime/debug" +) + +// Some tagging information for error messages. +const ( + msgTagDec = "codec.decoder" + msgBadDesc = "Unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + readn(n int) []byte + readb([]byte) + readn1() uint8 + readUint16() uint16 + readUint32() uint32 + readUint64() uint64 +} + +type decDriver interface { + initReadNext() + tryDecodeAsNil() bool + currentEncodedType() valueType + isBuiltinType(rt uintptr) bool + decodeBuiltin(rt uintptr, v interface{}) + //decodeNaked: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + decodeNaked() (v interface{}, vt valueType, decodeFurther bool) + decodeInt(bitsize uint8) (i int64) + decodeUint(bitsize uint8) (ui uint64) + decodeFloat(chkOverflow32 bool) (f float64) + decodeBool() (b bool) + // decodeString can also decode symbols + decodeString() (s string) + decodeBytes(bs []byte) (bsOut []byte, changed bool) + decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + readMapLen() int + readArrayLen() int +} + +type DecodeOptions struct { + // An instance of MapType is used during schema-less decoding of a map in the stream. + // If nil, we use map[interface{}]interface{} + MapType reflect.Type + // An instance of SliceType is used during schema-less decoding of an array in the stream. + // If nil, we use []interface{} + SliceType reflect.Type + // ErrorIfNoField controls whether an error is returned when decoding a map + // from a codec stream into a struct, and no matching struct field is found. 
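+	// (When false, the unmatched value is decoded and discarded; see kStruct below.)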
+	ErrorIfNoField bool
+}
+
+// ------------------------------------
+
+// ioDecReader is a decReader that reads off an io.Reader
+type ioDecReader struct {
+	r  io.Reader
+	br io.ByteReader
+	x  [8]byte //temp byte array re-used internally for efficiency
+}
+
+func (z *ioDecReader) readn(n int) (bs []byte) {
+	if n <= 0 {
+		return
+	}
+	bs = make([]byte, n)
+	if _, err := io.ReadAtLeast(z.r, bs, n); err != nil {
+		panic(err)
+	}
+	return
+}
+
+func (z *ioDecReader) readb(bs []byte) {
+	if _, err := io.ReadAtLeast(z.r, bs, len(bs)); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioDecReader) readn1() uint8 {
+	if z.br != nil {
+		b, err := z.br.ReadByte()
+		if err != nil {
+			panic(err)
+		}
+		return b
+	}
+	z.readb(z.x[:1])
+	return z.x[0]
+}
+
+func (z *ioDecReader) readUint16() uint16 {
+	z.readb(z.x[:2])
+	return bigen.Uint16(z.x[:2])
+}
+
+func (z *ioDecReader) readUint32() uint32 {
+	z.readb(z.x[:4])
+	return bigen.Uint32(z.x[:4])
+}
+
+func (z *ioDecReader) readUint64() uint64 {
+	z.readb(z.x[:8])
+	return bigen.Uint64(z.x[:8])
+}
+
+// ------------------------------------
+
+// bytesDecReader is a decReader that reads off a byte slice with zero copying
+type bytesDecReader struct {
+	b []byte // data
+	c int    // cursor
+	a int    // available
+}
+
+func (z *bytesDecReader) consume(n int) (oldcursor int) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	if n > z.a {
+		decErr("Trying to read %v bytes. Only %v available", n, z.a)
+	}
+	// z.checkAvailable(n)
+	oldcursor = z.c
+	z.c = oldcursor + n
+	z.a = z.a - n
+	return
+}
+
+func (z *bytesDecReader) readn(n int) (bs []byte) {
+	if n <= 0 {
+		return
+	}
+	c0 := z.consume(n)
+	bs = z.b[c0:z.c]
+	return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+	copy(bs, z.readn(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() uint8 {
+	c0 := z.consume(1)
+	return z.b[c0]
+}
+
+// Use the binaryEncoding helper for the 4-byte and 8-byte reads, but inline
+// the 2-byte read: creating a temp slice variable and copying it to a helper
+// function is expensive for just 2 bytes.
+ +func (z *bytesDecReader) readUint16() uint16 { + c0 := z.consume(2) + return uint16(z.b[c0+1]) | uint16(z.b[c0])<<8 +} + +func (z *bytesDecReader) readUint32() uint32 { + c0 := z.consume(4) + return bigen.Uint32(z.b[c0:z.c]) +} + +func (z *bytesDecReader) readUint64() uint64 { + c0 := z.consume(8) + return bigen.Uint64(z.b[c0:z.c]) +} + +// ------------------------------------ + +// decFnInfo has methods for registering handling decoding of a specific type +// based on some characteristics (builtin, extension, reflect Kind, etc) +type decFnInfo struct { + ti *typeInfo + d *Decoder + dd decDriver + xfFn func(reflect.Value, []byte) error + xfTag byte + array bool +} + +func (f *decFnInfo) builtin(rv reflect.Value) { + f.dd.decodeBuiltin(f.ti.rtid, rv.Addr().Interface()) +} + +func (f *decFnInfo) rawExt(rv reflect.Value) { + xtag, xbs := f.dd.decodeExt(false, 0) + rv.Field(0).SetUint(uint64(xtag)) + rv.Field(1).SetBytes(xbs) +} + +func (f *decFnInfo) ext(rv reflect.Value) { + _, xbs := f.dd.decodeExt(true, f.xfTag) + if fnerr := f.xfFn(rv, xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryUnmarshaler + if f.ti.unmIndir == -1 { + bm = rv.Addr().Interface().(binaryUnmarshaler) + } else if f.ti.unmIndir == 0 { + bm = rv.Interface().(binaryUnmarshaler) + } else { + for j, k := int8(0), f.ti.unmIndir; j < k; j++ { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryUnmarshaler) + } + xbs, _ := f.dd.decodeBytes(nil) + if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil { + panic(fnerr) + } +} + +func (f *decFnInfo) kErr(rv reflect.Value) { + decErr("Unhandled value for kind: %v: %s", rv.Kind(), msgBadDesc) +} + +func (f *decFnInfo) kString(rv reflect.Value) { + rv.SetString(f.dd.decodeString()) +} + +func (f *decFnInfo) kBool(rv reflect.Value) { + rv.SetBool(f.dd.decodeBool()) +} + +func (f *decFnInfo) kInt(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(intBitsize)) +} + +func (f *decFnInfo) kInt64(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(64)) +} + +func (f *decFnInfo) kInt32(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(32)) +} + +func (f *decFnInfo) kInt8(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(8)) +} + +func (f *decFnInfo) kInt16(rv reflect.Value) { + rv.SetInt(f.dd.decodeInt(16)) +} + +func (f *decFnInfo) kFloat32(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(true)) +} + +func (f *decFnInfo) kFloat64(rv reflect.Value) { + rv.SetFloat(f.dd.decodeFloat(false)) +} + +func (f *decFnInfo) kUint8(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(8)) +} + +func (f *decFnInfo) kUint64(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(64)) +} + +func (f *decFnInfo) kUint(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(uintBitsize)) +} + +func (f *decFnInfo) kUint32(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(32)) +} + +func (f *decFnInfo) kUint16(rv reflect.Value) { + rv.SetUint(f.dd.decodeUint(16)) +} + +// func (f *decFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
decode kPtr called - shouldn't get called")
+// 	if rv.IsNil() {
+// 		rv.Set(reflect.New(rv.Type().Elem()))
+// 	}
+// 	f.d.decodeValue(rv.Elem())
+// }
+
+func (f *decFnInfo) kInterface(rv reflect.Value) {
+	// debugf("\t===> kInterface")
+	if !rv.IsNil() {
+		f.d.decodeValue(rv.Elem())
+		return
+	}
+	// nil interface:
+	// use some heuristics to set the nil interface to an
+	// appropriate value based on the first byte read (byte descriptor bd)
+	v, vt, decodeFurther := f.dd.decodeNaked()
+	if vt == valueTypeNil {
+		return
+	}
+	// Cannot decode into nil interface with methods (e.g. error, io.Reader, etc)
+	// if non-nil value in stream.
+	if num := f.ti.rt.NumMethod(); num > 0 {
+		decErr("decodeValue: Cannot decode non-nil codec value into nil %v (%v methods)",
+			f.ti.rt, num)
+	}
+	var rvn reflect.Value
+	var useRvn bool
+	switch vt {
+	case valueTypeMap:
+		if f.d.h.MapType == nil {
+			var m2 map[interface{}]interface{}
+			v = &m2
+		} else {
+			rvn = reflect.New(f.d.h.MapType).Elem()
+			useRvn = true
+		}
+	case valueTypeArray:
+		if f.d.h.SliceType == nil {
+			var m2 []interface{}
+			v = &m2
+		} else {
+			rvn = reflect.New(f.d.h.SliceType).Elem()
+			useRvn = true
+		}
+	case valueTypeExt:
+		re := v.(*RawExt)
+		var bfn func(reflect.Value, []byte) error
+		rvn, bfn = f.d.h.getDecodeExtForTag(re.Tag)
+		if bfn == nil {
+			rvn = reflect.ValueOf(*re)
+		} else if fnerr := bfn(rvn, re.Data); fnerr != nil {
+			panic(fnerr)
+		}
+		rv.Set(rvn)
+		return
+	}
+	if decodeFurther {
+		if useRvn {
+			f.d.decodeValue(rvn)
+		} else if v != nil {
+			// this v is a pointer, so we need to dereference it when done
+			f.d.decode(v)
+			rvn = reflect.ValueOf(v).Elem()
+			useRvn = true
+		}
+	}
+	if useRvn {
+		rv.Set(rvn)
+	} else if v != nil {
+		rv.Set(reflect.ValueOf(v))
+	}
+}
+
+func (f *decFnInfo) kStruct(rv reflect.Value) {
+	fti := f.ti
+	if currEncodedType := f.dd.currentEncodedType(); currEncodedType == valueTypeMap {
+		containerLen := f.dd.readMapLen()
+		if containerLen == 0 {
+			return
+		}
+		tisfi := fti.sfi
+		for j := 0; j < containerLen; j++ {
+			// var rvkencname string
+			// ddecode(&rvkencname)
+			f.dd.initReadNext()
+			rvkencname := f.dd.decodeString()
+			// rvksi := ti.getForEncName(rvkencname)
+			if k := fti.indexForEncName(rvkencname); k > -1 {
+				sfik := tisfi[k]
+				if sfik.i != -1 {
+					f.d.decodeValue(rv.Field(int(sfik.i)))
+				} else {
+					f.d.decEmbeddedField(rv, sfik.is)
+				}
+				// f.d.decodeValue(ti.field(k, rv))
+			} else {
+				if f.d.h.ErrorIfNoField {
+					decErr("No matching struct field found when decoding stream map with key: %v",
+						rvkencname)
+				} else {
+					var nilintf0 interface{}
+					f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+				}
+			}
+		}
+	} else if currEncodedType == valueTypeArray {
+		containerLen := f.dd.readArrayLen()
+		if containerLen == 0 {
+			return
+		}
+		for j, si := range fti.sfip {
+			if j == containerLen {
+				break
+			}
+			if si.i != -1 {
+				f.d.decodeValue(rv.Field(int(si.i)))
+			} else {
+				f.d.decEmbeddedField(rv, si.is)
+			}
+		}
+		if containerLen > len(fti.sfip) {
+			// read remaining values and throw away
+			for j := len(fti.sfip); j < containerLen; j++ {
+				var nilintf0 interface{}
+				f.d.decodeValue(reflect.ValueOf(&nilintf0).Elem())
+			}
+		}
+	} else {
+		decErr("Only encoded map or array can be decoded into a struct. (valueType: %x)",
+			currEncodedType)
+	}
+}
+
+func (f *decFnInfo) kSlice(rv reflect.Value) {
+	// A slice can be set from a map or array in stream.
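+	// (When the stream holds a map, decContLens below reports twice the map
+	// length, and the slice receives the alternating key/value sequence.)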
+
+	currEncodedType := f.dd.currentEncodedType()
+
+	switch currEncodedType {
+	case valueTypeBytes, valueTypeString:
+		if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 {
+			if bs2, changed2 := f.dd.decodeBytes(rv.Bytes()); changed2 {
+				rv.SetBytes(bs2)
+			}
+			return
+		}
+	}
+
+	if shortCircuitReflectToFastPath && rv.CanAddr() {
+		switch f.ti.rtid {
+		case intfSliceTypId:
+			f.d.decSliceIntf(rv.Addr().Interface().(*[]interface{}), currEncodedType, f.array)
+			return
+		case uint64SliceTypId:
+			f.d.decSliceUint64(rv.Addr().Interface().(*[]uint64), currEncodedType, f.array)
+			return
+		case int64SliceTypId:
+			f.d.decSliceInt64(rv.Addr().Interface().(*[]int64), currEncodedType, f.array)
+			return
+		case strSliceTypId:
+			f.d.decSliceStr(rv.Addr().Interface().(*[]string), currEncodedType, f.array)
+			return
+		}
+	}
+
+	containerLen, containerLenS := decContLens(f.dd, currEncodedType)
+
+	// an array can never return a nil slice. so no need to check f.array here.
+
+	if rv.IsNil() {
+		rv.Set(reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS))
+	}
+
+	if containerLen == 0 {
+		return
+	}
+
+	if rvcap, rvlen := rv.Len(), rv.Cap(); containerLenS > rvcap {
+		if f.array { // !rv.CanSet()
+			decErr(msgDecCannotExpandArr, rvcap, containerLenS)
+		}
+		rvn := reflect.MakeSlice(f.ti.rt, containerLenS, containerLenS)
+		if rvlen > 0 {
+			reflect.Copy(rvn, rv)
+		}
+		rv.Set(rvn)
+	} else if containerLenS > rvlen {
+		rv.SetLen(containerLenS)
+	}
+
+	for j := 0; j < containerLenS; j++ {
+		f.d.decodeValue(rv.Index(j))
+	}
+}
+
+func (f *decFnInfo) kArray(rv reflect.Value) {
+	// f.d.decodeValue(rv.Slice(0, rv.Len()))
+	f.kSlice(rv.Slice(0, rv.Len()))
+}
+
+func (f *decFnInfo) kMap(rv reflect.Value) {
+	if shortCircuitReflectToFastPath && rv.CanAddr() {
+		switch f.ti.rtid {
+		case mapStrIntfTypId:
+			f.d.decMapStrIntf(rv.Addr().Interface().(*map[string]interface{}))
+			return
+		case mapIntfIntfTypId:
+			f.d.decMapIntfIntf(rv.Addr().Interface().(*map[interface{}]interface{}))
+			return
+		case mapInt64IntfTypId:
+			f.d.decMapInt64Intf(rv.Addr().Interface().(*map[int64]interface{}))
+			return
+		case mapUint64IntfTypId:
+			f.d.decMapUint64Intf(rv.Addr().Interface().(*map[uint64]interface{}))
+			return
+		}
+	}
+
+	containerLen := f.dd.readMapLen()
+
+	if rv.IsNil() {
+		rv.Set(reflect.MakeMap(f.ti.rt))
+	}
+
+	if containerLen == 0 {
+		return
+	}
+
+	ktype, vtype := f.ti.rt.Key(), f.ti.rt.Elem()
+	ktypeId := reflect.ValueOf(ktype).Pointer()
+	for j := 0; j < containerLen; j++ {
+		rvk := reflect.New(ktype).Elem()
+		f.d.decodeValue(rvk)
+
+		// special case if a byte array.
+		// if ktype == intfTyp {
+		if ktypeId == intfTypId {
+			rvk = rvk.Elem()
+			if rvk.Type() == uint8SliceTyp {
+				rvk = reflect.ValueOf(string(rvk.Bytes()))
+			}
+		}
+		rvv := rv.MapIndex(rvk)
+		if !rvv.IsValid() {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		f.d.decodeValue(rvv)
+		rv.SetMapIndex(rvk, rvv)
+	}
+}
+
+// ----------------------------------------
+
+type decFn struct {
+	i *decFnInfo
+	f func(*decFnInfo, reflect.Value)
+}
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+	r decReader
+	d decDriver
+	h *BasicHandle
+	f map[uintptr]decFn
+	x []uintptr
+	s []decFn
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (eg bufio.Reader, bytes.Buffer).
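+//
+// For example (editor's sketch; "conn" and "bh" are assumed to be an
+// io.Reader and a Handle, and are not part of the original source):
+//	dec := NewDecoder(bufio.NewReader(conn), &bh)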
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+	z := ioDecReader{
+		r: r,
+	}
+	z.br, _ = r.(io.ByteReader)
+	return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+	z := bytesDecReader{
+		b: in,
+		a: len(in),
+	}
+	return &Decoder{r: &z, d: h.newDecDriver(&z), h: h.getBasicHandle()}
+}
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+//	// Decoding into a non-nil typed value
+//	var f float32
+//	err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//	// Decoding into nil interface
+//	var v interface{}
+//	dec := codec.NewDecoder(r, handle)
+//	err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+//   - Numbers are decoded as float64, int64 or uint64.
+//   - Other values are decoded appropriately depending on the type:
+//     bool, string, []byte, time.Time, etc
+//   - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of decoding is based on the
+// type of the value. When a value is seen:
+//   - If an extension is registered for it, call that extension function
+//   - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+//   - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+//   - A map can be decoded from a stream map, by updating matching keys.
+//   - A slice can be decoded from a stream array,
+//     by updating the first n elements, where n is length of the stream.
+//   - A slice can be decoded from a stream map, by decoding as if
+//     it contains a sequence of key-value pairs.
+//   - A struct can be decoded from a stream map, by updating matching fields.
+//   - A struct can be decoded from a stream array,
+//     by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+func (d *Decoder) Decode(v interface{}) (err error) {
+	defer panicToErr(&err)
+	d.decode(v)
+	return
+}
+
+func (d *Decoder) decode(iv interface{}) {
+	d.d.initReadNext()
+
+	switch v := iv.(type) {
+	case nil:
+		decErr("Cannot decode into nil.")
+
+	case reflect.Value:
+		d.chkPtrValue(v)
+		d.decodeValue(v.Elem())
+
+	case *string:
+		*v = d.d.decodeString()
+	case *bool:
+		*v = d.d.decodeBool()
+	case *int:
+		*v = int(d.d.decodeInt(intBitsize))
+	case *int8:
+		*v = int8(d.d.decodeInt(8))
+	case *int16:
+		*v = int16(d.d.decodeInt(16))
+	case *int32:
+		*v = int32(d.d.decodeInt(32))
+	case *int64:
+		*v = d.d.decodeInt(64)
+	case *uint:
+		*v = uint(d.d.decodeUint(uintBitsize))
+	case *uint8:
+		*v = uint8(d.d.decodeUint(8))
+	case *uint16:
+		*v = uint16(d.d.decodeUint(16))
+	case *uint32:
+		*v = uint32(d.d.decodeUint(32))
+	case *uint64:
+		*v = d.d.decodeUint(64)
+	case *float32:
+		*v = float32(d.d.decodeFloat(true))
+	case *float64:
+		*v = d.d.decodeFloat(false)
+	case *[]byte:
+		*v, _ = d.d.decodeBytes(*v)
+
+	case *[]interface{}:
+		d.decSliceIntf(v, valueTypeInvalid, false)
+	case *[]uint64:
+		d.decSliceUint64(v, valueTypeInvalid, false)
+	case *[]int64:
+		d.decSliceInt64(v, valueTypeInvalid, false)
+	case *[]string:
+		d.decSliceStr(v, valueTypeInvalid, false)
+	case *map[string]interface{}:
+		d.decMapStrIntf(v)
+	case *map[interface{}]interface{}:
+		d.decMapIntfIntf(v)
+	case *map[uint64]interface{}:
+		d.decMapUint64Intf(v)
+	case *map[int64]interface{}:
+		d.decMapInt64Intf(v)
+
+	case *interface{}:
+		d.decodeValue(reflect.ValueOf(iv).Elem())
+
+	default:
+		rv := reflect.ValueOf(iv)
+		d.chkPtrValue(rv)
+		d.decodeValue(rv.Elem())
+	}
+}
+
+func (d *Decoder) decodeValue(rv reflect.Value) {
+	d.d.initReadNext()
+
+	if d.d.tryDecodeAsNil() {
+		// If value in stream is nil, set the dereferenced value to its "zero" value (if settable)
+		if rv.Kind() == reflect.Ptr {
+			if !rv.IsNil() {
+				rv.Set(reflect.Zero(rv.Type()))
+			}
+			return
+		}
+		// for rv.Kind() == reflect.Ptr {
+		// 	rv = rv.Elem()
+		// }
+		if rv.IsValid() { // rv.CanSet() // always settable, except it's invalid
+			rv.Set(reflect.Zero(rv.Type()))
+		}
+		return
+	}
+
+	// If the stream does not contain a nil value, then we can deref to the base
+	// non-pointer value, and decode into that.
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		rv = rv.Elem()
+	}
+
+	rt := rv.Type()
+	rtid := reflect.ValueOf(rt).Pointer()
+
+	// retrieve or register a focused function for this type
+	// to eliminate need to do the retrieval multiple times
+
+	// if d.f == nil && d.s == nil { debugf("---->Creating new dec f map for type: %v\n", rt) }
+	var fn decFn
+	var ok bool
+	if useMapForCodecCache {
+		fn, ok = d.f[rtid]
+	} else {
+		for i, v := range d.x {
+			if v == rtid {
+				fn, ok = d.s[i], true
+				break
+			}
+		}
+	}
+	if !ok {
+		// debugf("\tCreating new dec fn for type: %v\n", rt)
+		fi := decFnInfo{ti: getTypeInfo(rtid, rt), d: d, dd: d.d}
+		fn.i = &fi
+		// An extension can be registered for any type, regardless of the Kind
+		// (e.g. type BitSet int64, type MyStruct { / * unexported fields * / }, type X []int, etc.)
+		//
+		// We can't check if it's an extension byte here first, because the user may have
+		// registered a pointer or non-pointer type, meaning we may have to recurse first
+		// before matching a mapped type, even though the extension byte is already detected.
+		//
+		// NOTE: if decoding into a nil interface{}, we return a non-nil
+		// value, even if the container registers a length of 0.
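+		// (Dispatch order below: RawExt, builtin types such as time.Time for
+		// binc, registered extensions, BinaryUnmarshaler, then reflect.Kind.)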
+ if rtid == rawExtTypId { + fn.f = (*decFnInfo).rawExt + } else if d.d.isBuiltinType(rtid) { + fn.f = (*decFnInfo).builtin + } else if xfTag, xfFn := d.h.getDecodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*decFnInfo).ext + } else if supportBinaryMarshal && fi.ti.unm { + fn.f = (*decFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.String: + fn.f = (*decFnInfo).kString + case reflect.Bool: + fn.f = (*decFnInfo).kBool + case reflect.Int: + fn.f = (*decFnInfo).kInt + case reflect.Int64: + fn.f = (*decFnInfo).kInt64 + case reflect.Int32: + fn.f = (*decFnInfo).kInt32 + case reflect.Int8: + fn.f = (*decFnInfo).kInt8 + case reflect.Int16: + fn.f = (*decFnInfo).kInt16 + case reflect.Float32: + fn.f = (*decFnInfo).kFloat32 + case reflect.Float64: + fn.f = (*decFnInfo).kFloat64 + case reflect.Uint8: + fn.f = (*decFnInfo).kUint8 + case reflect.Uint64: + fn.f = (*decFnInfo).kUint64 + case reflect.Uint: + fn.f = (*decFnInfo).kUint + case reflect.Uint32: + fn.f = (*decFnInfo).kUint32 + case reflect.Uint16: + fn.f = (*decFnInfo).kUint16 + // case reflect.Ptr: + // fn.f = (*decFnInfo).kPtr + case reflect.Interface: + fn.f = (*decFnInfo).kInterface + case reflect.Struct: + fn.f = (*decFnInfo).kStruct + case reflect.Slice: + fn.f = (*decFnInfo).kSlice + case reflect.Array: + fi.array = true + fn.f = (*decFnInfo).kArray + case reflect.Map: + fn.f = (*decFnInfo).kMap + default: + fn.f = (*decFnInfo).kErr + } + } + if useMapForCodecCache { + if d.f == nil { + d.f = make(map[uintptr]decFn, 16) + } + d.f[rtid] = fn + } else { + d.s = append(d.s, fn) + d.x = append(d.x, rtid) + } + } + + fn.f(fn.i, rv) + + return +} + +func (d *Decoder) chkPtrValue(rv reflect.Value) { + // We can only decode into a non-nil pointer + if rv.Kind() == reflect.Ptr && !rv.IsNil() { + return + } + if !rv.IsValid() { + decErr("Cannot decode into a zero (ie invalid) reflect.Value") + } + if !rv.CanInterface() { + decErr("Cannot decode into a value without an interface: %v", rv) + } + rvi := rv.Interface() + decErr("Cannot decode into non-pointer or nil pointer. 
Got: %v, %T, %v", + rv.Kind(), rvi, rvi) +} + +func (d *Decoder) decEmbeddedField(rv reflect.Value, index []int) { + // d.decodeValue(rv.FieldByIndex(index)) + // nil pointers may be here; so reproduce FieldByIndex logic + enhancements + for _, j := range index { + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + // If a pointer, it must be a pointer to struct (based on typeInfo contract) + rv = rv.Elem() + } + rv = rv.Field(j) + } + d.decodeValue(rv) +} + +// -------------------------------------------------- + +// short circuit functions for common maps and slices + +func (d *Decoder) decSliceIntf(v *[]interface{}, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]interface{}, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]interface{}, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + d.decode(&s[j]) + } + *v = s +} + +func (d *Decoder) decSliceInt64(v *[]int64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]int64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]int64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeInt(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceUint64(v *[]uint64, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]uint64, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]uint64, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeUint(intBitsize) + } + *v = s +} + +func (d *Decoder) decSliceStr(v *[]string, currEncodedType valueType, doNotReset bool) { + _, containerLenS := decContLens(d.d, currEncodedType) + s := *v + if s == nil { + s = make([]string, containerLenS, containerLenS) + } else if containerLenS > cap(s) { + if doNotReset { + decErr(msgDecCannotExpandArr, cap(s), containerLenS) + } + s = make([]string, containerLenS, containerLenS) + copy(s, *v) + } else if containerLenS > len(s) { + s = s[:containerLenS] + } + for j := 0; j < containerLenS; j++ { + // d.decode(&s[j]) + d.d.initReadNext() + s[j] = d.d.decodeString() + } + *v = s +} + +func (d *Decoder) decMapIntfIntf(v *map[interface{}]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[interface{}]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + var mk interface{} + d.decode(&mk) + // special case if a byte array. 
+ if bv, bok := mk.([]byte); bok { + mk = string(bv) + } + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapInt64Intf(v *map[int64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[int64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeInt(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapUint64Intf(v *map[uint64]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[uint64]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeUint(intBitsize) + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +func (d *Decoder) decMapStrIntf(v *map[string]interface{}) { + containerLen := d.d.readMapLen() + m := *v + if m == nil { + m = make(map[string]interface{}, containerLen) + *v = m + } + for j := 0; j < containerLen; j++ { + d.d.initReadNext() + mk := d.d.decodeString() + mv := m[mk] + d.decode(&mv) + m[mk] = mv + } +} + +// ---------------------------------------- + +func decContLens(dd decDriver, currEncodedType valueType) (containerLen, containerLenS int) { + if currEncodedType == valueTypeInvalid { + currEncodedType = dd.currentEncodedType() + } + switch currEncodedType { + case valueTypeArray: + containerLen = dd.readArrayLen() + containerLenS = containerLen + case valueTypeMap: + containerLen = dd.readMapLen() + containerLenS = containerLen * 2 + default: + decErr("Only encoded map or array can be decoded into a slice. (valueType: %0x)", + currEncodedType) + } + return +} + +func decErr(format string, params ...interface{}) { + doPanic(msgTagDec, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/encode.go b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go new file mode 100644 index 0000000000..4914be0c74 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/encode.go @@ -0,0 +1,1001 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "io" + "reflect" +) + +const ( + // Some tagging information for error messages. + msgTagEnc = "codec.encoder" + defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + // maxTimeSecs32 = math.MaxInt32 / 60 / 24 / 366 +) + +// AsSymbolFlag defines what should be encoded as symbols. +type AsSymbolFlag uint8 + +const ( + // AsSymbolDefault is default. + // Currently, this means only encode struct field names as symbols. + // The default is subject to change. + AsSymbolDefault AsSymbolFlag = iota + + // AsSymbolAll means encode anything which could be a symbol as a symbol. + AsSymbolAll = 0xfe + + // AsSymbolNone means do not encode anything as a symbol. + AsSymbolNone = 1 << iota + + // AsSymbolMapStringKeys means encode keys in map[string]XXX as symbols. + AsSymbolMapStringKeysFlag + + // AsSymbolStructFieldName means encode struct field names as symbols. + AsSymbolStructFieldNameFlag +) + +// encWriter abstracting writing to a byte array or to an io.Writer. 
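+// (Two implementations follow: ioEncWriter for io.Writer targets, and
+// bytesEncWriter for in-memory byte slices.)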
+type encWriter interface {
+	writeUint16(uint16)
+	writeUint32(uint32)
+	writeUint64(uint64)
+	writeb([]byte)
+	writestr(string)
+	writen1(byte)
+	writen2(byte, byte)
+	atEndOfEncode()
+}
+
+// encDriver abstracts the actual codec (binc vs msgpack, etc)
+type encDriver interface {
+	isBuiltinType(rt uintptr) bool
+	encodeBuiltin(rt uintptr, v interface{})
+	encodeNil()
+	encodeInt(i int64)
+	encodeUint(i uint64)
+	encodeBool(b bool)
+	encodeFloat32(f float32)
+	encodeFloat64(f float64)
+	encodeExtPreamble(xtag byte, length int)
+	encodeArrayPreamble(length int)
+	encodeMapPreamble(length int)
+	encodeString(c charEncoding, v string)
+	encodeSymbol(v string)
+	encodeStringBytes(c charEncoding, v []byte)
+	//TODO
+	//encBignum(f *big.Int)
+	//encStringRunes(c charEncoding, v []rune)
+}
+
+type ioEncWriterWriter interface {
+	WriteByte(c byte) error
+	WriteString(s string) (n int, err error)
+	Write(p []byte) (n int, err error)
+}
+
+type ioEncStringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+type EncodeOptions struct {
+	// Encode a struct as an array, and not as a map.
+	StructToArray bool
+
+	// AsSymbols defines what should be encoded as symbols.
+	//
+	// Encoding as symbols can reduce the encoded size significantly.
+	//
+	// However, during encoding, each string to be encoded as a symbol must
+	// be checked to see if it has been seen before. Consequently, encoding time
+	// will increase when using symbols, because string comparisons have a clear cost.
+	//
+	// Sample values:
+	//   AsSymbolNone
+	//   AsSymbolAll
+	//   AsSymbolMapStringKeysFlag
+	//   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	AsSymbols AsSymbolFlag
+}
+
+// ---------------------------------------------
+
+type simpleIoEncWriterWriter struct {
+	w  io.Writer
+	bw io.ByteWriter
+	sw ioEncStringWriter
+}
+
+func (o *simpleIoEncWriterWriter) WriteByte(c byte) (err error) {
+	if o.bw != nil {
+		return o.bw.WriteByte(c)
+	}
+	_, err = o.w.Write([]byte{c})
+	return
+}
+
+func (o *simpleIoEncWriterWriter) WriteString(s string) (n int, err error) {
+	if o.sw != nil {
+		return o.sw.WriteString(s)
+	}
+	return o.w.Write([]byte(s))
+}
+
+func (o *simpleIoEncWriterWriter) Write(p []byte) (n int, err error) {
+	return o.w.Write(p)
+}
+
+// ----------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+	w ioEncWriterWriter
+	x [8]byte // temp byte array re-used internally for efficiency
+}
+
+func (z *ioEncWriter) writeUint16(v uint16) {
+	bigen.PutUint16(z.x[:2], v)
+	z.writeb(z.x[:2])
+}
+
+func (z *ioEncWriter) writeUint32(v uint32) {
+	bigen.PutUint32(z.x[:4], v)
+	z.writeb(z.x[:4])
+}
+
+func (z *ioEncWriter) writeUint64(v uint64) {
+	bigen.PutUint64(z.x[:8], v)
+	z.writeb(z.x[:8])
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+	if len(bs) == 0 {
+		return
+	}
+	n, err := z.w.Write(bs)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(bs) {
+		encErr("write: Incorrect num bytes written. Expecting: %v, Wrote: %v", len(bs), n)
+	}
+}
+
+func (z *ioEncWriter) writestr(s string) {
+	n, err := z.w.WriteString(s)
+	if err != nil {
+		panic(err)
+	}
+	if n != len(s) {
+		encErr("write: Incorrect num bytes written.
Expecting: %v, Wrote: %v", len(s), n) + } +} + +func (z *ioEncWriter) writen1(b byte) { + if err := z.w.WriteByte(b); err != nil { + panic(err) + } +} + +func (z *ioEncWriter) writen2(b1 byte, b2 byte) { + z.writen1(b1) + z.writen1(b2) +} + +func (z *ioEncWriter) atEndOfEncode() {} + +// ---------------------------------------- + +// bytesEncWriter implements encWriter and can write to an byte slice. +// It is used by Marshal function. +type bytesEncWriter struct { + b []byte + c int // cursor + out *[]byte // write out on atEndOfEncode +} + +func (z *bytesEncWriter) writeUint16(v uint16) { + c := z.grow(2) + z.b[c] = byte(v >> 8) + z.b[c+1] = byte(v) +} + +func (z *bytesEncWriter) writeUint32(v uint32) { + c := z.grow(4) + z.b[c] = byte(v >> 24) + z.b[c+1] = byte(v >> 16) + z.b[c+2] = byte(v >> 8) + z.b[c+3] = byte(v) +} + +func (z *bytesEncWriter) writeUint64(v uint64) { + c := z.grow(8) + z.b[c] = byte(v >> 56) + z.b[c+1] = byte(v >> 48) + z.b[c+2] = byte(v >> 40) + z.b[c+3] = byte(v >> 32) + z.b[c+4] = byte(v >> 24) + z.b[c+5] = byte(v >> 16) + z.b[c+6] = byte(v >> 8) + z.b[c+7] = byte(v) +} + +func (z *bytesEncWriter) writeb(s []byte) { + if len(s) == 0 { + return + } + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writestr(s string) { + c := z.grow(len(s)) + copy(z.b[c:], s) +} + +func (z *bytesEncWriter) writen1(b1 byte) { + c := z.grow(1) + z.b[c] = b1 +} + +func (z *bytesEncWriter) writen2(b1 byte, b2 byte) { + c := z.grow(2) + z.b[c] = b1 + z.b[c+1] = b2 +} + +func (z *bytesEncWriter) atEndOfEncode() { + *(z.out) = z.b[:z.c] +} + +func (z *bytesEncWriter) grow(n int) (oldcursor int) { + oldcursor = z.c + z.c = oldcursor + n + if z.c > cap(z.b) { + // Tried using appendslice logic: (if cap < 1024, *2, else *1.25). + // However, it was too expensive, causing too many iterations of copy. 
+ // Using bytes.Buffer model was much better (2*cap + n) + bs := make([]byte, 2*cap(z.b)+n) + copy(bs, z.b[:oldcursor]) + z.b = bs + } else if z.c > len(z.b) { + z.b = z.b[:cap(z.b)] + } + return +} + +// --------------------------------------------- + +type encFnInfo struct { + ti *typeInfo + e *Encoder + ee encDriver + xfFn func(reflect.Value) ([]byte, error) + xfTag byte +} + +func (f *encFnInfo) builtin(rv reflect.Value) { + f.ee.encodeBuiltin(f.ti.rtid, rv.Interface()) +} + +func (f *encFnInfo) rawExt(rv reflect.Value) { + f.e.encRawExt(rv.Interface().(RawExt)) +} + +func (f *encFnInfo) ext(rv reflect.Value) { + bs, fnerr := f.xfFn(rv) + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + return + } + if f.e.hh.writeExt() { + f.ee.encodeExtPreamble(f.xfTag, len(bs)) + f.e.w.writeb(bs) + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } + +} + +func (f *encFnInfo) binaryMarshal(rv reflect.Value) { + var bm binaryMarshaler + if f.ti.mIndir == 0 { + bm = rv.Interface().(binaryMarshaler) + } else if f.ti.mIndir == -1 { + bm = rv.Addr().Interface().(binaryMarshaler) + } else { + for j, k := int8(0), f.ti.mIndir; j < k; j++ { + if rv.IsNil() { + f.ee.encodeNil() + return + } + rv = rv.Elem() + } + bm = rv.Interface().(binaryMarshaler) + } + // debugf(">>>> binaryMarshaler: %T", rv.Interface()) + bs, fnerr := bm.MarshalBinary() + if fnerr != nil { + panic(fnerr) + } + if bs == nil { + f.ee.encodeNil() + } else { + f.ee.encodeStringBytes(c_RAW, bs) + } +} + +func (f *encFnInfo) kBool(rv reflect.Value) { + f.ee.encodeBool(rv.Bool()) +} + +func (f *encFnInfo) kString(rv reflect.Value) { + f.ee.encodeString(c_UTF8, rv.String()) +} + +func (f *encFnInfo) kFloat64(rv reflect.Value) { + f.ee.encodeFloat64(rv.Float()) +} + +func (f *encFnInfo) kFloat32(rv reflect.Value) { + f.ee.encodeFloat32(float32(rv.Float())) +} + +func (f *encFnInfo) kInt(rv reflect.Value) { + f.ee.encodeInt(rv.Int()) +} + +func (f *encFnInfo) kUint(rv reflect.Value) { + f.ee.encodeUint(rv.Uint()) +} + +func (f *encFnInfo) kInvalid(rv reflect.Value) { + f.ee.encodeNil() +} + +func (f *encFnInfo) kErr(rv reflect.Value) { + encErr("Unsupported kind: %s, for: %#v", rv.Kind(), rv) +} + +func (f *encFnInfo) kSlice(rv reflect.Value) { + if rv.IsNil() { + f.ee.encodeNil() + return + } + + if shortCircuitReflectToFastPath { + switch f.ti.rtid { + case intfSliceTypId: + f.e.encSliceIntf(rv.Interface().([]interface{})) + return + case strSliceTypId: + f.e.encSliceStr(rv.Interface().([]string)) + return + case uint64SliceTypId: + f.e.encSliceUint64(rv.Interface().([]uint64)) + return + case int64SliceTypId: + f.e.encSliceInt64(rv.Interface().([]int64)) + return + } + } + + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if f.ti.rtid == uint8SliceTypId || f.ti.rt.Elem().Kind() == reflect.Uint8 { + f.ee.encodeStringBytes(c_RAW, rv.Bytes()) + return + } + + l := rv.Len() + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kArray(rv reflect.Value) { + // We cannot share kSlice method, because the array may be non-addressable. + // E.g. type struct S{B [2]byte}; Encode(S{}) will bomb on "panic: slice of unaddressable array". 
+ // So we have to duplicate the functionality here. + // f.e.encodeValue(rv.Slice(0, rv.Len())) + // f.kSlice(rv.Slice(0, rv.Len())) + + l := rv.Len() + // Handle an array of bytes specially (in line with what is done for slices) + if f.ti.rt.Elem().Kind() == reflect.Uint8 { + if l == 0 { + f.ee.encodeStringBytes(c_RAW, nil) + return + } + var bs []byte + if rv.CanAddr() { + bs = rv.Slice(0, l).Bytes() + } else { + bs = make([]byte, l) + for i := 0; i < l; i++ { + bs[i] = byte(rv.Index(i).Uint()) + } + } + f.ee.encodeStringBytes(c_RAW, bs) + return + } + + if f.ti.mbs { + if l%2 == 1 { + encErr("mapBySlice: invalid length (must be divisible by 2): %v", l) + } + f.ee.encodeMapPreamble(l / 2) + } else { + f.ee.encodeArrayPreamble(l) + } + if l == 0 { + return + } + for j := 0; j < l; j++ { + // TODO: Consider perf implication of encoding odd index values as symbols if type is string + f.e.encodeValue(rv.Index(j)) + } +} + +func (f *encFnInfo) kStruct(rv reflect.Value) { + fti := f.ti + newlen := len(fti.sfi) + rvals := make([]reflect.Value, newlen) + var encnames []string + e := f.e + tisfi := fti.sfip + toMap := !(fti.toArray || e.h.StructToArray) + // if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct) + if toMap { + tisfi = fti.sfi + encnames = make([]string, newlen) + } + newlen = 0 + for _, si := range tisfi { + if si.i != -1 { + rvals[newlen] = rv.Field(int(si.i)) + } else { + rvals[newlen] = rv.FieldByIndex(si.is) + } + if toMap { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + continue + } + encnames[newlen] = si.encName + } else { + if si.omitEmpty && isEmptyValue(rvals[newlen]) { + rvals[newlen] = reflect.Value{} //encode as nil + } + } + newlen++ + } + + // debugf(">>>> kStruct: newlen: %v", newlen) + if toMap { + ee := f.ee //don't dereference everytime + ee.encodeMapPreamble(newlen) + // asSymbols := e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + asSymbols := e.h.AsSymbols == AsSymbolDefault || e.h.AsSymbols&AsSymbolStructFieldNameFlag != 0 + for j := 0; j < newlen; j++ { + if asSymbols { + ee.encodeSymbol(encnames[j]) + } else { + ee.encodeString(c_UTF8, encnames[j]) + } + e.encodeValue(rvals[j]) + } + } else { + f.ee.encodeArrayPreamble(newlen) + for j := 0; j < newlen; j++ { + e.encodeValue(rvals[j]) + } + } +} + +// func (f *encFnInfo) kPtr(rv reflect.Value) { +// debugf(">>>>>>> ??? 
encode kPtr called - shouldn't get called")
+// 	if rv.IsNil() {
+// 		f.ee.encodeNil()
+// 		return
+// 	}
+// 	f.e.encodeValue(rv.Elem())
+// }
+
+func (f *encFnInfo) kInterface(rv reflect.Value) {
+	if rv.IsNil() {
+		f.ee.encodeNil()
+		return
+	}
+	f.e.encodeValue(rv.Elem())
+}
+
+func (f *encFnInfo) kMap(rv reflect.Value) {
+	if rv.IsNil() {
+		f.ee.encodeNil()
+		return
+	}
+
+	if shortCircuitReflectToFastPath {
+		switch f.ti.rtid {
+		case mapIntfIntfTypId:
+			f.e.encMapIntfIntf(rv.Interface().(map[interface{}]interface{}))
+			return
+		case mapStrIntfTypId:
+			f.e.encMapStrIntf(rv.Interface().(map[string]interface{}))
+			return
+		case mapStrStrTypId:
+			f.e.encMapStrStr(rv.Interface().(map[string]string))
+			return
+		case mapInt64IntfTypId:
+			f.e.encMapInt64Intf(rv.Interface().(map[int64]interface{}))
+			return
+		case mapUint64IntfTypId:
+			f.e.encMapUint64Intf(rv.Interface().(map[uint64]interface{}))
+			return
+		}
+	}
+
+	l := rv.Len()
+	f.ee.encodeMapPreamble(l)
+	if l == 0 {
+		return
+	}
+	// keyTypeIsString := f.ti.rt.Key().Kind() == reflect.String
+	keyTypeIsString := f.ti.rt.Key() == stringTyp
+	var asSymbols bool
+	if keyTypeIsString {
+		asSymbols = f.e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0
+	}
+	mks := rv.MapKeys()
+	// for j, lmks := 0, len(mks); j < lmks; j++ {
+	for j := range mks {
+		if keyTypeIsString {
+			if asSymbols {
+				f.ee.encodeSymbol(mks[j].String())
+			} else {
+				f.ee.encodeString(c_UTF8, mks[j].String())
+			}
+		} else {
+			f.e.encodeValue(mks[j])
+		}
+		f.e.encodeValue(rv.MapIndex(mks[j]))
+	}
+
+}
+
+// --------------------------------------------------
+
+// encFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations once, and pass to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
+type encFn struct {
+	i *encFnInfo
+	f func(*encFnInfo, reflect.Value)
+}
+
+// --------------------------------------------------
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+	w  encWriter
+	e  encDriver
+	h  *BasicHandle
+	hh Handle
+	f  map[uintptr]encFn
+	x  []uintptr
+	s  []encFn
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	ww, ok := w.(ioEncWriterWriter)
+	if !ok {
+		sww := simpleIoEncWriterWriter{w: w}
+		sww.bw, _ = w.(io.ByteWriter)
+		sww.sw, _ = w.(ioEncStringWriter)
+		ww = &sww
+		//ww = bufio.NewWriterSize(w, defEncByteBufSize)
+	}
+	z := ioEncWriter{
+		w: ww,
+	}
+	return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	in := *out
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	z := bytesEncWriter{
+		b:   in,
+		out: out,
+	}
+	return &Encoder{w: &z, hh: h, h: h.getBasicHandle(), e: h.newEncDriver(&z)}
+}
+
+// Encode writes an object into a stream in the codec format.
+//
+// Encoding can be configured via the "codec" struct tag for the fields.
+//
+// The "codec" key in struct field's tag value is the key name,
+// followed by an optional comma and options.
+// +// To set an option on all fields (e.g. omitempty on all fields), you +// can create a field called _struct, and set flags on it. +// +// Struct values "usually" encode as maps. Each exported struct field is encoded unless: +// - the field's codec tag is "-", OR +// - the field is empty and its codec tag specifies the "omitempty" option. +// +// When encoding as a map, the first string in the tag (before the comma) +// is the map key string to use when encoding. +// +// However, struct values may encode as arrays. This happens when: +// - StructToArray Encode option is set, OR +// - the codec tag on the _struct field sets the "toarray" option +// +// Values with types that implement MapBySlice are encoded as stream maps. +// +// The empty values (for omitempty option) are false, 0, any nil pointer +// or interface value, and any array, slice, map, or string of length zero. +// +// Anonymous fields are encoded inline if no struct tag is present. +// Else they are encoded as regular fields. +// +// Examples: +// +// type MyStruct struct { +// _struct bool `codec:",omitempty"` //set omitempty for every field +// Field1 string `codec:"-"` //skip this field +// Field2 int `codec:"myName"` //Use key "myName" in encode stream +// Field3 int32 `codec:",omitempty"` //use key "Field3". Omit if empty. +// Field4 bool `codec:"f4,omitempty"` //use key "f4". Omit if empty. +// ... +// } +// +// type MyStruct struct { +// _struct bool `codec:",omitempty,toarray"` //set omitempty for every field +// //and encode struct as an array +// } +// +// The mode of encoding is based on the type of the value. When a value is seen: +// - If an extension is registered for it, call that extension function +// - If it implements BinaryMarshaler, call its MarshalBinary() (data []byte, err error) +// - Else encode it based on its reflect.Kind +// +// Note that struct field names and keys in map[string]XXX will be treated as symbols. +// Some formats support symbols (e.g. binc) and will properly encode the string +// only once in the stream, and use a tag to refer to it thereafter. 
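+//
+// Illustrative usage (editor's sketch, not part of the upstream source; the
+// struct type is the hypothetical MyStruct from the examples above, and
+// MsgpackHandle is the handle defined in msgpack.go):
+//
+//	var buf bytes.Buffer
+//	enc := NewEncoder(&buf, &MsgpackHandle{})
+//	if err := enc.Encode(MyStruct{Field2: 7}); err != nil {
+//		// Encode recovers internal panics and returns them as err.
+//	}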
+func (e *Encoder) Encode(v interface{}) (err error) { + defer panicToErr(&err) + e.encode(v) + e.w.atEndOfEncode() + return +} + +func (e *Encoder) encode(iv interface{}) { + switch v := iv.(type) { + case nil: + e.e.encodeNil() + + case reflect.Value: + e.encodeValue(v) + + case string: + e.e.encodeString(c_UTF8, v) + case bool: + e.e.encodeBool(v) + case int: + e.e.encodeInt(int64(v)) + case int8: + e.e.encodeInt(int64(v)) + case int16: + e.e.encodeInt(int64(v)) + case int32: + e.e.encodeInt(int64(v)) + case int64: + e.e.encodeInt(v) + case uint: + e.e.encodeUint(uint64(v)) + case uint8: + e.e.encodeUint(uint64(v)) + case uint16: + e.e.encodeUint(uint64(v)) + case uint32: + e.e.encodeUint(uint64(v)) + case uint64: + e.e.encodeUint(v) + case float32: + e.e.encodeFloat32(v) + case float64: + e.e.encodeFloat64(v) + + case []interface{}: + e.encSliceIntf(v) + case []string: + e.encSliceStr(v) + case []int64: + e.encSliceInt64(v) + case []uint64: + e.encSliceUint64(v) + case []uint8: + e.e.encodeStringBytes(c_RAW, v) + + case map[interface{}]interface{}: + e.encMapIntfIntf(v) + case map[string]interface{}: + e.encMapStrIntf(v) + case map[string]string: + e.encMapStrStr(v) + case map[int64]interface{}: + e.encMapInt64Intf(v) + case map[uint64]interface{}: + e.encMapUint64Intf(v) + + case *string: + e.e.encodeString(c_UTF8, *v) + case *bool: + e.e.encodeBool(*v) + case *int: + e.e.encodeInt(int64(*v)) + case *int8: + e.e.encodeInt(int64(*v)) + case *int16: + e.e.encodeInt(int64(*v)) + case *int32: + e.e.encodeInt(int64(*v)) + case *int64: + e.e.encodeInt(*v) + case *uint: + e.e.encodeUint(uint64(*v)) + case *uint8: + e.e.encodeUint(uint64(*v)) + case *uint16: + e.e.encodeUint(uint64(*v)) + case *uint32: + e.e.encodeUint(uint64(*v)) + case *uint64: + e.e.encodeUint(*v) + case *float32: + e.e.encodeFloat32(*v) + case *float64: + e.e.encodeFloat64(*v) + + case *[]interface{}: + e.encSliceIntf(*v) + case *[]string: + e.encSliceStr(*v) + case *[]int64: + e.encSliceInt64(*v) + case *[]uint64: + e.encSliceUint64(*v) + case *[]uint8: + e.e.encodeStringBytes(c_RAW, *v) + + case *map[interface{}]interface{}: + e.encMapIntfIntf(*v) + case *map[string]interface{}: + e.encMapStrIntf(*v) + case *map[string]string: + e.encMapStrStr(*v) + case *map[int64]interface{}: + e.encMapInt64Intf(*v) + case *map[uint64]interface{}: + e.encMapUint64Intf(*v) + + default: + e.encodeValue(reflect.ValueOf(iv)) + } +} + +func (e *Encoder) encodeValue(rv reflect.Value) { + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + e.e.encodeNil() + return + } + rv = rv.Elem() + } + + rt := rv.Type() + rtid := reflect.ValueOf(rt).Pointer() + + // if e.f == nil && e.s == nil { debugf("---->Creating new enc f map for type: %v\n", rt) } + var fn encFn + var ok bool + if useMapForCodecCache { + fn, ok = e.f[rtid] + } else { + for i, v := range e.x { + if v == rtid { + fn, ok = e.s[i], true + break + } + } + } + if !ok { + // debugf("\tCreating new enc fn for type: %v\n", rt) + fi := encFnInfo{ti: getTypeInfo(rtid, rt), e: e, ee: e.e} + fn.i = &fi + if rtid == rawExtTypId { + fn.f = (*encFnInfo).rawExt + } else if e.e.isBuiltinType(rtid) { + fn.f = (*encFnInfo).builtin + } else if xfTag, xfFn := e.h.getEncodeExt(rtid); xfFn != nil { + fi.xfTag, fi.xfFn = xfTag, xfFn + fn.f = (*encFnInfo).ext + } else if supportBinaryMarshal && fi.ti.m { + fn.f = (*encFnInfo).binaryMarshal + } else { + switch rk := rt.Kind(); rk { + case reflect.Bool: + fn.f = (*encFnInfo).kBool + case reflect.String: + fn.f = (*encFnInfo).kString + case reflect.Float64: + 
fn.f = (*encFnInfo).kFloat64 + case reflect.Float32: + fn.f = (*encFnInfo).kFloat32 + case reflect.Int, reflect.Int8, reflect.Int64, reflect.Int32, reflect.Int16: + fn.f = (*encFnInfo).kInt + case reflect.Uint8, reflect.Uint64, reflect.Uint, reflect.Uint32, reflect.Uint16: + fn.f = (*encFnInfo).kUint + case reflect.Invalid: + fn.f = (*encFnInfo).kInvalid + case reflect.Slice: + fn.f = (*encFnInfo).kSlice + case reflect.Array: + fn.f = (*encFnInfo).kArray + case reflect.Struct: + fn.f = (*encFnInfo).kStruct + // case reflect.Ptr: + // fn.f = (*encFnInfo).kPtr + case reflect.Interface: + fn.f = (*encFnInfo).kInterface + case reflect.Map: + fn.f = (*encFnInfo).kMap + default: + fn.f = (*encFnInfo).kErr + } + } + if useMapForCodecCache { + if e.f == nil { + e.f = make(map[uintptr]encFn, 16) + } + e.f[rtid] = fn + } else { + e.s = append(e.s, fn) + e.x = append(e.x, rtid) + } + } + + fn.f(fn.i, rv) + +} + +func (e *Encoder) encRawExt(re RawExt) { + if re.Data == nil { + e.e.encodeNil() + return + } + if e.hh.writeExt() { + e.e.encodeExtPreamble(re.Tag, len(re.Data)) + e.w.writeb(re.Data) + } else { + e.e.encodeStringBytes(c_RAW, re.Data) + } +} + +// --------------------------------------------- +// short circuit functions for common maps and slices + +func (e *Encoder) encSliceIntf(v []interface{}) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.encode(v2) + } +} + +func (e *Encoder) encSliceStr(v []string) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encSliceInt64(v []int64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeInt(v2) + } +} + +func (e *Encoder) encSliceUint64(v []uint64) { + e.e.encodeArrayPreamble(len(v)) + for _, v2 := range v { + e.e.encodeUint(v2) + } +} + +func (e *Encoder) encMapStrStr(v map[string]string) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.e.encodeString(c_UTF8, v2) + } +} + +func (e *Encoder) encMapStrIntf(v map[string]interface{}) { + e.e.encodeMapPreamble(len(v)) + asSymbols := e.h.AsSymbols&AsSymbolMapStringKeysFlag != 0 + for k2, v2 := range v { + if asSymbols { + e.e.encodeSymbol(k2) + } else { + e.e.encodeString(c_UTF8, k2) + } + e.encode(v2) + } +} + +func (e *Encoder) encMapInt64Intf(v map[int64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeInt(k2) + e.encode(v2) + } +} + +func (e *Encoder) encMapUint64Intf(v map[uint64]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.e.encodeUint(uint64(k2)) + e.encode(v2) + } +} + +func (e *Encoder) encMapIntfIntf(v map[interface{}]interface{}) { + e.e.encodeMapPreamble(len(v)) + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } +} + +// ---------------------------------------- + +func encErr(format string, params ...interface{}) { + doPanic(msgTagEnc, format, params...) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go new file mode 100644 index 0000000000..e6dc0563f0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper.go @@ -0,0 +1,589 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. 
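+//
+// (Editor's note, summarizing the pieces defined below: a format-specific
+// Handle, e.g. MsgpackHandle or SimpleHandle, produces the encDriver/decDriver
+// pair for its wire format, while the embedded BasicHandle carries the options
+// and registered extensions that all formats share.)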
+ +import ( + "encoding/binary" + "fmt" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +const ( + structTagName = "codec" + + // Support + // encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error) + // encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error + // This constant flag will enable or disable it. + supportBinaryMarshal = true + + // Each Encoder or Decoder uses a cache of functions based on conditionals, + // so that the conditionals are not run every time. + // + // Either a map or a slice is used to keep track of the functions. + // The map is more natural, but has a higher cost than a slice/array. + // This flag (useMapForCodecCache) controls which is used. + useMapForCodecCache = false + + // For some common container types, we can short-circuit an elaborate + // reflection dance and call encode/decode directly. + // The currently supported types are: + // - slices of strings, or id's (int64,uint64) or interfaces. + // - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf + shortCircuitReflectToFastPath = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true +) + +type charEncoding uint8 + +const ( + c_RAW charEncoding = iota + c_UTF8 + c_UTF16LE + c_UTF16BE + c_UTF32LE + c_UTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTimestamp + valueTypeExt + + valueTypeInvalid = 0xff +) + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + cachedTypeInfo = make(map[uintptr]*typeInfo, 4) + cachedTypeInfoMutex sync.RWMutex + + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + strSliceTyp = reflect.TypeOf([]string(nil)) + boolSliceTyp = reflect.TypeOf([]bool(nil)) + uintSliceTyp = reflect.TypeOf([]uint(nil)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uint16SliceTyp = reflect.TypeOf([]uint16(nil)) + uint32SliceTyp = reflect.TypeOf([]uint32(nil)) + uint64SliceTyp = reflect.TypeOf([]uint64(nil)) + intSliceTyp = reflect.TypeOf([]int(nil)) + int8SliceTyp = reflect.TypeOf([]int8(nil)) + int16SliceTyp = reflect.TypeOf([]int16(nil)) + int32SliceTyp = reflect.TypeOf([]int32(nil)) + int64SliceTyp = reflect.TypeOf([]int64(nil)) + float32SliceTyp = reflect.TypeOf([]float32(nil)) + float64SliceTyp = reflect.TypeOf([]float64(nil)) + + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapStrStrTyp = reflect.TypeOf(map[string]string(nil)) + + mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil)) + mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil)) + mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil)) + mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil)) + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem() + + rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer() + intfTypId = reflect.ValueOf(intfTyp).Pointer() + timeTypId = 
reflect.ValueOf(timeTyp).Pointer() + + intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer() + strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer() + + boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer() + uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer() + uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer() + uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer() + uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer() + uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer() + intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer() + int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer() + int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer() + int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer() + int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer() + float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer() + float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer() + + mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer() + mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer() + mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer() + mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer() + mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer() + mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer() + mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer() + // Id = reflect.ValueOf().Pointer() + // mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer() + + binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer() + binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer() + + intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits()) + uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +) + +type binaryUnmarshaler interface { + UnmarshalBinary(data []byte) error +} + +type binaryMarshaler interface { + MarshalBinary() (data []byte, err error) +} + +// MapBySlice represents a slice which should be encoded as a map in the stream. +// The slice contains a sequence of key-value pairs. +type MapBySlice interface { + MapBySlice() +} + +// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +// +// BasicHandle encapsulates the common options and extension functions. +type BasicHandle struct { + extHandle + EncodeOptions + DecodeOptions +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + writeExt() bool + getBasicHandle() *BasicHandle + newEncDriver(w encWriter) encDriver + newDecDriver(r decReader) decDriver +} + +// RawExt represents raw unprocessed extension data. +type RawExt struct { + Tag byte + Data []byte +} + +type extTypeTagFn struct { + rtid uintptr + rt reflect.Type + tag byte + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +type extHandle []*extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// Note that the type must be a named type, and specifically not +// a pointer or Interface. An error is returned if that is not honored. +// +// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn. 
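+//
+// Illustrative usage (editor's sketch, not from the upstream source; the tag
+// value 1 and the choice of time.Time are arbitrary):
+//
+//	h := new(MsgpackHandle)
+//	err := h.AddExt(reflect.TypeOf(time.Time{}), 1,
+//		func(rv reflect.Value) ([]byte, error) {
+//			return rv.Interface().(time.Time).MarshalBinary()
+//		},
+//		func(rv reflect.Value, bs []byte) error {
+//			return rv.Addr().Interface().(*time.Time).UnmarshalBinary(bs)
+//		})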
+func (o *extHandle) AddExt( + rt reflect.Type, + tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error, +) (err error) { + // o is a pointer, because we may need to initialize it + if rt.PkgPath() == "" || rt.Kind() == reflect.Interface { + err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T", + reflect.Zero(rt).Interface()) + return + } + + // o cannot be nil, since it is always embedded in a Handle. + // if nil, let it panic. + // if o == nil { + // err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.") + // return + // } + + rtid := reflect.ValueOf(rt).Pointer() + for _, v := range *o { + if v.rtid == rtid { + v.tag, v.encFn, v.decFn = tag, encfn, decfn + return + } + } + + *o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn}) + return +} + +func (o extHandle) getExt(rtid uintptr) *extTypeTagFn { + for _, v := range o { + if v.rtid == rtid { + return v + } + } + return nil +} + +func (o extHandle) getExtForTag(tag byte) *extTypeTagFn { + for _, v := range o { + if v.tag == tag { + return v + } + } + return nil +} + +func (o extHandle) getDecodeExtForTag(tag byte) ( + rv reflect.Value, fn func(reflect.Value, []byte) error) { + if x := o.getExtForTag(tag); x != nil { + // ext is only registered for base + rv = reflect.New(x.rt).Elem() + fn = x.decFn + } + return +} + +func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.decFn + } + return +} + +func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) { + if x := o.getExt(rtid); x != nil { + tag = x.tag + fn = x.encFn + } + return +} + +type structFieldInfo struct { + encName string // encode name + + // only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set. + + is []int // (recursive/embedded) field index in struct + i int16 // field index in struct + omitEmpty bool + toArray bool // if field is _struct, is the toArray set? + + // tag string // tag + // name string // field name + // encNameBs []byte // encoded name as byte stream + // ikind int // kind of the field as an int i.e. int(reflect.Kind) +} + +func parseStructFieldInfo(fname string, stag string) *structFieldInfo { + if fname == "" { + panic("parseStructFieldInfo: No Field Name") + } + si := structFieldInfo{ + // name: fname, + encName: fname, + // tag: stag, + } + + if stag != "" { + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.omitEmpty = true + case "toarray": + si.toArray = true + } + } + } + } + // si.encNameBs = []byte(si.encName) + return &si +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +// typeInfo keeps information about each type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + sfi []*structFieldInfo // sorted. 
Used when enc/dec struct to map. + sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + rt reflect.Type + rtid uintptr + + // baseId gives pointer to the base reflect.Type, after dereferencing + // the pointers. E.g. base type of ***time.Time is time.Time. + base reflect.Type + baseId uintptr + baseIndir int8 // number of indirections to get to base + + mbs bool // base type (T or *T) is a MapBySlice + + m bool // base type (T or *T) is a binaryMarshaler + unm bool // base type (T or *T) is a binaryUnmarshaler + mIndir int8 // number of indirections to get to binaryMarshaler type + unmIndir int8 // number of indirections to get to binaryUnmarshaler type + toArray bool // whether this (struct) type should be encoded as an array +} + +func (ti *typeInfo) indexForEncName(name string) int { + //tisfi := ti.sfi + const binarySearchThreshold = 16 + if sfilen := len(ti.sfi); sfilen < binarySearchThreshold { + // linear search. faster than binary search in my testing up to 16-field structs. + for i, si := range ti.sfi { + if si.encName == name { + return i + } + } + } else { + // binary search. adapted from sort/search.go. + h, i, j := 0, 0, sfilen + for i < j { + h = i + (j-i)/2 + if ti.sfi[h].encName < name { + i = h + 1 + } else { + j = h + } + } + if i < sfilen && ti.sfi[i].encName == name { + return i + } + } + return -1 +} + +func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + var ok bool + cachedTypeInfoMutex.RLock() + pti, ok = cachedTypeInfo[rtid] + cachedTypeInfoMutex.RUnlock() + if ok { + return + } + + cachedTypeInfoMutex.Lock() + defer cachedTypeInfoMutex.Unlock() + if pti, ok = cachedTypeInfo[rtid]; ok { + return + } + + ti := typeInfo{rt: rt, rtid: rtid} + pti = &ti + + var indir int8 + if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok { + ti.m, ti.mIndir = true, indir + } + if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok { + ti.unm, ti.unmIndir = true, indir + } + if ok, _ = implementsIntf(rt, mapBySliceTyp); ok { + ti.mbs = true + } + + pt := rt + var ptIndir int8 + // for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { } + for pt.Kind() == reflect.Ptr { + pt = pt.Elem() + ptIndir++ + } + if ptIndir == 0 { + ti.base = rt + ti.baseId = rtid + } else { + ti.base = pt + ti.baseId = reflect.ValueOf(pt).Pointer() + ti.baseIndir = ptIndir + } + + if rt.Kind() == reflect.Struct { + var siInfo *structFieldInfo + if f, ok := rt.FieldByName(structInfoFieldName); ok { + siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName)) + ti.toArray = siInfo.toArray + } + sfip := make([]*structFieldInfo, 0, rt.NumField()) + rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo) + + // // try to put all si close together + // const tryToPutAllStructFieldInfoTogether = true + // if tryToPutAllStructFieldInfoTogether { + // sfip2 := make([]structFieldInfo, len(sfip)) + // for i, si := range sfip { + // sfip2[i] = *si + // } + // for i := range sfip { + // sfip[i] = &sfip2[i] + // } + // } + + ti.sfip = make([]*structFieldInfo, len(sfip)) + ti.sfi = make([]*structFieldInfo, len(sfip)) + copy(ti.sfip, sfip) + sort.Sort(sfiSortedByEncName(sfip)) + copy(ti.sfi, sfip) + } + // sfi = sfip + cachedTypeInfo[rtid] = pti + return +} + +func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool, + sfi *[]*structFieldInfo, siInfo *structFieldInfo, +) { + // for rt.Kind() == reflect.Ptr { + // // indexstack = append(indexstack, 0) + // rt = rt.Elem() + // } + for j := 0; j < rt.NumField(); j++ {
f := rt.Field(j) + stag := f.Tag.Get(structTagName) + if stag == "-" { + continue + } + if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) { + continue + } + // if anonymous and there is no struct tag and its a struct (or pointer to struct), inline it. + if f.Anonymous && stag == "" { + ft := f.Type + for ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + if ft.Kind() == reflect.Struct { + indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo) + continue + } + } + // do not let fields with same name in embedded structs override field at higher level. + // this must be done after anonymous check, to allow anonymous field + // still include their child fields + if _, ok := fnameToHastag[f.Name]; ok { + continue + } + si := parseStructFieldInfo(f.Name, stag) + // si.ikind = int(f.Type.Kind()) + if len(indexstack) == 0 { + si.i = int16(j) + } else { + si.i = -1 + si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j) + } + + if siInfo != nil { + if siInfo.omitEmpty { + si.omitEmpty = true + } + } + *sfi = append(*sfi, si) + fnameToHastag[f.Name] = stag != "" + } +} + +func panicToErr(err *error) { + if recoverPanicToErr { + if x := recover(); x != nil { + //debug.PrintStack() + panicValToErr(x, err) + } + } +} + +func doPanic(tag string, format string, params ...interface{}) { + params2 := make([]interface{}, len(params)+1) + params2[0] = tag + copy(params2[1:], params) + panic(fmt.Errorf("%s: "+format, params2...)) +} + +func checkOverflowFloat32(f float64, doCheck bool) { + if !doCheck { + return + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowFloat() + f2 := f + if f2 < 0 { + f2 = -f + } + if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 { + decErr("Overflow float32 value: %v", f2) + } +} + +func checkOverflow(ui uint64, i int64, bitsize uint8) { + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize == 0 { + return + } + if i != 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + if ui != 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go new file mode 100644 index 0000000000..58417da958 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go @@ -0,0 +1,127 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
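+//
+// (Editor's note on the error strategy these helpers serve: internal code
+// reports failures by panicking via doPanic/encErr/decErr, and exported entry
+// points recover that into a returned error through panicToErr, e.g. Encode
+// above does:
+//
+//	func (e *Encoder) Encode(v interface{}) (err error) {
+//		defer panicToErr(&err)
+//		e.encode(v)
+//		...
+//	}
+//
+// panicValToErr below performs the actual conversion.)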
+ +import ( + "errors" + "fmt" + "math" + "reflect" +) + +var ( + raisePanicAfterRecover = false + debugging = true +) + +func panicValToErr(panicVal interface{}, err *error) { + switch xerr := panicVal.(type) { + case error: + *err = xerr + case string: + *err = errors.New(xerr) + default: + *err = fmt.Errorf("%v", panicVal) + } + if raisePanicAfterRecover { + panic(panicVal) + } + return +} + +func isEmptyValueDeref(v reflect.Value, deref bool) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValueDeref(v.Elem(), deref) + } else { + return v.IsNil() + } + case reflect.Struct: + // return true if all fields are empty. else return false. + + // we cannot use equality check, because some fields may be maps/slices/etc + // and consequently the structs are not comparable. + // return v.Interface() == reflect.Zero(v.Type()).Interface() + for i, n := 0, v.NumField(); i < n; i++ { + if !isEmptyValueDeref(v.Field(i), deref) { + return false + } + } + return true + } + return false +} + +func isEmptyValue(v reflect.Value) bool { + return isEmptyValueDeref(v, true) +} + +func debugf(format string, args ...interface{}) { + if debugging { + if len(format) == 0 || format[len(format)-1] != '\n' { + format = format + "\n" + } + fmt.Printf(format, args...) + } +} + +func pruneSignExt(v []byte, pos bool) (n int) { + if len(v) < 2 { + } else if pos && v[0] == 0 { + for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ { + } + } else if !pos && v[0] == 0xff { + for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ { + } + } + return +} + +func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) { + if typ == nil { + return + } + rt := typ + // The type might be a pointer and we need to keep + // dereferencing to the base type until we find an implementation. + for { + if rt.Implements(iTyp) { + return true, indir + } + if p := rt; p.Kind() == reflect.Ptr { + indir++ + if indir >= math.MaxInt8 { // insane number of indirections + return false, 0 + } + rt = p.Elem() + continue + } + break + } + // No luck yet, but if this is a base type (non-pointer), the pointer might satisfy. + if typ.Kind() != reflect.Ptr { + // Not a pointer, but does the pointer work? + if reflect.PtrTo(typ).Implements(iTyp) { + return true, -1 + } + } + return false, 0 +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go new file mode 100644 index 0000000000..da0500d192 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go @@ -0,0 +1,816 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. 
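+
+For example (editor's illustration, derived from encodeInt/encodeUint below):
+int64(5) and uint8(5) are both written as the single positive fixnum byte 0x05,
+while int64(-3) is written as the single negative fixnum byte 0xfd, regardless
+of the Go integer type that held the value.
+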
+ +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed + +*/ +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false} + msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true} + msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false} + msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false} +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + w encWriter + h *MsgpackHandle +} + +func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {} + +func (e *msgpackEncDriver) encodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) encodeInt(i int64) { + + switch { + case i >= 0: + e.encodeUint(uint64(i)) + case i >= -32: + e.w.writen1(byte(i)) + case i >= math.MinInt8: + e.w.writen2(mpInt8, byte(i)) + case i >= math.MinInt16: + e.w.writen1(mpInt16) + e.w.writeUint16(uint16(i)) + case i >= math.MinInt32: + e.w.writen1(mpInt32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpInt64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeUint(i uint64) { + switch { + case i <= math.MaxInt8: + e.w.writen1(byte(i)) + case i <= math.MaxUint8: + e.w.writen2(mpUint8, byte(i)) + case i <= math.MaxUint16: + e.w.writen1(mpUint16) + e.w.writeUint16(uint16(i)) + case i <= math.MaxUint32: + e.w.writen1(mpUint32) + e.w.writeUint32(uint32(i)) + default: + e.w.writen1(mpUint64) + e.w.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) encodeFloat32(f float32) { + e.w.writen1(mpFloat) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) encodeFloat64(f float64) { + e.w.writen1(mpDouble) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + switch { + case l == 1: + e.w.writen2(mpFixExt1, xtag) + case l == 2: + e.w.writen2(mpFixExt2, xtag) + case l == 4: + e.w.writen2(mpFixExt4, xtag) + case l == 8: + e.w.writen2(mpFixExt8, xtag) + case l == 16: + e.w.writen2(mpFixExt16, xtag) + case l < 256: + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + case l < 65536: + e.w.writen1(mpExt16) + e.w.writeUint16(uint16(l)) + e.w.writen1(xtag) + default: + e.w.writen1(mpExt32) + e.w.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) encodeArrayPreamble(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) encodeMapPreamble(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) encodeString(c charEncoding, s string) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(s)) + } else { + e.writeContainerLen(msgpackContainerStr, len(s)) + } + if len(s) > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) { + if c == c_RAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, len(bs)) + } else { + e.writeContainerLen(msgpackContainerStr, len(bs)) + } + if len(bs) > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + switch { + case ct.hasFixMin && l < ct.fixCutoff: + e.w.writen1(ct.bFixMin | byte(l)) + case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt): + e.w.writen2(ct.b8, uint8(l)) + case l < 65536: + e.w.writen1(ct.b16) + e.w.writeUint16(uint16(l)) + default: + e.w.writen1(ct.b32) + e.w.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + r decReader + h *MsgpackHandle + bd byte + bdRead bool + bdType valueType +} + +func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool { + //no builtin types. All encodings are based on kinds. Types supported as extensions. 
+ return false +} + +func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + bd := d.bd + + switch bd { + case mpNil: + vt = valueTypeNil + d.bdRead = false + case mpFalse: + vt = valueTypeBool + v = false + case mpTrue: + vt = valueTypeBool + v = true + + case mpFloat: + vt = valueTypeFloat + v = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + vt = valueTypeFloat + v = math.Float64frombits(d.r.readUint64()) + + case mpUint8: + vt = valueTypeUint + v = uint64(d.r.readn1()) + case mpUint16: + vt = valueTypeUint + v = uint64(d.r.readUint16()) + case mpUint32: + vt = valueTypeUint + v = uint64(d.r.readUint32()) + case mpUint64: + vt = valueTypeUint + v = uint64(d.r.readUint64()) + + case mpInt8: + vt = valueTypeInt + v = int64(int8(d.r.readn1())) + case mpInt16: + vt = valueTypeInt + v = int64(int16(d.r.readUint16())) + case mpInt32: + vt = valueTypeInt + v = int64(int32(d.r.readUint32())) + case mpInt64: + vt = valueTypeInt + v = int64(int64(d.r.readUint64())) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + vt = valueTypeInt + v = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + vt = valueTypeInt + v = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + var rvm string + vt = valueTypeString + v = &rvm + } else { + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + } + decodeFurther = true + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + var rvm = []byte{} + vt = valueTypeBytes + v = &rvm + decodeFurther = true + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + vt = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + vt = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + clen := d.readExtLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(clen) + v = &re + vt = valueTypeExt + default: + decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + if !decodeFurther { + d.bdRead = false + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) { + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(d.r.readUint16())) + case mpUint32: + i = int64(uint64(d.r.readUint32())) + case mpUint64: + i = int64(d.r.readUint64()) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(d.r.readUint16())) + case mpInt32: + i = int64(int32(d.r.readUint32())) + case mpInt64: + i = int64(d.r.readUint64()) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", 
msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc { + decErr("Overflow int value: %v", i) + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) { + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(d.r.readUint16()) + case mpUint32: + ui = uint64(d.r.readUint32()) + case mpUint64: + ui = d.r.readUint64() + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt16: + if i := int64(int16(d.r.readUint16())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt32: + if i := int64(int32(d.r.readUint32())); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + case mpInt64: + if i := int64(d.r.readUint64()); i >= 0 { + ui = uint64(i) + } else { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd)) + default: + decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd) + } + } + // check overflow (logic adapted from std pkg reflect/value.go OverflowUint() + if bitsize > 0 { + if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc { + decErr("Overflow uint value: %v", ui) + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case mpFloat: + f = float64(math.Float32frombits(d.r.readUint32())) + case mpDouble: + f = math.Float64frombits(d.r.readUint64()) + default: + f = float64(d.decodeInt(0)) + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. +func (d *msgpackDecDriver) decodeBool() (b bool) { + switch d.bd { + case mpFalse, 0: + // b = false + case mpTrue, 1: + b = true + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) decodeString() (s string) { + clen := d.readContainerLen(msgpackContainerStr) + if clen > 0 { + s = string(d.r.readn(clen)) + } + d.bdRead = false + return +} + +// Callers must check if changed=true (to decide whether to replace the one they have) +func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + // bytes can be decoded from msgpackContainerStr or msgpackContainerBin + var clen int + switch d.bd { + case mpBin8, mpBin16, mpBin32: + clen = d.readContainerLen(msgpackContainerBin) + default: + clen = d.readContainerLen(msgpackContainerStr) + } + // if clen < 0 { + // changed = true + // panic("length cannot be zero. 
this cannot be nil.") + // } + if clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + // Return changed=true if length of passed slice diff from length of bytes in stream + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +// Every top-level decode funcs (i.e. decodeValue, decode) must call this first. +func (d *msgpackDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *msgpackDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + bd := d.bd + switch bd { + case mpNil: + d.bdType = valueTypeNil + case mpFalse, mpTrue: + d.bdType = valueTypeBool + case mpFloat, mpDouble: + d.bdType = valueTypeFloat + case mpUint8, mpUint16, mpUint32, mpUint64: + d.bdType = valueTypeUint + case mpInt8, mpInt16, mpInt32, mpInt64: + d.bdType = valueTypeInt + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + d.bdType = valueTypeInt + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + d.bdType = valueTypeInt + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + d.bdType = valueTypeString + } else { + d.bdType = valueTypeBytes + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + d.bdType = valueTypeBytes + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + d.bdType = valueTypeArray + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + d.bdType = valueTypeMap + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + d.bdType = valueTypeExt + default: + decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + } + } + return d.bdType +} + +func (d *msgpackDecDriver) tryDecodeAsNil() bool { + if d.bd == mpNil { + d.bdRead = false + return true + } + return false +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + switch { + case bd == mpNil: + clen = -1 // to represent nil + case bd == ct.b8: + clen = int(d.r.readn1()) + case bd == ct.b16: + clen = int(d.r.readUint16()) + case bd == ct.b32: + clen = int(d.r.readUint32()) + case (ct.bFixMin & bd) == ct.bFixMin: + clen = int(ct.bFixMin ^ bd) + default: + decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd) + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) readMapLen() int { + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) readArrayLen() int { + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(d.r.readUint16()) + case mpExt32: + clen = int(d.r.readUint32()) + default: + decErr("decoding ext bytes: found unexpected byte: %x", d.bd) + } + return +} + +func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + xbd := d.bd + switch { + case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32: + xbs, _ = d.decodeBytes(nil) + case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32, + xbd >= mpFixStrMin && xbd <= mpFixStrMax: + 
xbs = []byte(d.decodeString()) + default: + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. + RawToString bool + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool +} + +func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver { + return &msgpackEncDriver{w: w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(r decReader) decDriver { + return &msgpackDecDriver{r: r, h: h} +} + +func (h *MsgpackHandle) writeExt() bool { + return h.WriteExt +} + +func (h *MsgpackHandle) getBasicHandle() *BasicHandle { + return &h.BasicHandle +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false, true) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + + if c.cls { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var b byte + b, err = c.br.ReadByte() + if err != nil { + return + } + if b != fia { + err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b) + return + } + + if err = c.read(&b); err != nil { + return + } + if b != expectTypeByte { + err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b) + return + } + if err = c.read(msgid); err != nil { + return + } + if err = c.read(methodOrError); err != nil { + return + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go new file mode 100644 index 0000000000..d014dbdcc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/rpc.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bufio" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer +// used by the rpc connection. It accomodates use-cases where the connection +// should be used by rpc and non-rpc functions, e.g. streaming a file after +// sending an rpc response. +type RpcCodecBuffered interface { + BufferedReader() *bufio.Reader + BufferedWriter() *bufio.Writer +} + +// ------------------------------------- + +// rpcCodec defines the struct members and common methods. 
+type rpcCodec struct { + rwc io.ReadWriteCloser + dec *Decoder + enc *Encoder + bw *bufio.Writer + br *bufio.Reader + mu sync.Mutex + cls bool +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + bw := bufio.NewWriter(conn) + br := bufio.NewReader(conn) + return rpcCodec{ + rwc: conn, + bw: bw, + br: br, + enc: NewEncoder(bw, h), + dec: NewDecoder(br, h), + } +} + +func (c *rpcCodec) BufferedReader() *bufio.Reader { + return c.br +} + +func (c *rpcCodec) BufferedWriter() *bufio.Writer { + return c.bw +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) { + if c.cls { + return io.EOF + } + if err = c.enc.Encode(obj1); err != nil { + return + } + if writeObj2 { + if err = c.enc.Encode(obj2); err != nil { + return + } + } + if doFlush && c.bw != nil { + return c.bw.Flush() + } + return +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.cls { + return io.EOF + } + //If nil is passed in, we should still attempt to read content to nowhere. + if obj == nil { + var obj2 interface{} + return c.dec.Decode(&obj2) + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) Close() error { + if c.cls { + return io.EOF + } + c.cls = true + return c.rwc.Close() +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered. +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/simple.go b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go new file mode 100644 index 0000000000..9e4d148a2a --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/simple.go @@ -0,0 +1,461 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import "math" + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + h *SimpleHandle + w encWriter + //b [8]byte +} + +func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) { +} + +func (e *simpleEncDriver) encodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) encodeBool(b bool) { + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) encodeFloat32(f float32) { + e.w.writen1(simpleVdFloat32) + e.w.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) encodeFloat64(f float64) { + e.w.writen1(simpleVdFloat64) + e.w.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) encodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) encodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + switch { + case v <= math.MaxUint8: + e.w.writen2(bd, uint8(v)) + case v <= math.MaxUint16: + e.w.writen1(bd + 1) + e.w.writeUint16(uint16(v)) + case v <= math.MaxUint32: + e.w.writen1(bd + 2) + e.w.writeUint32(uint32(v)) + case v <= math.MaxUint64: + e.w.writen1(bd + 3) + e.w.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + switch { + case length == 0: + e.w.writen1(bd) + case length <= math.MaxUint8: + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + case length <= math.MaxUint16: + e.w.writen1(bd + 2) + e.w.writeUint16(uint16(length)) + case int64(length) <= math.MaxUint32: + e.w.writen1(bd + 3) + e.w.writeUint32(uint32(length)) + default: + e.w.writen1(bd + 4) + e.w.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) encodeArrayPreamble(length int) { + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) encodeMapPreamble(length int) { + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) encodeString(c charEncoding, v string) { + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +func (e *simpleEncDriver) encodeSymbol(v string) { + e.encodeString(c_UTF8, v) +} + +func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) { + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + h *SimpleHandle + r decReader + bdRead bool + bdType valueType + bd byte + //b [8]byte +} + +func (d *simpleDecDriver) initReadNext() { + if d.bdRead { + return + } + d.bd = d.r.readn1() + d.bdRead = true + d.bdType = valueTypeUnset +} + +func (d *simpleDecDriver) currentEncodedType() valueType { + if d.bdType == valueTypeUnset { + switch d.bd { + case simpleVdNil: + d.bdType = valueTypeNil + case simpleVdTrue, simpleVdFalse: + d.bdType = valueTypeBool + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + d.bdType = valueTypeUint + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + d.bdType = valueTypeInt + case simpleVdFloat32, simpleVdFloat64: + d.bdType = valueTypeFloat + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + d.bdType = valueTypeString + case simpleVdByteArray, simpleVdByteArray + 1, 
simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + d.bdType = valueTypeBytes + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + d.bdType = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + d.bdType = valueTypeArray + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + d.bdType = valueTypeMap + default: + decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd) + } + } + return d.bdType +} + +func (d *simpleDecDriver) tryDecodeAsNil() bool { + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool { + return false +} + +func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) { +} + +func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) { + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + i = int64(ui) + case simpleVdPosInt + 1: + ui = uint64(d.r.readUint16()) + i = int64(ui) + case simpleVdPosInt + 2: + ui = uint64(d.r.readUint32()) + i = int64(ui) + case simpleVdPosInt + 3: + ui = uint64(d.r.readUint64()) + i = int64(ui) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 1: + ui = uint64(d.r.readUint16()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 2: + ui = uint64(d.r.readUint32()) + i = -(int64(ui)) + neg = true + case simpleVdNegInt + 3: + ui = uint64(d.r.readUint64()) + i = -(int64(ui)) + neg = true + default: + decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + } + // don't do this check, because callers may only want the unsigned value. + // if ui > math.MaxInt64 { + // decErr("decIntAny: Integer out of range for signed int64: %v", ui) + // } + return +} + +func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) { + _, i, _ = d.decIntAny() + checkOverflow(0, i, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) { + ui, i, neg := d.decIntAny() + if neg { + decErr("Assigning negative signed value: %v, to unsigned type", i) + } + checkOverflow(ui, 0, bitsize) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) { + switch d.bd { + case simpleVdFloat32: + f = float64(math.Float32frombits(d.r.readUint32())) + case simpleVdFloat64: + f = math.Float64frombits(d.r.readUint64()) + default: + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + _, i, _ := d.decIntAny() + f = float64(i) + } else { + decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd) + } + } + checkOverflowFloat32(f, chkOverflow32) + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
+func (d *simpleDecDriver) decodeBool() (b bool) { + switch d.bd { + case simpleVdTrue: + b = true + case simpleVdFalse: + default: + decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) readMapLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) readArrayLen() (length int) { + d.bdRead = false + return d.decLen() +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(d.r.readUint16()) + case 3: + ui := uint64(d.r.readUint32()) + checkOverflow(ui, 0, intBitsize) + return int(ui) + case 4: + ui := d.r.readUint64() + checkOverflow(ui, 0, intBitsize) + return int(ui) + } + decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) decodeString() (s string) { + s = string(d.r.readn(d.decLen())) + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) { + if clen := d.decLen(); clen > 0 { + // if no contents in stream, don't update the passed byteslice + if len(bs) != clen { + if len(bs) > clen { + bs = bs[:clen] + } else { + bs = make([]byte, clen) + } + bsOut = bs + changed = true + } + d.r.readb(bs) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag) + } + xbs = d.r.readn(l) + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs, _ = d.decodeBytes(nil) + default: + decErr("Invalid d.vd for extensions (Expecting extensions or byte array). 
Got: 0x%x", d.bd) + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) { + d.initReadNext() + + switch d.bd { + case simpleVdNil: + vt = valueTypeNil + case simpleVdFalse: + vt = valueTypeBool + v = false + case simpleVdTrue: + vt = valueTypeBool + v = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + vt = valueTypeUint + ui, _, _ := d.decIntAny() + v = ui + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + vt = valueTypeInt + _, i, _ := d.decIntAny() + v = i + case simpleVdFloat32: + vt = valueTypeFloat + v = d.decodeFloat(true) + case simpleVdFloat64: + vt = valueTypeFloat + v = d.decodeFloat(false) + case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + vt = valueTypeString + v = d.decodeString() + case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + vt = valueTypeBytes + v, _ = d.decodeBytes(nil) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + vt = valueTypeExt + l := d.decLen() + var re RawExt + re.Tag = d.r.readn1() + re.Data = d.r.readn(l) + v = &re + vt = valueTypeExt + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + vt = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + vt = valueTypeMap + decodeFurther = true + default: + decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceeded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Lenght of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+// SimpleHandle is a Handle for a very simple encoding format.
+//
+// simple is a simplistic codec similar to binc, but not as compact.
+// - Encoding of a value is always preceded by the descriptor byte (bd)
+// - True, false, nil are encoded fully in 1 byte (the descriptor)
+// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
+//   There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
+// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
+// - Lengths of containers (strings, bytes, array, map, extensions)
+//   are encoded in 0, 1, 2, 4 or 8 bytes.
+//   Zero-length containers have no length encoded.
+//   For others, the number of length bytes is given by pow(2, (bd%8)-1).
+// - maps are encoded as [bd] [length] [[key][value]]...
+// - arrays are encoded as [bd] [length] [value]...
+// - extensions are encoded as [bd] [length] [tag] [byte]...
+// - strings/bytearrays are encoded as [bd] [length] [byte]...
+//
+// The full spec will be published soon.
+type SimpleHandle struct {
+	BasicHandle
+}
+
+func (h *SimpleHandle) newEncDriver(w encWriter) encDriver {
+	return &simpleEncDriver{w: w, h: h}
+}
+
+func (h *SimpleHandle) newDecDriver(r decReader) decDriver {
+	return &simpleDecDriver{r: r, h: h}
+}
+
+func (_ *SimpleHandle) writeExt() bool {
+	return true
+}
+
+func (h *SimpleHandle) getBasicHandle() *BasicHandle {
+	return &h.BasicHandle
+}
+
+var _ decDriver = (*simpleDecDriver)(nil)
+var _ encDriver = (*simpleEncDriver)(nil)
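Editor's note: a minimal round-trip sketch using SimpleHandle through the package's public Encoder/Decoder API; the value is illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var h codec.SimpleHandle
	var buf bytes.Buffer

	in := map[string]int{"a": 1}
	// Encode using the simple format described above.
	if err := codec.NewEncoder(&buf, &h).Encode(in); err != nil {
		panic(err)
	}

	var out map[string]int
	if err := codec.NewDecoder(&buf, &h).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["a"]) // 1
}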
diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/time.go b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
new file mode 100644
index 0000000000..c86d65328d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-msgpack/codec/time.go
@@ -0,0 +1,193 @@
+// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a BSD-style license found in the LICENSE file.
+
+package codec
+
+import (
+	"time"
+)
+
+var (
+	timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+)
+
+// encodeTime encodes a time.Time as a []byte, including
+// information on the instant in time and UTC offset.
+//
+// Format Description
+//
+// A timestamp is composed of 3 components:
+//
+// - secs: signed integer representing seconds since unix epoch
+// - nsecs: unsigned integer representing fractional seconds as a
+//   nanosecond offset within secs, in the range 0 <= nsecs < 1e9
+// - tz: signed integer representing timezone offset in minutes east of UTC,
+//   and a dst (daylight saving time) flag
+//
+// When encoding a timestamp, the first byte is the descriptor, which
+// defines which components are encoded and how many bytes are used to
+// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
+// is not encoded in the byte array explicitly*.
+//
+// Descriptor 8 bits are of the form `A B C DDD EE`:
+//   A:   Is secs component encoded? 1 = true
+//   B:   Is nsecs component encoded? 1 = true
+//   C:   Is tz component encoded? 1 = true
+//   DDD: Number of extra bytes for secs (range 0-7).
+//        If A = 1, secs encoded in DDD+1 bytes.
+//        If A = 0, secs is not encoded, and is assumed to be 0.
+//        If A = 1, then we need at least 1 byte to encode secs.
+//        DDD says the number of extra bytes beyond that 1.
+//        E.g. if DDD=0, then secs is represented in 1 byte.
+//             if DDD=2, then secs is represented in 3 bytes.
+//   EE:  Number of extra bytes for nsecs (range 0-3).
+//        If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
+//
+// Following the descriptor bytes, subsequent bytes are:
+//
+//   secs component encoded in `DDD + 1` bytes (if A == 1)
+//   nsecs component encoded in `EE + 1` bytes (if B == 1)
+//   tz component encoded in 2 bytes (if C == 1)
+//
+// secs and nsecs components are integers encoded in a BigEndian
+// two's-complement encoding format.
+//
+// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
+// least significant bit 0 are described below:
+//
+//   Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
+//   Bit 15 = have_dst: set to 1 if we set the dst flag.
+//   Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
+//   Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
+//
+func encodeTime(t time.Time) []byte {
+	//t := rv.Interface().(time.Time)
+	tsecs, tnsecs := t.Unix(), t.Nanosecond()
+	var (
+		bd   byte
+		btmp [8]byte
+		bs   [16]byte
+		i    int = 1
+	)
+	l := t.Location()
+	if l == time.UTC {
+		l = nil
+	}
+	if tsecs != 0 {
+		bd = bd | 0x80
+		bigen.PutUint64(btmp[:], uint64(tsecs))
+		f := pruneSignExt(btmp[:], tsecs >= 0)
+		bd = bd | (byte(7-f) << 2)
+		copy(bs[i:], btmp[f:])
+		i = i + (8 - f)
+	}
+	if tnsecs != 0 {
+		bd = bd | 0x40
+		bigen.PutUint32(btmp[:4], uint32(tnsecs))
+		f := pruneSignExt(btmp[:4], true)
+		bd = bd | byte(3-f)
+		copy(bs[i:], btmp[f:4])
+		i = i + (4 - f)
+	}
+	if l != nil {
+		bd = bd | 0x20
+		// Note that Go Libs do not give access to dst flag.
+		_, zoneOffset := t.Zone()
+		//zoneName, zoneOffset := t.Zone()
+		zoneOffset /= 60
+		z := uint16(zoneOffset)
+		bigen.PutUint16(btmp[:2], z)
+		// clear dst flags
+		bs[i] = btmp[0] & 0x3f
+		bs[i+1] = btmp[1]
+		i = i + 2
+	}
+	bs[0] = bd
+	return bs[0:i]
+}
+
+// decodeTime decodes a []byte into a time.Time.
+func decodeTime(bs []byte) (tt time.Time, err error) {
+	bd := bs[0]
+	var (
+		tsec  int64
+		tnsec uint32
+		tz    uint16
+		i     byte = 1
+		i2    byte
+		n     byte
+	)
+	if bd&(1<<7) != 0 {
+		var btmp [8]byte
+		n = ((bd >> 2) & 0x7) + 1
+		i2 = i + n
+		copy(btmp[8-n:], bs[i:i2])
+		// if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
+		if bs[i]&(1<<7) != 0 {
+			copy(btmp[0:8-n], bsAll0xff)
+			//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
+		}
+		i = i2
+		tsec = int64(bigen.Uint64(btmp[:]))
+	}
+	if bd&(1<<6) != 0 {
+		var btmp [4]byte
+		n = (bd & 0x3) + 1
+		i2 = i + n
+		copy(btmp[4-n:], bs[i:i2])
+		i = i2
+		tnsec = bigen.Uint32(btmp[:])
+	}
+	if bd&(1<<5) == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+		return
+	}
+	// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
+	// However, we need name here, so it can be shown when time is printed.
+	// Zone name is in form: UTC-08:00.
+	// Note that Go Libs do not give access to dst flag, so we ignore dst bits
+
+	i2 = i + 2
+	tz = bigen.Uint16(bs[i:i2])
+	i = i2
+	// sign extend sign bit into top 2 MSB (which were dst bits):
+	if tz&(1<<13) == 0 { // positive
+		tz = tz & 0x3fff //clear 2 MSBs: dst bits
+	} else { // negative
+		tz = tz | 0xc000 //set 2 MSBs: dst bits
+		//tzname[3] = '-' (TODO: verify. this works here)
+	}
+	tzint := int16(tz)
+	if tzint == 0 {
+		tt = time.Unix(tsec, int64(tnsec)).UTC()
+	} else {
+		// For Go Time, do not use a descriptive timezone.
+		// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
+		// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
+		// var zoneName = timeLocUTCName(tzint)
+		tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
+	}
+	return
+}
+
+func timeLocUTCName(tzint int16) string {
+	if tzint == 0 {
+		return "UTC"
+	}
+	var tzname = []byte("UTC+00:00")
+	//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
+	//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
+	var tzhr, tzmin int16
+	if tzint < 0 {
+		tzname[3] = '-' // (TODO: verify.
this works here) + tzhr, tzmin = -tzint/60, (-tzint)%60 + } else { + tzhr, tzmin = tzint/60, tzint%60 + } + tzname[4] = timeDigits[tzhr/10] + tzname[5] = timeDigits[tzhr%10] + tzname[7] = timeDigits[tzmin/10] + tzname[8] = timeDigits[tzmin%10] + return string(tzname) + //return time.FixedZone(string(tzname), int(tzint)*60) +} diff --git a/vendor/github.com/hashicorp/go-multierror/LICENSE b/vendor/github.com/hashicorp/go-multierror/LICENSE new file mode 100644 index 0000000000..82b4de97c7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/LICENSE @@ -0,0 +1,353 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/go-multierror/append.go b/vendor/github.com/hashicorp/go-multierror/append.go
new file mode 100644
index 0000000000..775b6e753e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/append.go
@@ -0,0 +1,41 @@
+package multierror
+
+// Append is a helper function that will append more errors
+// onto an Error in order to create a larger multi-error.
+//
+// If err is not a multierror.Error, then it will be turned into
+// one. If any of the errs are multierror.Error, they will be flattened
+// one level into err.
+func Append(err error, errs ...error) *Error {
+	switch err := err.(type) {
+	case *Error:
+		// Typed nils can reach here, so initialize if we are nil
+		if err == nil {
+			err = new(Error)
+		}
+
+		// Go through each error and flatten
+		for _, e := range errs {
+			switch e := e.(type) {
+			case *Error:
+				if e != nil {
+					err.Errors = append(err.Errors, e.Errors...)
+				}
+			default:
+				if e != nil {
+					err.Errors = append(err.Errors, e)
+				}
+			}
+		}
+
+		return err
+	default:
+		newErrs := make([]error, 0, len(errs)+1)
+		if err != nil {
+			newErrs = append(newErrs, err)
+		}
+		newErrs = append(newErrs, errs...)
+
+		return Append(&Error{}, newErrs...)
+	}
+}
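Editor's note: the typical accumulation pattern this package is built around, sketched with hypothetical validation errors (ErrorOrNil is defined later in this diff, in multierror.go):

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// validate is a hypothetical function that accumulates independent checks.
func validate() error {
	var result *multierror.Error
	result = multierror.Append(result, errors.New("name is required"))
	result = multierror.Append(result, errors.New("port out of range"))
	// ErrorOrNil returns nil if nothing was appended.
	return result.ErrorOrNil()
}

func main() {
	if err := validate(); err != nil {
		fmt.Println(err) // prints "2 errors occurred:" plus a bulleted list
	}
}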
diff --git a/vendor/github.com/hashicorp/go-multierror/flatten.go b/vendor/github.com/hashicorp/go-multierror/flatten.go
new file mode 100644
index 0000000000..aab8e9abec
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/flatten.go
@@ -0,0 +1,26 @@
+package multierror
+
+// Flatten flattens the given error, merging any *Errors together into
+// a single *Error.
+func Flatten(err error) error {
+	// If it isn't an *Error, just return the error as-is
+	if _, ok := err.(*Error); !ok {
+		return err
+	}
+
+	// Otherwise, make the result and flatten away!
+	flatErr := new(Error)
+	flatten(err, flatErr)
+	return flatErr
+}
+
+func flatten(err error, flatErr *Error) {
+	switch err := err.(type) {
+	case *Error:
+		for _, e := range err.Errors {
+			flatten(e, flatErr)
+		}
+	default:
+		flatErr.Errors = append(flatErr.Errors, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/format.go b/vendor/github.com/hashicorp/go-multierror/format.go
new file mode 100644
index 0000000000..6c7a3cc91d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/format.go
@@ -0,0 +1,27 @@
+package multierror
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ErrorFormatFunc is a function callback that is called by Error to
+// turn the list of errors into a string.
+type ErrorFormatFunc func([]error) string
+
+// ListFormatFunc is a basic formatter that outputs the number of errors
+// that occurred along with a bullet point list of the errors.
+func ListFormatFunc(es []error) string {
+	if len(es) == 1 {
+		return fmt.Sprintf("1 error occurred:\n\n* %s", es[0])
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\n%s",
+		len(es), strings.Join(points, "\n"))
+}
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
new file mode 100644
index 0000000000..89b1422d1d
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -0,0 +1,51 @@
+package multierror
+
+import (
+	"fmt"
+)
+
+// Error is an error type to track multiple errors. This is used to
+// accumulate errors and return them as a single "error" value.
+type Error struct {
+	Errors      []error
+	ErrorFormat ErrorFormatFunc
+}
+
+func (e *Error) Error() string {
+	fn := e.ErrorFormat
+	if fn == nil {
+		fn = ListFormatFunc
+	}
+
+	return fn(e.Errors)
+}
+
+// ErrorOrNil returns an error interface if this Error represents
+// a list of errors, or returns nil if the list of errors is empty. This
+// function is useful at the end of accumulation to make sure that the value
+// returned represents the existence of errors.
+func (e *Error) ErrorOrNil() error {
+	if e == nil {
+		return nil
+	}
+	if len(e.Errors) == 0 {
+		return nil
+	}
+
+	return e
+}
+
+func (e *Error) GoString() string {
+	return fmt.Sprintf("*%#v", *e)
+}
+
+// WrappedErrors returns the list of errors that this Error is wrapping.
+// It is an implementation of the errwrap.Wrapper interface so that
+// multierror.Error can be used with that library.
+//
+// This method is not safe to be called concurrently and is no different
+// than accessing the Errors field directly.
It is implemented only to +// satisfy the errwrap.Wrapper interface. +func (e *Error) WrappedErrors() []error { + return e.Errors +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix.go b/vendor/github.com/hashicorp/go-multierror/prefix.go new file mode 100644 index 0000000000..5c477abe44 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix.go @@ -0,0 +1,37 @@ +package multierror + +import ( + "fmt" + + "github.com/hashicorp/errwrap" +) + +// Prefix is a helper function that will prefix some text +// to the given error. If the error is a multierror.Error, then +// it will be prefixed to each wrapped error. +// +// This is useful to use when appending multiple multierrors +// together in order to give better scoping. +func Prefix(err error, prefix string) error { + if err == nil { + return nil + } + + format := fmt.Sprintf("%s {{err}}", prefix) + switch err := err.(type) { + case *Error: + // Typed nils can reach here, so initialize if we are nil + if err == nil { + err = new(Error) + } + + // Wrap each of the errors + for i, e := range err.Errors { + err.Errors[i] = errwrap.Wrapf(format, e) + } + + return err + default: + return errwrap.Wrapf(format, err) + } +} diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE new file mode 100644 index 0000000000..e87a115e46 --- /dev/null +++ b/vendor/github.com/hashicorp/go-rootcerts/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. 
"Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
new file mode 100644
index 0000000000..b55cc62848
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/doc.go
@@ -0,0 +1,9 @@
+// Package rootcerts contains functions to aid in loading CA certificates for
+// TLS connections.
+//
+// In addition, its default behavior on Darwin works around an open issue [1]
+// in Go's crypto/x509 that prevents certificates from being loaded from the
+// System or Login keychains.
+//
+// [1] https://github.com/golang/go/issues/14514
+package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
new file mode 100644
index 0000000000..aeb30ece32
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
@@ -0,0 +1,103 @@
+package rootcerts
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// Config determines where LoadCACerts will load certificates from. When both
+// CAFile and CAPath are blank, this library's functions will either load
+// system roots explicitly and return them, or set the CertPool to nil to allow
+// Go's standard library to load system certs.
+type Config struct {
+	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
+	// precedence over CAPath.
+	CAFile string
+
+	// CAPath is a path to a directory populated with PEM-encoded certificates.
+	CAPath string
+}
+
+// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
+// Config specified.
+func ConfigureTLS(t *tls.Config, c *Config) error {
+	if t == nil {
+		return nil
+	}
+	pool, err := LoadCACerts(c)
+	if err != nil {
+		return err
+	}
+	t.RootCAs = pool
+	return nil
+}
+
+// LoadCACerts loads a CertPool based on the Config specified.
+func LoadCACerts(c *Config) (*x509.CertPool, error) {
+	if c == nil {
+		c = &Config{}
+	}
+	if c.CAFile != "" {
+		return LoadCAFile(c.CAFile)
+	}
+	if c.CAPath != "" {
+		return LoadCAPath(c.CAPath)
+	}
+
+	return LoadSystemCAs()
+}
+
+// LoadCAFile loads a single PEM-encoded file from the path specified.
+func LoadCAFile(caFile string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("Error loading CA File: %s", err)
+	}
+
+	ok := pool.AppendCertsFromPEM(pem)
+	if !ok {
+		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
+	}
+
+	return pool, nil
+}
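Editor's note: the intended call pattern for the functions above, sketched with a hypothetical CA bundle path (the path and newClient helper are illustrative, not part of this change):

package main

import (
	"crypto/tls"
	"net/http"

	rootcerts "github.com/hashicorp/go-rootcerts"
)

func newClient() (*http.Client, error) {
	tlsConfig := &tls.Config{}
	// CAFile here is a hypothetical path, not something this change ships.
	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: "/etc/ssl/custom/ca-bundle.pem",
	})
	if err != nil {
		return nil, err
	}
	return &http.Client{
		Transport: &http.Transport{TLSClientConfig: tlsConfig},
	}, nil
}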
+// LoadCAPath walks the provided path and loads all certificates encountered into
+// a pool.
+func LoadCAPath(caPath string) (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+	walkFn := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		pem, err := ioutil.ReadFile(path)
+		if err != nil {
+			return fmt.Errorf("Error loading file from CAPath: %s", err)
+		}
+
+		ok := pool.AppendCertsFromPEM(pem)
+		if !ok {
+			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
+		}
+
+		return nil
+	}
+
+	err := filepath.Walk(caPath, walkFn)
+	if err != nil {
+		return nil, err
+	}
+
+	return pool, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
new file mode 100644
index 0000000000..66b1472c4a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
@@ -0,0 +1,12 @@
+// +build !darwin
+
+package rootcerts
+
+import "crypto/x509"
+
+// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
+// default behavior of standard TLS config libraries is triggered, which is to
+// load system certs.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	return nil, nil
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
new file mode 100644
index 0000000000..a9a040657f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
@@ -0,0 +1,48 @@
+package rootcerts
+
+import (
+	"crypto/x509"
+	"os/exec"
+	"path"
+
+	"github.com/mitchellh/go-homedir"
+)
+
+// LoadSystemCAs has special behavior on Darwin systems: it shells out to the
+// `security` tool to read certificates from the system keychains, working
+// around the Go crypto/x509 issue referenced in doc.go.
+func LoadSystemCAs() (*x509.CertPool, error) {
+	pool := x509.NewCertPool()
+
+	for _, keychain := range certKeychains() {
+		err := addCertsFromKeychain(pool, keychain)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return pool, nil
+}
+
+func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
+	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
+	data, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+
+	pool.AppendCertsFromPEM(data)
+
+	return nil
+}
+
+func certKeychains() []string {
+	keychains := []string{
+		"/System/Library/Keychains/SystemRootCertificates.keychain",
+		"/Library/Keychains/System.keychain",
+	}
+	home, err := homedir.Dir()
+	if err == nil {
+		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
+		keychains = append(keychains, loginKeychain)
+	}
+	return keychains
+}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
new file mode 120000
index 0000000000..dda0574d7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/securetrust.pem
@@ -0,0 +1 @@
+../capath/securetrust.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
new file mode 120000
index 0000000000..37ed4f01a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-rootcerts/test-fixtures/capath-with-symlinks/thawte.pem
@@ -0,0 +1 @@
+../capath/thawte.pem
\ No newline at end of file
diff --git a/vendor/github.com/hashicorp/go-sockaddr/LICENSE b/vendor/github.com/hashicorp/go-sockaddr/LICENSE
new file mode 100644
index 0000000000..a612ad9813
---
/dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. 
However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. 
* +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/hashicorp/go-sockaddr/doc.go b/vendor/github.com/hashicorp/go-sockaddr/doc.go new file mode 100644 index 0000000000..90671deb51 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/doc.go @@ -0,0 +1,5 @@ +/* +Package sockaddr is a Go implementation of the UNIX socket family data types and +related helper functions. 
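+
+As an illustrative sketch of the API (an editorial usage sketch, not part of
+the upstream docs; see ifaddrs.go in this package for the full surface):
+
+	ifAddrs, _ := GetAllInterfaces()
+	ipIfs, _ := FilterIfByType(ifAddrs, TypeIP)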
+*/
+package sockaddr
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
new file mode 100644
index 0000000000..0811b27599
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr.go
@@ -0,0 +1,254 @@
+package sockaddr
+
+import "strings"
+
+// ifAddrAttrMap is a map of the IfAddr type-specific attributes.
+var ifAddrAttrMap map[AttrName]func(IfAddr) string
+var ifAddrAttrs []AttrName
+
+func init() {
+	ifAddrAttrInit()
+}
+
+// GetPrivateIP returns a string with a single IP address that is part of RFC
+// 6890 and has a default route. If the system can't determine its IP address
+// or find an RFC 6890 IP address, an empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPrivateInterfaces | attr "address"}}'
+// ```
+func GetPrivateIP() (string, error) {
+	privateIfs, err := GetPrivateInterfaces()
+	if err != nil {
+		return "", err
+	}
+	if len(privateIfs) < 1 {
+		return "", nil
+	}
+
+	ifAddr := privateIfs[0]
+	ip := *ToIPAddr(ifAddr.SockAddr)
+	return ip.NetIP().String(), nil
+}
+
+// GetPrivateIPs returns a string with all IP addresses that are part of RFC
+// 6890 (regardless of whether or not there is a default route, unlike
+// GetPrivateIP). If the system can't find any RFC 6890 IP addresses, an empty
+// string will be returned instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPrivateIPs() (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) < 1 {
+		return "", nil
+	}
+
+	ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+	ifAddrs, _, err = IfByRFC("6890", ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	_, ifAddrs, err = IfByRFC(ForwardingBlacklistRFC, ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// GetPublicIP returns a string with a single IP address that is NOT part of
+// RFC 6890 and has a default route. If the system can't determine its IP
+// address or find a non RFC 6890 IP address, an empty string will be returned
+// instead. This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetPublicInterfaces | attr "address"}}'
+// ```
+func GetPublicIP() (string, error) {
+	publicIfs, err := GetPublicInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(publicIfs) < 1 {
+		return "", nil
+	}
+
+	ifAddr := publicIfs[0]
+	ip := *ToIPAddr(ifAddr.SockAddr)
+	return ip.NetIP().String(), nil
+}
+
+// GetPublicIPs returns a string with all IP addresses that are NOT part of
+// RFC 6890 (regardless of whether or not there is a default route, unlike
+// GetPublicIP). If the system can't find any non RFC 6890 IP addresses, an
+// empty string will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | exclude "RFC" "6890" | join "address" " "}}'
+// ```
+func GetPublicIPs() (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) < 1 {
+		return "", nil
+	}
+
+	ifAddrs, _ = FilterIfByType(ifAddrs, TypeIP)
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	OrderedIfAddrBy(AscIfType, AscIfNetworkSize).Sort(ifAddrs)
+
+	_, ifAddrs, err = IfByRFC("6890", ifAddrs)
+	if err != nil {
+		return "", err
+	} else if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// GetInterfaceIP returns a string with a single IP address sorted by the size
+// of the network (i.e. IP addresses with a smaller netmask, larger network
+// size, are sorted first). This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | include "flag" "forwardable" | attr "address" }}'
+// ```
+func GetInterfaceIP(namedIfRE string) (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByFlag("forwardable", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ip := ToIPAddr(ifAddrs[0].SockAddr)
+	if ip == nil {
+		return "", nil
+	}
+
+	return IPAddrAttr(*ip, "address"), nil
+}
+
+// GetInterfaceIPs returns a string with all IPs, sorted by the size of the
+// network (i.e. IP addresses with a smaller netmask, larger network size, are
+// sorted first), on a named interface. This function is the `eval` equivalent
+// of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "name" <> | sort "type,size" | join "address" " "}}'
+// ```
+func GetInterfaceIPs(namedIfRE string) (string, error) {
+	ifAddrs, err := GetAllInterfaces()
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, _, err = IfByName(namedIfRE, ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	ifAddrs, err = SortIfBy("+type,+size", ifAddrs)
+	if err != nil {
+		return "", err
+	}
+
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	ips := make([]string, 0, len(ifAddrs))
+	for _, ifAddr := range ifAddrs {
+		ip := *ToIPAddr(ifAddr.SockAddr)
+		s := ip.NetIP().String()
+		ips = append(ips, s)
+	}
+
+	return strings.Join(ips, " "), nil
+}
+
+// IfAddrAttrs returns a list of attributes supported by the IfAddr type
+func IfAddrAttrs() []AttrName {
+	return ifAddrAttrs
+}
+
+// IfAddrAttr returns a string representation of an attribute for the given
+// IfAddr.
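+//
+// For example (a usage sketch; the attribute names are the ones registered in
+// ifAddrAttrInit below):
+//
+//	name := IfAddrAttr(ifAddr, "name")   // e.g. "eth0"
+//	flags := IfAddrAttr(ifAddr, "flags") // e.g. "up|broadcast|multicast"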
+func IfAddrAttr(ifAddr IfAddr, attrName AttrName) string {
+	fn, found := ifAddrAttrMap[attrName]
+	if !found {
+		return ""
+	}
+
+	return fn(ifAddr)
+}
+
+// ifAddrAttrInit is called once at init()
+func ifAddrAttrInit() {
+	// Sorted for human readability
+	ifAddrAttrs = []AttrName{
+		"flags",
+		"name",
+	}
+
+	ifAddrAttrMap = map[AttrName]func(ifAddr IfAddr) string{
+		"flags": func(ifAddr IfAddr) string {
+			return ifAddr.Interface.Flags.String()
+		},
+		"name": func(ifAddr IfAddr) string {
+			return ifAddr.Interface.Name
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
new file mode 100644
index 0000000000..2a706c34e9
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs.go
@@ -0,0 +1,1281 @@
+package sockaddr
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	// Centralize all regexps and regexp.Copy() where necessary.
+	signRE       *regexp.Regexp = regexp.MustCompile(`^[\s]*[+-]`)
+	whitespaceRE *regexp.Regexp = regexp.MustCompile(`[\s]+`)
+	ifNameRE     *regexp.Regexp = regexp.MustCompile(`^(?:Ethernet|Wireless LAN) adapter ([^:]+):`)
+	ipAddrRE     *regexp.Regexp = regexp.MustCompile(`^   IPv[46] Address\. \. \. \. \. \. \. \. \. \. \. : ([^\s]+)`)
+)
+
+// IfAddrs is a slice of IfAddr
+type IfAddrs []IfAddr
+
+func (ifs IfAddrs) Len() int { return len(ifs) }
+
+// CmpIfAddrFunc is the function signature that must be met to be used in the
+// OrderedIfAddrBy multiIfAddrSorter
+type CmpIfAddrFunc func(p1, p2 *IfAddr) int
+
+// multiIfAddrSorter implements the Sort interface, sorting the IfAddrs within.
+type multiIfAddrSorter struct {
+	ifAddrs IfAddrs
+	cmp     []CmpIfAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedIfAddrBy.
+func (ms *multiIfAddrSorter) Sort(ifAddrs IfAddrs) {
+	ms.ifAddrs = ifAddrs
+	sort.Sort(ms)
+}
+
+// OrderedIfAddrBy sorts IfAddrs by the list of sort function pointers.
+func OrderedIfAddrBy(cmpFuncs ...CmpIfAddrFunc) *multiIfAddrSorter {
+	return &multiIfAddrSorter{
+		cmp: cmpFuncs,
+	}
+}
+
+// Len is part of sort.Interface.
+func (ms *multiIfAddrSorter) Len() int {
+	return len(ms.ifAddrs)
+}
+
+// Less is part of sort.Interface. It is implemented by looping along the Cmp()
+// functions until it finds a comparison that is either less than or greater
+// than. A return value of 0 defers sorting to the next function in the
+// multisorter (which means the results of sorting may leave the results in a
+// non-deterministic order).
+func (ms *multiIfAddrSorter) Less(i, j int) bool {
+	p, q := &ms.ifAddrs[i], &ms.ifAddrs[j]
+	// Try all but the last comparison.
+	var k int
+	for k = 0; k < len(ms.cmp)-1; k++ {
+		cmp := ms.cmp[k]
+		x := cmp(p, q)
+		switch x {
+		case -1:
+			// p < q, so we have a decision.
+			return true
+		case 1:
+			// p > q, so we have a decision.
+			return false
+		}
+		// p == q; try the next comparison.
+	}
+	// All comparisons to here said "equal", so just return whatever the
+	// final comparison reports.
+	switch ms.cmp[k](p, q) {
+	case -1:
+		return true
+	case 1:
+		return false
+	default:
+		// Still a tie: the sort order for the remaining items is
+		// undefined, so report "not less".
+		return false
+	}
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiIfAddrSorter) Swap(i, j int) {
+	ms.ifAddrs[i], ms.ifAddrs[j] = ms.ifAddrs[j], ms.ifAddrs[i]
+}
+
+// AscIfAddress is a sorting function to sort IfAddrs by their respective
+// addresses. Non-equal types are deferred in the sort.
+func AscIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+	return AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfDefault is a sorting function to sort IfAddrs by whether or not they
+// have a default route. Non-equal types are deferred in the sort.
+//
+// FIXME: This is a particularly expensive sorting operation because of the
+// non-memoized calls to NewRouteInfo(). In an ideal world the routeInfo data
+// would be calculated once at the start of the sort and passed along as a
+// context, or the IfAddr type would be wrapped with this information (which
+// would also solve the inability to return errors and the possibility of
+// failing silently). Fortunately, N*log(N) where N = 3 is only ~6.2
+// invocations. Not ideal, but not worth optimizing today. The common case is
+// this gets called once or twice. Patches welcome.
+func AscIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+	ri, err := NewRouteInfo()
+	if err != nil {
+		return sortDeferDecision
+	}
+
+	defaultIfName, err := ri.GetDefaultInterfaceName()
+	if err != nil {
+		return sortDeferDecision
+	}
+
+	switch {
+	case p1Ptr.Interface.Name == defaultIfName && p2Ptr.Interface.Name == defaultIfName:
+		return sortDeferDecision
+	case p1Ptr.Interface.Name == defaultIfName:
+		return sortReceiverBeforeArg
+	case p2Ptr.Interface.Name == defaultIfName:
+		return sortArgBeforeReceiver
+	default:
+		return sortDeferDecision
+	}
+}
+
+// AscIfName is a sorting function to sort IfAddrs by their interface names.
+func AscIfName(p1Ptr, p2Ptr *IfAddr) int {
+	return strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// AscIfNetworkSize is a sorting function to sort IfAddrs by their respective
+// network mask size.
+func AscIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+	return AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPort is a sorting function to sort IfAddrs by their respective
+// port type. Non-equal types are deferred in the sort.
+func AscIfPort(p1Ptr, p2Ptr *IfAddr) int {
+	return AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfPrivate is a sorting function to sort IfAddrs by "private" values
+// before "public" values. Both IPv4 and IPv6 are compared against RFC6890
+// (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for IPv4, and
+// IPv6 includes RFC4193).
+func AscIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+	return AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// AscIfType is a sorting function to sort IfAddrs by their respective address
+// type. Non-equal types are deferred in the sort.
+func AscIfType(p1Ptr, p2Ptr *IfAddr) int {
+	return AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfAddress is identical to AscIfAddress but reverse ordered.
+func DescIfAddress(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscAddress(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfDefault is identical to AscIfDefault but reverse ordered.
+func DescIfDefault(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscIfDefault(p1Ptr, p2Ptr)
+}
+
+// DescIfName is identical to AscIfName but reverse ordered.
+func DescIfName(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * strings.Compare(p1Ptr.Name, p2Ptr.Name)
+}
+
+// DescIfNetworkSize is identical to AscIfNetworkSize but reverse ordered.
+func DescIfNetworkSize(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscNetworkSize(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPort is identical to AscIfPort but reverse ordered.
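+//
+// Like the other comparators above, it is meant to be composed with
+// OrderedIfAddrBy, e.g. (a usage sketch):
+//
+//	OrderedIfAddrBy(DescIfPort, AscIfName).Sort(ifAddrs)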
+func DescIfPort(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscPort(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfPrivate is identical to AscIfPrivate but reverse ordered.
+func DescIfPrivate(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscPrivate(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// DescIfType is identical to AscIfType but reverse ordered.
+func DescIfType(p1Ptr, p2Ptr *IfAddr) int {
+	return -1 * AscType(&p1Ptr.SockAddr, &p2Ptr.SockAddr)
+}
+
+// FilterIfByType filters IfAddrs and returns a list of the matching type
+func FilterIfByType(ifAddrs IfAddrs, type_ SockAddrType) (matchedIfs, excludedIfs IfAddrs) {
+	excludedIfs = make(IfAddrs, 0, len(ifAddrs))
+	matchedIfs = make(IfAddrs, 0, len(ifAddrs))
+
+	for _, ifAddr := range ifAddrs {
+		if ifAddr.SockAddr.Type()&type_ != 0 {
+			matchedIfs = append(matchedIfs, ifAddr)
+		} else {
+			excludedIfs = append(excludedIfs, ifAddr)
+		}
+	}
+	return matchedIfs, excludedIfs
+}
+
+// IfAttr forwards the selector to IfAddr.Attr() for resolution.
+func IfAttr(selectorName string, ifAddr IfAddr) (string, error) {
+	attrName := AttrName(strings.ToLower(selectorName))
+	attrVal, err := ifAddr.Attr(attrName)
+	return attrVal, err
+}
+
+// IfAttrs forwards the selector to IfAddr.Attr() for resolution. If there is
+// more than one IfAddr, only the first IfAddr is used.
+func IfAttrs(selectorName string, ifAddrs IfAddrs) (string, error) {
+	if len(ifAddrs) == 0 {
+		return "", nil
+	}
+
+	attrName := AttrName(strings.ToLower(selectorName))
+	attrVal, err := ifAddrs[0].Attr(attrName)
+	return attrVal, err
+}
+
+// GetAllInterfaces iterates over all available network interfaces, finds all
+// available IP addresses on each interface, converts them to
+// sockaddr.IPAddrs, and returns the result as an array of IfAddr.
+func GetAllInterfaces() (IfAddrs, error) {
+	ifs, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	ifAddrs := make(IfAddrs, 0, len(ifs))
+	for _, intf := range ifs {
+		addrs, err := intf.Addrs()
+		if err != nil {
+			return nil, err
+		}
+
+		for _, addr := range addrs {
+			var ipAddr IPAddr
+			ipAddr, err = NewIPAddr(addr.String())
+			if err != nil {
+				return IfAddrs{}, fmt.Errorf("unable to create an IP address from %q", addr.String())
+			}
+
+			ifAddr := IfAddr{
+				SockAddr:  ipAddr,
+				Interface: intf,
+			}
+			ifAddrs = append(ifAddrs, ifAddr)
+		}
+	}
+
+	return ifAddrs, nil
+}
+
+// GetDefaultInterfaces returns IfAddrs of the addresses attached to the
+// default route.
+func GetDefaultInterfaces() (IfAddrs, error) {
+	ri, err := NewRouteInfo()
+	if err != nil {
+		return nil, err
+	}
+
+	defaultIfName, err := ri.GetDefaultInterfaceName()
+	if err != nil {
+		return nil, err
+	}
+
+	var defaultIfs, ifAddrs IfAddrs
+	ifAddrs, err = GetAllInterfaces()
+	if err != nil {
+		return nil, err
+	}
+	for _, ifAddr := range ifAddrs {
+		if ifAddr.Name == defaultIfName {
+			defaultIfs = append(defaultIfs, ifAddr)
+		}
+	}
+
+	return defaultIfs, nil
+}
+
+// GetPrivateInterfaces returns the IfAddrs that are part of RFC 6890 and have
+// a default route. If the system can't determine its IP address or find an
+// RFC 6890 IP address, an empty IfAddrs will be returned instead.
+// This function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | include "RFC" "6890" }}'
+// ```
+func GetPrivateInterfaces() (IfAddrs, error) {
+	privateIfs, err := GetAllInterfaces()
+	if err != nil {
+		return IfAddrs{}, err
+	}
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	privateIfs, _ = FilterIfByType(privateIfs, TypeIP)
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	privateIfs, _, err = IfByFlag("forwardable", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	privateIfs, _, err = IfByFlag("up", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(privateIfs)
+
+	privateIfs, _, err = IfByRFC("6890", privateIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	} else if len(privateIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	return privateIfs, nil
+}
+
+// GetPublicInterfaces returns the IfAddrs that are NOT part of RFC 6890 and
+// have a default route. If the system can't determine its IP address or find
+// a non RFC 6890 IP address, an empty IfAddrs will be returned instead. This
+// function is the `eval` equivalent of:
+//
+// ```
+// $ sockaddr eval -r '{{GetAllInterfaces | include "type" "ip" | include "flags" "forwardable" | include "flags" "up" | sort "default,type,size" | exclude "RFC" "6890" }}'
+// ```
+func GetPublicInterfaces() (IfAddrs, error) {
+	publicIfs, err := GetAllInterfaces()
+	if err != nil {
+		return IfAddrs{}, err
+	}
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	publicIfs, _ = FilterIfByType(publicIfs, TypeIP)
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	publicIfs, _, err = IfByFlag("forwardable", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	publicIfs, _, err = IfByFlag("up", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	OrderedIfAddrBy(AscIfDefault, AscIfType, AscIfNetworkSize).Sort(publicIfs)
+
+	_, publicIfs, err = IfByRFC("6890", publicIfs)
+	if err != nil {
+		return IfAddrs{}, err
+	} else if len(publicIfs) == 0 {
+		return IfAddrs{}, nil
+	}
+
+	return publicIfs, nil
+}
+
+// IfByAddress returns a list of matched and non-matched IfAddrs, or an error
+// if the regexp fails to compile.
+func IfByAddress(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+	re, err := regexp.Compile(inputRe)
+	if err != nil {
+		return nil, nil, fmt.Errorf("Unable to compile address regexp %+q: %v", inputRe, err)
+	}
+
+	matchedAddrs := make(IfAddrs, 0, len(ifAddrs))
+	excludedAddrs := make(IfAddrs, 0, len(ifAddrs))
+	for _, addr := range ifAddrs {
+		if re.MatchString(addr.SockAddr.String()) {
+			matchedAddrs = append(matchedAddrs, addr)
+		} else {
+			excludedAddrs = append(excludedAddrs, addr)
+		}
+	}
+
+	return matchedAddrs, excludedAddrs, nil
+}
+
+// IfByName returns a list of matched and non-matched IfAddrs, or an error if
+// the regexp fails to compile.
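+//
+// For example (a usage sketch; the name pattern is hypothetical):
+//
+//	ethIfs, rest, err := IfByName(`^eth\d+$`, ifAddrs)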
+func IfByName(inputRe string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile name regexp %+q: %v", inputRe, err) + } + + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + for _, addr := range ifAddrs { + if re.MatchString(addr.Name) { + matchedAddrs = append(matchedAddrs, addr) + } else { + excludedAddrs = append(excludedAddrs, addr) + } + } + + return matchedAddrs, excludedAddrs, nil +} + +// IfByPort returns a list of matched and non-matched IfAddrs, or an error if +// the regexp fails to compile. +func IfByPort(inputRe string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) { + re, err := regexp.Compile(inputRe) + if err != nil { + return nil, nil, fmt.Errorf("Unable to compile port regexp %+q: %v", inputRe, err) + } + + ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP) + matchedIfs = make(IfAddrs, 0, len(ipIfs)) + excludedIfs = append(IfAddrs(nil), nonIfs...) + for _, addr := range ipIfs { + ipAddr := ToIPAddr(addr.SockAddr) + if ipAddr == nil { + continue + } + + port := strconv.FormatInt(int64((*ipAddr).IPPort()), 10) + if re.MatchString(port) { + matchedIfs = append(matchedIfs, addr) + } else { + excludedIfs = append(excludedIfs, addr) + } + } + + return matchedIfs, excludedIfs, nil +} + +// IfByRFC returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. +func IfByRFC(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + inputRFC, err := strconv.ParseUint(selectorParam, 10, 64) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to parse RFC number %q: %v", selectorParam, err) + } + + matchedIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs)) + + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[uint(inputRFC)] + if !ok { + return nil, nil, fmt.Errorf("unsupported RFC %d", inputRFC) + } + + for _, ifAddr := range ifAddrs { + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(ifAddr.SockAddr) { + matchedIfAddrs = append(matchedIfAddrs, ifAddr) + contained = true + break + } + } + if !contained { + remainingIfAddrs = append(remainingIfAddrs, ifAddr) + } + } + + return matchedIfAddrs, remainingIfAddrs, nil +} + +// IfByRFCs returns a list of matched and non-matched IfAddrs that contain the +// relevant RFC-specified traits. Multiple RFCs can be specified and separated +// by the `|` symbol. No protection is taken to ensure an IfAddr does not end +// up in both the included and excluded list. +func IfByRFCs(selectorParam string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + var includedIfs, excludedIfs IfAddrs + for _, rfcStr := range strings.Split(selectorParam, "|") { + includedRFCIfs, excludedRFCIfs, err := IfByRFC(rfcStr, ifAddrs) + if err != nil { + return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to lookup RFC number %q: %v", rfcStr, err) + } + includedIfs = append(includedIfs, includedRFCIfs...) + excludedIfs = append(excludedIfs, excludedRFCIfs...) + } + + return includedIfs, excludedIfs, nil +} + +// IfByMaskSize returns a list of matched and non-matched IfAddrs that have the +// matching mask size. 
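+//
+// For example, selecting addresses on /24 networks (a usage sketch):
+//
+//	slash24s, rest, err := IfByMaskSize("24", ifAddrs)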
+func IfByMaskSize(selectorParam string, ifAddrs IfAddrs) (matchedIfs, excludedIfs IfAddrs, err error) {
+	maskSize, err := strconv.ParseUint(selectorParam, 10, 64)
+	if err != nil {
+		return IfAddrs{}, IfAddrs{}, fmt.Errorf("invalid exclude size argument (%q): %v", selectorParam, err)
+	}
+
+	ipIfs, nonIfs := FilterIfByType(ifAddrs, TypeIP)
+	matchedIfs = make(IfAddrs, 0, len(ipIfs))
+	excludedIfs = append(IfAddrs(nil), nonIfs...)
+	for _, addr := range ipIfs {
+		ipAddr := ToIPAddr(addr.SockAddr)
+		if ipAddr == nil {
+			return IfAddrs{}, IfAddrs{}, fmt.Errorf("unable to filter mask sizes on non-IP type %s: %v", addr.SockAddr.Type().String(), addr.SockAddr.String())
+		}
+
+		switch {
+		case (*ipAddr).Type()&TypeIPv4 != 0 && maskSize > 32:
+			return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv4 address: %d", maskSize)
+		case (*ipAddr).Type()&TypeIPv6 != 0 && maskSize > 128:
+			return IfAddrs{}, IfAddrs{}, fmt.Errorf("mask size out of bounds for IPv6 address: %d", maskSize)
+		}
+
+		if (*ipAddr).Maskbits() == int(maskSize) {
+			matchedIfs = append(matchedIfs, addr)
+		} else {
+			excludedIfs = append(excludedIfs, addr)
+		}
+	}
+
+	return matchedIfs, excludedIfs, nil
+}
+
+// IfByType returns a list of matching and non-matching IfAddrs that match the
+// specified type. Multiple types are separated by "|" (matching the split in
+// the implementation below). For instance:
+//
+//	include "type" "IPv4|IPv6"
+//
+// will include any IfAddrs that are either an IPv4 or IPv6 address. Any
+// addresses on those interfaces that don't match will be included in the
+// remainder results.
+func IfByType(inputTypes string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) {
+	matchingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+	remainingIfAddrs := make(IfAddrs, 0, len(ifAddrs))
+
+	ifTypes := strings.Split(strings.ToLower(inputTypes), "|")
+	for _, ifType := range ifTypes {
+		switch ifType {
+		case "ip", "ipv4", "ipv6", "unix":
+			// Valid types
+		default:
+			return nil, nil, fmt.Errorf("unsupported type %q in %q", ifType, inputTypes)
+		}
+	}
+
+	for _, ifAddr := range ifAddrs {
+		var matched bool
+		for _, ifType := range ifTypes {
+			switch {
+			case ifType == "ip" && ifAddr.SockAddr.Type()&TypeIP != 0:
+				matched = true
+			case ifType == "ipv4" && ifAddr.SockAddr.Type()&TypeIPv4 != 0:
+				matched = true
+			case ifType == "ipv6" && ifAddr.SockAddr.Type()&TypeIPv6 != 0:
+				matched = true
+			case ifType == "unix" && ifAddr.SockAddr.Type()&TypeUnix != 0:
+				matched = true
+			}
+		}
+
+		// Append each IfAddr exactly once so that an address cannot end
+		// up in both the matched and remainder lists.
+		if matched {
+			matchingIfAddrs = append(matchingIfAddrs, ifAddr)
+		} else {
+			remainingIfAddrs = append(remainingIfAddrs, ifAddr)
+		}
+	}
+
+	return matchingIfAddrs, remainingIfAddrs, nil
+}
+
+// IfByFlag returns a list of matching and non-matching IfAddrs that match the
+// specified interface flags. Multiple flags are separated by "|". For
+// instance:
+//
+//	include "flag" "up|broadcast"
+//
+// will include any IfAddrs that have both the "up" and "broadcast" flags set.
+// Any addresses on those interfaces that don't match will be omitted from the
+// results.
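+//
+// A direct call looks like (a usage sketch):
+//
+//	upIfs, _, err := IfByFlag("up|forwardable", ifAddrs)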
+func IfByFlag(inputFlags string, ifAddrs IfAddrs) (matched, remainder IfAddrs, err error) { + matchedAddrs := make(IfAddrs, 0, len(ifAddrs)) + excludedAddrs := make(IfAddrs, 0, len(ifAddrs)) + + var wantForwardable, + wantGlobalUnicast, + wantInterfaceLocalMulticast, + wantLinkLocalMulticast, + wantLinkLocalUnicast, + wantLoopback, + wantMulticast, + wantUnspecified bool + var ifFlags net.Flags + var checkFlags, checkAttrs bool + for _, flagName := range strings.Split(strings.ToLower(inputFlags), "|") { + switch flagName { + case "broadcast": + checkFlags = true + ifFlags = ifFlags | net.FlagBroadcast + case "down": + checkFlags = true + ifFlags = (ifFlags &^ net.FlagUp) + case "forwardable": + checkAttrs = true + wantForwardable = true + case "global unicast": + checkAttrs = true + wantGlobalUnicast = true + case "interface-local multicast": + checkAttrs = true + wantInterfaceLocalMulticast = true + case "link-local multicast": + checkAttrs = true + wantLinkLocalMulticast = true + case "link-local unicast": + checkAttrs = true + wantLinkLocalUnicast = true + case "loopback": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagLoopback + wantLoopback = true + case "multicast": + checkAttrs = true + checkFlags = true + ifFlags = ifFlags | net.FlagMulticast + wantMulticast = true + case "point-to-point": + checkFlags = true + ifFlags = ifFlags | net.FlagPointToPoint + case "unspecified": + checkAttrs = true + wantUnspecified = true + case "up": + checkFlags = true + ifFlags = ifFlags | net.FlagUp + default: + return nil, nil, fmt.Errorf("Unknown interface flag: %+q", flagName) + } + } + + for _, ifAddr := range ifAddrs { + var matched bool + if checkFlags && ifAddr.Interface.Flags&ifFlags == ifFlags { + matched = true + } + if checkAttrs { + if ip := ToIPAddr(ifAddr.SockAddr); ip != nil { + netIP := (*ip).NetIP() + switch { + case wantGlobalUnicast && netIP.IsGlobalUnicast(): + matched = true + case wantInterfaceLocalMulticast && netIP.IsInterfaceLocalMulticast(): + matched = true + case wantLinkLocalMulticast && netIP.IsLinkLocalMulticast(): + matched = true + case wantLinkLocalUnicast && netIP.IsLinkLocalUnicast(): + matched = true + case wantLoopback && netIP.IsLoopback(): + matched = true + case wantMulticast && netIP.IsMulticast(): + matched = true + case wantUnspecified && netIP.IsUnspecified(): + matched = true + case wantForwardable && !IsRFC(ForwardingBlacklist, ifAddr.SockAddr): + matched = true + } + } + } + if matched { + matchedAddrs = append(matchedAddrs, ifAddr) + } else { + excludedAddrs = append(excludedAddrs, ifAddr) + } + } + return matchedAddrs, excludedAddrs, nil +} + +// IfByNetwork returns an IfAddrs that are equal to or included within the +// network passed in by selector. +func IfByNetwork(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, IfAddrs, error) { + var includedIfs, excludedIfs IfAddrs + for _, netStr := range strings.Split(selectorParam, "|") { + netAddr, err := NewIPAddr(netStr) + if err != nil { + return nil, nil, fmt.Errorf("unable to create an IP address from %+q: %v", netStr, err) + } + + for _, ifAddr := range inputIfAddrs { + if netAddr.Contains(ifAddr.SockAddr) { + includedIfs = append(includedIfs, ifAddr) + } else { + excludedIfs = append(excludedIfs, ifAddr) + } + } + } + + return includedIfs, excludedIfs, nil +} + +// IfAddrMath will return a new IfAddr struct with a mutated value. 
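+//
+// The supported operations are "address", "network", and "mask"; "address"
+// and "network" require an explicit +/- sign on the value. For example (a
+// usage sketch):
+//
+//	next, err := IfAddrMath("address", "+1", ifAddr)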
+func IfAddrMath(operation, value string, inputIfAddr IfAddr) (IfAddr, error) {
+	// Regexp used to enforce the sign being a required part of the grammar for
+	// some values.
+	signRe := signRE.Copy()
+
+	switch strings.ToLower(operation) {
+	case "address":
+		// "address" operates on the IP address and is allowed to overflow or
+		// underflow networks, however it will wrap along the underlying
+		// address's underlying type.
+
+		if !signRe.MatchString(value) {
+			return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+		}
+
+		switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+		case TypeIPv4:
+			// 33 == Accept any uint32 value
+			// TODO(seanc@): Add the ability to parse hex
+			i, err := strconv.ParseInt(value, 10, 33)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+			ipv4Uint32 := uint32(ipv4.Address)
+			ipv4Uint32 += uint32(i)
+			return IfAddr{
+				SockAddr: IPv4Addr{
+					Address: IPv4Address(ipv4Uint32),
+					Mask:    ipv4.Mask,
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		case TypeIPv6:
+			// 64 == Accept any int64 value
+			// TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+			i, err := strconv.ParseInt(value, 10, 64)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+			ipv6BigIntA := new(big.Int)
+			ipv6BigIntA.Set(ipv6.Address)
+			ipv6BigIntB := big.NewInt(i)
+
+			ipv6Addr := ipv6BigIntA.Add(ipv6BigIntA, ipv6BigIntB)
+			ipv6Addr.And(ipv6Addr, ipv6HostMask)
+
+			return IfAddr{
+				SockAddr: IPv6Addr{
+					Address: IPv6Address(ipv6Addr),
+					Mask:    ipv6.Mask,
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		default:
+			return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+		}
+	case "network":
+		// "network" operates on the network address. Positive values start at
+		// the network address and negative values wrap at the network address,
+		// which means a "-1" value on a network will be the broadcast address
+		// after wrapping is applied.
+
+		if !signRe.MatchString(value) {
+			return IfAddr{}, fmt.Errorf("sign (+/-) is required for operation %q", operation)
+		}
+
+		switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+		case TypeIPv4:
+			// 33 == Accept any uint32 value
+			// TODO(seanc@): Add the ability to parse hex
+			i, err := strconv.ParseInt(value, 10, 33)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+			ipv4Uint32 := uint32(ipv4.NetworkAddress())
+
+			// Wrap along network mask boundaries. EZ-mode wrapping made
+			// possible by use of int64 vs a uint.
+			var wrappedMask int64
+			if i >= 0 {
+				wrappedMask = i
+			} else {
+				wrappedMask = 1 + i + int64(^uint32(ipv4.Mask))
+			}
+
+			ipv4Uint32 = ipv4Uint32 + (uint32(wrappedMask) &^ uint32(ipv4.Mask))
+
+			return IfAddr{
+				SockAddr: IPv4Addr{
+					Address: IPv4Address(ipv4Uint32),
+					Mask:    ipv4.Mask,
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		case TypeIPv6:
+			// 64 == Accept any int64 value
+			// TODO(seanc@): Add the ability to parse hex. Also parse a bignum int.
+			i, err := strconv.ParseInt(value, 10, 64)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+			ipv6BigInt := new(big.Int)
+			ipv6BigInt.Set(ipv6.NetworkAddress())
+
+			mask := new(big.Int)
+			mask.Set(ipv6.Mask)
+			if i > 0 {
+				wrappedMask := new(big.Int)
+				wrappedMask.SetInt64(i)
+
+				wrappedMask.AndNot(wrappedMask, mask)
+				ipv6BigInt.Add(ipv6BigInt, wrappedMask)
+			} else {
+				// Mask off any bits that exceed the network size. Subtract the
+				// wrappedMask from the last usable - 1
+				wrappedMask := new(big.Int)
+				wrappedMask.SetInt64(-1 * i)
+				wrappedMask.Sub(wrappedMask, big.NewInt(1))
+
+				wrappedMask.AndNot(wrappedMask, mask)
+
+				lastUsable := new(big.Int)
+				lastUsable.Set(ipv6.LastUsable().(IPv6Addr).Address)
+
+				ipv6BigInt = lastUsable.Sub(lastUsable, wrappedMask)
+			}
+
+			return IfAddr{
+				SockAddr: IPv6Addr{
+					Address: IPv6Address(ipv6BigInt),
+					Mask:    ipv6.Mask,
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		default:
+			return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+		}
+	case "mask":
+		// "mask" operates on the IP address and returns the IP address on
+		// which the given integer mask has been applied. If the applied mask
+		// corresponds to a larger network than the mask of the IP address,
+		// the latter will be replaced by the former.
+		switch sockType := inputIfAddr.SockAddr.Type(); sockType {
+		case TypeIPv4:
+			i, err := strconv.ParseUint(value, 10, 32)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			if i > 32 {
+				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv4 addresses must be between 0 and 32", operation)
+			}
+
+			ipv4 := *ToIPv4Addr(inputIfAddr.SockAddr)
+
+			ipv4Mask := net.CIDRMask(int(i), 32)
+			ipv4MaskUint32 := binary.BigEndian.Uint32(ipv4Mask)
+
+			maskedIpv4 := ipv4.NetIP().Mask(ipv4Mask)
+			maskedIpv4Uint32 := binary.BigEndian.Uint32(maskedIpv4)
+
+			maskedIpv4MaskUint32 := uint32(ipv4.Mask)
+
+			if ipv4MaskUint32 < maskedIpv4MaskUint32 {
+				maskedIpv4MaskUint32 = ipv4MaskUint32
+			}
+
+			return IfAddr{
+				SockAddr: IPv4Addr{
+					Address: IPv4Address(maskedIpv4Uint32),
+					Mask:    IPv4Mask(maskedIpv4MaskUint32),
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		case TypeIPv6:
+			i, err := strconv.ParseUint(value, 10, 32)
+			if err != nil {
+				return IfAddr{}, fmt.Errorf("unable to convert %q to int for operation %q: %v", value, operation, err)
+			}
+
+			if i > 128 {
+				return IfAddr{}, fmt.Errorf("parameter for operation %q on ipv6 addresses must be between 0 and 128", operation)
+			}
+
+			ipv6 := *ToIPv6Addr(inputIfAddr.SockAddr)
+
+			ipv6Mask := net.CIDRMask(int(i), 128)
+			ipv6MaskBigInt := new(big.Int)
+			ipv6MaskBigInt.SetBytes(ipv6Mask)
+
+			maskedIpv6 := ipv6.NetIP().Mask(ipv6Mask)
+			maskedIpv6BigInt := new(big.Int)
+			maskedIpv6BigInt.SetBytes(maskedIpv6)
+
+			maskedIpv6MaskBigInt := new(big.Int)
+			maskedIpv6MaskBigInt.Set(ipv6.Mask)
+
+			if ipv6MaskBigInt.Cmp(maskedIpv6MaskBigInt) == -1 {
+				maskedIpv6MaskBigInt = ipv6MaskBigInt
+			}
+
+			return IfAddr{
+				SockAddr: IPv6Addr{
+					Address: IPv6Address(maskedIpv6BigInt),
+					Mask:    IPv6Mask(maskedIpv6MaskBigInt),
+				},
+				Interface: inputIfAddr.Interface,
+			}, nil
+		default:
+			return IfAddr{}, fmt.Errorf("unsupported type for operation %q: %T", operation, sockType)
+		}
+	default:
+		return IfAddr{}, fmt.Errorf("unsupported math operation: %q", operation)
+	}
+}
+
+// IfAddrsMath will apply an IfAddrMath operation to each IfAddr struct. Any
+// failure will result in zero results.
+func IfAddrsMath(operation, value string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+	outputAddrs := make(IfAddrs, 0, len(inputIfAddrs))
+	for _, ifAddr := range inputIfAddrs {
+		result, err := IfAddrMath(operation, value, ifAddr)
+		if err != nil {
+			return IfAddrs{}, fmt.Errorf("unable to perform an IPMath operation on %s: %v", ifAddr, err)
+		}
+		outputAddrs = append(outputAddrs, result)
+	}
+	return outputAddrs, nil
+}
+
+// IncludeIfs returns an IfAddrs based on the passed in selector.
+func IncludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+	var includedIfs IfAddrs
+	var err error
+
+	switch strings.ToLower(selectorName) {
+	case "address":
+		includedIfs, _, err = IfByAddress(selectorParam, inputIfAddrs)
+	case "flag", "flags":
+		includedIfs, _, err = IfByFlag(selectorParam, inputIfAddrs)
+	case "name":
+		includedIfs, _, err = IfByName(selectorParam, inputIfAddrs)
+	case "network":
+		includedIfs, _, err = IfByNetwork(selectorParam, inputIfAddrs)
+	case "port":
+		includedIfs, _, err = IfByPort(selectorParam, inputIfAddrs)
+	case "rfc", "rfcs":
+		includedIfs, _, err = IfByRFCs(selectorParam, inputIfAddrs)
+	case "size":
+		includedIfs, _, err = IfByMaskSize(selectorParam, inputIfAddrs)
+	case "type":
+		includedIfs, _, err = IfByType(selectorParam, inputIfAddrs)
+	default:
+		return IfAddrs{}, fmt.Errorf("invalid include selector %q", selectorName)
+	}
+
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	return includedIfs, nil
+}
+
+// ExcludeIfs returns an IfAddrs based on the passed in selector.
+func ExcludeIfs(selectorName, selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+	var excludedIfs IfAddrs
+	var err error
+
+	switch strings.ToLower(selectorName) {
+	case "address":
+		_, excludedIfs, err = IfByAddress(selectorParam, inputIfAddrs)
+	case "flag", "flags":
+		_, excludedIfs, err = IfByFlag(selectorParam, inputIfAddrs)
+	case "name":
+		_, excludedIfs, err = IfByName(selectorParam, inputIfAddrs)
+	case "network":
+		_, excludedIfs, err = IfByNetwork(selectorParam, inputIfAddrs)
+	case "port":
+		_, excludedIfs, err = IfByPort(selectorParam, inputIfAddrs)
+	case "rfc", "rfcs":
+		_, excludedIfs, err = IfByRFCs(selectorParam, inputIfAddrs)
+	case "size":
+		_, excludedIfs, err = IfByMaskSize(selectorParam, inputIfAddrs)
+	case "type":
+		_, excludedIfs, err = IfByType(selectorParam, inputIfAddrs)
+	default:
+		return IfAddrs{}, fmt.Errorf("invalid exclude selector %q", selectorName)
+	}
+
+	if err != nil {
+		return IfAddrs{}, err
+	}
+
+	return excludedIfs, nil
+}
+
+// SortIfBy returns an IfAddrs sorted based on the passed in selector. Multiple
+// sort clauses can be passed in as a comma delimited list without whitespace.
+func SortIfBy(selectorParam string, inputIfAddrs IfAddrs) (IfAddrs, error) {
+	sortedIfs := append(IfAddrs(nil), inputIfAddrs...)
+
+	clauses := strings.Split(selectorParam, ",")
+	sortFuncs := make([]CmpIfAddrFunc, len(clauses))
+
+	for i, clause := range clauses {
+		switch strings.TrimSpace(strings.ToLower(clause)) {
+		case "+address", "address":
+			// The "address" selector returns an array of IfAddrs
+			// ordered by the network address. IfAddrs that are not
+			// comparable will be at the end of the list and in a
+			// non-deterministic order.
+ sortFuncs[i] = AscIfAddress + case "-address": + sortFuncs[i] = DescIfAddress + case "+default", "default": + sortFuncs[i] = AscIfDefault + case "-default": + sortFuncs[i] = DescIfDefault + case "+name", "name": + // The "name" selector returns an array of IfAddrs + // ordered by the interface name. + sortFuncs[i] = AscIfName + case "-name": + sortFuncs[i] = DescIfName + case "+port", "port": + // The "port" selector returns an array of IfAddrs + // ordered by the port, if included in the IfAddr. + // IfAddrs that are not comparable will be at the end of + // the list and in a non-deterministic order. + sortFuncs[i] = AscIfPort + case "-port": + sortFuncs[i] = DescIfPort + case "+private", "private": + // The "private" selector returns an array of IfAddrs + // ordered by private addresses first. IfAddrs that are + // not comparable will be at the end of the list and in + // a non-deterministic order. + sortFuncs[i] = AscIfPrivate + case "-private": + sortFuncs[i] = DescIfPrivate + case "+size", "size": + // The "size" selector returns an array of IfAddrs + // ordered by the size of the network mask, smaller mask + // (larger number of hosts per network) to largest + // (e.g. a /24 sorts before a /32). + sortFuncs[i] = AscIfNetworkSize + case "-size": + sortFuncs[i] = DescIfNetworkSize + case "+type", "type": + // The "type" selector returns an array of IfAddrs + // ordered by the type of the IfAddr. The sort order is + // Unix, IPv4, then IPv6. + sortFuncs[i] = AscIfType + case "-type": + sortFuncs[i] = DescIfType + default: + // Return an empty list for invalid sort types. + return IfAddrs{}, fmt.Errorf("unknown sort type: %q", clause) + } + } + + OrderedIfAddrBy(sortFuncs...).Sort(sortedIfs) + + return sortedIfs, nil +} + +// UniqueIfAddrsBy creates a unique set of IfAddrs based on the matching +// selector. UniqueIfAddrsBy assumes the input has already been sorted. +func UniqueIfAddrsBy(selectorName string, inputIfAddrs IfAddrs) (IfAddrs, error) { + attrName := strings.ToLower(selectorName) + + ifs := make(IfAddrs, 0, len(inputIfAddrs)) + var lastMatch string + for _, ifAddr := range inputIfAddrs { + var out string + switch attrName { + case "address": + out = ifAddr.SockAddr.String() + case "name": + out = ifAddr.Name + default: + return nil, fmt.Errorf("unsupported unique constraint %+q", selectorName) + } + + switch { + case lastMatch == "", lastMatch != out: + lastMatch = out + ifs = append(ifs, ifAddr) + case lastMatch == out: + continue + } + } + + return ifs, nil +} + +// JoinIfAddrs joins an IfAddrs and returns a string +func JoinIfAddrs(selectorName string, joinStr string, inputIfAddrs IfAddrs) (string, error) { + outputs := make([]string, 0, len(inputIfAddrs)) + attrName := AttrName(strings.ToLower(selectorName)) + + for _, ifAddr := range inputIfAddrs { + var attrVal string + var err error + attrVal, err = ifAddr.Attr(attrName) + if err != nil { + return "", err + } + outputs = append(outputs, attrVal) + } + return strings.Join(outputs, joinStr), nil +} + +// LimitIfAddrs returns a slice of IfAddrs based on the specified limit. +func LimitIfAddrs(lim uint, in IfAddrs) (IfAddrs, error) { + // Clamp the limit to the length of the array + if int(lim) > len(in) { + lim = uint(len(in)) + } + + return in[0:lim], nil +} + +// OffsetIfAddrs returns a slice of IfAddrs based on the specified offset. 
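+//
+// A negative offset counts from the end of the input, e.g. (a usage sketch):
+//
+//	lastTwo, err := OffsetIfAddrs(-2, ifAddrs)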
+func OffsetIfAddrs(off int, in IfAddrs) (IfAddrs, error) { + var end bool + if off < 0 { + end = true + off = off * -1 + } + + if off > len(in) { + return IfAddrs{}, fmt.Errorf("unable to seek past the end of the interface array: offset (%d) exceeds the number of interfaces (%d)", off, len(in)) + } + + if end { + return in[len(in)-off:], nil + } + return in[off:], nil +} + +func (ifAddr IfAddr) String() string { + return fmt.Sprintf("%s %v", ifAddr.SockAddr, ifAddr.Interface) +} + +// parseDefaultIfNameFromRoute parses standard route(8)'s output for the *BSDs +// and Solaris. +func parseDefaultIfNameFromRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + for _, line := range lines { + kvs := strings.SplitN(line, ":", 2) + if len(kvs) != 2 { + continue + } + + if strings.TrimSpace(kvs[0]) == "interface" { + ifName := strings.TrimSpace(kvs[1]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameFromIPCmd parses the default interface from ip(8) for +// Linux. +func parseDefaultIfNameFromIPCmd(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + for _, line := range lines { + kvs := re.Split(line, -1) + if len(kvs) < 5 { + continue + } + + if kvs[0] == "default" && + kvs[1] == "via" && + kvs[3] == "dev" { + ifName := strings.TrimSpace(kvs[4]) + return ifName, nil + } + } + + return "", errors.New("No default interface found") +} + +// parseDefaultIfNameWindows parses the default interface from `netstat -rn` and +// `ipconfig` on Windows. +func parseDefaultIfNameWindows(routeOut, ipconfigOut string) (string, error) { + defaultIPAddr, err := parseDefaultIPAddrWindowsRoute(routeOut) + if err != nil { + return "", err + } + + ifName, err := parseDefaultIfNameWindowsIPConfig(defaultIPAddr, ipconfigOut) + if err != nil { + return "", err + } + + return ifName, nil +} + +// parseDefaultIPAddrWindowsRoute parses the IP address on the default interface +// from `netstat -rn`. +// +// NOTES(sean): Only IPv4 addresses are parsed at this time. If you have an +// IPv6 connected host, submit an issue on github.com/hashicorp/go-sockaddr with +// the output from `netstat -rn`, `ipconfig`, and version of Windows to see IPv6 +// support added. +func parseDefaultIPAddrWindowsRoute(routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + re := whitespaceRE.Copy() + for _, line := range lines { + kvs := re.Split(strings.TrimSpace(line), -1) + if len(kvs) < 4 { + continue + } + + if kvs[0] == "0.0.0.0" && kvs[1] == "0.0.0.0" { + defaultIPAddr := strings.TrimSpace(kvs[3]) + return defaultIPAddr, nil + } + } + + return "", errors.New("No IP on default interface found") +} + +// parseDefaultIfNameWindowsIPConfig parses the output of `ipconfig` to find the +// interface name forwarding traffic to the default gateway.
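
parseDefaultIfNameFromIPCmd keys off the positional shape of `ip route` output, "default via <gateway> dev <ifname> ...", rather than any structured format. A standalone sketch of the same tokenization against one plausible (illustrative, not captured) output line:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Illustrative `ip route` output; real output varies by distribution.
	line := "default via 192.168.1.1 dev eth0 proto dhcp metric 100"

	kvs := regexp.MustCompile(`\s+`).Split(line, -1)
	if len(kvs) >= 5 && kvs[0] == "default" && kvs[1] == "via" && kvs[3] == "dev" {
		fmt.Println("default interface:", kvs[4]) // "eth0"
	}
}
```
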
+func parseDefaultIfNameWindowsIPConfig(defaultIPAddr, routeOut string) (string, error) { + lines := strings.Split(routeOut, "\n") + ifNameRe := ifNameRE.Copy() + ipAddrRe := ipAddrRE.Copy() + var ifName string + for _, line := range lines { + switch ifNameMatches := ifNameRe.FindStringSubmatch(line); { + case len(ifNameMatches) > 1: + ifName = ifNameMatches[1] + continue + } + + switch ipAddrMatches := ipAddrRe.FindStringSubmatch(line); { + case len(ipAddrMatches) > 1 && ipAddrMatches[1] == defaultIPAddr: + return ifName, nil + } + } + + return "", errors.New("No default interface found with matching IP") +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go new file mode 100644 index 0000000000..6984cb4a35 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr.go @@ -0,0 +1,65 @@ +package sockaddr + +import ( + "fmt" + "net" +) + +// IfAddr is a union of a SockAddr and a net.Interface. +type IfAddr struct { + SockAddr + net.Interface +} + +// Attr returns the named attribute as a string +func (ifAddr IfAddr) Attr(attrName AttrName) (string, error) { + val := IfAddrAttr(ifAddr, attrName) + if val != "" { + return val, nil + } + + return Attr(ifAddr.SockAddr, attrName) +} + +// Attr returns the named attribute as a string +func Attr(sa SockAddr, attrName AttrName) (string, error) { + switch sockType := sa.Type(); { + case sockType&TypeIP != 0: + ip := *ToIPAddr(sa) + attrVal := IPAddrAttr(ip, attrName) + if attrVal != "" { + return attrVal, nil + } + + if sockType == TypeIPv4 { + ipv4 := *ToIPv4Addr(sa) + attrVal := IPv4AddrAttr(ipv4, attrName) + if attrVal != "" { + return attrVal, nil + } + } else if sockType == TypeIPv6 { + ipv6 := *ToIPv6Addr(sa) + attrVal := IPv6AddrAttr(ipv6, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + case sockType == TypeUnix: + us := *ToUnixSock(sa) + attrVal := UnixSockAttr(us, attrName) + if attrVal != "" { + return attrVal, nil + } + } + + // Non type-specific attributes + switch attrName { + case "string": + return sa.String(), nil + case "type": + return sa.Type().String(), nil + } + + return "", fmt.Errorf("unsupported attribute name %q", attrName) +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go new file mode 100644 index 0000000000..b47d15c201 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr.go @@ -0,0 +1,169 @@ +package sockaddr + +import ( + "fmt" + "math/big" + "net" + "strings" +) + +// Constants for the sizes of IPv3, IPv4, and IPv6 address types. +const ( + IPv3len = 6 + IPv4len = 4 + IPv6len = 16 +) + +// IPAddr is a generic IP address interface for IPv4 and IPv6 addresses, +// networks, and socket endpoints. +type IPAddr interface { + SockAddr + AddressBinString() string + AddressHexString() string + Cmp(SockAddr) int + CmpAddress(SockAddr) int + CmpPort(SockAddr) int + FirstUsable() IPAddr + Host() IPAddr + IPPort() IPPort + LastUsable() IPAddr + Maskbits() int + NetIP() *net.IP + NetIPMask() *net.IPMask + NetIPNet() *net.IPNet + Network() IPAddr + Octets() []int +} + +// IPPort is the type for an IP port number for the TCP and UDP IP transports. +type IPPort uint16 + +// IPPrefixLen is a typed integer representing the prefix length for a given +// IPAddr. +type IPPrefixLen byte + +// ipAddrAttrMap is a map of the IPAddr type-specific attributes. 
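
IfAddr.Attr consults the interface attribute table first and then falls back to the package-level Attr() above, which walks the type-specific tables before the generic "string" and "type" names, so every SockAddr answers at least those two. A small sketch of both paths, using MustIPAddr and IPAddrAttr from ipaddr.go just below (the expected outputs assume the usual String() renderings):

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	ip := sockaddr.MustIPAddr("192.168.1.10/24")

	// Generic fallthrough names work for every SockAddr.
	typ, _ := sockaddr.Attr(ip, "type")
	fmt.Println("type:", typ) // expected "IPv4"

	// Type-specific names come from the attribute tables registered at init().
	for _, attr := range []sockaddr.AttrName{"netmask", "network", "mask_bits", "first_usable", "last_usable"} {
		fmt.Printf("%-12s %s\n", attr, sockaddr.IPAddrAttr(ip, attr))
	}
	// netmask      255.255.255.0
	// network      192.168.1.0
	// mask_bits    24
	// first_usable 192.168.1.1
	// last_usable  192.168.1.254
}
```
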
+var ipAddrAttrMap map[AttrName]func(IPAddr) string +var ipAddrAttrs []AttrName + +func init() { + ipAddrInit() +} + +// NewIPAddr creates a new IPAddr from a string. Returns nil if the string is +// not an IPv4 or an IPv6 address. +func NewIPAddr(addr string) (IPAddr, error) { + ipv4Addr, err := NewIPv4Addr(addr) + if err == nil { + return ipv4Addr, nil + } + + ipv6Addr, err := NewIPv6Addr(addr) + if err == nil { + return ipv6Addr, nil + } + + return nil, fmt.Errorf("invalid IPAddr %v", addr) +} + +// IPAddrAttr returns a string representation of an attribute for the given +// IPAddr. +func IPAddrAttr(ip IPAddr, selector AttrName) string { + fn, found := ipAddrAttrMap[selector] + if !found { + return "" + } + + return fn(ip) +} + +// IPAttrs returns a list of attributes supported by the IPAddr type +func IPAttrs() []AttrName { + return ipAddrAttrs +} + +// MustIPAddr is a helper method that must return an IPAddr or panic on invalid +// input. +func MustIPAddr(addr string) IPAddr { + ip, err := NewIPAddr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPAddr from %+q: %v", addr, err)) + } + return ip +} + +// ipAddrInit is called once at init() +func ipAddrInit() { + // Sorted for human readability + ipAddrAttrs = []AttrName{ + "host", + "address", + "port", + "netmask", + "network", + "mask_bits", + "binary", + "hex", + "first_usable", + "last_usable", + "octets", + } + + ipAddrAttrMap = map[AttrName]func(ip IPAddr) string{ + "address": func(ip IPAddr) string { + return ip.NetIP().String() + }, + "binary": func(ip IPAddr) string { + return ip.AddressBinString() + }, + "first_usable": func(ip IPAddr) string { + return ip.FirstUsable().String() + }, + "hex": func(ip IPAddr) string { + return ip.AddressHexString() + }, + "host": func(ip IPAddr) string { + return ip.Host().String() + }, + "last_usable": func(ip IPAddr) string { + return ip.LastUsable().String() + }, + "mask_bits": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.Maskbits()) + }, + "netmask": func(ip IPAddr) string { + switch v := ip.(type) { + case IPv4Addr: + ipv4Mask := IPv4Addr{ + Address: IPv4Address(v.Mask), + Mask: IPv4HostMask, + } + return ipv4Mask.String() + case IPv6Addr: + ipv6Mask := new(big.Int) + ipv6Mask.Set(v.Mask) + ipv6MaskAddr := IPv6Addr{ + Address: IPv6Address(ipv6Mask), + Mask: ipv6HostMask, + } + return ipv6MaskAddr.String() + default: + return fmt.Sprintf("<unsupported type: %T>", ip) + } + }, + "network": func(ip IPAddr) string { + return ip.Network().NetIP().String() + }, + "octets": func(ip IPAddr) string { + octets := ip.Octets() + octetStrs := make([]string, 0, len(octets)) + for _, octet := range octets { + octetStrs = append(octetStrs, fmt.Sprintf("%d", octet)) + } + return strings.Join(octetStrs, " ") + }, + "port": func(ip IPAddr) string { + return fmt.Sprintf("%d", ip.IPPort()) + }, + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go new file mode 100644 index 0000000000..6eeb7ddd2f --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs.go @@ -0,0 +1,98 @@ +package sockaddr + +import "bytes" + +type IPAddrs []IPAddr + +func (s IPAddrs) Len() int { return len(s) } +func (s IPAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// // SortIPAddrsByCmp is a type that satisfies sort.Interface and can be used +// // by the routines in this package.
The SortIPAddrsByCmp type is used to +// // sort IPAddrs by Cmp() +// type SortIPAddrsByCmp struct{ IPAddrs } + +// // Less reports whether the element with index i should sort before the +// // element with index j. +// func (s SortIPAddrsByCmp) Less(i, j int) bool { +// // Sort by Type, then address, then port number. +// return Less(s.IPAddrs[i], s.IPAddrs[j]) +// } + +// SortIPAddrsByNetworkSize is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsByNetworkSize type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsByNetworkSize struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsByNetworkSize) Less(i, j int) bool { + // Sort masks with a larger binary value (i.e. fewer hosts per network + // prefix) after masks with a smaller value (larger number of hosts per + // prefix). + switch bytes.Compare([]byte(*s.IPAddrs[i].NetIPMask()), []byte(*s.IPAddrs[j].NetIPMask())) { + case 0: + // Fall through to the second test if the net.IPMasks are the + // same. + break + case 1: + return true + case -1: + return false + default: + panic("bad, m'kay?") + } + + // Sort IPs based on the length (i.e. prefer IPv4 over IPv6). + iLen := len(*s.IPAddrs[i].NetIP()) + jLen := len(*s.IPAddrs[j].NetIP()) + if iLen != jLen { + return iLen > jLen + } + + // Sort IPs based on their network address from lowest to highest. + switch bytes.Compare(s.IPAddrs[i].NetIPNet().IP, s.IPAddrs[j].NetIPNet().IP) { + case 0: + break + case 1: + return false + case -1: + return true + default: + panic("lol wut?") + } + + // If a host does not have a port set, it always sorts after hosts + // that have a port (e.g. a host with a /32 and port number is more + // specific and should sort first over a host with a /32 but no port + // set). + if s.IPAddrs[i].IPPort() == 0 || s.IPAddrs[j].IPPort() == 0 { + return false + } + return s.IPAddrs[i].IPPort() < s.IPAddrs[j].IPPort() +} + +// SortIPAddrsBySpecificMaskLen is a type that satisfies sort.Interface and +// can be used by the routines in this package. The +// SortIPAddrsBySpecificMaskLen type is used to sort IPAddrs by smallest +// network (most specific to largest network). +type SortIPAddrsBySpecificMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsBySpecificMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() > s.IPAddrs[j].Maskbits() +} + +// SortIPAddrsByBroadMaskLen is a type that satisfies sort.Interface and can +// be used by the routines in this package. The SortIPAddrsByBroadMaskLen +// type is used to sort IPAddrs by largest network (i.e. largest subnets +// first). +type SortIPAddrsByBroadMaskLen struct{ IPAddrs } + +// Less reports whether the element with index i should sort before the +// element with index j. +func (s SortIPAddrsByBroadMaskLen) Less(i, j int) bool { + return s.IPAddrs[i].Maskbits() < s.IPAddrs[j].Maskbits() +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go new file mode 100644 index 0000000000..4d395dc954 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr.go @@ -0,0 +1,516 @@ +package sockaddr + +import ( + "encoding/binary" + "fmt" + "net" + "regexp" + "strconv" + "strings" +) + +type ( + // IPv4Address is a named type representing an IPv4 address.
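
All of the exported wrapper types above embed IPAddrs for Len/Swap and supply only Less, so they plug straight into the standard sort package:

```go
package main

import (
	"fmt"
	"sort"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	addrs := sockaddr.IPAddrs{
		sockaddr.MustIPAddr("10.0.0.0/8"),
		sockaddr.MustIPAddr("10.1.2.3/32"),
		sockaddr.MustIPAddr("10.1.0.0/16"),
	}

	// Longest mask (most specific network) first.
	sort.Sort(sockaddr.SortIPAddrsBySpecificMaskLen{IPAddrs: addrs})
	fmt.Println(addrs) // [10.1.2.3 10.1.0.0/16 10.0.0.0/8]
}
```
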
+ IPv4Address uint32 + + // IPv4Network is a named type representing an IPv4 network. + IPv4Network uint32 + + // IPv4Mask is a named type representing an IPv4 network mask. + IPv4Mask uint32 +) + +// IPv4HostMask is a constant that represents a /32 IPv4 Address +// (i.e. 255.255.255.255). +const IPv4HostMask = IPv4Mask(0xffffffff) + +// ipv4AddrAttrMap is a map of the IPv4Addr type-specific attributes. +var ipv4AddrAttrMap map[AttrName]func(IPv4Addr) string +var ipv4AddrAttrs []AttrName +var trailingHexNetmaskRE *regexp.Regexp + +// IPv4Addr implements a convenience wrapper around the union of Go's +// built-in net.IP and net.IPNet types. In UNIX-speak, IPv4Addr implements +// `sockaddr` when the address family is set to AF_INET +// (i.e. `sockaddr_in`). +type IPv4Addr struct { + IPAddr + Address IPv4Address + Mask IPv4Mask + Port IPPort +} + +func init() { + ipv4AddrInit() + trailingHexNetmaskRE = regexp.MustCompile(`/([0f]{8})$`) +} + +// NewIPv4Addr creates an IPv4Addr from a string. String can be in the form +// of either an IPv4:port (e.g. `1.2.3.4:80`, in which case the mask is +// assumed to be a `/32`), an IPv4 address (e.g. `1.2.3.4`, also with a `/32` +// mask), or an IPv4 CIDR (e.g. `1.2.3.4/24`, which has its IP port +// initialized to zero). ipv4Str can not be a hostname. +// +// NOTE: Many net.*() routines will initialize and return an IPv6 address. +// To create uint32 values from net.IP, always test to make sure the address +// returned can be converted to a 4 byte array using To4(). +func NewIPv4Addr(ipv4Str string) (IPv4Addr, error) { + // Strip off any bogus hex-encoded netmasks that will be mis-parsed by Go. In + // particular, clients with the Barracuda VPN client will see something like: + // `192.168.3.51/00ffffff` as their IP address. + trailingHexNetmaskRe := trailingHexNetmaskRE.Copy() + if match := trailingHexNetmaskRe.FindStringIndex(ipv4Str); match != nil { + ipv4Str = ipv4Str[:match[0]] + } + + // Parse as an IPv4 CIDR + ipAddr, network, err := net.ParseCIDR(ipv4Str) + if err == nil { + ipv4 := ipAddr.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address", ipv4Str) + } + + // If we see an IPv6 netmask, convert it to an IPv4 mask. + netmaskSepPos := strings.LastIndexByte(ipv4Str, '/') + if netmaskSepPos != -1 && netmaskSepPos+1 < len(ipv4Str) { + netMask, err := strconv.ParseUint(ipv4Str[netmaskSepPos+1:], 10, 8) + if err != nil { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: unable to parse CIDR netmask: %v", ipv4Str, err) + } else if netMask > 128 { + return IPv4Addr{}, fmt.Errorf("Unable to convert %s to an IPv4 address: invalid CIDR netmask", ipv4Str) + } + + if netMask >= 96 { + // Convert the IPv6 netmask to an IPv4 netmask + network.Mask = net.CIDRMask(int(netMask-96), IPv4len*8) + } + } + ipv4Addr := IPv4Addr{ + Address: IPv4Address(binary.BigEndian.Uint32(ipv4)), + Mask: IPv4Mask(binary.BigEndian.Uint32(network.Mask)), + } + return ipv4Addr, nil + } + + // Attempt to parse ipv4Str as a /32 host with a port number.
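
Storing the address as a uint32 is what makes the later mask, network, and broadcast operations single AND/OR instructions. The packing NewIPv4Addr performs is plain big-endian byte order:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	ipv4 := net.ParseIP("192.168.1.10").To4() // To4() guards against IPv6, as noted above
	u := binary.BigEndian.Uint32(ipv4)
	fmt.Printf("%d 0x%08x\n", u, u) // 3232235786 0xc0a8010a
}
```
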
+ tcpAddr, err := net.ResolveTCPAddr("tcp4", ipv4Str) + if err == nil { + ipv4 := tcpAddr.IP.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + Port: IPPort(tcpAddr.Port), + } + + return ipv4Addr, nil + } + + // Parse as a naked IPv4 address + ip := net.ParseIP(ipv4Str) + if ip != nil { + ipv4 := ip.To4() + if ipv4 == nil { + return IPv4Addr{}, fmt.Errorf("Unable to string convert %+q to an IPv4 address", ipv4Str) + } + + ipv4Uint32 := binary.BigEndian.Uint32(ipv4) + ipv4Addr := IPv4Addr{ + Address: IPv4Address(ipv4Uint32), + Mask: IPv4HostMask, + } + return ipv4Addr, nil + } + + return IPv4Addr{}, fmt.Errorf("Unable to parse %+q to an IPv4 address: %v", ipv4Str, err) +} + +// AddressBinString returns a string with the IPv4Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressBinString() string { + return fmt.Sprintf("%032s", strconv.FormatUint(uint64(ipv4.Address), 2)) +} + +// AddressHexString returns a string with the IPv4Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv4 IPv4Addr) AddressHexString() string { + return fmt.Sprintf("%08s", strconv.FormatUint(uint64(ipv4.Address), 16)) +} + +// Broadcast is an IPv4Addr-only method that returns the broadcast address of +// the network. +// +// NOTE: IPv6 only supports multicast, so this method only exists for +// IPv4Addr. +func (ipv4 IPv4Addr) Broadcast() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: IPv4Address(ipv4.BroadcastAddress()), + Mask: IPv4HostMask, + } +} + +// BroadcastAddress returns a IPv4Network of the IPv4Addr's broadcast +// address. +func (ipv4 IPv4Addr) BroadcastAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address)&uint32(ipv4.Mask) | ^uint32(ipv4.Mask)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv4Addr or the argument is +// of a different type. +// - 1 If the argument should sort first. +func (ipv4 IPv4Addr) CmpAddress(sa SockAddr) int { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return sortDeferDecision + } + + switch { + case ipv4.Address == ipv4b.Address: + return sortDeferDecision + case ipv4.Address < ipv4b.Address: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv4Addr, +// regardless of type. +// - 1 If the argument should sort first. 
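
The Cmp* methods return a three-way result (-1, 0, 1), and 0 deliberately means "no opinion" so that multi-key sorters can fall through to the next comparator:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	a := sockaddr.MustIPv4Addr("10.0.0.1:80")
	b := sockaddr.MustIPv4Addr("10.0.0.2:80")

	fmt.Println(a.CmpAddress(b)) // -1: a's address is lower, so a sorts first
	fmt.Println(a.CmpPort(b))    // 0: equal ports defer to the next comparator
}
```
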
+func (ipv4 IPv4Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv4.Port == saPort: + return sortDeferDecision + case ipv4.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv4 IPv4Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv4) + ipv4b, ok := sa.(IPv4Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, return -1 so that the receiver sorts before + // the non-IPv4 SockAddr. Conversely, if the receiver is not + // part of the RFC, punt on sorting and leave it for the next + // sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv4b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv4 IPv4Addr) Contains(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + return ipv4.ContainsNetwork(ipv4b) +} + +// ContainsAddress returns true if the IPv4Address is contained within the +// receiver. +func (ipv4 IPv4Addr) ContainsAddress(x IPv4Address) bool { + return IPv4Address(ipv4.NetworkAddress()) <= x && + IPv4Address(ipv4.BroadcastAddress()) >= x +} + +// ContainsNetwork returns true if the network from IPv4Addr is contained +// within the receiver. +func (ipv4 IPv4Addr) ContainsNetwork(x IPv4Addr) bool { + return ipv4.NetworkAddress() <= x.NetworkAddress() && + ipv4.BroadcastAddress() >= x.BroadcastAddress() +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialPacketArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv4 is not a /32 or the Port is 0, +// DialStreamArgs() will fail. See Host() to create an IPv4Addr with its +// mask set to /32. +func (ipv4 IPv4Addr) DialStreamArgs() (network, dialArgs string) { + if ipv4.Mask != IPv4HostMask || ipv4.Port == 0 { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv4Addr. 
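
ContainsNetwork reduces containment to two unsigned comparisons because the network and broadcast addresses bound every prefix. The same arithmetic in stdlib-only form:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	ip := binary.BigEndian.Uint32(net.ParseIP("192.168.1.10").To4())
	mask := uint32(0xffffff00) // /24

	network := ip & mask         // NetworkAddress()
	broadcast := network | ^mask // BroadcastAddress()

	toIP := func(u uint32) net.IP {
		b := make(net.IP, 4)
		binary.BigEndian.PutUint32(b, u)
		return b
	}
	fmt.Println(toIP(network), toIP(broadcast)) // 192.168.1.0 192.168.1.255
}
```
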
+func (ipv4 IPv4Addr) Equal(sa SockAddr) bool { + ipv4b, ok := sa.(IPv4Addr) + if !ok { + return false + } + + if ipv4.Port != ipv4b.Port { + return false + } + + if ipv4.Address != ipv4b.Address { + return false + } + + if ipv4.NetIPNet().String() != ipv4b.NetIPNet().String() { + return false + } + + return true +} + +// FirstUsable returns an IPv4Addr set to the first address following the +// network prefix. The first usable address in a network is normally the +// gateway and should not be used except by devices forwarding packets +// between two administratively distinct networks (i.e. a router). This +// function does not discriminate against first usable vs "first address that +// should be used." For example, FirstUsable() on "192.168.1.10/24" would +// return the address "192.168.1.1/24". +func (ipv4 IPv4Addr) FirstUsable() IPAddr { + addr := ipv4.NetworkAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the lower address. + if ipv4.Maskbits() < 31 { + addr++ + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// Host returns a copy of ipv4 with its mask set to /32 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv4 IPv4Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv4Addr{ + Address: ipv4.Address, + Mask: IPv4HostMask, + Port: ipv4.Port, + } +} + +// IPPort returns the Port number attached to the IPv4Addr +func (ipv4 IPv4Addr) IPPort() IPPort { + return ipv4.Port +} + +// LastUsable returns the last address before the broadcast address in a +// given network. +func (ipv4 IPv4Addr) LastUsable() IPAddr { + addr := ipv4.BroadcastAddress() + + // If /32, return the address itself. If /31 assume a point-to-point + // link and return the upper address. + if ipv4.Maskbits() < 31 { + addr-- + } + + return IPv4Addr{ + Address: IPv4Address(addr), + Mask: IPv4HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv4 is not a /32, ListenPacketArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenPacketArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "udp4", "" + } + return "udp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv4 is not a /32, ListenStreamArgs() +// will fail. See Host() to create an IPv4Addr with its mask set to /32. +func (ipv4 IPv4Addr) ListenStreamArgs() (network, listenArgs string) { + if ipv4.Mask != IPv4HostMask { + return "tcp4", "" + } + return "tcp4", fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv4Addr. For +// example, the Maskbits() of "192.168.1.1/24" would return 24. +func (ipv4 IPv4Addr) Maskbits() int { + mask := make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(mask, uint32(ipv4.Mask)) + maskOnes, _ := mask.Size() + return maskOnes +} + +// MustIPv4Addr is a helper method that must return an IPv4Addr or panic on +// invalid input. +func MustIPv4Addr(addr string) IPv4Addr { + ipv4, err := NewIPv4Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv4Addr from %+q: %v", addr, err)) + } + return ipv4 +} + +// NetIP returns the address as a net.IP (address is always presized to +// IPv4). 
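
FirstUsable and LastUsable trim the network and broadcast addresses off a prefix, except on /31s and /32s where every address is treated as usable:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	n := sockaddr.MustIPv4Addr("192.168.1.10/24")
	fmt.Println(n.FirstUsable(), n.LastUsable()) // 192.168.1.1 192.168.1.254

	// On a /31 point-to-point link no +1/-1 is applied.
	p2p := sockaddr.MustIPv4Addr("192.168.1.0/31")
	fmt.Println(p2p.FirstUsable(), p2p.LastUsable()) // 192.168.1.0 192.168.1.1
}
```
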
+func (ipv4 IPv4Addr) NetIP() *net.IP { + x := make(net.IP, IPv4len) + binary.BigEndian.PutUint32(x, uint32(ipv4.Address)) + return &x +} + +// NetIPMask creates a new net.IPMask from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPMask() *net.IPMask { + ipv4Mask := net.IPMask{} + ipv4Mask = make(net.IPMask, IPv4len) + binary.BigEndian.PutUint32(ipv4Mask, uint32(ipv4.Mask)) + return &ipv4Mask +} + +// NetIPNet creates a new net.IPNet from the IPv4Addr. +func (ipv4 IPv4Addr) NetIPNet() *net.IPNet { + ipv4net := &net.IPNet{} + ipv4net.IP = make(net.IP, IPv4len) + binary.BigEndian.PutUint32(ipv4net.IP, uint32(ipv4.NetworkAddress())) + ipv4net.Mask = *ipv4.NetIPMask() + return ipv4net +} + +// Network returns the network prefix or network address for a given network. +func (ipv4 IPv4Addr) Network() IPAddr { + return IPv4Addr{ + Address: IPv4Address(ipv4.NetworkAddress()), + Mask: ipv4.Mask, + } +} + +// NetworkAddress returns an IPv4Network of the IPv4Addr's network address. +func (ipv4 IPv4Addr) NetworkAddress() IPv4Network { + return IPv4Network(uint32(ipv4.Address) & uint32(ipv4.Mask)) +} + +// Octets returns a slice of the four octets in an IPv4Addr's Address. The +// order of the bytes is big endian. +func (ipv4 IPv4Addr) Octets() []int { + return []int{ + int(ipv4.Address >> 24), + int((ipv4.Address >> 16) & 0xff), + int((ipv4.Address >> 8) & 0xff), + int(ipv4.Address & 0xff), + } +} + +// String returns a string representation of the IPv4Addr +func (ipv4 IPv4Addr) String() string { + if ipv4.Port != 0 { + return fmt.Sprintf("%s:%d", ipv4.NetIP().String(), ipv4.Port) + } + + if ipv4.Maskbits() == 32 { + return ipv4.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv4.NetIP().String(), ipv4.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv4 +func (IPv4Addr) Type() SockAddrType { + return TypeIPv4 +} + +// IPv4AddrAttr returns a string representation of an attribute for the given +// IPv4Addr.
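
The Dial*/Listen* helpers return empty args rather than an error when the receiver is not a /32 host, so callers usually go through Host() first. A sketch of binding a listener (assumes the port is free locally):

```go
package main

import (
	"fmt"
	"net"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	host := sockaddr.MustIPv4Addr("127.0.0.1:8080") // a /32 with a port

	network, addr := host.ListenStreamArgs() // ("tcp4", "127.0.0.1:8080")
	ln, err := net.Listen(network, addr)
	if err != nil {
		panic(err)
	}
	defer ln.Close()
	fmt.Println("listening on", ln.Addr())
}
```
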
+func IPv4AddrAttr(ipv4 IPv4Addr, selector AttrName) string { + fn, found := ipv4AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv4) +} + +// IPv4Attrs returns a list of attributes supported by the IPv4Addr type +func IPv4Attrs() []AttrName { + return ipv4AddrAttrs +} + +// ipv4AddrInit is called once at init() +func ipv4AddrInit() { + // Sorted for human readability + ipv4AddrAttrs = []AttrName{ + "size", // Same position as in IPv6 for output consistency + "broadcast", + "uint32", + } + + ipv4AddrAttrMap = map[AttrName]func(ipv4 IPv4Addr) string{ + "broadcast": func(ipv4 IPv4Addr) string { + return ipv4.Broadcast().String() + }, + "size": func(ipv4 IPv4Addr) string { + return fmt.Sprintf("%d", 1<<uint(IPv4len*8-ipv4.Maskbits())) + }, + "uint32": func(ipv4 IPv4Addr) string { + return fmt.Sprintf("%d", uint32(ipv4.Address)) + }, + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go new file mode 100644 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr.go +package sockaddr + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/big" + "net" +) + +type ( + // IPv6Address is a named type representing an IPv6 address. + IPv6Address *big.Int + + // IPv6Network is a named type representing an IPv6 network. + IPv6Network *big.Int + + // IPv6Mask is a named type representing an IPv6 network mask. + IPv6Mask *big.Int +) + +// ipv6HostMask is an unexported big.Int representing a /128 IPv6 address (all +// ones); it is initialized in init(). +var ipv6HostMask IPv6Mask + +// ipv6AddrAttrMap is a map of the IPv6Addr type-specific attributes. +var ipv6AddrAttrMap map[AttrName]func(IPv6Addr) string +var ipv6AddrAttrs []AttrName + +func init() { + biMask := new(big.Int) + biMask = biMask.SetBytes([]byte{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }) + ipv6HostMask = IPv6Mask(biMask) + + ipv6AddrInit() +} + +// IPv6Addr implements a convenience wrapper around the union of Go's +// built-in net.IP and net.IPNet types. In UNIX-speak, IPv6Addr implements +// `sockaddr` when the address family is set to AF_INET6 +// (i.e. `sockaddr_in6`). +type IPv6Addr struct { + IPAddr + Address IPv6Address + Mask IPv6Mask + Port IPPort +} + +// NewIPv6Addr creates an IPv6Addr from a string. String can be in the form of +// an IPv6:port (e.g. `[2001:4860:0:2001::68]:80`, in which case the mask is +// assumed to be a /128), an IPv6 address (e.g. `2001:4860:0:2001::68`, also +// with a /128 mask), or an IPv6 CIDR (e.g. `2001:4860:0:2001::68/64`, which +// has its IP port initialized to zero). ipv6Str can not be a hostname. +func NewIPv6Addr(ipv6Str string) (IPv6Addr, error) { + v6Addr := false + for i := 0; i < len(ipv6Str); i++ { + if ipv6Str[i] == ':' { + v6Addr = true + break + } + } + if !v6Addr { + return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as an IPv6 address, appears to be an IPv4 address", ipv6Str) + } + + // Attempt to parse ipv6Str as a /128 host with a port number. + tcpAddr, err := net.ResolveTCPAddr("tcp6", ipv6Str) + if err == nil { + ipv6 := tcpAddr.IP.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to resolve %+q as a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.Set(ipv6HostMask) + + ipv6Addr := IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + Port: IPPort(tcpAddr.Port), + } + return ipv6Addr, nil + } + + // Parse as a naked IPv6 address. Trim [] from the IP address if present. + if len(ipv6Str) > 2 && ipv6Str[0] == '[' && ipv6Str[len(ipv6Str)-1] == ']' { + ipv6Str = ipv6Str[1 : len(ipv6Str)-1] + } + ip := net.ParseIP(ipv6Str) + if ip != nil { + ipv6 := ip.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to string convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.Set(ipv6HostMask) + + return IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + }, nil + } + + // Parse as an IPv6 CIDR + ipAddr, network, err := net.ParseCIDR(ipv6Str) + if err == nil { + ipv6 := ipAddr.To16() + if ipv6 == nil { + return IPv6Addr{}, fmt.Errorf("Unable to convert %+q to a 16byte IPv6 address", ipv6Str) + } + + ipv6BigIntAddr := new(big.Int) + ipv6BigIntAddr.SetBytes(ipv6) + + ipv6BigIntMask := new(big.Int) + ipv6BigIntMask.SetBytes(network.Mask) + + ipv6Addr := IPv6Addr{ + Address: IPv6Address(ipv6BigIntAddr), + Mask: IPv6Mask(ipv6BigIntMask), + } + return ipv6Addr, nil + } + + return IPv6Addr{}, fmt.Errorf("Unable to parse %+q to an IPv6 address: %v", ipv6Str, err) +} + +// AddressBinString returns a string with the IPv6Addr's Address represented +// as a sequence of '0' and '1' characters. This method is useful for +// debugging or by operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressBinString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%0128s", bi.Text(2)) +} + +// AddressHexString returns a string with the IPv6Addr address represented as +// a sequence of hex characters. This method is useful for debugging or by +// operators who want to inspect an address. +func (ipv6 IPv6Addr) AddressHexString() string { + bi := big.Int(*ipv6.Address) + return fmt.Sprintf("%032s", bi.Text(16)) +} + +// CmpAddress follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its address is lower than arg +// - 0 if the SockAddr arg is equal to the receiving IPv6Addr or the argument is of a +// different type. +// - 1 If the argument should sort first. +func (ipv6 IPv6Addr) CmpAddress(sa SockAddr) int { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return sortDeferDecision + } + + ipv6aBigInt := new(big.Int) + ipv6aBigInt.Set(ipv6.Address) + ipv6bBigInt := new(big.Int) + ipv6bBigInt.Set(ipv6b.Address) + + return ipv6aBigInt.Cmp(ipv6bBigInt) +} + +// CmpPort follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because its port is lower than arg +// - 0 if the SockAddr arg's port number is equal to the receiving IPv6Addr, +// regardless of type. +// - 1 If the argument should sort first.
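
The "size" attribute registered above is simply 2^(32-maskbits), the total number of addresses in the prefix:

```go
package main

import "fmt"

func main() {
	for _, maskbits := range []uint{32, 31, 24, 8} {
		fmt.Printf("/%d -> %d addresses\n", maskbits, 1<<(32-maskbits))
	}
	// /32 -> 1, /31 -> 2, /24 -> 256, /8 -> 16777216
}
```
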
+func (ipv6 IPv6Addr) CmpPort(sa SockAddr) int { + var saPort IPPort + switch v := sa.(type) { + case IPv4Addr: + saPort = v.Port + case IPv6Addr: + saPort = v.Port + default: + return sortDeferDecision + } + + switch { + case ipv6.Port == saPort: + return sortDeferDecision + case ipv6.Port < saPort: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// CmpRFC follows the Cmp() standard protocol and returns: +// +// - -1 If the receiver should sort first because it belongs to the RFC and its +// arg does not +// - 0 if the receiver and arg both belong to the same RFC or neither do. +// - 1 If the arg belongs to the RFC but receiver does not. +func (ipv6 IPv6Addr) CmpRFC(rfcNum uint, sa SockAddr) int { + recvInRFC := IsRFC(rfcNum, ipv6) + ipv6b, ok := sa.(IPv6Addr) + if !ok { + // If the receiver is part of the desired RFC and the SockAddr + // argument is not, sort receiver before the non-IPv6 SockAddr. + // Conversely, if the receiver is not part of the RFC, punt on + // sorting and leave it for the next sorter. + if recvInRFC { + return sortReceiverBeforeArg + } else { + return sortDeferDecision + } + } + + argInRFC := IsRFC(rfcNum, ipv6b) + switch { + case (recvInRFC && argInRFC), (!recvInRFC && !argInRFC): + // If a and b both belong to the RFC, or neither belong to + // rfcNum, defer sorting to the next sorter. + return sortDeferDecision + case recvInRFC && !argInRFC: + return sortReceiverBeforeArg + default: + return sortArgBeforeReceiver + } +} + +// Contains returns true if the SockAddr is contained within the receiver. +func (ipv6 IPv6Addr) Contains(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + return ipv6.ContainsNetwork(ipv6b) +} + +// ContainsAddress returns true if the IPv6Address is contained within the +// receiver. +func (ipv6 IPv6Addr) ContainsAddress(x IPv6Address) bool { + xAddr := IPv6Addr{ + Address: x, + Mask: ipv6HostMask, + } + + { + xIPv6 := xAddr.FirstUsable().(IPv6Addr) + yIPv6 := ipv6.FirstUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) >= 1 { + return false + } + } + + { + xIPv6 := xAddr.LastUsable().(IPv6Addr) + yIPv6 := ipv6.LastUsable().(IPv6Addr) + if xIPv6.CmpAddress(yIPv6) <= -1 { + return false + } + } + return true +} + +// ContainsNetwork returns true if the network from IPv6Addr is contained within +// the receiver. +func (x IPv6Addr) ContainsNetwork(y IPv6Addr) bool { + { + xIPv6 := x.FirstUsable().(IPv6Addr) + yIPv6 := y.FirstUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret >= 1 { + return false + } + } + + { + xIPv6 := x.LastUsable().(IPv6Addr) + yIPv6 := y.LastUsable().(IPv6Addr) + if ret := xIPv6.CmpAddress(yIPv6); ret <= -1 { + return false + } + } + return true +} + +// DialPacketArgs returns the arguments required to be passed to +// net.DialUDP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialPacketArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. +func (ipv6 IPv6Addr) DialPacketArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// DialStreamArgs returns the arguments required to be passed to +// net.DialTCP(). If the Mask of ipv6 is not a /128 or the Port is 0, +// DialStreamArgs() will fail. See Host() to create an IPv6Addr with its +// mask set to /128. 
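
Because IPv6 addresses are held as big.Ints, containment is the same two boundary comparisons as in the IPv4 case, only via Cmp():

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	network := sockaddr.MustIPv6Addr("2001:db8::/32")

	fmt.Println(network.Contains(sockaddr.MustIPv6Addr("2001:db8::68"))) // true
	fmt.Println(network.Contains(sockaddr.MustIPv6Addr("2001:db9::1")))  // false
}
```
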
+func (ipv6 IPv6Addr) DialStreamArgs() (network, dialArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 || ipv6.Port == 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Equal returns true if a SockAddr is equal to the receiving IPv6Addr. +func (ipv6a IPv6Addr) Equal(sa SockAddr) bool { + ipv6b, ok := sa.(IPv6Addr) + if !ok { + return false + } + + if ipv6a.NetIP().String() != ipv6b.NetIP().String() { + return false + } + + if ipv6a.NetIPNet().String() != ipv6b.NetIPNet().String() { + return false + } + + if ipv6a.Port != ipv6b.Port { + return false + } + + return true +} + +// FirstUsable returns an IPv6Addr set to the first address of the network +// prefix (for IPv6 this is the network address itself; unlike the IPv4 +// implementation, no +1 offset is applied). The first usable address in a +// network is normally the gateway and should not be used except by devices +// forwarding packets between two administratively distinct networks (i.e. a +// router). This function does not discriminate against first usable vs +// "first address that should be used." For example, FirstUsable() on +// "2001:0db8::0003/64" would return "2001:db8::". +func (ipv6 IPv6Addr) FirstUsable() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6HostMask, + } +} + +// Host returns a copy of ipv6 with its mask set to /128 so that it can be +// used by DialPacketArgs(), DialStreamArgs(), ListenPacketArgs(), or +// ListenStreamArgs(). +func (ipv6 IPv6Addr) Host() IPAddr { + // Nothing should listen on a broadcast address. + return IPv6Addr{ + Address: ipv6.Address, + Mask: ipv6HostMask, + Port: ipv6.Port, + } +} + +// IPPort returns the Port number attached to the IPv6Addr +func (ipv6 IPv6Addr) IPPort() IPPort { + return ipv6.Port +} + +// LastUsable returns the last address in a given network. +func (ipv6 IPv6Addr) LastUsable() IPAddr { + addr := new(big.Int) + addr.Set(ipv6.Address) + + mask := new(big.Int) + mask.Set(ipv6.Mask) + + negMask := new(big.Int) + negMask.Xor(ipv6HostMask, mask) + + lastAddr := new(big.Int) + lastAddr.And(addr, mask) + lastAddr.Or(lastAddr, negMask) + + return IPv6Addr{ + Address: IPv6Address(lastAddr), + Mask: ipv6HostMask, + } +} + +// ListenPacketArgs returns the arguments required to be passed to +// net.ListenUDP(). If the Mask of ipv6 is not a /128, ListenPacketArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenPacketArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "udp6", "" + } + return "udp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// ListenStreamArgs returns the arguments required to be passed to +// net.ListenTCP(). If the Mask of ipv6 is not a /128, ListenStreamArgs() +// will fail. See Host() to create an IPv6Addr with its mask set to /128. +func (ipv6 IPv6Addr) ListenStreamArgs() (network, listenArgs string) { + ipv6Mask := big.Int(*ipv6.Mask) + if ipv6Mask.Cmp(ipv6HostMask) != 0 { + return "tcp6", "" + } + return "tcp6", fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) +} + +// Maskbits returns the number of network mask bits in a given IPv6Addr. For +// example, the Maskbits() of "2001:0db8::0003/64" would return 64. +func (ipv6 IPv6Addr) Maskbits() int { + maskOnes, _ := ipv6.NetIPNet().Mask.Size() + + return maskOnes +} + +// MustIPv6Addr is a helper method that must return an IPv6Addr or panic on +// invalid input.
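
LastUsable computes (addr AND mask) OR (hostMask XOR mask): the XOR against the all-ones /128 mask yields the host bits, which are then set to one. The same computation with only the standard library:

```go
package main

import (
	"fmt"
	"math/big"
	"net"
)

func main() {
	_, ipNet, _ := net.ParseCIDR("2001:db8::/32")

	addr := new(big.Int).SetBytes(ipNet.IP.To16())
	mask := new(big.Int).SetBytes(ipNet.Mask)
	hostMask := new(big.Int).SetBytes(net.CIDRMask(128, 128)) // /128, all ones

	// last = (addr & mask) | (hostMask ^ mask)
	last := new(big.Int).And(addr, mask)
	last.Or(last, new(big.Int).Xor(hostMask, mask))

	fmt.Printf("%032x\n", last) // 20010db8ffffffffffffffffffffffff
}
```
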
+func MustIPv6Addr(addr string) IPv6Addr { + ipv6, err := NewIPv6Addr(addr) + if err != nil { + panic(fmt.Sprintf("Unable to create an IPv6Addr from %+q: %v", addr, err)) + } + return ipv6 +} + +// NetIP returns the address as a net.IP. +func (ipv6 IPv6Addr) NetIP() *net.IP { + return bigIntToNetIPv6(ipv6.Address) +} + +// NetIPMask creates a new net.IPMask from the IPv6Addr. +func (ipv6 IPv6Addr) NetIPMask() *net.IPMask { + ipv6Mask := make(net.IPMask, IPv6len) + m := big.Int(*ipv6.Mask) + copy(ipv6Mask, m.Bytes()) + return &ipv6Mask +} + +// NetIPNet returns a pointer to the net.IPNet within the IPv6Addr receiver. +func (ipv6 IPv6Addr) NetIPNet() *net.IPNet { + ipv6net := &net.IPNet{} + ipv6net.IP = make(net.IP, IPv6len) + copy(ipv6net.IP, *ipv6.NetIP()) + ipv6net.Mask = *ipv6.NetIPMask() + return ipv6net +} + +// Network returns the network prefix or network address for a given network. +func (ipv6 IPv6Addr) Network() IPAddr { + return IPv6Addr{ + Address: IPv6Address(ipv6.NetworkAddress()), + Mask: ipv6.Mask, + } +} + +// NetworkAddress returns an IPv6Network of the IPv6Addr's network address. +func (ipv6 IPv6Addr) NetworkAddress() IPv6Network { + addr := new(big.Int) + addr.SetBytes((*ipv6.Address).Bytes()) + + mask := new(big.Int) + mask.SetBytes(*ipv6.NetIPMask()) + + netAddr := new(big.Int) + netAddr.And(addr, mask) + + return IPv6Network(netAddr) +} + +// Octets returns a slice of the 16 octets in an IPv6Addr's Address. The +// order of the bytes is big endian. +func (ipv6 IPv6Addr) Octets() []int { + x := make([]int, IPv6len) + for i, b := range *bigIntToNetIPv6(ipv6.Address) { + x[i] = int(b) + } + + return x +} + +// String returns a string representation of the IPv6Addr +func (ipv6 IPv6Addr) String() string { + if ipv6.Port != 0 { + return fmt.Sprintf("[%s]:%d", ipv6.NetIP().String(), ipv6.Port) + } + + if ipv6.Maskbits() == 128 { + return ipv6.NetIP().String() + } + + return fmt.Sprintf("%s/%d", ipv6.NetIP().String(), ipv6.Maskbits()) +} + +// Type is used as a type switch and returns TypeIPv6 +func (IPv6Addr) Type() SockAddrType { + return TypeIPv6 +} + +// IPv6Attrs returns a list of attributes supported by the IPv6Addr type +func IPv6Attrs() []AttrName { + return ipv6AddrAttrs +} + +// IPv6AddrAttr returns a string representation of an attribute for the given +// IPv6Addr. +func IPv6AddrAttr(ipv6 IPv6Addr, selector AttrName) string { + fn, found := ipv6AddrAttrMap[selector] + if !found { + return "" + } + + return fn(ipv6) +} + +// ipv6AddrInit is called once at init() +func ipv6AddrInit() { + // Sorted for human readability + ipv6AddrAttrs = []AttrName{ + "size", // Same position as in IPv4 for output consistency + "uint128", + } + + ipv6AddrAttrMap = map[AttrName]func(ipv6 IPv6Addr) string{ + "size": func(ipv6 IPv6Addr) string { + netSize := big.NewInt(1) + netSize = netSize.Lsh(netSize, uint(IPv6len*8-ipv6.Maskbits())) + return netSize.Text(10) + }, + "uint128": func(ipv6 IPv6Addr) string { + b := big.Int(*ipv6.Address) + return b.Text(10) + }, + } +} + +// bigIntToNetIPv6 is a helper function that returns a net.IP with correctly +// padded values. +func bigIntToNetIPv6(bi *big.Int) *net.IP { + x := make(net.IP, IPv6len) + ipv6Bytes := bi.Bytes() + + // It's possible for ipv6Bytes to be less than IPv6len bytes in size. If + // they are different sizes we need to pad the response.
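
The padding dance in bigIntToNetIPv6 exists because big.Int.Bytes() drops leading zero bytes; without it, "::1" would come back as a one-byte net.IP. A standalone illustration (modern Go's big.Int.FillBytes, added in Go 1.15, does this in one call, but the vendored code predates it):

```go
package main

import (
	"fmt"
	"math/big"
	"net"
)

func main() {
	one := big.NewInt(1)          // the address ::1 as an integer
	fmt.Println(len(one.Bytes())) // 1, not 16

	// Left-pad into a fixed 16-byte slice, as bigIntToNetIPv6 does above.
	ip := make(net.IP, net.IPv6len)
	copy(ip[net.IPv6len-len(one.Bytes()):], one.Bytes())
	fmt.Println(ip) // ::1
}
```
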
+ if len(ipv6Bytes) < IPv6len { + buf := new(bytes.Buffer) + buf.Grow(IPv6len) + + for i := len(ipv6Bytes); i < IPv6len; i++ { + if err := binary.Write(buf, binary.BigEndian, byte(0)); err != nil { + panic(fmt.Sprintf("Unable to pad byte %d of input %v: %v", i, bi, err)) + } + } + + for _, b := range ipv6Bytes { + if err := binary.Write(buf, binary.BigEndian, b); err != nil { + panic(fmt.Sprintf("Unable to preserve endianness of input %v: %v", bi, err)) + } + } + + ipv6Bytes = buf.Bytes() + } + i := copy(x, ipv6Bytes) + if i != IPv6len { + panic("IPv6 wrong size") + } + return &x +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc.go b/vendor/github.com/hashicorp/go-sockaddr/rfc.go new file mode 100644 index 0000000000..02e188f6fe --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/rfc.go @@ -0,0 +1,948 @@ +package sockaddr + +// ForwardingBlacklist is a faux RFC that includes a list of non-forwardable IP +// blocks. +const ForwardingBlacklist = 4294967295 + +// IsRFC tests to see if a SockAddr matches the specified RFC +func IsRFC(rfcNum uint, sa SockAddr) bool { + rfcNetMap := KnownRFCs() + rfcNets, ok := rfcNetMap[rfcNum] + if !ok { + return false + } + + var contained bool + for _, rfcNet := range rfcNets { + if rfcNet.Contains(sa) { + contained = true + break + } + } + return contained +} + +// KnownRFCs returns an initial set of known RFCs. +// +// NOTE (sean@): As this list evolves over time, please submit patches to keep +// this list current. If something isn't right, inquire, as it may just be a +// bug on my part. Some of the inclusions were based on my judgement as to what +// would be a useful value (e.g. RFC3330). +// +// Useful resources: +// +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +// * https://www.iana.org/assignments/ipv6-unicast-address-assignments/ipv6-unicast-address-assignments.xhtml +// * https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +func KnownRFCs() map[uint]SockAddrs { + // NOTE(sean@): Multiple SockAddrs per RFC lend themselves well to a + // RADIX tree, but `ENOTIME`. Patches welcome. + return map[uint]SockAddrs{ + 919: { + // [RFC919] Broadcasting Internet Datagrams + MustIPv4Addr("255.255.255.255/32"), // [RFC1122], §7 Broadcast IP Addressing - Proposed Standards + }, + 1122: { + // [RFC1122] Requirements for Internet Hosts -- Communication Layers + MustIPv4Addr("0.0.0.0/8"), // [RFC1122], §3.2.1.3 + MustIPv4Addr("127.0.0.0/8"), // [RFC1122], §3.2.1.3 + }, + 1112: { + // [RFC1112] Host Extensions for IP Multicasting + MustIPv4Addr("224.0.0.0/4"), // [RFC1112], §4 Host Group Addresses + }, + 1918: { + // [RFC1918] Address Allocation for Private Internets + MustIPv4Addr("10.0.0.0/8"), + MustIPv4Addr("172.16.0.0/12"), + MustIPv4Addr("192.168.0.0/16"), + }, + 2544: { + // [RFC2544] Benchmarking Methodology for Network + // Interconnect Devices + MustIPv4Addr("198.18.0.0/15"), + }, + 2765: { + // [RFC2765] Stateless IP/ICMP Translation Algorithm + // (SIIT) (obsoleted by RFC 6145, which itself was + // later obsoleted by 7915).
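
IsRFC is a linear scan over the blocks registered for one RFC number, with Contains() doing the per-block work:

```go
package main

import (
	"fmt"

	sockaddr "github.com/hashicorp/go-sockaddr"
)

func main() {
	fmt.Println(sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("10.1.2.3"))) // true
	fmt.Println(sockaddr.IsRFC(1918, sockaddr.MustIPv4Addr("8.8.8.8")))  // false
	fmt.Println(sockaddr.IsRFC(4193, sockaddr.MustIPv6Addr("fd00::1")))  // true
}
```
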
+ + // [RFC2765], §2.1 Addresses + MustIPv6Addr("0:0:0:0:0:ffff:0:0/96"), + }, + 2928: { + // [RFC2928] Initial IPv6 Sub-TLA ID Assignments + MustIPv6Addr("2001::/16"), // Superblock + //MustIPv6Addr("2001:0000::/23"), // IANA + //MustIPv6Addr("2001:0200::/23"), // APNIC + //MustIPv6Addr("2001:0400::/23"), // ARIN + //MustIPv6Addr("2001:0600::/23"), // RIPE NCC + //MustIPv6Addr("2001:0800::/23"), // (future assignment) + // ... + //MustIPv6Addr("2001:FE00::/23"), // (future assignment) + }, + 3056: { // 6to4 address + // [RFC3056] Connection of IPv6 Domains via IPv4 Clouds + + // [RFC3056], §2 IPv6 Prefix Allocation + MustIPv6Addr("2002::/16"), + }, + 3068: { + // [RFC3068] An Anycast Prefix for 6to4 Relay Routers + // (obsolete by RFC7526) + + // [RFC3068], § 6to4 Relay anycast address + MustIPv4Addr("192.88.99.0/24"), + + // [RFC3068], §2.5 6to4 IPv6 relay anycast address + // + // NOTE: /120 == 128-(32-24) + MustIPv6Addr("2002:c058:6301::/120"), + }, + 3171: { + // [RFC3171] IANA Guidelines for IPv4 Multicast Address Assignments + MustIPv4Addr("224.0.0.0/4"), + }, + 3330: { + // [RFC3330] Special-Use IPv4 Addresses + + // Addresses in this block refer to source hosts on + // "this" network. Address 0.0.0.0/32 may be used as a + // source address for this host on this network; other + // addresses within 0.0.0.0/8 may be used to refer to + // specified hosts on this network [RFC1700, page 4]. + MustIPv4Addr("0.0.0.0/8"), + + // 10.0.0.0/8 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("10.0.0.0/8"), + + // 14.0.0.0/8 - This block is set aside for assignments + // to the international system of Public Data Networks + // [RFC1700, page 181]. The registry of assignments + // within this block can be accessed from the "Public + // Data Network Numbers" link on the web page at + // http://www.iana.org/numbers.html. Addresses within + // this block are assigned to users and should be + // treated as such. + + // 24.0.0.0/8 - This block was allocated in early 1996 + // for use in provisioning IP service over cable + // television systems. Although the IANA initially was + // involved in making assignments to cable operators, + // this responsibility was transferred to American + // Registry for Internet Numbers (ARIN) in May 2001. + // Addresses within this block are assigned in the + // normal manner and should be treated as such. + + // 39.0.0.0/8 - This block was used in the "Class A + // Subnet Experiment" that commenced in May 1995, as + // documented in [RFC1797]. The experiment has been + // completed and this block has been returned to the + // pool of addresses reserved for future allocation or + // assignment. This block therefore no longer has a + // special use and is subject to allocation to a + // Regional Internet Registry for assignment in the + // normal manner. + + // 127.0.0.0/8 - This block is assigned for use as the Internet host + // loopback address. A datagram sent by a higher level protocol to an + // address anywhere within this block should loop back inside the host. + // This is ordinarily implemented using only 127.0.0.1/32 for loopback, + // but no addresses within this block should ever appear on any network + // anywhere [RFC1700, page 5]. 
+ MustIPv4Addr("127.0.0.0/8"), + + // 128.0.0.0/16 - This block, corresponding to the + // numerically lowest of the former Class B addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 169.254.0.0/16 - This is the "link local" block. It + // is allocated for communication between hosts on a + // single link. Hosts obtain these addresses by + // auto-configuration, such as when a DHCP server may + // not be found. + MustIPv4Addr("169.254.0.0/16"), + + // 172.16.0.0/12 - This block is set aside for use in + // private networks. Its intended use is documented in + // [RFC1918]. Addresses within this block should not + // appear on the public Internet. + MustIPv4Addr("172.16.0.0/12"), + + // 191.255.0.0/16 - This block, corresponding to the numerically highest + // to the former Class B addresses, was initially and is still reserved + // by the IANA. Given the present classless nature of the IP address + // space, the basis for the reservation no longer applies and addresses + // in this block are subject to future allocation to a Regional Internet + // Registry for assignment in the normal manner. + + // 192.0.0.0/24 - This block, corresponding to the + // numerically lowest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 192.0.2.0/24 - This block is assigned as "TEST-NET" for use in + // documentation and example code. It is often used in conjunction with + // domain names example.com or example.net in vendor and protocol + // documentation. Addresses within this block should not appear on the + // public Internet. + MustIPv4Addr("192.0.2.0/24"), + + // 192.88.99.0/24 - This block is allocated for use as 6to4 relay + // anycast addresses, according to [RFC3068]. + MustIPv4Addr("192.88.99.0/24"), + + // 192.168.0.0/16 - This block is set aside for use in private networks. + // Its intended use is documented in [RFC1918]. Addresses within this + // block should not appear on the public Internet. + MustIPv4Addr("192.168.0.0/16"), + + // 198.18.0.0/15 - This block has been allocated for use + // in benchmark tests of network interconnect devices. + // Its use is documented in [RFC2544]. + MustIPv4Addr("198.18.0.0/15"), + + // 223.255.255.0/24 - This block, corresponding to the + // numerically highest of the former Class C addresses, + // was initially and is still reserved by the IANA. + // Given the present classless nature of the IP address + // space, the basis for the reservation no longer + // applies and addresses in this block are subject to + // future allocation to a Regional Internet Registry for + // assignment in the normal manner. + + // 224.0.0.0/4 - This block, formerly known as the Class + // D address space, is allocated for use in IPv4 + // multicast address assignments. The IANA guidelines + // for assignments from this space are described in + // [RFC3171]. + MustIPv4Addr("224.0.0.0/4"), + + // 240.0.0.0/4 - This block, formerly known as the Class E address + // space, is reserved. 
The "limited broadcast" destination address + // 255.255.255.255 should never be forwarded outside the (sub-)net of + // the source. The remainder of this space is reserved + // for future use. [RFC1700, page 4] + MustIPv4Addr("240.0.0.0/4"), + }, + 3849: { + // [RFC3849] IPv6 Address Prefix Reserved for Documentation + MustIPv6Addr("2001:db8::/32"), // [RFC3849], §4 IANA Considerations + }, + 3927: { + // [RFC3927] Dynamic Configuration of IPv4 Link-Local Addresses + MustIPv4Addr("169.254.0.0/16"), // [RFC3927], §2.1 Link-Local Address Selection + }, + 4038: { + // [RFC4038] Application Aspects of IPv6 Transition + + // [RFC4038], §4.2. IPv6 Applications in a Dual-Stack Node + MustIPv6Addr("0:0:0:0:0:ffff::/96"), + }, + 4193: { + // [RFC4193] Unique Local IPv6 Unicast Addresses + MustIPv6Addr("fc00::/7"), + }, + 4291: { + // [RFC4291] IP Version 6 Addressing Architecture + + // [RFC4291], §2.5.2 The Unspecified Address + MustIPv6Addr("::/128"), + + // [RFC4291], §2.5.3 The Loopback Address + MustIPv6Addr("::1/128"), + + // [RFC4291], §2.5.5.1. IPv4-Compatible IPv6 Address + MustIPv6Addr("::/96"), + + // [RFC4291], §2.5.5.2. IPv4-Mapped IPv6 Address + MustIPv6Addr("::ffff:0:0/96"), + + // [RFC4291], §2.5.6 Link-Local IPv6 Unicast Addresses + MustIPv6Addr("fe80::/10"), + + // [RFC4291], §2.5.7 Site-Local IPv6 Unicast Addresses + // (deprecated) + MustIPv6Addr("fec0::/10"), + + // [RFC4291], §2.7 Multicast Addresses + MustIPv6Addr("ff00::/8"), + + // IPv6 Multicast Information. + // + // In the following "table" below, `ff0x` is replaced + // with the following values depending on the scope of + // the query: + // + // IPv6 Multicast Scopes: + // * ff00/9 // reserved + // * ff01/9 // interface-local + // * ff02/9 // link-local + // * ff03/9 // realm-local + // * ff04/9 // admin-local + // * ff05/9 // site-local + // * ff08/9 // organization-local + // * ff0e/9 // global + // * ff0f/9 // reserved + // + // IPv6 Multicast Addresses: + // * ff0x::2 // All routers + // * ff02::5 // OSPFIGP + // * ff02::6 // OSPFIGP Designated Routers + // * ff02::9 // RIP Routers + // * ff02::a // EIGRP Routers + // * ff02::d // All PIM Routers + // * ff02::1a // All RPL Routers + // * ff0x::fb // mDNSv6 + // * ff0x::101 // All Network Time Protocol (NTP) servers + // * ff02::1:1 // Link Name + // * ff02::1:2 // All-dhcp-agents + // * ff02::1:3 // Link-local Multicast Name Resolution + // * ff05::1:3 // All-dhcp-servers + // * ff02::1:ff00:0/104 // Solicited-node multicast address.
+		// * ff02::2:ff00:0/104 // Node Information Queries
+	},
+	4380: {
+		// [RFC4380] Teredo: Tunneling IPv6 over UDP through
+		// Network Address Translations (NATs)
+
+		// [RFC4380], §2.6 Global Teredo IPv6 Service Prefix
+		MustIPv6Addr("2001:0000::/32"),
+	},
+	4773: {
+		// [RFC4773] Administration of the IANA Special Purpose IPv6 Address Block
+		MustIPv6Addr("2001:0000::/23"), // IANA
+	},
+	4843: {
+		// [RFC4843] An IPv6 Prefix for Overlay Routable Cryptographic Hash Identifiers (ORCHID)
+		MustIPv6Addr("2001:10::/28"), // [RFC4843], §7 IANA Considerations
+	},
+	5180: {
+		// [RFC5180] IPv6 Benchmarking Methodology for Network Interconnect Devices
+		MustIPv6Addr("2001:0200::/48"), // [RFC5180], §8 IANA Considerations
+	},
+	5735: {
+		// [RFC5735] Special Use IPv4 Addresses
+		MustIPv4Addr("192.0.2.0/24"),    // TEST-NET-1
+		MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+		MustIPv4Addr("203.0.113.0/24"),  // TEST-NET-3
+		MustIPv4Addr("198.18.0.0/15"),   // Benchmarks
+	},
+	5737: {
+		// [RFC5737] IPv4 Address Blocks Reserved for Documentation
+		MustIPv4Addr("192.0.2.0/24"),    // TEST-NET-1
+		MustIPv4Addr("198.51.100.0/24"), // TEST-NET-2
+		MustIPv4Addr("203.0.113.0/24"),  // TEST-NET-3
+	},
+	6052: {
+		// [RFC6052] IPv6 Addressing of IPv4/IPv6 Translators
+		MustIPv6Addr("64:ff9b::/96"), // [RFC6052], §2.1. Well-Known Prefix
+	},
+	6333: {
+		// [RFC6333] Dual-Stack Lite Broadband Deployments Following IPv4 Exhaustion
+		MustIPv4Addr("192.0.0.0/29"), // [RFC6333], §5.7 Well-Known IPv4 Address
+	},
+	6598: {
+		// [RFC6598] IANA-Reserved IPv4 Prefix for Shared Address Space
+		MustIPv4Addr("100.64.0.0/10"),
+	},
+	6666: {
+		// [RFC6666] A Discard Prefix for IPv6
+		MustIPv6Addr("0100::/64"),
+	},
+	6890: {
+		// [RFC6890] Special-Purpose IP Address Registries
+
+		// From "RFC6890 §2.2.1 Information Requirements":
+		/*
+		   The IPv4 and IPv6 Special-Purpose Address Registries maintain the
+		   following information regarding each entry:
+
+		   o Address Block - A block of IPv4 or IPv6 addresses that has been
+		     registered for a special purpose.
+
+		   o Name - A descriptive name for the special-purpose address block.
+
+		   o RFC - The RFC through which the special-purpose address block was
+		     requested.
+
+		   o Allocation Date - The date upon which the special-purpose address
+		     block was allocated.
+
+		   o Termination Date - The date upon which the allocation is to be
+		     terminated.  This field is applicable for limited-use allocations
+		     only.
+
+		   o Source - A boolean value indicating whether an address from the
+		     allocated special-purpose address block is valid when used as the
+		     source address of an IP datagram that transits two devices.
+
+		   o Destination - A boolean value indicating whether an address from
+		     the allocated special-purpose address block is valid when used as
+		     the destination address of an IP datagram that transits two
+		     devices.
+
+		   o Forwardable - A boolean value indicating whether a router may
+		     forward an IP datagram whose destination address is drawn from the
+		     allocated special-purpose address block between external
+		     interfaces.
+
+		   o Global - A boolean value indicating whether an IP datagram whose
+		     destination address is drawn from the allocated special-purpose
+		     address block is forwardable beyond a specified administrative
+		     domain.
+
+		   o Reserved-by-Protocol - A boolean value indicating whether the
+		     special-purpose address block is reserved by IP, itself.  This
+		     value is "TRUE" if the RFC that created the special-purpose
+		     address block requires all compliant IP implementations to behave
+		     in a special way when processing packets either to or from
+		     addresses contained by the address block.
+
+		   If the value of "Destination" is FALSE, the values of "Forwardable"
+		   and "Global" must also be false.
+		*/
+
+		/*+----------------------+----------------------------+
+		 * | Attribute            | Value                      |
+		 * +----------------------+----------------------------+
+		 * | Address Block        | 0.0.0.0/8                  |
+		 * | Name                 | "This host on this network"|
+		 * | RFC                  | [RFC1122], Section 3.2.1.3 |
+		 * | Allocation Date      | September 1981             |
+		 * | Termination Date     | N/A                        |
+		 * | Source               | True                       |
+		 * | Destination          | False                      |
+		 * | Forwardable          | False                      |
+		 * | Global               | False                      |
+		 * | Reserved-by-Protocol | True                       |
+		 * +----------------------+----------------------------+*/
+		MustIPv4Addr("0.0.0.0/8"),
+
+		/*+----------------------+---------------+
+		 * | Attribute            | Value         |
+		 * +----------------------+---------------+
+		 * | Address Block        | 10.0.0.0/8    |
+		 * | Name                 | Private-Use   |
+		 * | RFC                  | [RFC1918]     |
+		 * | Allocation Date      | February 1996 |
+		 * | Termination Date     | N/A           |
+		 * | Source               | True          |
+		 * | Destination          | True          |
+		 * | Forwardable          | True          |
+		 * | Global               | False         |
+		 * | Reserved-by-Protocol | False         |
+		 * +----------------------+---------------+ */
+		MustIPv4Addr("10.0.0.0/8"),
+
+		/*+----------------------+----------------------+
+		  | Attribute            | Value                |
+		  +----------------------+----------------------+
+		  | Address Block        | 100.64.0.0/10        |
+		  | Name                 | Shared Address Space |
+		  | RFC                  | [RFC6598]            |
+		  | Allocation Date      | April 2012           |
+		  | Termination Date     | N/A                  |
+		  | Source               | True                 |
+		  | Destination          | True                 |
+		  | Forwardable          | True                 |
+		  | Global               | False                |
+		  | Reserved-by-Protocol | False                |
+		  +----------------------+----------------------+*/
+		MustIPv4Addr("100.64.0.0/10"),
+
+		/*+----------------------+----------------------------+
+		  | Attribute            | Value                      |
+		  +----------------------+----------------------------+
+		  | Address Block        | 127.0.0.0/8                |
+		  | Name                 | Loopback                   |
+		  | RFC                  | [RFC1122], Section 3.2.1.3 |
+		  | Allocation Date      | September 1981             |
+		  | Termination Date     | N/A                        |
+		  | Source               | False [1]                  |
+		  | Destination          | False [1]                  |
+		  | Forwardable          | False [1]                  |
+		  | Global               | False [1]                  |
+		  | Reserved-by-Protocol | True                       |
+		  +----------------------+----------------------------+*/
+		// [1] Several protocols have been granted exceptions to
+		// this rule.  For examples, see [RFC4379] and
+		// [RFC5884].
+		MustIPv4Addr("127.0.0.0/8"),
+
+		/*+----------------------+----------------+
+		  | Attribute            | Value          |
+		  +----------------------+----------------+
+		  | Address Block        | 169.254.0.0/16 |
+		  | Name                 | Link Local     |
+		  | RFC                  | [RFC3927]      |
+		  | Allocation Date      | May 2005       |
+		  | Termination Date     | N/A            |
+		  | Source               | True           |
+		  | Destination          | True           |
+		  | Forwardable          | False          |
+		  | Global               | False          |
+		  | Reserved-by-Protocol | True           |
+		  +----------------------+----------------+*/
+		MustIPv4Addr("169.254.0.0/16"),
+
+		/*+----------------------+---------------+
+		  | Attribute            | Value         |
+		  +----------------------+---------------+
+		  | Address Block        | 172.16.0.0/12 |
+		  | Name                 | Private-Use   |
+		  | RFC                  | [RFC1918]     |
+		  | Allocation Date      | February 1996 |
+		  | Termination Date     | N/A           |
+		  | Source               | True          |
+		  | Destination          | True          |
+		  | Forwardable          | True          |
+		  | Global               | False         |
+		  | Reserved-by-Protocol | False         |
+		  +----------------------+---------------+*/
+		MustIPv4Addr("172.16.0.0/12"),
+
+		/*+----------------------+------------------------------+
+		  | Attribute            | Value                        |
+		  +----------------------+------------------------------+
+		  | Address Block        | 192.0.0.0/24 [2]             |
+		  | Name                 | IETF Protocol Assignments    |
+		  | RFC                  | Section 2.1 of this document |
+		  | Allocation Date      | January 2010                 |
+		  | Termination Date     | N/A                          |
+		  | Source               | False                        |
+		  | Destination          | False                        |
+		  | Forwardable          | False                        |
+		  | Global               | False                        |
+		  | Reserved-by-Protocol | False                        |
+		  +----------------------+------------------------------+*/
+		// [2] Not usable unless by virtue of a more specific
+		// reservation.
+		MustIPv4Addr("192.0.0.0/24"),
+
+		/*+----------------------+--------------------------------+
+		  | Attribute            | Value                          |
+		  +----------------------+--------------------------------+
+		  | Address Block        | 192.0.0.0/29                   |
+		  | Name                 | IPv4 Service Continuity Prefix |
+		  | RFC                  | [RFC6333], [RFC7335]           |
+		  | Allocation Date      | June 2011                      |
+		  | Termination Date     | N/A                            |
+		  | Source               | True                           |
+		  | Destination          | True                           |
+		  | Forwardable          | True                           |
+		  | Global               | False                          |
+		  | Reserved-by-Protocol | False                          |
+		  +----------------------+--------------------------------+*/
+		MustIPv4Addr("192.0.0.0/29"),
+
+		/*+----------------------+----------------------------+
+		  | Attribute            | Value                      |
+		  +----------------------+----------------------------+
+		  | Address Block        | 192.0.2.0/24               |
+		  | Name                 | Documentation (TEST-NET-1) |
+		  | RFC                  | [RFC5737]                  |
+		  | Allocation Date      | January 2010               |
+		  | Termination Date     | N/A                        |
+		  | Source               | False                      |
+		  | Destination          | False                      |
+		  | Forwardable          | False                      |
+		  | Global               | False                      |
+		  | Reserved-by-Protocol | False                      |
+		  +----------------------+----------------------------+*/
+		MustIPv4Addr("192.0.2.0/24"),
+
+		/*+----------------------+--------------------+
+		  | Attribute            | Value              |
+		  +----------------------+--------------------+
+		  | Address Block        | 192.88.99.0/24     |
+		  | Name                 | 6to4 Relay Anycast |
+		  | RFC                  | [RFC3068]          |
+		  | Allocation Date      | June 2001          |
+		  | Termination Date     | N/A                |
+		  | Source               | True               |
+		  | Destination          | True               |
+		  | Forwardable          | True               |
+		  | Global               | True               |
+		  | Reserved-by-Protocol | False              |
+		  +----------------------+--------------------+*/
+		MustIPv4Addr("192.88.99.0/24"),
+
+		/*+----------------------+----------------+
+		  | Attribute            | Value          |
+		  +----------------------+----------------+
+		  | Address Block        | 192.168.0.0/16 |
+		  | Name                 | Private-Use    |
+		  | RFC                  | [RFC1918]      |
+		  | Allocation Date      | February 1996  |
+		  | Termination Date     | N/A            |
+		  | Source               | True           |
+		  | Destination          | True           |
+		  | Forwardable          | True           |
+		  | Global               | False          |
+		  | Reserved-by-Protocol | False          |
+		  +----------------------+----------------+*/
+		MustIPv4Addr("192.168.0.0/16"),
+
+		/*+----------------------+---------------+
+		  | Attribute            | Value         |
+		  +----------------------+---------------+
+		  | Address Block        | 198.18.0.0/15 |
+		  | Name                 | Benchmarking  |
+		  | RFC                  | [RFC2544]     |
+		  | Allocation Date      | March 1999    |
+		  | Termination Date     | N/A           |
+		  | Source               | True          |
+		  | Destination          | True          |
+		  | Forwardable          | True          |
+		  | Global               | False         |
+		  | Reserved-by-Protocol | False         |
+		  +----------------------+---------------+*/
+		MustIPv4Addr("198.18.0.0/15"),
+
+		/*+----------------------+----------------------------+
+		  | Attribute            | Value                      |
+		  +----------------------+----------------------------+
+		  | Address Block        | 198.51.100.0/24            |
+		  | Name                 | Documentation (TEST-NET-2) |
+		  | RFC                  | [RFC5737]                  |
+		  | Allocation Date      | January 2010               |
+		  | Termination Date     | N/A                        |
+		  | Source               | False                      |
+		  | Destination          | False                      |
+		  | Forwardable          | False                      |
+		  | Global               | False                      |
+		  | Reserved-by-Protocol | False                      |
+		  +----------------------+----------------------------+*/
+		MustIPv4Addr("198.51.100.0/24"),
+
+		/*+----------------------+----------------------------+
+		  | Attribute            | Value                      |
+		  +----------------------+----------------------------+
+		  | Address Block        | 203.0.113.0/24             |
+		  | Name                 | Documentation (TEST-NET-3) |
+		  | RFC                  | [RFC5737]                  |
+		  | Allocation Date      | January 2010               |
+		  | Termination Date     | N/A                        |
+		  | Source               | False                      |
+		  | Destination          | False                      |
+		  | Forwardable          | False                      |
+		  | Global               | False                      |
+		  | Reserved-by-Protocol | False                      |
+		  +----------------------+----------------------------+*/
+		MustIPv4Addr("203.0.113.0/24"),
+
+		/*+----------------------+----------------------+
+		  | Attribute            | Value                |
+		  +----------------------+----------------------+
+		  | Address Block        | 240.0.0.0/4          |
+		  | Name                 | Reserved             |
+		  | RFC                  | [RFC1112], Section 4 |
+		  | Allocation Date      | August 1989          |
+		  | Termination Date     | N/A                  |
+		  | Source               | False                |
+		  | Destination          | False                |
+		  | Forwardable          | False                |
+		  | Global               | False                |
+		  | Reserved-by-Protocol | True                 |
+		  +----------------------+----------------------+*/
+		MustIPv4Addr("240.0.0.0/4"),
+
+		/*+----------------------+----------------------+
+		  | Attribute            | Value                |
+		  +----------------------+----------------------+
+		  | Address Block        | 255.255.255.255/32   |
+		  | Name                 | Limited Broadcast    |
+		  | RFC                  | [RFC0919], Section 7 |
+		  | Allocation Date      | October 1984         |
+		  | Termination Date     | N/A                  |
+		  | Source               | False                |
+		  | Destination          | True                 |
+		  | Forwardable          | False                |
+		  | Global               | False                |
+		  | Reserved-by-Protocol | False                |
+		  +----------------------+----------------------+*/
+		MustIPv4Addr("255.255.255.255/32"),
+
+		/*+----------------------+------------------+
+		  | Attribute            | Value            |
+		  +----------------------+------------------+
+		  | Address Block        | ::1/128          |
+		  | Name                 | Loopback Address |
+		  | RFC                  | [RFC4291]        |
+		  | Allocation Date      | February 2006    |
+		  | Termination Date     | N/A              |
+		  | Source               | False            |
+		  | Destination          | False            |
+		  | Forwardable          | False            |
+		  | Global               | False            |
+		  | Reserved-by-Protocol | True             |
+		  +----------------------+------------------+*/
+		MustIPv6Addr("::1/128"),
+
+		/*+----------------------+---------------------+
+		  | Attribute            | Value               |
+		  +----------------------+---------------------+
+		  | Address Block        | ::/128              |
+		  | Name                 | Unspecified Address |
+		  | RFC                  | [RFC4291]           |
+		  | Allocation Date      | February 2006       |
+		  | Termination Date     | N/A                 |
+		  | Source               | True                |
+		  | Destination          | False               |
+		  | Forwardable          | False               |
+		  | Global               | False               |
+		  | Reserved-by-Protocol | True                |
+		  +----------------------+---------------------+*/
+		MustIPv6Addr("::/128"),
+
+		/*+----------------------+---------------------+
+		  | Attribute            | Value               |
+		  +----------------------+---------------------+
+		  | Address Block        | 64:ff9b::/96        |
+		  | Name                 | IPv4-IPv6 Translat. |
+		  | RFC                  | [RFC6052]           |
+		  | Allocation Date      | October 2010        |
+		  | Termination Date     | N/A                 |
+		  | Source               | True                |
+		  | Destination          | True                |
+		  | Forwardable          | True                |
+		  | Global               | True                |
+		  | Reserved-by-Protocol | False               |
+		  +----------------------+---------------------+*/
+		MustIPv6Addr("64:ff9b::/96"),
+
+		/*+----------------------+---------------------+
+		  | Attribute            | Value               |
+		  +----------------------+---------------------+
+		  | Address Block        | ::ffff:0:0/96       |
+		  | Name                 | IPv4-mapped Address |
+		  | RFC                  | [RFC4291]           |
+		  | Allocation Date      | February 2006       |
+		  | Termination Date     | N/A                 |
+		  | Source               | False               |
+		  | Destination          | False               |
+		  | Forwardable          | False               |
+		  | Global               | False               |
+		  | Reserved-by-Protocol | True                |
+		  +----------------------+---------------------+*/
+		MustIPv6Addr("::ffff:0:0/96"),
+
+		/*+----------------------+----------------------------+
+		  | Attribute            | Value                      |
+		  +----------------------+----------------------------+
+		  | Address Block        | 100::/64                   |
+		  | Name                 | Discard-Only Address Block |
+		  | RFC                  | [RFC6666]                  |
+		  | Allocation Date      | June 2012                  |
+		  | Termination Date     | N/A                        |
+		  | Source               | True                       |
+		  | Destination          | True                       |
+		  | Forwardable          | True                       |
+		  | Global               | False                      |
+		  | Reserved-by-Protocol | False                      |
+		  +----------------------+----------------------------+*/
+		MustIPv6Addr("100::/64"),
+
+		/*+----------------------+---------------------------+
+		  | Attribute            | Value                     |
+		  +----------------------+---------------------------+
+		  | Address Block        | 2001::/23                 |
+		  | Name                 | IETF Protocol Assignments |
+		  | RFC                  | [RFC2928]                 |
+		  | Allocation Date      | September 2000            |
+		  | Termination Date     | N/A                       |
+		  | Source               | False[1]                  |
+		  | Destination          | False[1]                  |
+		  | Forwardable          | False[1]                  |
+		  | Global               | False[1]                  |
+		  | Reserved-by-Protocol | False                     |
+		  +----------------------+---------------------------+*/
+		// [1] Unless allowed by a more specific allocation.
+		// Registered here as 2001::/16 -- deliberately wider than the
+		// registry's /23 -- so the 2001:* entries below are covered by
+		// this one.
+		MustIPv6Addr("2001::/16"),
+
+		/*+----------------------+----------------+
+		  | Attribute            | Value          |
+		  +----------------------+----------------+
+		  | Address Block        | 2001::/32      |
+		  | Name                 | TEREDO         |
+		  | RFC                  | [RFC4380]      |
+		  | Allocation Date      | January 2006   |
+		  | Termination Date     | N/A            |
+		  | Source               | True           |
+		  | Destination          | True           |
+		  | Forwardable          | True           |
+		  | Global               | False          |
+		  | Reserved-by-Protocol | False          |
+		  +----------------------+----------------+*/
+		// Covered by previous entry, included for completeness.
+		//
+		// MustIPv6Addr("2001::/16"),
+
+		/*+----------------------+----------------+
+		  | Attribute            | Value          |
+		  +----------------------+----------------+
+		  | Address Block        | 2001:2::/48    |
+		  | Name                 | Benchmarking   |
+		  | RFC                  | [RFC5180]      |
+		  | Allocation Date      | April 2008     |
+		  | Termination Date     | N/A            |
+		  | Source               | True           |
+		  | Destination          | True           |
+		  | Forwardable          | True           |
+		  | Global               | False          |
+		  | Reserved-by-Protocol | False          |
+		  +----------------------+----------------+*/
+		// Covered by previous entry, included for completeness.
+		//
+		// MustIPv6Addr("2001:2::/48"),
+
+		/*+----------------------+---------------+
+		  | Attribute            | Value         |
+		  +----------------------+---------------+
+		  | Address Block        | 2001:db8::/32 |
+		  | Name                 | Documentation |
+		  | RFC                  | [RFC3849]     |
+		  | Allocation Date      | July 2004     |
+		  | Termination Date     | N/A           |
+		  | Source               | False         |
+		  | Destination          | False         |
+		  | Forwardable          | False         |
+		  | Global               | False         |
+		  | Reserved-by-Protocol | False         |
+		  +----------------------+---------------+*/
+		// Covered by previous entry, included for completeness.
+		//
+		// MustIPv6Addr("2001:db8::/32"),
+
+		/*+----------------------+--------------+
+		  | Attribute            | Value        |
+		  +----------------------+--------------+
+		  | Address Block        | 2001:10::/28 |
+		  | Name                 | ORCHID       |
+		  | RFC                  | [RFC4843]    |
+		  | Allocation Date      | March 2007   |
+		  | Termination Date     | March 2014   |
+		  | Source               | False        |
+		  | Destination          | False        |
+		  | Forwardable          | False        |
+		  | Global               | False        |
+		  | Reserved-by-Protocol | False        |
+		  +----------------------+--------------+*/
+		// Covered by previous entry, included for completeness.
+		//
+		// MustIPv6Addr("2001:10::/28"),
+
+		/*+----------------------+---------------+
+		  | Attribute            | Value         |
+		  +----------------------+---------------+
+		  | Address Block        | 2002::/16 [2] |
+		  | Name                 | 6to4          |
+		  | RFC                  | [RFC3056]     |
+		  | Allocation Date      | February 2001 |
+		  | Termination Date     | N/A           |
+		  | Source               | True          |
+		  | Destination          | True          |
+		  | Forwardable          | True          |
+		  | Global               | N/A [2]       |
+		  | Reserved-by-Protocol | False         |
+		  +----------------------+---------------+*/
+		// [2] See [RFC3056] for details.
+		MustIPv6Addr("2002::/16"),
+
+		/*+----------------------+--------------+
+		  | Attribute            | Value        |
+		  +----------------------+--------------+
+		  | Address Block        | fc00::/7     |
+		  | Name                 | Unique-Local |
+		  | RFC                  | [RFC4193]    |
+		  | Allocation Date      | October 2005 |
+		  | Termination Date     | N/A          |
+		  | Source               | True         |
+		  | Destination          | True         |
+		  | Forwardable          | True         |
+		  | Global               | False        |
+		  | Reserved-by-Protocol | False        |
+		  +----------------------+--------------+*/
+		MustIPv6Addr("fc00::/7"),
+
+		/*+----------------------+-----------------------+
+		  | Attribute            | Value                 |
+		  +----------------------+-----------------------+
+		  | Address Block        | fe80::/10             |
+		  | Name                 | Linked-Scoped Unicast |
+		  | RFC                  | [RFC4291]             |
+		  | Allocation Date      | February 2006         |
+		  | Termination Date     | N/A                   |
+		  | Source               | True                  |
+		  | Destination          | True                  |
+		  | Forwardable          | False                 |
+		  | Global               | False                 |
+		  | Reserved-by-Protocol | True                  |
+		  +----------------------+-----------------------+*/
+		MustIPv6Addr("fe80::/10"),
+	},
+	7335: {
+		// [RFC7335] IPv4 Service Continuity Prefix
+		MustIPv4Addr("192.0.0.0/29"), // [RFC7335], §6 IANA Considerations
+	},
+	ForwardingBlacklist: { // Pseudo-RFC
+		// Blacklist of non-forwardable IP blocks taken from RFC6890
+		//
+		// TODO: the attributes for forwardable should be
+		// searchable and embedded in the main list of RFCs
+		// above.
+		MustIPv4Addr("0.0.0.0/8"),
+		MustIPv4Addr("127.0.0.0/8"),
+		MustIPv4Addr("169.254.0.0/16"),
+		MustIPv4Addr("192.0.0.0/24"),
+		MustIPv4Addr("192.0.2.0/24"),
+		MustIPv4Addr("198.51.100.0/24"),
+		MustIPv4Addr("203.0.113.0/24"),
+		MustIPv4Addr("240.0.0.0/4"),
+		MustIPv4Addr("255.255.255.255/32"),
+		MustIPv6Addr("::1/128"),
+		MustIPv6Addr("::/128"),
+		MustIPv6Addr("::ffff:0:0/96"),
+
+		// There is no way of expressing a whitelist per RFC2928 at
+		// the moment without creating a negative mask, which I don't
+		// want to do at the moment.
+ //MustIPv6Addr("2001::/23"), + + MustIPv6Addr("2001:db8::/32"), + MustIPv6Addr("2001:10::/28"), + MustIPv6Addr("fe80::/10"), + }, + } +} + +// VisitAllRFCs iterates over all known RFCs and calls the visitor +func VisitAllRFCs(fn func(rfcNum uint, sockaddrs SockAddrs)) { + rfcNetMap := KnownRFCs() + + // Blacklist of faux-RFCs. Don't show the world that we're abusing the + // RFC system in this library. + rfcBlacklist := map[uint]struct{}{ + ForwardingBlacklist: {}, + } + + for rfcNum, sas := range rfcNetMap { + if _, found := rfcBlacklist[rfcNum]; !found { + fn(rfcNum, sas) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info.go b/vendor/github.com/hashicorp/go-sockaddr/route_info.go new file mode 100644 index 0000000000..2a3ee1db9e --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info.go @@ -0,0 +1,19 @@ +package sockaddr + +// RouteInterface specifies an interface for obtaining memoized route table and +// network information from a given OS. +type RouteInterface interface { + // GetDefaultInterfaceName returns the name of the interface that has a + // default route or an error and an empty string if a problem was + // encountered. + GetDefaultInterfaceName() (string, error) +} + +// VisitCommands visits each command used by the platform-specific RouteInfo +// implementation. +func (ri routeInfo) VisitCommands(fn func(name string, cmd []string)) { + for k, v := range ri.cmds { + cmds := append([]string(nil), v...) + fn(k, cmds) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go new file mode 100644 index 0000000000..705757abc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_bsd.go @@ -0,0 +1,36 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package sockaddr + +import "os/exec" + +var cmds map[string][]string = map[string][]string{ + "route": {"/sbin/route", "-n", "get", "default"}, +} + +type routeInfo struct { + cmds map[string][]string +} + +// NewRouteInfo returns a BSD-specific implementation of the RouteInfo +// interface. +func NewRouteInfo() (routeInfo, error) { + return routeInfo{ + cmds: cmds, + }, nil +} + +// GetDefaultInterfaceName returns the interface name attached to the default +// route on the default interface. +func (ri routeInfo) GetDefaultInterfaceName() (string, error) { + out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output() + if err != nil { + return "", err + } + + var ifName string + if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil { + return "", err + } + return ifName, nil +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go new file mode 100644 index 0000000000..d1b009f653 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_default.go @@ -0,0 +1,10 @@ +// +build android nacl plan9 + +package sockaddr + +import "errors" + +// getDefaultIfName is the default interface function for unsupported platforms. 
+func getDefaultIfName() (string, error) {
+	return "", errors.New("No default interface found (unsupported platform)")
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
new file mode 100644
index 0000000000..c2ec91eaf4
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_linux.go
@@ -0,0 +1,40 @@
+package sockaddr
+
+import (
+	"errors"
+	"os/exec"
+)
+
+type routeInfo struct {
+	cmds map[string][]string
+}
+
+// NewRouteInfo returns a Linux-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+	// CoreOS Container Linux moved ip to /usr/bin/ip, so look it up on
+	// $PATH and fall back to /sbin/ip on error.
+	path, _ := exec.LookPath("ip")
+	if path == "" {
+		path = "/sbin/ip"
+	}
+
+	return routeInfo{
+		cmds: map[string][]string{"ip": {path, "route"}},
+	}, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+	out, err := exec.Command(ri.cmds["ip"][0], ri.cmds["ip"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	var ifName string
+	if ifName, err = parseDefaultIfNameFromIPCmd(string(out)); err != nil {
+		return "", errors.New("No default interface found")
+	}
+	return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
new file mode 100644
index 0000000000..ee8e7984d7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_solaris.go
@@ -0,0 +1,37 @@
+package sockaddr
+
+import (
+	"errors"
+	"os/exec"
+)
+
+var cmds map[string][]string = map[string][]string{
+	"route": {"/usr/sbin/route", "-n", "get", "default"},
+}
+
+type routeInfo struct {
+	cmds map[string][]string
+}
+
+// NewRouteInfo returns a Solaris-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+	return routeInfo{
+		cmds: cmds,
+	}, nil
+}
+
+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+	out, err := exec.Command(cmds["route"][0], cmds["route"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	var ifName string
+	if ifName, err = parseDefaultIfNameFromRoute(string(out)); err != nil {
+		return "", errors.New("No default interface found")
+	}
+	return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
new file mode 100644
index 0000000000..3da972883e
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_windows.go
@@ -0,0 +1,41 @@
+package sockaddr
+
+import "os/exec"
+
+var cmds map[string][]string = map[string][]string{
+	"netstat":  {"netstat", "-rn"},
+	"ipconfig": {"ipconfig"},
+}
+
+type routeInfo struct {
+	cmds map[string][]string
+}
+
+// NewRouteInfo returns a Windows-specific implementation of the RouteInfo
+// interface.
+func NewRouteInfo() (routeInfo, error) {
+	return routeInfo{
+		cmds: cmds,
+	}, nil
+}
+
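Each of the platform files above satisfies the same RouteInterface, so consuming code is identical everywhere. A minimal usage sketch (editorial, not part of the vendored files; it assumes only the NewRouteInfo and GetDefaultInterfaceName shown in this diff):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        // Picks the platform-specific route/ip/netstat commands at build time.
        ri, err := sockaddr.NewRouteInfo()
        if err != nil {
            panic(err)
        }

        // Shells out to the platform tool and parses the default route from it.
        ifName, err := ri.GetDefaultInterfaceName()
        if err != nil {
            panic(err)
        }
        fmt.Println("default interface:", ifName)
    }

+// GetDefaultInterfaceName returns the interface name attached to the default
+// route on the default interface.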
+func (ri routeInfo) GetDefaultInterfaceName() (string, error) {
+	ifNameOut, err := exec.Command(cmds["netstat"][0], cmds["netstat"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	ipconfigOut, err := exec.Command(cmds["ipconfig"][0], cmds["ipconfig"][1:]...).Output()
+	if err != nil {
+		return "", err
+	}
+
+	ifName, err := parseDefaultIfNameWindows(string(ifNameOut), string(ipconfigOut))
+	if err != nil {
+		return "", err
+	}
+
+	return ifName, nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
new file mode 100644
index 0000000000..826c91c2e3
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr.go
@@ -0,0 +1,206 @@
+package sockaddr
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+type SockAddrType int
+type AttrName string
+
+const (
+	TypeUnknown SockAddrType = 0x0
+	TypeUnix                 = 0x1
+	TypeIPv4                 = 0x2
+	TypeIPv6                 = 0x4
+
+	// TypeIP is the union of TypeIPv4 and TypeIPv6
+	TypeIP = 0x6
+)
+
+type SockAddr interface {
+	// CmpRFC returns 0 if SockAddr exactly matches one of the matched RFC
+	// networks, -1 if the receiver is contained within the RFC network, or
+	// 1 if the address is not contained within the RFC.
+	CmpRFC(rfcNum uint, sa SockAddr) int
+
+	// Contains returns true if the SockAddr arg is contained within the
+	// receiver
+	Contains(SockAddr) bool
+
+	// Equal allows for the comparison of two SockAddrs
+	Equal(SockAddr) bool
+
+	DialPacketArgs() (string, string)
+	DialStreamArgs() (string, string)
+	ListenPacketArgs() (string, string)
+	ListenStreamArgs() (string, string)
+
+	// String returns the string representation of SockAddr
+	String() string
+
+	// Type returns the SockAddrType
+	Type() SockAddrType
+}
+
+// sockAddrAttrMap is a map of the SockAddr type-specific attributes.
+var sockAddrAttrMap map[AttrName]func(SockAddr) string
+var sockAddrAttrs []AttrName
+
+func init() {
+	sockAddrInit()
+}
+
+// New creates a new SockAddr from the string.  The order in which New()
+// attempts to construct a SockAddr is: IPv4Addr, IPv6Addr, SockAddrUnix.
+//
+// NOTE: New() relies on the heuristic that a path beginning with either a '.'
+// or '/' character names a UNIX socket before creating a new UnixSock.  For
+// UNIX sockets that are absolute paths or are nested within a sub-directory,
+// this works as expected, however if the UNIX socket is contained in the
+// current working directory, this will fail unless the path begins with "./"
+// (e.g. "./my-local-socket").  Calls directly to NewUnixSock() do not suffer
+// this limitation.  Invalid IP addresses such as "256.0.0.0/-1" will run
+// afoul of this heuristic and be assumed to be a valid UNIX socket path
+// (which they are, but it is probably not what you want and you won't realize
+// it until you stat(2) the file system to discover it doesn't exist).
+func NewSockAddr(s string) (SockAddr, error) {
+	ipv4Addr, err := NewIPv4Addr(s)
+	if err == nil {
+		return ipv4Addr, nil
+	}
+
+	ipv6Addr, err := NewIPv6Addr(s)
+	if err == nil {
+		return ipv6Addr, nil
+	}
+
+	// Check to make sure the string begins with either a '.' or '/', or
+	// contains a '/'.
+	if len(s) > 1 && (strings.IndexAny(s[0:1], "./") != -1 || strings.IndexByte(s, '/') != -1) {
+		unixSock, err := NewUnixSock(s)
+		if err == nil {
+			return unixSock, nil
+		}
+	}
+
+	return nil, fmt.Errorf("Unable to convert %q to an IPv4 or IPv6 address, or a UNIX Socket", s)
+}
+
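A short sketch of the parsing order NewSockAddr documents above (editorial, not part of the vendored file; the output comments are illustrative):

    package main

    import (
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        // An IP string parses to an IPv4Addr (or IPv6Addr) before the UNIX
        // socket heuristic is ever consulted.
        ip, err := sockaddr.NewSockAddr("192.0.2.10/24")
        if err != nil {
            panic(err)
        }
        fmt.Println(ip.Type(), ip) // illustrative output: IPv4 192.0.2.10/24

        // Relative UNIX socket paths need the "./" prefix described above.
        us, err := sockaddr.NewSockAddr("./my-local-socket")
        if err != nil {
            panic(err)
        }
        fmt.Println(us.Type(), us) // illustrative output: UNIX "./my-local-socket"
    }

+// ToIPAddr returns an IPAddr type or nil if the type conversion fails.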
+func ToIPAddr(sa SockAddr) *IPAddr {
+	ipa, ok := sa.(IPAddr)
+	if !ok {
+		return nil
+	}
+	return &ipa
+}
+
+// ToIPv4Addr returns an IPv4Addr type or nil if the type conversion fails.
+func ToIPv4Addr(sa SockAddr) *IPv4Addr {
+	switch v := sa.(type) {
+	case IPv4Addr:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// ToIPv6Addr returns an IPv6Addr type or nil if the type conversion fails.
+func ToIPv6Addr(sa SockAddr) *IPv6Addr {
+	switch v := sa.(type) {
+	case IPv6Addr:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// ToUnixSock returns a UnixSock type or nil if the type conversion fails.
+func ToUnixSock(sa SockAddr) *UnixSock {
+	switch v := sa.(type) {
+	case UnixSock:
+		return &v
+	default:
+		return nil
+	}
+}
+
+// SockAddrAttr returns a string representation of an attribute for the given
+// SockAddr.
+func SockAddrAttr(sa SockAddr, selector AttrName) string {
+	fn, found := sockAddrAttrMap[selector]
+	if !found {
+		return ""
+	}
+
+	return fn(sa)
+}
+
+// String() for SockAddrType returns a string representation of the
+// SockAddrType (e.g. "IPv4", "IPv6", "UNIX", "IP", or "unknown").
+func (sat SockAddrType) String() string {
+	switch sat {
+	case TypeIPv4:
+		return "IPv4"
+	case TypeIPv6:
+		return "IPv6"
+	// There is no concrete "IP" type.  Leaving here as a reminder.
+	// case TypeIP:
+	// 	return "IP"
+	case TypeUnix:
+		return "UNIX"
+	default:
+		panic("unsupported type")
+	}
+}
+
+// sockAddrInit is called once at init()
+func sockAddrInit() {
+	sockAddrAttrs = []AttrName{
+		"type", // type should be first
+		"string",
+	}
+
+	sockAddrAttrMap = map[AttrName]func(sa SockAddr) string{
+		"string": func(sa SockAddr) string {
+			return sa.String()
+		},
+		"type": func(sa SockAddr) string {
+			return sa.Type().String()
+		},
+	}
+}
+
+// SockAddrAttrs returns a list of attributes supported by the SockAddr type
+func SockAddrAttrs() []AttrName {
+	return sockAddrAttrs
+}
+
+// SockAddrMarshaler is a wrapper that marshals a SockAddr to and from JSON.
+// Although this is pretty trivial to do in a program, having the logic here
+// is useful all around.  Note that this marshals into a *string* -- the
+// underlying string representation of the sockaddr.  If you then unmarshal
+// into this type in Go, all will work as expected, but externally you can
+// take what comes out and use the string value directly.
+type SockAddrMarshaler struct {
+	SockAddr
+}
+
+func (s *SockAddrMarshaler) MarshalJSON() ([]byte, error) {
+	return json.Marshal(s.SockAddr.String())
+}
+
+func (s *SockAddrMarshaler) UnmarshalJSON(in []byte) error {
+	var str string
+	err := json.Unmarshal(in, &str)
+	if err != nil {
+		return err
+	}
+	sa, err := NewSockAddr(str)
+	if err != nil {
+		return err
+	}
+	s.SockAddr = sa
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
new file mode 100644
index 0000000000..75fbffb1ea
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs.go
@@ -0,0 +1,193 @@
+package sockaddr
+
+import (
+	"bytes"
+	"sort"
+)
+
+// SockAddrs is a slice of SockAddrs
+type SockAddrs []SockAddr
+
+func (s SockAddrs) Len() int      { return len(s) }
+func (s SockAddrs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// CmpAddrFunc is the function signature that must be met to be used in the
+// OrderedAddrBy multiAddrSorter
+type CmpAddrFunc func(p1, p2 *SockAddr) int
+
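A quick round-trip through SockAddrMarshaler (editorial sketch, not part of the vendored file; it assumes the exported MustIPv4Addr helper that rfc.go above relies on):

    package main

    import (
        "encoding/json"
        "fmt"

        sockaddr "github.com/hashicorp/go-sockaddr"
    )

    func main() {
        m := &sockaddr.SockAddrMarshaler{
            SockAddr: sockaddr.MustIPv4Addr("10.0.0.1"),
        }

        // Marshals to the sockaddr's plain string form: "10.0.0.1"
        buf, err := json.Marshal(m)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(buf))

        // Unmarshaling routes the string back through NewSockAddr.
        var out sockaddr.SockAddrMarshaler
        if err := json.Unmarshal(buf, &out); err != nil {
            panic(err)
        }
        fmt.Println(out.Type()) // IPv4
    }

+// multiAddrSorter implements the Sort interface, sorting the SockAddrs within.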
+type multiAddrSorter struct {
+	addrs SockAddrs
+	cmp   []CmpAddrFunc
+}
+
+// Sort sorts the argument slice according to the Cmp functions passed to
+// OrderedAddrBy.
+func (ms *multiAddrSorter) Sort(sockAddrs SockAddrs) {
+	ms.addrs = sockAddrs
+	sort.Sort(ms)
+}
+
+// OrderedAddrBy sorts SockAddr by the list of sort function pointers.
+func OrderedAddrBy(cmpFuncs ...CmpAddrFunc) *multiAddrSorter {
+	return &multiAddrSorter{
+		cmp: cmpFuncs,
+	}
+}
+
+// Len is part of sort.Interface.
+func (ms *multiAddrSorter) Len() int {
+	return len(ms.addrs)
+}
+
+// Less is part of sort.Interface.  It is implemented by looping along the
+// Cmp() functions until it finds a comparison that is either less than,
+// equal to, or greater than.
+func (ms *multiAddrSorter) Less(i, j int) bool {
+	p, q := &ms.addrs[i], &ms.addrs[j]
+	// Try all but the last comparison.
+	var k int
+	for k = 0; k < len(ms.cmp)-1; k++ {
+		cmp := ms.cmp[k]
+		x := cmp(p, q)
+		switch x {
+		case -1:
+			// p < q, so we have a decision.
+			return true
+		case 1:
+			// p > q, so we have a decision.
+			return false
+		}
+		// p == q; try the next comparison.
+	}
+	// All comparisons to here said "equal", so just return whatever the
+	// final comparison reports.
+	switch ms.cmp[k](p, q) {
+	case -1:
+		return true
+	case 1:
+		return false
+	default:
+		// Still a tie! Now what?
+		return false
+	}
+}
+
+// Swap is part of sort.Interface.
+func (ms *multiAddrSorter) Swap(i, j int) {
+	ms.addrs[i], ms.addrs[j] = ms.addrs[j], ms.addrs[i]
+}
+
+const (
+	// NOTE (sean@): These constants exist only to spruce up the code for
+	// readability purposes.  Some of the Cmp*() variants have confusing
+	// logic (especially when dealing with mixed-type comparisons) and
+	// this, I think, has made it easier to grok the code faster.
+	sortReceiverBeforeArg = -1
+	sortDeferDecision     = 0
+	sortArgBeforeReceiver = 1
+)
+
+// AscAddress is a sorting function to sort SockAddrs by their respective
+// address type.  Non-equal types are deferred in the sort.
+func AscAddress(p1Ptr, p2Ptr *SockAddr) int {
+	p1 := *p1Ptr
+	p2 := *p2Ptr
+
+	switch v := p1.(type) {
+	case IPv4Addr:
+		return v.CmpAddress(p2)
+	case IPv6Addr:
+		return v.CmpAddress(p2)
+	case UnixSock:
+		return v.CmpAddress(p2)
+	default:
+		return sortDeferDecision
+	}
+}
+
+// AscPort is a sorting function to sort SockAddrs by their respective address
+// type.  Non-equal types are deferred in the sort.
+func AscPort(p1Ptr, p2Ptr *SockAddr) int {
+	p1 := *p1Ptr
+	p2 := *p2Ptr
+
+	switch v := p1.(type) {
+	case IPv4Addr:
+		return v.CmpPort(p2)
+	case IPv6Addr:
+		return v.CmpPort(p2)
+	default:
+		return sortDeferDecision
+	}
+}
+
+// AscPrivate is a sorting function to sort "more secure" private values
+// before "more public" values.  Both IPv4 and IPv6 are compared against
+// RFC6890 (RFC6890 includes, and is not limited to, RFC1918 and RFC6598 for
+// IPv4, and IPv6 includes RFC4193).
+func AscPrivate(p1Ptr, p2Ptr *SockAddr) int {
+	p1 := *p1Ptr
+	p2 := *p2Ptr
+
+	switch v := p1.(type) {
+	case IPv4Addr, IPv6Addr:
+		return v.CmpRFC(6890, p2)
+	default:
+		return sortDeferDecision
+	}
+}
+
+// AscNetworkSize is a sorting function to sort SockAddrs based on their network
+// size.  Non-equal types are deferred in the sort.
+func AscNetworkSize(p1Ptr, p2Ptr *SockAddr) int {
+	p1 := *p1Ptr
+	p2 := *p2Ptr
+	p1Type := p1.Type()
+	p2Type := p2.Type()
+
+	// Network size operations on non-IP types make no sense
+	if p1Type != p2Type && p1Type != TypeIP {
+		return sortDeferDecision
+	}
+
+	ipA := p1.(IPAddr)
+	ipB := p2.(IPAddr)
+
+	return bytes.Compare([]byte(*ipA.NetIPMask()), []byte(*ipB.NetIPMask()))
+}
+
+// AscType is a sorting function to sort "more secure" types before
+// "less-secure" types.
+func AscType(p1Ptr, p2Ptr *SockAddr) int {
+	p1 := *p1Ptr
+	p2 := *p2Ptr
+	p1Type := p1.Type()
+	p2Type := p2.Type()
+	switch {
+	case p1Type < p2Type:
+		return sortReceiverBeforeArg
+	case p1Type == p2Type:
+		return sortDeferDecision
+	case p1Type > p2Type:
+		return sortArgBeforeReceiver
+	default:
+		return sortDeferDecision
+	}
+}
+
+// FilterByType returns two lists: a list of matched and unmatched SockAddrs
+func (sas SockAddrs) FilterByType(type_ SockAddrType) (matched, excluded SockAddrs) {
+	matched = make(SockAddrs, 0, len(sas))
+	excluded = make(SockAddrs, 0, len(sas))
+
+	for _, sa := range sas {
+		if sa.Type()&type_ != 0 {
+			matched = append(matched, sa)
+		} else {
+			excluded = append(excluded, sa)
+		}
+	}
+	return matched, excluded
+}
diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
new file mode 100644
index 0000000000..f3be3f67e7
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock.go
@@ -0,0 +1,135 @@
+package sockaddr
+
+import (
+	"fmt"
+	"strings"
+)
+
+type UnixSock struct {
+	SockAddr
+	path string
+}
+type UnixSocks []*UnixSock
+
+// unixAttrMap is a map of the UnixSockAddr type-specific attributes.
+var unixAttrMap map[AttrName]func(UnixSock) string
+var unixAttrs []AttrName
+
+func init() {
+	unixAttrInit()
+}
+
+// NewUnixSock creates a UnixSock from a string path.  String can be in the
+// form of either URI-based string (e.g. `file:///etc/passwd`), an absolute
+// path (e.g. `/etc/passwd`), or a relative path (e.g. `./foo`).
+func NewUnixSock(s string) (ret UnixSock, err error) {
+	ret.path = s
+	return ret, nil
+}
+
+// CmpAddress follows the Cmp() standard protocol and returns:
+//
+// - -1 If the receiver should sort first because its name lexically sorts
+//   before arg
+// - 0 if the SockAddr arg is not a UnixSock, or is a UnixSock with the same
+//   path.
+// - 1 If the argument should sort first.
+func (us UnixSock) CmpAddress(sa SockAddr) int {
+	usb, ok := sa.(UnixSock)
+	if !ok {
+		return sortDeferDecision
+	}
+
+	return strings.Compare(us.Path(), usb.Path())
+}
+
+// DialPacketArgs returns the arguments required to be passed to
+// net.DialUnix() with the `unixgram` network type.
+func (us UnixSock) DialPacketArgs() (network, dialArgs string) {
+	return "unixgram", us.path
+}
+
+// DialStreamArgs returns the arguments required to be passed to
+// net.DialUnix() with the `unix` network type.
+func (us UnixSock) DialStreamArgs() (network, dialArgs string) {
+	return "unix", us.path
+}
+
+// Equal returns true if a SockAddr is equal to the receiving UnixSock.
+func (us UnixSock) Equal(sa SockAddr) bool {
+	usb, ok := sa.(UnixSock)
+	if !ok {
+		return false
+	}
+
+	if us.Path() != usb.Path() {
+		return false
+	}
+
+	return true
+}
+
+// ListenPacketArgs returns the arguments required to be passed to
+// net.ListenUnixgram() with the `unixgram` network type.
+func (us UnixSock) ListenPacketArgs() (network, dialArgs string) {
+	return "unixgram", us.path
+}
+
+// ListenStreamArgs returns the arguments required to be passed to
+// net.ListenUnix() with the `unix` network type.
+func (us UnixSock) ListenStreamArgs() (network, dialArgs string) {
+	return "unix", us.path
+}
+
+// MustUnixSock is a helper method that must return a UnixSock or panic on
+// invalid input.
+func MustUnixSock(addr string) UnixSock {
+	us, err := NewUnixSock(addr)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to create a UnixSock from %+q: %v", addr, err))
+	}
+	return us
+}
+
+// Path returns the given path of the UnixSock
+func (us UnixSock) Path() string {
+	return us.path
+}
+
+// String returns the path of the UnixSock
+func (us UnixSock) String() string {
+	return fmt.Sprintf("%+q", us.path)
+}
+
+// Type is used as a type switch and returns TypeUnix
+func (UnixSock) Type() SockAddrType {
+	return TypeUnix
+}
+
+// UnixSockAttrs returns a list of attributes supported by the UnixSockAddr type
+func UnixSockAttrs() []AttrName {
+	return unixAttrs
+}
+
+// UnixSockAttr returns a string representation of an attribute for the given
+// UnixSock.
+func UnixSockAttr(us UnixSock, attrName AttrName) string {
+	fn, found := unixAttrMap[attrName]
+	if !found {
+		return ""
+	}
+
+	return fn(us)
+}
+
+// unixAttrInit is called once at init()
+func unixAttrInit() {
+	// Sorted for human readability
+	unixAttrs = []AttrName{
+		"path",
+	}
+
+	unixAttrMap = map[AttrName]func(us UnixSock) string{
+		"path": func(us UnixSock) string {
+			return us.Path()
+		},
+	}
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
new file mode 100644
index 0000000000..be2cc4dfb6
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/LICENSE
@@ -0,0 +1,362 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a.
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
new file mode 100644
index 0000000000..5673773b22
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
@@ -0,0 +1,161 @@
+package simplelru
+
+import (
+	"container/list"
+	"errors"
+)
+
+// EvictCallback is used to get a callback when a cache entry is evicted
+type EvictCallback func(key interface{}, value interface{})
+
+// LRU implements a non-thread safe fixed size LRU cache
+type LRU struct {
+	size      int
+	evictList *list.List
+	items     map[interface{}]*list.Element
+	onEvict   EvictCallback
+}
+
+// entry is used to hold a value in the evictList
+type entry struct {
+	key   interface{}
+	value interface{}
+}
+
+// NewLRU constructs an LRU of the given size
+func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
+	if size <= 0 {
+		return nil, errors.New("Must provide a positive size")
+	}
+	c := &LRU{
+		size:      size,
+		evictList: list.New(),
+		items:     make(map[interface{}]*list.Element),
+		onEvict:   onEvict,
+	}
+	return c, nil
+}
+
+// Purge is used to completely clear the cache.
+func (c *LRU) Purge() {
+	for k, v := range c.items {
+		if c.onEvict != nil {
+			c.onEvict(k, v.Value.(*entry).value)
+		}
+		delete(c.items, k)
+	}
+	c.evictList.Init()
+}
+
+// Add adds a value to the cache.  Returns true if an eviction occurred.
+func (c *LRU) Add(key, value interface{}) (evicted bool) {
+	// Check for existing item
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		ent.Value.(*entry).value = value
+		return false
+	}
+
+	// Add new item
+	ent := &entry{key, value}
+	entry := c.evictList.PushFront(ent)
+	c.items[key] = entry
+
+	evict := c.evictList.Len() > c.size
+	// Verify size not exceeded
+	if evict {
+		c.removeOldest()
+	}
+	return evict
+}
+
+// Get looks up a key's value from the cache.
+func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
+	if ent, ok := c.items[key]; ok {
+		c.evictList.MoveToFront(ent)
+		return ent.Value.(*entry).value, true
+	}
+	return
+}
+
+// Contains checks if a key is in the cache, without updating the recent-ness
+// or deleting it for being stale.
+func (c *LRU) Contains(key interface{}) (ok bool) {
+	_, ok = c.items[key]
+	return ok
+}
+
+// Peek returns the key value (or undefined if not found) without updating
+// the "recently used"-ness of the key.
+func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
+	var ent *list.Element
+	if ent, ok = c.items[key]; ok {
+		return ent.Value.(*entry).value, true
+	}
+	return nil, ok
+}
+
+// Remove removes the provided key from the cache, returning if the
+// key was contained.
+func (c *LRU) Remove(key interface{}) (present bool) {
+	if ent, ok := c.items[key]; ok {
+		c.removeElement(ent)
+		return true
+	}
+	return false
+}
+
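A brief sketch of the eviction behavior described above (editorial, not part of the vendored file; it uses only NewLRU, Add, Get, and Keys from this diff):

    package main

    import (
        "fmt"

        "github.com/hashicorp/golang-lru/simplelru"
    )

    func main() {
        // onEvict fires for every entry that falls off the cold end of the list.
        onEvict := func(k, v interface{}) { fmt.Println("evicted:", k, v) }

        c, err := simplelru.NewLRU(2, onEvict)
        if err != nil {
            panic(err)
        }

        c.Add("a", 1)
        c.Add("b", 2)
        c.Get("a")            // touching "a" makes "b" the least recently used
        c.Add("c", 3)         // over capacity: prints `evicted: b 2`
        fmt.Println(c.Keys()) // oldest to newest: [a c]
    }

+// RemoveOldest removes the oldest item from the cache.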
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// GetOldest returns the oldest entry
+func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
+	ent := c.evictList.Back()
+	if ent != nil {
+		kv := ent.Value.(*entry)
+		return kv.key, kv.value, true
+	}
+	return nil, nil, false
+}
+
+// Keys returns a slice of the keys in the cache, from oldest to newest.
+func (c *LRU) Keys() []interface{} {
+	keys := make([]interface{}, len(c.items))
+	i := 0
+	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
+		keys[i] = ent.Value.(*entry).key
+		i++
+	}
+	return keys
+}
+
+// Len returns the number of items in the cache.
+func (c *LRU) Len() int {
+	return c.evictList.Len()
+}
+
+// removeOldest removes the oldest item from the cache.
+func (c *LRU) removeOldest() {
+	ent := c.evictList.Back()
+	if ent != nil {
+		c.removeElement(ent)
+	}
+}
+
+// removeElement is used to remove a given list element from the cache
+func (c *LRU) removeElement(e *list.Element) {
+	c.evictList.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.items, kv.key)
+	if c.onEvict != nil {
+		c.onEvict(kv.key, kv.value)
+	}
+}
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
new file mode 100644
index 0000000000..744cac01c6
--- /dev/null
+++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
@@ -0,0 +1,37 @@
+package simplelru
+
+// LRUCache is the interface for simple LRU cache.
+type LRUCache interface {
+	// Adds a value to the cache, returns true if an eviction occurred and
+	// updates the "recently used"-ness of the key.
+	Add(key, value interface{}) bool
+
+	// Returns key's value from the cache and
+	// updates the "recently used"-ness of the key. #value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Checks if a key exists in the cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (interface{}, interface{}, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []interface{}
+
+	// Clears all cache entries.
+	Purge()
+
+	// Returns the number of items in the cache.
+	Len() int
+}
diff --git a/vendor/github.com/hashicorp/memberlist/LICENSE b/vendor/github.com/hashicorp/memberlist/LICENSE
new file mode 100644
index 0000000000..c33dcc7c92
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+   means each individual or legal entity that creates, contributes to the
+   creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+   means the combination of the Contributions of others (if any) used by a
+   Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+   means Covered Software of a particular Contributor.
+
+1.4.
“Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. 
If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions
+
+   If you create software not governed by this License, and you want to
+   create a new license for such software, you may create and use a modified
+   version of this License if you rename the license and remove any
+   references to the name of the license steward (except to note that such
+   modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+   If You choose to distribute Source Code Form that is Incompatible With
+   Secondary Licenses under the terms of this version of the License, the
+   notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+   This Source Code Form is subject to the
+   terms of the Mozilla Public License, v.
+   2.0. If a copy of the MPL was not
+   distributed with this file, You can
+   obtain one at
+   http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+   This Source Code Form is “Incompatible
+   With Secondary Licenses”, as defined by
+   the Mozilla Public License, v. 2.0.
+
diff --git a/vendor/github.com/hashicorp/memberlist/alive_delegate.go b/vendor/github.com/hashicorp/memberlist/alive_delegate.go
new file mode 100644
index 0000000000..51a0ba9054
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/alive_delegate.go
@@ -0,0 +1,14 @@
+package memberlist
+
+// AliveDelegate is used to involve a client in processing
+// a node "alive" message. When a node joins, either through
+// a UDP gossip or TCP push/pull, we update the state of
+// that node via an alive message. This can be used to filter
+// a node out and prevent it from being considered a peer
+// using application specific logic.
+type AliveDelegate interface {
+	// NotifyAlive is invoked when a message about a live
+	// node is received from the network. Returning a non-nil
+	// error prevents the node from being considered a peer.
+	NotifyAlive(peer *Node) error
+}
diff --git a/vendor/github.com/hashicorp/memberlist/awareness.go b/vendor/github.com/hashicorp/memberlist/awareness.go
new file mode 100644
index 0000000000..ea95c75388
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/awareness.go
@@ -0,0 +1,69 @@
+package memberlist
+
+import (
+	"sync"
+	"time"
+
+	"github.com/armon/go-metrics"
+)
+
+// awareness manages a simple metric for tracking the estimated health of the
+// local node. Health is primarily the node's ability to respond in the soft
+// real-time manner required for correct health checking of other nodes in the
+// cluster.
+type awareness struct {
+	sync.RWMutex
+
+	// max is the upper threshold for the timeout scale (the score will be
+	// constrained to be from 0 <= score < max).
+	max int
+
+	// score is the current awareness score. Lower values are healthier and
+	// zero is the minimum value.
+	score int
+}
+
+// newAwareness returns a new awareness object.
+func newAwareness(max int) *awareness {
+	return &awareness{
+		max:   max,
+		score: 0,
+	}
+}
+
+// ApplyDelta takes the given delta and applies it to the score in a thread-safe
+// manner. It also enforces a floor of zero and a max of max, so deltas may not
+// change the overall score if it's railed at one of the extremes.
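The comment above pins down the two behaviors that matter: deltas are clamped into [0, max-1], and ScaleTimeout (further down in this file) stretches probe timeouts linearly with the score. A standalone sketch of that arithmetic, reproduced here purely for illustration since the awareness type is unexported:

```go
package main

import (
	"fmt"
	"time"
)

// applyDelta mirrors the clamping in awareness.ApplyDelta: the score
// stays within [0, max-1].
func applyDelta(score, delta, max int) int {
	score += delta
	if score < 0 {
		return 0
	}
	if score > max-1 {
		return max - 1
	}
	return score
}

// scaleTimeout mirrors awareness.ScaleTimeout: score 0 keeps the base
// timeout, and every point of unhealthiness adds one more multiple.
func scaleTimeout(base time.Duration, score int) time.Duration {
	return base * (time.Duration(score) + 1)
}

func main() {
	score := applyDelta(0, 3, 8)                           // clamped into [0, 7], so score is 3
	fmt.Println(scaleTimeout(500*time.Millisecond, score)) // prints 2s
}
```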
+func (a *awareness) ApplyDelta(delta int) {
+	a.Lock()
+	initial := a.score
+	a.score += delta
+	if a.score < 0 {
+		a.score = 0
+	} else if a.score > (a.max - 1) {
+		a.score = (a.max - 1)
+	}
+	final := a.score
+	a.Unlock()
+
+	if initial != final {
+		metrics.SetGauge([]string{"memberlist", "health", "score"}, float32(final))
+	}
+}
+
+// GetHealthScore returns the raw health score.
+func (a *awareness) GetHealthScore() int {
+	a.RLock()
+	score := a.score
+	a.RUnlock()
+	return score
+}
+
+// ScaleTimeout takes the given duration and scales it based on the current
+// score. Less healthiness will lead to longer timeouts.
+func (a *awareness) ScaleTimeout(timeout time.Duration) time.Duration {
+	a.RLock()
+	score := a.score
+	a.RUnlock()
+	return timeout * (time.Duration(score) + 1)
+}
diff --git a/vendor/github.com/hashicorp/memberlist/broadcast.go b/vendor/github.com/hashicorp/memberlist/broadcast.go
new file mode 100644
index 0000000000..f7e85a119c
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/broadcast.go
@@ -0,0 +1,100 @@
+package memberlist
+
+/*
+The broadcast mechanism works by maintaining a sorted list of messages to be
+sent out. When a message is to be broadcast, the retransmit count
+is set to zero and the message is appended to the queue. The retransmit count
+serves as the "priority", ensuring that newer messages get sent first. Once
+a message hits the retransmit limit, it is removed from the queue.
+
+Additionally, older entries can be invalidated by new messages that
+are contradictory. For example, if we send "{suspect M1 inc: 1}",
+then a following "{alive M1 inc: 2}" will invalidate that message.
+*/
+
+type memberlistBroadcast struct {
+	node   string
+	msg    []byte
+	notify chan struct{}
+}
+
+func (b *memberlistBroadcast) Invalidates(other Broadcast) bool {
+	// Check if that broadcast is a memberlist type
+	mb, ok := other.(*memberlistBroadcast)
+	if !ok {
+		return false
+	}
+
+	// Invalidates any message about the same node
+	return b.node == mb.node
+}
+
+func (b *memberlistBroadcast) Message() []byte {
+	return b.msg
+}
+
+func (b *memberlistBroadcast) Finished() {
+	select {
+	case b.notify <- struct{}{}:
+	default:
+	}
+}
+
+// encodeAndBroadcast encodes a message and enqueues it for broadcast. Fails
+// silently if there is an encoding error.
+func (m *Memberlist) encodeAndBroadcast(node string, msgType messageType, msg interface{}) {
+	m.encodeBroadcastNotify(node, msgType, msg, nil)
+}
+
+// encodeBroadcastNotify encodes a message and enqueues it for broadcast
+// and notifies the given channel when transmission is finished. Fails
+// silently if there is an encoding error.
+func (m *Memberlist) encodeBroadcastNotify(node string, msgType messageType, msg interface{}, notify chan struct{}) {
+	buf, err := encode(msgType, msg)
+	if err != nil {
+		m.logger.Printf("[ERR] memberlist: Failed to encode message for broadcast: %s", err)
+	} else {
+		m.queueBroadcast(node, buf.Bytes(), notify)
+	}
+}
+
+// queueBroadcast is used to start dissemination of a message. It will be
+// sent up to a configured number of times. The message could potentially
+// be invalidated by a future message about the same node.
+func (m *Memberlist) queueBroadcast(node string, msg []byte, notify chan struct{}) {
+	b := &memberlistBroadcast{node, msg, notify}
+	m.broadcasts.QueueBroadcast(b)
+}
+
+// getBroadcasts is used to return a slice of broadcasts to send up to
+// a maximum byte size, while imposing a per-broadcast overhead.
This is used +// to fill a UDP packet with piggybacked data +func (m *Memberlist) getBroadcasts(overhead, limit int) [][]byte { + // Get memberlist messages first + toSend := m.broadcasts.GetBroadcasts(overhead, limit) + + // Check if the user has anything to broadcast + d := m.config.Delegate + if d != nil { + // Determine the bytes used already + bytesUsed := 0 + for _, msg := range toSend { + bytesUsed += len(msg) + overhead + } + + // Check space remaining for user messages + avail := limit - bytesUsed + if avail > overhead+userMsgOverhead { + userMsgs := d.GetBroadcasts(overhead+userMsgOverhead, avail) + + // Frame each user message + for _, msg := range userMsgs { + buf := make([]byte, 1, len(msg)+1) + buf[0] = byte(userMsg) + buf = append(buf, msg...) + toSend = append(toSend, buf) + } + } + } + return toSend +} diff --git a/vendor/github.com/hashicorp/memberlist/config.go b/vendor/github.com/hashicorp/memberlist/config.go new file mode 100644 index 0000000000..2f43d14cb1 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/config.go @@ -0,0 +1,288 @@ +package memberlist + +import ( + "io" + "log" + "os" + "time" +) + +type Config struct { + // The name of this node. This must be unique in the cluster. + Name string + + // Transport is a hook for providing custom code to communicate with + // other nodes. If this is left nil, then memberlist will by default + // make a NetTransport using BindAddr and BindPort from this structure. + Transport Transport + + // Configuration related to what address to bind to and ports to + // listen on. The port is used for both UDP and TCP gossip. It is + // assumed other nodes are running on this port, but they do not need + // to. + BindAddr string + BindPort int + + // Configuration related to what address to advertise to other + // cluster members. Used for nat traversal. + AdvertiseAddr string + AdvertisePort int + + // ProtocolVersion is the configured protocol version that we + // will _speak_. This must be between ProtocolVersionMin and + // ProtocolVersionMax. + ProtocolVersion uint8 + + // TCPTimeout is the timeout for establishing a stream connection with + // a remote node for a full state sync, and for stream read and write + // operations. This is a legacy name for backwards compatibility, but + // should really be called StreamTimeout now that we have generalized + // the transport. + TCPTimeout time.Duration + + // IndirectChecks is the number of nodes that will be asked to perform + // an indirect probe of a node in the case a direct probe fails. Memberlist + // waits for an ack from any single indirect node, so increasing this + // number will increase the likelihood that an indirect probe will succeed + // at the expense of bandwidth. + IndirectChecks int + + // RetransmitMult is the multiplier for the number of retransmissions + // that are attempted for messages broadcasted over gossip. The actual + // count of retransmissions is calculated using the formula: + // + // Retransmits = RetransmitMult * log(N+1) + // + // This allows the retransmits to scale properly with cluster size. The + // higher the multiplier, the more likely a failed broadcast is to converge + // at the expense of increased bandwidth. + RetransmitMult int + + // SuspicionMult is the multiplier for determining the time an + // inaccessible node is considered suspect before declaring it dead. 
+	//
+	// The actual timeout is calculated using the formula:
+	//
+	//   SuspicionTimeout = SuspicionMult * log(N+1) * ProbeInterval
+	//
+	// This allows the timeout to scale properly with expected propagation
+	// delay with a larger cluster size. The higher the multiplier, the longer
+	// an inaccessible node is considered part of the cluster before declaring
+	// it dead, giving that suspect node more time to refute if it is indeed
+	// still alive.
+	SuspicionMult int
+
+	// SuspicionMaxTimeoutMult is the multiplier applied to the
+	// SuspicionTimeout used as an upper bound on detection time. This max
+	// timeout is calculated using the formula:
+	//
+	//   SuspicionMaxTimeout = SuspicionMaxTimeoutMult * SuspicionTimeout
+	//
+	// If everything is working properly, confirmations from other nodes will
+	// accelerate suspicion timers in a manner which will cause the timeout
+	// to reach the base SuspicionTimeout before that elapses, so this value
+	// will typically only come into play if a node is experiencing issues
+	// communicating with other nodes. It should be set to something fairly
+	// large so that a node having problems will have a lot of chances to
+	// recover before falsely declaring other nodes as failed, but short
+	// enough for a legitimately isolated node to still make progress marking
+	// nodes failed in a reasonable amount of time.
	SuspicionMaxTimeoutMult int
+
+	// PushPullInterval is the interval between complete state syncs.
+	// Complete state syncs are done with a single node over TCP and are
+	// quite expensive relative to standard gossiped messages. Setting this
+	// to zero will disable state push/pull syncs completely.
+	//
+	// Setting this interval lower (more frequent) will increase convergence
+	// speeds across larger clusters at the expense of increased bandwidth
+	// usage.
+	PushPullInterval time.Duration
+
+	// ProbeInterval and ProbeTimeout are used to configure probing
+	// behavior for memberlist.
+	//
+	// ProbeInterval is the interval between random node probes. Setting
+	// this lower (more frequent) will cause the memberlist cluster to detect
+	// failed nodes more quickly at the expense of increased bandwidth usage.
+	//
+	// ProbeTimeout is the timeout to wait for an ack from a probed node
+	// before assuming it is unhealthy. This should be set to the 99th
+	// percentile of RTT (round-trip time) on your network.
+	ProbeInterval time.Duration
+	ProbeTimeout  time.Duration
+
+	// DisableTcpPings will turn off the fallback TCP pings that are attempted
+	// if the direct UDP ping fails. These get pipelined along with the
+	// indirect UDP pings.
+	DisableTcpPings bool
+
+	// AwarenessMaxMultiplier will increase the probe interval if the node
+	// becomes aware that it might be degraded and not meeting the soft real
+	// time requirements to reliably probe other nodes.
+	AwarenessMaxMultiplier int
+
+	// GossipInterval and GossipNodes are used to configure the gossip
+	// behavior of memberlist.
+	//
+	// GossipInterval is the interval between sending messages that need
+	// to be gossiped that haven't been able to piggyback on probing messages.
+	// If this is set to zero, non-piggyback gossip is disabled. By lowering
+	// this value (more frequent) gossip messages are propagated across
+	// the cluster more quickly at the expense of increased bandwidth.
+	//
+	// GossipNodes is the number of random nodes to send gossip messages to
+	// per GossipInterval.
Increasing this number causes the gossip messages + // to propagate across the cluster more quickly at the expense of + // increased bandwidth. + // + // GossipToTheDeadTime is the interval after which a node has died that + // we will still try to gossip to it. This gives it a chance to refute. + GossipInterval time.Duration + GossipNodes int + GossipToTheDeadTime time.Duration + + // EnableCompression is used to control message compression. This can + // be used to reduce bandwidth usage at the cost of slightly more CPU + // utilization. This is only available starting at protocol version 1. + EnableCompression bool + + // SecretKey is used to initialize the primary encryption key in a keyring. + // The primary encryption key is the only key used to encrypt messages and + // the first key used while attempting to decrypt messages. Providing a + // value for this primary key will enable message-level encryption and + // verification, and automatically install the key onto the keyring. + // The value should be either 16, 24, or 32 bytes to select AES-128, + // AES-192, or AES-256. + SecretKey []byte + + // The keyring holds all of the encryption keys used internally. It is + // automatically initialized using the SecretKey and SecretKeys values. + Keyring *Keyring + + // Delegate and Events are delegates for receiving and providing + // data to memberlist via callback mechanisms. For Delegate, see + // the Delegate interface. For Events, see the EventDelegate interface. + // + // The DelegateProtocolMin/Max are used to guarantee protocol-compatibility + // for any custom messages that the delegate might do (broadcasts, + // local/remote state, etc.). If you don't set these, then the protocol + // versions will just be zero, and version compliance won't be done. + Delegate Delegate + DelegateProtocolVersion uint8 + DelegateProtocolMin uint8 + DelegateProtocolMax uint8 + Events EventDelegate + Conflict ConflictDelegate + Merge MergeDelegate + Ping PingDelegate + Alive AliveDelegate + + // DNSConfigPath points to the system's DNS config file, usually located + // at /etc/resolv.conf. It can be overridden via config for easier testing. + DNSConfigPath string + + // LogOutput is the writer where logs should be sent. If this is not + // set, logging will go to stderr by default. You cannot specify both LogOutput + // and Logger at the same time. + LogOutput io.Writer + + // Logger is a custom logger which you provide. If Logger is set, it will use + // this for the internal logger. If Logger is not set, it will fall back to the + // behavior for using LogOutput. You cannot specify both LogOutput and Logger + // at the same time. + Logger *log.Logger + + // Size of Memberlist's internal channel which handles UDP messages. The + // size of this determines the size of the queue which Memberlist will keep + // while UDP messages are handled. + HandoffQueueDepth int + + // Maximum number of bytes that memberlist will put in a packet (this + // will be for UDP packets by default with a NetTransport). A safe value + // for this is typically 1400 bytes (which is the default). However, + // depending on your network's MTU (Maximum Transmission Unit) you may + // be able to increase this to get more content into each gossip packet. + // This is a legacy name for backward compatibility but should really be + // called PacketBufferSize now that we have generalized the transport. + UDPBufferSize int +} + +// DefaultLANConfig returns a sane set of configurations for Memberlist. 
+// It uses the hostname as the node name, and otherwise sets very conservative
+// values that are sane for most LAN environments. The default configuration
+// errs on the side of caution, choosing values that are optimized
+// for higher convergence at the cost of higher bandwidth usage. Regardless,
+// these values are a good starting point when getting started with memberlist.
+func DefaultLANConfig() *Config {
+	hostname, _ := os.Hostname()
+	return &Config{
+		Name:                    hostname,
+		BindAddr:                "0.0.0.0",
+		BindPort:                7946,
+		AdvertiseAddr:           "",
+		AdvertisePort:           7946,
+		ProtocolVersion:         ProtocolVersion2Compatible,
+		TCPTimeout:              10 * time.Second,       // Timeout after 10 seconds
+		IndirectChecks:          3,                      // Use 3 nodes for the indirect ping
+		RetransmitMult:          4,                      // Retransmit a message 4 * log(N+1) nodes
+		SuspicionMult:           5,                      // Suspect a node for 5 * log(N+1) * Interval
+		SuspicionMaxTimeoutMult: 6,                      // For 10k nodes this will give a max timeout of 120 seconds
+		PushPullInterval:        30 * time.Second,       // Low frequency
+		ProbeTimeout:            500 * time.Millisecond, // Reasonable RTT time for LAN
+		ProbeInterval:           1 * time.Second,        // Failure check every second
+		DisableTcpPings:         false,                  // TCP pings are safe, even with mixed versions
+		AwarenessMaxMultiplier:  8,                      // Probe interval backs off to 8 seconds
+
+		GossipNodes:         3,                      // Gossip to 3 nodes
+		GossipInterval:      200 * time.Millisecond, // Gossip more rapidly
+		GossipToTheDeadTime: 30 * time.Second,       // Same as push/pull
+
+		EnableCompression: true, // Enable compression by default
+
+		SecretKey: nil,
+		Keyring:   nil,
+
+		DNSConfigPath: "/etc/resolv.conf",
+
+		HandoffQueueDepth: 1024,
+		UDPBufferSize:     1400,
+	}
+}
+
+// DefaultWANConfig works like DefaultLANConfig, but returns a configuration
+// that is optimized for most WAN environments. The default configuration is
+// still very conservative and errs on the side of caution.
+func DefaultWANConfig() *Config {
+	conf := DefaultLANConfig()
+	conf.TCPTimeout = 30 * time.Second
+	conf.SuspicionMult = 6
+	conf.PushPullInterval = 60 * time.Second
+	conf.ProbeTimeout = 3 * time.Second
+	conf.ProbeInterval = 5 * time.Second
+	conf.GossipNodes = 4 // Gossip less frequently, but to an additional node
+	conf.GossipInterval = 500 * time.Millisecond
+	conf.GossipToTheDeadTime = 60 * time.Second
+	return conf
+}
+
+// DefaultLocalConfig works like DefaultLANConfig, but returns a configuration
+// that is optimized for a local loopback environment. The default configuration
+// is still very conservative and errs on the side of caution.
+func DefaultLocalConfig() *Config {
+	conf := DefaultLANConfig()
+	conf.TCPTimeout = time.Second
+	conf.IndirectChecks = 1
+	conf.RetransmitMult = 2
+	conf.SuspicionMult = 3
+	conf.PushPullInterval = 15 * time.Second
+	conf.ProbeTimeout = 200 * time.Millisecond
+	conf.ProbeInterval = time.Second
+	conf.GossipInterval = 100 * time.Millisecond
+	conf.GossipToTheDeadTime = 15 * time.Second
+	return conf
+}
+
+// EncryptionEnabled returns whether or not encryption is enabled.
+func (c *Config) EncryptionEnabled() bool {
+	return c.Keyring != nil && len(c.Keyring.GetKeys()) > 0
+}
diff --git a/vendor/github.com/hashicorp/memberlist/conflict_delegate.go b/vendor/github.com/hashicorp/memberlist/conflict_delegate.go
new file mode 100644
index 0000000000..f52b136eba
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/conflict_delegate.go
@@ -0,0 +1,10 @@
+package memberlist
+
+// ConflictDelegate is used to inform a client that
+// a node has attempted to join which would result in a
+// name conflict.
This happens if two clients are configured
+// with the same name but different addresses.
+type ConflictDelegate interface {
+	// NotifyConflict is invoked when a name conflict is detected
+	NotifyConflict(existing, other *Node)
+}
diff --git a/vendor/github.com/hashicorp/memberlist/delegate.go b/vendor/github.com/hashicorp/memberlist/delegate.go
new file mode 100644
index 0000000000..5515488921
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/delegate.go
@@ -0,0 +1,37 @@
+package memberlist
+
+// Delegate is the interface that clients must implement if they want to hook
+// into the gossip layer of Memberlist. All the methods must be thread-safe,
+// as they can and generally will be called concurrently.
+type Delegate interface {
+	// NodeMeta is used to retrieve meta-data about the current node
+	// when broadcasting an alive message. Its length is limited to
+	// the given byte size. This metadata is available in the Node structure.
+	NodeMeta(limit int) []byte
+
+	// NotifyMsg is called when a user-data message is received.
+	// Care should be taken that this method does not block, since doing
+	// so would block the entire UDP packet receive loop. Additionally, the byte
+	// slice may be modified after the call returns, so it should be copied if needed.
+	NotifyMsg([]byte)
+
+	// GetBroadcasts is called when user data messages can be broadcast.
+	// It can return a list of buffers to send. Each buffer should assume an
+	// overhead as provided with a limit on the total byte size allowed.
+	// The total byte size of the resulting data to send must not exceed
+	// the limit. Care should be taken that this method does not block,
+	// since doing so would block the entire UDP packet receive loop.
+	GetBroadcasts(overhead, limit int) [][]byte
+
+	// LocalState is used for a TCP Push/Pull. This is sent to
+	// the remote side in addition to the membership information. Any
+	// data can be sent here. See MergeRemoteState as well. The `join`
+	// boolean indicates this is for a join instead of a push/pull.
+	LocalState(join bool) []byte
+
+	// MergeRemoteState is invoked after a TCP Push/Pull. This is the
+	// state received from the remote side and is the result of the
+	// remote side's LocalState call. The 'join'
+	// boolean indicates this is for a join instead of a push/pull.
+	MergeRemoteState(buf []byte, join bool)
+}
diff --git a/vendor/github.com/hashicorp/memberlist/event_delegate.go b/vendor/github.com/hashicorp/memberlist/event_delegate.go
new file mode 100644
index 0000000000..35e2a56fdd
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/event_delegate.go
@@ -0,0 +1,61 @@
+package memberlist
+
+// EventDelegate is a simpler delegate that is used only to receive
+// notifications about members joining and leaving. The methods in this
+// delegate may be called by multiple goroutines, but never concurrently.
+// This allows you to reason about ordering.
+type EventDelegate interface {
+	// NotifyJoin is invoked when a node is detected to have joined.
+	// The Node argument must not be modified.
+	NotifyJoin(*Node)
+
+	// NotifyLeave is invoked when a node is detected to have left.
+	// The Node argument must not be modified.
+	NotifyLeave(*Node)
+
+	// NotifyUpdate is invoked when a node is detected to have
+	// updated, usually involving the meta data. The Node argument
+	// must not be modified.
+ NotifyUpdate(*Node) +} + +// ChannelEventDelegate is used to enable an application to receive +// events about joins and leaves over a channel instead of a direct +// function call. +// +// Care must be taken that events are processed in a timely manner from +// the channel, since this delegate will block until an event can be sent. +type ChannelEventDelegate struct { + Ch chan<- NodeEvent +} + +// NodeEventType are the types of events that can be sent from the +// ChannelEventDelegate. +type NodeEventType int + +const ( + NodeJoin NodeEventType = iota + NodeLeave + NodeUpdate +) + +// NodeEvent is a single event related to node activity in the memberlist. +// The Node member of this struct must not be directly modified. It is passed +// as a pointer to avoid unnecessary copies. If you wish to modify the node, +// make a copy first. +type NodeEvent struct { + Event NodeEventType + Node *Node +} + +func (c *ChannelEventDelegate) NotifyJoin(n *Node) { + c.Ch <- NodeEvent{NodeJoin, n} +} + +func (c *ChannelEventDelegate) NotifyLeave(n *Node) { + c.Ch <- NodeEvent{NodeLeave, n} +} + +func (c *ChannelEventDelegate) NotifyUpdate(n *Node) { + c.Ch <- NodeEvent{NodeUpdate, n} +} diff --git a/vendor/github.com/hashicorp/memberlist/keyring.go b/vendor/github.com/hashicorp/memberlist/keyring.go new file mode 100644 index 0000000000..a2774a0ce0 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/keyring.go @@ -0,0 +1,160 @@ +package memberlist + +import ( + "bytes" + "fmt" + "sync" +) + +type Keyring struct { + // Keys stores the key data used during encryption and decryption. It is + // ordered in such a way where the first key (index 0) is the primary key, + // which is used for encrypting messages, and is the first key tried during + // message decryption. + keys [][]byte + + // The keyring lock is used while performing IO operations on the keyring. + l sync.Mutex +} + +// Init allocates substructures +func (k *Keyring) init() { + k.keys = make([][]byte, 0) +} + +// NewKeyring constructs a new container for a set of encryption keys. The +// keyring contains all key data used internally by memberlist. +// +// While creating a new keyring, you must do one of: +// - Omit keys and primary key, effectively disabling encryption +// - Pass a set of keys plus the primary key +// - Pass only a primary key +// +// If only a primary key is passed, then it will be automatically added to the +// keyring. If creating a keyring with multiple keys, one key must be designated +// primary by passing it as the primaryKey. If the primaryKey does not exist in +// the list of secondary keys, it will be automatically added at position 0. +// +// A key should be either 16, 24, or 32 bytes to select AES-128, +// AES-192, or AES-256. +func NewKeyring(keys [][]byte, primaryKey []byte) (*Keyring, error) { + keyring := &Keyring{} + keyring.init() + + if len(keys) > 0 || len(primaryKey) > 0 { + if len(primaryKey) == 0 { + return nil, fmt.Errorf("Empty primary key not allowed") + } + if err := keyring.AddKey(primaryKey); err != nil { + return nil, err + } + for _, key := range keys { + if err := keyring.AddKey(key); err != nil { + return nil, err + } + } + } + + return keyring, nil +} + +// ValidateKey will check to see if the key is valid and returns an error if not. +// +// key should be either 16, 24, or 32 bytes to select AES-128, +// AES-192, or AES-256. 
+func ValidateKey(key []byte) error {
+	if l := len(key); l != 16 && l != 24 && l != 32 {
+		return fmt.Errorf("key size must be 16, 24 or 32 bytes")
+	}
+	return nil
+}
+
+// AddKey will install a new key on the ring. Adding a key to the ring will make
+// it available for use in decryption. If the key already exists on the ring,
+// this function will just return as a no-op.
+//
+// key should be either 16, 24, or 32 bytes to select AES-128,
+// AES-192, or AES-256.
+func (k *Keyring) AddKey(key []byte) error {
+	if err := ValidateKey(key); err != nil {
+		return err
+	}
+
+	// No-op if key is already installed
+	for _, installedKey := range k.keys {
+		if bytes.Equal(installedKey, key) {
+			return nil
+		}
+	}
+
+	keys := append(k.keys, key)
+	primaryKey := k.GetPrimaryKey()
+	if primaryKey == nil {
+		primaryKey = key
+	}
+	k.installKeys(keys, primaryKey)
+	return nil
+}
+
+// UseKey changes the key used to encrypt messages. This is the only key used to
+// encrypt messages, so peers should know this key before this method is called.
+func (k *Keyring) UseKey(key []byte) error {
+	for _, installedKey := range k.keys {
+		if bytes.Equal(key, installedKey) {
+			k.installKeys(k.keys, key)
+			return nil
+		}
+	}
+	return fmt.Errorf("Requested key is not in the keyring")
+}
+
+// RemoveKey drops a key from the keyring. This will return an error if the key
+// requested for removal is currently at position 0 (primary key).
+func (k *Keyring) RemoveKey(key []byte) error {
+	if bytes.Equal(key, k.keys[0]) {
+		return fmt.Errorf("Removing the primary key is not allowed")
+	}
+	for i, installedKey := range k.keys {
+		if bytes.Equal(key, installedKey) {
+			keys := append(k.keys[:i], k.keys[i+1:]...)
+			k.installKeys(keys, k.keys[0])
+		}
+	}
+	return nil
+}
+
+// installKeys will take out a lock on the keyring, and replace the keys with a
+// new set of keys. The key indicated by primaryKey will be installed as the new
+// primary key.
+func (k *Keyring) installKeys(keys [][]byte, primaryKey []byte) {
+	k.l.Lock()
+	defer k.l.Unlock()
+
+	newKeys := [][]byte{primaryKey}
+	for _, key := range keys {
+		if !bytes.Equal(key, primaryKey) {
+			newKeys = append(newKeys, key)
+		}
+	}
+	k.keys = newKeys
+}
+
+// GetKeys returns the current set of keys on the ring.
+func (k *Keyring) GetKeys() [][]byte {
+	k.l.Lock()
+	defer k.l.Unlock()
+
+	return k.keys
+}
+
+// GetPrimaryKey returns the key on the ring at position 0. This is the key used
+// for encrypting messages, and is the first key tried for decrypting messages.
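Taken together, the exported keyring methods support online key rotation: install a new key everywhere, promote it with UseKey, then retire the old one. A minimal sketch of that sequence (the key bytes here are placeholders, not real secrets):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/hashicorp/memberlist"
)

func main() {
	// Key length selects the cipher: 16, 24, or 32 bytes for AES-128,
	// AES-192, or AES-256.
	oldKey := bytes.Repeat([]byte{0x01}, 32)
	newKey := bytes.Repeat([]byte{0x02}, 32)

	ring, err := memberlist.NewKeyring([][]byte{newKey}, oldKey)
	if err != nil {
		panic(err)
	}

	// The primary key sits at position 0; all keys are tried on decrypt.
	fmt.Println(bytes.Equal(ring.GetPrimaryKey(), oldKey)) // true
	fmt.Println(len(ring.GetKeys()))                       // 2

	// Rotate: promote the new key, then drop the old one.
	if err := ring.UseKey(newKey); err != nil {
		panic(err)
	}
	if err := ring.RemoveKey(oldKey); err != nil {
		panic(err)
	}
}
```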
+func (k *Keyring) GetPrimaryKey() (key []byte) { + k.l.Lock() + defer k.l.Unlock() + + if len(k.keys) > 0 { + key = k.keys[0] + } + return +} diff --git a/vendor/github.com/hashicorp/memberlist/logging.go b/vendor/github.com/hashicorp/memberlist/logging.go new file mode 100644 index 0000000000..f31acfb2fa --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/logging.go @@ -0,0 +1,22 @@ +package memberlist + +import ( + "fmt" + "net" +) + +func LogAddress(addr net.Addr) string { + if addr == nil { + return "from=" + } + + return fmt.Sprintf("from=%s", addr.String()) +} + +func LogConn(conn net.Conn) string { + if conn == nil { + return LogAddress(nil) + } + + return LogAddress(conn.RemoteAddr()) +} diff --git a/vendor/github.com/hashicorp/memberlist/memberlist.go b/vendor/github.com/hashicorp/memberlist/memberlist.go new file mode 100644 index 0000000000..e4b0d7347d --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/memberlist.go @@ -0,0 +1,625 @@ +/* +memberlist is a library that manages cluster +membership and member failure detection using a gossip based protocol. + +The use cases for such a library are far-reaching: all distributed systems +require membership, and memberlist is a re-usable solution to managing +cluster membership and node failure detection. + +memberlist is eventually consistent but converges quickly on average. +The speed at which it converges can be heavily tuned via various knobs +on the protocol. Node failures are detected and network partitions are partially +tolerated by attempting to communicate to potentially dead nodes through +multiple routes. +*/ +package memberlist + +import ( + "fmt" + "log" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/hashicorp/go-multierror" + sockaddr "github.com/hashicorp/go-sockaddr" + "github.com/miekg/dns" +) + +type Memberlist struct { + sequenceNum uint32 // Local sequence number + incarnation uint32 // Local incarnation number + numNodes uint32 // Number of known nodes (estimate) + + config *Config + shutdown bool + shutdownCh chan struct{} + leave bool + leaveBroadcast chan struct{} + + transport Transport + handoff chan msgHandoff + + nodeLock sync.RWMutex + nodes []*nodeState // Known nodes + nodeMap map[string]*nodeState // Maps Addr.String() -> NodeState + nodeTimers map[string]*suspicion // Maps Addr.String() -> suspicion timer + awareness *awareness + + tickerLock sync.Mutex + tickers []*time.Ticker + stopTick chan struct{} + probeIndex int + + ackLock sync.Mutex + ackHandlers map[uint32]*ackHandler + + broadcasts *TransmitLimitedQueue + + logger *log.Logger +} + +// newMemberlist creates the network listeners. +// Does not schedule execution of background maintenance. +func newMemberlist(conf *Config) (*Memberlist, error) { + if conf.ProtocolVersion < ProtocolVersionMin { + return nil, fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]", + conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax) + } else if conf.ProtocolVersion > ProtocolVersionMax { + return nil, fmt.Errorf("Protocol version '%d' too high. 
Must be in range: [%d, %d]", + conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax) + } + + if len(conf.SecretKey) > 0 { + if conf.Keyring == nil { + keyring, err := NewKeyring(nil, conf.SecretKey) + if err != nil { + return nil, err + } + conf.Keyring = keyring + } else { + if err := conf.Keyring.AddKey(conf.SecretKey); err != nil { + return nil, err + } + if err := conf.Keyring.UseKey(conf.SecretKey); err != nil { + return nil, err + } + } + } + + if conf.LogOutput != nil && conf.Logger != nil { + return nil, fmt.Errorf("Cannot specify both LogOutput and Logger. Please choose a single log configuration setting.") + } + + logDest := conf.LogOutput + if logDest == nil { + logDest = os.Stderr + } + + logger := conf.Logger + if logger == nil { + logger = log.New(logDest, "", log.LstdFlags) + } + + // Set up a network transport by default if a custom one wasn't given + // by the config. + transport := conf.Transport + if transport == nil { + nc := &NetTransportConfig{ + BindAddrs: []string{conf.BindAddr}, + BindPort: conf.BindPort, + Logger: logger, + } + nt, err := NewNetTransport(nc) + if err != nil { + return nil, fmt.Errorf("Could not set up network transport: %v", err) + } + + if conf.BindPort == 0 { + port := nt.GetAutoBindPort() + conf.BindPort = port + logger.Printf("[DEBUG] Using dynamic bind port %d", port) + } + transport = nt + } + + m := &Memberlist{ + config: conf, + shutdownCh: make(chan struct{}), + leaveBroadcast: make(chan struct{}, 1), + transport: transport, + handoff: make(chan msgHandoff, conf.HandoffQueueDepth), + nodeMap: make(map[string]*nodeState), + nodeTimers: make(map[string]*suspicion), + awareness: newAwareness(conf.AwarenessMaxMultiplier), + ackHandlers: make(map[uint32]*ackHandler), + broadcasts: &TransmitLimitedQueue{RetransmitMult: conf.RetransmitMult}, + logger: logger, + } + m.broadcasts.NumNodes = func() int { + return m.estNumNodes() + } + go m.streamListen() + go m.packetListen() + go m.packetHandler() + return m, nil +} + +// Create will create a new Memberlist using the given configuration. +// This will not connect to any other node (see Join) yet, but will start +// all the listeners to allow other nodes to join this memberlist. +// After creating a Memberlist, the configuration given should not be +// modified by the user anymore. +func Create(conf *Config) (*Memberlist, error) { + m, err := newMemberlist(conf) + if err != nil { + return nil, err + } + if err := m.setAlive(); err != nil { + m.Shutdown() + return nil, err + } + m.schedule() + return m, nil +} + +// Join is used to take an existing Memberlist and attempt to join a cluster +// by contacting all the given hosts and performing a state sync. Initially, +// the Memberlist only contains our own state, so doing this will cause +// remote nodes to become aware of the existence of this node, effectively +// joining the cluster. +// +// This returns the number of hosts successfully contacted and an error if +// none could be reached. If an error is returned, the node did not successfully +// join the cluster. 
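Create and Join are the usual entry points into this API. Here is an illustrative sketch of bringing up a node and joining an existing cluster; the node name, port, and peer address are placeholders, not values taken from this repository:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/memberlist"
)

func main() {
	// Start from the LAN defaults and give the node a unique name.
	conf := memberlist.DefaultLANConfig()
	conf.Name = "node-a"
	conf.BindPort = 7946

	list, err := memberlist.Create(conf)
	if err != nil {
		panic(err)
	}
	defer list.Shutdown()

	// Contact any known member to sync state and join the cluster.
	contacted, err := list.Join([]string{"10.0.0.5:7946"})
	if err != nil {
		fmt.Println("no members reached:", err)
		return
	}
	fmt.Printf("synced with %d nodes\n", contacted)
}
```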
+func (m *Memberlist) Join(existing []string) (int, error) { + numSuccess := 0 + var errs error + for _, exist := range existing { + addrs, err := m.resolveAddr(exist) + if err != nil { + err = fmt.Errorf("Failed to resolve %s: %v", exist, err) + errs = multierror.Append(errs, err) + m.logger.Printf("[WARN] memberlist: %v", err) + continue + } + + for _, addr := range addrs { + hp := joinHostPort(addr.ip.String(), addr.port) + if err := m.pushPullNode(hp, true); err != nil { + err = fmt.Errorf("Failed to join %s: %v", addr.ip, err) + errs = multierror.Append(errs, err) + m.logger.Printf("[DEBUG] memberlist: %v", err) + continue + } + numSuccess++ + } + + } + if numSuccess > 0 { + errs = nil + } + return numSuccess, errs +} + +// ipPort holds information about a node we want to try to join. +type ipPort struct { + ip net.IP + port uint16 +} + +// tcpLookupIP is a helper to initiate a TCP-based DNS lookup for the given host. +// The built-in Go resolver will do a UDP lookup first, and will only use TCP if +// the response has the truncate bit set, which isn't common on DNS servers like +// Consul's. By doing the TCP lookup directly, we get the best chance for the +// largest list of hosts to join. Since joins are relatively rare events, it's ok +// to do this rather expensive operation. +func (m *Memberlist) tcpLookupIP(host string, defaultPort uint16) ([]ipPort, error) { + // Don't attempt any TCP lookups against non-fully qualified domain + // names, since those will likely come from the resolv.conf file. + if !strings.Contains(host, ".") { + return nil, nil + } + + // Make sure the domain name is terminated with a dot (we know there's + // at least one character at this point). + dn := host + if dn[len(dn)-1] != '.' { + dn = dn + "." + } + + // See if we can find a server to try. + cc, err := dns.ClientConfigFromFile(m.config.DNSConfigPath) + if err != nil { + return nil, err + } + if len(cc.Servers) > 0 { + // We support host:port in the DNS config, but need to add the + // default port if one is not supplied. + server := cc.Servers[0] + if !hasPort(server) { + server = net.JoinHostPort(server, cc.Port) + } + + // Do the lookup. + c := new(dns.Client) + c.Net = "tcp" + msg := new(dns.Msg) + msg.SetQuestion(dn, dns.TypeANY) + in, _, err := c.Exchange(msg, server) + if err != nil { + return nil, err + } + + // Handle any IPs we get back that we can attempt to join. + var ips []ipPort + for _, r := range in.Answer { + switch rr := r.(type) { + case (*dns.A): + ips = append(ips, ipPort{rr.A, defaultPort}) + case (*dns.AAAA): + ips = append(ips, ipPort{rr.AAAA, defaultPort}) + case (*dns.CNAME): + m.logger.Printf("[DEBUG] memberlist: Ignoring CNAME RR in TCP-first answer for '%s'", host) + } + } + return ips, nil + } + + return nil, nil +} + +// resolveAddr is used to resolve the address into an address, +// port, and error. If no port is given, use the default +func (m *Memberlist) resolveAddr(hostStr string) ([]ipPort, error) { + // Normalize the incoming string to host:port so we can apply Go's + // parser to it. + port := uint16(0) + if !hasPort(hostStr) { + hostStr += ":" + strconv.Itoa(m.config.BindPort) + } + host, sport, err := net.SplitHostPort(hostStr) + if err != nil { + return nil, err + } + + // This will capture the supplied port, or the default one added above. + lport, err := strconv.ParseUint(sport, 10, 16) + if err != nil { + return nil, err + } + port = uint16(lport) + + // If it looks like an IP address we are done. 
The SplitHostPort() above
+	// will make sure the host part is in good shape for parsing, even for
+	// IPv6 addresses.
+	if ip := net.ParseIP(host); ip != nil {
+		return []ipPort{ipPort{ip, port}}, nil
+	}
+
+	// First try TCP so we have the best chance for the largest list of
+	// hosts to join. If this fails it's not fatal since this isn't a standard
+	// way to query DNS, and we have a fallback below.
+	ips, err := m.tcpLookupIP(host, port)
+	if err != nil {
+		m.logger.Printf("[DEBUG] memberlist: TCP-first lookup failed for '%s', falling back to UDP: %s", hostStr, err)
+	}
+	if len(ips) > 0 {
+		return ips, nil
+	}
+
+	// If TCP didn't yield anything then use the normal Go resolver which
+	// will try UDP, then might possibly try TCP again if the UDP response
+	// indicates it was truncated.
+	ans, err := net.LookupIP(host)
+	if err != nil {
+		return nil, err
+	}
+	ips = make([]ipPort, 0, len(ans))
+	for _, ip := range ans {
+		ips = append(ips, ipPort{ip, port})
+	}
+	return ips, nil
+}
+
+// setAlive is used to mark this node as being alive. This is the same
+// as if we received an alive notification on our own network channel for
+// ourself.
+func (m *Memberlist) setAlive() error {
+	// Get the final advertise address from the transport, which may need
+	// to see which address we bound to.
+	addr, port, err := m.transport.FinalAdvertiseAddr(
+		m.config.AdvertiseAddr, m.config.AdvertisePort)
+	if err != nil {
+		return fmt.Errorf("Failed to get final advertise address: %v", err)
+	}
+
+	// Check if this is a public address without encryption
+	ipAddr, err := sockaddr.NewIPAddr(addr.String())
+	if err != nil {
+		return fmt.Errorf("Failed to parse interface addresses: %v", err)
+	}
+	ifAddrs := []sockaddr.IfAddr{
+		sockaddr.IfAddr{
+			SockAddr: ipAddr,
+		},
+	}
+	_, publicIfs, err := sockaddr.IfByRFC("6890", ifAddrs)
+	if len(publicIfs) > 0 && !m.config.EncryptionEnabled() {
+		m.logger.Printf("[WARN] memberlist: Binding to public address without encryption!")
+	}
+
+	// Set any metadata from the delegate.
+	var meta []byte
+	if m.config.Delegate != nil {
+		meta = m.config.Delegate.NodeMeta(MetaMaxSize)
+		if len(meta) > MetaMaxSize {
+			panic("Node meta data provided is longer than the limit")
+		}
+	}
+
+	a := alive{
+		Incarnation: m.nextIncarnation(),
+		Node:        m.config.Name,
+		Addr:        addr,
+		Port:        uint16(port),
+		Meta:        meta,
+		Vsn: []uint8{
+			ProtocolVersionMin, ProtocolVersionMax, m.config.ProtocolVersion,
+			m.config.DelegateProtocolMin, m.config.DelegateProtocolMax,
+			m.config.DelegateProtocolVersion,
+		},
+	}
+	m.aliveNode(&a, nil, true)
+	return nil
+}
+
+// LocalNode is used to return the local Node
+func (m *Memberlist) LocalNode() *Node {
+	m.nodeLock.RLock()
+	defer m.nodeLock.RUnlock()
+	state := m.nodeMap[m.config.Name]
+	return &state.Node
+}
+
+// UpdateNode is used to trigger re-advertising the local node. This is
+// primarily used with a Delegate to support dynamic updates to the local
+// meta data. This will block until the update message is successfully
+// broadcast to a member of the cluster, if any exist, or until a specified
+// timeout is reached.
+func (m *Memberlist) UpdateNode(timeout time.Duration) error { + // Get the node meta data + var meta []byte + if m.config.Delegate != nil { + meta = m.config.Delegate.NodeMeta(MetaMaxSize) + if len(meta) > MetaMaxSize { + panic("Node meta data provided is longer than the limit") + } + } + + // Get the existing node + m.nodeLock.RLock() + state := m.nodeMap[m.config.Name] + m.nodeLock.RUnlock() + + // Format a new alive message + a := alive{ + Incarnation: m.nextIncarnation(), + Node: m.config.Name, + Addr: state.Addr, + Port: state.Port, + Meta: meta, + Vsn: []uint8{ + ProtocolVersionMin, ProtocolVersionMax, m.config.ProtocolVersion, + m.config.DelegateProtocolMin, m.config.DelegateProtocolMax, + m.config.DelegateProtocolVersion, + }, + } + notifyCh := make(chan struct{}) + m.aliveNode(&a, notifyCh, true) + + // Wait for the broadcast or a timeout + if m.anyAlive() { + var timeoutCh <-chan time.Time + if timeout > 0 { + timeoutCh = time.After(timeout) + } + select { + case <-notifyCh: + case <-timeoutCh: + return fmt.Errorf("timeout waiting for update broadcast") + } + } + return nil +} + +// SendTo is deprecated in favor of SendBestEffort, which requires a node to +// target. +func (m *Memberlist) SendTo(to net.Addr, msg []byte) error { + // Encode as a user message + buf := make([]byte, 1, len(msg)+1) + buf[0] = byte(userMsg) + buf = append(buf, msg...) + + // Send the message + return m.rawSendMsgPacket(to.String(), nil, buf) +} + +// SendToUDP is deprecated in favor of SendBestEffort. +func (m *Memberlist) SendToUDP(to *Node, msg []byte) error { + return m.SendBestEffort(to, msg) +} + +// SendToTCP is deprecated in favor of SendReliable. +func (m *Memberlist) SendToTCP(to *Node, msg []byte) error { + return m.SendReliable(to, msg) +} + +// SendBestEffort uses the unreliable packet-oriented interface of the transport +// to target a user message at the given node (this does not use the gossip +// mechanism). The maximum size of the message depends on the configured +// UDPBufferSize for this memberlist instance. +func (m *Memberlist) SendBestEffort(to *Node, msg []byte) error { + // Encode as a user message + buf := make([]byte, 1, len(msg)+1) + buf[0] = byte(userMsg) + buf = append(buf, msg...) + + // Send the message + return m.rawSendMsgPacket(to.Address(), to, buf) +} + +// SendReliable uses the reliable stream-oriented interface of the transport to +// target a user message at the given node (this does not use the gossip +// mechanism). Delivery is guaranteed if no error is returned, and there is no +// limit on the size of the message. +func (m *Memberlist) SendReliable(to *Node, msg []byte) error { + return m.sendUserMsg(to.Address(), msg) +} + +// Members returns a list of all known live nodes. The node structures +// returned must not be modified. If you wish to modify a Node, make a +// copy first. +func (m *Memberlist) Members() []*Node { + m.nodeLock.RLock() + defer m.nodeLock.RUnlock() + + nodes := make([]*Node, 0, len(m.nodes)) + for _, n := range m.nodes { + if n.State != stateDead { + nodes = append(nodes, &n.Node) + } + } + + return nodes +} + +// NumMembers returns the number of alive nodes currently known. Between +// the time of calling this and calling Members, the number of alive nodes +// may have changed, so this shouldn't be used to determine how many +// members will be returned by Members. 
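Continuing the illustrative sketch from the Join example above: reading the membership is a lock-protected walk, and the returned Node pointers are shared, so they must be treated as read-only. A small hypothetical helper, assuming a *memberlist.Memberlist built as before:

```go
package clusterutil

import (
	"fmt"

	"github.com/hashicorp/memberlist"
)

// printMembers lists the live nodes; Node structs are shared, so we
// only read from them.
func printMembers(list *memberlist.Memberlist) {
	for _, node := range list.Members() {
		fmt.Printf("member %s at %s:%d\n", node.Name, node.Addr, node.Port)
	}
	fmt.Println("alive nodes:", list.NumMembers())
}
```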
+func (m *Memberlist) NumMembers() (alive int) {
+	m.nodeLock.RLock()
+	defer m.nodeLock.RUnlock()
+
+	for _, n := range m.nodes {
+		if n.State != stateDead {
+			alive++
+		}
+	}
+
+	return
+}
+
+// Leave will broadcast a leave message but will not shut down the background
+// listeners, meaning the node will continue participating in gossip and state
+// updates.
+//
+// This will block until the leave message is successfully broadcast to
+// a member of the cluster, if any exist, or until the specified timeout
+// is reached.
+//
+// This method is safe to call multiple times, but must not be called
+// after the cluster is already shut down.
+func (m *Memberlist) Leave(timeout time.Duration) error {
+	m.nodeLock.Lock()
+	// We can't defer m.nodeLock.Unlock() because m.deadNode will also try to
+	// acquire a lock so we need to Unlock before that.
+
+	if m.shutdown {
+		m.nodeLock.Unlock()
+		panic("leave after shutdown")
+	}
+
+	if !m.leave {
+		m.leave = true
+
+		state, ok := m.nodeMap[m.config.Name]
+		m.nodeLock.Unlock()
+		if !ok {
+			m.logger.Printf("[WARN] memberlist: Leave but we're not in the node map.")
+			return nil
+		}
+
+		d := dead{
+			Incarnation: state.Incarnation,
+			Node:        state.Name,
+		}
+		m.deadNode(&d)
+
+		// Block until the broadcast goes out
+		if m.anyAlive() {
+			var timeoutCh <-chan time.Time
+			if timeout > 0 {
+				timeoutCh = time.After(timeout)
+			}
+			select {
+			case <-m.leaveBroadcast:
+			case <-timeoutCh:
+				return fmt.Errorf("timeout waiting for leave broadcast")
+			}
+		}
+	} else {
+		m.nodeLock.Unlock()
+	}
+
+	return nil
+}
+
+// Check for any other alive node.
+func (m *Memberlist) anyAlive() bool {
+	m.nodeLock.RLock()
+	defer m.nodeLock.RUnlock()
+	for _, n := range m.nodes {
+		if n.State != stateDead && n.Name != m.config.Name {
+			return true
+		}
+	}
+	return false
+}
+
+// GetHealthScore gives this instance's idea of how well it is meeting the soft
+// real-time requirements of the protocol. Lower numbers are better, and zero
+// means "totally healthy".
+func (m *Memberlist) GetHealthScore() int {
+	return m.awareness.GetHealthScore()
+}
+
+// ProtocolVersion returns the protocol version currently in use by
+// this memberlist.
+func (m *Memberlist) ProtocolVersion() uint8 {
+	// NOTE: This method exists so that in the future we can control
+	// any locking if necessary, if we change the protocol version at
+	// runtime, etc.
+	return m.config.ProtocolVersion
+}
+
+// Shutdown will stop any background maintenance of network activity
+// for this memberlist, causing it to appear "dead". A leave message
+// will not be broadcast first, so the cluster being left will have
+// to detect this node's shutdown using probing. If you wish to exit
+// the cluster more gracefully, call Leave before shutting down.
+//
+// This method is safe to call multiple times.
+func (m *Memberlist) Shutdown() error {
+	m.nodeLock.Lock()
+	defer m.nodeLock.Unlock()
+
+	if m.shutdown {
+		return nil
+	}
+
+	// Shut down the transport first, which should block until it's
+	// completely torn down. If we kill the memberlist-side handlers
+	// those I/O handlers might get stuck.
+	m.transport.Shutdown()
+
+	// Now tear down everything else.
+ m.shutdown = true + close(m.shutdownCh) + m.deschedule() + return nil +} diff --git a/vendor/github.com/hashicorp/memberlist/merge_delegate.go b/vendor/github.com/hashicorp/memberlist/merge_delegate.go new file mode 100644 index 0000000000..89afb59f20 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/merge_delegate.go @@ -0,0 +1,14 @@ +package memberlist + +// MergeDelegate is used to involve a client in +// a potential cluster merge operation. Namely, when +// a node does a TCP push/pull (as part of a join), +// the delegate is involved and allowed to cancel the join +// based on custom logic. The merge delegate is NOT invoked +// as part of the push-pull anti-entropy. +type MergeDelegate interface { + // NotifyMerge is invoked when a merge could take place. + // Provides a list of the nodes known by the peer. If + // the return value is non-nil, the merge is canceled. + NotifyMerge(peers []*Node) error +} diff --git a/vendor/github.com/hashicorp/memberlist/mock_transport.go b/vendor/github.com/hashicorp/memberlist/mock_transport.go new file mode 100644 index 0000000000..b8bafa8026 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/mock_transport.go @@ -0,0 +1,121 @@ +package memberlist + +import ( + "fmt" + "net" + "strconv" + "time" +) + +// MockNetwork is used as a factory that produces MockTransport instances which +// are uniquely addressed and wired up to talk to each other. +type MockNetwork struct { + transports map[string]*MockTransport + port int +} + +// NewTransport returns a new MockTransport with a unique address, wired up to +// talk to the other transports in the MockNetwork. +func (n *MockNetwork) NewTransport() *MockTransport { + n.port += 1 + addr := fmt.Sprintf("127.0.0.1:%d", n.port) + transport := &MockTransport{ + net: n, + addr: &MockAddress{addr}, + packetCh: make(chan *Packet), + streamCh: make(chan net.Conn), + } + + if n.transports == nil { + n.transports = make(map[string]*MockTransport) + } + n.transports[addr] = transport + return transport +} + +// MockAddress is a wrapper which adds the net.Addr interface to our mock +// address scheme. +type MockAddress struct { + addr string +} + +// See net.Addr. +func (a *MockAddress) Network() string { + return "mock" +} + +// See net.Addr. +func (a *MockAddress) String() string { + return a.addr +} + +// MockTransport directly plumbs messages to other transports its MockNetwork. +type MockTransport struct { + net *MockNetwork + addr *MockAddress + packetCh chan *Packet + streamCh chan net.Conn +} + +// See Transport. +func (t *MockTransport) FinalAdvertiseAddr(string, int) (net.IP, int, error) { + host, portStr, err := net.SplitHostPort(t.addr.String()) + if err != nil { + return nil, 0, err + } + + ip := net.ParseIP(host) + if ip == nil { + return nil, 0, fmt.Errorf("Failed to parse IP %q", host) + } + + port, err := strconv.ParseInt(portStr, 10, 16) + if err != nil { + return nil, 0, err + } + + return ip, int(port), nil +} + +// See Transport. +func (t *MockTransport) WriteTo(b []byte, addr string) (time.Time, error) { + dest, ok := t.net.transports[addr] + if !ok { + return time.Time{}, fmt.Errorf("No route to %q", addr) + } + + now := time.Now() + dest.packetCh <- &Packet{ + Buf: b, + From: t.addr, + Timestamp: now, + } + return now, nil +} + +// See Transport. +func (t *MockTransport) PacketCh() <-chan *Packet { + return t.packetCh +} + +// See Transport. 
+func (t *MockTransport) DialTimeout(addr string, timeout time.Duration) (net.Conn, error) {
+	dest, ok := t.net.transports[addr]
+	if !ok {
+		return nil, fmt.Errorf("No route to %q", addr)
+	}
+
+	p1, p2 := net.Pipe()
+	dest.streamCh <- p1
+	return p2, nil
+}
+
+// See Transport.
+func (t *MockTransport) StreamCh() <-chan net.Conn {
+	return t.streamCh
+}
+
+// See Transport.
+func (t *MockTransport) Shutdown() error {
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/memberlist/net.go b/vendor/github.com/hashicorp/memberlist/net.go
new file mode 100644
index 0000000000..e0036d01d6
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/net.go
@@ -0,0 +1,1069 @@
+package memberlist
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"io"
+	"net"
+	"time"
+
+	"github.com/armon/go-metrics"
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+// This is the minimum and maximum protocol version that we can
+// _understand_. We're allowed to speak at any version within this
+// range. This range is inclusive.
+const (
+	ProtocolVersionMin uint8 = 1
+
+	// Version 3 added support for TCP pings but we kept the default
+	// protocol version at 2 to ease transition to this new feature.
+	// A memberlist speaking version 2 of the protocol will attempt
+	// to TCP ping another memberlist who understands version 3 or
+	// greater.
+	//
+	// Version 4 added support for nacks as part of indirect probes.
+	// A memberlist speaking version 2 of the protocol will expect
+	// nacks from another memberlist who understands version 4 or
+	// greater, and likewise nacks will be sent to memberlists who
+	// understand version 4 or greater.
+	ProtocolVersion2Compatible = 2
+
+	ProtocolVersionMax = 5
+)
+
+// messageType is an integer ID of a type of message that can be received
+// on network channels from other members.
+type messageType uint8
+
+// The list of available message types.
+const (
+	pingMsg messageType = iota
+	indirectPingMsg
+	ackRespMsg
+	suspectMsg
+	aliveMsg
+	deadMsg
+	pushPullMsg
+	compoundMsg
+	userMsg // User message, not handled by us
+	compressMsg
+	encryptMsg
+	nackRespMsg
+	hasCrcMsg
+)
+
+// compressionType is used to specify the compression algorithm
+type compressionType uint8
+
+const (
+	lzwAlgo compressionType = iota
+)
+
+const (
+	MetaMaxSize            = 512 // Maximum size for node meta data
+	compoundHeaderOverhead = 2   // Assumed header overhead
+	compoundOverhead       = 2   // Assumed overhead per entry in compoundHeader
+	userMsgOverhead        = 1
+	blockingWarning        = 10 * time.Millisecond // Warn if a UDP packet takes this long to process
+	maxPushStateBytes      = 10 * 1024 * 1024
+)
+
+// ping request sent directly to node
+type ping struct {
+	SeqNo uint32
+
+	// Node is sent so the target can verify they are
+	// the intended recipient. This is to protect against an agent
+	// restart with a new name.
+	Node string
+}
+
+// indirect ping request sent to an intermediate node
+type indirectPingReq struct {
+	SeqNo  uint32
+	Target []byte
+	Port   uint16
+	Node   string
+	Nack   bool // true if we'd like a nack back
+}
+
+// ack response is sent for a ping
+type ackResp struct {
+	SeqNo   uint32
+	Payload []byte
+}
+
+// nack response is sent for an indirect ping when the pinger doesn't hear from
+// the ping-ee within the configured timeout. This lets the original node know
+// that the indirect ping attempt happened but didn't succeed.
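+//
+// An illustrative message flow (editorial sketch, not upstream docs): node A
+// probes B indirectly through C, asking for a nack.
+//
+//	A --indirectPingReq{Nack: true}--> C --ping--> B
+//	A <--ackResp---- C    (B answered within C's probe timeout)
+//	A <--nackResp--- C    (B did not answer; C confirms the attempt was made)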
+type nackResp struct {
+	SeqNo uint32
+}
+
+// suspect is broadcast when we suspect a node is dead
+type suspect struct {
+	Incarnation uint32
+	Node        string
+	From        string // Include who is suspecting
+}
+
+// alive is broadcast when we know a node is alive.
+// Overloaded for nodes joining
+type alive struct {
+	Incarnation uint32
+	Node        string
+	Addr        []byte
+	Port        uint16
+	Meta        []byte
+
+	// The versions of the protocol/delegate that are being spoken, order:
+	// pmin, pmax, pcur, dmin, dmax, dcur
+	Vsn []uint8
+}
+
+// dead is broadcast when we confirm a node is dead
+// Overloaded for nodes leaving
+type dead struct {
+	Incarnation uint32
+	Node        string
+	From        string // Include who is declaring the node dead
+}
+
+// pushPullHeader is used to inform the
+// other side how many states we are transferring
+type pushPullHeader struct {
+	Nodes        int
+	UserStateLen int  // Encodes the byte length of user state
+	Join         bool // Is this a join request or an anti-entropy run
+}
+
+// userMsgHeader is used to encapsulate a userMsg
+type userMsgHeader struct {
+	UserMsgLen int // Encodes the byte length of the user message
+}
+
+// pushNodeState is used for pushPullReq when we are
+// transferring our node states
+type pushNodeState struct {
+	Name        string
+	Addr        []byte
+	Port        uint16
+	Meta        []byte
+	Incarnation uint32
+	State       nodeStateType
+	Vsn         []uint8 // Protocol versions
+}
+
+// compress is used to wrap an underlying payload
+// using a specified compression algorithm
+type compress struct {
+	Algo compressionType
+	Buf  []byte
+}
+
+// msgHandoff is used to transfer a message between goroutines
+type msgHandoff struct {
+	msgType messageType
+	buf     []byte
+	from    net.Addr
+}
+
+// encryptionVersion returns the encryption version to use
+func (m *Memberlist) encryptionVersion() encryptionVersion {
+	switch m.ProtocolVersion() {
+	case 1:
+		return 0
+	default:
+		return 1
+	}
+}
+
+// streamListen is a long running goroutine that pulls incoming streams from the
+// transport and hands them off for processing.
+func (m *Memberlist) streamListen() {
+	for {
+		select {
+		case conn := <-m.transport.StreamCh():
+			go m.handleConn(conn)
+
+		case <-m.shutdownCh:
+			return
+		}
+	}
+}
+
+// handleConn handles a single incoming stream connection from the transport.
+func (m *Memberlist) handleConn(conn net.Conn) { + m.logger.Printf("[DEBUG] memberlist: Stream connection %s", LogConn(conn)) + + defer conn.Close() + metrics.IncrCounter([]string{"memberlist", "tcp", "accept"}, 1) + + conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) + msgType, bufConn, dec, err := m.readStream(conn) + if err != nil { + if err != io.EOF { + m.logger.Printf("[ERR] memberlist: failed to receive: %s %s", err, LogConn(conn)) + } + return + } + + switch msgType { + case userMsg: + if err := m.readUserMsg(bufConn, dec); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to receive user message: %s %s", err, LogConn(conn)) + } + case pushPullMsg: + join, remoteNodes, userState, err := m.readRemoteState(bufConn, dec) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed to read remote state: %s %s", err, LogConn(conn)) + return + } + + if err := m.sendLocalState(conn, join); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to push local state: %s %s", err, LogConn(conn)) + return + } + + if err := m.mergeRemoteState(join, remoteNodes, userState); err != nil { + m.logger.Printf("[ERR] memberlist: Failed push/pull merge: %s %s", err, LogConn(conn)) + return + } + case pingMsg: + var p ping + if err := dec.Decode(&p); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode ping: %s %s", err, LogConn(conn)) + return + } + + if p.Node != "" && p.Node != m.config.Name { + m.logger.Printf("[WARN] memberlist: Got ping for unexpected node %s %s", p.Node, LogConn(conn)) + return + } + + ack := ackResp{p.SeqNo, nil} + out, err := encode(ackRespMsg, &ack) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed to encode ack: %s", err) + return + } + + err = m.rawSendMsgStream(conn, out.Bytes()) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send ack: %s %s", err, LogConn(conn)) + return + } + default: + m.logger.Printf("[ERR] memberlist: Received invalid msgType (%d) %s", msgType, LogConn(conn)) + } +} + +// packetListen is a long running goroutine that pulls packets out of the +// transport and hands them off for processing. 
+func (m *Memberlist) packetListen() { + for { + select { + case packet := <-m.transport.PacketCh(): + m.ingestPacket(packet.Buf, packet.From, packet.Timestamp) + + case <-m.shutdownCh: + return + } + } +} + +func (m *Memberlist) ingestPacket(buf []byte, from net.Addr, timestamp time.Time) { + // Check if encryption is enabled + if m.config.EncryptionEnabled() { + // Decrypt the payload + plain, err := decryptPayload(m.config.Keyring.GetKeys(), buf, nil) + if err != nil { + m.logger.Printf("[ERR] memberlist: Decrypt packet failed: %v %s", err, LogAddress(from)) + return + } + + // Continue processing the plaintext buffer + buf = plain + } + + // See if there's a checksum included to verify the contents of the message + if len(buf) >= 5 && messageType(buf[0]) == hasCrcMsg { + crc := crc32.ChecksumIEEE(buf[5:]) + expected := binary.BigEndian.Uint32(buf[1:5]) + if crc != expected { + m.logger.Printf("[WARN] memberlist: Got invalid checksum for UDP packet: %x, %x", crc, expected) + return + } + m.handleCommand(buf[5:], from, timestamp) + } else { + m.handleCommand(buf, from, timestamp) + } +} + +func (m *Memberlist) handleCommand(buf []byte, from net.Addr, timestamp time.Time) { + // Decode the message type + msgType := messageType(buf[0]) + buf = buf[1:] + + // Switch on the msgType + switch msgType { + case compoundMsg: + m.handleCompound(buf, from, timestamp) + case compressMsg: + m.handleCompressed(buf, from, timestamp) + + case pingMsg: + m.handlePing(buf, from) + case indirectPingMsg: + m.handleIndirectPing(buf, from) + case ackRespMsg: + m.handleAck(buf, from, timestamp) + case nackRespMsg: + m.handleNack(buf, from) + + case suspectMsg: + fallthrough + case aliveMsg: + fallthrough + case deadMsg: + fallthrough + case userMsg: + select { + case m.handoff <- msgHandoff{msgType, buf, from}: + default: + m.logger.Printf("[WARN] memberlist: handler queue full, dropping message (%d) %s", msgType, LogAddress(from)) + } + + default: + m.logger.Printf("[ERR] memberlist: msg type (%d) not supported %s", msgType, LogAddress(from)) + } +} + +// packetHandler is a long running goroutine that processes messages received +// over the packet interface, but is decoupled from the listener to avoid +// blocking the listener which may cause ping/ack messages to be delayed. 
+func (m *Memberlist) packetHandler() { + for { + select { + case msg := <-m.handoff: + msgType := msg.msgType + buf := msg.buf + from := msg.from + + switch msgType { + case suspectMsg: + m.handleSuspect(buf, from) + case aliveMsg: + m.handleAlive(buf, from) + case deadMsg: + m.handleDead(buf, from) + case userMsg: + m.handleUser(buf, from) + default: + m.logger.Printf("[ERR] memberlist: Message type (%d) not supported %s (packet handler)", msgType, LogAddress(from)) + } + + case <-m.shutdownCh: + return + } + } +} + +func (m *Memberlist) handleCompound(buf []byte, from net.Addr, timestamp time.Time) { + // Decode the parts + trunc, parts, err := decodeCompoundMessage(buf) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode compound request: %s %s", err, LogAddress(from)) + return + } + + // Log any truncation + if trunc > 0 { + m.logger.Printf("[WARN] memberlist: Compound request had %d truncated messages %s", trunc, LogAddress(from)) + } + + // Handle each message + for _, part := range parts { + m.handleCommand(part, from, timestamp) + } +} + +func (m *Memberlist) handlePing(buf []byte, from net.Addr) { + var p ping + if err := decode(buf, &p); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode ping request: %s %s", err, LogAddress(from)) + return + } + // If node is provided, verify that it is for us + if p.Node != "" && p.Node != m.config.Name { + m.logger.Printf("[WARN] memberlist: Got ping for unexpected node '%s' %s", p.Node, LogAddress(from)) + return + } + var ack ackResp + ack.SeqNo = p.SeqNo + if m.config.Ping != nil { + ack.Payload = m.config.Ping.AckPayload() + } + if err := m.encodeAndSendMsg(from.String(), ackRespMsg, &ack); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send ack: %s %s", err, LogAddress(from)) + } +} + +func (m *Memberlist) handleIndirectPing(buf []byte, from net.Addr) { + var ind indirectPingReq + if err := decode(buf, &ind); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode indirect ping request: %s %s", err, LogAddress(from)) + return + } + + // For proto versions < 2, there is no port provided. Mask old + // behavior by using the configured port. + if m.ProtocolVersion() < 2 || ind.Port == 0 { + ind.Port = uint16(m.config.BindPort) + } + + // Send a ping to the correct host. + localSeqNo := m.nextSeqNo() + ping := ping{SeqNo: localSeqNo, Node: ind.Node} + + // Setup a response handler to relay the ack + cancelCh := make(chan struct{}) + respHandler := func(payload []byte, timestamp time.Time) { + // Try to prevent the nack if we've caught it in time. + close(cancelCh) + + // Forward the ack back to the requestor. + ack := ackResp{ind.SeqNo, nil} + if err := m.encodeAndSendMsg(from.String(), ackRespMsg, &ack); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to forward ack: %s %s", err, LogAddress(from)) + } + } + m.setAckHandler(localSeqNo, respHandler, m.config.ProbeTimeout) + + // Send the ping. + addr := joinHostPort(net.IP(ind.Target).String(), ind.Port) + if err := m.encodeAndSendMsg(addr, pingMsg, &ping); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send ping: %s %s", err, LogAddress(from)) + } + + // Setup a timer to fire off a nack if no ack is seen in time. 
+ if ind.Nack { + go func() { + select { + case <-cancelCh: + return + case <-time.After(m.config.ProbeTimeout): + nack := nackResp{ind.SeqNo} + if err := m.encodeAndSendMsg(from.String(), nackRespMsg, &nack); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send nack: %s %s", err, LogAddress(from)) + } + } + }() + } +} + +func (m *Memberlist) handleAck(buf []byte, from net.Addr, timestamp time.Time) { + var ack ackResp + if err := decode(buf, &ack); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode ack response: %s %s", err, LogAddress(from)) + return + } + m.invokeAckHandler(ack, timestamp) +} + +func (m *Memberlist) handleNack(buf []byte, from net.Addr) { + var nack nackResp + if err := decode(buf, &nack); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode nack response: %s %s", err, LogAddress(from)) + return + } + m.invokeNackHandler(nack) +} + +func (m *Memberlist) handleSuspect(buf []byte, from net.Addr) { + var sus suspect + if err := decode(buf, &sus); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode suspect message: %s %s", err, LogAddress(from)) + return + } + m.suspectNode(&sus) +} + +func (m *Memberlist) handleAlive(buf []byte, from net.Addr) { + var live alive + if err := decode(buf, &live); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode alive message: %s %s", err, LogAddress(from)) + return + } + + // For proto versions < 2, there is no port provided. Mask old + // behavior by using the configured port + if m.ProtocolVersion() < 2 || live.Port == 0 { + live.Port = uint16(m.config.BindPort) + } + + m.aliveNode(&live, nil, false) +} + +func (m *Memberlist) handleDead(buf []byte, from net.Addr) { + var d dead + if err := decode(buf, &d); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decode dead message: %s %s", err, LogAddress(from)) + return + } + m.deadNode(&d) +} + +// handleUser is used to notify channels of incoming user data +func (m *Memberlist) handleUser(buf []byte, from net.Addr) { + d := m.config.Delegate + if d != nil { + d.NotifyMsg(buf) + } +} + +// handleCompressed is used to unpack a compressed message +func (m *Memberlist) handleCompressed(buf []byte, from net.Addr, timestamp time.Time) { + // Try to decode the payload + payload, err := decompressPayload(buf) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed to decompress payload: %v %s", err, LogAddress(from)) + return + } + + // Recursively handle the payload + m.handleCommand(payload, from, timestamp) +} + +// encodeAndSendMsg is used to combine the encoding and sending steps +func (m *Memberlist) encodeAndSendMsg(addr string, msgType messageType, msg interface{}) error { + out, err := encode(msgType, msg) + if err != nil { + return err + } + if err := m.sendMsg(addr, out.Bytes()); err != nil { + return err + } + return nil +} + +// sendMsg is used to send a message via packet to another host. It will +// opportunistically create a compoundMsg and piggy back other broadcasts. 
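+//
+// A worked budget (editorial sketch; 1400 bytes is memberlist's usual default
+// UDPBufferSize, and 29 bytes the version-1 overhead from encryptOverhead in
+// security.go): the piggybacking room left by a 200-byte message is
+//
+//	1400 - 200 - 2 (compound header) - 29 (encryption) = 1169 bytes,
+//
+// with each piggybacked broadcast costing its own length plus 2 bytes of
+// per-entry compound overhead.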
+func (m *Memberlist) sendMsg(addr string, msg []byte) error {
+	// Check if we can piggy back any messages
+	bytesAvail := m.config.UDPBufferSize - len(msg) - compoundHeaderOverhead
+	if m.config.EncryptionEnabled() {
+		bytesAvail -= encryptOverhead(m.encryptionVersion())
+	}
+	extra := m.getBroadcasts(compoundOverhead, bytesAvail)
+
+	// Fast path if nothing to piggyback
+	if len(extra) == 0 {
+		return m.rawSendMsgPacket(addr, nil, msg)
+	}
+
+	// Join all the messages
+	msgs := make([][]byte, 0, 1+len(extra))
+	msgs = append(msgs, msg)
+	msgs = append(msgs, extra...)
+
+	// Create a compound message
+	compound := makeCompoundMessage(msgs)
+
+	// Send the message
+	return m.rawSendMsgPacket(addr, nil, compound.Bytes())
+}
+
+// rawSendMsgPacket is used to send a message via packet to another host without
+// modification, other than compression or encryption if enabled.
+func (m *Memberlist) rawSendMsgPacket(addr string, node *Node, msg []byte) error {
+	// Check if we have compression enabled
+	if m.config.EnableCompression {
+		buf, err := compressPayload(msg)
+		if err != nil {
+			m.logger.Printf("[WARN] memberlist: Failed to compress payload: %v", err)
+		} else {
+			// Only use compression if it reduced the size
+			if buf.Len() < len(msg) {
+				msg = buf.Bytes()
+			}
+		}
+	}
+
+	// Try to look up the destination node
+	if node == nil {
+		toAddr, _, err := net.SplitHostPort(addr)
+		if err != nil {
+			m.logger.Printf("[ERR] memberlist: Failed to parse address %q: %v", addr, err)
+			return err
+		}
+		m.nodeLock.RLock()
+		nodeState, ok := m.nodeMap[toAddr]
+		m.nodeLock.RUnlock()
+		if ok {
+			node = &nodeState.Node
+		}
+	}
+
+	// Prepend a CRC header to the payload if the recipient understands
+	// ProtocolVersion >= 5
+	if node != nil && node.PMax >= 5 {
+		crc := crc32.ChecksumIEEE(msg)
+		header := make([]byte, 5, 5+len(msg))
+		header[0] = byte(hasCrcMsg)
+		binary.BigEndian.PutUint32(header[1:], crc)
+		msg = append(header, msg...)
+	}
+
+	// Check if we have encryption enabled
+	if m.config.EncryptionEnabled() {
+		// Encrypt the payload
+		var buf bytes.Buffer
+		primaryKey := m.config.Keyring.GetPrimaryKey()
+		err := encryptPayload(m.encryptionVersion(), primaryKey, msg, nil, &buf)
+		if err != nil {
+			m.logger.Printf("[ERR] memberlist: Encryption of message failed: %v", err)
+			return err
+		}
+		msg = buf.Bytes()
+	}
+
+	metrics.IncrCounter([]string{"memberlist", "udp", "sent"}, float32(len(msg)))
+	_, err := m.transport.WriteTo(msg, addr)
+	return err
+}
+
+// rawSendMsgStream is used to stream a message to another host without
+// modification, other than applying compression and encryption if enabled.
+func (m *Memberlist) rawSendMsgStream(conn net.Conn, sendBuf []byte) error {
+	// Check if compression is enabled
+	if m.config.EnableCompression {
+		compBuf, err := compressPayload(sendBuf)
+		if err != nil {
+			m.logger.Printf("[ERROR] memberlist: Failed to compress payload: %v", err)
+		} else {
+			sendBuf = compBuf.Bytes()
+		}
+	}
+
+	// Check if encryption is enabled
+	if m.config.EncryptionEnabled() {
+		crypt, err := m.encryptLocalState(sendBuf)
+		if err != nil {
+			m.logger.Printf("[ERROR] memberlist: Failed to encrypt local state: %v", err)
+			return err
+		}
+		sendBuf = crypt
+	}
+
+	// Write out the entire send buffer
+	metrics.IncrCounter([]string{"memberlist", "tcp", "sent"}, float32(len(sendBuf)))
+
+	if n, err := conn.Write(sendBuf); err != nil {
+		return err
+	} else if n != len(sendBuf) {
+		return fmt.Errorf("only %d of %d bytes written", n, len(sendBuf))
+	}
+
+	return nil
+}
+
+// sendUserMsg is used to stream a user message to another host.
+func (m *Memberlist) sendUserMsg(addr string, sendBuf []byte) error {
+	conn, err := m.transport.DialTimeout(addr, m.config.TCPTimeout)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	bufConn := bytes.NewBuffer(nil)
+	if err := bufConn.WriteByte(byte(userMsg)); err != nil {
+		return err
+	}
+
+	header := userMsgHeader{UserMsgLen: len(sendBuf)}
+	hd := codec.MsgpackHandle{}
+	enc := codec.NewEncoder(bufConn, &hd)
+	if err := enc.Encode(&header); err != nil {
+		return err
+	}
+	if _, err := bufConn.Write(sendBuf); err != nil {
+		return err
+	}
+	return m.rawSendMsgStream(conn, bufConn.Bytes())
+}
+
+// sendAndReceiveState is used to initiate a push/pull over a stream with a
+// remote host.
+func (m *Memberlist) sendAndReceiveState(addr string, join bool) ([]pushNodeState, []byte, error) {
+	// Attempt to connect
+	conn, err := m.transport.DialTimeout(addr, m.config.TCPTimeout)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer conn.Close()
+	m.logger.Printf("[DEBUG] memberlist: Initiating push/pull sync with: %s", conn.RemoteAddr())
+	metrics.IncrCounter([]string{"memberlist", "tcp", "connect"}, 1)
+
+	// Send our state
+	if err := m.sendLocalState(conn, join); err != nil {
+		return nil, nil, err
+	}
+
+	conn.SetDeadline(time.Now().Add(m.config.TCPTimeout))
+	msgType, bufConn, dec, err := m.readStream(conn)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Quit if not push/pull
+	if msgType != pushPullMsg {
+		err := fmt.Errorf("received invalid msgType (%d), expected pushPullMsg (%d) %s", msgType, pushPullMsg, LogConn(conn))
+		return nil, nil, err
+	}
+
+	// Read remote state
+	_, remoteNodes, userState, err := m.readRemoteState(bufConn, dec)
+	return remoteNodes, userState, err
+}
+
+// sendLocalState is invoked to send our local state over a stream connection.
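+//
+// (Editorial note) The stream body it builds, before rawSendMsgStream
+// optionally compresses and encrypts it, is laid out as:
+//
+//	[pushPullMsg byte][msgpack pushPullHeader][Nodes x msgpack pushNodeState][raw user state bytes]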
+func (m *Memberlist) sendLocalState(conn net.Conn, join bool) error { + // Setup a deadline + conn.SetDeadline(time.Now().Add(m.config.TCPTimeout)) + + // Prepare the local node state + m.nodeLock.RLock() + localNodes := make([]pushNodeState, len(m.nodes)) + for idx, n := range m.nodes { + localNodes[idx].Name = n.Name + localNodes[idx].Addr = n.Addr + localNodes[idx].Port = n.Port + localNodes[idx].Incarnation = n.Incarnation + localNodes[idx].State = n.State + localNodes[idx].Meta = n.Meta + localNodes[idx].Vsn = []uint8{ + n.PMin, n.PMax, n.PCur, + n.DMin, n.DMax, n.DCur, + } + } + m.nodeLock.RUnlock() + + // Get the delegate state + var userData []byte + if m.config.Delegate != nil { + userData = m.config.Delegate.LocalState(join) + } + + // Create a bytes buffer writer + bufConn := bytes.NewBuffer(nil) + + // Send our node state + header := pushPullHeader{Nodes: len(localNodes), UserStateLen: len(userData), Join: join} + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(bufConn, &hd) + + // Begin state push + if _, err := bufConn.Write([]byte{byte(pushPullMsg)}); err != nil { + return err + } + + if err := enc.Encode(&header); err != nil { + return err + } + for i := 0; i < header.Nodes; i++ { + if err := enc.Encode(&localNodes[i]); err != nil { + return err + } + } + + // Write the user state as well + if userData != nil { + if _, err := bufConn.Write(userData); err != nil { + return err + } + } + + // Get the send buffer + return m.rawSendMsgStream(conn, bufConn.Bytes()) +} + +// encryptLocalState is used to help encrypt local state before sending +func (m *Memberlist) encryptLocalState(sendBuf []byte) ([]byte, error) { + var buf bytes.Buffer + + // Write the encryptMsg byte + buf.WriteByte(byte(encryptMsg)) + + // Write the size of the message + sizeBuf := make([]byte, 4) + encVsn := m.encryptionVersion() + encLen := encryptedLength(encVsn, len(sendBuf)) + binary.BigEndian.PutUint32(sizeBuf, uint32(encLen)) + buf.Write(sizeBuf) + + // Write the encrypted cipher text to the buffer + key := m.config.Keyring.GetPrimaryKey() + err := encryptPayload(encVsn, key, sendBuf, buf.Bytes()[:5], &buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// decryptRemoteState is used to help decrypt the remote state +func (m *Memberlist) decryptRemoteState(bufConn io.Reader) ([]byte, error) { + // Read in enough to determine message length + cipherText := bytes.NewBuffer(nil) + cipherText.WriteByte(byte(encryptMsg)) + _, err := io.CopyN(cipherText, bufConn, 4) + if err != nil { + return nil, err + } + + // Ensure we aren't asked to download too much. This is to guard against + // an attack vector where a huge amount of state is sent + moreBytes := binary.BigEndian.Uint32(cipherText.Bytes()[1:5]) + if moreBytes > maxPushStateBytes { + return nil, fmt.Errorf("Remote node state is larger than limit (%d)", moreBytes) + } + + // Read in the rest of the payload + _, err = io.CopyN(cipherText, bufConn, int64(moreBytes)) + if err != nil { + return nil, err + } + + // Decrypt the cipherText + dataBytes := cipherText.Bytes()[:5] + cipherBytes := cipherText.Bytes()[5:] + + // Decrypt the payload + keys := m.config.Keyring.GetKeys() + return decryptPayload(keys, cipherBytes, dataBytes) +} + +// readStream is used to read from a stream connection, decrypting and +// decompressing the stream if necessary. 
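+//
+// (Editorial note) The wrappers nest outermost-first, so a fully wrapped
+// stream looks like
+//
+//	[encryptMsg [compressMsg [inner message type + body]]]
+//
+// and readStream peels them off in that order before handing back the inner
+// message type and a decoder positioned at its body.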
+func (m *Memberlist) readStream(conn net.Conn) (messageType, io.Reader, *codec.Decoder, error) { + // Created a buffered reader + var bufConn io.Reader = bufio.NewReader(conn) + + // Read the message type + buf := [1]byte{0} + if _, err := bufConn.Read(buf[:]); err != nil { + return 0, nil, nil, err + } + msgType := messageType(buf[0]) + + // Check if the message is encrypted + if msgType == encryptMsg { + if !m.config.EncryptionEnabled() { + return 0, nil, nil, + fmt.Errorf("Remote state is encrypted and encryption is not configured") + } + + plain, err := m.decryptRemoteState(bufConn) + if err != nil { + return 0, nil, nil, err + } + + // Reset message type and bufConn + msgType = messageType(plain[0]) + bufConn = bytes.NewReader(plain[1:]) + } else if m.config.EncryptionEnabled() { + return 0, nil, nil, + fmt.Errorf("Encryption is configured but remote state is not encrypted") + } + + // Get the msgPack decoders + hd := codec.MsgpackHandle{} + dec := codec.NewDecoder(bufConn, &hd) + + // Check if we have a compressed message + if msgType == compressMsg { + var c compress + if err := dec.Decode(&c); err != nil { + return 0, nil, nil, err + } + decomp, err := decompressBuffer(&c) + if err != nil { + return 0, nil, nil, err + } + + // Reset the message type + msgType = messageType(decomp[0]) + + // Create a new bufConn + bufConn = bytes.NewReader(decomp[1:]) + + // Create a new decoder + dec = codec.NewDecoder(bufConn, &hd) + } + + return msgType, bufConn, dec, nil +} + +// readRemoteState is used to read the remote state from a connection +func (m *Memberlist) readRemoteState(bufConn io.Reader, dec *codec.Decoder) (bool, []pushNodeState, []byte, error) { + // Read the push/pull header + var header pushPullHeader + if err := dec.Decode(&header); err != nil { + return false, nil, nil, err + } + + // Allocate space for the transfer + remoteNodes := make([]pushNodeState, header.Nodes) + + // Try to decode all the states + for i := 0; i < header.Nodes; i++ { + if err := dec.Decode(&remoteNodes[i]); err != nil { + return false, nil, nil, err + } + } + + // Read the remote user state into a buffer + var userBuf []byte + if header.UserStateLen > 0 { + userBuf = make([]byte, header.UserStateLen) + bytes, err := io.ReadAtLeast(bufConn, userBuf, header.UserStateLen) + if err == nil && bytes != header.UserStateLen { + err = fmt.Errorf( + "Failed to read full user state (%d / %d)", + bytes, header.UserStateLen) + } + if err != nil { + return false, nil, nil, err + } + } + + // For proto versions < 2, there is no port provided. 
Mask old
+	// behavior by using the configured port
+	for idx := range remoteNodes {
+		if m.ProtocolVersion() < 2 || remoteNodes[idx].Port == 0 {
+			remoteNodes[idx].Port = uint16(m.config.BindPort)
+		}
+	}
+
+	return header.Join, remoteNodes, userBuf, nil
+}
+
+// mergeRemoteState is used to merge the remote state with our local state
+func (m *Memberlist) mergeRemoteState(join bool, remoteNodes []pushNodeState, userBuf []byte) error {
+	if err := m.verifyProtocol(remoteNodes); err != nil {
+		return err
+	}
+
+	// Invoke the merge delegate if any
+	if join && m.config.Merge != nil {
+		nodes := make([]*Node, len(remoteNodes))
+		for idx, n := range remoteNodes {
+			nodes[idx] = &Node{
+				Name: n.Name,
+				Addr: n.Addr,
+				Port: n.Port,
+				Meta: n.Meta,
+				PMin: n.Vsn[0],
+				PMax: n.Vsn[1],
+				PCur: n.Vsn[2],
+				DMin: n.Vsn[3],
+				DMax: n.Vsn[4],
+				DCur: n.Vsn[5],
+			}
+		}
+		if err := m.config.Merge.NotifyMerge(nodes); err != nil {
+			return err
+		}
+	}
+
+	// Merge the membership state
+	m.mergeState(remoteNodes)
+
+	// Invoke the delegate for user state
+	if userBuf != nil && m.config.Delegate != nil {
+		m.config.Delegate.MergeRemoteState(userBuf, join)
+	}
+	return nil
+}
+
+// readUserMsg is used to decode a userMsg from a stream.
+func (m *Memberlist) readUserMsg(bufConn io.Reader, dec *codec.Decoder) error {
+	// Read the user message header
+	var header userMsgHeader
+	if err := dec.Decode(&header); err != nil {
+		return err
+	}
+
+	// Read the user message into a buffer
+	var userBuf []byte
+	if header.UserMsgLen > 0 {
+		userBuf = make([]byte, header.UserMsgLen)
+		bytes, err := io.ReadAtLeast(bufConn, userBuf, header.UserMsgLen)
+		if err == nil && bytes != header.UserMsgLen {
+			err = fmt.Errorf(
+				"Failed to read full user message (%d / %d)",
+				bytes, header.UserMsgLen)
+		}
+		if err != nil {
+			return err
+		}
+
+		d := m.config.Delegate
+		if d != nil {
+			d.NotifyMsg(userBuf)
+		}
+	}
+
+	return nil
+}
+
+// sendPingAndWaitForAck makes a stream connection to the given address, sends
+// a ping, and waits for an ack. All of this is done as a series of blocking
+// operations, given the deadline. The bool return parameter is true if we
+// were able to round trip a ping to the other node.
+func (m *Memberlist) sendPingAndWaitForAck(addr string, ping ping, deadline time.Time) (bool, error) {
+	conn, err := m.transport.DialTimeout(addr, m.config.TCPTimeout)
+	if err != nil {
+		// If the node is actually dead we expect this to fail, so we
+		// shouldn't spam the logs with it. After this point, errors
+		// with the connection are real, unexpected errors and should
+		// get propagated up.
+		return false, nil
+	}
+	defer conn.Close()
+	conn.SetDeadline(deadline)
+
+	out, err := encode(pingMsg, &ping)
+	if err != nil {
+		return false, err
+	}
+
+	if err = m.rawSendMsgStream(conn, out.Bytes()); err != nil {
+		return false, err
+	}
+
+	msgType, _, dec, err := m.readStream(conn)
+	if err != nil {
+		return false, err
+	}
+
+	if msgType != ackRespMsg {
+		return false, fmt.Errorf("Unexpected msgType (%d) from ping %s", msgType, LogConn(conn))
+	}
+
+	var ack ackResp
+	if err = dec.Decode(&ack); err != nil {
+		return false, err
+	}
+
+	if ack.SeqNo != ping.SeqNo {
+		return false, fmt.Errorf("Sequence number from ack (%d) doesn't match ping (%d) %s", ack.SeqNo, ping.SeqNo, LogConn(conn))
+	}
+
+	return true, nil
+}
diff --git a/vendor/github.com/hashicorp/memberlist/net_transport.go b/vendor/github.com/hashicorp/memberlist/net_transport.go
new file mode 100644
index 0000000000..e7b88b01f6
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/net_transport.go
@@ -0,0 +1,289 @@
+package memberlist
+
+import (
+	"fmt"
+	"log"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/armon/go-metrics"
+	sockaddr "github.com/hashicorp/go-sockaddr"
+)
+
+const (
+	// udpPacketBufSize is used to buffer incoming packets during read
+	// operations.
+	udpPacketBufSize = 65536
+
+	// udpRecvBufSize is a large buffer size that we attempt to set UDP
+	// sockets to in order to handle a large volume of messages.
+	udpRecvBufSize = 2 * 1024 * 1024
+)
+
+// NetTransportConfig is used to configure a net transport.
+type NetTransportConfig struct {
+	// BindAddrs is a list of addresses to bind to for both TCP and UDP
+	// communications.
+	BindAddrs []string
+
+	// BindPort is the port to listen on, for each address above.
+	BindPort int
+
+	// Logger is a logger for operator messages.
+	Logger *log.Logger
+}
+
+// NetTransport is a Transport implementation that uses connectionless UDP for
+// packet operations, and ad-hoc TCP connections for stream operations.
+type NetTransport struct {
+	config       *NetTransportConfig
+	packetCh     chan *Packet
+	streamCh     chan net.Conn
+	logger       *log.Logger
+	wg           sync.WaitGroup
+	tcpListeners []*net.TCPListener
+	udpListeners []*net.UDPConn
+	shutdown     int32
+}
+
+// NewNetTransport returns a net transport with the given configuration. On
+// success all the network listeners will be created and listening.
+func NewNetTransport(config *NetTransportConfig) (*NetTransport, error) {
+	// If we reject the empty list outright we can assume that there's at
+	// least one listener of each type later during operation.
+	if len(config.BindAddrs) == 0 {
+		return nil, fmt.Errorf("At least one bind address is required")
+	}
+
+	// Build out the new transport.
+	var ok bool
+	t := NetTransport{
+		config:   config,
+		packetCh: make(chan *Packet),
+		streamCh: make(chan net.Conn),
+		logger:   config.Logger,
+	}
+
+	// Clean up listeners if there's an error.
+	defer func() {
+		if !ok {
+			t.Shutdown()
+		}
+	}()
+
+	// Build all the TCP and UDP listeners.
+	port := config.BindPort
+	for _, addr := range config.BindAddrs {
+		ip := net.ParseIP(addr)
+
+		tcpAddr := &net.TCPAddr{IP: ip, Port: port}
+		tcpLn, err := net.ListenTCP("tcp", tcpAddr)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to start TCP listener on %q port %d: %v", addr, port, err)
+		}
+		t.tcpListeners = append(t.tcpListeners, tcpLn)
+
+		// If the config port given was zero, use the first TCP listener
+		// to pick an available port and then apply that to everything
+		// else.
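+		// (Editorial sketch) This is what makes ephemeral binds work; a
+		// hypothetical caller can pass BindPort: 0 and read the chosen
+		// port back afterwards:
+		//
+		//	nt, _ := NewNetTransport(&NetTransportConfig{
+		//		BindAddrs: []string{"127.0.0.1"},
+		//		BindPort:  0,
+		//	})
+		//	port := nt.GetAutoBindPort()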
+ if port == 0 { + port = tcpLn.Addr().(*net.TCPAddr).Port + } + + udpAddr := &net.UDPAddr{IP: ip, Port: port} + udpLn, err := net.ListenUDP("udp", udpAddr) + if err != nil { + return nil, fmt.Errorf("Failed to start UDP listener on %q port %d: %v", addr, port, err) + } + if err := setUDPRecvBuf(udpLn); err != nil { + return nil, fmt.Errorf("Failed to resize UDP buffer: %v", err) + } + t.udpListeners = append(t.udpListeners, udpLn) + } + + // Fire them up now that we've been able to create them all. + for i := 0; i < len(config.BindAddrs); i++ { + t.wg.Add(2) + go t.tcpListen(t.tcpListeners[i]) + go t.udpListen(t.udpListeners[i]) + } + + ok = true + return &t, nil +} + +// GetAutoBindPort returns the bind port that was automatically given by the +// kernel, if a bind port of 0 was given. +func (t *NetTransport) GetAutoBindPort() int { + // We made sure there's at least one TCP listener, and that one's + // port was applied to all the others for the dynamic bind case. + return t.tcpListeners[0].Addr().(*net.TCPAddr).Port +} + +// See Transport. +func (t *NetTransport) FinalAdvertiseAddr(ip string, port int) (net.IP, int, error) { + var advertiseAddr net.IP + var advertisePort int + if ip != "" { + // If they've supplied an address, use that. + advertiseAddr = net.ParseIP(ip) + if advertiseAddr == nil { + return nil, 0, fmt.Errorf("Failed to parse advertise address %q", ip) + } + + // Ensure IPv4 conversion if necessary. + if ip4 := advertiseAddr.To4(); ip4 != nil { + advertiseAddr = ip4 + } + advertisePort = port + } else { + if t.config.BindAddrs[0] == "0.0.0.0" { + // Otherwise, if we're not bound to a specific IP, let's + // use a suitable private IP address. + var err error + ip, err = sockaddr.GetPrivateIP() + if err != nil { + return nil, 0, fmt.Errorf("Failed to get interface addresses: %v", err) + } + if ip == "" { + return nil, 0, fmt.Errorf("No private IP address found, and explicit IP not provided") + } + + advertiseAddr = net.ParseIP(ip) + if advertiseAddr == nil { + return nil, 0, fmt.Errorf("Failed to parse advertise address: %q", ip) + } + } else { + // Use the IP that we're bound to, based on the first + // TCP listener, which we already ensure is there. + advertiseAddr = t.tcpListeners[0].Addr().(*net.TCPAddr).IP + } + + // Use the port we are bound to. + advertisePort = t.GetAutoBindPort() + } + + return advertiseAddr, advertisePort, nil +} + +// See Transport. +func (t *NetTransport) WriteTo(b []byte, addr string) (time.Time, error) { + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return time.Time{}, err + } + + // We made sure there's at least one UDP listener, so just use the + // packet sending interface on the first one. Take the time after the + // write call comes back, which will underestimate the time a little, + // but help account for any delays before the write occurs. + _, err = t.udpListeners[0].WriteTo(b, udpAddr) + return time.Now(), err +} + +// See Transport. +func (t *NetTransport) PacketCh() <-chan *Packet { + return t.packetCh +} + +// See Transport. +func (t *NetTransport) DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { + dialer := net.Dialer{Timeout: timeout} + return dialer.Dial("tcp", addr) +} + +// See Transport. +func (t *NetTransport) StreamCh() <-chan net.Conn { + return t.streamCh +} + +// See Transport. +func (t *NetTransport) Shutdown() error { + // This will avoid log spam about errors when we shut down. + atomic.StoreInt32(&t.shutdown, 1) + + // Rip through all the connections and shut them down. 
+ for _, conn := range t.tcpListeners { + conn.Close() + } + for _, conn := range t.udpListeners { + conn.Close() + } + + // Block until all the listener threads have died. + t.wg.Wait() + return nil +} + +// tcpListen is a long running goroutine that accepts incoming TCP connections +// and hands them off to the stream channel. +func (t *NetTransport) tcpListen(tcpLn *net.TCPListener) { + defer t.wg.Done() + for { + conn, err := tcpLn.AcceptTCP() + if err != nil { + if s := atomic.LoadInt32(&t.shutdown); s == 1 { + break + } + + t.logger.Printf("[ERR] memberlist: Error accepting TCP connection: %v", err) + continue + } + + t.streamCh <- conn + } +} + +// udpListen is a long running goroutine that accepts incoming UDP packets and +// hands them off to the packet channel. +func (t *NetTransport) udpListen(udpLn *net.UDPConn) { + defer t.wg.Done() + for { + // Do a blocking read into a fresh buffer. Grab a time stamp as + // close as possible to the I/O. + buf := make([]byte, udpPacketBufSize) + n, addr, err := udpLn.ReadFrom(buf) + ts := time.Now() + if err != nil { + if s := atomic.LoadInt32(&t.shutdown); s == 1 { + break + } + + t.logger.Printf("[ERR] memberlist: Error reading UDP packet: %v", err) + continue + } + + // Check the length - it needs to have at least one byte to be a + // proper message. + if n < 1 { + t.logger.Printf("[ERR] memberlist: UDP packet too short (%d bytes) %s", + len(buf), LogAddress(addr)) + continue + } + + // Ingest the packet. + metrics.IncrCounter([]string{"memberlist", "udp", "received"}, float32(n)) + t.packetCh <- &Packet{ + Buf: buf[:n], + From: addr, + Timestamp: ts, + } + } +} + +// setUDPRecvBuf is used to resize the UDP receive window. The function +// attempts to set the read buffer to `udpRecvBuf` but backs off until +// the read buffer can be set. +func setUDPRecvBuf(c *net.UDPConn) error { + size := udpRecvBufSize + var err error + for size > 0 { + if err = c.SetReadBuffer(size); err == nil { + return nil + } + size = size / 2 + } + return err +} diff --git a/vendor/github.com/hashicorp/memberlist/ping_delegate.go b/vendor/github.com/hashicorp/memberlist/ping_delegate.go new file mode 100644 index 0000000000..1566c8b3d5 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/ping_delegate.go @@ -0,0 +1,14 @@ +package memberlist + +import "time" + +// PingDelegate is used to notify an observer how long it took for a ping message to +// complete a round trip. It can also be used for writing arbitrary byte slices +// into ack messages. Note that in order to be meaningful for RTT estimates, this +// delegate does not apply to indirect pings, nor fallback pings sent over TCP. +type PingDelegate interface { + // AckPayload is invoked when an ack is being sent; the returned bytes will be appended to the ack + AckPayload() []byte + // NotifyPing is invoked when an ack for a ping is received + NotifyPingComplete(other *Node, rtt time.Duration, payload []byte) +} diff --git a/vendor/github.com/hashicorp/memberlist/queue.go b/vendor/github.com/hashicorp/memberlist/queue.go new file mode 100644 index 0000000000..994b90ff10 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/queue.go @@ -0,0 +1,167 @@ +package memberlist + +import ( + "sort" + "sync" +) + +// TransmitLimitedQueue is used to queue messages to broadcast to +// the cluster (via gossip) but limits the number of transmits per +// message. It also prioritizes messages with lower transmit counts +// (hence newer messages). 
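+//
+// Illustrative setup (editorial sketch; `list` is an assumed *Memberlist and
+// `b` an assumed Broadcast implementation):
+//
+//	q := &TransmitLimitedQueue{
+//		NumNodes:       func() int { return list.NumMembers() },
+//		RetransmitMult: 4, // memberlist's usual default multiplier
+//	}
+//	q.QueueBroadcast(b)
+//	msgs := q.GetBroadcasts(2, 1400) // up to 1400 bytes, 2 bytes overhead per message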
+type TransmitLimitedQueue struct { + // NumNodes returns the number of nodes in the cluster. This is + // used to determine the retransmit count, which is calculated + // based on the log of this. + NumNodes func() int + + // RetransmitMult is the multiplier used to determine the maximum + // number of retransmissions attempted. + RetransmitMult int + + sync.Mutex + bcQueue limitedBroadcasts +} + +type limitedBroadcast struct { + transmits int // Number of transmissions attempted. + b Broadcast +} +type limitedBroadcasts []*limitedBroadcast + +// Broadcast is something that can be broadcasted via gossip to +// the memberlist cluster. +type Broadcast interface { + // Invalidates checks if enqueuing the current broadcast + // invalidates a previous broadcast + Invalidates(b Broadcast) bool + + // Returns a byte form of the message + Message() []byte + + // Finished is invoked when the message will no longer + // be broadcast, either due to invalidation or to the + // transmit limit being reached + Finished() +} + +// QueueBroadcast is used to enqueue a broadcast +func (q *TransmitLimitedQueue) QueueBroadcast(b Broadcast) { + q.Lock() + defer q.Unlock() + + // Check if this message invalidates another + n := len(q.bcQueue) + for i := 0; i < n; i++ { + if b.Invalidates(q.bcQueue[i].b) { + q.bcQueue[i].b.Finished() + copy(q.bcQueue[i:], q.bcQueue[i+1:]) + q.bcQueue[n-1] = nil + q.bcQueue = q.bcQueue[:n-1] + n-- + } + } + + // Append to the queue + q.bcQueue = append(q.bcQueue, &limitedBroadcast{0, b}) +} + +// GetBroadcasts is used to get a number of broadcasts, up to a byte limit +// and applying a per-message overhead as provided. +func (q *TransmitLimitedQueue) GetBroadcasts(overhead, limit int) [][]byte { + q.Lock() + defer q.Unlock() + + // Fast path the default case + if len(q.bcQueue) == 0 { + return nil + } + + transmitLimit := retransmitLimit(q.RetransmitMult, q.NumNodes()) + bytesUsed := 0 + var toSend [][]byte + + for i := len(q.bcQueue) - 1; i >= 0; i-- { + // Check if this is within our limits + b := q.bcQueue[i] + msg := b.b.Message() + if bytesUsed+overhead+len(msg) > limit { + continue + } + + // Add to slice to send + bytesUsed += overhead + len(msg) + toSend = append(toSend, msg) + + // Check if we should stop transmission + b.transmits++ + if b.transmits >= transmitLimit { + b.b.Finished() + n := len(q.bcQueue) + q.bcQueue[i], q.bcQueue[n-1] = q.bcQueue[n-1], nil + q.bcQueue = q.bcQueue[:n-1] + } + } + + // If we are sending anything, we need to re-sort to deal + // with adjusted transmit counts + if len(toSend) > 0 { + q.bcQueue.Sort() + } + return toSend +} + +// NumQueued returns the number of queued messages +func (q *TransmitLimitedQueue) NumQueued() int { + q.Lock() + defer q.Unlock() + return len(q.bcQueue) +} + +// Reset clears all the queued messages +func (q *TransmitLimitedQueue) Reset() { + q.Lock() + defer q.Unlock() + for _, b := range q.bcQueue { + b.b.Finished() + } + q.bcQueue = nil +} + +// Prune will retain the maxRetain latest messages, and the rest +// will be discarded. 
This can be used to prevent unbounded queue sizes +func (q *TransmitLimitedQueue) Prune(maxRetain int) { + q.Lock() + defer q.Unlock() + + // Do nothing if queue size is less than the limit + n := len(q.bcQueue) + if n < maxRetain { + return + } + + // Invalidate the messages we will be removing + for i := 0; i < n-maxRetain; i++ { + q.bcQueue[i].b.Finished() + } + + // Move the messages, and retain only the last maxRetain + copy(q.bcQueue[0:], q.bcQueue[n-maxRetain:]) + q.bcQueue = q.bcQueue[:maxRetain] +} + +func (b limitedBroadcasts) Len() int { + return len(b) +} + +func (b limitedBroadcasts) Less(i, j int) bool { + return b[i].transmits < b[j].transmits +} + +func (b limitedBroadcasts) Swap(i, j int) { + b[i], b[j] = b[j], b[i] +} + +func (b limitedBroadcasts) Sort() { + sort.Sort(sort.Reverse(b)) +} diff --git a/vendor/github.com/hashicorp/memberlist/security.go b/vendor/github.com/hashicorp/memberlist/security.go new file mode 100644 index 0000000000..d90114eb0c --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/security.go @@ -0,0 +1,198 @@ +package memberlist + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "fmt" + "io" +) + +/* + +Encrypted messages are prefixed with an encryptionVersion byte +that is used for us to be able to properly encode/decode. We +currently support the following versions: + + 0 - AES-GCM 128, using PKCS7 padding + 1 - AES-GCM 128, no padding. Padding not needed, caused bloat. + +*/ +type encryptionVersion uint8 + +const ( + minEncryptionVersion encryptionVersion = 0 + maxEncryptionVersion encryptionVersion = 1 +) + +const ( + versionSize = 1 + nonceSize = 12 + tagSize = 16 + maxPadOverhead = 16 + blockSize = aes.BlockSize +) + +// pkcs7encode is used to pad a byte buffer to a specific block size using +// the PKCS7 algorithm. "Ignores" some bytes to compensate for IV +func pkcs7encode(buf *bytes.Buffer, ignore, blockSize int) { + n := buf.Len() - ignore + more := blockSize - (n % blockSize) + for i := 0; i < more; i++ { + buf.WriteByte(byte(more)) + } +} + +// pkcs7decode is used to decode a buffer that has been padded +func pkcs7decode(buf []byte, blockSize int) []byte { + if len(buf) == 0 { + panic("Cannot decode a PKCS7 buffer of zero length") + } + n := len(buf) + last := buf[n-1] + n -= int(last) + return buf[:n] +} + +// encryptOverhead returns the maximum possible overhead of encryption by version +func encryptOverhead(vsn encryptionVersion) int { + switch vsn { + case 0: + return 45 // Version: 1, IV: 12, Padding: 16, Tag: 16 + case 1: + return 29 // Version: 1, IV: 12, Tag: 16 + default: + panic("unsupported version") + } +} + +// encryptedLength is used to compute the buffer size needed +// for a message of given length +func encryptedLength(vsn encryptionVersion, inp int) int { + // If we are on version 1, there is no padding + if vsn >= 1 { + return versionSize + nonceSize + inp + tagSize + } + + // Determine the padding size + padding := blockSize - (inp % blockSize) + + // Sum the extra parts to get total size + return versionSize + nonceSize + inp + padding + tagSize +} + +// encryptPayload is used to encrypt a message with a given key. +// We make use of AES-128 in GCM mode. 
The bytes written to dst are the version,
+// nonce, ciphertext and tag
+func encryptPayload(vsn encryptionVersion, key []byte, msg []byte, data []byte, dst *bytes.Buffer) error {
+	// Get the AES block cipher
+	aesBlock, err := aes.NewCipher(key)
+	if err != nil {
+		return err
+	}
+
+	// Get the GCM cipher mode
+	gcm, err := cipher.NewGCM(aesBlock)
+	if err != nil {
+		return err
+	}
+
+	// Grow the buffer to make room for everything
+	offset := dst.Len()
+	dst.Grow(encryptedLength(vsn, len(msg)))
+
+	// Write the encryption version
+	dst.WriteByte(byte(vsn))
+
+	// Add a random nonce, failing closed if the source of randomness does
+	if _, err = io.CopyN(dst, rand.Reader, nonceSize); err != nil {
+		return err
+	}
+	afterNonce := dst.Len()
+
+	// Ensure we are correctly padded (only version 0)
+	if vsn == 0 {
+		io.Copy(dst, bytes.NewReader(msg))
+		pkcs7encode(dst, offset+versionSize+nonceSize, aes.BlockSize)
+	}
+
+	// Encrypt message using GCM
+	slice := dst.Bytes()[offset:]
+	nonce := slice[versionSize : versionSize+nonceSize]
+
+	// Message source depends on the encryption version.
+	// Version 0 uses padding, version 1 does not
+	var src []byte
+	if vsn == 0 {
+		src = slice[versionSize+nonceSize:]
+	} else {
+		src = msg
+	}
+	out := gcm.Seal(nil, nonce, src, data)
+
+	// Truncate the plaintext, and write the cipher text
+	dst.Truncate(afterNonce)
+	dst.Write(out)
+	return nil
+}
+
+// decryptMessage performs the actual decryption of ciphertext. This is in its
+// own function to allow it to be called on all keys easily.
+func decryptMessage(key, msg []byte, data []byte) ([]byte, error) {
+	// Get the AES block cipher
+	aesBlock, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the GCM cipher mode
+	gcm, err := cipher.NewGCM(aesBlock)
+	if err != nil {
+		return nil, err
+	}
+
+	// Decrypt the message
+	nonce := msg[versionSize : versionSize+nonceSize]
+	ciphertext := msg[versionSize+nonceSize:]
+	plain, err := gcm.Open(nil, nonce, ciphertext, data)
+	if err != nil {
+		return nil, err
+	}
+
+	// Success!
+	return plain, nil
+}
+
+// decryptPayload is used to decrypt a message with a given key,
+// and verify its contents. Any padding will be removed, and a
+// slice to the plaintext is returned. Decryption is done IN PLACE!
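+//
+// A worked size check (editorial): with version 1 and a 100-byte message,
+// encryptedLength is 1 (version) + 12 (nonce) + 100 + 16 (tag) = 129 bytes,
+// which is why any payload shorter than encryptedLength(vsn, 0) is rejected
+// below as too small to possibly decrypt.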
+func decryptPayload(keys [][]byte, msg []byte, data []byte) ([]byte, error) { + // Ensure we have at least one byte + if len(msg) == 0 { + return nil, fmt.Errorf("Cannot decrypt empty payload") + } + + // Verify the version + vsn := encryptionVersion(msg[0]) + if vsn > maxEncryptionVersion { + return nil, fmt.Errorf("Unsupported encryption version %d", msg[0]) + } + + // Ensure the length is sane + if len(msg) < encryptedLength(vsn, 0) { + return nil, fmt.Errorf("Payload is too small to decrypt: %d", len(msg)) + } + + for _, key := range keys { + plain, err := decryptMessage(key, msg, data) + if err == nil { + // Remove the PKCS7 padding for vsn 0 + if vsn == 0 { + return pkcs7decode(plain, aes.BlockSize), nil + } else { + return plain, nil + } + } + } + + return nil, fmt.Errorf("No installed keys could decrypt the message") +} diff --git a/vendor/github.com/hashicorp/memberlist/state.go b/vendor/github.com/hashicorp/memberlist/state.go new file mode 100644 index 0000000000..71bf6f34d2 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/state.go @@ -0,0 +1,1160 @@ +package memberlist + +import ( + "bytes" + "fmt" + "math" + "math/rand" + "net" + "sync/atomic" + "time" + + "github.com/armon/go-metrics" +) + +type nodeStateType int + +const ( + stateAlive nodeStateType = iota + stateSuspect + stateDead +) + +// Node represents a node in the cluster. +type Node struct { + Name string + Addr net.IP + Port uint16 + Meta []byte // Metadata from the delegate for this node. + PMin uint8 // Minimum protocol version this understands + PMax uint8 // Maximum protocol version this understands + PCur uint8 // Current version node is speaking + DMin uint8 // Min protocol version for the delegate to understand + DMax uint8 // Max protocol version for the delegate to understand + DCur uint8 // Current version delegate is speaking +} + +// Address returns the host:port form of a node's address, suitable for use +// with a transport. +func (n *Node) Address() string { + return joinHostPort(n.Addr.String(), n.Port) +} + +// NodeState is used to manage our state view of another node +type nodeState struct { + Node + Incarnation uint32 // Last known incarnation number + State nodeStateType // Current state + StateChange time.Time // Time last state change happened +} + +// Address returns the host:port form of a node's address, suitable for use +// with a transport. +func (n *nodeState) Address() string { + return n.Node.Address() +} + +// ackHandler is used to register handlers for incoming acks and nacks. +type ackHandler struct { + ackFn func([]byte, time.Time) + nackFn func() + timer *time.Timer +} + +// NoPingResponseError is used to indicate a 'ping' packet was +// successfully issued but no response was received +type NoPingResponseError struct { + node string +} + +func (f NoPingResponseError) Error() string { + return fmt.Sprintf("No response from node %s", f.node) +} + +// Schedule is used to ensure the Tick is performed periodically. This +// function is safe to call multiple times. If the memberlist is already +// scheduled, then it won't do anything. +func (m *Memberlist) schedule() { + m.tickerLock.Lock() + defer m.tickerLock.Unlock() + + // If we already have tickers, then don't do anything, since we're + // scheduled + if len(m.tickers) > 0 { + return + } + + // Create the stop tick channel, a blocking channel. We close this + // when we should stop the tickers. 
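+	// (Editorial note) Closing stopCh below is Go's broadcast primitive:
+	// every goroutine blocked on `<-stopCh` unblocks at once, which is how
+	// deschedule stops all the trigger goroutines together.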
+	stopCh := make(chan struct{})
+
+	// Create a new probeTicker
+	if m.config.ProbeInterval > 0 {
+		t := time.NewTicker(m.config.ProbeInterval)
+		go m.triggerFunc(m.config.ProbeInterval, t.C, stopCh, m.probe)
+		m.tickers = append(m.tickers, t)
+	}
+
+	// Create a push pull ticker if needed
+	if m.config.PushPullInterval > 0 {
+		go m.pushPullTrigger(stopCh)
+	}
+
+	// Create a gossip ticker if needed
+	if m.config.GossipInterval > 0 && m.config.GossipNodes > 0 {
+		t := time.NewTicker(m.config.GossipInterval)
+		go m.triggerFunc(m.config.GossipInterval, t.C, stopCh, m.gossip)
+		m.tickers = append(m.tickers, t)
+	}
+
+	// If we made any tickers, then record the stopTick channel for
+	// later.
+	if len(m.tickers) > 0 {
+		m.stopTick = stopCh
+	}
+}
+
+// triggerFunc is used to trigger a function call each time the ticker
+// channel fires, until a stop tick arrives.
+func (m *Memberlist) triggerFunc(stagger time.Duration, C <-chan time.Time, stop <-chan struct{}, f func()) {
+	// Use a random stagger to avoid synchronizing
+	randStagger := time.Duration(uint64(rand.Int63()) % uint64(stagger))
+	select {
+	case <-time.After(randStagger):
+	case <-stop:
+		return
+	}
+	for {
+		select {
+		case <-C:
+			f()
+		case <-stop:
+			return
+		}
+	}
+}
+
+// pushPullTrigger is used to periodically trigger a push/pull until
+// a stop tick arrives. We don't use triggerFunc since the push/pull
+// timer is dynamically scaled based on cluster size to avoid network
+// saturation
+func (m *Memberlist) pushPullTrigger(stop <-chan struct{}) {
+	interval := m.config.PushPullInterval
+
+	// Use a random stagger to avoid synchronizing
+	randStagger := time.Duration(uint64(rand.Int63()) % uint64(interval))
+	select {
+	case <-time.After(randStagger):
+	case <-stop:
+		return
+	}
+
+	// Tick using a dynamic timer
+	for {
+		tickTime := pushPullScale(interval, m.estNumNodes())
+		select {
+		case <-time.After(tickTime):
+			m.pushPull()
+		case <-stop:
+			return
+		}
+	}
+}
+
+// deschedule is used to stop the background maintenance. This is safe
+// to call multiple times.
+func (m *Memberlist) deschedule() {
+	m.tickerLock.Lock()
+	defer m.tickerLock.Unlock()
+
+	// If we have no tickers, then we aren't scheduled.
+	if len(m.tickers) == 0 {
+		return
+	}
+
+	// Close the stop channel so all the ticker listeners stop.
+	close(m.stopTick)
+
+	// Explicitly stop all the tickers themselves so they don't take
+	// up any more resources, and get rid of the list.
+	for _, t := range m.tickers {
+		t.Stop()
+	}
+	m.tickers = nil
+}
+
+// probe is used to perform a single round of failure detection and gossip
+func (m *Memberlist) probe() {
+	// Track the number of indexes we've considered probing
+	numCheck := 0
+START:
+	m.nodeLock.RLock()
+
+	// Make sure we don't wrap around infinitely
+	if numCheck >= len(m.nodes) {
+		m.nodeLock.RUnlock()
+		return
+	}
+
+	// Handle the wrap around case
+	if m.probeIndex >= len(m.nodes) {
+		m.nodeLock.RUnlock()
+		m.resetNodes()
+		m.probeIndex = 0
+		numCheck++
+		goto START
+	}
+
+	// Determine if we should probe this node
+	skip := false
+	var node nodeState
+
+	node = *m.nodes[m.probeIndex]
+	if node.Name == m.config.Name {
+		skip = true
+	} else if node.State == stateDead {
+		skip = true
+	}
+
+	// Potentially skip
+	m.nodeLock.RUnlock()
+	m.probeIndex++
+	if skip {
+		numCheck++
+		goto START
+	}
+
+	// Probe the specific node
+	m.probeNode(&node)
+}
+
+// probeNode handles a single round of failure checking on a node.
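+// A round escalates through up to three mechanisms before the node is
+// suspected: a direct UDP ping, indirect pings relayed via up to
+// IndirectChecks random live peers, and, for targets speaking protocol
+// version 3 or higher, a fallback TCP ping. Only when all of these fail
+// is a suspect message broadcast.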
+func (m *Memberlist) probeNode(node *nodeState) { + defer metrics.MeasureSince([]string{"memberlist", "probeNode"}, time.Now()) + + // We use our health awareness to scale the overall probe interval, so we + // slow down if we detect problems. The ticker that calls us can handle + // us running over the base interval, and will skip missed ticks. + probeInterval := m.awareness.ScaleTimeout(m.config.ProbeInterval) + if probeInterval > m.config.ProbeInterval { + metrics.IncrCounter([]string{"memberlist", "degraded", "probe"}, 1) + } + + // Prepare a ping message and setup an ack handler. + ping := ping{SeqNo: m.nextSeqNo(), Node: node.Name} + ackCh := make(chan ackMessage, m.config.IndirectChecks+1) + nackCh := make(chan struct{}, m.config.IndirectChecks+1) + m.setProbeChannels(ping.SeqNo, ackCh, nackCh, probeInterval) + + // Send a ping to the node. If this node looks like it's suspect or dead, + // also tack on a suspect message so that it has a chance to refute as + // soon as possible. + deadline := time.Now().Add(probeInterval) + addr := node.Address() + if node.State == stateAlive { + if err := m.encodeAndSendMsg(addr, pingMsg, &ping); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send ping: %s", err) + return + } + } else { + var msgs [][]byte + if buf, err := encode(pingMsg, &ping); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to encode ping message: %s", err) + return + } else { + msgs = append(msgs, buf.Bytes()) + } + s := suspect{Incarnation: node.Incarnation, Node: node.Name, From: m.config.Name} + if buf, err := encode(suspectMsg, &s); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to encode suspect message: %s", err) + return + } else { + msgs = append(msgs, buf.Bytes()) + } + + compound := makeCompoundMessage(msgs) + if err := m.rawSendMsgPacket(addr, &node.Node, compound.Bytes()); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send compound ping and suspect message to %s: %s", addr, err) + return + } + } + + // Mark the sent time here, which should be after any pre-processing and + // system calls to do the actual send. This probably under-reports a bit, + // but it's the best we can do. + sent := time.Now() + + // Arrange for our self-awareness to get updated. At this point we've + // sent the ping, so any return statement means the probe succeeded + // which will improve our health until we get to the failure scenarios + // at the end of this function, which will alter this delta variable + // accordingly. + awarenessDelta := -1 + defer func() { + m.awareness.ApplyDelta(awarenessDelta) + }() + + // Wait for response or round-trip-time. + select { + case v := <-ackCh: + if v.Complete == true { + if m.config.Ping != nil { + rtt := v.Timestamp.Sub(sent) + m.config.Ping.NotifyPingComplete(&node.Node, rtt, v.Payload) + } + return + } + + // As an edge case, if we get a timeout, we need to re-enqueue it + // here to break out of the select below. + if v.Complete == false { + ackCh <- v + } + case <-time.After(m.config.ProbeTimeout): + // Note that we don't scale this timeout based on awareness and + // the health score. That's because we don't really expect waiting + // longer to help get UDP through. Since health does extend the + // probe interval it will give the TCP fallback more time, which + // is more active in dealing with lost packets, and it gives more + // time to wait for indirect acks/nacks. + m.logger.Printf("[DEBUG] memberlist: Failed ping: %v (timeout reached)", node.Name) + } + + // Get some random live nodes. 
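+	// (kRandomNodes excludes any node for which the filter returns true;
+	// here that removes ourselves, the probe target, and anything not
+	// currently alive, so only healthy third parties relay the ping.)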
+ m.nodeLock.RLock() + kNodes := kRandomNodes(m.config.IndirectChecks, m.nodes, func(n *nodeState) bool { + return n.Name == m.config.Name || + n.Name == node.Name || + n.State != stateAlive + }) + m.nodeLock.RUnlock() + + // Attempt an indirect ping. + expectedNacks := 0 + ind := indirectPingReq{SeqNo: ping.SeqNo, Target: node.Addr, Port: node.Port, Node: node.Name} + for _, peer := range kNodes { + // We only expect nack to be sent from peers who understand + // version 4 of the protocol. + if ind.Nack = peer.PMax >= 4; ind.Nack { + expectedNacks++ + } + + if err := m.encodeAndSendMsg(peer.Address(), indirectPingMsg, &ind); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send indirect ping: %s", err) + } + } + + // Also make an attempt to contact the node directly over TCP. This + // helps prevent confused clients who get isolated from UDP traffic + // but can still speak TCP (which also means they can possibly report + // misinformation to other nodes via anti-entropy), avoiding flapping in + // the cluster. + // + // This is a little unusual because we will attempt a TCP ping to any + // member who understands version 3 of the protocol, regardless of + // which protocol version we are speaking. That's why we've included a + // config option to turn this off if desired. + fallbackCh := make(chan bool, 1) + if (!m.config.DisableTcpPings) && (node.PMax >= 3) { + go func() { + defer close(fallbackCh) + didContact, err := m.sendPingAndWaitForAck(node.Address(), ping, deadline) + if err != nil { + m.logger.Printf("[ERR] memberlist: Failed fallback ping: %s", err) + } else { + fallbackCh <- didContact + } + }() + } else { + close(fallbackCh) + } + + // Wait for the acks or timeout. Note that we don't check the fallback + // channel here because we want to issue a warning below if that's the + // *only* way we hear back from the peer, so we have to let this time + // out first to allow the normal UDP-based acks to come in. + select { + case v := <-ackCh: + if v.Complete == true { + return + } + } + + // Finally, poll the fallback channel. The timeouts are set such that + // the channel will have something or be closed without having to wait + // any additional time here. + for didContact := range fallbackCh { + if didContact { + m.logger.Printf("[WARN] memberlist: Was able to connect to %s but other probes failed, network may be misconfigured", node.Name) + return + } + } + + // Update our self-awareness based on the results of this failed probe. + // If we don't have peers who will send nacks then we penalize for any + // failed probe as a simple health metric. If we do have peers to nack + // verify, then we can use that as a more sophisticated measure of self- + // health because we assume them to be working, and they can help us + // decide if the probed node was really dead or if it was something wrong + // with ourselves. + awarenessDelta = 0 + if expectedNacks > 0 { + if nackCount := len(nackCh); nackCount < expectedNacks { + awarenessDelta += (expectedNacks - nackCount) + } + } else { + awarenessDelta += 1 + } + + // No acks received from target, suspect it as failed. + m.logger.Printf("[INFO] memberlist: Suspect %s has failed, no acks received", node.Name) + s := suspect{Incarnation: node.Incarnation, Node: node.Name, From: m.config.Name} + m.suspectNode(&s) +} + +// Ping initiates a ping to the node with the specified name. +func (m *Memberlist) Ping(node string, addr net.Addr) (time.Duration, error) { + // Prepare a ping message and setup an ack handler. 
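+	// (Per setProbeChannels, ackCh receives a Complete=true message on an
+	// ack, or a Complete=false message if the handler times out first.)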
+ ping := ping{SeqNo: m.nextSeqNo(), Node: node} + ackCh := make(chan ackMessage, m.config.IndirectChecks+1) + m.setProbeChannels(ping.SeqNo, ackCh, nil, m.config.ProbeInterval) + + // Send a ping to the node. + if err := m.encodeAndSendMsg(addr.String(), pingMsg, &ping); err != nil { + return 0, err + } + + // Mark the sent time here, which should be after any pre-processing and + // system calls to do the actual send. This probably under-reports a bit, + // but it's the best we can do. + sent := time.Now() + + // Wait for response or timeout. + select { + case v := <-ackCh: + if v.Complete == true { + return v.Timestamp.Sub(sent), nil + } + case <-time.After(m.config.ProbeTimeout): + // Timeout, return an error below. + } + + m.logger.Printf("[DEBUG] memberlist: Failed UDP ping: %v (timeout reached)", node) + return 0, NoPingResponseError{ping.Node} +} + +// resetNodes is used when the tick wraps around. It will reap the +// dead nodes and shuffle the node list. +func (m *Memberlist) resetNodes() { + m.nodeLock.Lock() + defer m.nodeLock.Unlock() + + // Move dead nodes, but respect gossip to the dead interval + deadIdx := moveDeadNodes(m.nodes, m.config.GossipToTheDeadTime) + + // Deregister the dead nodes + for i := deadIdx; i < len(m.nodes); i++ { + delete(m.nodeMap, m.nodes[i].Name) + m.nodes[i] = nil + } + + // Trim the nodes to exclude the dead nodes + m.nodes = m.nodes[0:deadIdx] + + // Update numNodes after we've trimmed the dead nodes + atomic.StoreUint32(&m.numNodes, uint32(deadIdx)) + + // Shuffle live nodes + shuffleNodes(m.nodes) +} + +// gossip is invoked every GossipInterval period to broadcast our gossip +// messages to a few random nodes. +func (m *Memberlist) gossip() { + defer metrics.MeasureSince([]string{"memberlist", "gossip"}, time.Now()) + + // Get some random live, suspect, or recently dead nodes + m.nodeLock.RLock() + kNodes := kRandomNodes(m.config.GossipNodes, m.nodes, func(n *nodeState) bool { + if n.Name == m.config.Name { + return true + } + + switch n.State { + case stateAlive, stateSuspect: + return false + + case stateDead: + return time.Since(n.StateChange) > m.config.GossipToTheDeadTime + + default: + return true + } + }) + m.nodeLock.RUnlock() + + // Compute the bytes available + bytesAvail := m.config.UDPBufferSize - compoundHeaderOverhead + if m.config.EncryptionEnabled() { + bytesAvail -= encryptOverhead(m.encryptionVersion()) + } + + for _, node := range kNodes { + // Get any pending broadcasts + msgs := m.getBroadcasts(compoundOverhead, bytesAvail) + if len(msgs) == 0 { + return + } + + addr := node.Address() + if len(msgs) == 1 { + // Send single message as is + if err := m.rawSendMsgPacket(addr, &node.Node, msgs[0]); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send gossip to %s: %s", addr, err) + } + } else { + // Otherwise create and send a compound message + compound := makeCompoundMessage(msgs) + if err := m.rawSendMsgPacket(addr, &node.Node, compound.Bytes()); err != nil { + m.logger.Printf("[ERR] memberlist: Failed to send gossip to %s: %s", addr, err) + } + } + } +} + +// pushPull is invoked periodically to randomly perform a complete state +// exchange. Used to ensure a high level of convergence, but is also +// reasonably expensive as the entire state of this node is exchanged +// with the other node. 
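+//
+// The interval between rounds is scaled by pushPullScale (see util.go): it
+// is left untouched up to 32 nodes and then grows logarithmically with the
+// cluster size, roughly doubling at the 33rd node and tripling at the 65th.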
+func (m *Memberlist) pushPull() {
+	// Get a random live node
+	m.nodeLock.RLock()
+	nodes := kRandomNodes(1, m.nodes, func(n *nodeState) bool {
+		return n.Name == m.config.Name ||
+			n.State != stateAlive
+	})
+	m.nodeLock.RUnlock()
+
+	// If no nodes, bail
+	if len(nodes) == 0 {
+		return
+	}
+	node := nodes[0]
+
+	// Attempt a push pull
+	if err := m.pushPullNode(node.Address(), false); err != nil {
+		m.logger.Printf("[ERR] memberlist: Push/Pull with %s failed: %s", node.Name, err)
+	}
+}
+
+// pushPullNode does a complete state exchange with a specific node.
+func (m *Memberlist) pushPullNode(addr string, join bool) error {
+	defer metrics.MeasureSince([]string{"memberlist", "pushPullNode"}, time.Now())
+
+	// Attempt to send and receive with the node
+	remote, userState, err := m.sendAndReceiveState(addr, join)
+	if err != nil {
+		return err
+	}
+
+	if err := m.mergeRemoteState(join, remote, userState); err != nil {
+		return err
+	}
+	return nil
+}
+
+// verifyProtocol verifies that all the remote nodes can speak with our
+// nodes and vice versa on both the core protocol as well as the
+// delegate protocol level.
+//
+// The verification works by finding the maximum minimum and
+// minimum maximum understood protocol and delegate versions. In other words,
+// it finds the common denominator of protocol and delegate version ranges
+// for the entire cluster.
+//
+// After this, it goes through the entire cluster (local and remote) and
+// verifies that everyone's speaking protocol versions satisfy this range.
+// If this passes, it means that every node can understand each other.
+func (m *Memberlist) verifyProtocol(remote []pushNodeState) error {
+	m.nodeLock.RLock()
+	defer m.nodeLock.RUnlock()
+
+	// Maximum minimum understood and minimum maximum understood for both
+	// the protocol and delegate versions. We use this to verify everyone
+	// can be understood.
+	var maxpmin, minpmax uint8
+	var maxdmin, mindmax uint8
+	minpmax = math.MaxUint8
+	mindmax = math.MaxUint8
+
+	for _, rn := range remote {
+		// If the node isn't alive, then skip it
+		if rn.State != stateAlive {
+			continue
+		}
+
+		// Skip nodes that don't have versions set, it just means
+		// their version is zero.
+		if len(rn.Vsn) == 0 {
+			continue
+		}
+
+		if rn.Vsn[0] > maxpmin {
+			maxpmin = rn.Vsn[0]
+		}
+
+		if rn.Vsn[1] < minpmax {
+			minpmax = rn.Vsn[1]
+		}
+
+		if rn.Vsn[3] > maxdmin {
+			maxdmin = rn.Vsn[3]
+		}
+
+		if rn.Vsn[4] < mindmax {
+			mindmax = rn.Vsn[4]
+		}
+	}
+
+	for _, n := range m.nodes {
+		// Ignore non-alive nodes
+		if n.State != stateAlive {
+			continue
+		}
+
+		if n.PMin > maxpmin {
+			maxpmin = n.PMin
+		}
+
+		if n.PMax < minpmax {
+			minpmax = n.PMax
+		}
+
+		if n.DMin > maxdmin {
+			maxdmin = n.DMin
+		}
+
+		if n.DMax < mindmax {
+			mindmax = n.DMax
+		}
+	}
+
+	// Now that we definitively know the minimum and maximum understood
+	// version that satisfies the whole cluster, we verify that every
+	// node in the cluster satisfies this.
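+	// (For example, if one node understands protocol versions [1,3] and
+	// another [2,5], the common range is [2,3]; any node whose current
+	// version falls outside that range fails the checks below.)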
+	for _, n := range remote {
+		var nPCur, nDCur uint8
+		if len(n.Vsn) > 0 {
+			nPCur = n.Vsn[2]
+			nDCur = n.Vsn[5]
+		}
+
+		if nPCur < maxpmin || nPCur > minpmax {
+			return fmt.Errorf(
+				"Node '%s' protocol version (%d) is incompatible: [%d, %d]",
+				n.Name, nPCur, maxpmin, minpmax)
+		}
+
+		if nDCur < maxdmin || nDCur > mindmax {
+			return fmt.Errorf(
+				"Node '%s' delegate protocol version (%d) is incompatible: [%d, %d]",
+				n.Name, nDCur, maxdmin, mindmax)
+		}
+	}
+
+	for _, n := range m.nodes {
+		nPCur := n.PCur
+		nDCur := n.DCur
+
+		if nPCur < maxpmin || nPCur > minpmax {
+			return fmt.Errorf(
+				"Node '%s' protocol version (%d) is incompatible: [%d, %d]",
+				n.Name, nPCur, maxpmin, minpmax)
+		}
+
+		if nDCur < maxdmin || nDCur > mindmax {
+			return fmt.Errorf(
+				"Node '%s' delegate protocol version (%d) is incompatible: [%d, %d]",
+				n.Name, nDCur, maxdmin, mindmax)
+		}
+	}
+
+	return nil
+}
+
+// nextSeqNo returns a usable sequence number in a thread safe way
+func (m *Memberlist) nextSeqNo() uint32 {
+	return atomic.AddUint32(&m.sequenceNum, 1)
+}
+
+// nextIncarnation returns the next incarnation number in a thread safe way
+func (m *Memberlist) nextIncarnation() uint32 {
+	return atomic.AddUint32(&m.incarnation, 1)
+}
+
+// skipIncarnation adds the positive offset to the incarnation number.
+func (m *Memberlist) skipIncarnation(offset uint32) uint32 {
+	return atomic.AddUint32(&m.incarnation, offset)
+}
+
+// estNumNodes is used to get the current estimate of the number of nodes
+func (m *Memberlist) estNumNodes() int {
+	return int(atomic.LoadUint32(&m.numNodes))
+}
+
+type ackMessage struct {
+	Complete  bool
+	Payload   []byte
+	Timestamp time.Time
+}
+
+// setProbeChannels is used to attach the ackCh to receive a message when an ack
+// with a given sequence number is received. The `complete` field of the message
+// will be false on timeout. Any nack messages will cause an empty struct to be
+// passed to the nackCh, which can be nil if not needed.
+func (m *Memberlist) setProbeChannels(seqNo uint32, ackCh chan ackMessage, nackCh chan struct{}, timeout time.Duration) {
+	// Create handler functions for acks and nacks
+	ackFn := func(payload []byte, timestamp time.Time) {
+		select {
+		case ackCh <- ackMessage{true, payload, timestamp}:
+		default:
+		}
+	}
+	nackFn := func() {
+		select {
+		case nackCh <- struct{}{}:
+		default:
+		}
+	}
+
+	// Add the handlers
+	ah := &ackHandler{ackFn, nackFn, nil}
+	m.ackLock.Lock()
+	m.ackHandlers[seqNo] = ah
+	m.ackLock.Unlock()
+
+	// Set up a reaping routine
+	ah.timer = time.AfterFunc(timeout, func() {
+		m.ackLock.Lock()
+		delete(m.ackHandlers, seqNo)
+		m.ackLock.Unlock()
+		select {
+		case ackCh <- ackMessage{false, nil, time.Now()}:
+		default:
+		}
+	})
+}
+
+// setAckHandler is used to attach a handler to be invoked when an ack with a
+// given sequence number is received. If a timeout is reached, the handler is
+// deleted. This is used for indirect pings so does not configure a function
+// for nacks.
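+// Unlike setProbeChannels there is no channel to signal expiry; the timer
+// simply deletes the handler, so an ack arriving after the timeout is
+// silently ignored by invokeAckHandler.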
+func (m *Memberlist) setAckHandler(seqNo uint32, ackFn func([]byte, time.Time), timeout time.Duration) {
+	// Add the handler
+	ah := &ackHandler{ackFn, nil, nil}
+	m.ackLock.Lock()
+	m.ackHandlers[seqNo] = ah
+	m.ackLock.Unlock()
+
+	// Set up a reaping routine
+	ah.timer = time.AfterFunc(timeout, func() {
+		m.ackLock.Lock()
+		delete(m.ackHandlers, seqNo)
+		m.ackLock.Unlock()
+	})
+}
+
+// Invokes an ack handler if any is associated, and reaps the handler immediately
+func (m *Memberlist) invokeAckHandler(ack ackResp, timestamp time.Time) {
+	m.ackLock.Lock()
+	ah, ok := m.ackHandlers[ack.SeqNo]
+	delete(m.ackHandlers, ack.SeqNo)
+	m.ackLock.Unlock()
+	if !ok {
+		return
+	}
+	ah.timer.Stop()
+	ah.ackFn(ack.Payload, timestamp)
+}
+
+// Invokes nack handler if any is associated.
+func (m *Memberlist) invokeNackHandler(nack nackResp) {
+	m.ackLock.Lock()
+	ah, ok := m.ackHandlers[nack.SeqNo]
+	m.ackLock.Unlock()
+	if !ok || ah.nackFn == nil {
+		return
+	}
+	ah.nackFn()
+}
+
+// refute gossips an alive message in response to incoming information that we
+// are suspect or dead. It will make sure the incarnation number beats the given
+// accusedInc value, or you can supply 0 to just get the next incarnation number.
+// This alters the node state that's passed in so this MUST be called while the
+// nodeLock is held.
+func (m *Memberlist) refute(me *nodeState, accusedInc uint32) {
+	// Make sure the incarnation number beats the accusation.
+	inc := m.nextIncarnation()
+	if accusedInc >= inc {
+		inc = m.skipIncarnation(accusedInc - inc + 1)
+	}
+	me.Incarnation = inc
+
+	// Decrease our health because we are being asked to refute a problem.
+	m.awareness.ApplyDelta(1)
+
+	// Format and broadcast an alive message.
+	a := alive{
+		Incarnation: inc,
+		Node:        me.Name,
+		Addr:        me.Addr,
+		Port:        me.Port,
+		Meta:        me.Meta,
+		Vsn: []uint8{
+			me.PMin, me.PMax, me.PCur,
+			me.DMin, me.DMax, me.DCur,
+		},
+	}
+	m.encodeAndBroadcast(me.Addr.String(), aliveMsg, a)
+}
+
+// aliveNode is invoked by the network layer when we get a message about a
+// live node.
+func (m *Memberlist) aliveNode(a *alive, notify chan struct{}, bootstrap bool) {
+	m.nodeLock.Lock()
+	defer m.nodeLock.Unlock()
+	state, ok := m.nodeMap[a.Node]
+
+	// It is possible that during a Leave(), there is already an aliveMsg
+	// in-queue to be processed but blocked by the locks above. If we let
+	// that aliveMsg process, it'll cause us to re-join the cluster. This
+	// ensures that we don't.
+	if m.leave && a.Node == m.config.Name {
+		return
+	}
+
+	// Invoke the Alive delegate if any. This can be used to filter out
+	// alive messages based on custom logic. For example, using a cluster name.
+	// Using a merge delegate is not enough, as it is possible for passive
+	// cluster merging to still occur.
+	if m.config.Alive != nil {
+		node := &Node{
+			Name: a.Node,
+			Addr: a.Addr,
+			Port: a.Port,
+			Meta: a.Meta,
+			PMin: a.Vsn[0],
+			PMax: a.Vsn[1],
+			PCur: a.Vsn[2],
+			DMin: a.Vsn[3],
+			DMax: a.Vsn[4],
+			DCur: a.Vsn[5],
+		}
+		if err := m.config.Alive.NotifyAlive(node); err != nil {
+			m.logger.Printf("[WARN] memberlist: ignoring alive message for '%s': %s",
+				a.Node, err)
+			return
+		}
+	}
+
+	// Check if we've never seen this node before, and if not, then
+	// store this node in our node map.
+	if !ok {
+		state = &nodeState{
+			Node: Node{
+				Name: a.Node,
+				Addr: a.Addr,
+				Port: a.Port,
+				Meta: a.Meta,
+			},
+			State: stateDead,
+		}
+
+		// Add to map
+		m.nodeMap[a.Node] = state
+
+		// Get a random offset.
+		// This is important to ensure
+		// the failure detection bound is low on average. If all
+		// nodes did an append, failure detection bound would be
+		// very high.
+		n := len(m.nodes)
+		offset := randomOffset(n)
+
+		// Add at the end and swap with the node at the offset
+		m.nodes = append(m.nodes, state)
+		m.nodes[offset], m.nodes[n] = m.nodes[n], m.nodes[offset]
+
+		// Update numNodes after we've added a new node
+		atomic.AddUint32(&m.numNodes, 1)
+	}
+
+	// Check if this address is different from the existing node
+	if !bytes.Equal([]byte(state.Addr), a.Addr) || state.Port != a.Port {
+		m.logger.Printf("[ERR] memberlist: Conflicting address for %s. Mine: %v:%d Theirs: %v:%d",
+			state.Name, state.Addr, state.Port, net.IP(a.Addr), a.Port)
+
+		// Inform the conflict delegate if provided
+		if m.config.Conflict != nil {
+			other := Node{
+				Name: a.Node,
+				Addr: a.Addr,
+				Port: a.Port,
+				Meta: a.Meta,
+			}
+			m.config.Conflict.NotifyConflict(&state.Node, &other)
+		}
+		return
+	}
+
+	// Bail if the incarnation number is older, and this is not about us
+	isLocalNode := state.Name == m.config.Name
+	if a.Incarnation <= state.Incarnation && !isLocalNode {
+		return
+	}
+
+	// Bail if strictly less and this is about us
+	if a.Incarnation < state.Incarnation && isLocalNode {
+		return
+	}
+
+	// Clear out any suspicion timer that may be in effect.
+	delete(m.nodeTimers, a.Node)
+
+	// Store the old state and meta data
+	oldState := state.State
+	oldMeta := state.Meta
+
+	// If this is us we need to refute, otherwise re-broadcast
+	if !bootstrap && isLocalNode {
+		// Compute the version vector
+		versions := []uint8{
+			state.PMin, state.PMax, state.PCur,
+			state.DMin, state.DMax, state.DCur,
+		}
+
+		// If the Incarnation is the same, we need special handling, since it
+		// is possible for the following situation to happen:
+		// 1) Start with configuration C, join cluster
+		// 2) Hard fail / Kill / Shutdown
+		// 3) Restart with configuration C', join cluster
+		//
+		// In this case, other nodes and the local node see the same incarnation,
+		// but the values may not be the same. For this reason, we always
+		// need to do an equality check for this Incarnation. In most cases,
+		// we just ignore, but we may need to refute.
+ // + if a.Incarnation == state.Incarnation && + bytes.Equal(a.Meta, state.Meta) && + bytes.Equal(a.Vsn, versions) { + return + } + + m.refute(state, a.Incarnation) + m.logger.Printf("[WARN] memberlist: Refuting an alive message") + } else { + m.encodeBroadcastNotify(a.Node, aliveMsg, a, notify) + + // Update protocol versions if it arrived + if len(a.Vsn) > 0 { + state.PMin = a.Vsn[0] + state.PMax = a.Vsn[1] + state.PCur = a.Vsn[2] + state.DMin = a.Vsn[3] + state.DMax = a.Vsn[4] + state.DCur = a.Vsn[5] + } + + // Update the state and incarnation number + state.Incarnation = a.Incarnation + state.Meta = a.Meta + if state.State != stateAlive { + state.State = stateAlive + state.StateChange = time.Now() + } + } + + // Update metrics + metrics.IncrCounter([]string{"memberlist", "msg", "alive"}, 1) + + // Notify the delegate of any relevant updates + if m.config.Events != nil { + if oldState == stateDead { + // if Dead -> Alive, notify of join + m.config.Events.NotifyJoin(&state.Node) + + } else if !bytes.Equal(oldMeta, state.Meta) { + // if Meta changed, trigger an update notification + m.config.Events.NotifyUpdate(&state.Node) + } + } +} + +// suspectNode is invoked by the network layer when we get a message +// about a suspect node +func (m *Memberlist) suspectNode(s *suspect) { + m.nodeLock.Lock() + defer m.nodeLock.Unlock() + state, ok := m.nodeMap[s.Node] + + // If we've never heard about this node before, ignore it + if !ok { + return + } + + // Ignore old incarnation numbers + if s.Incarnation < state.Incarnation { + return + } + + // See if there's a suspicion timer we can confirm. If the info is new + // to us we will go ahead and re-gossip it. This allows for multiple + // independent confirmations to flow even when a node probes a node + // that's already suspect. + if timer, ok := m.nodeTimers[s.Node]; ok { + if timer.Confirm(s.From) { + m.encodeAndBroadcast(s.Node, suspectMsg, s) + } + return + } + + // Ignore non-alive nodes + if state.State != stateAlive { + return + } + + // If this is us we need to refute, otherwise re-broadcast + if state.Name == m.config.Name { + m.refute(state, s.Incarnation) + m.logger.Printf("[WARN] memberlist: Refuting a suspect message (from: %s)", s.From) + return // Do not mark ourself suspect + } else { + m.encodeAndBroadcast(s.Node, suspectMsg, s) + } + + // Update metrics + metrics.IncrCounter([]string{"memberlist", "msg", "suspect"}, 1) + + // Update the state + state.Incarnation = s.Incarnation + state.State = stateSuspect + changeTime := time.Now() + state.StateChange = changeTime + + // Setup a suspicion timer. Given that we don't have any known phase + // relationship with our peers, we set up k such that we hit the nominal + // timeout two probe intervals short of what we expect given the suspicion + // multiplier. + k := m.config.SuspicionMult - 2 + + // If there aren't enough nodes to give the expected confirmations, just + // set k to 0 to say that we don't expect any. Note we subtract 2 from n + // here to take out ourselves and the node being probed. + n := m.estNumNodes() + if n-2 < k { + k = 0 + } + + // Compute the timeouts based on the size of the cluster. 
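+	// (suspicionTimeout scales with log10 of the cluster size: with, say,
+	// SuspicionMult=4, ProbeInterval=1s and n=100 nodes, min works out to
+	// 4 * log10(100) * 1s = 8s, and max is SuspicionMaxTimeoutMult times
+	// that.)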
+ min := suspicionTimeout(m.config.SuspicionMult, n, m.config.ProbeInterval) + max := time.Duration(m.config.SuspicionMaxTimeoutMult) * min + fn := func(numConfirmations int) { + m.nodeLock.Lock() + state, ok := m.nodeMap[s.Node] + timeout := ok && state.State == stateSuspect && state.StateChange == changeTime + m.nodeLock.Unlock() + + if timeout { + if k > 0 && numConfirmations < k { + metrics.IncrCounter([]string{"memberlist", "degraded", "timeout"}, 1) + } + + m.logger.Printf("[INFO] memberlist: Marking %s as failed, suspect timeout reached (%d peer confirmations)", + state.Name, numConfirmations) + d := dead{Incarnation: state.Incarnation, Node: state.Name, From: m.config.Name} + m.deadNode(&d) + } + } + m.nodeTimers[s.Node] = newSuspicion(s.From, k, min, max, fn) +} + +// deadNode is invoked by the network layer when we get a message +// about a dead node +func (m *Memberlist) deadNode(d *dead) { + m.nodeLock.Lock() + defer m.nodeLock.Unlock() + state, ok := m.nodeMap[d.Node] + + // If we've never heard about this node before, ignore it + if !ok { + return + } + + // Ignore old incarnation numbers + if d.Incarnation < state.Incarnation { + return + } + + // Clear out any suspicion timer that may be in effect. + delete(m.nodeTimers, d.Node) + + // Ignore if node is already dead + if state.State == stateDead { + return + } + + // Check if this is us + if state.Name == m.config.Name { + // If we are not leaving we need to refute + if !m.leave { + m.refute(state, d.Incarnation) + m.logger.Printf("[WARN] memberlist: Refuting a dead message (from: %s)", d.From) + return // Do not mark ourself dead + } + + // If we are leaving, we broadcast and wait + m.encodeBroadcastNotify(d.Node, deadMsg, d, m.leaveBroadcast) + } else { + m.encodeAndBroadcast(d.Node, deadMsg, d) + } + + // Update metrics + metrics.IncrCounter([]string{"memberlist", "msg", "dead"}, 1) + + // Update the state + state.Incarnation = d.Incarnation + state.State = stateDead + state.StateChange = time.Now() + + // Notify of death + if m.config.Events != nil { + m.config.Events.NotifyLeave(&state.Node) + } +} + +// mergeState is invoked by the network layer when we get a Push/Pull +// state transfer +func (m *Memberlist) mergeState(remote []pushNodeState) { + for _, r := range remote { + switch r.State { + case stateAlive: + a := alive{ + Incarnation: r.Incarnation, + Node: r.Name, + Addr: r.Addr, + Port: r.Port, + Meta: r.Meta, + Vsn: r.Vsn, + } + m.aliveNode(&a, nil, false) + + case stateDead: + // If the remote node believes a node is dead, we prefer to + // suspect that node instead of declaring it dead instantly + fallthrough + case stateSuspect: + s := suspect{Incarnation: r.Incarnation, Node: r.Name, From: m.config.Name} + m.suspectNode(&s) + } + } +} diff --git a/vendor/github.com/hashicorp/memberlist/suspicion.go b/vendor/github.com/hashicorp/memberlist/suspicion.go new file mode 100644 index 0000000000..5f573e1fc6 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/suspicion.go @@ -0,0 +1,130 @@ +package memberlist + +import ( + "math" + "sync/atomic" + "time" +) + +// suspicion manages the suspect timer for a node and provides an interface +// to accelerate the timeout as we get more independent confirmations that +// a node is suspect. +type suspicion struct { + // n is the number of independent confirmations we've seen. This must + // be updated using atomic instructions to prevent contention with the + // timer callback. 
+	n int32
+
+	// k is the number of independent confirmations we'd like to see in
+	// order to drive the timer to its minimum value.
+	k int32
+
+	// min is the minimum timer value.
+	min time.Duration
+
+	// max is the maximum timer value.
+	max time.Duration
+
+	// start captures the timestamp when we began the timer. This is used
+	// so we can calculate durations to feed the timer during updates in
+	// a way that achieves the overall time we'd like.
+	start time.Time
+
+	// timer is the underlying timer that implements the timeout.
+	timer *time.Timer
+
+	// f is the function to call when the timer expires. We hold on to this
+	// because there are cases where we call it directly.
+	timeoutFn func()
+
+	// confirmations is a map of "from" nodes that have confirmed a given
+	// node is suspect. This prevents double counting.
+	confirmations map[string]struct{}
+}
+
+// newSuspicion returns a timer started with the max time, and that will drive
+// to the min time after seeing k or more confirmations. The from node will be
+// excluded from confirmations since we might get our own suspicion message
+// gossiped back to us. The minimum time will be used if no confirmations are
+// called for (k <= 0).
+func newSuspicion(from string, k int, min time.Duration, max time.Duration, fn func(int)) *suspicion {
+	s := &suspicion{
+		k:             int32(k),
+		min:           min,
+		max:           max,
+		confirmations: make(map[string]struct{}),
+	}
+
+	// Exclude the from node from any confirmations.
+	s.confirmations[from] = struct{}{}
+
+	// Pass the number of confirmations into the timeout function for
+	// easy telemetry.
+	s.timeoutFn = func() {
+		fn(int(atomic.LoadInt32(&s.n)))
+	}
+
+	// If there aren't any confirmations to be made then take the min
+	// time from the start.
+	timeout := max
+	if k < 1 {
+		timeout = min
+	}
+	s.timer = time.AfterFunc(timeout, s.timeoutFn)
+
+	// Capture the start time right after starting the timer above so
+	// we should always err on the side of a little longer timeout if
+	// there's any preemption that separates this and the step above.
+	s.start = time.Now()
+	return s
+}
+
+// remainingSuspicionTime takes the state variables of the suspicion timer and
+// calculates the remaining time to wait before considering a node dead. The
+// return value can be negative, so be prepared to fire the timer immediately in
+// that case.
+func remainingSuspicionTime(n, k int32, elapsed time.Duration, min, max time.Duration) time.Duration {
+	frac := math.Log(float64(n)+1.0) / math.Log(float64(k)+1.0)
+	raw := max.Seconds() - frac*(max.Seconds()-min.Seconds())
+	timeout := time.Duration(math.Floor(1000.0*raw)) * time.Millisecond
+	if timeout < min {
+		timeout = min
+	}
+
+	// We have to take into account the amount of time that has passed so
+	// far, so we get the right overall timeout.
+	return timeout - elapsed
+}
+
+// Confirm registers that a possibly new peer has also determined the given
+// node is suspect. This returns true if this was new information, and false
+// if it was a duplicate confirmation, or if we've got enough confirmations to
+// hit the minimum.
+func (s *suspicion) Confirm(from string) bool {
+	// If we've got enough confirmations then stop accepting them.
+	if atomic.LoadInt32(&s.n) >= s.k {
+		return false
+	}
+
+	// Only allow one confirmation from each possible peer.
+	if _, ok := s.confirmations[from]; ok {
+		return false
+	}
+	s.confirmations[from] = struct{}{}
+
+	// Compute the new timeout given the current number of confirmations and
+	// adjust the timer.
+	// If the timeout becomes negative *and* we can cleanly stop the
+	// timer then we will call the timeout function directly from here.
+	n := atomic.AddInt32(&s.n, 1)
+	elapsed := time.Now().Sub(s.start)
+	remaining := remainingSuspicionTime(n, s.k, elapsed, s.min, s.max)
+	if s.timer.Stop() {
+		if remaining > 0 {
+			s.timer.Reset(remaining)
+		} else {
+			go s.timeoutFn()
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/hashicorp/memberlist/transport.go b/vendor/github.com/hashicorp/memberlist/transport.go
new file mode 100644
index 0000000000..ca0a660836
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/transport.go
@@ -0,0 +1,65 @@
+package memberlist
+
+import (
+	"net"
+	"time"
+)
+
+// Packet is used to provide some metadata about incoming packets from peers
+// over a packet connection, as well as the packet payload.
+type Packet struct {
+	// Buf has the raw contents of the packet.
+	Buf []byte
+
+	// From has the address of the peer. This is an actual net.Addr so we
+	// can expose some concrete details about incoming packets.
+	From net.Addr
+
+	// Timestamp is the time when the packet was received. This should be
+	// taken as close as possible to the actual receipt time to help make
+	// accurate RTT measurements during probes.
+	Timestamp time.Time
+}
+
+// Transport is used to abstract over communicating with other peers. The packet
+// interface is assumed to be best-effort and the stream interface is assumed to
+// be reliable.
+type Transport interface {
+	// FinalAdvertiseAddr is given the user's configured values (which
+	// might be empty) and returns the desired IP and port to advertise to
+	// the rest of the cluster.
+	FinalAdvertiseAddr(ip string, port int) (net.IP, int, error)
+
+	// WriteTo is a packet-oriented interface that fires off the given
+	// payload to the given address in a connectionless fashion. This should
+	// return a time stamp that's as close as possible to when the packet
+	// was transmitted to help make accurate RTT measurements during probes.
+	//
+	// This is similar to net.PacketConn, though we didn't want to expose
+	// that full set of required methods to keep assumptions about the
+	// underlying plumbing to a minimum. We also treat the address here as a
+	// string, similar to Dial, so it's network neutral; this usually is
+	// in the form of "host:port".
+	WriteTo(b []byte, addr string) (time.Time, error)
+
+	// PacketCh returns a channel that can be read to receive incoming
+	// packets from other peers. How this is set up for listening is left as
+	// an exercise for the concrete transport implementations.
+	PacketCh() <-chan *Packet
+
+	// DialTimeout is used to create a connection that allows us to perform
+	// two-way communication with a peer. This is generally more expensive
+	// than packet connections so is used for more infrequent operations
+	// such as anti-entropy or fallback probes if the packet-oriented probe
+	// failed.
+	DialTimeout(addr string, timeout time.Duration) (net.Conn, error)
+
+	// StreamCh returns a channel that can be read to handle incoming stream
+	// connections from other peers. How this is set up for listening is
+	// left as an exercise for the concrete transport implementations.
+	StreamCh() <-chan net.Conn
+
+	// Shutdown is called when memberlist is shutting down; this gives the
+	// transport a chance to clean up any listeners.
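+	// A typical implementation closes its listeners here so that the
+	// goroutines feeding PacketCh and StreamCh can exit.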
+	Shutdown() error
+}
diff --git a/vendor/github.com/hashicorp/memberlist/util.go b/vendor/github.com/hashicorp/memberlist/util.go
new file mode 100644
index 0000000000..a4f926e3ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/memberlist/util.go
@@ -0,0 +1,296 @@
+package memberlist
+
+import (
+	"bytes"
+	"compress/lzw"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+	"math/rand"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-msgpack/codec"
+	"github.com/sean-/seed"
+)
+
+// pushPullScaleThreshold is the minimum number of nodes
+// before we start scaling the push/pull timing. The scale
+// effect is log2(Nodes) - log2(pushPullScaleThreshold). This means
+// that the 33rd node will cause us to double the interval,
+// while the 65th will triple it.
+const pushPullScaleThreshold = 32
+
+const (
+	// Constant litWidth 2-8
+	lzwLitWidth = 8
+)
+
+func init() {
+	seed.Init()
+}
+
+// decode reverses the encode operation on a byte slice input
+func decode(buf []byte, out interface{}) error {
+	r := bytes.NewReader(buf)
+	hd := codec.MsgpackHandle{}
+	dec := codec.NewDecoder(r, &hd)
+	return dec.Decode(out)
+}
+
+// encode writes an encoded object to a new bytes buffer
+func encode(msgType messageType, in interface{}) (*bytes.Buffer, error) {
+	buf := bytes.NewBuffer(nil)
+	buf.WriteByte(uint8(msgType))
+	hd := codec.MsgpackHandle{}
+	enc := codec.NewEncoder(buf, &hd)
+	err := enc.Encode(in)
+	return buf, err
+}
+
+// randomOffset returns a random offset between 0 and n
+func randomOffset(n int) int {
+	if n == 0 {
+		return 0
+	}
+	return int(rand.Uint32() % uint32(n))
+}
+
+// suspicionTimeout computes the timeout that should be used when
+// a node is suspected
+func suspicionTimeout(suspicionMult, n int, interval time.Duration) time.Duration {
+	nodeScale := math.Max(1.0, math.Log10(math.Max(1.0, float64(n))))
+	// multiply by 1000 to keep some precision because time.Duration is an int64 type
+	timeout := time.Duration(suspicionMult) * time.Duration(nodeScale*1000) * interval / 1000
+	return timeout
+}
+
+// retransmitLimit computes the limit of retransmissions
+func retransmitLimit(retransmitMult, n int) int {
+	nodeScale := math.Ceil(math.Log10(float64(n + 1)))
+	limit := retransmitMult * int(nodeScale)
+	return limit
+}
+
+// shuffleNodes randomly shuffles the input nodes using the Fisher-Yates shuffle
+func shuffleNodes(nodes []*nodeState) {
+	n := len(nodes)
+	for i := n - 1; i > 0; i-- {
+		j := rand.Intn(i + 1)
+		nodes[i], nodes[j] = nodes[j], nodes[i]
+	}
+}
+
+// pushPullScale is used to scale the time interval at which push/pull
+// syncs take place. It is used to prevent network saturation as the
+// cluster size grows
+func pushPullScale(interval time.Duration, n int) time.Duration {
+	// Don't scale until we cross the threshold
+	if n <= pushPullScaleThreshold {
+		return interval
+	}
+
+	multiplier := math.Ceil(math.Log2(float64(n))-math.Log2(pushPullScaleThreshold)) + 1.0
+	return time.Duration(multiplier) * interval
+}
+
+// moveDeadNodes moves nodes that are dead and beyond the gossip to the dead interval
+// to the end of the slice and returns the index of the first moved node.
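+// For example, given [a(dead) b(alive) c(dead) d(alive)] with both dead
+// nodes past gossipToTheDeadTime, the slice ends up as [d b c a] and the
+// returned index is 2: everything from that index on is reaped by the
+// caller (see resetNodes in state.go).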
+func moveDeadNodes(nodes []*nodeState, gossipToTheDeadTime time.Duration) int {
+	numDead := 0
+	n := len(nodes)
+	for i := 0; i < n-numDead; i++ {
+		if nodes[i].State != stateDead {
+			continue
+		}
+
+		// Respect the gossip to the dead interval
+		if time.Since(nodes[i].StateChange) <= gossipToTheDeadTime {
+			continue
+		}
+
+		// Move this node to the end
+		nodes[i], nodes[n-numDead-1] = nodes[n-numDead-1], nodes[i]
+		numDead++
+		i--
+	}
+	return n - numDead
+}
+
+// kRandomNodes is used to select up to k random nodes, excluding any nodes where
+// the filter function returns true. It is possible that fewer than k nodes are
+// returned.
+func kRandomNodes(k int, nodes []*nodeState, filterFn func(*nodeState) bool) []*nodeState {
+	n := len(nodes)
+	kNodes := make([]*nodeState, 0, k)
+OUTER:
+	// Probe up to 3*n times, with large n this is not necessary
+	// since k << n, but with small n we want search to be
+	// exhaustive
+	for i := 0; i < 3*n && len(kNodes) < k; i++ {
+		// Get random node
+		idx := randomOffset(n)
+		node := nodes[idx]
+
+		// Give the filter a shot at it.
+		if filterFn != nil && filterFn(node) {
+			continue OUTER
+		}
+
+		// Check if we have this node already
+		for j := 0; j < len(kNodes); j++ {
+			if node == kNodes[j] {
+				continue OUTER
+			}
+		}
+
+		// Append the node
+		kNodes = append(kNodes, node)
+	}
+	return kNodes
+}
+
+// makeCompoundMessage takes a list of messages and generates
+// a single compound message containing all of them
+func makeCompoundMessage(msgs [][]byte) *bytes.Buffer {
+	// Create a local buffer
+	buf := bytes.NewBuffer(nil)
+
+	// Write out the type
+	buf.WriteByte(uint8(compoundMsg))
+
+	// Write out the number of messages
+	buf.WriteByte(uint8(len(msgs)))
+
+	// Add the message lengths
+	for _, m := range msgs {
+		binary.Write(buf, binary.BigEndian, uint16(len(m)))
+	}
+
+	// Append the messages
+	for _, m := range msgs {
+		buf.Write(m)
+	}
+
+	return buf
+}
+
+// decodeCompoundMessage splits a compound message and returns
+// the slices of individual messages. Also returns the number
+// of truncated messages and any potential error
+func decodeCompoundMessage(buf []byte) (trunc int, parts [][]byte, err error) {
+	if len(buf) < 1 {
+		err = fmt.Errorf("missing compound length byte")
+		return
+	}
+	numParts := uint8(buf[0])
+	buf = buf[1:]
+
+	// Check we have enough bytes. Convert numParts to int before
+	// multiplying: numParts*2 would overflow uint8 for any count above 127.
+	if len(buf) < int(numParts)*2 {
+		err = fmt.Errorf("truncated len slice")
+		return
+	}
+
+	// Decode the lengths
+	lengths := make([]uint16, numParts)
+	for i := 0; i < int(numParts); i++ {
+		lengths[i] = binary.BigEndian.Uint16(buf[i*2 : i*2+2])
+	}
+	buf = buf[int(numParts)*2:]
+
+	// Split each message
+	for idx, msgLen := range lengths {
+		if len(buf) < int(msgLen) {
+			trunc = int(numParts) - idx
+			return
+		}
+
+		// Extract the slice, seek past on the buffer
+		slice := buf[:msgLen]
+		buf = buf[msgLen:]
+		parts = append(parts, slice)
+	}
+	return
+}
+
+// hasPort, given a string of the form "host", "host:port",
+// "ipv6::addr" or "[ipv6::address]:port",
+// returns true if the string includes a port.
+func hasPort(s string) bool {
+	last := strings.LastIndex(s, ":")
+	if last == -1 {
+		return false
+	}
+	if s[0] == '[' {
+		return s[last-1] == ']'
+	}
+	return strings.Index(s, ":") == last
+}
+
+// compressPayload takes an opaque input buffer, compresses it
+// and wraps it in a compress{} message that is encoded.
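+//
+// As a sketch of the round trip (the leading msgType byte that encode
+// prepends is assumed to have been consumed before decompressPayload is
+// called, which is how the rest of this package passes message bodies):
+//
+//	buf, err := compressPayload(raw)                 // buffer starts with compressMsg
+//	...
+//	plain, err := decompressPayload(buf.Bytes()[1:]) // strip the type byte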
+func compressPayload(inp []byte) (*bytes.Buffer, error) { + var buf bytes.Buffer + compressor := lzw.NewWriter(&buf, lzw.LSB, lzwLitWidth) + + _, err := compressor.Write(inp) + if err != nil { + return nil, err + } + + // Ensure we flush everything out + if err := compressor.Close(); err != nil { + return nil, err + } + + // Create a compressed message + c := compress{ + Algo: lzwAlgo, + Buf: buf.Bytes(), + } + return encode(compressMsg, &c) +} + +// decompressPayload is used to unpack an encoded compress{} +// message and return its payload uncompressed +func decompressPayload(msg []byte) ([]byte, error) { + // Decode the message + var c compress + if err := decode(msg, &c); err != nil { + return nil, err + } + return decompressBuffer(&c) +} + +// decompressBuffer is used to decompress the buffer of +// a single compress message, handling multiple algorithms +func decompressBuffer(c *compress) ([]byte, error) { + // Verify the algorithm + if c.Algo != lzwAlgo { + return nil, fmt.Errorf("Cannot decompress unknown algorithm %d", c.Algo) + } + + // Create a uncompressor + uncomp := lzw.NewReader(bytes.NewReader(c.Buf), lzw.LSB, lzwLitWidth) + defer uncomp.Close() + + // Read all the data + var b bytes.Buffer + _, err := io.Copy(&b, uncomp) + if err != nil { + return nil, err + } + + // Return the uncompressed bytes + return b.Bytes(), nil +} + +// joinHostPort returns the host:port form of an address, for use with a +// transport. +func joinHostPort(host string, port uint16) string { + return net.JoinHostPort(host, strconv.Itoa(int(port))) +} diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE new file mode 100644 index 0000000000..c33dcc7c92 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go new file mode 100644 index 0000000000..613bfff89e --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/client.go @@ -0,0 +1,180 @@ +package coordinate + +import ( + "fmt" + "math" + "sort" + "sync" + "time" +) + +// Client manages the estimated network coordinate for a given node, and adjusts +// it as the node observes round trip times and estimated coordinates from other +// nodes. The core algorithm is based on Vivaldi, see the documentation for Config +// for more details. +type Client struct { + // coord is the current estimate of the client's network coordinate. + coord *Coordinate + + // origin is a coordinate sitting at the origin. + origin *Coordinate + + // config contains the tuning parameters that govern the performance of + // the algorithm. + config *Config + + // adjustmentIndex is the current index into the adjustmentSamples slice. + adjustmentIndex uint + + // adjustment is used to store samples for the adjustment calculation. + adjustmentSamples []float64 + + // latencyFilterSamples is used to store the last several RTT samples, + // keyed by node name. We will use the config's LatencyFilterSamples + // value to determine how many samples we keep, per node. + latencyFilterSamples map[string][]float64 + + // mutex enables safe concurrent access to the client. + mutex sync.RWMutex +} + +// NewClient creates a new Client and verifies the configuration is valid. +func NewClient(config *Config) (*Client, error) { + if !(config.Dimensionality > 0) { + return nil, fmt.Errorf("dimensionality must be >0") + } + + return &Client{ + coord: NewCoordinate(config), + origin: NewCoordinate(config), + config: config, + adjustmentIndex: 0, + adjustmentSamples: make([]float64, config.AdjustmentWindowSize), + latencyFilterSamples: make(map[string][]float64), + }, nil +} + +// GetCoordinate returns a copy of the coordinate for this client. +func (c *Client) GetCoordinate() *Coordinate { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.Clone() +} + +// SetCoordinate forces the client's coordinate to a known state. +func (c *Client) SetCoordinate(coord *Coordinate) { + c.mutex.Lock() + defer c.mutex.Unlock() + + c.coord = coord.Clone() +} + +// ForgetNode removes any client state for the given node. +func (c *Client) ForgetNode(node string) { + c.mutex.Lock() + defer c.mutex.Unlock() + + delete(c.latencyFilterSamples, node) +} + +// latencyFilter applies a simple moving median filter with a new sample for +// a node. This assumes that the mutex has been locked already. +func (c *Client) latencyFilter(node string, rttSeconds float64) float64 { + samples, ok := c.latencyFilterSamples[node] + if !ok { + samples = make([]float64, 0, c.config.LatencyFilterSize) + } + + // Add the new sample and trim the list, if needed. + samples = append(samples, rttSeconds) + if len(samples) > int(c.config.LatencyFilterSize) { + samples = samples[1:] + } + c.latencyFilterSamples[node] = samples + + // Sort a copy of the samples and return the median. 
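
The latencyFilter above is the only smoothing Serf applies to raw RTT observations. As a quick illustration of why a moving median rides out spikes, here is a standalone sketch of the same windowed-median idea; the names (medianWindow, Observe) are hypothetical and not part of the vendored package:

package main

import (
	"fmt"
	"sort"
)

// medianWindow keeps the last n samples and reports their median,
// mirroring the latencyFilter logic in the vendored code above.
type medianWindow struct {
	n       int
	samples []float64
}

func (m *medianWindow) Observe(v float64) float64 {
	// Append the new sample and trim the window, oldest first.
	m.samples = append(m.samples, v)
	if len(m.samples) > m.n {
		m.samples = m.samples[1:]
	}
	// Sort a copy and return the median, as latencyFilter does.
	sorted := append([]float64(nil), m.samples...)
	sort.Float64s(sorted)
	return sorted[len(sorted)/2]
}

func main() {
	w := &medianWindow{n: 3}
	for _, rtt := range []float64{0.010, 0.012, 0.500, 0.011} {
		fmt.Printf("observed %.3fs -> median %.3fs\n", rtt, w.Observe(rtt))
	}
	// The one-off 0.500s spike never becomes the reported latency.
}
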
+ sorted := make([]float64, len(samples)) + copy(sorted, samples) + sort.Float64s(sorted) + return sorted[len(sorted)/2] +} + +// updateVivialdi updates the Vivaldi portion of the client's coordinate. This +// assumes that the mutex has been locked already. +func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) { + const zeroThreshold = 1.0e-6 + + dist := c.coord.DistanceTo(other).Seconds() + if rttSeconds < zeroThreshold { + rttSeconds = zeroThreshold + } + wrongness := math.Abs(dist-rttSeconds) / rttSeconds + + totalError := c.coord.Error + other.Error + if totalError < zeroThreshold { + totalError = zeroThreshold + } + weight := c.coord.Error / totalError + + c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight) + if c.coord.Error > c.config.VivaldiErrorMax { + c.coord.Error = c.config.VivaldiErrorMax + } + + delta := c.config.VivaldiCC * weight + force := delta * (rttSeconds - dist) + c.coord = c.coord.ApplyForce(c.config, force, other) +} + +// updateAdjustment updates the adjustment portion of the client's coordinate, if +// the feature is enabled. This assumes that the mutex has been locked already. +func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) { + if c.config.AdjustmentWindowSize == 0 { + return + } + + // Note that the existing adjustment factors don't figure in to this + // calculation so we use the raw distance here. + dist := c.coord.rawDistanceTo(other) + c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist + c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize + + sum := 0.0 + for _, sample := range c.adjustmentSamples { + sum += sample + } + c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize)) +} + +// updateGravity applies a small amount of gravity to pull coordinates towards +// the center of the coordinate system to combat drift. This assumes that the +// mutex is locked already. +func (c *Client) updateGravity() { + dist := c.origin.DistanceTo(c.coord).Seconds() + force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0) + c.coord = c.coord.ApplyForce(c.config, force, c.origin) +} + +// Update takes other, a coordinate for another node, and rtt, a round trip +// time observation for a ping to that node, and updates the estimated position of +// the client's coordinate. Returns the updated coordinate. +func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate { + c.mutex.Lock() + defer c.mutex.Unlock() + + rttSeconds := c.latencyFilter(node, rtt.Seconds()) + c.updateVivaldi(other, rttSeconds) + c.updateAdjustment(other, rttSeconds) + c.updateGravity() + return c.coord.Clone() +} + +// DistanceTo returns the estimated RTT from the client's coordinate to other, the +// coordinate for another node. +func (c *Client) DistanceTo(other *Coordinate) time.Duration { + c.mutex.RLock() + defer c.mutex.RUnlock() + + return c.coord.DistanceTo(other) +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go new file mode 100644 index 0000000000..b85a8ab7b0 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/config.go @@ -0,0 +1,70 @@ +package coordinate + +// Config is used to set the parameters of the Vivaldi-based coordinate mapping +// algorithm. +// +// The following references are called out at various points in the documentation +// here: +// +// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system." 
+// ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004. +// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates +// in the Wild." NSDI. Vol. 7. 2007. +// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for +// host-based network coordinate systems." Networking, IEEE/ACM Transactions +// on 18.1 (2010): 27-40. +type Config struct { + // The dimensionality of the coordinate system. As discussed in [2], more + // dimensions improves the accuracy of the estimates up to a point. Per [2] + // we chose 8 dimensions plus a non-Euclidean height. + Dimensionality uint + + // VivaldiErrorMax is the default error value when a node hasn't yet made + // any observations. It also serves as an upper limit on the error value in + // case observations cause the error value to increase without bound. + VivaldiErrorMax float64 + + // VivaldiCE is a tuning factor that controls the maximum impact an + // observation can have on a node's confidence. See [1] for more details. + VivaldiCE float64 + + // VivaldiCC is a tuning factor that controls the maximum impact an + // observation can have on a node's coordinate. See [1] for more details. + VivaldiCC float64 + + // AdjustmentWindowSize is a tuning factor that determines how many samples + // we retain to calculate the adjustment factor as discussed in [3]. Setting + // this to zero disables this feature. + AdjustmentWindowSize uint + + // HeightMin is the minimum value of the height parameter. Since this + // always must be positive, it will introduce a small amount error, so + // the chosen value should be relatively small compared to "normal" + // coordinates. + HeightMin float64 + + // LatencyFilterSamples is the maximum number of samples that are retained + // per node, in order to compute a median. The intent is to ride out blips + // but still keep the delay low, since our time to probe any given node is + // pretty infrequent. See [2] for more details. + LatencyFilterSize uint + + // GravityRho is a tuning factor that sets how much gravity has an effect + // to try to re-center coordinates. See [2] for more details. + GravityRho float64 +} + +// DefaultConfig returns a Config that has some default values suitable for +// basic testing of the algorithm, but not tuned to any particular type of cluster. +func DefaultConfig() *Config { + return &Config{ + Dimensionality: 8, + VivaldiErrorMax: 1.5, + VivaldiCE: 0.25, + VivaldiCC: 0.25, + AdjustmentWindowSize: 20, + HeightMin: 10.0e-6, + LatencyFilterSize: 3, + GravityRho: 150.0, + } +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go new file mode 100644 index 0000000000..c9194e048b --- /dev/null +++ b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go @@ -0,0 +1,183 @@ +package coordinate + +import ( + "math" + "math/rand" + "time" +) + +// Coordinate is a specialized structure for holding network coordinates for the +// Vivaldi-based coordinate mapping algorithm. All of the fields should be public +// to enable this to be serialized. All values in here are in units of seconds. +type Coordinate struct { + // Vec is the Euclidean portion of the coordinate. This is used along + // with the other fields to provide an overall distance estimate. The + // units here are seconds. + Vec []float64 + + // Err reflects the confidence in the given coordinate and is updated + // dynamically by the Vivaldi Client. This is dimensionless. 
+ Error float64 + + // Adjustment is a distance offset computed based on a calculation over + // observations from all other nodes over a fixed window and is updated + // dynamically by the Vivaldi Client. The units here are seconds. + Adjustment float64 + + // Height is a distance offset that accounts for non-Euclidean effects + // which model the access links from nodes to the core Internet. The access + // links are usually set by bandwidth and congestion, and the core links + // usually follow distance based on geography. + Height float64 +} + +const ( + // secondsToNanoseconds is used to convert float seconds to nanoseconds. + secondsToNanoseconds = 1.0e9 + + // zeroThreshold is used to decide if two coordinates are on top of each + // other. + zeroThreshold = 1.0e-6 +) + +// ErrDimensionalityConflict will be panic-d if you try to perform operations +// with incompatible dimensions. +type DimensionalityConflictError struct{} + +// Adds the error interface. +func (e DimensionalityConflictError) Error() string { + return "coordinate dimensionality does not match" +} + +// NewCoordinate creates a new coordinate at the origin, using the given config +// to supply key initial values. +func NewCoordinate(config *Config) *Coordinate { + return &Coordinate{ + Vec: make([]float64, config.Dimensionality), + Error: config.VivaldiErrorMax, + Adjustment: 0.0, + Height: config.HeightMin, + } +} + +// Clone creates an independent copy of this coordinate. +func (c *Coordinate) Clone() *Coordinate { + vec := make([]float64, len(c.Vec)) + copy(vec, c.Vec) + return &Coordinate{ + Vec: vec, + Error: c.Error, + Adjustment: c.Adjustment, + Height: c.Height, + } +} + +// IsCompatibleWith checks to see if the two coordinates are compatible +// dimensionally. If this returns true then you are guaranteed to not get +// any runtime errors operating on them. +func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool { + return len(c.Vec) == len(other.Vec) +} + +// ApplyForce returns the result of applying the force from the direction of the +// other coordinate. +func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + ret := c.Clone() + unit, mag := unitVectorAt(c.Vec, other.Vec) + ret.Vec = add(ret.Vec, mul(unit, force)) + if mag > zeroThreshold { + ret.Height = (ret.Height+other.Height)*force/mag + ret.Height + ret.Height = math.Max(ret.Height, config.HeightMin) + } + return ret +} + +// DistanceTo returns the distance between this coordinate and the other +// coordinate, including adjustments. +func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration { + if !c.IsCompatibleWith(other) { + panic(DimensionalityConflictError{}) + } + + dist := c.rawDistanceTo(other) + adjustedDist := dist + c.Adjustment + other.Adjustment + if adjustedDist > 0.0 { + dist = adjustedDist + } + return time.Duration(dist * secondsToNanoseconds) +} + +// rawDistanceTo returns the Vivaldi distance between this coordinate and the +// other coordinate in seconds, not including adjustments. This assumes the +// dimensions have already been checked to be compatible. +func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 { + return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height +} + +// add returns the sum of vec1 and vec2. This assumes the dimensions have +// already been checked to be compatible. 
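
ApplyForce and DistanceTo are the primitives the Client's update steps are built from. A minimal sketch of their semantics, assuming only the exported coordinate API added in this diff (DefaultConfig, NewCoordinate, ApplyForce, DistanceTo); the 25ms offset is an arbitrary example value:

package main

import (
	"fmt"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	config := coordinate.DefaultConfig()
	origin := coordinate.NewCoordinate(config)

	other := coordinate.NewCoordinate(config)
	other.Vec[0] = 0.025 // 25ms away along the first axis, in seconds

	// A positive force pushes origin away from other; a negative force
	// would pull it closer. The force magnitude is in seconds.
	moved := origin.ApplyForce(config, 0.005, other)
	fmt.Println(moved.Vec[0]) // negative: pushed away from other

	// The distance estimate grows accordingly (heights included).
	fmt.Println(origin.DistanceTo(other), moved.DistanceTo(other))
}
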
+func add(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] + vec2[i]
+	}
+	return ret
+}
+
+// diff returns the difference between vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] - vec2[i]
+	}
+	return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+	ret := make([]float64, len(vec))
+	for i := range vec {
+		ret[i] = vec[i] * factor
+	}
+	return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+	sum := 0.0
+	for i := range vec {
+		sum += vec[i] * vec[i]
+	}
+	return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+	ret := diff(vec1, vec2)
+
+	// If the coordinates aren't on top of each other we can normalize.
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), mag
+	}
+
+	// Otherwise, just return a random unit vector.
+	for i := range ret {
+		ret[i] = rand.Float64() - 0.5
+	}
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), 0.0
+	}
+
+	// And finally just give up and make a unit vector along the first
+	// dimension. This should be exceedingly rare.
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000000..6fb033c0cd
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice of clients of length nodes, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight line
+// with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two-dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + n := int(math.Sqrt(float64(nodes))) + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + x1, y1 := float64(i%n), float64(i/n) + x2, y2 := float64(j%n), float64(j/n) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt := time.Duration(dist * float64(spacing)) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateSplit returns a truth matrix as if half the nodes are close together in +// one location and half the nodes are close together in another. The lan factor +// is used to separate the nodes locally and the wan factor represents the split +// between the two sides. +func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + split := nodes / 2 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rtt := lan + if (i <= split && j > split) || (i > split && j <= split) { + rtt += wan + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed +// around a circle with the given radius. The first node is at the "center" of the +// circle because it's equidistant from all the other nodes, but we place it at +// double the radius, so it should show up above all the other nodes in height. +func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration { + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + var rtt time.Duration + if i == 0 { + rtt = 2 * radius + } else { + t1 := 2.0 * math.Pi * float64(i) / float64(nodes) + x1, y1 := math.Cos(t1), math.Sin(t1) + t2 := 2.0 * math.Pi * float64(j) / float64(nodes) + x2, y2 := math.Cos(t2), math.Sin(t2) + dx, dy := x2-x1, y2-y1 + dist := math.Sqrt(dx*dx + dy*dy) + rtt = time.Duration(dist * float64(radius)) + } + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// GenerateRandom returns a truth matrix for a set of nodes with normally +// distributed delays, with the given mean and deviation. The RNG is re-seeded +// so you always get the same matrix for a given size. +func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration { + rand.Seed(1) + + truth := make([][]time.Duration, nodes) + for i := range truth { + truth[i] = make([]time.Duration, nodes) + } + + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds() + rtt := time.Duration(rttSeconds * secondsToNanoseconds) + truth[i][j], truth[j][i] = rtt, rtt + } + } + return truth +} + +// Simulate runs the given number of cycles using the given list of clients and +// truth matrix. On each cycle, each client will pick a random node and observe +// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for +// each simulation run to get deterministic results (for this algorithm and the +// underlying algorithm which will use random numbers for position vectors when +// starting out with everything at the origin). 
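
These generators only fill in a symmetric truth matrix, which makes them easy to check by hand and to feed into Simulate and Evaluate below. A small end-to-end sketch, assuming only the exported API added in this diff:

package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/serf/coordinate"
)

func main() {
	// 10 nodes: two halves 5ms apart internally, 100ms apart across the split.
	truth := coordinate.GenerateSplit(10, 5*time.Millisecond, 100*time.Millisecond)
	fmt.Println(truth[0][1]) // 5ms: same side
	fmt.Println(truth[0][9]) // 105ms: lan + wan across the split

	// Run the simulation against this topology and report estimation error.
	clients, err := coordinate.GenerateClients(10, coordinate.DefaultConfig())
	if err != nil {
		panic(err)
	}
	coordinate.Simulate(clients, truth, 1000)
	coordinate.Evaluate(clients, truth) // prints avg/max relative error
}
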
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { + rand.Seed(1) + + nodes := len(clients) + for cycle := 0; cycle < cycles; cycle++ { + for i, _ := range clients { + if j := rand.Intn(nodes); j != i { + c := clients[j].GetCoordinate() + rtt := truth[i][j] + node := fmt.Sprintf("node_%d", j) + clients[i].Update(node, c, rtt) + } + } + } +} + +// Stats is returned from the Evaluate function with a summary of the algorithm +// performance. +type Stats struct { + ErrorMax float64 + ErrorAvg float64 +} + +// Evaluate uses the coordinates of the given clients to calculate estimated +// distances and compares them with the given truth matrix, returning summary +// stats. +func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) { + nodes := len(clients) + count := 0 + for i := 0; i < nodes; i++ { + for j := i + 1; j < nodes; j++ { + est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds() + actual := truth[i][j].Seconds() + error := math.Abs(est-actual) / actual + stats.ErrorMax = math.Max(stats.ErrorMax, error) + stats.ErrorAvg += error + count += 1 + } + } + + stats.ErrorAvg /= float64(count) + fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax) + return +} diff --git a/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright new file mode 100644 index 0000000000..21a1a1b532 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/ops-misc/debian/copyright @@ -0,0 +1,2 @@ +Name: serf +Copyright: Hashicorp 2013 diff --git a/vendor/github.com/hashicorp/serf/serf/broadcast.go b/vendor/github.com/hashicorp/serf/serf/broadcast.go new file mode 100644 index 0000000000..d20728f3f4 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/broadcast.go @@ -0,0 +1,27 @@ +package serf + +import ( + "github.com/hashicorp/memberlist" +) + +// broadcast is an implementation of memberlist.Broadcast and is used +// to manage broadcasts across the memberlist channel that are related +// only to Serf. +type broadcast struct { + msg []byte + notify chan<- struct{} +} + +func (b *broadcast) Invalidates(other memberlist.Broadcast) bool { + return false +} + +func (b *broadcast) Message() []byte { + return b.msg +} + +func (b *broadcast) Finished() { + if b.notify != nil { + close(b.notify) + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/coalesce.go b/vendor/github.com/hashicorp/serf/serf/coalesce.go new file mode 100644 index 0000000000..567943be14 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/coalesce.go @@ -0,0 +1,80 @@ +package serf + +import ( + "time" +) + +// coalescer is a simple interface that must be implemented to be +// used inside of a coalesceLoop +type coalescer interface { + // Can the coalescer handle this event, if not it is + // directly passed through to the destination channel + Handle(Event) bool + + // Invoked to coalesce the given event + Coalesce(Event) + + // Invoked to flush the coalesced events + Flush(outChan chan<- Event) +} + +// coalescedEventCh returns an event channel where the events are coalesced +// using the given coalescer. 
+func coalescedEventCh(outCh chan<- Event, shutdownCh <-chan struct{}, + cPeriod time.Duration, qPeriod time.Duration, c coalescer) chan<- Event { + inCh := make(chan Event, 1024) + go coalesceLoop(inCh, outCh, shutdownCh, cPeriod, qPeriod, c) + return inCh +} + +// coalesceLoop is a simple long-running routine that manages the high-level +// flow of coalescing based on quiescence and a maximum quantum period. +func coalesceLoop(inCh <-chan Event, outCh chan<- Event, shutdownCh <-chan struct{}, + coalescePeriod time.Duration, quiescentPeriod time.Duration, c coalescer) { + var quiescent <-chan time.Time + var quantum <-chan time.Time + shutdown := false + +INGEST: + // Reset the timers + quantum = nil + quiescent = nil + + for { + select { + case e := <-inCh: + // Ignore any non handled events + if !c.Handle(e) { + outCh <- e + continue + } + + // Start a new quantum if we need to + // and restart the quiescent timer + if quantum == nil { + quantum = time.After(coalescePeriod) + } + quiescent = time.After(quiescentPeriod) + + // Coalesce the event + c.Coalesce(e) + + case <-quantum: + goto FLUSH + case <-quiescent: + goto FLUSH + case <-shutdownCh: + shutdown = true + goto FLUSH + } + } + +FLUSH: + // Flush the coalesced events + c.Flush(outCh) + + // Restart ingestion if we are not done + if !shutdown { + goto INGEST + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/coalesce_member.go b/vendor/github.com/hashicorp/serf/serf/coalesce_member.go new file mode 100644 index 0000000000..82fdb8dacf --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/coalesce_member.go @@ -0,0 +1,68 @@ +package serf + +type coalesceEvent struct { + Type EventType + Member *Member +} + +type memberEventCoalescer struct { + lastEvents map[string]EventType + latestEvents map[string]coalesceEvent +} + +func (c *memberEventCoalescer) Handle(e Event) bool { + switch e.EventType() { + case EventMemberJoin: + return true + case EventMemberLeave: + return true + case EventMemberFailed: + return true + case EventMemberUpdate: + return true + case EventMemberReap: + return true + default: + return false + } +} + +func (c *memberEventCoalescer) Coalesce(raw Event) { + e := raw.(MemberEvent) + for _, m := range e.Members { + c.latestEvents[m.Name] = coalesceEvent{ + Type: e.Type, + Member: &m, + } + } +} + +func (c *memberEventCoalescer) Flush(outCh chan<- Event) { + // Coalesce the various events we got into a single set of events. 
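
coalesceLoop only ever sees events through the three-method coalescer contract above. As an illustration (not code from this diff), a hypothetical coalescer that collapses a burst of user events down to the latest event per name would satisfy it like so; it must live in package serf, since the interface is unexported:

package serf

// countingCoalescer is a hypothetical coalescer that keeps only the most
// recent user event seen for each event name between flushes.
type countingCoalescer struct {
	pending map[string]Event
}

func newCountingCoalescer() coalescer {
	return &countingCoalescer{pending: make(map[string]Event)}
}

func (c *countingCoalescer) Handle(e Event) bool {
	// Claim only user events; everything else passes straight through.
	return e.EventType() == EventUser
}

func (c *countingCoalescer) Coalesce(e Event) {
	user := e.(UserEvent)
	c.pending[user.Name] = e // keep only the latest event per name
}

func (c *countingCoalescer) Flush(outCh chan<- Event) {
	for _, e := range c.pending {
		outCh <- e
	}
	c.pending = make(map[string]Event)
}
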
+ events := make(map[EventType]*MemberEvent) + for name, cevent := range c.latestEvents { + previous, ok := c.lastEvents[name] + + // If we sent the same event before, then ignore + // unless it is a MemberUpdate + if ok && previous == cevent.Type && cevent.Type != EventMemberUpdate { + continue + } + + // Update our last event + c.lastEvents[name] = cevent.Type + + // Add it to our event + newEvent, ok := events[cevent.Type] + if !ok { + newEvent = &MemberEvent{Type: cevent.Type} + events[cevent.Type] = newEvent + } + newEvent.Members = append(newEvent.Members, *cevent.Member) + } + + // Send out those events + for _, event := range events { + outCh <- *event + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/coalesce_user.go b/vendor/github.com/hashicorp/serf/serf/coalesce_user.go new file mode 100644 index 0000000000..1551b6c52c --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/coalesce_user.go @@ -0,0 +1,52 @@ +package serf + +type latestUserEvents struct { + LTime LamportTime + Events []Event +} + +type userEventCoalescer struct { + // Maps an event name into the latest versions + events map[string]*latestUserEvents +} + +func (c *userEventCoalescer) Handle(e Event) bool { + // Only handle EventUser messages + if e.EventType() != EventUser { + return false + } + + // Check if coalescing is enabled + user := e.(UserEvent) + return user.Coalesce +} + +func (c *userEventCoalescer) Coalesce(e Event) { + user := e.(UserEvent) + latest, ok := c.events[user.Name] + + // Create a new entry if there are none, or + // if this message has the newest LTime + if !ok || latest.LTime < user.LTime { + latest = &latestUserEvents{ + LTime: user.LTime, + Events: []Event{e}, + } + c.events[user.Name] = latest + return + } + + // If the the same age, save it + if latest.LTime == user.LTime { + latest.Events = append(latest.Events, e) + } +} + +func (c *userEventCoalescer) Flush(outChan chan<- Event) { + for _, latest := range c.events { + for _, e := range latest.Events { + outChan <- e + } + } + c.events = make(map[string]*latestUserEvents) +} diff --git a/vendor/github.com/hashicorp/serf/serf/config.go b/vendor/github.com/hashicorp/serf/serf/config.go new file mode 100644 index 0000000000..2403cea587 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/config.go @@ -0,0 +1,260 @@ +package serf + +import ( + "io" + "os" + "time" + + "github.com/hashicorp/memberlist" +) + +// ProtocolVersionMap is the mapping of Serf delegate protocol versions +// to memberlist protocol versions. We mask the memberlist protocols using +// our own protocol version. +var ProtocolVersionMap map[uint8]uint8 + +func init() { + ProtocolVersionMap = map[uint8]uint8{ + 5: 2, + 4: 2, + 3: 2, + 2: 2, + } +} + +// Config is the configuration for creating a Serf instance. +type Config struct { + // The name of this node. This must be unique in the cluster. If this + // is not set, Serf will set it to the hostname of the running machine. + NodeName string + + // The tags for this role, if any. This is used to provide arbitrary + // key/value metadata per-node. For example, a "role" tag may be used to + // differentiate "load-balancer" from a "web" role as parts of the same cluster. + // Tags are deprecating 'Role', and instead it acts as a special key in this + // map. + Tags map[string]string + + // EventCh is a channel that receives all the Serf events. The events + // are sent on this channel in proper ordering. 
Care must be taken that
+	// this channel doesn't block, either by processing the events quickly
+	// enough or by buffering the channel, otherwise it can block state updates
+	// within Serf itself. If no EventCh is specified, no events will be fired,
+	// but point-in-time snapshots of members can still be retrieved by
+	// calling Members on Serf.
+	EventCh chan<- Event
+
+	// ProtocolVersion is the protocol version to speak. This must be between
+	// ProtocolVersionMin and ProtocolVersionMax.
+	ProtocolVersion uint8
+
+	// BroadcastTimeout is the amount of time to wait for a broadcast
+	// message to be sent to the cluster. Broadcast messages are used for
+	// things like leave messages and force remove messages. If this is not
+	// set, a timeout of 5 seconds will be set.
+	BroadcastTimeout time.Duration
+
+	// The settings below relate to Serf's event coalescence feature. Serf
+	// is able to coalesce multiple events into single events in order to
+	// reduce the amount of noise that is sent along the EventCh. For example
+	// if five nodes quickly join, the EventCh will be sent one EventMemberJoin
+	// containing the five nodes rather than five individual EventMemberJoin
+	// events. Coalescence can mitigate potential flapping behavior.
+	//
+	// Coalescence is disabled by default and can be enabled by setting
+	// CoalescePeriod.
+	//
+	// CoalescePeriod specifies the time duration to coalesce events.
+	// For example, if this is set to 5 seconds, then all events received
+	// within 5 seconds that can be coalesced will be.
+	//
+	// QuiescentPeriod specifies the duration of time where if no events
+	// are received, coalescence immediately happens. For example, if
+	// CoalescePeriod is set to 10 seconds but QuiescentPeriod is set to 2
+	// seconds, then the events will be coalesced and dispatched if no
+	// new events are received within 2 seconds of the last event. Otherwise,
+	// every event will always be delayed by at least 10 seconds.
+	CoalescePeriod  time.Duration
+	QuiescentPeriod time.Duration
+
+	// The settings below relate to Serf's user event coalescing feature.
+	// The settings operate like above but only affect user messages and
+	// not the Member* messages that Serf generates.
+	UserCoalescePeriod  time.Duration
+	UserQuiescentPeriod time.Duration
+
+	// The settings below relate to Serf keeping track of recently
+	// failed/left nodes and attempting reconnects.
+	//
+	// ReapInterval is the interval when the reaper runs. If this is not
+	// set (it is zero), it will be set to a reasonable default.
+	//
+	// ReconnectInterval is the interval when we attempt to reconnect
+	// to failed nodes. If this is not set (it is zero), it will be set
+	// to a reasonable default.
+	//
+	// ReconnectTimeout is the amount of time to attempt to reconnect to
+	// a failed node before giving up and considering it completely gone.
+	//
+	// TombstoneTimeout is the amount of time to keep around nodes
+	// that gracefully left as tombstones for syncing state with other
+	// Serf nodes.
+	ReapInterval      time.Duration
+	ReconnectInterval time.Duration
+	ReconnectTimeout  time.Duration
+	TombstoneTimeout  time.Duration
+
+	// FlapTimeout is the amount of time below which a node that fails and
+	// then rejoins is considered a flap for telemetry purposes. This should
+	// be set less than a typical reboot time, but large enough to see actual
+	// events, given our expected detection times for a failed node.
+ FlapTimeout time.Duration + + // QueueDepthWarning is used to generate warning message if the + // number of queued messages to broadcast exceeds this number. This + // is to provide the user feedback if events are being triggered + // faster than they can be disseminated + QueueDepthWarning int + + // MaxQueueDepth is used to start dropping messages if the number + // of queued messages to broadcast exceeds this number. This is to + // prevent an unbounded growth of memory utilization + MaxQueueDepth int + + // RecentIntentTimeout is used to determine how long we store recent + // join and leave intents. This is used to guard against the case where + // Serf broadcasts an intent that arrives before the Memberlist event. + // It is important that this not be too short to avoid continuous + // rebroadcasting of dead events. + RecentIntentTimeout time.Duration + + // EventBuffer is used to control how many events are buffered. + // This is used to prevent re-delivery of events to a client. The buffer + // must be large enough to handle all "recent" events, since Serf will + // not deliver messages that are older than the oldest entry in the buffer. + // Thus if a client is generating too many events, it's possible that the + // buffer gets overrun and messages are not delivered. + EventBuffer int + + // QueryBuffer is used to control how many queries are buffered. + // This is used to prevent re-delivery of queries to a client. The buffer + // must be large enough to handle all "recent" events, since Serf will not + // deliver queries older than the oldest entry in the buffer. + // Thus if a client is generating too many queries, it's possible that the + // buffer gets overrun and messages are not delivered. + QueryBuffer int + + // QueryTimeoutMult configures the default timeout multipler for a query to run if no + // specific value is provided. Queries are real-time by nature, where the + // reply is time sensitive. As a result, results are collected in an async + // fashion, however the query must have a bounded duration. We want the timeout + // to be long enough that all nodes have time to receive the message, run a handler, + // and generate a reply. Once the timeout is exceeded, any further replies are ignored. + // The default value is + // + // Timeout = GossipInterval * QueryTimeoutMult * log(N+1) + // + QueryTimeoutMult int + + // QueryResponseSizeLimit and QuerySizeLimit limit the inbound and + // outbound payload sizes for queries, respectively. These must fit + // in a UDP packet with some additional overhead, so tuning these + // past the default values of 1024 will depend on your network + // configuration. + QueryResponseSizeLimit int + QuerySizeLimit int + + // MemberlistConfig is the memberlist configuration that Serf will + // use to do the underlying membership management and gossip. Some + // fields in the MemberlistConfig will be overwritten by Serf no + // matter what: + // + // * Name - This will always be set to the same as the NodeName + // in this configuration. + // + // * Events - Serf uses a custom event delegate. + // + // * Delegate - Serf uses a custom delegate. + // + MemberlistConfig *memberlist.Config + + // LogOutput is the location to write logs to. If this is not set, + // logs will go to stderr. + LogOutput io.Writer + + // SnapshotPath if provided is used to snapshot live nodes as well + // as lamport clock values. 
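
The QueryTimeoutMult formula above is easy to sanity-check by hand. A quick calculation with the defaults, assuming a base-10 logarithm and memberlist's 200ms LAN gossip interval (both are assumptions; neither is pinned down in this comment):

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	gossipInterval := 200 * time.Millisecond // assumed memberlist LAN default
	queryTimeoutMult := 16                   // default from DefaultConfig below
	n := 100                                 // cluster size

	// Timeout = GossipInterval * QueryTimeoutMult * ceil(log10(N+1))
	timeout := gossipInterval *
		time.Duration(queryTimeoutMult) *
		time.Duration(math.Ceil(math.Log10(float64(n+1))))
	fmt.Println(timeout) // 9.6s for a 100-node cluster
}
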
When Serf is started with a snapshot, + // it will attempt to join all the previously known nodes until one + // succeeds and will also avoid replaying old user events. + SnapshotPath string + + // RejoinAfterLeave controls our interaction with the snapshot file. + // When set to false (default), a leave causes a Serf to not rejoin + // the cluster until an explicit join is received. If this is set to + // true, we ignore the leave, and rejoin the cluster on start. + RejoinAfterLeave bool + + // EnableNameConflictResolution controls if Serf will actively attempt + // to resolve a name conflict. Since each Serf member must have a unique + // name, a cluster can run into issues if multiple nodes claim the same + // name. Without automatic resolution, Serf merely logs some warnings, but + // otherwise does not take any action. Automatic resolution detects the + // conflict and issues a special query which asks the cluster for the + // Name -> IP:Port mapping. If there is a simple majority of votes, that + // node stays while the other node will leave the cluster and exit. + EnableNameConflictResolution bool + + // DisableCoordinates controls if Serf will maintain an estimate of this + // node's network coordinate internally. A network coordinate is useful + // for estimating the network distance (i.e. round trip time) between + // two nodes. Enabling this option adds some overhead to ping messages. + DisableCoordinates bool + + // KeyringFile provides the location of a writable file where Serf can + // persist changes to the encryption keyring. + KeyringFile string + + // Merge can be optionally provided to intercept a cluster merge + // and conditionally abort the merge. + Merge MergeDelegate +} + +// Init allocates the subdata structures +func (c *Config) Init() { + if c.Tags == nil { + c.Tags = make(map[string]string) + } +} + +// DefaultConfig returns a Config struct that contains reasonable defaults +// for most of the configurations. 
+func DefaultConfig() *Config { + hostname, err := os.Hostname() + if err != nil { + panic(err) + } + + return &Config{ + NodeName: hostname, + BroadcastTimeout: 5 * time.Second, + EventBuffer: 512, + QueryBuffer: 512, + LogOutput: os.Stderr, + ProtocolVersion: 4, + ReapInterval: 15 * time.Second, + RecentIntentTimeout: 5 * time.Minute, + ReconnectInterval: 30 * time.Second, + ReconnectTimeout: 24 * time.Hour, + QueueDepthWarning: 128, + MaxQueueDepth: 4096, + TombstoneTimeout: 24 * time.Hour, + FlapTimeout: 60 * time.Second, + MemberlistConfig: memberlist.DefaultLANConfig(), + QueryTimeoutMult: 16, + QueryResponseSizeLimit: 1024, + QuerySizeLimit: 1024, + EnableNameConflictResolution: true, + DisableCoordinates: false, + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/conflict_delegate.go b/vendor/github.com/hashicorp/serf/serf/conflict_delegate.go new file mode 100644 index 0000000000..65a50156c0 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/conflict_delegate.go @@ -0,0 +1,13 @@ +package serf + +import ( + "github.com/hashicorp/memberlist" +) + +type conflictDelegate struct { + serf *Serf +} + +func (c *conflictDelegate) NotifyConflict(existing, other *memberlist.Node) { + c.serf.handleNodeConflict(existing, other) +} diff --git a/vendor/github.com/hashicorp/serf/serf/delegate.go b/vendor/github.com/hashicorp/serf/serf/delegate.go new file mode 100644 index 0000000000..8f51cb7d08 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/delegate.go @@ -0,0 +1,275 @@ +package serf + +import ( + "bytes" + "fmt" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-msgpack/codec" +) + +// delegate is the memberlist.Delegate implementation that Serf uses. +type delegate struct { + serf *Serf +} + +func (d *delegate) NodeMeta(limit int) []byte { + roleBytes := d.serf.encodeTags(d.serf.config.Tags) + if len(roleBytes) > limit { + panic(fmt.Errorf("Node tags '%v' exceeds length limit of %d bytes", d.serf.config.Tags, limit)) + } + + return roleBytes +} + +func (d *delegate) NotifyMsg(buf []byte) { + // If we didn't actually receive any data, then ignore it. 
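
Putting the configuration together: a minimal sketch of standing up a node and draining its event channel. This assumes the package's Create constructor and the event types defined later in this diff; the node name and channel size are arbitrary example values:

package main

import (
	"log"
	"time"

	"github.com/hashicorp/serf/serf"
)

func main() {
	// Buffer the channel so slow consumption never blocks Serf's
	// internals, per the EventCh warning in the Config above.
	eventCh := make(chan serf.Event, 64)

	config := serf.DefaultConfig()
	config.NodeName = "node-1"
	config.EventCh = eventCh
	config.CoalescePeriod = 5 * time.Second // opt in to event coalescing
	config.QuiescentPeriod = time.Second

	s, err := serf.Create(config)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Shutdown()

	for e := range eventCh {
		switch ev := e.(type) {
		case serf.MemberEvent:
			log.Printf("%s: %d member(s)", ev.EventType(), len(ev.Members))
		case serf.UserEvent:
			log.Printf("user event %q: %s", ev.Name, ev.Payload)
		case *serf.Query:
			_ = ev.Respond([]byte("ack")) // must respond before ev.Deadline()
		}
	}
}
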
+ if len(buf) == 0 { + return + } + metrics.AddSample([]string{"serf", "msgs", "received"}, float32(len(buf))) + + rebroadcast := false + rebroadcastQueue := d.serf.broadcasts + t := messageType(buf[0]) + switch t { + case messageLeaveType: + var leave messageLeave + if err := decodeMessage(buf[1:], &leave); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding leave message: %s", err) + break + } + + d.serf.logger.Printf("[DEBUG] serf: messageLeaveType: %s", leave.Node) + rebroadcast = d.serf.handleNodeLeaveIntent(&leave) + + case messageJoinType: + var join messageJoin + if err := decodeMessage(buf[1:], &join); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding join message: %s", err) + break + } + + d.serf.logger.Printf("[DEBUG] serf: messageJoinType: %s", join.Node) + rebroadcast = d.serf.handleNodeJoinIntent(&join) + + case messageUserEventType: + var event messageUserEvent + if err := decodeMessage(buf[1:], &event); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding user event message: %s", err) + break + } + + d.serf.logger.Printf("[DEBUG] serf: messageUserEventType: %s", event.Name) + rebroadcast = d.serf.handleUserEvent(&event) + rebroadcastQueue = d.serf.eventBroadcasts + + case messageQueryType: + var query messageQuery + if err := decodeMessage(buf[1:], &query); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding query message: %s", err) + break + } + + d.serf.logger.Printf("[DEBUG] serf: messageQueryType: %s", query.Name) + rebroadcast = d.serf.handleQuery(&query) + rebroadcastQueue = d.serf.queryBroadcasts + + case messageQueryResponseType: + var resp messageQueryResponse + if err := decodeMessage(buf[1:], &resp); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding query response message: %s", err) + break + } + + d.serf.logger.Printf("[DEBUG] serf: messageQueryResponseType: %v", resp.From) + d.serf.handleQueryResponse(&resp) + + case messageRelayType: + var header relayHeader + var handle codec.MsgpackHandle + reader := bytes.NewReader(buf[1:]) + decoder := codec.NewDecoder(reader, &handle) + if err := decoder.Decode(&header); err != nil { + d.serf.logger.Printf("[ERR] serf: Error decoding relay header: %s", err) + break + } + + // The remaining contents are the message itself, so forward that + raw := make([]byte, reader.Len()) + reader.Read(raw) + d.serf.logger.Printf("[DEBUG] serf: Relaying response to addr: %s", header.DestAddr.String()) + if err := d.serf.memberlist.SendTo(&header.DestAddr, raw); err != nil { + d.serf.logger.Printf("[ERR] serf: Error forwarding message to %s: %s", header.DestAddr.String(), err) + break + } + + default: + d.serf.logger.Printf("[WARN] serf: Received message of unknown type: %d", t) + } + + if rebroadcast { + // Copy the buffer since it we cannot rely on the slice not changing + newBuf := make([]byte, len(buf)) + copy(newBuf, buf) + + rebroadcastQueue.QueueBroadcast(&broadcast{ + msg: newBuf, + notify: nil, + }) + } +} + +func (d *delegate) GetBroadcasts(overhead, limit int) [][]byte { + msgs := d.serf.broadcasts.GetBroadcasts(overhead, limit) + + // Determine the bytes used already + bytesUsed := 0 + for _, msg := range msgs { + lm := len(msg) + bytesUsed += lm + overhead + metrics.AddSample([]string{"serf", "msgs", "sent"}, float32(lm)) + } + + // Get any additional query broadcasts + queryMsgs := d.serf.queryBroadcasts.GetBroadcasts(overhead, limit-bytesUsed) + if queryMsgs != nil { + for _, m := range queryMsgs { + lm := len(m) + bytesUsed += lm + overhead + 
metrics.AddSample([]string{"serf", "msgs", "sent"}, float32(lm)) + } + msgs = append(msgs, queryMsgs...) + } + + // Get any additional event broadcasts + eventMsgs := d.serf.eventBroadcasts.GetBroadcasts(overhead, limit-bytesUsed) + if eventMsgs != nil { + for _, m := range eventMsgs { + lm := len(m) + bytesUsed += lm + overhead + metrics.AddSample([]string{"serf", "msgs", "sent"}, float32(lm)) + } + msgs = append(msgs, eventMsgs...) + } + + return msgs +} + +func (d *delegate) LocalState(join bool) []byte { + d.serf.memberLock.RLock() + defer d.serf.memberLock.RUnlock() + d.serf.eventLock.RLock() + defer d.serf.eventLock.RUnlock() + + // Create the message to send + pp := messagePushPull{ + LTime: d.serf.clock.Time(), + StatusLTimes: make(map[string]LamportTime, len(d.serf.members)), + LeftMembers: make([]string, 0, len(d.serf.leftMembers)), + EventLTime: d.serf.eventClock.Time(), + Events: d.serf.eventBuffer, + QueryLTime: d.serf.queryClock.Time(), + } + + // Add all the join LTimes + for name, member := range d.serf.members { + pp.StatusLTimes[name] = member.statusLTime + } + + // Add all the left nodes + for _, member := range d.serf.leftMembers { + pp.LeftMembers = append(pp.LeftMembers, member.Name) + } + + // Encode the push pull state + buf, err := encodeMessage(messagePushPullType, &pp) + if err != nil { + d.serf.logger.Printf("[ERR] serf: Failed to encode local state: %v", err) + return nil + } + return buf +} + +func (d *delegate) MergeRemoteState(buf []byte, isJoin bool) { + // Ensure we have a message + if len(buf) == 0 { + d.serf.logger.Printf("[ERR] serf: Remote state is zero bytes") + return + } + + // Check the message type + if messageType(buf[0]) != messagePushPullType { + d.serf.logger.Printf("[ERR] serf: Remote state has bad type prefix: %v", buf[0]) + return + } + + // Attempt a decode + pp := messagePushPull{} + if err := decodeMessage(buf[1:], &pp); err != nil { + d.serf.logger.Printf("[ERR] serf: Failed to decode remote state: %v", err) + return + } + + // Witness the Lamport clocks first. + // We subtract 1 since no message with that clock has been sent yet + if pp.LTime > 0 { + d.serf.clock.Witness(pp.LTime - 1) + } + if pp.EventLTime > 0 { + d.serf.eventClock.Witness(pp.EventLTime - 1) + } + if pp.QueryLTime > 0 { + d.serf.queryClock.Witness(pp.QueryLTime - 1) + } + + // Process the left nodes first to avoid the LTimes from being increment + // in the wrong order + leftMap := make(map[string]struct{}, len(pp.LeftMembers)) + leave := messageLeave{} + for _, name := range pp.LeftMembers { + leftMap[name] = struct{}{} + leave.LTime = pp.StatusLTimes[name] + leave.Node = name + d.serf.handleNodeLeaveIntent(&leave) + } + + // Update any other LTimes + join := messageJoin{} + for name, statusLTime := range pp.StatusLTimes { + // Skip the left nodes + if _, ok := leftMap[name]; ok { + continue + } + + // Create an artificial join message + join.LTime = statusLTime + join.Node = name + d.serf.handleNodeJoinIntent(&join) + } + + // If we are doing a join, and eventJoinIgnore is set + // then we set the eventMinTime to the EventLTime. 
This + // prevents any of the incoming events from being processed + if isJoin && d.serf.eventJoinIgnore { + d.serf.eventLock.Lock() + if pp.EventLTime > d.serf.eventMinTime { + d.serf.eventMinTime = pp.EventLTime + } + d.serf.eventLock.Unlock() + } + + // Process all the events + userEvent := messageUserEvent{} + for _, events := range pp.Events { + if events == nil { + continue + } + userEvent.LTime = events.LTime + for _, e := range events.Events { + userEvent.Name = e.Name + userEvent.Payload = e.Payload + d.serf.handleUserEvent(&userEvent) + } + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/event.go b/vendor/github.com/hashicorp/serf/serf/event.go new file mode 100644 index 0000000000..29211393f8 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/event.go @@ -0,0 +1,174 @@ +package serf + +import ( + "fmt" + "net" + "sync" + "time" +) + +// EventType are all the types of events that may occur and be sent +// along the Serf channel. +type EventType int + +const ( + EventMemberJoin EventType = iota + EventMemberLeave + EventMemberFailed + EventMemberUpdate + EventMemberReap + EventUser + EventQuery +) + +func (t EventType) String() string { + switch t { + case EventMemberJoin: + return "member-join" + case EventMemberLeave: + return "member-leave" + case EventMemberFailed: + return "member-failed" + case EventMemberUpdate: + return "member-update" + case EventMemberReap: + return "member-reap" + case EventUser: + return "user" + case EventQuery: + return "query" + default: + panic(fmt.Sprintf("unknown event type: %d", t)) + } +} + +// Event is a generic interface for exposing Serf events +// Clients will usually need to use a type switches to get +// to a more useful type +type Event interface { + EventType() EventType + String() string +} + +// MemberEvent is the struct used for member related events +// Because Serf coalesces events, an event may contain multiple members. 
+type MemberEvent struct { + Type EventType + Members []Member +} + +func (m MemberEvent) EventType() EventType { + return m.Type +} + +func (m MemberEvent) String() string { + switch m.Type { + case EventMemberJoin: + return "member-join" + case EventMemberLeave: + return "member-leave" + case EventMemberFailed: + return "member-failed" + case EventMemberUpdate: + return "member-update" + case EventMemberReap: + return "member-reap" + default: + panic(fmt.Sprintf("unknown event type: %d", m.Type)) + } +} + +// UserEvent is the struct used for events that are triggered +// by the user and are not related to members +type UserEvent struct { + LTime LamportTime + Name string + Payload []byte + Coalesce bool +} + +func (u UserEvent) EventType() EventType { + return EventUser +} + +func (u UserEvent) String() string { + return fmt.Sprintf("user-event: %s", u.Name) +} + +// Query is the struct used by EventQuery type events +type Query struct { + LTime LamportTime + Name string + Payload []byte + + serf *Serf + id uint32 // ID is not exported, since it may change + addr []byte // Address to respond to + port uint16 // Port to respond to + deadline time.Time // Must respond by this deadline + relayFactor uint8 // Number of duplicate responses to relay back to sender + respLock sync.Mutex +} + +func (q *Query) EventType() EventType { + return EventQuery +} + +func (q *Query) String() string { + return fmt.Sprintf("query: %s", q.Name) +} + +// Deadline returns the time by which a response must be sent +func (q *Query) Deadline() time.Time { + return q.deadline +} + +// Respond is used to send a response to the user query +func (q *Query) Respond(buf []byte) error { + q.respLock.Lock() + defer q.respLock.Unlock() + + // Check if we've already responded + if q.deadline.IsZero() { + return fmt.Errorf("response already sent") + } + + // Ensure we aren't past our response deadline + if time.Now().After(q.deadline) { + return fmt.Errorf("response is past the deadline") + } + + // Create response + resp := messageQueryResponse{ + LTime: q.LTime, + ID: q.id, + From: q.serf.config.NodeName, + Payload: buf, + } + + // Send a direct response + raw, err := encodeMessage(messageQueryResponseType, &resp) + if err != nil { + return fmt.Errorf("failed to format response: %v", err) + } + + // Check the size limit + if len(raw) > q.serf.config.QueryResponseSizeLimit { + return fmt.Errorf("response exceeds limit of %d bytes", q.serf.config.QueryResponseSizeLimit) + } + + // Send the response directly to the originator + addr := net.UDPAddr{IP: q.addr, Port: int(q.port)} + if err := q.serf.memberlist.SendTo(&addr, raw); err != nil { + return err + } + + // Relay the response through up to relayFactor other nodes + if err := q.serf.relayResponse(q.relayFactor, addr, &resp); err != nil { + return err + } + + // Clear the deadline, responses sent + q.deadline = time.Time{} + return nil +} diff --git a/vendor/github.com/hashicorp/serf/serf/event_delegate.go b/vendor/github.com/hashicorp/serf/serf/event_delegate.go new file mode 100644 index 0000000000..e201322819 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/event_delegate.go @@ -0,0 +1,21 @@ +package serf + +import ( + "github.com/hashicorp/memberlist" +) + +type eventDelegate struct { + serf *Serf +} + +func (e *eventDelegate) NotifyJoin(n *memberlist.Node) { + e.serf.handleNodeJoin(n) +} + +func (e *eventDelegate) NotifyLeave(n *memberlist.Node) { + e.serf.handleNodeLeave(n) +} + +func (e *eventDelegate) NotifyUpdate(n *memberlist.Node) { + 
e.serf.handleNodeUpdate(n) +} diff --git a/vendor/github.com/hashicorp/serf/serf/internal_query.go b/vendor/github.com/hashicorp/serf/serf/internal_query.go new file mode 100644 index 0000000000..128b2cf214 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/internal_query.go @@ -0,0 +1,312 @@ +package serf + +import ( + "encoding/base64" + "log" + "strings" +) + +const ( + // This is the prefix we use for queries that are internal to Serf. + // They are handled internally, and not forwarded to a client. + InternalQueryPrefix = "_serf_" + + // pingQuery is run to check for reachability + pingQuery = "ping" + + // conflictQuery is run to resolve a name conflict + conflictQuery = "conflict" + + // installKeyQuery is used to install a new key + installKeyQuery = "install-key" + + // useKeyQuery is used to change the primary encryption key + useKeyQuery = "use-key" + + // removeKeyQuery is used to remove a key from the keyring + removeKeyQuery = "remove-key" + + // listKeysQuery is used to list all known keys in the cluster + listKeysQuery = "list-keys" +) + +// internalQueryName is used to generate a query name for an internal query +func internalQueryName(name string) string { + return InternalQueryPrefix + name +} + +// serfQueries is used to listen for queries that start with +// _serf and respond to them as appropriate. +type serfQueries struct { + inCh chan Event + logger *log.Logger + outCh chan<- Event + serf *Serf + shutdownCh <-chan struct{} +} + +// nodeKeyResponse is used to store the result from an individual node while +// replying to key modification queries +type nodeKeyResponse struct { + // Result indicates true/false if there were errors or not + Result bool + + // Message contains error messages or other information + Message string + + // Keys is used in listing queries to relay a list of installed keys + Keys []string +} + +// newSerfQueries is used to create a new serfQueries. We return an event +// channel that is ingested and forwarded to an outCh. Any Queries that +// have the InternalQueryPrefix are handled instead of forwarded. +func newSerfQueries(serf *Serf, logger *log.Logger, outCh chan<- Event, shutdownCh <-chan struct{}) (chan<- Event, error) { + inCh := make(chan Event, 1024) + q := &serfQueries{ + inCh: inCh, + logger: logger, + outCh: outCh, + serf: serf, + shutdownCh: shutdownCh, + } + go q.stream() + return inCh, nil +} + +// stream is a long running routine to ingest the event stream +func (s *serfQueries) stream() { + for { + select { + case e := <-s.inCh: + // Check if this is a query we should process + if q, ok := e.(*Query); ok && strings.HasPrefix(q.Name, InternalQueryPrefix) { + go s.handleQuery(q) + + } else if s.outCh != nil { + s.outCh <- e + } + + case <-s.shutdownCh: + return + } + } +} + +// handleQuery is invoked when we get an internal query +func (s *serfQueries) handleQuery(q *Query) { + // Get the queryName after the initial prefix + queryName := q.Name[len(InternalQueryPrefix):] + switch queryName { + case pingQuery: + // Nothing to do, we will ack the query + case conflictQuery: + s.handleConflict(q) + case installKeyQuery: + s.handleInstallKey(q) + case useKeyQuery: + s.handleUseKey(q) + case removeKeyQuery: + s.handleRemoveKey(q) + case listKeysQuery: + s.handleListKeys(q) + default: + s.logger.Printf("[WARN] serf: Unhandled internal query '%s'", queryName) + } +} + +// handleConflict is invoked when we get a query that is attempting to +// disambiguate a name conflict. 
The payload is a node name, and the response
+// should be the address we believe that node is at, if any.
+func (s *serfQueries) handleConflict(q *Query) {
+	// The target node name is the payload
+	node := string(q.Payload)
+
+	// Do not respond to the query if it is about us
+	if node == s.serf.config.NodeName {
+		return
+	}
+	s.logger.Printf("[DEBUG] serf: Got conflict resolution query for '%s'", node)
+
+	// Look for the member info
+	var out *Member
+	s.serf.memberLock.Lock()
+	if member, ok := s.serf.members[node]; ok {
+		out = &member.Member
+	}
+	s.serf.memberLock.Unlock()
+
+	// Encode the response
+	buf, err := encodeMessage(messageConflictResponseType, out)
+	if err != nil {
+		s.logger.Printf("[ERR] serf: Failed to encode conflict query response: %v", err)
+		return
+	}
+
+	// Send our answer
+	if err := q.Respond(buf); err != nil {
+		s.logger.Printf("[ERR] serf: Failed to respond to conflict query: %v", err)
+	}
+}
+
+// sendKeyResponse handles responding to key-related queries.
+func (s *serfQueries) sendKeyResponse(q *Query, resp *nodeKeyResponse) {
+	buf, err := encodeMessage(messageKeyResponseType, resp)
+	if err != nil {
+		s.logger.Printf("[ERR] serf: Failed to encode key response: %v", err)
+		return
+	}
+
+	if err := q.Respond(buf); err != nil {
+		s.logger.Printf("[ERR] serf: Failed to respond to key query: %v", err)
+		return
+	}
+}
+
+// handleInstallKey is invoked whenever a new encryption key is received from
+// another member in the cluster, and handles the process of installing it onto
+// the memberlist keyring. This type of query may fail if the provided key does
+// not fit the constraints that memberlist enforces. If the query fails, the
+// response will contain the error message so that it may be relayed.
+func (s *serfQueries) handleInstallKey(q *Query) {
+	response := nodeKeyResponse{Result: false}
+	keyring := s.serf.config.MemberlistConfig.Keyring
+	req := keyRequest{}
+
+	err := decodeMessage(q.Payload[1:], &req)
+	if err != nil {
+		s.logger.Printf("[ERR] serf: Failed to decode key request: %v", err)
+		goto SEND
+	}
+
+	if !s.serf.EncryptionEnabled() {
+		response.Message = "No keyring to modify (encryption not enabled)"
+		s.logger.Printf("[ERR] serf: No keyring to modify (encryption not enabled)")
+		goto SEND
+	}
+
+	s.logger.Printf("[INFO] serf: Received install-key query")
+	if err := keyring.AddKey(req.Key); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to install key: %s", err)
+		goto SEND
+	}
+
+	if err := s.serf.writeKeyringFile(); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to write keyring file: %s", err)
+		goto SEND
+	}
+
+	response.Result = true
+
+SEND:
+	s.sendKeyResponse(q, &response)
+}
+
+// handleUseKey is invoked whenever a query is received to mark a different key
+// in the internal keyring as the primary key. This type of query may fail due
+// to operator error (requested key not in ring), and thus sends error messages
+// back in the response.
+func (s *serfQueries) handleUseKey(q *Query) {
+	response := nodeKeyResponse{Result: false}
+	keyring := s.serf.config.MemberlistConfig.Keyring
+	req := keyRequest{}
+
+	err := decodeMessage(q.Payload[1:], &req)
+	if err != nil {
+		s.logger.Printf("[ERR] serf: Failed to decode key request: %v", err)
+		goto SEND
+	}
+
+	if !s.serf.EncryptionEnabled() {
+		response.Message = "No keyring to modify (encryption not enabled)"
+		s.logger.Printf("[ERR] serf: No keyring to modify (encryption not enabled)")
+		goto SEND
+	}
+
+	s.logger.Printf("[INFO] serf: Received use-key query")
+	if err := keyring.UseKey(req.Key); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to change primary key: %s", err)
+		goto SEND
+	}
+
+	if err := s.serf.writeKeyringFile(); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to write keyring file: %s", err)
+		goto SEND
+	}
+
+	response.Result = true
+
+SEND:
+	s.sendKeyResponse(q, &response)
+}
+
+// handleRemoveKey is invoked when a query is received to remove a particular
+// key from the keyring. This type of query can fail if the key requested for
+// deletion is currently the primary key in the keyring, so it will reply to
+// the query with any relevant errors from the operation.
+func (s *serfQueries) handleRemoveKey(q *Query) {
+	response := nodeKeyResponse{Result: false}
+	keyring := s.serf.config.MemberlistConfig.Keyring
+	req := keyRequest{}
+
+	err := decodeMessage(q.Payload[1:], &req)
+	if err != nil {
+		s.logger.Printf("[ERR] serf: Failed to decode key request: %v", err)
+		goto SEND
+	}
+
+	if !s.serf.EncryptionEnabled() {
+		response.Message = "No keyring to modify (encryption not enabled)"
+		s.logger.Printf("[ERR] serf: No keyring to modify (encryption not enabled)")
+		goto SEND
+	}
+
+	s.logger.Printf("[INFO] serf: Received remove-key query")
+	if err := keyring.RemoveKey(req.Key); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to remove key: %s", err)
+		goto SEND
+	}
+
+	if err := s.serf.writeKeyringFile(); err != nil {
+		response.Message = err.Error()
+		s.logger.Printf("[ERR] serf: Failed to write keyring file: %s", err)
+		goto SEND
+	}
+
+	response.Result = true
+
+SEND:
+	s.sendKeyResponse(q, &response)
+}
+
+// handleListKeys is invoked when a query is received to return a list of all
+// installed keys the Serf instance knows of. For performance, the keys are
+// encoded to base64 on each of the members to remove this burden from the
+// node asking for the results.
+func (s *serfQueries) handleListKeys(q *Query) {
+	response := nodeKeyResponse{Result: false}
+	keyring := s.serf.config.MemberlistConfig.Keyring
+
+	if !s.serf.EncryptionEnabled() {
+		response.Message = "Keyring is empty (encryption not enabled)"
+		s.logger.Printf("[ERR] serf: Keyring is empty (encryption not enabled)")
+		goto SEND
+	}
+
+	s.logger.Printf("[INFO] serf: Received list-keys query")
+	for _, keyBytes := range keyring.GetKeys() {
+		// Encode the keys before sending the response. This should help take
+		// some of the burden of doing this off of the asking member.
+ key := base64.StdEncoding.EncodeToString(keyBytes) + response.Keys = append(response.Keys, key) + } + response.Result = true + +SEND: + s.sendKeyResponse(q, &response) +} diff --git a/vendor/github.com/hashicorp/serf/serf/keymanager.go b/vendor/github.com/hashicorp/serf/serf/keymanager.go new file mode 100644 index 0000000000..fd53182fc5 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/keymanager.go @@ -0,0 +1,192 @@ +package serf + +import ( + "encoding/base64" + "fmt" + "sync" +) + +// KeyManager encapsulates all functionality within Serf for handling +// encryption keyring changes across a cluster. +type KeyManager struct { + serf *Serf + + // Lock to protect read and write operations + l sync.RWMutex +} + +// keyRequest is used to contain input parameters which get broadcasted to all +// nodes as part of a key query operation. +type keyRequest struct { + Key []byte +} + +// KeyResponse is used to relay a query for a list of all keys in use. +type KeyResponse struct { + Messages map[string]string // Map of node name to response message + NumNodes int // Total nodes memberlist knows of + NumResp int // Total responses received + NumErr int // Total errors from request + + // Keys is a mapping of the base64-encoded value of the key bytes to the + // number of nodes that have the key installed. + Keys map[string]int +} + +// KeyRequestOptions is used to contain optional parameters for a keyring operation +type KeyRequestOptions struct { + // RelayFactor is the number of duplicate query responses to send by relaying through + // other nodes, for redundancy + RelayFactor uint8 +} + +// streamKeyResp takes care of reading responses from a channel and composing +// them into a KeyResponse. It will update a KeyResponse *in place* and +// therefore has nothing to return. +func (k *KeyManager) streamKeyResp(resp *KeyResponse, ch <-chan NodeResponse) { + for r := range ch { + var nodeResponse nodeKeyResponse + + resp.NumResp++ + + // Decode the response + if len(r.Payload) < 1 || messageType(r.Payload[0]) != messageKeyResponseType { + resp.Messages[r.From] = fmt.Sprintf( + "Invalid key query response type: %v", r.Payload) + resp.NumErr++ + goto NEXT + } + if err := decodeMessage(r.Payload[1:], &nodeResponse); err != nil { + resp.Messages[r.From] = fmt.Sprintf( + "Failed to decode key query response: %v", r.Payload) + resp.NumErr++ + goto NEXT + } + + if !nodeResponse.Result { + resp.Messages[r.From] = nodeResponse.Message + resp.NumErr++ + } + + // Currently only used for key list queries, this adds keys to a counter + // and increments them for each node response which contains them. + for _, key := range nodeResponse.Keys { + if _, ok := resp.Keys[key]; !ok { + resp.Keys[key] = 1 + } else { + resp.Keys[key]++ + } + } + + NEXT: + // Return early if all nodes have responded. This allows us to avoid + // waiting for the full timeout when there is nothing left to do. + if resp.NumResp == resp.NumNodes { + return + } + } +} + +// handleKeyRequest performs query broadcasting to all members for any type of +// key operation and manages gathering responses and packing them up into a +// KeyResponse for uniform response handling. 
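+//
+// In outline (names from this file): the key is base64-decoded, wrapped in a
+// keyRequest, broadcast as an internal query via Serf.Query, and the replies
+// are folded into the aggregate KeyResponse by streamKeyResp.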
+func (k *KeyManager) handleKeyRequest(key, query string, opts *KeyRequestOptions) (*KeyResponse, error) { + resp := &KeyResponse{ + Messages: make(map[string]string), + Keys: make(map[string]int), + } + qName := internalQueryName(query) + + // Decode the new key into raw bytes + rawKey, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return resp, err + } + + // Encode the query request + req, err := encodeMessage(messageKeyRequestType, keyRequest{Key: rawKey}) + if err != nil { + return resp, err + } + + qParam := k.serf.DefaultQueryParams() + if opts != nil { + qParam.RelayFactor = opts.RelayFactor + } + queryResp, err := k.serf.Query(qName, req, qParam) + if err != nil { + return resp, err + } + + // Handle the response stream and populate the KeyResponse + resp.NumNodes = k.serf.memberlist.NumMembers() + k.streamKeyResp(resp, queryResp.respCh) + + // Check the response for any reported failure conditions + if resp.NumErr != 0 { + return resp, fmt.Errorf("%d/%d nodes reported failure", resp.NumErr, resp.NumNodes) + } + if resp.NumResp != resp.NumNodes { + return resp, fmt.Errorf("%d/%d nodes reported success", resp.NumResp, resp.NumNodes) + } + + return resp, nil +} + +// InstallKey handles broadcasting a query to all members and gathering +// responses from each of them, returning a list of messages from each node +// and any applicable error conditions. +func (k *KeyManager) InstallKey(key string) (*KeyResponse, error) { + return k.InstallKeyWithOptions(key, nil) +} + +func (k *KeyManager) InstallKeyWithOptions(key string, opts *KeyRequestOptions) (*KeyResponse, error) { + k.l.Lock() + defer k.l.Unlock() + + return k.handleKeyRequest(key, installKeyQuery, opts) +} + +// UseKey handles broadcasting a primary key change to all members in the +// cluster, and gathering any response messages. If successful, there should +// be an empty KeyResponse returned. +func (k *KeyManager) UseKey(key string) (*KeyResponse, error) { + return k.UseKeyWithOptions(key, nil) +} + +func (k *KeyManager) UseKeyWithOptions(key string, opts *KeyRequestOptions) (*KeyResponse, error) { + k.l.Lock() + defer k.l.Unlock() + + return k.handleKeyRequest(key, useKeyQuery, opts) +} + +// RemoveKey handles broadcasting a key to the cluster for removal. Each member +// will receive this event, and if they have the key in their keyring, remove +// it. If any errors are encountered, RemoveKey will collect and relay them. +func (k *KeyManager) RemoveKey(key string) (*KeyResponse, error) { + return k.RemoveKeyWithOptions(key, nil) +} + +func (k *KeyManager) RemoveKeyWithOptions(key string, opts *KeyRequestOptions) (*KeyResponse, error) { + k.l.Lock() + defer k.l.Unlock() + + return k.handleKeyRequest(key, removeKeyQuery, opts) +} + +// ListKeys is used to collect installed keys from members in a Serf cluster +// and return an aggregated list of all installed keys. This is useful to +// operators to ensure that there are no lingering keys installed on any agents. +// Since having multiple keys installed can cause performance penalties in some +// cases, it's important to verify this information and remove unneeded keys. 
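+//
+// A minimal usage sketch (assumes s is a *Serf created elsewhere; KeyManager
+// is the accessor defined in serf.go):
+//
+//	resp, err := s.KeyManager().ListKeys()
+//	if err != nil {
+//		log.Printf("list-keys failed: %v", err)
+//	}
+//	for key, count := range resp.Keys {
+//		log.Printf("key %s installed on %d/%d nodes", key, count, resp.NumNodes)
+//	}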
+func (k *KeyManager) ListKeys() (*KeyResponse, error) { + return k.ListKeysWithOptions(nil) +} + +func (k *KeyManager) ListKeysWithOptions(opts *KeyRequestOptions) (*KeyResponse, error) { + k.l.RLock() + defer k.l.RUnlock() + + return k.handleKeyRequest("", listKeysQuery, opts) +} \ No newline at end of file diff --git a/vendor/github.com/hashicorp/serf/serf/lamport.go b/vendor/github.com/hashicorp/serf/serf/lamport.go new file mode 100644 index 0000000000..08f4aa7a62 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/lamport.go @@ -0,0 +1,45 @@ +package serf + +import ( + "sync/atomic" +) + +// LamportClock is a thread safe implementation of a lamport clock. It +// uses efficient atomic operations for all of its functions, falling back +// to a heavy lock only if there are enough CAS failures. +type LamportClock struct { + counter uint64 +} + +// LamportTime is the value of a LamportClock. +type LamportTime uint64 + +// Time is used to return the current value of the lamport clock +func (l *LamportClock) Time() LamportTime { + return LamportTime(atomic.LoadUint64(&l.counter)) +} + +// Increment is used to increment and return the value of the lamport clock +func (l *LamportClock) Increment() LamportTime { + return LamportTime(atomic.AddUint64(&l.counter, 1)) +} + +// Witness is called to update our local clock if necessary after +// witnessing a clock value received from another process +func (l *LamportClock) Witness(v LamportTime) { +WITNESS: + // If the other value is old, we do not need to do anything + cur := atomic.LoadUint64(&l.counter) + other := uint64(v) + if other < cur { + return + } + + // Ensure that our local clock is at least one ahead. + if !atomic.CompareAndSwapUint64(&l.counter, cur, other+1) { + // The CAS failed, so we just retry. Eventually our CAS should + // succeed or a future witness will pass us by and our witness + // will end. 
+		goto WITNESS
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/serf/merge_delegate.go b/vendor/github.com/hashicorp/serf/serf/merge_delegate.go
new file mode 100644
index 0000000000..7fdc732887
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/serf/merge_delegate.go
@@ -0,0 +1,44 @@
+package serf
+
+import (
+	"net"
+
+	"github.com/hashicorp/memberlist"
+)
+
+type MergeDelegate interface {
+	NotifyMerge([]*Member) error
+}
+
+type mergeDelegate struct {
+	serf *Serf
+}
+
+func (m *mergeDelegate) NotifyMerge(nodes []*memberlist.Node) error {
+	members := make([]*Member, len(nodes))
+	for idx, n := range nodes {
+		members[idx] = m.nodeToMember(n)
+	}
+	return m.serf.config.Merge.NotifyMerge(members)
+}
+
+func (m *mergeDelegate) NotifyAlive(peer *memberlist.Node) error {
+	member := m.nodeToMember(peer)
+	return m.serf.config.Merge.NotifyMerge([]*Member{member})
+}
+
+func (m *mergeDelegate) nodeToMember(n *memberlist.Node) *Member {
+	return &Member{
+		Name:        n.Name,
+		Addr:        net.IP(n.Addr),
+		Port:        n.Port,
+		Tags:        m.serf.decodeTags(n.Meta),
+		Status:      StatusNone,
+		ProtocolMin: n.PMin,
+		ProtocolMax: n.PMax,
+		ProtocolCur: n.PCur,
+		DelegateMin: n.DMin,
+		DelegateMax: n.DMax,
+		DelegateCur: n.DCur,
+	}
+}
diff --git a/vendor/github.com/hashicorp/serf/serf/messages.go b/vendor/github.com/hashicorp/serf/serf/messages.go
new file mode 100644
index 0000000000..20df5b8e83
--- /dev/null
+++ b/vendor/github.com/hashicorp/serf/serf/messages.go
@@ -0,0 +1,173 @@
+package serf
+
+import (
+	"bytes"
+	"net"
+	"time"
+
+	"github.com/hashicorp/go-msgpack/codec"
+)
+
+// messageType are the types of gossip messages Serf will send along
+// memberlist.
+type messageType uint8
+
+const (
+	messageLeaveType messageType = iota
+	messageJoinType
+	messagePushPullType
+	messageUserEventType
+	messageQueryType
+	messageQueryResponseType
+	messageConflictResponseType
+	messageKeyRequestType
+	messageKeyResponseType
+	messageRelayType
+)
+
+const (
+	// Ack flag is used to force receiver to send an ack back
+	queryFlagAck uint32 = 1 << iota
+
+	// NoBroadcast is used to prevent re-broadcast of a query.
+	// this can be used to selectively send queries to individual members
+	queryFlagNoBroadcast
+)
+
+// filterType is used with a queryFilter to specify the type of
+// filter we are sending
+type filterType uint8
+
+const (
+	filterNodeType filterType = iota
+	filterTagType
+)
+
+// messageJoin is the message broadcasted after we join to
+// associate the node with a lamport clock
+type messageJoin struct {
+	LTime LamportTime
+	Node  string
+}
+
+// messageLeave is the message broadcasted to signal the intention to
+// leave.
+type messageLeave struct {
+	LTime LamportTime
+	Node  string
+}
+
+// messagePushPull is used when doing a state exchange. This
+// is a relatively large message, but is sent infrequently
+type messagePushPull struct {
+	LTime        LamportTime            // Current node lamport time
+	StatusLTimes map[string]LamportTime // Maps the node to its status time
+	LeftMembers  []string               // List of left nodes
+	EventLTime   LamportTime            // Lamport time for event clock
+	Events       []*userEvents          // Recent events
+	QueryLTime   LamportTime            // Lamport time for query clock
+}
+
+// messageUserEvent is used for user-generated events
+type messageUserEvent struct {
+	LTime   LamportTime
+	Name    string
+	Payload []byte
+	CC      bool // "Can Coalesce".
Zero value is compatible with Serf 0.1 +} + +// messageQuery is used for query events +type messageQuery struct { + LTime LamportTime // Event lamport time + ID uint32 // Query ID, randomly generated + Addr []byte // Source address, used for a direct reply + Port uint16 // Source port, used for a direct reply + Filters [][]byte // Potential query filters + Flags uint32 // Used to provide various flags + RelayFactor uint8 // Used to set the number of duplicate relayed responses + Timeout time.Duration // Maximum time between delivery and response + Name string // Query name + Payload []byte // Query payload +} + +// Ack checks if the ack flag is set +func (m *messageQuery) Ack() bool { + return (m.Flags & queryFlagAck) != 0 +} + +// NoBroadcast checks if the no broadcast flag is set +func (m *messageQuery) NoBroadcast() bool { + return (m.Flags & queryFlagNoBroadcast) != 0 +} + +// filterNode is used with the filterNodeType, and is a list +// of node names +type filterNode []string + +// filterTag is used with the filterTagType and is a regular +// expression to apply to a tag +type filterTag struct { + Tag string + Expr string +} + +// messageQueryResponse is used to respond to a query +type messageQueryResponse struct { + LTime LamportTime // Event lamport time + ID uint32 // Query ID + From string // Node name + Flags uint32 // Used to provide various flags + Payload []byte // Optional response payload +} + +// Ack checks if the ack flag is set +func (m *messageQueryResponse) Ack() bool { + return (m.Flags & queryFlagAck) != 0 +} + +func decodeMessage(buf []byte, out interface{}) error { + var handle codec.MsgpackHandle + return codec.NewDecoder(bytes.NewReader(buf), &handle).Decode(out) +} + +func encodeMessage(t messageType, msg interface{}) ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteByte(uint8(t)) + + handle := codec.MsgpackHandle{} + encoder := codec.NewEncoder(buf, &handle) + err := encoder.Encode(msg) + return buf.Bytes(), err +} + +// relayHeader is used to store the end destination of a relayed message +type relayHeader struct { + DestAddr net.UDPAddr +} + +// encodeRelayMessage wraps a message in the messageRelayType, adding the length and +// address of the end recipient to the front of the message +func encodeRelayMessage(t messageType, addr net.UDPAddr, msg interface{}) ([]byte, error) { + buf := bytes.NewBuffer(nil) + handle := codec.MsgpackHandle{} + encoder := codec.NewEncoder(buf, &handle) + + buf.WriteByte(uint8(messageRelayType)) + if err := encoder.Encode(relayHeader{DestAddr: addr}); err != nil { + return nil, err + } + + buf.WriteByte(uint8(t)) + err := encoder.Encode(msg) + return buf.Bytes(), err +} + +func encodeFilter(f filterType, filt interface{}) ([]byte, error) { + buf := bytes.NewBuffer(nil) + buf.WriteByte(uint8(f)) + + handle := codec.MsgpackHandle{} + encoder := codec.NewEncoder(buf, &handle) + err := encoder.Encode(filt) + return buf.Bytes(), err +} diff --git a/vendor/github.com/hashicorp/serf/serf/ping_delegate.go b/vendor/github.com/hashicorp/serf/serf/ping_delegate.go new file mode 100644 index 0000000000..a482685a20 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/ping_delegate.go @@ -0,0 +1,89 @@ +package serf + +import ( + "bytes" + "log" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/memberlist" + "github.com/hashicorp/serf/coordinate" +) + +// pingDelegate is notified when memberlist successfully completes a direct ping +// of a peer node. 
We use this to update our estimated network coordinate, as +// well as cache the coordinate of the peer. +type pingDelegate struct { + serf *Serf +} + +const ( + // PingVersion is an internal version for the ping message, above the normal + // versioning we get from the protocol version. This enables small updates + // to the ping message without a full protocol bump. + PingVersion = 1 +) + +// AckPayload is called to produce a payload to send back in response to a ping +// request. +func (p *pingDelegate) AckPayload() []byte { + var buf bytes.Buffer + + // The first byte is the version number, forming a simple header. + version := []byte{PingVersion} + buf.Write(version) + + // The rest of the message is the serialized coordinate. + enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{}) + if err := enc.Encode(p.serf.coordClient.GetCoordinate()); err != nil { + log.Printf("[ERR] serf: Failed to encode coordinate: %v\n", err) + } + return buf.Bytes() +} + +// NotifyPingComplete is called when this node successfully completes a direct ping +// of a peer node. +func (p *pingDelegate) NotifyPingComplete(other *memberlist.Node, rtt time.Duration, payload []byte) { + if payload == nil || len(payload) == 0 { + return + } + + // Verify ping version in the header. + version := payload[0] + if version != PingVersion { + log.Printf("[ERR] serf: Unsupported ping version: %v", version) + return + } + + // Process the remainder of the message as a coordinate. + r := bytes.NewReader(payload[1:]) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + var coord coordinate.Coordinate + if err := dec.Decode(&coord); err != nil { + log.Printf("[ERR] serf: Failed to decode coordinate from ping: %v", err) + } + + // Apply the update. Since this is a coordinate coming from some place + // else we harden this and look for dimensionality problems proactively. + before := p.serf.coordClient.GetCoordinate() + if before.IsCompatibleWith(&coord) { + after := p.serf.coordClient.Update(other.Name, &coord, rtt) + + // Publish some metrics to give us an idea of how much we are + // adjusting each time we update. + d := float32(before.DistanceTo(after).Seconds() * 1.0e3) + metrics.AddSample([]string{"serf", "coordinate", "adjustment-ms"}, d) + + // Cache the coordinate for the other node, and add our own + // to the cache as well since it just got updated. This lets + // users call GetCachedCoordinate with our node name, which is + // more friendly. + p.serf.coordCacheLock.Lock() + p.serf.coordCache[other.Name] = &coord + p.serf.coordCache[p.serf.config.NodeName] = p.serf.coordClient.GetCoordinate() + p.serf.coordCacheLock.Unlock() + } else { + log.Printf("[ERR] serf: Rejected bad coordinate: %v\n", coord) + } +} diff --git a/vendor/github.com/hashicorp/serf/serf/query.go b/vendor/github.com/hashicorp/serf/serf/query.go new file mode 100644 index 0000000000..5412821e30 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/query.go @@ -0,0 +1,294 @@ +package serf + +import ( + "fmt" + "math" + "math/rand" + "net" + "regexp" + "sync" + "time" +) + +// QueryParam is provided to Query() to configure the parameters of the +// query. If not provided, sane defaults will be used. 
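+//
+// An illustrative parameter set (tag name and regex pattern are hypothetical):
+//
+//	params := &QueryParam{
+//		FilterTags:  map[string]string{"role": "^web$"}, // regex per tag
+//		RequestAck:  true,
+//		RelayFactor: 2,
+//	}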
+type QueryParam struct {
+	// If provided, we restrict the nodes that should respond to those
+	// with names in this list
+	FilterNodes []string
+
+	// FilterTags maps a tag name to a regular expression that is applied
+	// to restrict the nodes that should respond
+	FilterTags map[string]string
+
+	// If true, we are requesting a delivery acknowledgement from
+	// every node that meets the filter requirement. This means nodes
+	// that receive the message but do not pass the filters will not
+	// send an ack.
+	RequestAck bool
+
+	// RelayFactor controls the number of duplicate responses to relay
+	// back to the sender through other nodes for redundancy.
+	RelayFactor uint8
+
+	// The timeout limits how long the query is left open. If not provided,
+	// then a default timeout is used based on the configuration of Serf
+	Timeout time.Duration
+}
+
+// DefaultQueryTimeout returns the default timeout value for a query
+// Computed as GossipInterval * QueryTimeoutMult * log(N+1)
+func (s *Serf) DefaultQueryTimeout() time.Duration {
+	n := s.memberlist.NumMembers()
+	timeout := s.config.MemberlistConfig.GossipInterval
+	timeout *= time.Duration(s.config.QueryTimeoutMult)
+	timeout *= time.Duration(math.Ceil(math.Log10(float64(n + 1))))
+	return timeout
+}
+
+// DefaultQueryParams is used to return the default query parameters
+func (s *Serf) DefaultQueryParams() *QueryParam {
+	return &QueryParam{
+		FilterNodes: nil,
+		FilterTags:  nil,
+		RequestAck:  false,
+		Timeout:     s.DefaultQueryTimeout(),
+	}
+}
+
+// encodeFilters is used to convert the filters into the wire format
+func (q *QueryParam) encodeFilters() ([][]byte, error) {
+	var filters [][]byte
+
+	// Add the node filter
+	if len(q.FilterNodes) > 0 {
+		if buf, err := encodeFilter(filterNodeType, q.FilterNodes); err != nil {
+			return nil, err
+		} else {
+			filters = append(filters, buf)
+		}
+	}
+
+	// Add the tag filters
+	for tag, expr := range q.FilterTags {
+		filt := filterTag{tag, expr}
+		if buf, err := encodeFilter(filterTagType, &filt); err != nil {
+			return nil, err
+		} else {
+			filters = append(filters, buf)
+		}
+	}
+
+	return filters, nil
+}
+
+// QueryResponse is returned for each new Query. It is used to collect
+// Ack's as well as responses and to provide those back to a client.
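+//
+// A consumption sketch, assuming the query was sent with RequestAck set
+// (both channels are closed once the query finishes):
+//
+//	for !resp.Finished() {
+//		select {
+//		case a, ok := <-resp.AckCh():
+//			if ok {
+//				log.Printf("ack from %s", a)
+//			}
+//		case r, ok := <-resp.ResponseCh():
+//			if ok {
+//				log.Printf("%s responded: %s", r.From, r.Payload)
+//			}
+//		}
+//	}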
+type QueryResponse struct {
+	// ackCh is used to send the name of a node for which we've received an ack
+	ackCh chan string
+
+	// deadline is the query end time (start + query timeout)
+	deadline time.Time
+
+	// Query ID
+	id uint32
+
+	// Stores the LTime of the query
+	lTime LamportTime
+
+	// respCh is used to send a response from a node
+	respCh chan NodeResponse
+
+	// acks/responses are used to track the nodes that have sent an ack/response
+	acks      map[string]struct{}
+	responses map[string]struct{}
+
+	closed    bool
+	closeLock sync.Mutex
+}
+
+// newQueryResponse is used to construct a new query response
+func newQueryResponse(n int, q *messageQuery) *QueryResponse {
+	resp := &QueryResponse{
+		deadline:  time.Now().Add(q.Timeout),
+		id:        q.ID,
+		lTime:     q.LTime,
+		respCh:    make(chan NodeResponse, n),
+		responses: make(map[string]struct{}),
+	}
+	if q.Ack() {
+		resp.ackCh = make(chan string, n)
+		resp.acks = make(map[string]struct{})
+	}
+	return resp
+}
+
+// Close is used to close the query, which will close the underlying
+// channels and prevent further deliveries
+func (r *QueryResponse) Close() {
+	r.closeLock.Lock()
+	defer r.closeLock.Unlock()
+	if r.closed {
+		return
+	}
+	r.closed = true
+	if r.ackCh != nil {
+		close(r.ackCh)
+	}
+	if r.respCh != nil {
+		close(r.respCh)
+	}
+}
+
+// Deadline returns the ending deadline of the query
+func (r *QueryResponse) Deadline() time.Time {
+	return r.deadline
+}
+
+// Finished returns if the query is finished running
+func (r *QueryResponse) Finished() bool {
+	return r.closed || time.Now().After(r.deadline)
+}
+
+// AckCh returns a channel that can be used to listen for acks.
+// Channel will be closed when the query is finished. This is nil
+// if the query did not specify RequestAck.
+func (r *QueryResponse) AckCh() <-chan string {
+	return r.ackCh
+}
+
+// ResponseCh returns a channel that can be used to listen for responses.
+// Channel will be closed when the query is finished.
+func (r *QueryResponse) ResponseCh() <-chan NodeResponse {
+	return r.respCh
+}
+
+// NodeResponse is used to represent a single response from a node
+type NodeResponse struct {
+	From    string
+	Payload []byte
+}
+
+// shouldProcessQuery checks if a query should be processed given
+// a set of filters.
+func (s *Serf) shouldProcessQuery(filters [][]byte) bool {
+	for _, filter := range filters {
+		switch filterType(filter[0]) {
+		case filterNodeType:
+			// Decode the filter
+			var nodes filterNode
+			if err := decodeMessage(filter[1:], &nodes); err != nil {
+				s.logger.Printf("[WARN] serf: failed to decode filterNodeType: %v", err)
+				return false
+			}
+
+			// Check if we are being targeted
+			found := false
+			for _, n := range nodes {
+				if n == s.config.NodeName {
+					found = true
+					break
+				}
+			}
+			if !found {
+				return false
+			}
+
+		case filterTagType:
+			// Decode the filter
+			var filt filterTag
+			if err := decodeMessage(filter[1:], &filt); err != nil {
+				s.logger.Printf("[WARN] serf: failed to decode filterTagType: %v", err)
+				return false
+			}
+
+			// Check if we match this regex
+			tags := s.config.Tags
+			matched, err := regexp.MatchString(filt.Expr, tags[filt.Tag])
+			if err != nil {
+				s.logger.Printf("[WARN] serf: failed to compile filter regex (%s): %v", filt.Expr, err)
+				return false
+			}
+			if !matched {
+				return false
+			}
+
+		default:
+			s.logger.Printf("[WARN] serf: query has unrecognized filter type: %d", filter[0])
+			return false
+		}
+	}
+	return true
+}
+
+// relayResponse will relay a copy of the given response to up to relayFactor
+// other members.
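+// The relayed copy is wrapped by encodeRelayMessage (messages.go), so a
+// chosen peer can unwrap the relayHeader and forward the enclosed response
+// on to its DestAddr.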
+func (s *Serf) relayResponse(relayFactor uint8, addr net.UDPAddr, resp *messageQueryResponse) error { + if relayFactor == 0 { + return nil + } + + // Needs to be worth it; we need to have at least relayFactor *other* + // nodes. If you have a tiny cluster then the relayFactor shouldn't + // be needed. + members := s.Members() + if len(members) < int(relayFactor)+1 { + return nil + } + + // Prep the relay message, which is a wrapped version of the original. + raw, err := encodeRelayMessage(messageQueryResponseType, addr, &resp) + if err != nil { + return fmt.Errorf("failed to format relayed response: %v", err) + } + if len(raw) > s.config.QueryResponseSizeLimit { + return fmt.Errorf("relayed response exceeds limit of %d bytes", s.config.QueryResponseSizeLimit) + } + + // Relay to a random set of peers. + localName := s.LocalMember().Name + relayMembers := kRandomMembers(int(relayFactor), members, func(m Member) bool { + return m.Status != StatusAlive || m.ProtocolMax < 5 || m.Name == localName + }) + for _, m := range relayMembers { + relayAddr := net.UDPAddr{IP: m.Addr, Port: int(m.Port)} + if err := s.memberlist.SendTo(&relayAddr, raw); err != nil { + return fmt.Errorf("failed to send relay response: %v", err) + } + } + return nil +} + +// kRandomMembers selects up to k members from a given list, optionally +// filtering by the given filterFunc +func kRandomMembers(k int, members []Member, filterFunc func(Member) bool) []Member { + n := len(members) + kMembers := make([]Member, 0, k) +OUTER: + // Probe up to 3*n times, with large n this is not necessary + // since k << n, but with small n we want search to be + // exhaustive + for i := 0; i < 3*n && len(kMembers) < k; i++ { + // Get random member + idx := rand.Intn(n) + member := members[idx] + + // Give the filter a shot at it. + if filterFunc != nil && filterFunc(member) { + continue OUTER + } + + // Check if we have this member already + for j := 0; j < len(kMembers); j++ { + if member.Name == kMembers[j].Name { + continue OUTER + } + } + + // Append the member + kMembers = append(kMembers, member) + } + + return kMembers +} diff --git a/vendor/github.com/hashicorp/serf/serf/serf.go b/vendor/github.com/hashicorp/serf/serf/serf.go new file mode 100644 index 0000000000..62fef5bc06 --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/serf.go @@ -0,0 +1,1728 @@ +package serf + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net" + "strconv" + "sync" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/go-msgpack/codec" + "github.com/hashicorp/memberlist" + "github.com/hashicorp/serf/coordinate" +) + +// These are the protocol versions that Serf can _understand_. These are +// Serf-level protocol versions that are passed down as the delegate +// version to memberlist below. +const ( + ProtocolVersionMin uint8 = 2 + ProtocolVersionMax = 5 +) + +const ( + // Used to detect if the meta data is tags + // or if it is a raw role + tagMagicByte uint8 = 255 +) + +var ( + // FeatureNotSupported is returned if a feature cannot be used + // due to an older protocol version being used. + FeatureNotSupported = fmt.Errorf("Feature not supported") +) + +func init() { + // Seed the random number generator + rand.Seed(time.Now().UnixNano()) +} + +// Serf is a single node that is part of a single cluster that gets +// events about joins/leaves/failures/etc. It is created with the Create +// method. +// +// All functions on the Serf structure are safe to call concurrently. 
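+//
+// A minimal creation sketch, assuming this package's DefaultConfig helper
+// (the node name is hypothetical):
+//
+//	conf := DefaultConfig()
+//	conf.NodeName = "node-a"
+//	s, err := Create(conf)
+//	if err != nil {
+//		log.Fatalf("failed to start serf: %v", err)
+//	}
+//	defer s.Shutdown()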
+type Serf struct {
+	// The clocks for different purposes. These MUST be the first things
+	// in this struct due to Golang issue #599.
+	clock      LamportClock
+	eventClock LamportClock
+	queryClock LamportClock
+
+	broadcasts    *memberlist.TransmitLimitedQueue
+	config        *Config
+	failedMembers []*memberState
+	leftMembers   []*memberState
+	memberlist    *memberlist.Memberlist
+	memberLock    sync.RWMutex
+	members       map[string]*memberState
+
+	// recentIntents tracks the lamport time and type of intent for a given
+	// node in case we get an intent before the relevant memberlist event.
+	// This is indexed by node, and always stores the latest lamport time /
+	// intent we've seen. The memberLock protects this structure.
+	recentIntents map[string]nodeIntent
+
+	eventBroadcasts *memberlist.TransmitLimitedQueue
+	eventBuffer     []*userEvents
+	eventJoinIgnore bool
+	eventMinTime    LamportTime
+	eventLock       sync.RWMutex
+
+	queryBroadcasts *memberlist.TransmitLimitedQueue
+	queryBuffer     []*queries
+	queryMinTime    LamportTime
+	queryResponse   map[LamportTime]*QueryResponse
+	queryLock       sync.RWMutex
+
+	logger     *log.Logger
+	joinLock   sync.Mutex
+	stateLock  sync.Mutex
+	state      SerfState
+	shutdownCh chan struct{}
+
+	snapshotter *Snapshotter
+	keyManager  *KeyManager
+
+	coordClient    *coordinate.Client
+	coordCache     map[string]*coordinate.Coordinate
+	coordCacheLock sync.RWMutex
+}
+
+// SerfState is the state of the Serf instance.
+type SerfState int
+
+const (
+	SerfAlive SerfState = iota
+	SerfLeaving
+	SerfLeft
+	SerfShutdown
+)
+
+func (s SerfState) String() string {
+	switch s {
+	case SerfAlive:
+		return "alive"
+	case SerfLeaving:
+		return "leaving"
+	case SerfLeft:
+		return "left"
+	case SerfShutdown:
+		return "shutdown"
+	default:
+		return "unknown"
+	}
+}
+
+// Member is a single member of the Serf cluster.
+type Member struct {
+	Name   string
+	Addr   net.IP
+	Port   uint16
+	Tags   map[string]string
+	Status MemberStatus
+
+	// The minimum, maximum, and current values of the protocol versions
+	// and delegate (Serf) protocol versions that each member can understand
+	// or is speaking.
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// MemberStatus is the state that a member is in.
+type MemberStatus int
+
+const (
+	StatusNone MemberStatus = iota
+	StatusAlive
+	StatusLeaving
+	StatusLeft
+	StatusFailed
+)
+
+func (s MemberStatus) String() string {
+	switch s {
+	case StatusNone:
+		return "none"
+	case StatusAlive:
+		return "alive"
+	case StatusLeaving:
+		return "leaving"
+	case StatusLeft:
+		return "left"
+	case StatusFailed:
+		return "failed"
+	default:
+		panic(fmt.Sprintf("unknown MemberStatus: %d", s))
+	}
+}
+
+// memberState is used to track members that are no longer active due to
+// leaving, failing, partitioning, etc. It tracks the member along with
+// when that member was marked as leaving.
+type memberState struct {
+	Member
+	statusLTime LamportTime // lamport clock time of last received message
+	leaveTime   time.Time   // wall clock time of leave
+}
+
+// nodeIntent is used to buffer intents for out-of-order deliveries.
+type nodeIntent struct {
+	// Type is the intent being tracked. Only messageJoinType and
+	// messageLeaveType are tracked.
+	Type messageType
+
+	// WallTime is the wall clock time we saw this intent in order to
+	// expire it from the buffer.
+	WallTime time.Time
+
+	// LTime is the Lamport time, used for cluster-wide ordering of events.
+	LTime LamportTime
+}
+
+// userEvent is used to buffer events to prevent re-delivery
+type userEvent struct {
+	Name    string
+	Payload []byte
+}
+
+func (ue *userEvent) Equals(other *userEvent) bool {
+	if ue.Name != other.Name {
+		return false
+	}
+	if bytes.Compare(ue.Payload, other.Payload) != 0 {
+		return false
+	}
+	return true
+}
+
+// userEvents stores all the user events at a specific time
+type userEvents struct {
+	LTime  LamportTime
+	Events []userEvent
+}
+
+// queries stores all the query ids at a specific time
+type queries struct {
+	LTime    LamportTime
+	QueryIDs []uint32
+}
+
+const (
+	UserEventSizeLimit = 512        // Maximum byte size for event name and payload
+	snapshotSizeLimit  = 128 * 1024 // Maximum 128 KB snapshot
+)
+
+// Create creates a new Serf instance, starting all the background tasks
+// to maintain cluster membership information.
+//
+// After calling this function, the configuration should no longer be used
+// or modified by the caller.
+func Create(conf *Config) (*Serf, error) {
+	conf.Init()
+	if conf.ProtocolVersion < ProtocolVersionMin {
+		return nil, fmt.Errorf("Protocol version '%d' too low. Must be in range: [%d, %d]",
+			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
+	} else if conf.ProtocolVersion > ProtocolVersionMax {
+		return nil, fmt.Errorf("Protocol version '%d' too high. Must be in range: [%d, %d]",
+			conf.ProtocolVersion, ProtocolVersionMin, ProtocolVersionMax)
+	}
+
+	serf := &Serf{
+		config:        conf,
+		logger:        log.New(conf.LogOutput, "", log.LstdFlags),
+		members:       make(map[string]*memberState),
+		queryResponse: make(map[LamportTime]*QueryResponse),
+		shutdownCh:    make(chan struct{}),
+		state:         SerfAlive,
+	}
+
+	// Check that the meta data length is okay
+	if len(serf.encodeTags(conf.Tags)) > memberlist.MetaMaxSize {
+		return nil, fmt.Errorf("Encoded length of tags exceeds limit of %d bytes", memberlist.MetaMaxSize)
+	}
+
+	// Check if serf member event coalescing is enabled
+	if conf.CoalescePeriod > 0 && conf.QuiescentPeriod > 0 && conf.EventCh != nil {
+		c := &memberEventCoalescer{
+			lastEvents:   make(map[string]EventType),
+			latestEvents: make(map[string]coalesceEvent),
+		}
+
+		conf.EventCh = coalescedEventCh(conf.EventCh, serf.shutdownCh,
+			conf.CoalescePeriod, conf.QuiescentPeriod, c)
+	}
+
+	// Check if user event coalescing is enabled
+	if conf.UserCoalescePeriod > 0 && conf.UserQuiescentPeriod > 0 && conf.EventCh != nil {
+		c := &userEventCoalescer{
+			events: make(map[string]*latestUserEvents),
+		}
+
+		conf.EventCh = coalescedEventCh(conf.EventCh, serf.shutdownCh,
+			conf.UserCoalescePeriod, conf.UserQuiescentPeriod, c)
+	}
+
+	// Listen for internal Serf queries. This is set up before the snapshotter,
+	// since we want to capture the query-time, but the internal listener does
+	// not pass the queries through.
+	outCh, err := newSerfQueries(serf, serf.logger, conf.EventCh, serf.shutdownCh)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to setup serf query handler: %v", err)
+	}
+	conf.EventCh = outCh
+
+	// Set up network coordinate client.
+	if !conf.DisableCoordinates {
+		serf.coordClient, err = coordinate.NewClient(coordinate.DefaultConfig())
+		if err != nil {
+			return nil, fmt.Errorf("Failed to create coordinate client: %v", err)
+		}
+	}
+
+	// Try to access the snapshot
+	var oldClock, oldEventClock, oldQueryClock LamportTime
+	var prev []*PreviousNode
+	if conf.SnapshotPath != "" {
+		eventCh, snap, err := NewSnapshotter(
+			conf.SnapshotPath,
+			snapshotSizeLimit,
+			conf.RejoinAfterLeave,
+			serf.logger,
+			&serf.clock,
+			serf.coordClient,
+			conf.EventCh,
+			serf.shutdownCh)
+		if err != nil {
+			return nil, fmt.Errorf("Failed to setup snapshot: %v", err)
+		}
+		serf.snapshotter = snap
+		conf.EventCh = eventCh
+		prev = snap.AliveNodes()
+		oldClock = snap.LastClock()
+		oldEventClock = snap.LastEventClock()
+		oldQueryClock = snap.LastQueryClock()
+		serf.eventMinTime = oldEventClock + 1
+		serf.queryMinTime = oldQueryClock + 1
+	}
+
+	// Set up the coordinate cache. We do this after we read the snapshot to
+	// make sure we get a good initial value from there, if we got one.
+	if !conf.DisableCoordinates {
+		serf.coordCache = make(map[string]*coordinate.Coordinate)
+		serf.coordCache[conf.NodeName] = serf.coordClient.GetCoordinate()
+	}
+
+	// Setup the various broadcast queues, which we use to send our own
+	// custom broadcasts along the gossip channel.
+	serf.broadcasts = &memberlist.TransmitLimitedQueue{
+		NumNodes: func() int {
+			return len(serf.members)
+		},
+		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
+	}
+	serf.eventBroadcasts = &memberlist.TransmitLimitedQueue{
+		NumNodes: func() int {
+			return len(serf.members)
+		},
+		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
+	}
+	serf.queryBroadcasts = &memberlist.TransmitLimitedQueue{
+		NumNodes: func() int {
+			return len(serf.members)
+		},
+		RetransmitMult: conf.MemberlistConfig.RetransmitMult,
+	}
+
+	// Create the buffer for recent intents
+	serf.recentIntents = make(map[string]nodeIntent)
+
+	// Create a buffer for events and queries
+	serf.eventBuffer = make([]*userEvents, conf.EventBuffer)
+	serf.queryBuffer = make([]*queries, conf.QueryBuffer)
+
+	// Ensure our lamport clock is at least 1, so that the default
+	// join LTime of 0 does not cause issues
+	serf.clock.Increment()
+	serf.eventClock.Increment()
+	serf.queryClock.Increment()
+
+	// Restore the clock from snap if we have one
+	serf.clock.Witness(oldClock)
+	serf.eventClock.Witness(oldEventClock)
+	serf.queryClock.Witness(oldQueryClock)
+
+	// Modify the memberlist configuration with keys that we set
+	conf.MemberlistConfig.Events = &eventDelegate{serf: serf}
+	conf.MemberlistConfig.Conflict = &conflictDelegate{serf: serf}
+	conf.MemberlistConfig.Delegate = &delegate{serf: serf}
+	conf.MemberlistConfig.DelegateProtocolVersion = conf.ProtocolVersion
+	conf.MemberlistConfig.DelegateProtocolMin = ProtocolVersionMin
+	conf.MemberlistConfig.DelegateProtocolMax = ProtocolVersionMax
+	conf.MemberlistConfig.Name = conf.NodeName
+	conf.MemberlistConfig.ProtocolVersion = ProtocolVersionMap[conf.ProtocolVersion]
+	if !conf.DisableCoordinates {
+		conf.MemberlistConfig.Ping = &pingDelegate{serf: serf}
+	}
+
+	// Setup a merge delegate if necessary
+	if conf.Merge != nil {
+		md := &mergeDelegate{serf: serf}
+		conf.MemberlistConfig.Merge = md
+		conf.MemberlistConfig.Alive = md
+	}
+
+	// Create the underlying memberlist that will manage membership
+	// and failure detection for the Serf instance.
+	memberlist, err := memberlist.Create(conf.MemberlistConfig)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to create memberlist: %v", err)
+	}
+
+	serf.memberlist = memberlist
+
+	// Create a key manager for handling all encryption key changes
+	serf.keyManager = &KeyManager{serf: serf}
+
+	// Start the background tasks. See the documentation above each method
+	// for more information on their role.
+	go serf.handleReap()
+	go serf.handleReconnect()
+	go serf.checkQueueDepth("Intent", serf.broadcasts)
+	go serf.checkQueueDepth("Event", serf.eventBroadcasts)
+	go serf.checkQueueDepth("Query", serf.queryBroadcasts)
+
+	// Attempt to re-join the cluster if we have known nodes
+	if len(prev) != 0 {
+		go serf.handleRejoin(prev)
+	}
+
+	return serf, nil
+}
+
+// ProtocolVersion returns the current protocol version in use by Serf.
+// This is the Serf protocol version, not the memberlist protocol version.
+func (s *Serf) ProtocolVersion() uint8 {
+	return s.config.ProtocolVersion
+}
+
+// EncryptionEnabled is a predicate that determines whether or not encryption
+// is enabled, which can be possible in one of 2 cases:
+// - Single encryption key passed at agent start (no persistence)
+// - Keyring file provided at agent start
+func (s *Serf) EncryptionEnabled() bool {
+	return s.config.MemberlistConfig.Keyring != nil
+}
+
+// KeyManager returns the key manager for the current Serf instance.
+func (s *Serf) KeyManager() *KeyManager {
+	return s.keyManager
+}
+
+// UserEvent is used to broadcast a custom user event with a given
+// name and payload. The events must be fairly small, and if the
+// size limit is exceeded an error will be returned. If coalesce is enabled,
+// nodes are allowed to coalesce this event. Coalescing is only available
+// starting in v0.2
+func (s *Serf) UserEvent(name string, payload []byte, coalesce bool) error {
+	// Check the size limit
+	if len(name)+len(payload) > UserEventSizeLimit {
+		return fmt.Errorf("user event exceeds limit of %d bytes", UserEventSizeLimit)
+	}
+
+	// Create a message
+	msg := messageUserEvent{
+		LTime:   s.eventClock.Time(),
+		Name:    name,
+		Payload: payload,
+		CC:      coalesce,
+	}
+	s.eventClock.Increment()
+
+	// Process update locally
+	s.handleUserEvent(&msg)
+
+	// Start broadcasting the event
+	raw, err := encodeMessage(messageUserEventType, &msg)
+	if err != nil {
+		return err
+	}
+	s.eventBroadcasts.QueueBroadcast(&broadcast{
+		msg: raw,
+	})
+	return nil
+}
+
+// Query is used to broadcast a new query. The query must be fairly small,
+// and an error will be returned if the size limit is exceeded. This is only
+// available with protocol version 4 and newer. Query parameters are optional,
+// and if not provided, a sane set of defaults will be used.
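+//
+// A usage sketch (the query name and payload are illustrative):
+//
+//	resp, err := s.Query("deploy", []byte("v1.2.3"), nil)
+//	if err != nil {
+//		return err
+//	}
+//	for r := range resp.ResponseCh() {
+//		log.Printf("%s answered: %s", r.From, r.Payload)
+//	}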
+func (s *Serf) Query(name string, payload []byte, params *QueryParam) (*QueryResponse, error) {
+	// Check that the latest protocol is in use
+	if s.ProtocolVersion() < 4 {
+		return nil, FeatureNotSupported
+	}
+
+	// Provide default parameters if none given
+	if params == nil {
+		params = s.DefaultQueryParams()
+	} else if params.Timeout == 0 {
+		params.Timeout = s.DefaultQueryTimeout()
+	}
+
+	// Get the local node
+	local := s.memberlist.LocalNode()
+
+	// Encode the filters
+	filters, err := params.encodeFilters()
+	if err != nil {
+		return nil, fmt.Errorf("Failed to format filters: %v", err)
+	}
+
+	// Setup the flags
+	var flags uint32
+	if params.RequestAck {
+		flags |= queryFlagAck
+	}
+
+	// Create a message
+	q := messageQuery{
+		LTime:       s.queryClock.Time(),
+		ID:          uint32(rand.Int31()),
+		Addr:        local.Addr,
+		Port:        local.Port,
+		Filters:     filters,
+		Flags:       flags,
+		RelayFactor: params.RelayFactor,
+		Timeout:     params.Timeout,
+		Name:        name,
+		Payload:     payload,
+	}
+
+	// Encode the query
+	raw, err := encodeMessage(messageQueryType, &q)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check the size
+	if len(raw) > s.config.QuerySizeLimit {
+		return nil, fmt.Errorf("query exceeds limit of %d bytes", s.config.QuerySizeLimit)
+	}
+
+	// Register QueryResponse to track acks and responses
+	resp := newQueryResponse(s.memberlist.NumMembers(), &q)
+	s.registerQueryResponse(params.Timeout, resp)
+
+	// Process query locally
+	s.handleQuery(&q)
+
+	// Start broadcasting the event
+	s.queryBroadcasts.QueueBroadcast(&broadcast{
+		msg: raw,
+	})
+	return resp, nil
+}
+
+// registerQueryResponse is used to setup the listeners for the query,
+// and to schedule closing the query after the timeout.
+func (s *Serf) registerQueryResponse(timeout time.Duration, resp *QueryResponse) {
+	s.queryLock.Lock()
+	defer s.queryLock.Unlock()
+
+	// Map the LTime to the QueryResponse. This is necessarily 1-to-1,
+	// since we increment the time for each new query.
+	s.queryResponse[resp.lTime] = resp
+
+	// Setup a timer to close the response and deregister after the timeout
+	time.AfterFunc(timeout, func() {
+		s.queryLock.Lock()
+		delete(s.queryResponse, resp.lTime)
+		resp.Close()
+		s.queryLock.Unlock()
+	})
+}
+
+// SetTags is used to dynamically update the tags associated with
+// the local node. This will propagate the change to the rest of
+// the cluster. Blocks until the message is broadcast out.
+func (s *Serf) SetTags(tags map[string]string) error {
+	// Check that the meta data length is okay
+	if len(s.encodeTags(tags)) > memberlist.MetaMaxSize {
+		return fmt.Errorf("Encoded length of tags exceeds limit of %d bytes",
+			memberlist.MetaMaxSize)
+	}
+
+	// Update the config
+	s.config.Tags = tags
+
+	// Trigger a memberlist update
+	return s.memberlist.UpdateNode(s.config.BroadcastTimeout)
+}
+
+// Join joins an existing Serf cluster. Returns the number of nodes
+// successfully contacted. The returned error will be non-nil only in the
+// case that no nodes could be contacted. If ignoreOld is true, then any
+// user messages sent prior to the join will be ignored.
+func (s *Serf) Join(existing []string, ignoreOld bool) (int, error) {
+	// Do a quick state check
+	if s.State() != SerfAlive {
+		return 0, fmt.Errorf("Serf can't Join after Leave or Shutdown")
+	}
+
+	// Hold the joinLock, this is to make eventJoinIgnore safe
+	s.joinLock.Lock()
+	defer s.joinLock.Unlock()
+
+	// Ignore any events from a potential join.
This is safe since we hold
+	// the joinLock and nobody else can be doing a Join
+	if ignoreOld {
+		s.eventJoinIgnore = true
+		defer func() {
+			s.eventJoinIgnore = false
+		}()
+	}
+
+	// Have memberlist attempt to join
+	num, err := s.memberlist.Join(existing)
+
+	// If we joined any nodes, broadcast the join message
+	if num > 0 {
+		// Start broadcasting the update
+		if err := s.broadcastJoin(s.clock.Time()); err != nil {
+			return num, err
+		}
+	}
+
+	return num, err
+}
+
+// broadcastJoin broadcasts a new join intent with a
+// given clock value. It is used on either join, or if
+// we need to refute an older leave intent. Cannot be called
+// with the memberLock held.
+func (s *Serf) broadcastJoin(ltime LamportTime) error {
+	// Construct message to update our lamport clock
+	msg := messageJoin{
+		LTime: ltime,
+		Node:  s.config.NodeName,
+	}
+	s.clock.Witness(ltime)
+
+	// Process update locally
+	s.handleNodeJoinIntent(&msg)
+
+	// Start broadcasting the update
+	if err := s.broadcast(messageJoinType, &msg, nil); err != nil {
+		s.logger.Printf("[WARN] serf: Failed to broadcast join intent: %v", err)
+		return err
+	}
+	return nil
+}
+
+// Leave gracefully exits the cluster. It is safe to call this multiple
+// times.
+func (s *Serf) Leave() error {
+	// Check the current state
+	s.stateLock.Lock()
+	if s.state == SerfLeft {
+		s.stateLock.Unlock()
+		return nil
+	} else if s.state == SerfLeaving {
+		s.stateLock.Unlock()
+		return fmt.Errorf("Leave already in progress")
+	} else if s.state == SerfShutdown {
+		s.stateLock.Unlock()
+		return fmt.Errorf("Leave called after Shutdown")
+	}
+	s.state = SerfLeaving
+	s.stateLock.Unlock()
+
+	// If we have a snapshot, mark we are leaving
+	if s.snapshotter != nil {
+		s.snapshotter.Leave()
+	}
+
+	// Construct the message for the graceful leave
+	msg := messageLeave{
+		LTime: s.clock.Time(),
+		Node:  s.config.NodeName,
+	}
+	s.clock.Increment()
+
+	// Process the leave locally
+	s.handleNodeLeaveIntent(&msg)
+
+	// Only broadcast the leave message if there is at least one
+	// other node alive.
+	if s.hasAliveMembers() {
+		notifyCh := make(chan struct{})
+		if err := s.broadcast(messageLeaveType, &msg, notifyCh); err != nil {
+			return err
+		}
+
+		select {
+		case <-notifyCh:
+		case <-time.After(s.config.BroadcastTimeout):
+			return errors.New("timeout while waiting for graceful leave")
+		}
+	}
+
+	// Attempt the memberlist leave
+	err := s.memberlist.Leave(s.config.BroadcastTimeout)
+	if err != nil {
+		return err
+	}
+
+	// Transition to Left only if we are not already shut down
+	s.stateLock.Lock()
+	if s.state != SerfShutdown {
+		s.state = SerfLeft
+	}
+	s.stateLock.Unlock()
+	return nil
+}
+
+// hasAliveMembers is called to check for any alive members other than
+// ourself.
+func (s *Serf) hasAliveMembers() bool {
+	s.memberLock.RLock()
+	defer s.memberLock.RUnlock()
+
+	hasAlive := false
+	for _, m := range s.members {
+		// Skip ourself, we want to know if OTHER members are alive
+		if m.Name == s.config.NodeName {
+			continue
+		}
+
+		if m.Status == StatusAlive {
+			hasAlive = true
+			break
+		}
+	}
+	return hasAlive
+}
+
+// LocalMember returns the Member information for the local node
+func (s *Serf) LocalMember() Member {
+	s.memberLock.RLock()
+	defer s.memberLock.RUnlock()
+	return s.members[s.config.NodeName].Member
+}
+
+// Members returns a point-in-time snapshot of the members of this cluster.
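+//
+// For example, counting members that are currently alive (sketch):
+//
+//	alive := 0
+//	for _, m := range s.Members() {
+//		if m.Status == StatusAlive {
+//			alive++
+//		}
+//	}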
+func (s *Serf) Members() []Member {
+	s.memberLock.RLock()
+	defer s.memberLock.RUnlock()
+
+	members := make([]Member, 0, len(s.members))
+	for _, m := range s.members {
+		members = append(members, m.Member)
+	}
+
+	return members
+}
+
+// RemoveFailedNode forcibly removes a failed node from the cluster
+// immediately, instead of waiting for the reaper to eventually reclaim it.
+// This also has the effect that Serf will no longer attempt to reconnect
+// to this node.
+func (s *Serf) RemoveFailedNode(node string) error {
+	// Construct the message to broadcast
+	msg := messageLeave{
+		LTime: s.clock.Time(),
+		Node:  node,
+	}
+	s.clock.Increment()
+
+	// Process our own event
+	s.handleNodeLeaveIntent(&msg)
+
+	// If we have no members, then we don't need to broadcast
+	if !s.hasAliveMembers() {
+		return nil
+	}
+
+	// Broadcast the remove
+	notifyCh := make(chan struct{})
+	if err := s.broadcast(messageLeaveType, &msg, notifyCh); err != nil {
+		return err
+	}
+
+	// Wait for the broadcast
+	select {
+	case <-notifyCh:
+	case <-time.After(s.config.BroadcastTimeout):
+		return fmt.Errorf("timed out broadcasting node removal")
+	}
+
+	return nil
+}
+
+// Shutdown forcefully shuts down the Serf instance, stopping all network
+// activity and background maintenance associated with the instance.
+//
+// This is not a graceful shutdown, and should be preceded by a call
+// to Leave. Otherwise, other nodes in the cluster will detect this node's
+// exit as a node failure.
+//
+// It is safe to call this method multiple times.
+func (s *Serf) Shutdown() error {
+	s.stateLock.Lock()
+	defer s.stateLock.Unlock()
+
+	if s.state == SerfShutdown {
+		return nil
+	}
+
+	if s.state != SerfLeft {
+		s.logger.Printf("[WARN] serf: Shutdown without a Leave")
+	}
+
+	s.state = SerfShutdown
+	close(s.shutdownCh)
+
+	err := s.memberlist.Shutdown()
+	if err != nil {
+		return err
+	}
+
+	// Wait for the snapshotter to finish if we have one
+	if s.snapshotter != nil {
+		s.snapshotter.Wait()
+	}
+
+	return nil
+}
+
+// ShutdownCh returns a channel that can be used to wait for
+// Serf to shutdown.
+func (s *Serf) ShutdownCh() <-chan struct{} {
+	return s.shutdownCh
+}
+
+// Memberlist is used to get access to the underlying Memberlist instance
+func (s *Serf) Memberlist() *memberlist.Memberlist {
+	return s.memberlist
+}
+
+// State is the current state of this Serf instance.
+func (s *Serf) State() SerfState {
+	s.stateLock.Lock()
+	defer s.stateLock.Unlock()
+	return s.state
+}
+
+// broadcast takes a Serf message type, encodes it for the wire, and queues
+// the broadcast. If a notify channel is given, this channel will be closed
+// when the broadcast is sent.
+func (s *Serf) broadcast(t messageType, msg interface{}, notify chan<- struct{}) error {
+	raw, err := encodeMessage(t, msg)
+	if err != nil {
+		return err
+	}
+
+	s.broadcasts.QueueBroadcast(&broadcast{
+		msg:    raw,
+		notify: notify,
+	})
+	return nil
+}
+
+// handleNodeJoin is called when a node join event is received
+// from memberlist.
+func (s *Serf) handleNodeJoin(n *memberlist.Node) {
+	s.memberLock.Lock()
+	defer s.memberLock.Unlock()
+
+	var oldStatus MemberStatus
+	member, ok := s.members[n.Name]
+	if !ok {
+		oldStatus = StatusNone
+		member = &memberState{
+			Member: Member{
+				Name:   n.Name,
+				Addr:   net.IP(n.Addr),
+				Port:   n.Port,
+				Tags:   s.decodeTags(n.Meta),
+				Status: StatusAlive,
+			},
+		}
+
+		// Check if we have a join or leave intent. The intent buffer
+		// will only hold one event for this node, so the more recent
+		// one will take effect.
+ if join, ok := recentIntent(s.recentIntents, n.Name, messageJoinType); ok { + member.statusLTime = join + } + if leave, ok := recentIntent(s.recentIntents, n.Name, messageLeaveType); ok { + member.Status = StatusLeaving + member.statusLTime = leave + } + + s.members[n.Name] = member + } else { + oldStatus = member.Status + deadTime := time.Now().Sub(member.leaveTime) + if oldStatus == StatusFailed && deadTime < s.config.FlapTimeout { + metrics.IncrCounter([]string{"serf", "member", "flap"}, 1) + } + + member.Status = StatusAlive + member.leaveTime = time.Time{} + member.Addr = net.IP(n.Addr) + member.Port = n.Port + member.Tags = s.decodeTags(n.Meta) + } + + // Update the protocol versions every time we get an event + member.ProtocolMin = n.PMin + member.ProtocolMax = n.PMax + member.ProtocolCur = n.PCur + member.DelegateMin = n.DMin + member.DelegateMax = n.DMax + member.DelegateCur = n.DCur + + // If node was previously in a failed state, then clean up some + // internal accounting. + // TODO(mitchellh): needs tests to verify not reaped + if oldStatus == StatusFailed || oldStatus == StatusLeft { + s.failedMembers = removeOldMember(s.failedMembers, member.Name) + s.leftMembers = removeOldMember(s.leftMembers, member.Name) + } + + // Update some metrics + metrics.IncrCounter([]string{"serf", "member", "join"}, 1) + + // Send an event along + s.logger.Printf("[INFO] serf: EventMemberJoin: %s %s", + member.Member.Name, member.Member.Addr) + if s.config.EventCh != nil { + s.config.EventCh <- MemberEvent{ + Type: EventMemberJoin, + Members: []Member{member.Member}, + } + } +} + +// handleNodeLeave is called when a node leave event is received +// from memberlist. +func (s *Serf) handleNodeLeave(n *memberlist.Node) { + s.memberLock.Lock() + defer s.memberLock.Unlock() + + member, ok := s.members[n.Name] + if !ok { + // We've never even heard of this node that is supposedly + // leaving. Just ignore it completely. + return + } + + switch member.Status { + case StatusLeaving: + member.Status = StatusLeft + member.leaveTime = time.Now() + s.leftMembers = append(s.leftMembers, member) + case StatusAlive: + member.Status = StatusFailed + member.leaveTime = time.Now() + s.failedMembers = append(s.failedMembers, member) + default: + // Unknown state that it was in? Just don't do anything + s.logger.Printf("[WARN] serf: Bad state when leave: %d", member.Status) + return + } + + // Send an event along + event := EventMemberLeave + eventStr := "EventMemberLeave" + if member.Status != StatusLeft { + event = EventMemberFailed + eventStr = "EventMemberFailed" + } + + // Update some metrics + metrics.IncrCounter([]string{"serf", "member", member.Status.String()}, 1) + + s.logger.Printf("[INFO] serf: %s: %s %s", + eventStr, member.Member.Name, member.Member.Addr) + if s.config.EventCh != nil { + s.config.EventCh <- MemberEvent{ + Type: event, + Members: []Member{member.Member}, + } + } +} + +// handleNodeUpdate is called when a node meta data update +// has taken place +func (s *Serf) handleNodeUpdate(n *memberlist.Node) { + s.memberLock.Lock() + defer s.memberLock.Unlock() + + member, ok := s.members[n.Name] + if !ok { + // We've never even heard of this node that is updating. + // Just ignore it completely. + return + } + + // Update the member attributes + member.Addr = net.IP(n.Addr) + member.Port = n.Port + member.Tags = s.decodeTags(n.Meta) + + // Snag the latest versions. 
NOTE - the current memberlist code will NOT + // fire an update event if the metadata (for Serf, tags) stays the same + // and only the protocol versions change. If we make any Serf-level + // protocol changes where we want to get this event under those + // circumstances, we will need to update memberlist to do a check of + // versions as well as the metadata. + member.ProtocolMin = n.PMin + member.ProtocolMax = n.PMax + member.ProtocolCur = n.PCur + member.DelegateMin = n.DMin + member.DelegateMax = n.DMax + member.DelegateCur = n.DCur + + // Update some metrics + metrics.IncrCounter([]string{"serf", "member", "update"}, 1) + + // Send an event along + s.logger.Printf("[INFO] serf: EventMemberUpdate: %s", member.Member.Name) + if s.config.EventCh != nil { + s.config.EventCh <- MemberEvent{ + Type: EventMemberUpdate, + Members: []Member{member.Member}, + } + } +} + +// handleNodeLeaveIntent is called when an intent to leave is received. +func (s *Serf) handleNodeLeaveIntent(leaveMsg *messageLeave) bool { + // Witness a potentially newer time + s.clock.Witness(leaveMsg.LTime) + + s.memberLock.Lock() + defer s.memberLock.Unlock() + + member, ok := s.members[leaveMsg.Node] + if !ok { + // Rebroadcast only if this was an update we hadn't seen before. + return upsertIntent(s.recentIntents, leaveMsg.Node, messageLeaveType, leaveMsg.LTime, time.Now) + } + + // If the message is old, then it is irrelevant and we can skip it + if leaveMsg.LTime <= member.statusLTime { + return false + } + + // Refute us leaving if we are in the alive state + // Must be done in another goroutine since we have the memberLock + if leaveMsg.Node == s.config.NodeName && s.state == SerfAlive { + s.logger.Printf("[DEBUG] serf: Refuting an older leave intent") + go s.broadcastJoin(s.clock.Time()) + return false + } + + // State transition depends on current state + switch member.Status { + case StatusAlive: + member.Status = StatusLeaving + member.statusLTime = leaveMsg.LTime + return true + case StatusFailed: + member.Status = StatusLeft + member.statusLTime = leaveMsg.LTime + + // Remove from the failed list and add to the left list. We add + // to the left list so that when we do a sync, other nodes will + // remove it from their failed list. + s.failedMembers = removeOldMember(s.failedMembers, member.Name) + s.leftMembers = append(s.leftMembers, member) + + // We must push a message indicating the node has now + // left to allow higher-level applications to handle the + // graceful leave. + s.logger.Printf("[INFO] serf: EventMemberLeave (forced): %s %s", + member.Member.Name, member.Member.Addr) + if s.config.EventCh != nil { + s.config.EventCh <- MemberEvent{ + Type: EventMemberLeave, + Members: []Member{member.Member}, + } + } + return true + default: + return false + } +} + +// handleNodeJoinIntent is called when a node broadcasts a +// join message to set the lamport time of its join +func (s *Serf) handleNodeJoinIntent(joinMsg *messageJoin) bool { + // Witness a potentially newer time + s.clock.Witness(joinMsg.LTime) + + s.memberLock.Lock() + defer s.memberLock.Unlock() + + member, ok := s.members[joinMsg.Node] + if !ok { + // Rebroadcast only if this was an update we hadn't seen before.
+ return upsertIntent(s.recentIntents, joinMsg.Node, messageJoinType, joinMsg.LTime, time.Now) + } + + // Check if this time is newer than what we have + if joinMsg.LTime <= member.statusLTime { + return false + } + + // Update the LTime + member.statusLTime = joinMsg.LTime + + // If we are in the leaving state, we should go back to alive, + // since the leaving message must have been for an older time + if member.Status == StatusLeaving { + member.Status = StatusAlive + } + return true +} + +// handleUserEvent is called when a user event broadcast is +// received. Returns if the message should be rebroadcast. +func (s *Serf) handleUserEvent(eventMsg *messageUserEvent) bool { + // Witness a potentially newer time + s.eventClock.Witness(eventMsg.LTime) + + s.eventLock.Lock() + defer s.eventLock.Unlock() + + // Ignore if it is before our minimum event time + if eventMsg.LTime < s.eventMinTime { + return false + } + + // Check if this message is too old + curTime := s.eventClock.Time() + if curTime > LamportTime(len(s.eventBuffer)) && + eventMsg.LTime < curTime-LamportTime(len(s.eventBuffer)) { + s.logger.Printf( + "[WARN] serf: received old event %s from time %d (current: %d)", + eventMsg.Name, + eventMsg.LTime, + s.eventClock.Time()) + return false + } + + // Check if we've already seen this + idx := eventMsg.LTime % LamportTime(len(s.eventBuffer)) + seen := s.eventBuffer[idx] + userEvent := userEvent{Name: eventMsg.Name, Payload: eventMsg.Payload} + if seen != nil && seen.LTime == eventMsg.LTime { + for _, previous := range seen.Events { + if previous.Equals(&userEvent) { + return false + } + } + } else { + seen = &userEvents{LTime: eventMsg.LTime} + s.eventBuffer[idx] = seen + } + + // Add to recent events + seen.Events = append(seen.Events, userEvent) + + // Update some metrics + metrics.IncrCounter([]string{"serf", "events"}, 1) + metrics.IncrCounter([]string{"serf", "events", eventMsg.Name}, 1) + + if s.config.EventCh != nil { + s.config.EventCh <- UserEvent{ + LTime: eventMsg.LTime, + Name: eventMsg.Name, + Payload: eventMsg.Payload, + Coalesce: eventMsg.CC, + } + } + return true +} + +// handleQuery is called when a query broadcast is +// received. Returns if the message should be rebroadcast. 
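+// Deduplication below mirrors handleUserEvent: a message lands in a ring
+// buffer slot chosen by its Lamport time modulo the buffer length. A
+// worked example under an assumed buffer length of 512:
+//
+//	idx := query.LTime % LamportTime(len(s.queryBuffer)) // 1537 % 512 == 1
+//
+// Two queries whose times differ by a multiple of 512 share a slot; the
+// stored LTime and query IDs disambiguate them.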
+func (s *Serf) handleQuery(query *messageQuery) bool { + // Witness a potentially newer time + s.queryClock.Witness(query.LTime) + + s.queryLock.Lock() + defer s.queryLock.Unlock() + + // Ignore if it is before our minimum query time + if query.LTime < s.queryMinTime { + return false + } + + // Check if this message is too old + curTime := s.queryClock.Time() + if curTime > LamportTime(len(s.queryBuffer)) && + query.LTime < curTime-LamportTime(len(s.queryBuffer)) { + s.logger.Printf( + "[WARN] serf: received old query %s from time %d (current: %d)", + query.Name, + query.LTime, + s.queryClock.Time()) + return false + } + + // Check if we've already seen this + idx := query.LTime % LamportTime(len(s.queryBuffer)) + seen := s.queryBuffer[idx] + if seen != nil && seen.LTime == query.LTime { + for _, previous := range seen.QueryIDs { + if previous == query.ID { + // Seen this ID already + return false + } + } + } else { + seen = &queries{LTime: query.LTime} + s.queryBuffer[idx] = seen + } + + // Add to recent queries + seen.QueryIDs = append(seen.QueryIDs, query.ID) + + // Update some metrics + metrics.IncrCounter([]string{"serf", "queries"}, 1) + metrics.IncrCounter([]string{"serf", "queries", query.Name}, 1) + + // Check if we should rebroadcast; this may be disabled by a flag + rebroadcast := true + if query.NoBroadcast() { + rebroadcast = false + } + + // Filter the query + if !s.shouldProcessQuery(query.Filters) { + // Even if we don't process it further, we should rebroadcast, + // since it is the first time we've seen this. + return rebroadcast + } + + // Send ack if requested, without waiting for client to Respond() + if query.Ack() { + ack := messageQueryResponse{ + LTime: query.LTime, + ID: query.ID, + From: s.config.NodeName, + Flags: queryFlagAck, + } + raw, err := encodeMessage(messageQueryResponseType, &ack) + if err != nil { + s.logger.Printf("[ERR] serf: failed to format ack: %v", err) + } else { + addr := net.UDPAddr{IP: query.Addr, Port: int(query.Port)} + if err := s.memberlist.SendTo(&addr, raw); err != nil { + s.logger.Printf("[ERR] serf: failed to send ack: %v", err) + } + if err := s.relayResponse(query.RelayFactor, addr, &ack); err != nil { + s.logger.Printf("[ERR] serf: failed to relay ack: %v", err) + } + } + } + + if s.config.EventCh != nil { + s.config.EventCh <- &Query{ + LTime: query.LTime, + Name: query.Name, + Payload: query.Payload, + serf: s, + id: query.ID, + addr: query.Addr, + port: query.Port, + deadline: time.Now().Add(query.Timeout), + relayFactor: query.RelayFactor, + } + } + return rebroadcast +} + +// handleQueryResponse is called when a query response is +// received.
+func (s *Serf) handleQueryResponse(resp *messageQueryResponse) { + // Look for a corresponding QueryResponse + s.queryLock.RLock() + query, ok := s.queryResponse[resp.LTime] + s.queryLock.RUnlock() + if !ok { + s.logger.Printf("[WARN] serf: reply for non-running query (LTime: %d, ID: %d) From: %s", + resp.LTime, resp.ID, resp.From) + return + } + + // Verify the ID matches + if query.id != resp.ID { + s.logger.Printf("[WARN] serf: query reply ID mismatch (Local: %d, Response: %d)", + query.id, resp.ID) + return + } + + // Check if the query is closed + if query.Finished() { + return + } + + // Process each type of response + if resp.Ack() { + // Exit early if this is a duplicate ack + if _, ok := query.acks[resp.From]; ok { + metrics.IncrCounter([]string{"serf", "query_duplicate_acks"}, 1) + return + } + + metrics.IncrCounter([]string{"serf", "query_acks"}, 1) + select { + case query.ackCh <- resp.From: + query.acks[resp.From] = struct{}{} + default: + s.logger.Printf("[WARN] serf: Failed to deliver query ack, dropping") + } + } else { + // Exit early if this is a duplicate response + if _, ok := query.responses[resp.From]; ok { + metrics.IncrCounter([]string{"serf", "query_duplicate_responses"}, 1) + return + } + + metrics.IncrCounter([]string{"serf", "query_responses"}, 1) + select { + case query.respCh <- NodeResponse{From: resp.From, Payload: resp.Payload}: + query.responses[resp.From] = struct{}{} + default: + s.logger.Printf("[WARN] serf: Failed to deliver query response, dropping") + } + } +} + +// handleNodeConflict is invoked when a join detects a conflict over a name. +// This means two different nodes (IP/Port) are claiming the same name. Memberlist +// will reject the "new" node mapping, but we can still be notified +func (s *Serf) handleNodeConflict(existing, other *memberlist.Node) { + // Log a basic warning if the node is not us... + if existing.Name != s.config.NodeName { + s.logger.Printf("[WARN] serf: Name conflict for '%s' both %s:%d and %s:%d are claiming", + existing.Name, existing.Addr, existing.Port, other.Addr, other.Port) + return + } + + // The current node is conflicting! This is an error + s.logger.Printf("[ERR] serf: Node name conflicts with another node at %s:%d. Names must be unique! (Resolution enabled: %v)", + other.Addr, other.Port, s.config.EnableNameConflictResolution) + + // If automatic resolution is enabled, kick off the resolution + if s.config.EnableNameConflictResolution { + go s.resolveNodeConflict() + } +} + +// resolveNodeConflict is used to determine which node should remain during +// a name conflict. This is done by running an internal query. 
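+// The local node survives only if a strict majority of responses report
+// its own address. Worked arithmetic matching the code below: with
+// responses == 5, majority == (5 / 2) + 1 == 3, so at least three
+// responses must carry this node's address and port for it to stay up.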
+func (s *Serf) resolveNodeConflict() { + // Get the local node + local := s.memberlist.LocalNode() + + // Start a name resolution query + qName := internalQueryName(conflictQuery) + payload := []byte(s.config.NodeName) + resp, err := s.Query(qName, payload, nil) + if err != nil { + s.logger.Printf("[ERR] serf: Failed to start name resolution query: %v", err) + return + } + + // Counter to determine winner + var responses, matching int + + // Gather responses + respCh := resp.ResponseCh() + for r := range respCh { + // Decode the response + if len(r.Payload) < 1 || messageType(r.Payload[0]) != messageConflictResponseType { + s.logger.Printf("[ERR] serf: Invalid conflict query response type: %v", r.Payload) + continue + } + var member Member + if err := decodeMessage(r.Payload[1:], &member); err != nil { + s.logger.Printf("[ERR] serf: Failed to decode conflict query response: %v", err) + continue + } + + // Update the counters + responses++ + if bytes.Equal(member.Addr, local.Addr) && member.Port == local.Port { + matching++ + } + } + + // Query over, determine if we should live + majority := (responses / 2) + 1 + if matching >= majority { + s.logger.Printf("[INFO] serf: majority in name conflict resolution [%d / %d]", + matching, responses) + return + } + + // Since we lost the vote, we need to exit + s.logger.Printf("[WARN] serf: minority in name conflict resolution, quitting [%d / %d]", + matching, responses) + if err := s.Shutdown(); err != nil { + s.logger.Printf("[ERR] serf: Failed to shutdown: %v", err) + } +} + +// handleReap periodically reaps the list of failed and left members, as well +// as old buffered intents. +func (s *Serf) handleReap() { + for { + select { + case <-time.After(s.config.ReapInterval): + s.memberLock.Lock() + now := time.Now() + s.failedMembers = s.reap(s.failedMembers, now, s.config.ReconnectTimeout) + s.leftMembers = s.reap(s.leftMembers, now, s.config.TombstoneTimeout) + reapIntents(s.recentIntents, now, s.config.RecentIntentTimeout) + s.memberLock.Unlock() + case <-s.shutdownCh: + return + } + } +} + +// handleReconnect attempts to reconnect to recently failed nodes +// on configured intervals. +func (s *Serf) handleReconnect() { + for { + select { + case <-time.After(s.config.ReconnectInterval): + s.reconnect() + case <-s.shutdownCh: + return + } + } +} + +// reap is called with a list of old members and a timeout, and removes +// members that have exceeded the timeout. The members are removed from +// both the old list and the members map itself. Locking is left to the caller. +func (s *Serf) reap(old []*memberState, now time.Time, timeout time.Duration) []*memberState { + n := len(old) + for i := 0; i < n; i++ { + m := old[i] + + // Skip if the timeout is not yet reached + if now.Sub(m.leaveTime) <= timeout { + continue + } + + // Delete from the list + old[i], old[n-1] = old[n-1], nil + old = old[:n-1] + n-- + i-- + + // Delete from members + delete(s.members, m.Name) + + // Tell the coordinate client the node has gone away and delete + // its cached coordinates. + if !s.config.DisableCoordinates { + s.coordClient.ForgetNode(m.Name) + + s.coordCacheLock.Lock() + delete(s.coordCache, m.Name) + s.coordCacheLock.Unlock() + } + + // Send an event along + s.logger.Printf("[INFO] serf: EventMemberReap: %s", m.Name) + if s.config.EventCh != nil { + s.config.EventCh <- MemberEvent{ + Type: EventMemberReap, + Members: []Member{m.Member}, + } + } + } + + return old +} + +// reconnect attempts to reconnect to recently failed nodes.
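+// The attempt below is probabilistic: each interval, a reconnect happens
+// with probability numFailed / numAlive. A worked example with assumed
+// numbers: in a 12-node cluster with 2 failed and 0 left members,
+// numAlive = 12 - 2 - 0 = 10, so prob = 2 / 10 = 0.2, i.e. roughly one
+// attempt every five reconnect intervals.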
+func (s *Serf) reconnect() { + s.memberLock.RLock() + + // Nothing to do if there are no failed members + n := len(s.failedMembers) + if n == 0 { + s.memberLock.RUnlock() + return + } + + // Probability we should attempt to reconnect is given + // by num failed / (num members - num failed - num left) + // This means that we probabilistically expect the cluster + // to attempt to connect to each failed member once per + // reconnect interval + numFailed := float32(len(s.failedMembers)) + numAlive := float32(len(s.members) - len(s.failedMembers) - len(s.leftMembers)) + if numAlive == 0 { + numAlive = 1 // guard against divide by zero + } + prob := numFailed / numAlive + if rand.Float32() > prob { + s.memberLock.RUnlock() + s.logger.Printf("[DEBUG] serf: forgoing reconnect for random throttling") + return + } + + // Select a random member to try and join + idx := rand.Int31n(int32(n)) + mem := s.failedMembers[idx] + s.memberLock.RUnlock() + + // Format the addr + addr := net.UDPAddr{IP: mem.Addr, Port: int(mem.Port)} + s.logger.Printf("[INFO] serf: attempting reconnect to %v %s", mem.Name, addr.String()) + + // Attempt to join at the memberlist level + s.memberlist.Join([]string{addr.String()}) +} + +// checkQueueDepth periodically checks the size of a queue to see if +// it is too large +func (s *Serf) checkQueueDepth(name string, queue *memberlist.TransmitLimitedQueue) { + for { + select { + case <-time.After(time.Second): + numq := queue.NumQueued() + metrics.AddSample([]string{"serf", "queue", name}, float32(numq)) + if numq >= s.config.QueueDepthWarning { + s.logger.Printf("[WARN] serf: %s queue depth: %d", name, numq) + } + if numq > s.config.MaxQueueDepth { + s.logger.Printf("[WARN] serf: %s queue depth (%d) exceeds limit (%d), dropping messages!", + name, numq, s.config.MaxQueueDepth) + queue.Prune(s.config.MaxQueueDepth) + } + case <-s.shutdownCh: + return + } + } +} + +// removeOldMember is used to remove an old member from a list of old +// members. +func removeOldMember(old []*memberState, name string) []*memberState { + for i, m := range old { + if m.Name == name { + n := len(old) + old[i], old[n-1] = old[n-1], nil + return old[:n-1] + } + } + + return old +} + +// reapIntents clears out any intents that are older than the timeout. Make sure +// the memberLock is held when passing in the Serf instance's recentIntents +// member. +func reapIntents(intents map[string]nodeIntent, now time.Time, timeout time.Duration) { + for node, intent := range intents { + if now.Sub(intent.WallTime) > timeout { + delete(intents, node) + } + } +} + +// upsertIntent will update an existing intent with the supplied Lamport time, +// or create a new entry. This will return true if a new entry was added. The +// stamper is used to capture the wall clock time for expiring these buffered +// intents. Make sure the memberLock is held when passing in the Serf instance's +// recentIntents member. +func upsertIntent(intents map[string]nodeIntent, node string, itype messageType, + ltime LamportTime, stamper func() time.Time) bool { + if intent, ok := intents[node]; !ok || ltime > intent.LTime { + intents[node] = nodeIntent{ + Type: itype, + WallTime: stamper(), + LTime: ltime, + } + return true + } + + return false +} + +// recentIntent checks the recent intent buffer for a matching entry for a given +// node, and returns the Lamport time, if an intent is present, indicated by the +// returned boolean. Make sure the memberLock is held for read when passing in +// the Serf instance's recentIntents member.
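+// An illustrative pairing with upsertIntent (a sketch mirroring the
+// calls in handleNodeJoin and the intent handlers above; "node-a" is a
+// made-up name):
+//
+//	upsertIntent(s.recentIntents, "node-a", messageJoinType, ltime, time.Now)
+//	if join, ok := recentIntent(s.recentIntents, "node-a", messageJoinType); ok {
+//		member.statusLTime = join
+//	}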
+func recentIntent(intents map[string]nodeIntent, node string, itype messageType) (LamportTime, bool) { + if intent, ok := intents[node]; ok && intent.Type == itype { + return intent.LTime, true + } + + return LamportTime(0), false +} + +// handleRejoin attempts to reconnect to previously known alive nodes +func (s *Serf) handleRejoin(previous []*PreviousNode) { + for _, prev := range previous { + // Do not attempt to join ourself + if prev.Name == s.config.NodeName { + continue + } + + s.logger.Printf("[INFO] serf: Attempting re-join to previously known node: %s", prev) + _, err := s.memberlist.Join([]string{prev.Addr}) + if err == nil { + s.logger.Printf("[INFO] serf: Re-joined to previously known node: %s", prev) + return + } + } + s.logger.Printf("[WARN] serf: Failed to re-join any previously known node") +} + +// encodeTags is used to encode a tag map +func (s *Serf) encodeTags(tags map[string]string) []byte { + // Support role-only backwards compatibility + if s.ProtocolVersion() < 3 { + role := tags["role"] + return []byte(role) + } + + // Use a magic byte prefix and msgpack encode the tags + var buf bytes.Buffer + buf.WriteByte(tagMagicByte) + enc := codec.NewEncoder(&buf, &codec.MsgpackHandle{}) + if err := enc.Encode(tags); err != nil { + panic(fmt.Sprintf("Failed to encode tags: %v", err)) + } + return buf.Bytes() +} + +// decodeTags is used to decode a tag map +func (s *Serf) decodeTags(buf []byte) map[string]string { + tags := make(map[string]string) + + // Backwards compatibility mode + if len(buf) == 0 || buf[0] != tagMagicByte { + tags["role"] = string(buf) + return tags + } + + // Decode the tags + r := bytes.NewReader(buf[1:]) + dec := codec.NewDecoder(r, &codec.MsgpackHandle{}) + if err := dec.Decode(&tags); err != nil { + s.logger.Printf("[ERR] serf: Failed to decode tags: %v", err) + } + return tags +} + +// Stats is used to provide operator debugging information +func (s *Serf) Stats() map[string]string { + toString := func(v uint64) string { + return strconv.FormatUint(v, 10) + } + stats := map[string]string{ + "members": toString(uint64(len(s.members))), + "failed": toString(uint64(len(s.failedMembers))), + "left": toString(uint64(len(s.leftMembers))), + "health_score": toString(uint64(s.memberlist.GetHealthScore())), + "member_time": toString(uint64(s.clock.Time())), + "event_time": toString(uint64(s.eventClock.Time())), + "query_time": toString(uint64(s.queryClock.Time())), + "intent_queue": toString(uint64(s.broadcasts.NumQueued())), + "event_queue": toString(uint64(s.eventBroadcasts.NumQueued())), + "query_queue": toString(uint64(s.queryBroadcasts.NumQueued())), + "encrypted": fmt.Sprintf("%v", s.EncryptionEnabled()), + } + return stats +} + +// writeKeyringFile will serialize the current keyring and save it to a file. +func (s *Serf) writeKeyringFile() error { + if len(s.config.KeyringFile) == 0 { + return nil + } + + keyring := s.config.MemberlistConfig.Keyring + keysRaw := keyring.GetKeys() + keysEncoded := make([]string, len(keysRaw)) + + for i, key := range keysRaw { + keysEncoded[i] = base64.StdEncoding.EncodeToString(key) + } + + encodedKeys, err := json.MarshalIndent(keysEncoded, "", " ") + if err != nil { + return fmt.Errorf("Failed to encode keys: %s", err) + } + + // Use 0600 for permissions because key data is sensitive + if err = ioutil.WriteFile(s.config.KeyringFile, encodedKeys, 0600); err != nil { + return fmt.Errorf("Failed to write keyring file: %s", err) + } + + // Success!
+ return nil +} + +// GetCoordinate returns the network coordinate of the local node. +func (s *Serf) GetCoordinate() (*coordinate.Coordinate, error) { + if !s.config.DisableCoordinates { + return s.coordClient.GetCoordinate(), nil + } + + return nil, fmt.Errorf("Coordinates are disabled") +} + +// GetCachedCoordinate returns the network coordinate for the node with the given +// name. This will only be valid if DisableCoordinates is set to false. +func (s *Serf) GetCachedCoordinate(name string) (coord *coordinate.Coordinate, ok bool) { + if !s.config.DisableCoordinates { + s.coordCacheLock.RLock() + defer s.coordCacheLock.RUnlock() + if coord, ok = s.coordCache[name]; ok { + return coord, true + } + + return nil, false + } + + return nil, false +} + +// NumNodes returns the number of nodes in the serf cluster, regardless of +// their health or status. +func (s *Serf) NumNodes() (numNodes int) { + s.memberLock.RLock() + numNodes = len(s.members) + s.memberLock.RUnlock() + + return numNodes +} diff --git a/vendor/github.com/hashicorp/serf/serf/snapshot.go b/vendor/github.com/hashicorp/serf/serf/snapshot.go new file mode 100644 index 0000000000..44f8a5175a --- /dev/null +++ b/vendor/github.com/hashicorp/serf/serf/snapshot.go @@ -0,0 +1,560 @@ +package serf + +import ( + "bufio" + "encoding/json" + "fmt" + "log" + "math/rand" + "net" + "os" + "strconv" + "strings" + "time" + + "github.com/armon/go-metrics" + "github.com/hashicorp/serf/coordinate" +) + +/* +Serf supports using a "snapshot" file that contains various +transactional data that is used to help Serf recover quickly +and gracefully from a failure. We append member events, as well +as the latest clock values to the file during normal operation, +and periodically checkpoint and roll over the file. During a restore, +we can replay the various member events to recall a list of known +nodes to re-join, as well as restore our clock values to avoid replaying +old events. +*/ + +const flushInterval = 500 * time.Millisecond +const clockUpdateInterval = 500 * time.Millisecond +const coordinateUpdateInterval = 60 * time.Second +const tmpExt = ".compact" + +// Snapshotter is responsible for ingesting events and persisting +// them to disk, and providing a recovery mechanism at start time. +type Snapshotter struct { + aliveNodes map[string]string + clock *LamportClock + coordClient *coordinate.Client + fh *os.File + buffered *bufio.Writer + inCh <-chan Event + lastFlush time.Time + lastClock LamportTime + lastEventClock LamportTime + lastQueryClock LamportTime + leaveCh chan struct{} + leaving bool + logger *log.Logger + maxSize int64 + path string + offset int64 + outCh chan<- Event + rejoinAfterLeave bool + shutdownCh <-chan struct{} + waitCh chan struct{} +} + +// PreviousNode is used to represent the previously known alive nodes +type PreviousNode struct { + Name string + Addr string +} + +func (p PreviousNode) String() string { + return fmt.Sprintf("%s: %s", p.Name, p.Addr) +} + +// NewSnapshotter creates a new Snapshotter that records events up to a +// max byte size before rotating the file. It can also be used to +// recover old state. Snapshotter works by reading an event channel it returns, +// passing through to an output channel, and persisting relevant events to disk. +// Setting rejoinAfterLeave makes leave not clear the state, and can be used +// if you intend to rejoin the same cluster after a leave. 
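+// The snapshot itself is a line-oriented text file; the records written
+// and replayed by this type look like the following (all values are
+// illustrative only):
+//
+//	alive: node-a 10.0.0.1:7946
+//	not-alive: node-b
+//	clock: 123
+//	event-clock: 45
+//	query-clock: 6
+//	coordinate: <json-encoded coordinate>
+//	leave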
+func NewSnapshotter(path string, + maxSize int, + rejoinAfterLeave bool, + logger *log.Logger, + clock *LamportClock, + coordClient *coordinate.Client, + outCh chan<- Event, + shutdownCh <-chan struct{}) (chan<- Event, *Snapshotter, error) { + inCh := make(chan Event, 1024) + + // Try to open the file + fh, err := os.OpenFile(path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0755) + if err != nil { + return nil, nil, fmt.Errorf("failed to open snapshot: %v", err) + } + + // Determine the offset + info, err := fh.Stat() + if err != nil { + fh.Close() + return nil, nil, fmt.Errorf("failed to stat snapshot: %v", err) + } + offset := info.Size() + + // Create the snapshotter + snap := &Snapshotter{ + aliveNodes: make(map[string]string), + clock: clock, + coordClient: coordClient, + fh: fh, + buffered: bufio.NewWriter(fh), + inCh: inCh, + lastClock: 0, + lastEventClock: 0, + lastQueryClock: 0, + leaveCh: make(chan struct{}), + logger: logger, + maxSize: int64(maxSize), + path: path, + offset: offset, + outCh: outCh, + rejoinAfterLeave: rejoinAfterLeave, + shutdownCh: shutdownCh, + waitCh: make(chan struct{}), + } + + // Recover the last known state + if err := snap.replay(); err != nil { + fh.Close() + return nil, nil, err + } + + // Start handling new commands + go snap.stream() + return inCh, snap, nil +} + +// LastClock returns the last known clock time +func (s *Snapshotter) LastClock() LamportTime { + return s.lastClock +} + +// LastEventClock returns the last known event clock time +func (s *Snapshotter) LastEventClock() LamportTime { + return s.lastEventClock +} + +// LastQueryClock returns the last known query clock time +func (s *Snapshotter) LastQueryClock() LamportTime { + return s.lastQueryClock +} + +// AliveNodes returns the last known alive nodes +func (s *Snapshotter) AliveNodes() []*PreviousNode { + // Copy the previously known + previous := make([]*PreviousNode, 0, len(s.aliveNodes)) + for name, addr := range s.aliveNodes { + previous = append(previous, &PreviousNode{name, addr}) + } + + // Randomize the order to prevent hot shards + for i := range previous { + j := rand.Intn(i + 1) + previous[i], previous[j] = previous[j], previous[i] + } + return previous +} + +// Wait is used to wait until the snapshotter finishes shutting down +func (s *Snapshotter) Wait() { + <-s.waitCh +} + +// Leave is used to remove known nodes to prevent a restart from +// causing a join. Otherwise nodes will re-join after leaving!
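+// A minimal wiring sketch (hypothetical caller; variable names are
+// assumptions): the channel returned by NewSnapshotter feeds events in,
+// and Leave followed by closing shutdownCh and Wait brackets a graceful
+// exit.
+//
+//	eventCh, snap, err := NewSnapshotter(path, 128*1024, false, logger,
+//		&clock, nil, outCh, shutdownCh)
+//	if err != nil {
+//		return err
+//	}
+//	// ... feed Serf events into eventCh ...
+//	snap.Leave()
+//	close(shutdownCh)
+//	snap.Wait()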
+func (s *Snapshotter) Leave() { + select { + case s.leaveCh <- struct{}{}: + case <-s.shutdownCh: + } +} + +// stream is a long-running routine that is used to handle events +func (s *Snapshotter) stream() { + clockTicker := time.NewTicker(clockUpdateInterval) + defer clockTicker.Stop() + + coordinateTicker := time.NewTicker(coordinateUpdateInterval) + defer coordinateTicker.Stop() + + for { + select { + case <-s.leaveCh: + s.leaving = true + + // If we plan to re-join, keep our state + if !s.rejoinAfterLeave { + s.aliveNodes = make(map[string]string) + } + s.tryAppend("leave\n") + if err := s.buffered.Flush(); err != nil { + s.logger.Printf("[ERR] serf: failed to flush leave to snapshot: %v", err) + } + if err := s.fh.Sync(); err != nil { + s.logger.Printf("[ERR] serf: failed to sync leave to snapshot: %v", err) + } + + case e := <-s.inCh: + // Forward the event immediately + if s.outCh != nil { + s.outCh <- e + } + + // Stop recording events after a leave is issued + if s.leaving { + continue + } + switch typed := e.(type) { + case MemberEvent: + s.processMemberEvent(typed) + case UserEvent: + s.processUserEvent(typed) + case *Query: + s.processQuery(typed) + default: + s.logger.Printf("[ERR] serf: Unknown event to snapshot: %#v", e) + } + + case <-clockTicker.C: + s.updateClock() + + case <-coordinateTicker.C: + s.updateCoordinate() + + case <-s.shutdownCh: + if err := s.buffered.Flush(); err != nil { + s.logger.Printf("[ERR] serf: failed to flush snapshot: %v", err) + } + if err := s.fh.Sync(); err != nil { + s.logger.Printf("[ERR] serf: failed to sync snapshot: %v", err) + } + s.fh.Close() + close(s.waitCh) + return + } + } +} + +// processMemberEvent is used to handle a single member event +func (s *Snapshotter) processMemberEvent(e MemberEvent) { + switch e.Type { + case EventMemberJoin: + for _, mem := range e.Members { + addr := net.TCPAddr{IP: mem.Addr, Port: int(mem.Port)} + s.aliveNodes[mem.Name] = addr.String() + s.tryAppend(fmt.Sprintf("alive: %s %s\n", mem.Name, addr.String())) + } + + case EventMemberLeave: + fallthrough + case EventMemberFailed: + for _, mem := range e.Members { + delete(s.aliveNodes, mem.Name) + s.tryAppend(fmt.Sprintf("not-alive: %s\n", mem.Name)) + } + } + s.updateClock() +} + +// updateClock is called periodically to check if we should update our +// clock value. This is done after member events but should also be done +// periodically due to race conditions with join and leave intents +func (s *Snapshotter) updateClock() { + lastSeen := s.clock.Time() - 1 + if lastSeen > s.lastClock { + s.lastClock = lastSeen + s.tryAppend(fmt.Sprintf("clock: %d\n", s.lastClock)) + } +} + +// updateCoordinate is called periodically to write out the current local +// coordinate. It's safe to call this if coordinates aren't enabled (nil +// client) and it will be a no-op.
+func (s *Snapshotter) updateCoordinate() { + if s.coordClient != nil { + encoded, err := json.Marshal(s.coordClient.GetCoordinate()) + if err != nil { + s.logger.Printf("[ERR] serf: Failed to encode coordinate: %v", err) + } else { + s.tryAppend(fmt.Sprintf("coordinate: %s\n", encoded)) + } + } +} + +// processUserEvent is used to handle a single user event +func (s *Snapshotter) processUserEvent(e UserEvent) { + // Ignore old clocks + if e.LTime <= s.lastEventClock { + return + } + s.lastEventClock = e.LTime + s.tryAppend(fmt.Sprintf("event-clock: %d\n", e.LTime)) +} + +// processQuery is used to handle a single query event +func (s *Snapshotter) processQuery(q *Query) { + // Ignore old clocks + if q.LTime <= s.lastQueryClock { + return + } + s.lastQueryClock = q.LTime + s.tryAppend(fmt.Sprintf("query-clock: %d\n", q.LTime)) +} + +// tryAppend will invoke appendLine but will not return an error +func (s *Snapshotter) tryAppend(l string) { + if err := s.appendLine(l); err != nil { + s.logger.Printf("[ERR] serf: Failed to update snapshot: %v", err) + } +} + +// appendLine is used to append a line to the existing log +func (s *Snapshotter) appendLine(l string) error { + defer metrics.MeasureSince([]string{"serf", "snapshot", "appendLine"}, time.Now()) + + n, err := s.buffered.WriteString(l) + if err != nil { + return err + } + + // Check if we should flush + now := time.Now() + if now.Sub(s.lastFlush) > flushInterval { + s.lastFlush = now + if err := s.buffered.Flush(); err != nil { + return err + } + } + + // Check if a compaction is necessary + s.offset += int64(n) + if s.offset > s.maxSize { + return s.compact() + } + return nil +} + +// compact is used to compact the snapshot once it is too large +func (s *Snapshotter) compact() error { + defer metrics.MeasureSince([]string{"serf", "snapshot", "compact"}, time.Now()) + + // Try to open the new file + newPath := s.path + tmpExt + fh, err := os.OpenFile(newPath, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755) + if err != nil { + return fmt.Errorf("failed to open new snapshot: %v", err) + } + + // Create a buffered writer + buf := bufio.NewWriter(fh) + + // Write out the live nodes + var offset int64 + for name, addr := range s.aliveNodes { + line := fmt.Sprintf("alive: %s %s\n", name, addr) + n, err := buf.WriteString(line) + if err != nil { + fh.Close() + return err + } + offset += int64(n) + } + + // Write out the clocks + line := fmt.Sprintf("clock: %d\n", s.lastClock) + n, err := buf.WriteString(line) + if err != nil { + fh.Close() + return err + } + offset += int64(n) + + line = fmt.Sprintf("event-clock: %d\n", s.lastEventClock) + n, err = buf.WriteString(line) + if err != nil { + fh.Close() + return err + } + offset += int64(n) + + line = fmt.Sprintf("query-clock: %d\n", s.lastQueryClock) + n, err = buf.WriteString(line) + if err != nil { + fh.Close() + return err + } + offset += int64(n) + + // Write out the coordinate. + if s.coordClient != nil { + encoded, err := json.Marshal(s.coordClient.GetCoordinate()) + if err != nil { + fh.Close() + return err + } + + line = fmt.Sprintf("coordinate: %s\n", encoded) + n, err = buf.WriteString(line) + if err != nil { + fh.Close() + return err + } + offset += int64(n) + } + + // Flush the new snapshot + err = buf.Flush() + fh.Close() + if err != nil { + return fmt.Errorf("failed to flush new snapshot: %v", err) + } + + // We now need to swap the old snapshot file with the new snapshot.
+ // Turns out, Windows won't let us rename the files if we have + // open handles to them or if the destination already exists. This + // means we are forced to close the existing handles, delete the + // old file, move the new one in place, and then re-open the file + // handles. + + // Flush the existing snapshot, ignoring errors since we will + // delete it momentarily. + s.buffered.Flush() + s.buffered = nil + + // Close the file handle to the old snapshot + s.fh.Close() + s.fh = nil + + // Delete the old file + if err := os.Remove(s.path); err != nil { + return fmt.Errorf("failed to remove old snapshot: %v", err) + } + + // Move the new file into place + if err := os.Rename(newPath, s.path); err != nil { + return fmt.Errorf("failed to install new snapshot: %v", err) + } + + // Open the new snapshot + fh, err = os.OpenFile(s.path, os.O_RDWR|os.O_APPEND|os.O_CREATE, 0755) + if err != nil { + return fmt.Errorf("failed to open snapshot: %v", err) + } + buf = bufio.NewWriter(fh) + + // Rotate our handles + s.fh = fh + s.buffered = buf + s.offset = offset + s.lastFlush = time.Now() + return nil +} + +// replay is used to reset our internal state by replaying +// the snapshot file. It is used at initialization time to read old +// state +func (s *Snapshotter) replay() error { + // Seek to the beginning + if _, err := s.fh.Seek(0, os.SEEK_SET); err != nil { + return err + } + + // Read each line + reader := bufio.NewReader(s.fh) + for { + line, err := reader.ReadString('\n') + if err != nil { + break + } + + // Skip the newline + line = line[:len(line)-1] + + // Switch on the prefix + if strings.HasPrefix(line, "alive: ") { + info := strings.TrimPrefix(line, "alive: ") + addrIdx := strings.LastIndex(info, " ") + if addrIdx == -1 { + s.logger.Printf("[WARN] serf: Failed to parse address: %v", line) + continue + } + addr := info[addrIdx+1:] + name := info[:addrIdx] + s.aliveNodes[name] = addr + + } else if strings.HasPrefix(line, "not-alive: ") { + name := strings.TrimPrefix(line, "not-alive: ") + delete(s.aliveNodes, name) + + } else if strings.HasPrefix(line, "clock: ") { + timeStr := strings.TrimPrefix(line, "clock: ") + timeInt, err := strconv.ParseUint(timeStr, 10, 64) + if err != nil { + s.logger.Printf("[WARN] serf: Failed to convert clock time: %v", err) + continue + } + s.lastClock = LamportTime(timeInt) + + } else if strings.HasPrefix(line, "event-clock: ") { + timeStr := strings.TrimPrefix(line, "event-clock: ") + timeInt, err := strconv.ParseUint(timeStr, 10, 64) + if err != nil { + s.logger.Printf("[WARN] serf: Failed to convert event clock time: %v", err) + continue + } + s.lastEventClock = LamportTime(timeInt) + + } else if strings.HasPrefix(line, "query-clock: ") { + timeStr := strings.TrimPrefix(line, "query-clock: ") + timeInt, err := strconv.ParseUint(timeStr, 10, 64) + if err != nil { + s.logger.Printf("[WARN] serf: Failed to convert query clock time: %v", err) + continue + } + s.lastQueryClock = LamportTime(timeInt) + + } else if strings.HasPrefix(line, "coordinate: ") { + if s.coordClient == nil { + s.logger.Printf("[WARN] serf: Ignoring snapshot coordinates since they are disabled") + continue + } + + coordStr := strings.TrimPrefix(line, "coordinate: ") + var coord coordinate.Coordinate + err := json.Unmarshal([]byte(coordStr), &coord) + if err != nil { + s.logger.Printf("[WARN] serf: Failed to decode coordinate: %v", err) + continue + } + s.coordClient.SetCoordinate(&coord) + } else if line == "leave" { + // Ignore a leave if we plan on re-joining + if
s.rejoinAfterLeave { + s.logger.Printf("[INFO] serf: Ignoring previous leave in snapshot") + continue + } + s.aliveNodes = make(map[string]string) + s.lastClock = 0 + s.lastEventClock = 0 + s.lastQueryClock = 0 + + } else if strings.HasPrefix(line, "#") { + // Skip comment lines + + } else { + s.logger.Printf("[WARN] serf: Unrecognized snapshot line: %v", line) + } + } + + // Seek to the end + if _, err := s.fh.Seek(0, os.SEEK_END); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/serf/website/source/LICENSE b/vendor/github.com/hashicorp/serf/website/source/LICENSE new file mode 100644 index 0000000000..36c29d7f7b --- /dev/null +++ b/vendor/github.com/hashicorp/serf/website/source/LICENSE @@ -0,0 +1,10 @@ +# Proprietary License + +This license is temporary while a more official one is drafted. However, +this should make it clear: + +* The text contents of this website are MPL 2.0 licensed. + +* The design contents of this website are proprietary and may not be reproduced + or reused in any way other than to run the Serf website locally. The license + for the design is owned solely by HashiCorp, Inc. diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE deleted file mode 100644 index 686680298d..0000000000 --- a/vendor/github.com/imdario/mergo/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go deleted file mode 100644 index 6e9aa7baf3..0000000000 --- a/vendor/github.com/imdario/mergo/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package mergo merges same-type structs and maps by setting default values in zero-value fields. 
- -Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). - -Usage - -From my own work-in-progress project: - - type networkConfig struct { - Protocol string - Address string - ServerType string `json: "server_type"` - Port uint16 - } - - type FssnConfig struct { - Network networkConfig - } - - var fssnDefault = FssnConfig { - networkConfig { - "tcp", - "127.0.0.1", - "http", - 31560, - }, - } - - // Inside a function [...] - - if err := mergo.Merge(&config, fssnDefault); err != nil { - log.Fatal(err) - } - - // More code [...] - -*/ -package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go deleted file mode 100644 index 6ea38e636b..0000000000 --- a/vendor/github.com/imdario/mergo/map.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2014 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "fmt" - "reflect" - "unicode" - "unicode/utf8" -) - -func changeInitialCase(s string, mapper func(rune) rune) string { - if s == "" { - return s - } - r, n := utf8.DecodeRuneInString(s) - return string(mapper(r)) + s[n:] -} - -func isExported(field reflect.StructField) bool { - r, _ := utf8.DecodeRuneInString(field.Name) - return r >= 'A' && r <= 'Z' -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - zeroValue := reflect.Value{} - switch dst.Kind() { - case reflect.Map: - dstMap := dst.Interface().(map[string]interface{}) - for i, n := 0, src.NumField(); i < n; i++ { - srcType := src.Type() - field := srcType.Field(i) - if !isExported(field) { - continue - } - fieldName := field.Name - fieldName = changeInitialCase(fieldName, unicode.ToLower) - if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { - dstMap[fieldName] = src.Field(i).Interface() - } - } - case reflect.Ptr: - if dst.IsNil() { - v := reflect.New(dst.Type().Elem()) - dst.Set(v) - } - dst = dst.Elem() - fallthrough - case reflect.Struct: - srcMap := src.Interface().(map[string]interface{}) - for key := range srcMap { - srcValue := srcMap[key] - fieldName := changeInitialCase(key, unicode.ToUpper) - dstElement := dst.FieldByName(fieldName) - if dstElement == zeroValue { - // We discard it because the field doesn't exist. - continue - } - srcElement := reflect.ValueOf(srcValue) - dstKind := dstElement.Kind() - srcKind := srcElement.Kind() - if srcKind == reflect.Ptr && dstKind != reflect.Ptr { - srcElement = srcElement.Elem() - srcKind = reflect.TypeOf(srcElement.Interface()).Kind() - } else if dstKind == reflect.Ptr { - // Can this work? I guess it can't. 
- if srcKind != reflect.Ptr && srcElement.CanAddr() { - srcPtr := srcElement.Addr() - srcElement = reflect.ValueOf(srcPtr) - srcKind = reflect.Ptr - } - } - - if !srcElement.IsValid() { - continue - } - if srcKind == dstKind { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else if srcKind == reflect.Map { - if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - } else { - return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) - } - } - } - return -} - -// Map sets fields' values in dst from src. -// src can be a map with string keys or a struct. dst must be the opposite: -// if src is a map, dst must be a valid pointer to struct. If src is a struct, -// dst must be map[string]interface{}. -// It won't merge unexported (private) fields and will do recursively -// any exported field. -// If dst is a map, keys will be src fields' names in lower camel case. -// Missing key in src that doesn't match a field in dst will be skipped. This -// doesn't apply if dst is a map. -// This is separated method from Merge because it is cleaner and it keeps sane -// semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, opts...) -} - -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by -// non-empty src attribute values. -// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return _map(dst, src, append(opts, WithOverride)...) -} - -func _map(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - // To be friction-less, we redirect equal-type arguments - // to deepMerge. Only because arguments can be anything. - if vSrc.Kind() == vDst.Kind() { - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) - } - switch vSrc.Kind() { - case reflect.Struct: - if vDst.Kind() != reflect.Map { - return ErrExpectedMapAsDestination - } - case reflect.Map: - if vDst.Kind() != reflect.Struct { - return ErrExpectedStructAsDestination - } - default: - return ErrNotSupported - } - return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go deleted file mode 100644 index f0e17924ac..0000000000 --- a/vendor/github.com/imdario/mergo/merge.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. 
- -package mergo - -import ( - "reflect" -) - -func hasExportedField(dst reflect.Value) (exported bool) { - for i, n := 0, dst.NumField(); i < n; i++ { - field := dst.Type().Field(i) - if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { - exported = exported || len(field.PkgPath) == 0 - } - } - return -} - -type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers -} - -type Transformers interface { - Transformer(reflect.Type) func(dst, src reflect.Value) error -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { - overwrite := config.Overwrite - - if !src.IsValid() { - return - } - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - - if config.Transformers != nil && !isEmptyValue(dst) { - if fn := config.Transformers.Transformer(dst.Type()); fn != nil { - err = fn(dst, src) - return - } - } - - switch dst.Kind() { - case reflect.Struct: - if hasExportedField(dst) { - for i, n := 0, dst.NumField(); i < n; i++ { - if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { - return - } - } - } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - case reflect.Map: - if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - for _, key := range src.MapKeys() { - srcElement := src.MapIndex(key) - if !srcElement.IsValid() { - continue - } - dstElement := dst.MapIndex(key) - switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: - if srcElement.IsNil() { - continue - } - fallthrough - default: - if !srcElement.CanInterface() { - continue - } - switch reflect.TypeOf(srcElement.Interface()).Kind() { - case reflect.Struct: - fallthrough - case reflect.Ptr: - fallthrough - case reflect.Map: - if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { - return - } - case reflect.Slice: - srcSlice := reflect.ValueOf(srcElement.Interface()) - - var dstSlice reflect.Value - if !dstElement.IsValid() || dstElement.IsNil() { - dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) - } else { - dstSlice = reflect.ValueOf(dstElement.Interface()) - } - - dstSlice = reflect.AppendSlice(dstSlice, srcSlice) - dst.SetMapIndex(key, dstSlice) - } - } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { - continue - } - - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - dst.SetMapIndex(key, srcElement) - } - } - case reflect.Slice: - if !dst.CanSet() { - break - } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { - dst.Set(src) - } else { - dst.Set(reflect.AppendSlice(dst, src)) - } - case reflect.Ptr: - fallthrough - case reflect.Interface: - if src.IsNil() { - break - } - if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || 
isEmptyValue(dst)) { - dst.Set(src) - } - } else if src.Kind() == reflect.Ptr { - if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - } else if dst.Elem().Type() == src.Type() { - if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { - return - } - } else { - return ErrDifferentArgumentsTypes - } - break - } - if dst.IsNil() || overwrite { - if dst.CanSet() && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return - } - default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) - } - } - return -} - -// Merge will fill any empty for value type attributes on the dst struct using corresponding -// src attributes if they themselves are not empty. dst and src must be valid same-type structs -// and dst must be a pointer to struct. -// It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, opts...) -} - -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by -// non-empty src attribute values. -// Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { - return merge(dst, src, append(opts, WithOverride)...) -} - -// WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers Transformers) func(*Config) { - return func(config *Config) { - config.Transformers = transformers - } -} - -// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *Config) { - config.Overwrite = true -} - -// WithAppendSlice will make merge append slices instead of overwriting it -func WithAppendSlice(config *Config) { - config.AppendSlice = true -} - -func merge(dst, src interface{}, opts ...func(*Config)) error { - var ( - vDst, vSrc reflect.Value - err error - ) - - config := &Config{} - - for _, opt := range opts { - opt(config) - } - - if vDst, vSrc, err = resolveValues(dst, src); err != nil { - return err - } - if vDst.Type() != vSrc.Type() { - return ErrDifferentArgumentsTypes - } - return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) -} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go deleted file mode 100644 index 785618cd07..0000000000 --- a/vendor/github.com/imdario/mergo/mergo.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright 2013 Dario Castañé. All rights reserved. -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on src/pkg/reflect/deepequal.go from official -// golang's stdlib. - -package mergo - -import ( - "errors" - "reflect" -) - -// Errors reported by Mergo when it finds invalid arguments. -var ( - ErrNilArguments = errors.New("src and dst must not be nil") - ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") - ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") - ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") -) - -// During deepMerge, must keep track of checks that are -// in progress. 
The comparison algorithm assumes that all -// checks in progress are true when it reencounters them. -// Visited are stored in a map indexed by 17 * a1 + a2; -type visit struct { - ptr uintptr - typ reflect.Type - next *visit -} - -// From src/pkg/encoding/json/encode.go. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr, reflect.Func: - return v.IsNil() - case reflect.Invalid: - return true - } - return false -} - -func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { - if dst == nil || src == nil { - err = ErrNilArguments - return - } - vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { - err = ErrNotSupported - return - } - vSrc = reflect.ValueOf(src) - // We check if vSrc is a pointer to dereference it. - if vSrc.Kind() == reflect.Ptr { - vSrc = vSrc.Elem() - } - return -} - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... - visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml deleted file mode 100644 index 2f1ad0082b..0000000000 --- a/vendor/github.com/imdario/mergo/testdata/license.yml +++ /dev/null @@ -1,4 +0,0 @@ -import: ../../../../fossene/db/schema/thing.yml -fields: - site: string - author: root diff --git a/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/ishidawataru/sctp/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
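For reference, a minimal sketch of driving the mergo API removed earlier in this patch. The redisConfig struct and its values are illustrative, not taken from kaniko:

package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// redisConfig is an illustrative struct; any struct with exported fields works.
type redisConfig struct {
	Address string
	Port    int
	DBs     []int
}

func main() {
	dst := redisConfig{Port: 6379}
	src := redisConfig{Address: "127.0.0.1", Port: 6380, DBs: []int{1}}

	// Plain Merge only fills empty dst fields, so Port stays 6379.
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst) // {127.0.0.1 6379 [1]}

	// WithOverride lets non-empty src values win; WithAppendSlice appends
	// src slices onto dst slices instead of replacing them.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}
	fmt.Println(dst) // {127.0.0.1 6380 [1 1]}
}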
diff --git a/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/ishidawataru/sctp/sctp.go new file mode 100644 index 0000000000..cac1a889ca --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp.go @@ -0,0 +1,656 @@ +package sctp + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + "unsafe" +) + +const ( + SOL_SCTP = 132 + + SCTP_BINDX_ADD_ADDR = 0x01 + SCTP_BINDX_REM_ADDR = 0x02 + + MSG_NOTIFICATION = 0x8000 +) + +const ( + SCTP_RTOINFO = iota + SCTP_ASSOCINFO + SCTP_INITMSG + SCTP_NODELAY + SCTP_AUTOCLOSE + SCTP_SET_PEER_PRIMARY_ADDR + SCTP_PRIMARY_ADDR + SCTP_ADAPTATION_LAYER + SCTP_DISABLE_FRAGMENTS + SCTP_PEER_ADDR_PARAMS + SCTP_DEFAULT_SENT_PARAM + SCTP_EVENTS + SCTP_I_WANT_MAPPED_V4_ADDR + SCTP_MAXSEG + SCTP_STATUS + SCTP_GET_PEER_ADDR_INFO + SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME + + SCTP_SOCKOPT_BINDX_ADD = 100 + SCTP_SOCKOPT_BINDX_REM = 101 + SCTP_SOCKOPT_PEELOFF = 102 + SCTP_GET_PEER_ADDRS = 108 + SCTP_GET_LOCAL_ADDRS = 109 + SCTP_SOCKOPT_CONNECTX = 110 + SCTP_SOCKOPT_CONNECTX3 = 111 +) + +const ( + SCTP_EVENT_DATA_IO = 1 << iota + SCTP_EVENT_ASSOCIATION + SCTP_EVENT_ADDRESS + SCTP_EVENT_SEND_FAILURE + SCTP_EVENT_PEER_ERROR + SCTP_EVENT_SHUTDOWN + SCTP_EVENT_PARTIAL_DELIVERY + SCTP_EVENT_ADAPTATION_LAYER + SCTP_EVENT_AUTHENTICATION + SCTP_EVENT_SENDER_DRY + + SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY +) + +type SCTPNotificationType int + +const ( + SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15)) + SCTP_ASSOC_CHANGE + SCTP_PEER_ADDR_CHANGE + SCTP_SEND_FAILED + SCTP_REMOTE_ERROR + SCTP_SHUTDOWN_EVENT + SCTP_PARTIAL_DELIVERY_EVENT + SCTP_ADAPTATION_INDICATION + SCTP_AUTHENTICATION_INDICATION + SCTP_SENDER_DRY_EVENT +) + +type NotificationHandler func([]byte) error + +type EventSubscribe struct { + DataIO uint8 + Association uint8 + Address uint8 + SendFailure uint8 + PeerError uint8 + Shutdown uint8 + PartialDelivery uint8 + AdaptationLayer uint8 + Authentication uint8 + SenderDry uint8 +} + +const ( + SCTP_CMSG_INIT = iota + SCTP_CMSG_SNDRCV + SCTP_CMSG_SNDINFO + SCTP_CMSG_RCVINFO + SCTP_CMSG_NXTINFO +) + +const ( + SCTP_UNORDERED = 1 << iota + SCTP_ADDR_OVER + SCTP_ABORT + SCTP_SACK_IMMEDIATELY + SCTP_EOF +) + +const ( + SCTP_MAX_STREAM = 0xffff +) + +type InitMsg struct { + NumOstreams uint16 + MaxInstreams uint16 + MaxAttempts uint16 + MaxInitTimeout uint16 +} + +type SndRcvInfo struct { + Stream uint16 + SSN uint16 + Flags uint16 + _ uint16 + PPID uint32 + Context uint32 + TTL uint32 + TSN uint32 + CumTSN uint32 + AssocID int32 +} + +type SndInfo struct { + SID uint16 + Flags uint16 + PPID uint32 + Context uint32 + AssocID int32 +} + +type GetAddrsOld struct { + AssocID int32 + AddrNum int32 + Addrs uintptr +} + +type NotificationHeader struct { + Type uint16 + Flags uint16 + Length uint32 +} + +type SCTPState uint16 + +const ( + SCTP_COMM_UP = SCTPState(iota) + SCTP_COMM_LOST + SCTP_RESTART + SCTP_SHUTDOWN_COMP + SCTP_CANT_STR_ASSOC +) + +var nativeEndian binary.ByteOrder +var sndRcvInfoSize uintptr + +func init() { + i := uint16(1) + if *(*byte)(unsafe.Pointer(&i)) == 0 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + info := SndRcvInfo{} + 
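// cache the struct size once; SCTPSndRcvInfoWrappedConn uses it below to split the SndRcvInfo header off the payload +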
sndRcvInfoSize = unsafe.Sizeof(info) +} + +func toBuf(v interface{}) []byte { + var buf bytes.Buffer + binary.Write(&buf, nativeEndian, v) + return buf.Bytes() +} + +func htons(h uint16) uint16 { + if nativeEndian == binary.LittleEndian { + return (h << 8 & 0xff00) | (h >> 8 & 0xff) + } + return h +} + +var ntohs = htons + +func setNumOstreams(fd, num int) error { + param := InitMsg{ + NumOstreams: uint16(num), + } + optlen := unsafe.Sizeof(param) + _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) + return err +} + +type SCTPAddr struct { + IP []net.IP + Port int +} + +func (a *SCTPAddr) ToRawSockAddrBuf() []byte { + buf := []byte{} + p := htons(uint16(a.Port)) + for _, ip := range a.IP { + if ip.To4() != nil { + s := syscall.RawSockaddrInet4{ + Family: syscall.AF_INET, + Port: p, + } + copy(s.Addr[:], ip.To4()) + buf = append(buf, toBuf(s)...) + } else { + s := syscall.RawSockaddrInet6{ + Family: syscall.AF_INET6, + Port: p, + } + copy(s.Addr[:], ip) + buf = append(buf, toBuf(s)...) + } + } + return buf +} + +func (a *SCTPAddr) String() string { + var b bytes.Buffer + + for n, i := range a.IP { + if a.IP[n].To4() != nil { + b.WriteString(i.String()) + } else if a.IP[n].To16() != nil { + b.WriteRune('[') + b.WriteString(i.String()) + b.WriteRune(']') + } + if n < len(a.IP)-1 { + b.WriteRune('/') + } + } + b.WriteRune(':') + b.WriteString(strconv.Itoa(a.Port)) + return b.String() +} + +func (a *SCTPAddr) Network() string { return "sctp" } + +func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) { + tcpnet := "" + switch network { + case "", "sctp": + case "sctp4": + tcpnet = "tcp4" + case "sctp6": + tcpnet = "tcp6" + default: + return nil, fmt.Errorf("invalid net: %s", network) + } + elems := strings.Split(addrs, "/") + if len(elems) == 0 { + return nil, fmt.Errorf("invalid input: %s", addrs) + } + ipaddrs := make([]net.IP, 0, len(elems)) + for _, e := range elems[:len(elems)-1] { + tcpa, err := net.ResolveTCPAddr(tcpnet, e+":") + if err != nil { + return nil, err + } + ipaddrs = append(ipaddrs, tcpa.IP) + } + tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1]) + if err != nil { + return nil, err + } + if tcpa.IP != nil { + ipaddrs = append(ipaddrs, tcpa.IP) + } else { + ipaddrs = nil + } + return &SCTPAddr{ + IP: ipaddrs, + Port: tcpa.Port, + }, nil +} + +func SCTPConnect(fd int, addr *SCTPAddr) (int, error) { + buf := addr.ToRawSockAddrBuf() + param := GetAddrsOld{ + AddrNum: int32(len(buf)), + Addrs: uintptr(uintptr(unsafe.Pointer(&buf[0]))), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err == nil { + return int(param.AssocID), nil + } else if err != syscall.ENOPROTOOPT { + return 0, err + } + r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) + return int(r0), err +} + +func SCTPBind(fd int, addr *SCTPAddr, flags int) error { + var option uintptr + switch flags { + case SCTP_BINDX_ADD_ADDR: + option = SCTP_SOCKOPT_BINDX_ADD + case SCTP_BINDX_REM_ADDR: + option = SCTP_SOCKOPT_BINDX_REM + default: + return syscall.EINVAL + } + + buf := addr.ToRawSockAddrBuf() + _, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf))) + return err +} + +type SCTPConn struct { + _fd int32 + notificationHandler NotificationHandler +} + +func (c *SCTPConn) fd() int { + return int(atomic.LoadInt32(&c._fd)) +} + +func NewSCTPConn(fd int, handler 
NotificationHandler) *SCTPConn { + conn := &SCTPConn{ + _fd: int32(fd), + notificationHandler: handler, + } + return conn +} + +func (c *SCTPConn) Write(b []byte) (int, error) { + return c.SCTPWrite(b, nil) +} + +func (c *SCTPConn) Read(b []byte) (int, error) { + n, _, err := c.SCTPRead(b) + if n < 0 { + n = 0 + } + return n, err +} + +func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error { + param := InitMsg{ + NumOstreams: uint16(numOstreams), + MaxInstreams: uint16(maxInstreams), + MaxAttempts: uint16(maxAttempts), + MaxInitTimeout: uint16(maxInitTimeout), + } + optlen := unsafe.Sizeof(param) + _, _, err := setsockopt(c.fd(), SCTP_INITMSG, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) + return err +} + +func (c *SCTPConn) SubscribeEvents(flags int) error { + var d, a, ad, sf, p, sh, pa, ada, au, se uint8 + if flags&SCTP_EVENT_DATA_IO > 0 { + d = 1 + } + if flags&SCTP_EVENT_ASSOCIATION > 0 { + a = 1 + } + if flags&SCTP_EVENT_ADDRESS > 0 { + ad = 1 + } + if flags&SCTP_EVENT_SEND_FAILURE > 0 { + sf = 1 + } + if flags&SCTP_EVENT_PEER_ERROR > 0 { + p = 1 + } + if flags&SCTP_EVENT_SHUTDOWN > 0 { + sh = 1 + } + if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 { + pa = 1 + } + if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 { + ada = 1 + } + if flags&SCTP_EVENT_AUTHENTICATION > 0 { + au = 1 + } + if flags&SCTP_EVENT_SENDER_DRY > 0 { + se = 1 + } + param := EventSubscribe{ + DataIO: d, + Association: a, + Address: ad, + SendFailure: sf, + PeerError: p, + Shutdown: sh, + PartialDelivery: pa, + AdaptationLayer: ada, + Authentication: au, + SenderDry: se, + } + optlen := unsafe.Sizeof(param) + _, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(optlen)) + return err +} + +func (c *SCTPConn) SubscribedEvents() (int, error) { + param := EventSubscribe{} + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return 0, err + } + var flags int + if param.DataIO > 0 { + flags |= SCTP_EVENT_DATA_IO + } + if param.Association > 0 { + flags |= SCTP_EVENT_ASSOCIATION + } + if param.Address > 0 { + flags |= SCTP_EVENT_ADDRESS + } + if param.SendFailure > 0 { + flags |= SCTP_EVENT_SEND_FAILURE + } + if param.PeerError > 0 { + flags |= SCTP_EVENT_PEER_ERROR + } + if param.Shutdown > 0 { + flags |= SCTP_EVENT_SHUTDOWN + } + if param.PartialDelivery > 0 { + flags |= SCTP_EVENT_PARTIAL_DELIVERY + } + if param.AdaptationLayer > 0 { + flags |= SCTP_EVENT_ADAPTATION_LAYER + } + if param.Authentication > 0 { + flags |= SCTP_EVENT_AUTHENTICATION + } + if param.SenderDry > 0 { + flags |= SCTP_EVENT_SENDER_DRY + } + return flags, nil +} + +func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error { + optlen := unsafe.Sizeof(*info) + _, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen)) + return err +} + +func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) { + info := &SndRcvInfo{} + optlen := unsafe.Sizeof(*info) + _, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen))) + return info, err +} + +func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) { + addr := &SCTPAddr{ + IP: make([]net.IP, n), + } + + switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family { + case syscall.AF_INET: + addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) + tmp := syscall.RawSockaddrInet4{} + size 
:= unsafe.Sizeof(tmp) + for i := 0; i < n; i++ { + a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer( + uintptr(ptr) + size*uintptr(i))) + addr.IP[i] = a.Addr[:] + } + case syscall.AF_INET6: + addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port))) + tmp := syscall.RawSockaddrInet6{} + size := unsafe.Sizeof(tmp) + for i := 0; i < n; i++ { + a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer( + uintptr(ptr) + size*uintptr(i))) + addr.IP[i] = a.Addr[:] + } + default: + return nil, fmt.Errorf("unknown address family: %d", family) + } + return addr, nil +} + +func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) { + + type getaddrs struct { + assocId int32 + addrNum uint32 + addrs [4096]byte + } + param := getaddrs{ + assocId: int32(id), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), int(param.addrNum)) +} + +func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) { + + type sctpGetSetPrim struct { + assocId int32 + addrs [128]byte + } + param := sctpGetSetPrim{ + assocId: int32(0), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return resolveFromRawAddr(unsafe.Pointer(¶m.addrs), 1) +} + +func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) { + return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS) +} + +func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) { + return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS) +} + +func (c *SCTPConn) LocalAddr() net.Addr { + addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS) + if err != nil { + return nil + } + return addr +} + +func (c *SCTPConn) RemoteAddr() net.Addr { + addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS) + if err != nil { + return nil + } + return addr +} + +func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) { + type peeloffArg struct { + assocId int32 + sd int + } + param := peeloffArg{ + assocId: int32(id), + } + optlen := unsafe.Sizeof(param) + _, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(¶m)), uintptr(unsafe.Pointer(&optlen))) + if err != nil { + return nil, err + } + return &SCTPConn{_fd: int32(param.sd)}, nil +} + +func (c *SCTPConn) SetDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +func (c *SCTPConn) SetReadDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +func (c *SCTPConn) SetWriteDeadline(t time.Time) error { + return syscall.EOPNOTSUPP +} + +type SCTPListener struct { + fd int + m sync.Mutex +} + +func (ln *SCTPListener) Addr() net.Addr { + laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS) + if err != nil { + return nil + } + return laddr +} + +type SCTPSndRcvInfoWrappedConn struct { + conn *SCTPConn +} + +func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn { + conn.SubscribeEvents(SCTP_EVENT_DATA_IO) + return &SCTPSndRcvInfoWrappedConn{conn} +} + +func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) { + if len(b) < int(sndRcvInfoSize) { + return 0, syscall.EINVAL + } + info := (*SndRcvInfo)(unsafe.Pointer(&b[0])) + n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info) + return n + int(sndRcvInfoSize), err +} + +func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) { + if len(b) < int(sndRcvInfoSize) { + return 0, 
syscall.EINVAL + } + n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:]) + if err != nil { + return n, err + } + copy(b, toBuf(info)) + return n + int(sndRcvInfoSize), err +} + +func (c *SCTPSndRcvInfoWrappedConn) Close() error { + return c.conn.Close() +} + +func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error { + return c.conn.SetDeadline(t) +} + +func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error { + return c.conn.SetWriteDeadline(t) +} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/ishidawataru/sctp/sctp_linux.go new file mode 100644 index 0000000000..f93ab8622a --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp_linux.go @@ -0,0 +1,227 @@ +// +build linux,!386 + +package sctp + +import ( + "fmt" + "io" + "net" + "sync/atomic" + "syscall" + "unsafe" +) + +func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { + var cbuf []byte + if info != nil { + cmsgBuf := toBuf(info) + hdr := &syscall.Cmsghdr{ + Level: syscall.IPPROTO_SCTP, + Type: SCTP_CMSG_SNDRCV, + } + + // bitwidth of hdr.Len is platform-specific, + // so we use hdr.SetLen() rather than directly setting hdr.Len + hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf))) + cbuf = append(toBuf(hdr), cmsgBuf...) 
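+ // cbuf now holds a serialized cmsghdr followed by the SndRcvInfo payload; SendmsgN below sends it as the out-of-band (oob) buffer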
+ } + return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0) +} + +func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) { + msgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, err + } + for _, m := range msgs { + if m.Header.Level == syscall.IPPROTO_SCTP { + switch m.Header.Type { + case SCTP_CMSG_SNDRCV: + return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil + } + } + } + return nil, nil +} + +func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { + oob := make([]byte, 254) + for { + n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0) + if err != nil { + return n, nil, err + } + + if n == 0 && oobn == 0 { + return 0, nil, io.EOF + } + + if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil { + if err := c.notificationHandler(b[:n]); err != nil { + return 0, nil, err + } + } else { + var info *SndRcvInfo + if oobn > 0 { + info, err = parseSndRcvInfo(oob[:oobn]) + } + return n, info, err + } + } +} + +func (c *SCTPConn) Close() error { + if c != nil { + fd := atomic.SwapInt32(&c._fd, -1) + if fd > 0 { + info := &SndRcvInfo{ + Flags: SCTP_EOF, + } + c.SCTPWrite(nil, info) + syscall.Shutdown(int(fd), syscall.SHUT_RDWR) + return syscall.Close(int(fd)) + } + } + return syscall.EBADF +} + +func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { + af := syscall.AF_INET + switch net { + case "sctp": + hasv6 := func(addr *SCTPAddr) bool { + if addr == nil { + return false + } + for _, ip := range addr.IP { + if ip.To4() == nil { + return true + } + } + return false + } + if hasv6(laddr) { + af = syscall.AF_INET6 + } + case "sctp4": + case "sctp6": + af = syscall.AF_INET6 + default: + return nil, fmt.Errorf("invalid net: %s", net) + } + + sock, err := syscall.Socket( + af, + syscall.SOCK_STREAM, + syscall.IPPROTO_SCTP, + ) + if err != nil { + return nil, err + } + err = setNumOstreams(sock, SCTP_MAX_STREAM) + if err != nil { + return nil, err + } + if laddr != nil && len(laddr.IP) != 0 { + err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) + if err != nil { + return nil, err + } + } + err = syscall.Listen(sock, syscall.SOMAXCONN) + if err != nil { + return nil, err + } + return &SCTPListener{ + fd: sock, + }, nil +} + +func (ln *SCTPListener) Accept() (net.Conn, error) { + fd, _, err := syscall.Accept4(ln.fd, 0) + return NewSCTPConn(fd, nil), err +} + +func (ln *SCTPListener) Close() error { + syscall.Shutdown(ln.fd, syscall.SHUT_RDWR) + return syscall.Close(ln.fd) +} + +func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + af := syscall.AF_INET + switch net { + case "sctp": + hasv6 := func(addr *SCTPAddr) bool { + if addr == nil { + return false + } + for _, ip := range addr.IP { + if ip.To4() == nil { + return true + } + } + return false + } + if hasv6(laddr) || hasv6(raddr) { + af = syscall.AF_INET6 + } + case "sctp4": + case "sctp6": + af = syscall.AF_INET6 + default: + return nil, fmt.Errorf("invalid net: %s", net) + } + sock, err := syscall.Socket( + af, + syscall.SOCK_STREAM, + syscall.IPPROTO_SCTP, + ) + if err != nil { + return nil, err + } + err = setNumOstreams(sock, SCTP_MAX_STREAM) + if err != nil { + return nil, err + } + if laddr != nil { + err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) + if err != nil { + return nil, err + } + } + _, err = SCTPConnect(sock, raddr) + if err != nil { + return nil, err + } + return NewSCTPConn(sock, nil), nil +} diff --git a/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go new file mode 100644 
index 0000000000..adcbf78b46 --- /dev/null +++ b/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go @@ -0,0 +1,47 @@ +// +build !linux linux,386 + +package sctp + +import ( + "errors" + "net" + "runtime" +) + +var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH) + +func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + return 0, 0, ErrUnsupported +} + +func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + return 0, 0, ErrUnsupported +} + +func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { + return 0, ErrUnsupported +} + +func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { + return 0, nil, ErrUnsupported +} + +func (c *SCTPConn) Close() error { + return ErrUnsupported +} + +func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) Accept() (net.Conn, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) Close() error { + return ErrUnsupported +} + +func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + return nil, ErrUnsupported +} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE deleted file mode 100644 index 91b5cef30e..0000000000 --- a/vendor/github.com/mattn/go-runewidth/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Yasuhiro Matsumoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
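For reference, a minimal sketch of a loopback round trip over the ishidawataru/sctp package added above. Per its build tags this runs only on Linux (non-386) with kernel SCTP support; the address and payload are illustrative:

package main

import (
	"fmt"

	"github.com/ishidawataru/sctp"
)

func main() {
	// Bind to an ephemeral port on the loopback interface.
	laddr, err := sctp.ResolveSCTPAddr("sctp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln, err := sctp.ListenSCTP("sctp", laddr)
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	go func() {
		// Dial back to whatever port the kernel assigned the listener.
		c, err := sctp.DialSCTP("sctp", nil, ln.Addr().(*sctp.SCTPAddr))
		if err != nil {
			panic(err)
		}
		defer c.Close()
		if _, err := c.Write([]byte("ping")); err != nil {
			panic(err)
		}
	}()

	conn, err := ln.Accept()
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 64)
	n, err := conn.Read(buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("received %q\n", buf[:n])
}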
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go deleted file mode 100644 index 2164497ad9..0000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth.go +++ /dev/null @@ -1,1223 +0,0 @@ -package runewidth - -var ( - // EastAsianWidth will be set true if the current locale is CJK - EastAsianWidth = IsEastAsian() - - // DefaultCondition is a condition in current locale - DefaultCondition = &Condition{EastAsianWidth} -) - -type interval struct { - first rune - last rune -} - -type table []interval - -func inTables(r rune, ts ...table) bool { - for _, t := range ts { - if inTable(r, t) { - return true - } - } - return false -} - -func inTable(r rune, t table) bool { - // func (t table) IncludesRune(r rune) bool { - if r < t[0].first { - return false - } - - bot := 0 - top := len(t) - 1 - for top >= bot { - mid := (bot + top) / 2 - - switch { - case t[mid].last < r: - bot = mid + 1 - case t[mid].first > r: - top = mid - 1 - default: - return true - } - } - - return false -} - -var private = table{ - {0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD}, -} - -var nonprint = table{ - {0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD}, - {0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F}, - {0x202A, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF}, -} - -var combining = table{ - {0x0300, 0x036F}, {0x0483, 0x0489}, {0x0591, 0x05BD}, - {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, {0x05C4, 0x05C5}, - {0x05C7, 0x05C7}, {0x0610, 0x061A}, {0x064B, 0x065F}, - {0x0670, 0x0670}, {0x06D6, 0x06DC}, {0x06DF, 0x06E4}, - {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, {0x0711, 0x0711}, - {0x0730, 0x074A}, {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, - {0x0816, 0x0819}, {0x081B, 0x0823}, {0x0825, 0x0827}, - {0x0829, 0x082D}, {0x0859, 0x085B}, {0x08D4, 0x08E1}, - {0x08E3, 0x0903}, {0x093A, 0x093C}, {0x093E, 0x094F}, - {0x0951, 0x0957}, {0x0962, 0x0963}, {0x0981, 0x0983}, - {0x09BC, 0x09BC}, {0x09BE, 0x09C4}, {0x09C7, 0x09C8}, - {0x09CB, 0x09CD}, {0x09D7, 0x09D7}, {0x09E2, 0x09E3}, - {0x0A01, 0x0A03}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A70, 0x0A71}, {0x0A75, 0x0A75}, {0x0A81, 0x0A83}, - {0x0ABC, 0x0ABC}, {0x0ABE, 0x0AC5}, {0x0AC7, 0x0AC9}, - {0x0ACB, 0x0ACD}, {0x0AE2, 0x0AE3}, {0x0B01, 0x0B03}, - {0x0B3C, 0x0B3C}, {0x0B3E, 0x0B44}, {0x0B47, 0x0B48}, - {0x0B4B, 0x0B4D}, {0x0B56, 0x0B57}, {0x0B62, 0x0B63}, - {0x0B82, 0x0B82}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8}, - {0x0BCA, 0x0BCD}, {0x0BD7, 0x0BD7}, {0x0C00, 0x0C03}, - {0x0C3E, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C62, 0x0C63}, {0x0C81, 0x0C83}, - {0x0CBC, 0x0CBC}, {0x0CBE, 0x0CC4}, {0x0CC6, 0x0CC8}, - {0x0CCA, 0x0CCD}, {0x0CD5, 0x0CD6}, {0x0CE2, 0x0CE3}, - {0x0D01, 0x0D03}, {0x0D3E, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4D}, {0x0D57, 0x0D57}, {0x0D62, 0x0D63}, - {0x0D82, 0x0D83}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, - {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, {0x0DF2, 0x0DF3}, - {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, - {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, - {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F3E, 0x0F3F}, - {0x0F71, 0x0F84}, {0x0F86, 0x0F87}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102B, 0x103E}, - {0x1056, 0x1059}, {0x105E, 0x1060}, {0x1062, 0x1064}, - {0x1067, 0x106D}, {0x1071, 0x1074}, {0x1082, 0x108D}, - {0x108F, 0x108F}, {0x109A, 0x109D}, {0x135D, 
0x135F}, - {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, - {0x1772, 0x1773}, {0x17B4, 0x17D3}, {0x17DD, 0x17DD}, - {0x180B, 0x180D}, {0x1885, 0x1886}, {0x18A9, 0x18A9}, - {0x1920, 0x192B}, {0x1930, 0x193B}, {0x1A17, 0x1A1B}, - {0x1A55, 0x1A5E}, {0x1A60, 0x1A7C}, {0x1A7F, 0x1A7F}, - {0x1AB0, 0x1ABE}, {0x1B00, 0x1B04}, {0x1B34, 0x1B44}, - {0x1B6B, 0x1B73}, {0x1B80, 0x1B82}, {0x1BA1, 0x1BAD}, - {0x1BE6, 0x1BF3}, {0x1C24, 0x1C37}, {0x1CD0, 0x1CD2}, - {0x1CD4, 0x1CE8}, {0x1CED, 0x1CED}, {0x1CF2, 0x1CF4}, - {0x1CF8, 0x1CF9}, {0x1DC0, 0x1DF5}, {0x1DFB, 0x1DFF}, - {0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2D7F, 0x2D7F}, - {0x2DE0, 0x2DFF}, {0x302A, 0x302F}, {0x3099, 0x309A}, - {0xA66F, 0xA672}, {0xA674, 0xA67D}, {0xA69E, 0xA69F}, - {0xA6F0, 0xA6F1}, {0xA802, 0xA802}, {0xA806, 0xA806}, - {0xA80B, 0xA80B}, {0xA823, 0xA827}, {0xA880, 0xA881}, - {0xA8B4, 0xA8C5}, {0xA8E0, 0xA8F1}, {0xA926, 0xA92D}, - {0xA947, 0xA953}, {0xA980, 0xA983}, {0xA9B3, 0xA9C0}, - {0xA9E5, 0xA9E5}, {0xAA29, 0xAA36}, {0xAA43, 0xAA43}, - {0xAA4C, 0xAA4D}, {0xAA7B, 0xAA7D}, {0xAAB0, 0xAAB0}, - {0xAAB2, 0xAAB4}, {0xAAB7, 0xAAB8}, {0xAABE, 0xAABF}, - {0xAAC1, 0xAAC1}, {0xAAEB, 0xAAEF}, {0xAAF5, 0xAAF6}, - {0xABE3, 0xABEA}, {0xABEC, 0xABED}, {0xFB1E, 0xFB1E}, - {0xFE00, 0xFE0F}, {0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, - {0x102E0, 0x102E0}, {0x10376, 0x1037A}, {0x10A01, 0x10A03}, - {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, {0x10A38, 0x10A3A}, - {0x10A3F, 0x10A3F}, {0x10AE5, 0x10AE6}, {0x11000, 0x11002}, - {0x11038, 0x11046}, {0x1107F, 0x11082}, {0x110B0, 0x110BA}, - {0x11100, 0x11102}, {0x11127, 0x11134}, {0x11173, 0x11173}, - {0x11180, 0x11182}, {0x111B3, 0x111C0}, {0x111CA, 0x111CC}, - {0x1122C, 0x11237}, {0x1123E, 0x1123E}, {0x112DF, 0x112EA}, - {0x11300, 0x11303}, {0x1133C, 0x1133C}, {0x1133E, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11357, 0x11357}, - {0x11362, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374}, - {0x11435, 0x11446}, {0x114B0, 0x114C3}, {0x115AF, 0x115B5}, - {0x115B8, 0x115C0}, {0x115DC, 0x115DD}, {0x11630, 0x11640}, - {0x116AB, 0x116B7}, {0x1171D, 0x1172B}, {0x11C2F, 0x11C36}, - {0x11C38, 0x11C3F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CB6}, - {0x16AF0, 0x16AF4}, {0x16B30, 0x16B36}, {0x16F51, 0x16F7E}, - {0x16F8F, 0x16F92}, {0x1BC9D, 0x1BC9E}, {0x1D165, 0x1D169}, - {0x1D16D, 0x1D172}, {0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, - {0x1D1AA, 0x1D1AD}, {0x1D242, 0x1D244}, {0x1DA00, 0x1DA36}, - {0x1DA3B, 0x1DA6C}, {0x1DA75, 0x1DA75}, {0x1DA84, 0x1DA84}, - {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006}, - {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, - {0x1E026, 0x1E02A}, {0x1E8D0, 0x1E8D6}, {0x1E944, 0x1E94A}, - {0xE0100, 0xE01EF}, -} - -var doublewidth = table{ - {0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A}, - {0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3}, - {0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653}, - {0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1}, - {0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5}, - {0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA}, - {0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA}, - {0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B}, - {0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E}, - {0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797}, - {0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C}, - {0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99}, - {0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB}, - {0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF}, - {0x3105, 0x312D}, {0x3131, 0x318E}, {0x3190, 
0x31BA}, - {0x31C0, 0x31E3}, {0x31F0, 0x321E}, {0x3220, 0x3247}, - {0x3250, 0x32FE}, {0x3300, 0x4DBF}, {0x4E00, 0xA48C}, - {0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3}, - {0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52}, - {0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60}, - {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE0}, {0x17000, 0x187EC}, - {0x18800, 0x18AF2}, {0x1B000, 0x1B001}, {0x1F004, 0x1F004}, - {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A}, - {0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248}, - {0x1F250, 0x1F251}, {0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, - {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, - {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, - {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, - {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, - {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, - {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, - {0x1F6D0, 0x1F6D2}, {0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6F6}, - {0x1F910, 0x1F91E}, {0x1F920, 0x1F927}, {0x1F930, 0x1F930}, - {0x1F933, 0x1F93E}, {0x1F940, 0x1F94B}, {0x1F950, 0x1F95E}, - {0x1F980, 0x1F991}, {0x1F9C0, 0x1F9C0}, {0x20000, 0x2FFFD}, - {0x30000, 0x3FFFD}, -} - -var ambiguous = table{ - {0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8}, - {0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4}, - {0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6}, - {0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1}, - {0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED}, - {0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA}, - {0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101}, - {0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B}, - {0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133}, - {0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144}, - {0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153}, - {0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE}, - {0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4}, - {0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA}, - {0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261}, - {0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB}, - {0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB}, - {0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F}, - {0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1}, - {0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F}, - {0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016}, - {0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022}, - {0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033}, - {0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E}, - {0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084}, - {0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105}, - {0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116}, - {0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B}, - {0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B}, - {0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199}, - {0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4}, - {0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203}, - {0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F}, - {0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A}, - {0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225}, - {0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237}, - {0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C}, - {0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267}, - {0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283}, - {0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299}, - {0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 
0x2312}, - {0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573}, - {0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1}, - {0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7}, - {0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8}, - {0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5}, - {0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609}, - {0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E}, - {0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661}, - {0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D}, - {0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF}, - {0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1}, - {0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1}, - {0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC}, - {0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F}, - {0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF}, - {0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A}, - {0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D}, - {0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF}, - {0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD}, -} - -var emoji = table{ - {0x1F1E6, 0x1F1FF}, {0x1F321, 0x1F321}, {0x1F324, 0x1F32C}, - {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, {0x1F396, 0x1F397}, - {0x1F399, 0x1F39B}, {0x1F39E, 0x1F39F}, {0x1F3CB, 0x1F3CE}, - {0x1F3D4, 0x1F3DF}, {0x1F3F3, 0x1F3F5}, {0x1F3F7, 0x1F3F7}, - {0x1F43F, 0x1F43F}, {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FD}, - {0x1F549, 0x1F54A}, {0x1F56F, 0x1F570}, {0x1F573, 0x1F579}, - {0x1F587, 0x1F587}, {0x1F58A, 0x1F58D}, {0x1F590, 0x1F590}, - {0x1F5A5, 0x1F5A5}, {0x1F5A8, 0x1F5A8}, {0x1F5B1, 0x1F5B2}, - {0x1F5BC, 0x1F5BC}, {0x1F5C2, 0x1F5C4}, {0x1F5D1, 0x1F5D3}, - {0x1F5DC, 0x1F5DE}, {0x1F5E1, 0x1F5E1}, {0x1F5E3, 0x1F5E3}, - {0x1F5E8, 0x1F5E8}, {0x1F5EF, 0x1F5EF}, {0x1F5F3, 0x1F5F3}, - {0x1F5FA, 0x1F5FA}, {0x1F6CB, 0x1F6CF}, {0x1F6E0, 0x1F6E5}, - {0x1F6E9, 0x1F6E9}, {0x1F6F0, 0x1F6F0}, {0x1F6F3, 0x1F6F3}, -} - -var notassigned = table{ - {0x0378, 0x0379}, {0x0380, 0x0383}, {0x038B, 0x038B}, - {0x038D, 0x038D}, {0x03A2, 0x03A2}, {0x0530, 0x0530}, - {0x0557, 0x0558}, {0x0560, 0x0560}, {0x0588, 0x0588}, - {0x058B, 0x058C}, {0x0590, 0x0590}, {0x05C8, 0x05CF}, - {0x05EB, 0x05EF}, {0x05F5, 0x05FF}, {0x061D, 0x061D}, - {0x070E, 0x070E}, {0x074B, 0x074C}, {0x07B2, 0x07BF}, - {0x07FB, 0x07FF}, {0x082E, 0x082F}, {0x083F, 0x083F}, - {0x085C, 0x085D}, {0x085F, 0x089F}, {0x08B5, 0x08B5}, - {0x08BE, 0x08D3}, {0x0984, 0x0984}, {0x098D, 0x098E}, - {0x0991, 0x0992}, {0x09A9, 0x09A9}, {0x09B1, 0x09B1}, - {0x09B3, 0x09B5}, {0x09BA, 0x09BB}, {0x09C5, 0x09C6}, - {0x09C9, 0x09CA}, {0x09CF, 0x09D6}, {0x09D8, 0x09DB}, - {0x09DE, 0x09DE}, {0x09E4, 0x09E5}, {0x09FC, 0x0A00}, - {0x0A04, 0x0A04}, {0x0A0B, 0x0A0E}, {0x0A11, 0x0A12}, - {0x0A29, 0x0A29}, {0x0A31, 0x0A31}, {0x0A34, 0x0A34}, - {0x0A37, 0x0A37}, {0x0A3A, 0x0A3B}, {0x0A3D, 0x0A3D}, - {0x0A43, 0x0A46}, {0x0A49, 0x0A4A}, {0x0A4E, 0x0A50}, - {0x0A52, 0x0A58}, {0x0A5D, 0x0A5D}, {0x0A5F, 0x0A65}, - {0x0A76, 0x0A80}, {0x0A84, 0x0A84}, {0x0A8E, 0x0A8E}, - {0x0A92, 0x0A92}, {0x0AA9, 0x0AA9}, {0x0AB1, 0x0AB1}, - {0x0AB4, 0x0AB4}, {0x0ABA, 0x0ABB}, {0x0AC6, 0x0AC6}, - {0x0ACA, 0x0ACA}, {0x0ACE, 0x0ACF}, {0x0AD1, 0x0ADF}, - {0x0AE4, 0x0AE5}, {0x0AF2, 0x0AF8}, {0x0AFA, 0x0B00}, - {0x0B04, 0x0B04}, {0x0B0D, 0x0B0E}, {0x0B11, 0x0B12}, - {0x0B29, 0x0B29}, {0x0B31, 0x0B31}, {0x0B34, 0x0B34}, - {0x0B3A, 0x0B3B}, {0x0B45, 0x0B46}, {0x0B49, 0x0B4A}, - {0x0B4E, 0x0B55}, {0x0B58, 0x0B5B}, {0x0B5E, 0x0B5E}, - {0x0B64, 0x0B65}, {0x0B78, 0x0B81}, {0x0B84, 0x0B84}, - {0x0B8B, 0x0B8D}, {0x0B91, 0x0B91}, {0x0B96, 
0x0B98}, - {0x0B9B, 0x0B9B}, {0x0B9D, 0x0B9D}, {0x0BA0, 0x0BA2}, - {0x0BA5, 0x0BA7}, {0x0BAB, 0x0BAD}, {0x0BBA, 0x0BBD}, - {0x0BC3, 0x0BC5}, {0x0BC9, 0x0BC9}, {0x0BCE, 0x0BCF}, - {0x0BD1, 0x0BD6}, {0x0BD8, 0x0BE5}, {0x0BFB, 0x0BFF}, - {0x0C04, 0x0C04}, {0x0C0D, 0x0C0D}, {0x0C11, 0x0C11}, - {0x0C29, 0x0C29}, {0x0C3A, 0x0C3C}, {0x0C45, 0x0C45}, - {0x0C49, 0x0C49}, {0x0C4E, 0x0C54}, {0x0C57, 0x0C57}, - {0x0C5B, 0x0C5F}, {0x0C64, 0x0C65}, {0x0C70, 0x0C77}, - {0x0C84, 0x0C84}, {0x0C8D, 0x0C8D}, {0x0C91, 0x0C91}, - {0x0CA9, 0x0CA9}, {0x0CB4, 0x0CB4}, {0x0CBA, 0x0CBB}, - {0x0CC5, 0x0CC5}, {0x0CC9, 0x0CC9}, {0x0CCE, 0x0CD4}, - {0x0CD7, 0x0CDD}, {0x0CDF, 0x0CDF}, {0x0CE4, 0x0CE5}, - {0x0CF0, 0x0CF0}, {0x0CF3, 0x0D00}, {0x0D04, 0x0D04}, - {0x0D0D, 0x0D0D}, {0x0D11, 0x0D11}, {0x0D3B, 0x0D3C}, - {0x0D45, 0x0D45}, {0x0D49, 0x0D49}, {0x0D50, 0x0D53}, - {0x0D64, 0x0D65}, {0x0D80, 0x0D81}, {0x0D84, 0x0D84}, - {0x0D97, 0x0D99}, {0x0DB2, 0x0DB2}, {0x0DBC, 0x0DBC}, - {0x0DBE, 0x0DBF}, {0x0DC7, 0x0DC9}, {0x0DCB, 0x0DCE}, - {0x0DD5, 0x0DD5}, {0x0DD7, 0x0DD7}, {0x0DE0, 0x0DE5}, - {0x0DF0, 0x0DF1}, {0x0DF5, 0x0E00}, {0x0E3B, 0x0E3E}, - {0x0E5C, 0x0E80}, {0x0E83, 0x0E83}, {0x0E85, 0x0E86}, - {0x0E89, 0x0E89}, {0x0E8B, 0x0E8C}, {0x0E8E, 0x0E93}, - {0x0E98, 0x0E98}, {0x0EA0, 0x0EA0}, {0x0EA4, 0x0EA4}, - {0x0EA6, 0x0EA6}, {0x0EA8, 0x0EA9}, {0x0EAC, 0x0EAC}, - {0x0EBA, 0x0EBA}, {0x0EBE, 0x0EBF}, {0x0EC5, 0x0EC5}, - {0x0EC7, 0x0EC7}, {0x0ECE, 0x0ECF}, {0x0EDA, 0x0EDB}, - {0x0EE0, 0x0EFF}, {0x0F48, 0x0F48}, {0x0F6D, 0x0F70}, - {0x0F98, 0x0F98}, {0x0FBD, 0x0FBD}, {0x0FCD, 0x0FCD}, - {0x0FDB, 0x0FFF}, {0x10C6, 0x10C6}, {0x10C8, 0x10CC}, - {0x10CE, 0x10CF}, {0x1249, 0x1249}, {0x124E, 0x124F}, - {0x1257, 0x1257}, {0x1259, 0x1259}, {0x125E, 0x125F}, - {0x1289, 0x1289}, {0x128E, 0x128F}, {0x12B1, 0x12B1}, - {0x12B6, 0x12B7}, {0x12BF, 0x12BF}, {0x12C1, 0x12C1}, - {0x12C6, 0x12C7}, {0x12D7, 0x12D7}, {0x1311, 0x1311}, - {0x1316, 0x1317}, {0x135B, 0x135C}, {0x137D, 0x137F}, - {0x139A, 0x139F}, {0x13F6, 0x13F7}, {0x13FE, 0x13FF}, - {0x169D, 0x169F}, {0x16F9, 0x16FF}, {0x170D, 0x170D}, - {0x1715, 0x171F}, {0x1737, 0x173F}, {0x1754, 0x175F}, - {0x176D, 0x176D}, {0x1771, 0x1771}, {0x1774, 0x177F}, - {0x17DE, 0x17DF}, {0x17EA, 0x17EF}, {0x17FA, 0x17FF}, - {0x180F, 0x180F}, {0x181A, 0x181F}, {0x1878, 0x187F}, - {0x18AB, 0x18AF}, {0x18F6, 0x18FF}, {0x191F, 0x191F}, - {0x192C, 0x192F}, {0x193C, 0x193F}, {0x1941, 0x1943}, - {0x196E, 0x196F}, {0x1975, 0x197F}, {0x19AC, 0x19AF}, - {0x19CA, 0x19CF}, {0x19DB, 0x19DD}, {0x1A1C, 0x1A1D}, - {0x1A5F, 0x1A5F}, {0x1A7D, 0x1A7E}, {0x1A8A, 0x1A8F}, - {0x1A9A, 0x1A9F}, {0x1AAE, 0x1AAF}, {0x1ABF, 0x1AFF}, - {0x1B4C, 0x1B4F}, {0x1B7D, 0x1B7F}, {0x1BF4, 0x1BFB}, - {0x1C38, 0x1C3A}, {0x1C4A, 0x1C4C}, {0x1C89, 0x1CBF}, - {0x1CC8, 0x1CCF}, {0x1CF7, 0x1CF7}, {0x1CFA, 0x1CFF}, - {0x1DF6, 0x1DFA}, {0x1F16, 0x1F17}, {0x1F1E, 0x1F1F}, - {0x1F46, 0x1F47}, {0x1F4E, 0x1F4F}, {0x1F58, 0x1F58}, - {0x1F5A, 0x1F5A}, {0x1F5C, 0x1F5C}, {0x1F5E, 0x1F5E}, - {0x1F7E, 0x1F7F}, {0x1FB5, 0x1FB5}, {0x1FC5, 0x1FC5}, - {0x1FD4, 0x1FD5}, {0x1FDC, 0x1FDC}, {0x1FF0, 0x1FF1}, - {0x1FF5, 0x1FF5}, {0x1FFF, 0x1FFF}, {0x2065, 0x2065}, - {0x2072, 0x2073}, {0x208F, 0x208F}, {0x209D, 0x209F}, - {0x20BF, 0x20CF}, {0x20F1, 0x20FF}, {0x218C, 0x218F}, - {0x23FF, 0x23FF}, {0x2427, 0x243F}, {0x244B, 0x245F}, - {0x2B74, 0x2B75}, {0x2B96, 0x2B97}, {0x2BBA, 0x2BBC}, - {0x2BC9, 0x2BC9}, {0x2BD2, 0x2BEB}, {0x2BF0, 0x2BFF}, - {0x2C2F, 0x2C2F}, {0x2C5F, 0x2C5F}, {0x2CF4, 0x2CF8}, - {0x2D26, 0x2D26}, {0x2D28, 0x2D2C}, {0x2D2E, 0x2D2F}, - {0x2D68, 
0x2D6E}, {0x2D71, 0x2D7E}, {0x2D97, 0x2D9F}, - {0x2DA7, 0x2DA7}, {0x2DAF, 0x2DAF}, {0x2DB7, 0x2DB7}, - {0x2DBF, 0x2DBF}, {0x2DC7, 0x2DC7}, {0x2DCF, 0x2DCF}, - {0x2DD7, 0x2DD7}, {0x2DDF, 0x2DDF}, {0x2E45, 0x2E7F}, - {0x2E9A, 0x2E9A}, {0x2EF4, 0x2EFF}, {0x2FD6, 0x2FEF}, - {0x2FFC, 0x2FFF}, {0x3040, 0x3040}, {0x3097, 0x3098}, - {0x3100, 0x3104}, {0x312E, 0x3130}, {0x318F, 0x318F}, - {0x31BB, 0x31BF}, {0x31E4, 0x31EF}, {0x321F, 0x321F}, - {0x32FF, 0x32FF}, {0x4DB6, 0x4DBF}, {0x9FD6, 0x9FFF}, - {0xA48D, 0xA48F}, {0xA4C7, 0xA4CF}, {0xA62C, 0xA63F}, - {0xA6F8, 0xA6FF}, {0xA7AF, 0xA7AF}, {0xA7B8, 0xA7F6}, - {0xA82C, 0xA82F}, {0xA83A, 0xA83F}, {0xA878, 0xA87F}, - {0xA8C6, 0xA8CD}, {0xA8DA, 0xA8DF}, {0xA8FE, 0xA8FF}, - {0xA954, 0xA95E}, {0xA97D, 0xA97F}, {0xA9CE, 0xA9CE}, - {0xA9DA, 0xA9DD}, {0xA9FF, 0xA9FF}, {0xAA37, 0xAA3F}, - {0xAA4E, 0xAA4F}, {0xAA5A, 0xAA5B}, {0xAAC3, 0xAADA}, - {0xAAF7, 0xAB00}, {0xAB07, 0xAB08}, {0xAB0F, 0xAB10}, - {0xAB17, 0xAB1F}, {0xAB27, 0xAB27}, {0xAB2F, 0xAB2F}, - {0xAB66, 0xAB6F}, {0xABEE, 0xABEF}, {0xABFA, 0xABFF}, - {0xD7A4, 0xD7AF}, {0xD7C7, 0xD7CA}, {0xD7FC, 0xD7FF}, - {0xFA6E, 0xFA6F}, {0xFADA, 0xFAFF}, {0xFB07, 0xFB12}, - {0xFB18, 0xFB1C}, {0xFB37, 0xFB37}, {0xFB3D, 0xFB3D}, - {0xFB3F, 0xFB3F}, {0xFB42, 0xFB42}, {0xFB45, 0xFB45}, - {0xFBC2, 0xFBD2}, {0xFD40, 0xFD4F}, {0xFD90, 0xFD91}, - {0xFDC8, 0xFDEF}, {0xFDFE, 0xFDFF}, {0xFE1A, 0xFE1F}, - {0xFE53, 0xFE53}, {0xFE67, 0xFE67}, {0xFE6C, 0xFE6F}, - {0xFE75, 0xFE75}, {0xFEFD, 0xFEFE}, {0xFF00, 0xFF00}, - {0xFFBF, 0xFFC1}, {0xFFC8, 0xFFC9}, {0xFFD0, 0xFFD1}, - {0xFFD8, 0xFFD9}, {0xFFDD, 0xFFDF}, {0xFFE7, 0xFFE7}, - {0xFFEF, 0xFFF8}, {0xFFFE, 0xFFFF}, {0x1000C, 0x1000C}, - {0x10027, 0x10027}, {0x1003B, 0x1003B}, {0x1003E, 0x1003E}, - {0x1004E, 0x1004F}, {0x1005E, 0x1007F}, {0x100FB, 0x100FF}, - {0x10103, 0x10106}, {0x10134, 0x10136}, {0x1018F, 0x1018F}, - {0x1019C, 0x1019F}, {0x101A1, 0x101CF}, {0x101FE, 0x1027F}, - {0x1029D, 0x1029F}, {0x102D1, 0x102DF}, {0x102FC, 0x102FF}, - {0x10324, 0x1032F}, {0x1034B, 0x1034F}, {0x1037B, 0x1037F}, - {0x1039E, 0x1039E}, {0x103C4, 0x103C7}, {0x103D6, 0x103FF}, - {0x1049E, 0x1049F}, {0x104AA, 0x104AF}, {0x104D4, 0x104D7}, - {0x104FC, 0x104FF}, {0x10528, 0x1052F}, {0x10564, 0x1056E}, - {0x10570, 0x105FF}, {0x10737, 0x1073F}, {0x10756, 0x1075F}, - {0x10768, 0x107FF}, {0x10806, 0x10807}, {0x10809, 0x10809}, - {0x10836, 0x10836}, {0x10839, 0x1083B}, {0x1083D, 0x1083E}, - {0x10856, 0x10856}, {0x1089F, 0x108A6}, {0x108B0, 0x108DF}, - {0x108F3, 0x108F3}, {0x108F6, 0x108FA}, {0x1091C, 0x1091E}, - {0x1093A, 0x1093E}, {0x10940, 0x1097F}, {0x109B8, 0x109BB}, - {0x109D0, 0x109D1}, {0x10A04, 0x10A04}, {0x10A07, 0x10A0B}, - {0x10A14, 0x10A14}, {0x10A18, 0x10A18}, {0x10A34, 0x10A37}, - {0x10A3B, 0x10A3E}, {0x10A48, 0x10A4F}, {0x10A59, 0x10A5F}, - {0x10AA0, 0x10ABF}, {0x10AE7, 0x10AEA}, {0x10AF7, 0x10AFF}, - {0x10B36, 0x10B38}, {0x10B56, 0x10B57}, {0x10B73, 0x10B77}, - {0x10B92, 0x10B98}, {0x10B9D, 0x10BA8}, {0x10BB0, 0x10BFF}, - {0x10C49, 0x10C7F}, {0x10CB3, 0x10CBF}, {0x10CF3, 0x10CF9}, - {0x10D00, 0x10E5F}, {0x10E7F, 0x10FFF}, {0x1104E, 0x11051}, - {0x11070, 0x1107E}, {0x110C2, 0x110CF}, {0x110E9, 0x110EF}, - {0x110FA, 0x110FF}, {0x11135, 0x11135}, {0x11144, 0x1114F}, - {0x11177, 0x1117F}, {0x111CE, 0x111CF}, {0x111E0, 0x111E0}, - {0x111F5, 0x111FF}, {0x11212, 0x11212}, {0x1123F, 0x1127F}, - {0x11287, 0x11287}, {0x11289, 0x11289}, {0x1128E, 0x1128E}, - {0x1129E, 0x1129E}, {0x112AA, 0x112AF}, {0x112EB, 0x112EF}, - {0x112FA, 0x112FF}, {0x11304, 0x11304}, {0x1130D, 0x1130E}, - {0x11311, 0x11312}, 
{0x11329, 0x11329}, {0x11331, 0x11331}, - {0x11334, 0x11334}, {0x1133A, 0x1133B}, {0x11345, 0x11346}, - {0x11349, 0x1134A}, {0x1134E, 0x1134F}, {0x11351, 0x11356}, - {0x11358, 0x1135C}, {0x11364, 0x11365}, {0x1136D, 0x1136F}, - {0x11375, 0x113FF}, {0x1145A, 0x1145A}, {0x1145C, 0x1145C}, - {0x1145E, 0x1147F}, {0x114C8, 0x114CF}, {0x114DA, 0x1157F}, - {0x115B6, 0x115B7}, {0x115DE, 0x115FF}, {0x11645, 0x1164F}, - {0x1165A, 0x1165F}, {0x1166D, 0x1167F}, {0x116B8, 0x116BF}, - {0x116CA, 0x116FF}, {0x1171A, 0x1171C}, {0x1172C, 0x1172F}, - {0x11740, 0x1189F}, {0x118F3, 0x118FE}, {0x11900, 0x11ABF}, - {0x11AF9, 0x11BFF}, {0x11C09, 0x11C09}, {0x11C37, 0x11C37}, - {0x11C46, 0x11C4F}, {0x11C6D, 0x11C6F}, {0x11C90, 0x11C91}, - {0x11CA8, 0x11CA8}, {0x11CB7, 0x11FFF}, {0x1239A, 0x123FF}, - {0x1246F, 0x1246F}, {0x12475, 0x1247F}, {0x12544, 0x12FFF}, - {0x1342F, 0x143FF}, {0x14647, 0x167FF}, {0x16A39, 0x16A3F}, - {0x16A5F, 0x16A5F}, {0x16A6A, 0x16A6D}, {0x16A70, 0x16ACF}, - {0x16AEE, 0x16AEF}, {0x16AF6, 0x16AFF}, {0x16B46, 0x16B4F}, - {0x16B5A, 0x16B5A}, {0x16B62, 0x16B62}, {0x16B78, 0x16B7C}, - {0x16B90, 0x16EFF}, {0x16F45, 0x16F4F}, {0x16F7F, 0x16F8E}, - {0x16FA0, 0x16FDF}, {0x16FE1, 0x16FFF}, {0x187ED, 0x187FF}, - {0x18AF3, 0x1AFFF}, {0x1B002, 0x1BBFF}, {0x1BC6B, 0x1BC6F}, - {0x1BC7D, 0x1BC7F}, {0x1BC89, 0x1BC8F}, {0x1BC9A, 0x1BC9B}, - {0x1BCA4, 0x1CFFF}, {0x1D0F6, 0x1D0FF}, {0x1D127, 0x1D128}, - {0x1D1E9, 0x1D1FF}, {0x1D246, 0x1D2FF}, {0x1D357, 0x1D35F}, - {0x1D372, 0x1D3FF}, {0x1D455, 0x1D455}, {0x1D49D, 0x1D49D}, - {0x1D4A0, 0x1D4A1}, {0x1D4A3, 0x1D4A4}, {0x1D4A7, 0x1D4A8}, - {0x1D4AD, 0x1D4AD}, {0x1D4BA, 0x1D4BA}, {0x1D4BC, 0x1D4BC}, - {0x1D4C4, 0x1D4C4}, {0x1D506, 0x1D506}, {0x1D50B, 0x1D50C}, - {0x1D515, 0x1D515}, {0x1D51D, 0x1D51D}, {0x1D53A, 0x1D53A}, - {0x1D53F, 0x1D53F}, {0x1D545, 0x1D545}, {0x1D547, 0x1D549}, - {0x1D551, 0x1D551}, {0x1D6A6, 0x1D6A7}, {0x1D7CC, 0x1D7CD}, - {0x1DA8C, 0x1DA9A}, {0x1DAA0, 0x1DAA0}, {0x1DAB0, 0x1DFFF}, - {0x1E007, 0x1E007}, {0x1E019, 0x1E01A}, {0x1E022, 0x1E022}, - {0x1E025, 0x1E025}, {0x1E02B, 0x1E7FF}, {0x1E8C5, 0x1E8C6}, - {0x1E8D7, 0x1E8FF}, {0x1E94B, 0x1E94F}, {0x1E95A, 0x1E95D}, - {0x1E960, 0x1EDFF}, {0x1EE04, 0x1EE04}, {0x1EE20, 0x1EE20}, - {0x1EE23, 0x1EE23}, {0x1EE25, 0x1EE26}, {0x1EE28, 0x1EE28}, - {0x1EE33, 0x1EE33}, {0x1EE38, 0x1EE38}, {0x1EE3A, 0x1EE3A}, - {0x1EE3C, 0x1EE41}, {0x1EE43, 0x1EE46}, {0x1EE48, 0x1EE48}, - {0x1EE4A, 0x1EE4A}, {0x1EE4C, 0x1EE4C}, {0x1EE50, 0x1EE50}, - {0x1EE53, 0x1EE53}, {0x1EE55, 0x1EE56}, {0x1EE58, 0x1EE58}, - {0x1EE5A, 0x1EE5A}, {0x1EE5C, 0x1EE5C}, {0x1EE5E, 0x1EE5E}, - {0x1EE60, 0x1EE60}, {0x1EE63, 0x1EE63}, {0x1EE65, 0x1EE66}, - {0x1EE6B, 0x1EE6B}, {0x1EE73, 0x1EE73}, {0x1EE78, 0x1EE78}, - {0x1EE7D, 0x1EE7D}, {0x1EE7F, 0x1EE7F}, {0x1EE8A, 0x1EE8A}, - {0x1EE9C, 0x1EEA0}, {0x1EEA4, 0x1EEA4}, {0x1EEAA, 0x1EEAA}, - {0x1EEBC, 0x1EEEF}, {0x1EEF2, 0x1EFFF}, {0x1F02C, 0x1F02F}, - {0x1F094, 0x1F09F}, {0x1F0AF, 0x1F0B0}, {0x1F0C0, 0x1F0C0}, - {0x1F0D0, 0x1F0D0}, {0x1F0F6, 0x1F0FF}, {0x1F10D, 0x1F10F}, - {0x1F12F, 0x1F12F}, {0x1F16C, 0x1F16F}, {0x1F1AD, 0x1F1E5}, - {0x1F203, 0x1F20F}, {0x1F23C, 0x1F23F}, {0x1F249, 0x1F24F}, - {0x1F252, 0x1F2FF}, {0x1F6D3, 0x1F6DF}, {0x1F6ED, 0x1F6EF}, - {0x1F6F7, 0x1F6FF}, {0x1F774, 0x1F77F}, {0x1F7D5, 0x1F7FF}, - {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F}, {0x1F85A, 0x1F85F}, - {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F90F}, {0x1F91F, 0x1F91F}, - {0x1F928, 0x1F92F}, {0x1F931, 0x1F932}, {0x1F93F, 0x1F93F}, - {0x1F94C, 0x1F94F}, {0x1F95F, 0x1F97F}, {0x1F992, 0x1F9BF}, - {0x1F9C1, 0x1FFFF}, {0x2A6D7, 0x2A6FF}, 
{0x2B735, 0x2B73F}, - {0x2B81E, 0x2B81F}, {0x2CEA2, 0x2F7FF}, {0x2FA1E, 0xE0000}, - {0xE0002, 0xE001F}, {0xE0080, 0xE00FF}, {0xE01F0, 0xEFFFF}, - {0xFFFFE, 0xFFFFF}, -} - -var neutral = table{ - {0x0000, 0x001F}, {0x007F, 0x007F}, {0x0080, 0x009F}, - {0x00A0, 0x00A0}, {0x00A9, 0x00A9}, {0x00AB, 0x00AB}, - {0x00B5, 0x00B5}, {0x00BB, 0x00BB}, {0x00C0, 0x00C5}, - {0x00C7, 0x00CF}, {0x00D1, 0x00D6}, {0x00D9, 0x00DD}, - {0x00E2, 0x00E5}, {0x00E7, 0x00E7}, {0x00EB, 0x00EB}, - {0x00EE, 0x00EF}, {0x00F1, 0x00F1}, {0x00F4, 0x00F6}, - {0x00FB, 0x00FB}, {0x00FD, 0x00FD}, {0x00FF, 0x00FF}, - {0x0100, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112}, - {0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A}, - {0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E}, - {0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C}, - {0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A}, - {0x016C, 0x017F}, {0x0180, 0x01BA}, {0x01BB, 0x01BB}, - {0x01BC, 0x01BF}, {0x01C0, 0x01C3}, {0x01C4, 0x01CD}, - {0x01CF, 0x01CF}, {0x01D1, 0x01D1}, {0x01D3, 0x01D3}, - {0x01D5, 0x01D5}, {0x01D7, 0x01D7}, {0x01D9, 0x01D9}, - {0x01DB, 0x01DB}, {0x01DD, 0x024F}, {0x0250, 0x0250}, - {0x0252, 0x0260}, {0x0262, 0x0293}, {0x0294, 0x0294}, - {0x0295, 0x02AF}, {0x02B0, 0x02C1}, {0x02C2, 0x02C3}, - {0x02C5, 0x02C5}, {0x02C6, 0x02C6}, {0x02C8, 0x02C8}, - {0x02CC, 0x02CC}, {0x02CE, 0x02CF}, {0x02D1, 0x02D1}, - {0x02D2, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE}, - {0x02E0, 0x02E4}, {0x02E5, 0x02EB}, {0x02EC, 0x02EC}, - {0x02ED, 0x02ED}, {0x02EE, 0x02EE}, {0x02EF, 0x02FF}, - {0x0370, 0x0373}, {0x0374, 0x0374}, {0x0375, 0x0375}, - {0x0376, 0x0377}, {0x037A, 0x037A}, {0x037B, 0x037D}, - {0x037E, 0x037E}, {0x037F, 0x037F}, {0x0384, 0x0385}, - {0x0386, 0x0386}, {0x0387, 0x0387}, {0x0388, 0x038A}, - {0x038C, 0x038C}, {0x038E, 0x0390}, {0x03AA, 0x03B0}, - {0x03C2, 0x03C2}, {0x03CA, 0x03F5}, {0x03F6, 0x03F6}, - {0x03F7, 0x03FF}, {0x0400, 0x0400}, {0x0402, 0x040F}, - {0x0450, 0x0450}, {0x0452, 0x0481}, {0x0482, 0x0482}, - {0x0483, 0x0487}, {0x0488, 0x0489}, {0x048A, 0x04FF}, - {0x0500, 0x052F}, {0x0531, 0x0556}, {0x0559, 0x0559}, - {0x055A, 0x055F}, {0x0561, 0x0587}, {0x0589, 0x0589}, - {0x058A, 0x058A}, {0x058D, 0x058E}, {0x058F, 0x058F}, - {0x0591, 0x05BD}, {0x05BE, 0x05BE}, {0x05BF, 0x05BF}, - {0x05C0, 0x05C0}, {0x05C1, 0x05C2}, {0x05C3, 0x05C3}, - {0x05C4, 0x05C5}, {0x05C6, 0x05C6}, {0x05C7, 0x05C7}, - {0x05D0, 0x05EA}, {0x05F0, 0x05F2}, {0x05F3, 0x05F4}, - {0x0600, 0x0605}, {0x0606, 0x0608}, {0x0609, 0x060A}, - {0x060B, 0x060B}, {0x060C, 0x060D}, {0x060E, 0x060F}, - {0x0610, 0x061A}, {0x061B, 0x061B}, {0x061C, 0x061C}, - {0x061E, 0x061F}, {0x0620, 0x063F}, {0x0640, 0x0640}, - {0x0641, 0x064A}, {0x064B, 0x065F}, {0x0660, 0x0669}, - {0x066A, 0x066D}, {0x066E, 0x066F}, {0x0670, 0x0670}, - {0x0671, 0x06D3}, {0x06D4, 0x06D4}, {0x06D5, 0x06D5}, - {0x06D6, 0x06DC}, {0x06DD, 0x06DD}, {0x06DE, 0x06DE}, - {0x06DF, 0x06E4}, {0x06E5, 0x06E6}, {0x06E7, 0x06E8}, - {0x06E9, 0x06E9}, {0x06EA, 0x06ED}, {0x06EE, 0x06EF}, - {0x06F0, 0x06F9}, {0x06FA, 0x06FC}, {0x06FD, 0x06FE}, - {0x06FF, 0x06FF}, {0x0700, 0x070D}, {0x070F, 0x070F}, - {0x0710, 0x0710}, {0x0711, 0x0711}, {0x0712, 0x072F}, - {0x0730, 0x074A}, {0x074D, 0x074F}, {0x0750, 0x077F}, - {0x0780, 0x07A5}, {0x07A6, 0x07B0}, {0x07B1, 0x07B1}, - {0x07C0, 0x07C9}, {0x07CA, 0x07EA}, {0x07EB, 0x07F3}, - {0x07F4, 0x07F5}, {0x07F6, 0x07F6}, {0x07F7, 0x07F9}, - {0x07FA, 0x07FA}, {0x0800, 0x0815}, {0x0816, 0x0819}, - {0x081A, 0x081A}, {0x081B, 0x0823}, {0x0824, 0x0824}, - {0x0825, 0x0827}, {0x0828, 0x0828}, {0x0829, 0x082D}, - 
{0x0830, 0x083E}, {0x0840, 0x0858}, {0x0859, 0x085B}, - {0x085E, 0x085E}, {0x08A0, 0x08B4}, {0x08B6, 0x08BD}, - {0x08D4, 0x08E1}, {0x08E2, 0x08E2}, {0x08E3, 0x08FF}, - {0x0900, 0x0902}, {0x0903, 0x0903}, {0x0904, 0x0939}, - {0x093A, 0x093A}, {0x093B, 0x093B}, {0x093C, 0x093C}, - {0x093D, 0x093D}, {0x093E, 0x0940}, {0x0941, 0x0948}, - {0x0949, 0x094C}, {0x094D, 0x094D}, {0x094E, 0x094F}, - {0x0950, 0x0950}, {0x0951, 0x0957}, {0x0958, 0x0961}, - {0x0962, 0x0963}, {0x0964, 0x0965}, {0x0966, 0x096F}, - {0x0970, 0x0970}, {0x0971, 0x0971}, {0x0972, 0x097F}, - {0x0980, 0x0980}, {0x0981, 0x0981}, {0x0982, 0x0983}, - {0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8}, - {0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9}, - {0x09BC, 0x09BC}, {0x09BD, 0x09BD}, {0x09BE, 0x09C0}, - {0x09C1, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CC}, - {0x09CD, 0x09CD}, {0x09CE, 0x09CE}, {0x09D7, 0x09D7}, - {0x09DC, 0x09DD}, {0x09DF, 0x09E1}, {0x09E2, 0x09E3}, - {0x09E6, 0x09EF}, {0x09F0, 0x09F1}, {0x09F2, 0x09F3}, - {0x09F4, 0x09F9}, {0x09FA, 0x09FA}, {0x09FB, 0x09FB}, - {0x0A01, 0x0A02}, {0x0A03, 0x0A03}, {0x0A05, 0x0A0A}, - {0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30}, - {0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39}, - {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A40}, {0x0A41, 0x0A42}, - {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, - {0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A6F}, - {0x0A70, 0x0A71}, {0x0A72, 0x0A74}, {0x0A75, 0x0A75}, - {0x0A81, 0x0A82}, {0x0A83, 0x0A83}, {0x0A85, 0x0A8D}, - {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, - {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9}, {0x0ABC, 0x0ABC}, - {0x0ABD, 0x0ABD}, {0x0ABE, 0x0AC0}, {0x0AC1, 0x0AC5}, - {0x0AC7, 0x0AC8}, {0x0AC9, 0x0AC9}, {0x0ACB, 0x0ACC}, - {0x0ACD, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE1}, - {0x0AE2, 0x0AE3}, {0x0AE6, 0x0AEF}, {0x0AF0, 0x0AF0}, - {0x0AF1, 0x0AF1}, {0x0AF9, 0x0AF9}, {0x0B01, 0x0B01}, - {0x0B02, 0x0B03}, {0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, - {0x0B13, 0x0B28}, {0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, - {0x0B35, 0x0B39}, {0x0B3C, 0x0B3C}, {0x0B3D, 0x0B3D}, - {0x0B3E, 0x0B3E}, {0x0B3F, 0x0B3F}, {0x0B40, 0x0B40}, - {0x0B41, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4C}, - {0x0B4D, 0x0B4D}, {0x0B56, 0x0B56}, {0x0B57, 0x0B57}, - {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B61}, {0x0B62, 0x0B63}, - {0x0B66, 0x0B6F}, {0x0B70, 0x0B70}, {0x0B71, 0x0B71}, - {0x0B72, 0x0B77}, {0x0B82, 0x0B82}, {0x0B83, 0x0B83}, - {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, - {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, - {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, - {0x0BBE, 0x0BBF}, {0x0BC0, 0x0BC0}, {0x0BC1, 0x0BC2}, - {0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCC}, {0x0BCD, 0x0BCD}, - {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7}, {0x0BE6, 0x0BEF}, - {0x0BF0, 0x0BF2}, {0x0BF3, 0x0BF8}, {0x0BF9, 0x0BF9}, - {0x0BFA, 0x0BFA}, {0x0C00, 0x0C00}, {0x0C01, 0x0C03}, - {0x0C05, 0x0C0C}, {0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, - {0x0C2A, 0x0C39}, {0x0C3D, 0x0C3D}, {0x0C3E, 0x0C40}, - {0x0C41, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, - {0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C61}, - {0x0C62, 0x0C63}, {0x0C66, 0x0C6F}, {0x0C78, 0x0C7E}, - {0x0C7F, 0x0C7F}, {0x0C80, 0x0C80}, {0x0C81, 0x0C81}, - {0x0C82, 0x0C83}, {0x0C85, 0x0C8C}, {0x0C8E, 0x0C90}, - {0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9}, - {0x0CBC, 0x0CBC}, {0x0CBD, 0x0CBD}, {0x0CBE, 0x0CBE}, - {0x0CBF, 0x0CBF}, {0x0CC0, 0x0CC4}, {0x0CC6, 0x0CC6}, - {0x0CC7, 0x0CC8}, {0x0CCA, 0x0CCB}, {0x0CCC, 0x0CCD}, - {0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE1}, - {0x0CE2, 0x0CE3}, {0x0CE6, 
0x0CEF}, {0x0CF1, 0x0CF2}, - {0x0D01, 0x0D01}, {0x0D02, 0x0D03}, {0x0D05, 0x0D0C}, - {0x0D0E, 0x0D10}, {0x0D12, 0x0D3A}, {0x0D3D, 0x0D3D}, - {0x0D3E, 0x0D40}, {0x0D41, 0x0D44}, {0x0D46, 0x0D48}, - {0x0D4A, 0x0D4C}, {0x0D4D, 0x0D4D}, {0x0D4E, 0x0D4E}, - {0x0D4F, 0x0D4F}, {0x0D54, 0x0D56}, {0x0D57, 0x0D57}, - {0x0D58, 0x0D5E}, {0x0D5F, 0x0D61}, {0x0D62, 0x0D63}, - {0x0D66, 0x0D6F}, {0x0D70, 0x0D78}, {0x0D79, 0x0D79}, - {0x0D7A, 0x0D7F}, {0x0D82, 0x0D83}, {0x0D85, 0x0D96}, - {0x0D9A, 0x0DB1}, {0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, - {0x0DC0, 0x0DC6}, {0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD1}, - {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, {0x0DD8, 0x0DDF}, - {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF3}, {0x0DF4, 0x0DF4}, - {0x0E01, 0x0E30}, {0x0E31, 0x0E31}, {0x0E32, 0x0E33}, - {0x0E34, 0x0E3A}, {0x0E3F, 0x0E3F}, {0x0E40, 0x0E45}, - {0x0E46, 0x0E46}, {0x0E47, 0x0E4E}, {0x0E4F, 0x0E4F}, - {0x0E50, 0x0E59}, {0x0E5A, 0x0E5B}, {0x0E81, 0x0E82}, - {0x0E84, 0x0E84}, {0x0E87, 0x0E88}, {0x0E8A, 0x0E8A}, - {0x0E8D, 0x0E8D}, {0x0E94, 0x0E97}, {0x0E99, 0x0E9F}, - {0x0EA1, 0x0EA3}, {0x0EA5, 0x0EA5}, {0x0EA7, 0x0EA7}, - {0x0EAA, 0x0EAB}, {0x0EAD, 0x0EB0}, {0x0EB1, 0x0EB1}, - {0x0EB2, 0x0EB3}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, - {0x0EBD, 0x0EBD}, {0x0EC0, 0x0EC4}, {0x0EC6, 0x0EC6}, - {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9}, {0x0EDC, 0x0EDF}, - {0x0F00, 0x0F00}, {0x0F01, 0x0F03}, {0x0F04, 0x0F12}, - {0x0F13, 0x0F13}, {0x0F14, 0x0F14}, {0x0F15, 0x0F17}, - {0x0F18, 0x0F19}, {0x0F1A, 0x0F1F}, {0x0F20, 0x0F29}, - {0x0F2A, 0x0F33}, {0x0F34, 0x0F34}, {0x0F35, 0x0F35}, - {0x0F36, 0x0F36}, {0x0F37, 0x0F37}, {0x0F38, 0x0F38}, - {0x0F39, 0x0F39}, {0x0F3A, 0x0F3A}, {0x0F3B, 0x0F3B}, - {0x0F3C, 0x0F3C}, {0x0F3D, 0x0F3D}, {0x0F3E, 0x0F3F}, - {0x0F40, 0x0F47}, {0x0F49, 0x0F6C}, {0x0F71, 0x0F7E}, - {0x0F7F, 0x0F7F}, {0x0F80, 0x0F84}, {0x0F85, 0x0F85}, - {0x0F86, 0x0F87}, {0x0F88, 0x0F8C}, {0x0F8D, 0x0F97}, - {0x0F99, 0x0FBC}, {0x0FBE, 0x0FC5}, {0x0FC6, 0x0FC6}, - {0x0FC7, 0x0FCC}, {0x0FCE, 0x0FCF}, {0x0FD0, 0x0FD4}, - {0x0FD5, 0x0FD8}, {0x0FD9, 0x0FDA}, {0x1000, 0x102A}, - {0x102B, 0x102C}, {0x102D, 0x1030}, {0x1031, 0x1031}, - {0x1032, 0x1037}, {0x1038, 0x1038}, {0x1039, 0x103A}, - {0x103B, 0x103C}, {0x103D, 0x103E}, {0x103F, 0x103F}, - {0x1040, 0x1049}, {0x104A, 0x104F}, {0x1050, 0x1055}, - {0x1056, 0x1057}, {0x1058, 0x1059}, {0x105A, 0x105D}, - {0x105E, 0x1060}, {0x1061, 0x1061}, {0x1062, 0x1064}, - {0x1065, 0x1066}, {0x1067, 0x106D}, {0x106E, 0x1070}, - {0x1071, 0x1074}, {0x1075, 0x1081}, {0x1082, 0x1082}, - {0x1083, 0x1084}, {0x1085, 0x1086}, {0x1087, 0x108C}, - {0x108D, 0x108D}, {0x108E, 0x108E}, {0x108F, 0x108F}, - {0x1090, 0x1099}, {0x109A, 0x109C}, {0x109D, 0x109D}, - {0x109E, 0x109F}, {0x10A0, 0x10C5}, {0x10C7, 0x10C7}, - {0x10CD, 0x10CD}, {0x10D0, 0x10FA}, {0x10FB, 0x10FB}, - {0x10FC, 0x10FC}, {0x10FD, 0x10FF}, {0x1160, 0x11FF}, - {0x1200, 0x1248}, {0x124A, 0x124D}, {0x1250, 0x1256}, - {0x1258, 0x1258}, {0x125A, 0x125D}, {0x1260, 0x1288}, - {0x128A, 0x128D}, {0x1290, 0x12B0}, {0x12B2, 0x12B5}, - {0x12B8, 0x12BE}, {0x12C0, 0x12C0}, {0x12C2, 0x12C5}, - {0x12C8, 0x12D6}, {0x12D8, 0x1310}, {0x1312, 0x1315}, - {0x1318, 0x135A}, {0x135D, 0x135F}, {0x1360, 0x1368}, - {0x1369, 0x137C}, {0x1380, 0x138F}, {0x1390, 0x1399}, - {0x13A0, 0x13F5}, {0x13F8, 0x13FD}, {0x1400, 0x1400}, - {0x1401, 0x166C}, {0x166D, 0x166E}, {0x166F, 0x167F}, - {0x1680, 0x1680}, {0x1681, 0x169A}, {0x169B, 0x169B}, - {0x169C, 0x169C}, {0x16A0, 0x16EA}, {0x16EB, 0x16ED}, - {0x16EE, 0x16F0}, {0x16F1, 0x16F8}, {0x1700, 0x170C}, - {0x170E, 0x1711}, {0x1712, 0x1714}, {0x1720, 0x1731}, 
- {0x1732, 0x1734}, {0x1735, 0x1736}, {0x1740, 0x1751}, - {0x1752, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770}, - {0x1772, 0x1773}, {0x1780, 0x17B3}, {0x17B4, 0x17B5}, - {0x17B6, 0x17B6}, {0x17B7, 0x17BD}, {0x17BE, 0x17C5}, - {0x17C6, 0x17C6}, {0x17C7, 0x17C8}, {0x17C9, 0x17D3}, - {0x17D4, 0x17D6}, {0x17D7, 0x17D7}, {0x17D8, 0x17DA}, - {0x17DB, 0x17DB}, {0x17DC, 0x17DC}, {0x17DD, 0x17DD}, - {0x17E0, 0x17E9}, {0x17F0, 0x17F9}, {0x1800, 0x1805}, - {0x1806, 0x1806}, {0x1807, 0x180A}, {0x180B, 0x180D}, - {0x180E, 0x180E}, {0x1810, 0x1819}, {0x1820, 0x1842}, - {0x1843, 0x1843}, {0x1844, 0x1877}, {0x1880, 0x1884}, - {0x1885, 0x1886}, {0x1887, 0x18A8}, {0x18A9, 0x18A9}, - {0x18AA, 0x18AA}, {0x18B0, 0x18F5}, {0x1900, 0x191E}, - {0x1920, 0x1922}, {0x1923, 0x1926}, {0x1927, 0x1928}, - {0x1929, 0x192B}, {0x1930, 0x1931}, {0x1932, 0x1932}, - {0x1933, 0x1938}, {0x1939, 0x193B}, {0x1940, 0x1940}, - {0x1944, 0x1945}, {0x1946, 0x194F}, {0x1950, 0x196D}, - {0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9}, - {0x19D0, 0x19D9}, {0x19DA, 0x19DA}, {0x19DE, 0x19DF}, - {0x19E0, 0x19FF}, {0x1A00, 0x1A16}, {0x1A17, 0x1A18}, - {0x1A19, 0x1A1A}, {0x1A1B, 0x1A1B}, {0x1A1E, 0x1A1F}, - {0x1A20, 0x1A54}, {0x1A55, 0x1A55}, {0x1A56, 0x1A56}, - {0x1A57, 0x1A57}, {0x1A58, 0x1A5E}, {0x1A60, 0x1A60}, - {0x1A61, 0x1A61}, {0x1A62, 0x1A62}, {0x1A63, 0x1A64}, - {0x1A65, 0x1A6C}, {0x1A6D, 0x1A72}, {0x1A73, 0x1A7C}, - {0x1A7F, 0x1A7F}, {0x1A80, 0x1A89}, {0x1A90, 0x1A99}, - {0x1AA0, 0x1AA6}, {0x1AA7, 0x1AA7}, {0x1AA8, 0x1AAD}, - {0x1AB0, 0x1ABD}, {0x1ABE, 0x1ABE}, {0x1B00, 0x1B03}, - {0x1B04, 0x1B04}, {0x1B05, 0x1B33}, {0x1B34, 0x1B34}, - {0x1B35, 0x1B35}, {0x1B36, 0x1B3A}, {0x1B3B, 0x1B3B}, - {0x1B3C, 0x1B3C}, {0x1B3D, 0x1B41}, {0x1B42, 0x1B42}, - {0x1B43, 0x1B44}, {0x1B45, 0x1B4B}, {0x1B50, 0x1B59}, - {0x1B5A, 0x1B60}, {0x1B61, 0x1B6A}, {0x1B6B, 0x1B73}, - {0x1B74, 0x1B7C}, {0x1B80, 0x1B81}, {0x1B82, 0x1B82}, - {0x1B83, 0x1BA0}, {0x1BA1, 0x1BA1}, {0x1BA2, 0x1BA5}, - {0x1BA6, 0x1BA7}, {0x1BA8, 0x1BA9}, {0x1BAA, 0x1BAA}, - {0x1BAB, 0x1BAD}, {0x1BAE, 0x1BAF}, {0x1BB0, 0x1BB9}, - {0x1BBA, 0x1BBF}, {0x1BC0, 0x1BE5}, {0x1BE6, 0x1BE6}, - {0x1BE7, 0x1BE7}, {0x1BE8, 0x1BE9}, {0x1BEA, 0x1BEC}, - {0x1BED, 0x1BED}, {0x1BEE, 0x1BEE}, {0x1BEF, 0x1BF1}, - {0x1BF2, 0x1BF3}, {0x1BFC, 0x1BFF}, {0x1C00, 0x1C23}, - {0x1C24, 0x1C2B}, {0x1C2C, 0x1C33}, {0x1C34, 0x1C35}, - {0x1C36, 0x1C37}, {0x1C3B, 0x1C3F}, {0x1C40, 0x1C49}, - {0x1C4D, 0x1C4F}, {0x1C50, 0x1C59}, {0x1C5A, 0x1C77}, - {0x1C78, 0x1C7D}, {0x1C7E, 0x1C7F}, {0x1C80, 0x1C88}, - {0x1CC0, 0x1CC7}, {0x1CD0, 0x1CD2}, {0x1CD3, 0x1CD3}, - {0x1CD4, 0x1CE0}, {0x1CE1, 0x1CE1}, {0x1CE2, 0x1CE8}, - {0x1CE9, 0x1CEC}, {0x1CED, 0x1CED}, {0x1CEE, 0x1CF1}, - {0x1CF2, 0x1CF3}, {0x1CF4, 0x1CF4}, {0x1CF5, 0x1CF6}, - {0x1CF8, 0x1CF9}, {0x1D00, 0x1D2B}, {0x1D2C, 0x1D6A}, - {0x1D6B, 0x1D77}, {0x1D78, 0x1D78}, {0x1D79, 0x1D7F}, - {0x1D80, 0x1D9A}, {0x1D9B, 0x1DBF}, {0x1DC0, 0x1DF5}, - {0x1DFB, 0x1DFF}, {0x1E00, 0x1EFF}, {0x1F00, 0x1F15}, - {0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D}, - {0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B}, - {0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4}, - {0x1FB6, 0x1FBC}, {0x1FBD, 0x1FBD}, {0x1FBE, 0x1FBE}, - {0x1FBF, 0x1FC1}, {0x1FC2, 0x1FC4}, {0x1FC6, 0x1FCC}, - {0x1FCD, 0x1FCF}, {0x1FD0, 0x1FD3}, {0x1FD6, 0x1FDB}, - {0x1FDD, 0x1FDF}, {0x1FE0, 0x1FEC}, {0x1FED, 0x1FEF}, - {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFC}, {0x1FFD, 0x1FFE}, - {0x2000, 0x200A}, {0x200B, 0x200F}, {0x2011, 0x2012}, - {0x2017, 0x2017}, {0x201A, 0x201A}, {0x201B, 0x201B}, - {0x201E, 0x201E}, 
{0x201F, 0x201F}, {0x2023, 0x2023}, - {0x2028, 0x2028}, {0x2029, 0x2029}, {0x202A, 0x202E}, - {0x202F, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034}, - {0x2036, 0x2038}, {0x2039, 0x2039}, {0x203A, 0x203A}, - {0x203C, 0x203D}, {0x203F, 0x2040}, {0x2041, 0x2043}, - {0x2044, 0x2044}, {0x2045, 0x2045}, {0x2046, 0x2046}, - {0x2047, 0x2051}, {0x2052, 0x2052}, {0x2053, 0x2053}, - {0x2054, 0x2054}, {0x2055, 0x205E}, {0x205F, 0x205F}, - {0x2060, 0x2064}, {0x2066, 0x206F}, {0x2070, 0x2070}, - {0x2071, 0x2071}, {0x2075, 0x2079}, {0x207A, 0x207C}, - {0x207D, 0x207D}, {0x207E, 0x207E}, {0x2080, 0x2080}, - {0x2085, 0x2089}, {0x208A, 0x208C}, {0x208D, 0x208D}, - {0x208E, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8}, - {0x20AA, 0x20AB}, {0x20AD, 0x20BE}, {0x20D0, 0x20DC}, - {0x20DD, 0x20E0}, {0x20E1, 0x20E1}, {0x20E2, 0x20E4}, - {0x20E5, 0x20F0}, {0x2100, 0x2101}, {0x2102, 0x2102}, - {0x2104, 0x2104}, {0x2106, 0x2106}, {0x2107, 0x2107}, - {0x2108, 0x2108}, {0x210A, 0x2112}, {0x2114, 0x2114}, - {0x2115, 0x2115}, {0x2117, 0x2117}, {0x2118, 0x2118}, - {0x2119, 0x211D}, {0x211E, 0x2120}, {0x2123, 0x2123}, - {0x2124, 0x2124}, {0x2125, 0x2125}, {0x2127, 0x2127}, - {0x2128, 0x2128}, {0x2129, 0x2129}, {0x212A, 0x212A}, - {0x212C, 0x212D}, {0x212E, 0x212E}, {0x212F, 0x2134}, - {0x2135, 0x2138}, {0x2139, 0x2139}, {0x213A, 0x213B}, - {0x213C, 0x213F}, {0x2140, 0x2144}, {0x2145, 0x2149}, - {0x214A, 0x214A}, {0x214B, 0x214B}, {0x214C, 0x214D}, - {0x214E, 0x214E}, {0x214F, 0x214F}, {0x2150, 0x2152}, - {0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F}, - {0x217A, 0x2182}, {0x2183, 0x2184}, {0x2185, 0x2188}, - {0x218A, 0x218B}, {0x219A, 0x219B}, {0x219C, 0x219F}, - {0x21A0, 0x21A0}, {0x21A1, 0x21A2}, {0x21A3, 0x21A3}, - {0x21A4, 0x21A5}, {0x21A6, 0x21A6}, {0x21A7, 0x21AD}, - {0x21AE, 0x21AE}, {0x21AF, 0x21B7}, {0x21BA, 0x21CD}, - {0x21CE, 0x21CF}, {0x21D0, 0x21D1}, {0x21D3, 0x21D3}, - {0x21D5, 0x21E6}, {0x21E8, 0x21F3}, {0x21F4, 0x21FF}, - {0x2201, 0x2201}, {0x2204, 0x2206}, {0x2209, 0x220A}, - {0x220C, 0x220E}, {0x2210, 0x2210}, {0x2212, 0x2214}, - {0x2216, 0x2219}, {0x221B, 0x221C}, {0x2221, 0x2222}, - {0x2224, 0x2224}, {0x2226, 0x2226}, {0x222D, 0x222D}, - {0x222F, 0x2233}, {0x2238, 0x223B}, {0x223E, 0x2247}, - {0x2249, 0x224B}, {0x224D, 0x2251}, {0x2253, 0x225F}, - {0x2262, 0x2263}, {0x2268, 0x2269}, {0x226C, 0x226D}, - {0x2270, 0x2281}, {0x2284, 0x2285}, {0x2288, 0x2294}, - {0x2296, 0x2298}, {0x229A, 0x22A4}, {0x22A6, 0x22BE}, - {0x22C0, 0x22FF}, {0x2300, 0x2307}, {0x2308, 0x2308}, - {0x2309, 0x2309}, {0x230A, 0x230A}, {0x230B, 0x230B}, - {0x230C, 0x2311}, {0x2313, 0x2319}, {0x231C, 0x231F}, - {0x2320, 0x2321}, {0x2322, 0x2328}, {0x232B, 0x237B}, - {0x237C, 0x237C}, {0x237D, 0x239A}, {0x239B, 0x23B3}, - {0x23B4, 0x23DB}, {0x23DC, 0x23E1}, {0x23E2, 0x23E8}, - {0x23ED, 0x23EF}, {0x23F1, 0x23F2}, {0x23F4, 0x23FE}, - {0x2400, 0x2426}, {0x2440, 0x244A}, {0x24EA, 0x24EA}, - {0x254C, 0x254F}, {0x2574, 0x257F}, {0x2590, 0x2591}, - {0x2596, 0x259F}, {0x25A2, 0x25A2}, {0x25AA, 0x25B1}, - {0x25B4, 0x25B5}, {0x25B8, 0x25BB}, {0x25BE, 0x25BF}, - {0x25C2, 0x25C5}, {0x25C9, 0x25CA}, {0x25CC, 0x25CD}, - {0x25D2, 0x25E1}, {0x25E6, 0x25EE}, {0x25F0, 0x25F7}, - {0x25F8, 0x25FC}, {0x25FF, 0x25FF}, {0x2600, 0x2604}, - {0x2607, 0x2608}, {0x260A, 0x260D}, {0x2610, 0x2613}, - {0x2616, 0x261B}, {0x261D, 0x261D}, {0x261F, 0x263F}, - {0x2641, 0x2641}, {0x2643, 0x2647}, {0x2654, 0x265F}, - {0x2662, 0x2662}, {0x2666, 0x2666}, {0x266B, 0x266B}, - {0x266E, 0x266E}, {0x2670, 0x267E}, {0x2680, 0x2692}, - {0x2694, 0x269D}, {0x26A0, 0x26A0}, {0x26A2, 
0x26A9}, - {0x26AC, 0x26BC}, {0x26C0, 0x26C3}, {0x26E2, 0x26E2}, - {0x26E4, 0x26E7}, {0x2700, 0x2704}, {0x2706, 0x2709}, - {0x270C, 0x2727}, {0x2729, 0x273C}, {0x273E, 0x274B}, - {0x274D, 0x274D}, {0x274F, 0x2752}, {0x2756, 0x2756}, - {0x2758, 0x2767}, {0x2768, 0x2768}, {0x2769, 0x2769}, - {0x276A, 0x276A}, {0x276B, 0x276B}, {0x276C, 0x276C}, - {0x276D, 0x276D}, {0x276E, 0x276E}, {0x276F, 0x276F}, - {0x2770, 0x2770}, {0x2771, 0x2771}, {0x2772, 0x2772}, - {0x2773, 0x2773}, {0x2774, 0x2774}, {0x2775, 0x2775}, - {0x2780, 0x2793}, {0x2794, 0x2794}, {0x2798, 0x27AF}, - {0x27B1, 0x27BE}, {0x27C0, 0x27C4}, {0x27C5, 0x27C5}, - {0x27C6, 0x27C6}, {0x27C7, 0x27E5}, {0x27EE, 0x27EE}, - {0x27EF, 0x27EF}, {0x27F0, 0x27FF}, {0x2800, 0x28FF}, - {0x2900, 0x297F}, {0x2980, 0x2982}, {0x2983, 0x2983}, - {0x2984, 0x2984}, {0x2987, 0x2987}, {0x2988, 0x2988}, - {0x2989, 0x2989}, {0x298A, 0x298A}, {0x298B, 0x298B}, - {0x298C, 0x298C}, {0x298D, 0x298D}, {0x298E, 0x298E}, - {0x298F, 0x298F}, {0x2990, 0x2990}, {0x2991, 0x2991}, - {0x2992, 0x2992}, {0x2993, 0x2993}, {0x2994, 0x2994}, - {0x2995, 0x2995}, {0x2996, 0x2996}, {0x2997, 0x2997}, - {0x2998, 0x2998}, {0x2999, 0x29D7}, {0x29D8, 0x29D8}, - {0x29D9, 0x29D9}, {0x29DA, 0x29DA}, {0x29DB, 0x29DB}, - {0x29DC, 0x29FB}, {0x29FC, 0x29FC}, {0x29FD, 0x29FD}, - {0x29FE, 0x29FF}, {0x2A00, 0x2AFF}, {0x2B00, 0x2B1A}, - {0x2B1D, 0x2B2F}, {0x2B30, 0x2B44}, {0x2B45, 0x2B46}, - {0x2B47, 0x2B4C}, {0x2B4D, 0x2B4F}, {0x2B51, 0x2B54}, - {0x2B5A, 0x2B73}, {0x2B76, 0x2B95}, {0x2B98, 0x2BB9}, - {0x2BBD, 0x2BC8}, {0x2BCA, 0x2BD1}, {0x2BEC, 0x2BEF}, - {0x2C00, 0x2C2E}, {0x2C30, 0x2C5E}, {0x2C60, 0x2C7B}, - {0x2C7C, 0x2C7D}, {0x2C7E, 0x2C7F}, {0x2C80, 0x2CE4}, - {0x2CE5, 0x2CEA}, {0x2CEB, 0x2CEE}, {0x2CEF, 0x2CF1}, - {0x2CF2, 0x2CF3}, {0x2CF9, 0x2CFC}, {0x2CFD, 0x2CFD}, - {0x2CFE, 0x2CFF}, {0x2D00, 0x2D25}, {0x2D27, 0x2D27}, - {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D6F}, - {0x2D70, 0x2D70}, {0x2D7F, 0x2D7F}, {0x2D80, 0x2D96}, - {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, - {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, - {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE}, {0x2DE0, 0x2DFF}, - {0x2E00, 0x2E01}, {0x2E02, 0x2E02}, {0x2E03, 0x2E03}, - {0x2E04, 0x2E04}, {0x2E05, 0x2E05}, {0x2E06, 0x2E08}, - {0x2E09, 0x2E09}, {0x2E0A, 0x2E0A}, {0x2E0B, 0x2E0B}, - {0x2E0C, 0x2E0C}, {0x2E0D, 0x2E0D}, {0x2E0E, 0x2E16}, - {0x2E17, 0x2E17}, {0x2E18, 0x2E19}, {0x2E1A, 0x2E1A}, - {0x2E1B, 0x2E1B}, {0x2E1C, 0x2E1C}, {0x2E1D, 0x2E1D}, - {0x2E1E, 0x2E1F}, {0x2E20, 0x2E20}, {0x2E21, 0x2E21}, - {0x2E22, 0x2E22}, {0x2E23, 0x2E23}, {0x2E24, 0x2E24}, - {0x2E25, 0x2E25}, {0x2E26, 0x2E26}, {0x2E27, 0x2E27}, - {0x2E28, 0x2E28}, {0x2E29, 0x2E29}, {0x2E2A, 0x2E2E}, - {0x2E2F, 0x2E2F}, {0x2E30, 0x2E39}, {0x2E3A, 0x2E3B}, - {0x2E3C, 0x2E3F}, {0x2E40, 0x2E40}, {0x2E41, 0x2E41}, - {0x2E42, 0x2E42}, {0x2E43, 0x2E44}, {0x303F, 0x303F}, - {0x4DC0, 0x4DFF}, {0xA4D0, 0xA4F7}, {0xA4F8, 0xA4FD}, - {0xA4FE, 0xA4FF}, {0xA500, 0xA60B}, {0xA60C, 0xA60C}, - {0xA60D, 0xA60F}, {0xA610, 0xA61F}, {0xA620, 0xA629}, - {0xA62A, 0xA62B}, {0xA640, 0xA66D}, {0xA66E, 0xA66E}, - {0xA66F, 0xA66F}, {0xA670, 0xA672}, {0xA673, 0xA673}, - {0xA674, 0xA67D}, {0xA67E, 0xA67E}, {0xA67F, 0xA67F}, - {0xA680, 0xA69B}, {0xA69C, 0xA69D}, {0xA69E, 0xA69F}, - {0xA6A0, 0xA6E5}, {0xA6E6, 0xA6EF}, {0xA6F0, 0xA6F1}, - {0xA6F2, 0xA6F7}, {0xA700, 0xA716}, {0xA717, 0xA71F}, - {0xA720, 0xA721}, {0xA722, 0xA76F}, {0xA770, 0xA770}, - {0xA771, 0xA787}, {0xA788, 0xA788}, {0xA789, 0xA78A}, - {0xA78B, 0xA78E}, {0xA78F, 0xA78F}, {0xA790, 0xA7AE}, - {0xA7B0, 
0xA7B7}, {0xA7F7, 0xA7F7}, {0xA7F8, 0xA7F9}, - {0xA7FA, 0xA7FA}, {0xA7FB, 0xA7FF}, {0xA800, 0xA801}, - {0xA802, 0xA802}, {0xA803, 0xA805}, {0xA806, 0xA806}, - {0xA807, 0xA80A}, {0xA80B, 0xA80B}, {0xA80C, 0xA822}, - {0xA823, 0xA824}, {0xA825, 0xA826}, {0xA827, 0xA827}, - {0xA828, 0xA82B}, {0xA830, 0xA835}, {0xA836, 0xA837}, - {0xA838, 0xA838}, {0xA839, 0xA839}, {0xA840, 0xA873}, - {0xA874, 0xA877}, {0xA880, 0xA881}, {0xA882, 0xA8B3}, - {0xA8B4, 0xA8C3}, {0xA8C4, 0xA8C5}, {0xA8CE, 0xA8CF}, - {0xA8D0, 0xA8D9}, {0xA8E0, 0xA8F1}, {0xA8F2, 0xA8F7}, - {0xA8F8, 0xA8FA}, {0xA8FB, 0xA8FB}, {0xA8FC, 0xA8FC}, - {0xA8FD, 0xA8FD}, {0xA900, 0xA909}, {0xA90A, 0xA925}, - {0xA926, 0xA92D}, {0xA92E, 0xA92F}, {0xA930, 0xA946}, - {0xA947, 0xA951}, {0xA952, 0xA953}, {0xA95F, 0xA95F}, - {0xA980, 0xA982}, {0xA983, 0xA983}, {0xA984, 0xA9B2}, - {0xA9B3, 0xA9B3}, {0xA9B4, 0xA9B5}, {0xA9B6, 0xA9B9}, - {0xA9BA, 0xA9BB}, {0xA9BC, 0xA9BC}, {0xA9BD, 0xA9C0}, - {0xA9C1, 0xA9CD}, {0xA9CF, 0xA9CF}, {0xA9D0, 0xA9D9}, - {0xA9DE, 0xA9DF}, {0xA9E0, 0xA9E4}, {0xA9E5, 0xA9E5}, - {0xA9E6, 0xA9E6}, {0xA9E7, 0xA9EF}, {0xA9F0, 0xA9F9}, - {0xA9FA, 0xA9FE}, {0xAA00, 0xAA28}, {0xAA29, 0xAA2E}, - {0xAA2F, 0xAA30}, {0xAA31, 0xAA32}, {0xAA33, 0xAA34}, - {0xAA35, 0xAA36}, {0xAA40, 0xAA42}, {0xAA43, 0xAA43}, - {0xAA44, 0xAA4B}, {0xAA4C, 0xAA4C}, {0xAA4D, 0xAA4D}, - {0xAA50, 0xAA59}, {0xAA5C, 0xAA5F}, {0xAA60, 0xAA6F}, - {0xAA70, 0xAA70}, {0xAA71, 0xAA76}, {0xAA77, 0xAA79}, - {0xAA7A, 0xAA7A}, {0xAA7B, 0xAA7B}, {0xAA7C, 0xAA7C}, - {0xAA7D, 0xAA7D}, {0xAA7E, 0xAA7F}, {0xAA80, 0xAAAF}, - {0xAAB0, 0xAAB0}, {0xAAB1, 0xAAB1}, {0xAAB2, 0xAAB4}, - {0xAAB5, 0xAAB6}, {0xAAB7, 0xAAB8}, {0xAAB9, 0xAABD}, - {0xAABE, 0xAABF}, {0xAAC0, 0xAAC0}, {0xAAC1, 0xAAC1}, - {0xAAC2, 0xAAC2}, {0xAADB, 0xAADC}, {0xAADD, 0xAADD}, - {0xAADE, 0xAADF}, {0xAAE0, 0xAAEA}, {0xAAEB, 0xAAEB}, - {0xAAEC, 0xAAED}, {0xAAEE, 0xAAEF}, {0xAAF0, 0xAAF1}, - {0xAAF2, 0xAAF2}, {0xAAF3, 0xAAF4}, {0xAAF5, 0xAAF5}, - {0xAAF6, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E}, - {0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E}, - {0xAB30, 0xAB5A}, {0xAB5B, 0xAB5B}, {0xAB5C, 0xAB5F}, - {0xAB60, 0xAB65}, {0xAB70, 0xABBF}, {0xABC0, 0xABE2}, - {0xABE3, 0xABE4}, {0xABE5, 0xABE5}, {0xABE6, 0xABE7}, - {0xABE8, 0xABE8}, {0xABE9, 0xABEA}, {0xABEB, 0xABEB}, - {0xABEC, 0xABEC}, {0xABED, 0xABED}, {0xABF0, 0xABF9}, - {0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDB7F}, - {0xDB80, 0xDBFF}, {0xDC00, 0xDFFF}, {0xFB00, 0xFB06}, - {0xFB13, 0xFB17}, {0xFB1D, 0xFB1D}, {0xFB1E, 0xFB1E}, - {0xFB1F, 0xFB28}, {0xFB29, 0xFB29}, {0xFB2A, 0xFB36}, - {0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41}, - {0xFB43, 0xFB44}, {0xFB46, 0xFB4F}, {0xFB50, 0xFBB1}, - {0xFBB2, 0xFBC1}, {0xFBD3, 0xFD3D}, {0xFD3E, 0xFD3E}, - {0xFD3F, 0xFD3F}, {0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, - {0xFDF0, 0xFDFB}, {0xFDFC, 0xFDFC}, {0xFDFD, 0xFDFD}, - {0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC}, - {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFC, 0xFFFC}, - {0x10000, 0x1000B}, {0x1000D, 0x10026}, {0x10028, 0x1003A}, - {0x1003C, 0x1003D}, {0x1003F, 0x1004D}, {0x10050, 0x1005D}, - {0x10080, 0x100FA}, {0x10100, 0x10102}, {0x10107, 0x10133}, - {0x10137, 0x1013F}, {0x10140, 0x10174}, {0x10175, 0x10178}, - {0x10179, 0x10189}, {0x1018A, 0x1018B}, {0x1018C, 0x1018E}, - {0x10190, 0x1019B}, {0x101A0, 0x101A0}, {0x101D0, 0x101FC}, - {0x101FD, 0x101FD}, {0x10280, 0x1029C}, {0x102A0, 0x102D0}, - {0x102E0, 0x102E0}, {0x102E1, 0x102FB}, {0x10300, 0x1031F}, - {0x10320, 0x10323}, {0x10330, 0x10340}, {0x10341, 0x10341}, - {0x10342, 0x10349}, {0x1034A, 
0x1034A}, {0x10350, 0x10375}, - {0x10376, 0x1037A}, {0x10380, 0x1039D}, {0x1039F, 0x1039F}, - {0x103A0, 0x103C3}, {0x103C8, 0x103CF}, {0x103D0, 0x103D0}, - {0x103D1, 0x103D5}, {0x10400, 0x1044F}, {0x10450, 0x1047F}, - {0x10480, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3}, - {0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563}, - {0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755}, - {0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808}, - {0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C}, - {0x1083F, 0x1083F}, {0x10840, 0x10855}, {0x10857, 0x10857}, - {0x10858, 0x1085F}, {0x10860, 0x10876}, {0x10877, 0x10878}, - {0x10879, 0x1087F}, {0x10880, 0x1089E}, {0x108A7, 0x108AF}, - {0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x108FF}, - {0x10900, 0x10915}, {0x10916, 0x1091B}, {0x1091F, 0x1091F}, - {0x10920, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x1099F}, - {0x109A0, 0x109B7}, {0x109BC, 0x109BD}, {0x109BE, 0x109BF}, - {0x109C0, 0x109CF}, {0x109D2, 0x109FF}, {0x10A00, 0x10A00}, - {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, - {0x10A10, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A33}, - {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x10A40, 0x10A47}, - {0x10A50, 0x10A58}, {0x10A60, 0x10A7C}, {0x10A7D, 0x10A7E}, - {0x10A7F, 0x10A7F}, {0x10A80, 0x10A9C}, {0x10A9D, 0x10A9F}, - {0x10AC0, 0x10AC7}, {0x10AC8, 0x10AC8}, {0x10AC9, 0x10AE4}, - {0x10AE5, 0x10AE6}, {0x10AEB, 0x10AEF}, {0x10AF0, 0x10AF6}, - {0x10B00, 0x10B35}, {0x10B39, 0x10B3F}, {0x10B40, 0x10B55}, - {0x10B58, 0x10B5F}, {0x10B60, 0x10B72}, {0x10B78, 0x10B7F}, - {0x10B80, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF}, - {0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2}, - {0x10CFA, 0x10CFF}, {0x10E60, 0x10E7E}, {0x11000, 0x11000}, - {0x11001, 0x11001}, {0x11002, 0x11002}, {0x11003, 0x11037}, - {0x11038, 0x11046}, {0x11047, 0x1104D}, {0x11052, 0x11065}, - {0x11066, 0x1106F}, {0x1107F, 0x1107F}, {0x11080, 0x11081}, - {0x11082, 0x11082}, {0x11083, 0x110AF}, {0x110B0, 0x110B2}, - {0x110B3, 0x110B6}, {0x110B7, 0x110B8}, {0x110B9, 0x110BA}, - {0x110BB, 0x110BC}, {0x110BD, 0x110BD}, {0x110BE, 0x110C1}, - {0x110D0, 0x110E8}, {0x110F0, 0x110F9}, {0x11100, 0x11102}, - {0x11103, 0x11126}, {0x11127, 0x1112B}, {0x1112C, 0x1112C}, - {0x1112D, 0x11134}, {0x11136, 0x1113F}, {0x11140, 0x11143}, - {0x11150, 0x11172}, {0x11173, 0x11173}, {0x11174, 0x11175}, - {0x11176, 0x11176}, {0x11180, 0x11181}, {0x11182, 0x11182}, - {0x11183, 0x111B2}, {0x111B3, 0x111B5}, {0x111B6, 0x111BE}, - {0x111BF, 0x111C0}, {0x111C1, 0x111C4}, {0x111C5, 0x111C9}, - {0x111CA, 0x111CC}, {0x111CD, 0x111CD}, {0x111D0, 0x111D9}, - {0x111DA, 0x111DA}, {0x111DB, 0x111DB}, {0x111DC, 0x111DC}, - {0x111DD, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211}, - {0x11213, 0x1122B}, {0x1122C, 0x1122E}, {0x1122F, 0x11231}, - {0x11232, 0x11233}, {0x11234, 0x11234}, {0x11235, 0x11235}, - {0x11236, 0x11237}, {0x11238, 0x1123D}, {0x1123E, 0x1123E}, - {0x11280, 0x11286}, {0x11288, 0x11288}, {0x1128A, 0x1128D}, - {0x1128F, 0x1129D}, {0x1129F, 0x112A8}, {0x112A9, 0x112A9}, - {0x112B0, 0x112DE}, {0x112DF, 0x112DF}, {0x112E0, 0x112E2}, - {0x112E3, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11301}, - {0x11302, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310}, - {0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333}, - {0x11335, 0x11339}, {0x1133C, 0x1133C}, {0x1133D, 0x1133D}, - {0x1133E, 0x1133F}, {0x11340, 0x11340}, {0x11341, 0x11344}, - {0x11347, 0x11348}, {0x1134B, 0x1134D}, {0x11350, 0x11350}, - {0x11357, 0x11357}, {0x1135D, 0x11361}, {0x11362, 
0x11363}, - {0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x11434}, - {0x11435, 0x11437}, {0x11438, 0x1143F}, {0x11440, 0x11441}, - {0x11442, 0x11444}, {0x11445, 0x11445}, {0x11446, 0x11446}, - {0x11447, 0x1144A}, {0x1144B, 0x1144F}, {0x11450, 0x11459}, - {0x1145B, 0x1145B}, {0x1145D, 0x1145D}, {0x11480, 0x114AF}, - {0x114B0, 0x114B2}, {0x114B3, 0x114B8}, {0x114B9, 0x114B9}, - {0x114BA, 0x114BA}, {0x114BB, 0x114BE}, {0x114BF, 0x114C0}, - {0x114C1, 0x114C1}, {0x114C2, 0x114C3}, {0x114C4, 0x114C5}, - {0x114C6, 0x114C6}, {0x114C7, 0x114C7}, {0x114D0, 0x114D9}, - {0x11580, 0x115AE}, {0x115AF, 0x115B1}, {0x115B2, 0x115B5}, - {0x115B8, 0x115BB}, {0x115BC, 0x115BD}, {0x115BE, 0x115BE}, - {0x115BF, 0x115C0}, {0x115C1, 0x115D7}, {0x115D8, 0x115DB}, - {0x115DC, 0x115DD}, {0x11600, 0x1162F}, {0x11630, 0x11632}, - {0x11633, 0x1163A}, {0x1163B, 0x1163C}, {0x1163D, 0x1163D}, - {0x1163E, 0x1163E}, {0x1163F, 0x11640}, {0x11641, 0x11643}, - {0x11644, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C}, - {0x11680, 0x116AA}, {0x116AB, 0x116AB}, {0x116AC, 0x116AC}, - {0x116AD, 0x116AD}, {0x116AE, 0x116AF}, {0x116B0, 0x116B5}, - {0x116B6, 0x116B6}, {0x116B7, 0x116B7}, {0x116C0, 0x116C9}, - {0x11700, 0x11719}, {0x1171D, 0x1171F}, {0x11720, 0x11721}, - {0x11722, 0x11725}, {0x11726, 0x11726}, {0x11727, 0x1172B}, - {0x11730, 0x11739}, {0x1173A, 0x1173B}, {0x1173C, 0x1173E}, - {0x1173F, 0x1173F}, {0x118A0, 0x118DF}, {0x118E0, 0x118E9}, - {0x118EA, 0x118F2}, {0x118FF, 0x118FF}, {0x11AC0, 0x11AF8}, - {0x11C00, 0x11C08}, {0x11C0A, 0x11C2E}, {0x11C2F, 0x11C2F}, - {0x11C30, 0x11C36}, {0x11C38, 0x11C3D}, {0x11C3E, 0x11C3E}, - {0x11C3F, 0x11C3F}, {0x11C40, 0x11C40}, {0x11C41, 0x11C45}, - {0x11C50, 0x11C59}, {0x11C5A, 0x11C6C}, {0x11C70, 0x11C71}, - {0x11C72, 0x11C8F}, {0x11C92, 0x11CA7}, {0x11CA9, 0x11CA9}, - {0x11CAA, 0x11CB0}, {0x11CB1, 0x11CB1}, {0x11CB2, 0x11CB3}, - {0x11CB4, 0x11CB4}, {0x11CB5, 0x11CB6}, {0x12000, 0x12399}, - {0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543}, - {0x13000, 0x1342E}, {0x14400, 0x14646}, {0x16800, 0x16A38}, - {0x16A40, 0x16A5E}, {0x16A60, 0x16A69}, {0x16A6E, 0x16A6F}, - {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF4}, {0x16AF5, 0x16AF5}, - {0x16B00, 0x16B2F}, {0x16B30, 0x16B36}, {0x16B37, 0x16B3B}, - {0x16B3C, 0x16B3F}, {0x16B40, 0x16B43}, {0x16B44, 0x16B44}, - {0x16B45, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61}, - {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16F00, 0x16F44}, - {0x16F50, 0x16F50}, {0x16F51, 0x16F7E}, {0x16F8F, 0x16F92}, - {0x16F93, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, - {0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BC9C}, - {0x1BC9D, 0x1BC9E}, {0x1BC9F, 0x1BC9F}, {0x1BCA0, 0x1BCA3}, - {0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D164}, - {0x1D165, 0x1D166}, {0x1D167, 0x1D169}, {0x1D16A, 0x1D16C}, - {0x1D16D, 0x1D172}, {0x1D173, 0x1D17A}, {0x1D17B, 0x1D182}, - {0x1D183, 0x1D184}, {0x1D185, 0x1D18B}, {0x1D18C, 0x1D1A9}, - {0x1D1AA, 0x1D1AD}, {0x1D1AE, 0x1D1E8}, {0x1D200, 0x1D241}, - {0x1D242, 0x1D244}, {0x1D245, 0x1D245}, {0x1D300, 0x1D356}, - {0x1D360, 0x1D371}, {0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, - {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, - {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, - {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, - {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, - {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544}, {0x1D546, 0x1D546}, - {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D6C0}, - {0x1D6C1, 0x1D6C1}, {0x1D6C2, 0x1D6DA}, {0x1D6DB, 0x1D6DB}, - 
{0x1D6DC, 0x1D6FA}, {0x1D6FB, 0x1D6FB}, {0x1D6FC, 0x1D714}, - {0x1D715, 0x1D715}, {0x1D716, 0x1D734}, {0x1D735, 0x1D735}, - {0x1D736, 0x1D74E}, {0x1D74F, 0x1D74F}, {0x1D750, 0x1D76E}, - {0x1D76F, 0x1D76F}, {0x1D770, 0x1D788}, {0x1D789, 0x1D789}, - {0x1D78A, 0x1D7A8}, {0x1D7A9, 0x1D7A9}, {0x1D7AA, 0x1D7C2}, - {0x1D7C3, 0x1D7C3}, {0x1D7C4, 0x1D7CB}, {0x1D7CE, 0x1D7FF}, - {0x1D800, 0x1D9FF}, {0x1DA00, 0x1DA36}, {0x1DA37, 0x1DA3A}, - {0x1DA3B, 0x1DA6C}, {0x1DA6D, 0x1DA74}, {0x1DA75, 0x1DA75}, - {0x1DA76, 0x1DA83}, {0x1DA84, 0x1DA84}, {0x1DA85, 0x1DA86}, - {0x1DA87, 0x1DA8B}, {0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, - {0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, - {0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E800, 0x1E8C4}, - {0x1E8C7, 0x1E8CF}, {0x1E8D0, 0x1E8D6}, {0x1E900, 0x1E943}, - {0x1E944, 0x1E94A}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F}, - {0x1EE00, 0x1EE03}, {0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, - {0x1EE24, 0x1EE24}, {0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, - {0x1EE34, 0x1EE37}, {0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, - {0x1EE42, 0x1EE42}, {0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, - {0x1EE4B, 0x1EE4B}, {0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, - {0x1EE54, 0x1EE54}, {0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, - {0x1EE5B, 0x1EE5B}, {0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, - {0x1EE61, 0x1EE62}, {0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, - {0x1EE6C, 0x1EE72}, {0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, - {0x1EE7E, 0x1EE7E}, {0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, - {0x1EEA1, 0x1EEA3}, {0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, - {0x1EEF0, 0x1EEF1}, {0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, - {0x1F030, 0x1F093}, {0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, - {0x1F0C1, 0x1F0CE}, {0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10C}, - {0x1F12E, 0x1F12E}, {0x1F16A, 0x1F16B}, {0x1F1E6, 0x1F1FF}, - {0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D}, - {0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF}, - {0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F}, - {0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A}, - {0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594}, - {0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F}, - {0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6E0, 0x1F6EA}, - {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773}, {0x1F780, 0x1F7D4}, - {0x1F800, 0x1F80B}, {0x1F810, 0x1F847}, {0x1F850, 0x1F859}, - {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD}, {0xE0001, 0xE0001}, - {0xE0020, 0xE007F}, -} - -// Condition have flag EastAsianWidth whether the current locale is CJK or not. -type Condition struct { - EastAsianWidth bool -} - -// NewCondition return new instance of Condition which is current locale. -func NewCondition() *Condition { - return &Condition{EastAsianWidth} -} - -// RuneWidth returns the number of cells in r. 
-// See http://www.unicode.org/reports/tr11/ -func (c *Condition) RuneWidth(r rune) int { - switch { - case r < 0 || r > 0x10FFFF || - inTables(r, nonprint, combining, notassigned): - return 0 - case (c.EastAsianWidth && IsAmbiguousWidth(r)) || - inTables(r, doublewidth, emoji): - return 2 - default: - return 1 - } -} - -// StringWidth return width as you can see -func (c *Condition) StringWidth(s string) (width int) { - for _, r := range []rune(s) { - width += c.RuneWidth(r) - } - return width -} - -// Truncate return string truncated with w cells -func (c *Condition) Truncate(s string, w int, tail string) string { - if c.StringWidth(s) <= w { - return s - } - r := []rune(s) - tw := c.StringWidth(tail) - w -= tw - width := 0 - i := 0 - for ; i < len(r); i++ { - cw := c.RuneWidth(r[i]) - if width+cw > w { - break - } - width += cw - } - return string(r[0:i]) + tail -} - -// Wrap return string wrapped with w cells -func (c *Condition) Wrap(s string, w int) string { - width := 0 - out := "" - for _, r := range []rune(s) { - cw := RuneWidth(r) - if r == '\n' { - out += string(r) - width = 0 - continue - } else if width+cw > w { - out += "\n" - width = 0 - out += string(r) - width += cw - continue - } - out += string(r) - width += cw - } - return out -} - -// FillLeft return string filled in left by spaces in w cells -func (c *Condition) FillLeft(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return string(b) + s - } - return s -} - -// FillRight return string filled in left by spaces in w cells -func (c *Condition) FillRight(s string, w int) string { - width := c.StringWidth(s) - count := w - width - if count > 0 { - b := make([]byte, count) - for i := range b { - b[i] = ' ' - } - return s + string(b) - } - return s -} - -// RuneWidth returns the number of cells in r. -// See http://www.unicode.org/reports/tr11/ -func RuneWidth(r rune) int { - return DefaultCondition.RuneWidth(r) -} - -// IsAmbiguousWidth returns whether is ambiguous width or not. -func IsAmbiguousWidth(r rune) bool { - return inTables(r, private, ambiguous) -} - -// IsNeutralWidth returns whether is neutral width or not. -func IsNeutralWidth(r rune) bool { - return inTable(r, neutral) -} - -// StringWidth return width as you can see -func StringWidth(s string) (width int) { - return DefaultCondition.StringWidth(s) -} - -// Truncate return string truncated with w cells -func Truncate(s string, w int, tail string) string { - return DefaultCondition.Truncate(s, w, tail) -} - -// Wrap return string wrapped with w cells -func Wrap(s string, w int) string { - return DefaultCondition.Wrap(s, w) -} - -// FillLeft return string filled in left by spaces in w cells -func FillLeft(s string, w int) string { - return DefaultCondition.FillLeft(s, w) -} - -// FillRight return string filled in left by spaces in w cells -func FillRight(s string, w int) string { - return DefaultCondition.FillRight(s, w) -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go deleted file mode 100644 index 0ce32c5e7b..0000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_js.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build js - -package runewidth - -func IsEastAsian() bool { - // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. 
- return false -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go deleted file mode 100644 index c579e9a314..0000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go +++ /dev/null @@ -1,77 +0,0 @@ -// +build !windows,!js - -package runewidth - -import ( - "os" - "regexp" - "strings" -) - -var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) - -var mblenTable = map[string]int{ - "utf-8": 6, - "utf8": 6, - "jis": 8, - "eucjp": 3, - "euckr": 2, - "euccn": 2, - "sjis": 2, - "cp932": 2, - "cp51932": 2, - "cp936": 2, - "cp949": 2, - "cp950": 2, - "big5": 2, - "gbk": 2, - "gb2312": 2, -} - -func isEastAsian(locale string) bool { - charset := strings.ToLower(locale) - r := reLoc.FindStringSubmatch(locale) - if len(r) == 2 { - charset = strings.ToLower(r[1]) - } - - if strings.HasSuffix(charset, "@cjk_narrow") { - return false - } - - for pos, b := range []byte(charset) { - if b == '@' { - charset = charset[:pos] - break - } - } - max := 1 - if m, ok := mblenTable[charset]; ok { - max = m - } - if max > 1 && (charset[0] != 'u' || - strings.HasPrefix(locale, "ja") || - strings.HasPrefix(locale, "ko") || - strings.HasPrefix(locale, "zh")) { - return true - } - return false -} - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - locale := os.Getenv("LC_CTYPE") - if locale == "" { - locale = os.Getenv("LANG") - } - - // ignore C locale - if locale == "POSIX" || locale == "C" { - return false - } - if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') { - return false - } - - return isEastAsian(locale) -} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go deleted file mode 100644 index 0258876b99..0000000000 --- a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package runewidth - -import ( - "syscall" -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32") - procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") -) - -// IsEastAsian return true if the current locale is CJK -func IsEastAsian() bool { - r1, _, _ := procGetConsoleOutputCP.Call() - if r1 == 0 { - return false - } - - switch int(r1) { - case 932, 51932, 936, 949, 950: - return true - } - - return false -} diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS new file mode 100644 index 0000000000..1965683525 --- /dev/null +++ b/vendor/github.com/miekg/dns/AUTHORS @@ -0,0 +1 @@ +Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS new file mode 100644 index 0000000000..5903779d81 --- /dev/null +++ b/vendor/github.com/miekg/dns/CONTRIBUTORS @@ -0,0 +1,10 @@ +Alex A. Skinner +Andrew Tunnell-Jones +Ask Bjørn Hansen +Dave Cheney +Dusty Wilson +Marek Majkowski +Peter van Dijk +Omri Bahumi +Alex Sergeyev +James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT new file mode 100644 index 0000000000..35702b10e8 --- /dev/null +++ b/vendor/github.com/miekg/dns/COPYRIGHT @@ -0,0 +1,9 @@ +Copyright 2009 The Go Authors. All rights reserved. Use of this source code +is governed by a BSD-style license that can be found in the LICENSE file. +Extensions of the original work are copyright (c) 2011 Miek Gieben + +Copyright 2011 Miek Gieben. All rights reserved. 
Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. + +Copyright 2014 CloudFlare. All rights reserved. Use of this source code is +governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE new file mode 100644 index 0000000000..5763fa7fe5 --- /dev/null +++ b/vendor/github.com/miekg/dns/LICENSE @@ -0,0 +1,32 @@ +Extensions of the original work are copyright (c) 2011 Miek Gieben + +As this is fork of the official Go code the same license applies: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go new file mode 100644 index 0000000000..282565afdd --- /dev/null +++ b/vendor/github.com/miekg/dns/client.go @@ -0,0 +1,503 @@ +package dns + +// A client implementation. + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/binary" + "io" + "net" + "strings" + "time" +) + +const dnsTimeout time.Duration = 2 * time.Second +const tcpIdleTimeout time.Duration = 8 * time.Second + +// A Conn represents a connection to a DNS server. +type Conn struct { + net.Conn // a net.Conn holding the connection + UDPSize uint16 // minimum receive buffer for UDP messages + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigRequestMAC string +} + +// A Client defines parameters for a DNS client. +type Client struct { + Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) + UDPSize uint16 // minimum receive buffer for UDP messages + TLSConfig *tls.Config // TLS connection configuration + Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more + // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, + // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and + // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass + group singleflight +} + +// Exchange performs a synchronous UDP query. It sends the message m to the address +// contained in a and waits for a reply. Exchange does not retry a failed query, nor +// will it fall back to TCP in case of truncation. +// See client.Exchange for more information on setting larger buffer sizes. +func Exchange(m *Msg, a string) (r *Msg, err error) { + client := Client{Net: "udp"} + r, _, err = client.Exchange(m, a) + return r, err +} + +func (c *Client) dialTimeout() time.Duration { + if c.Timeout != 0 { + return c.Timeout + } + if c.DialTimeout != 0 { + return c.DialTimeout + } + return dnsTimeout +} + +func (c *Client) readTimeout() time.Duration { + if c.ReadTimeout != 0 { + return c.ReadTimeout + } + return dnsTimeout +} + +func (c *Client) writeTimeout() time.Duration { + if c.WriteTimeout != 0 { + return c.WriteTimeout + } + return dnsTimeout +} + +// Dial connects to the address on the named network. +func (c *Client) Dial(address string) (conn *Conn, err error) { + // create a new dialer with the appropriate timeout + var d net.Dialer + if c.Dialer == nil { + d = net.Dialer{} + } else { + d = net.Dialer(*c.Dialer) + } + d.Timeout = c.getTimeoutForRequest(c.writeTimeout()) + + network := "udp" + useTLS := false + + switch c.Net { + case "tcp-tls": + network = "tcp" + useTLS = true + case "tcp4-tls": + network = "tcp4" + useTLS = true + case "tcp6-tls": + network = "tcp6" + useTLS = true + default: + if c.Net != "" { + network = c.Net + } + } + + conn = new(Conn) + if useTLS { + conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + } else { + conn.Conn, err = d.Dial(network, address) + } + if err != nil { + return nil, err + } + return conn, nil +} + +// Exchange performs a synchronous query. It sends the message m to the address +// contained in a and waits for a reply. Basic use pattern with a *dns.Client: +// +// c := new(dns.Client) +// in, rtt, err := c.Exchange(message, "127.0.0.1:53") +// +// Exchange does not retry a failed query, nor will it fall back to TCP in +// case of truncation. +// It is up to the caller to create a message that allows for larger responses to be +// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger +// buffer, see SetEdns0. 
Messages without an OPT RR will fall back to the historic limit +// of 512 bytes. +// To specify a local address or a timeout, the caller has to set the `Client.Dialer` +// attribute appropriately. +func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { + if !c.SingleInflight { + return c.exchange(m, address) + } + + t := "nop" + if t1, ok := TypeToString[m.Question[0].Qtype]; ok { + t = t1 + } + cl := "nop" + if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { + cl = cl1 + } + r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { + return c.exchange(m, address) + }) + if r != nil && shared { + r = r.Copy() + } + return r, rtt, err +} + +func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var co *Conn + + co, err = c.Dial(a) + + if err != nil { + return nil, 0, err + } + defer co.Close() + + opt := m.IsEdns0() + // If EDNS0 is used, use that for size. + if opt != nil && opt.UDPSize() >= MinMsgSize { + co.UDPSize = opt.UDPSize() + } + // Otherwise use the client's configured UDP size. + if opt == nil && c.UDPSize >= MinMsgSize { + co.UDPSize = c.UDPSize + } + + co.TsigSecret = c.TsigSecret + t := time.Now() + // write with the appropriate write timeout + co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) + if err = co.WriteMsg(m); err != nil { + return nil, 0, err + } + + co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) + r, err = co.ReadMsg() + if err == nil && r.Id != m.Id { + err = ErrId + } + rtt = time.Since(t) + return r, rtt, err +} + +// ReadMsg reads a message from the connection co. +// If the received message contains a TSIG record the transaction signature +// is verified. This method always tries to return the message, however if an +// error is returned there are no guarantees that the returned message is a +// valid representation of the packet read. +func (co *Conn) ReadMsg() (*Msg, error) { + p, err := co.ReadMsgHeader(nil) + if err != nil { + return nil, err + } + + m := new(Msg) + if err := m.Unpack(p); err != nil { + // If an error was returned, we still want to allow the user to use + // the message, but they can simply check err if they don't want + // to use an erroneous message. + return m, err + } + if t := m.IsTsig(); t != nil { + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + } + return m, err +} + +// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). +// Returns message as a byte slice to be parsed with Msg.Unpack later on. +// Note that error handling on the message body is not possible as only the header is parsed. +func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { + var ( + p []byte + n int + err error + ) + + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + // First two bytes specify the length of the entire message. 
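+ // (Per RFC 1035, section 4.2.2, a DNS message sent over TCP is preceded by a two-byte length field.)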
+ l, err := tcpMsgLen(r) + if err != nil { + return nil, err + } + p = make([]byte, l) + n, err = tcpRead(r, p) + default: + if co.UDPSize > MinMsgSize { + p = make([]byte, co.UDPSize) + } else { + p = make([]byte, MinMsgSize) + } + n, err = co.Read(p) + } + + if err != nil { + return nil, err + } else if n < headerSize { + return nil, ErrShortRead + } + + p = p[:n] + if hdr != nil { + dh, _, err := unpackMsgHdr(p, 0) + if err != nil { + return nil, err + } + *hdr = dh + } + return p, err +} + +// tcpMsgLen is a helper func to read the first two bytes of a stream as the uint16 packet length. +func tcpMsgLen(t io.Reader) (int, error) { + p := []byte{0, 0} + n, err := t.Read(p) + if err != nil { + return 0, err + } + + // As seen with my local router/switch, the above read may return only 1 byte, + // resulting in a ShortRead. Just write it out (instead of a loop) and read the + // other byte. + if n == 1 { + n1, err := t.Read(p[1:]) + if err != nil { + return 0, err + } + n += n1 + } + + if n != 2 { + return 0, ErrShortRead + } + l := binary.BigEndian.Uint16(p) + if l == 0 { + return 0, ErrShortRead + } + return int(l), nil +} + +// tcpRead calls TCPConn.Read enough times to fill the allocated buffer. +func tcpRead(t io.Reader, p []byte) (int, error) { + n, err := t.Read(p) + if err != nil { + return n, err + } + for n < len(p) { + j, err := t.Read(p[n:]) + if err != nil { + return n, err + } + n += j + } + return n, err +} + +// Read implements the net.Conn read method. +func (co *Conn) Read(p []byte) (n int, err error) { + if co.Conn == nil { + return 0, ErrConnEmpty + } + if len(p) < 2 { + return 0, io.ErrShortBuffer + } + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + r := t.(io.Reader) + + l, err := tcpMsgLen(r) + if err != nil { + return 0, err + } + if l > len(p) { + return int(l), io.ErrShortBuffer + } + return tcpRead(r, p[:l]) + } + // UDP connection + n, err = co.Conn.Read(p) + if err != nil { + return n, err + } + return n, err +} + +// WriteMsg sends a message through the connection co. +// If the message m contains a TSIG record the transaction +// signature is calculated. +func (co *Conn) WriteMsg(m *Msg) (err error) { + var out []byte + if t := m.IsTsig(); t != nil { + mac := "" + if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { + return ErrSecret + } + out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) + // Set for the next read, although only used in zone transfers. + co.tsigRequestMAC = mac + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = co.Write(out); err != nil { + return err + } + return nil +} + +// Write implements the net.Conn Write method. +func (co *Conn) Write(p []byte) (n int, err error) { + switch t := co.Conn.(type) { + case *net.TCPConn, *tls.Conn: + w := t.(io.Writer) + + lp := len(p) + if lp < 2 { + return 0, io.ErrShortBuffer + } + if lp > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, lp+2) + binary.BigEndian.PutUint16(l, uint16(lp)) + p = append(l, p...) 
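+ // The length prefix is prepended here so that prefix and message body go out in the single write below.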
+		n, err := io.Copy(w, bytes.NewReader(p))
+		return int(n), err
+	}
+	n, err = co.Conn.Write(p)
+	return n, err
+}
+
+// Return the appropriate timeout for a specific request.
+func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration {
+	var requestTimeout time.Duration
+	if c.Timeout != 0 {
+		requestTimeout = c.Timeout
+	} else {
+		requestTimeout = timeout
+	}
+	// net.Dialer.Timeout has priority if smaller than the timeouts computed so
+	// far.
+	if c.Dialer != nil && c.Dialer.Timeout != 0 {
+		if c.Dialer.Timeout < requestTimeout {
+			requestTimeout = c.Dialer.Timeout
+		}
+	}
+	return requestTimeout
+}
+
+// Dial connects to the address on the named network.
+func Dial(network, address string) (conn *Conn, err error) {
+	conn = new(Conn)
+	conn.Conn, err = net.Dial(network, address)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// ExchangeContext performs a synchronous UDP query, like Exchange. It
+// additionally obeys deadlines from the passed Context.
+func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) {
+	client := Client{Net: "udp"}
+	r, _, err = client.ExchangeContext(ctx, m, a)
+	// ignoring rtt to leave the original ExchangeContext API unchanged, but
+	// this function will go away
+	return r, err
+}
+
+// ExchangeConn performs a synchronous query. It sends the message m via the connection
+// c and waits for a reply. The connection c is not closed by ExchangeConn.
+// This function is going away, but can easily be mimicked:
+//
+//	co := &dns.Conn{Conn: c} // c is your net.Conn
+//	co.WriteMsg(m)
+//	in, _ := co.ReadMsg()
+//	co.Close()
+//
+func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
+	println("dns: ExchangeConn: this function is deprecated")
+	co := new(Conn)
+	co.Conn = c
+	if err = co.WriteMsg(m); err != nil {
+		return nil, err
+	}
+	r, err = co.ReadMsg()
+	if err == nil && r.Id != m.Id {
+		err = ErrId
+	}
+	return r, err
+}
+
+// DialTimeout acts like Dial but takes a timeout.
+func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
+	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
+	conn, err = client.Dial(address)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// DialWithTLS connects to the address on the named network with TLS.
+func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) {
+	if !strings.HasSuffix(network, "-tls") {
+		network += "-tls"
+	}
+	client := Client{Net: network, TLSConfig: tlsConfig}
+	conn, err = client.Dial(address)
+
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
+func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) {
+	if !strings.HasSuffix(network, "-tls") {
+		network += "-tls"
+	}
+	client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
+	conn, err = client.Dial(address)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+// ExchangeContext acts like Exchange, but honors the deadline on the provided
+// context, if present. If there is both a context deadline and a configured
+// timeout on the client, the earliest of the two takes effect.
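+//
+// A minimal sketch, given a Client c and a query m (address and deadline are
+// illustrative):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+//	defer cancel()
+//	in, rtt, err := c.ExchangeContext(ctx, m, "8.8.8.8:53")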
+func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { + var timeout time.Duration + if deadline, ok := ctx.Deadline(); !ok { + timeout = 0 + } else { + timeout = deadline.Sub(time.Now()) + } + // not passing the context to the underlying calls, as the API does not support + // context. For timeouts you should set up Client.Dialer and call Client.Exchange. + c.Dialer = &net.Dialer{Timeout: timeout} + return c.Exchange(m, a) +} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go new file mode 100644 index 0000000000..f13cfa30cb --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -0,0 +1,139 @@ +package dns + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" +) + +// ClientConfig wraps the contents of the /etc/resolv.conf file. +type ClientConfig struct { + Servers []string // servers to use + Search []string // suffixes to append to local name + Port string // what port to use + Ndots int // number of dots in name to trigger absolute lookup + Timeout int // seconds before giving up on packet + Attempts int // lost packets before giving up on server, not used in the package dns +} + +// ClientConfigFromFile parses a resolv.conf(5) like file and returns +// a *ClientConfig. +func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { + file, err := os.Open(resolvconf) + if err != nil { + return nil, err + } + defer file.Close() + return ClientConfigFromReader(file) +} + +// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument +func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { + c := new(ClientConfig) + scanner := bufio.NewScanner(resolvconf) + c.Servers = make([]string, 0) + c.Search = make([]string, 0) + c.Port = "53" + c.Ndots = 1 + c.Timeout = 5 + c.Attempts = 2 + + for scanner.Scan() { + if err := scanner.Err(); err != nil { + return nil, err + } + line := scanner.Text() + f := strings.Fields(line) + if len(f) < 1 { + continue + } + switch f[0] { + case "nameserver": // add one name server + if len(f) > 1 { + // One more check: make sure server name is + // just an IP address. Otherwise we need DNS + // to look it up. + name := f[1] + c.Servers = append(c.Servers, name) + } + + case "domain": // set search path to just this domain + if len(f) > 1 { + c.Search = make([]string, 1) + c.Search[0] = f[1] + } else { + c.Search = make([]string, 0) + } + + case "search": // set search path to given servers + c.Search = make([]string, len(f)-1) + for i := 0; i < len(c.Search); i++ { + c.Search[i] = f[i+1] + } + + case "options": // magic options + for i := 1; i < len(f); i++ { + s := f[i] + switch { + case len(s) >= 6 && s[:6] == "ndots:": + n, _ := strconv.Atoi(s[6:]) + if n < 0 { + n = 0 + } else if n > 15 { + n = 15 + } + c.Ndots = n + case len(s) >= 8 && s[:8] == "timeout:": + n, _ := strconv.Atoi(s[8:]) + if n < 1 { + n = 1 + } + c.Timeout = n + case len(s) >= 9 && s[:9] == "attempts:": + n, _ := strconv.Atoi(s[9:]) + if n < 1 { + n = 1 + } + c.Attempts = n + case s == "rotate": + /* not imp */ + } + } + } + } + return c, nil +} + +// NameList returns all of the names that should be queried based on the +// config. It is based off of go's net/dns name building, but it does not +// check the length of the resulting names. +func (c *ClientConfig) NameList(name string) []string { + // if this domain is already fully qualified, no append needed. 
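+	// e.g. NameList("host.example.com.") simply returns []string{"host.example.com."}.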
+	if IsFqdn(name) {
+		return []string{name}
+	}
+
+	// Check to see if the name has more labels than Ndots. Do this before making
+	// the domain fully qualified.
+	hasNdots := CountLabel(name) > c.Ndots
+	// Make the domain fully qualified.
+	name = Fqdn(name)
+
+	// Make a list of names based off search.
+	names := []string{}
+
+	// If name has enough dots, try that first.
+	if hasNdots {
+		names = append(names, name)
+	}
+	for _, s := range c.Search {
+		names = append(names, Fqdn(name+s))
+	}
+	// If we didn't have enough dots, try after suffixes.
+	if !hasNdots {
+		names = append(names, name)
+	}
+	return names
+}
diff --git a/vendor/github.com/miekg/dns/compress_generate.go b/vendor/github.com/miekg/dns/compress_generate.go
new file mode 100644
index 0000000000..87fb36f68c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/compress_generate.go
@@ -0,0 +1,188 @@
+//+build ignore
+
+// compress_generate.go is meant to run with go generate. It will use
+// go/{importer,types} to track down all the RR struct types. Then for each type
+// it will look to see if there are (compressible) names; if so it will add that
+// type to compressionLenHelperType and compressionLenSearchType which "fake" the
+// compression so that Len() is fast.
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"go/format"
+	"go/importer"
+	"go/types"
+	"log"
+	"os"
+)
+
+var packageHdr = `
+// Code generated by "go run compress_generate.go"; DO NOT EDIT.
+
+package dns
+
+`
+
+// getTypeStruct will take a type and the package scope, and return the
+// (innermost) struct if the type is considered a RR type (currently defined as
+// those structs beginning with a RR_Header, could be redefined as implementing
+// the RR interface). The bool return value indicates if embedded structs were
+// resolved.
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
+	st, ok := t.Underlying().(*types.Struct)
+	if !ok {
+		return nil, false
+	}
+	if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
+		return st, false
+	}
+	if st.Field(0).Anonymous() {
+		st, _ := getTypeStruct(st.Field(0).Type(), scope)
+		return st, true
+	}
+	return nil, false
+}
+
+func main() {
+	// Import and type-check the package
+	pkg, err := importer.Default().Import("github.com/miekg/dns")
+	fatalIfErr(err)
+	scope := pkg.Scope()
+
+	var domainTypes []string // Types that have a domain name in them (either compressible or not).
+ var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType) +Names: + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + st, _ := getTypeStruct(o.Type(), scope) + if st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + for i := 1; i < st.NumFields(); i++ { + if _, ok := st.Field(i).Type().(*types.Slice); ok { + if st.Tag(i) == `dns:"domain-name"` { + domainTypes = append(domainTypes, o.Name()) + continue Names + } + if st.Tag(i) == `dns:"cdomain-name"` { + cdomainTypes = append(cdomainTypes, o.Name()) + domainTypes = append(domainTypes, o.Name()) + continue Names + } + continue + } + + switch { + case st.Tag(i) == `dns:"domain-name"`: + domainTypes = append(domainTypes, o.Name()) + continue Names + case st.Tag(i) == `dns:"cdomain-name"`: + cdomainTypes = append(cdomainTypes, o.Name()) + domainTypes = append(domainTypes, o.Name()) + continue Names + } + } + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names + + fmt.Fprint(b, "func compressionLenHelperType(c map[string]int, r RR) {\n") + fmt.Fprint(b, "switch x := r.(type) {\n") + for _, name := range domainTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "case *%s:\n", name) + for i := 1; i < st.NumFields(); i++ { + out := func(s string) { fmt.Fprintf(b, "compressionLenHelper(c, x.%s)\n", st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"domain-name"`: + fallthrough + case `dns:"cdomain-name"`: + // For HIP we need to slice over the elements in this slice. + fmt.Fprintf(b, `for i := range x.%s { + compressionLenHelper(c, x.%s[i]) + } +`, st.Field(i).Name(), st.Field(i).Name()) + } + continue + } + + switch { + case st.Tag(i) == `dns:"cdomain-name"`: + fallthrough + case st.Tag(i) == `dns:"domain-name"`: + out(st.Field(i).Name()) + } + } + } + fmt.Fprintln(b, "}\n}\n\n") + + // compressionLenSearchType - search cdomain-tags types for compressible names. + + fmt.Fprint(b, "func compressionLenSearchType(c map[string]int, r RR) (int, bool) {\n") + fmt.Fprint(b, "switch x := r.(type) {\n") + for _, name := range cdomainTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "case *%s:\n", name) + j := 1 + for i := 1; i < st.NumFields(); i++ { + out := func(s string, j int) { + fmt.Fprintf(b, "k%d, ok%d := compressionLenSearch(c, x.%s)\n", j, j, st.Field(i).Name()) + } + + // There are no slice types with names that can be compressed. 
+ + switch { + case st.Tag(i) == `dns:"cdomain-name"`: + out(st.Field(i).Name(), j) + j++ + } + } + k := "k1" + ok := "ok1" + for i := 2; i < j; i++ { + k += fmt.Sprintf(" + k%d", i) + ok += fmt.Sprintf(" && ok%d", i) + } + fmt.Fprintf(b, "return %s, %s\n", k, ok) + } + fmt.Fprintln(b, "}\nreturn 0, false\n}\n\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + f, err := os.Create("zcompress.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go new file mode 100644 index 0000000000..8c4a14ef19 --- /dev/null +++ b/vendor/github.com/miekg/dns/dane.go @@ -0,0 +1,43 @@ +package dns + +import ( + "crypto/sha256" + "crypto/sha512" + "crypto/x509" + "encoding/hex" + "errors" +) + +// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. +func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { + switch matchingType { + case 0: + switch selector { + case 0: + return hex.EncodeToString(cert.Raw), nil + case 1: + return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil + } + case 1: + h := sha256.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + case 2: + h := sha512.New() + switch selector { + case 0: + h.Write(cert.Raw) + return hex.EncodeToString(h.Sum(nil)), nil + case 1: + h.Write(cert.RawSubjectPublicKeyInfo) + return hex.EncodeToString(h.Sum(nil)), nil + } + } + return "", errors.New("dns: bad MatchingType or Selector") +} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go new file mode 100644 index 0000000000..14e18b0b38 --- /dev/null +++ b/vendor/github.com/miekg/dns/defaults.go @@ -0,0 +1,288 @@ +package dns + +import ( + "errors" + "net" + "strconv" +) + +const hexDigit = "0123456789abcdef" + +// Everything is assumed in ClassINET. + +// SetReply creates a reply message from a request message. +func (dns *Msg) SetReply(request *Msg) *Msg { + dns.Id = request.Id + dns.Response = true + dns.Opcode = request.Opcode + if dns.Opcode == OpcodeQuery { + dns.RecursionDesired = request.RecursionDesired // Copy rd bit + dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit + } + dns.Rcode = RcodeSuccess + if len(request.Question) > 0 { + dns.Question = make([]Question, 1) + dns.Question[0] = request.Question[0] + } + return dns +} + +// SetQuestion creates a question message, it sets the Question +// section, generates an Id and sets the RecursionDesired (RD) +// bit to true. +func (dns *Msg) SetQuestion(z string, t uint16) *Msg { + dns.Id = Id() + dns.RecursionDesired = true + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, t, ClassINET} + return dns +} + +// SetNotify creates a notify message, it sets the Question +// section, generates an Id and sets the Authoritative (AA) +// bit to true. +func (dns *Msg) SetNotify(z string) *Msg { + dns.Opcode = OpcodeNotify + dns.Authoritative = true + dns.Id = Id() + dns.Question = make([]Question, 1) + dns.Question[0] = Question{z, TypeSOA, ClassINET} + return dns +} + +// SetRcode creates an error message suitable for the request. 
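+//
+// For example, answering a query m with SERVFAIL (m is illustrative):
+//
+//	reply := new(Msg).SetRcode(m, RcodeServerFailure)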
+func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg {
+	dns.SetReply(request)
+	dns.Rcode = rcode
+	return dns
+}
+
+// SetRcodeFormatError creates a message with FormError set.
+func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg {
+	dns.Rcode = RcodeFormatError
+	dns.Opcode = OpcodeQuery
+	dns.Response = true
+	dns.Authoritative = false
+	dns.Id = request.Id
+	return dns
+}
+
+// SetUpdate makes the message a dynamic update message. It
+// sets the ZONE section to: z, TypeSOA, ClassINET.
+func (dns *Msg) SetUpdate(z string) *Msg {
+	dns.Id = Id()
+	dns.Response = false
+	dns.Opcode = OpcodeUpdate
+	dns.Compress = false // BIND9 cannot handle compression
+	dns.Question = make([]Question, 1)
+	dns.Question[0] = Question{z, TypeSOA, ClassINET}
+	return dns
+}
+
+// SetIxfr creates a message for requesting an IXFR.
+func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg {
+	dns.Id = Id()
+	dns.Question = make([]Question, 1)
+	dns.Ns = make([]RR, 1)
+	s := new(SOA)
+	s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0}
+	s.Serial = serial
+	s.Ns = ns
+	s.Mbox = mbox
+	dns.Question[0] = Question{z, TypeIXFR, ClassINET}
+	dns.Ns[0] = s
+	return dns
+}
+
+// SetAxfr creates a message for requesting an AXFR.
+func (dns *Msg) SetAxfr(z string) *Msg {
+	dns.Id = Id()
+	dns.Question = make([]Question, 1)
+	dns.Question[0] = Question{z, TypeAXFR, ClassINET}
+	return dns
+}
+
+// SetTsig appends a TSIG RR to the message.
+// This is only a skeleton TSIG RR that is added as the last RR in the
+// additional section. The TSIG is calculated when the message is being sent.
+func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg {
+	t := new(TSIG)
+	t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0}
+	t.Algorithm = algo
+	t.Fudge = fudge
+	t.TimeSigned = uint64(timesigned)
+	t.OrigId = dns.Id
+	dns.Extra = append(dns.Extra, t)
+	return dns
+}
+
+// SetEdns0 appends an EDNS0 OPT RR to the message.
+// TSIG should always be the last RR in a message.
+func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg {
+	e := new(OPT)
+	e.Hdr.Name = "."
+	e.Hdr.Rrtype = TypeOPT
+	e.SetUDPSize(udpsize)
+	if do {
+		e.SetDo()
+	}
+	dns.Extra = append(dns.Extra, e)
+	return dns
+}
+
+// IsTsig checks if the message has a TSIG record as the last record
+// in the additional section. It returns the TSIG record found or nil.
+func (dns *Msg) IsTsig() *TSIG {
+	if len(dns.Extra) > 0 {
+		if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG {
+			return dns.Extra[len(dns.Extra)-1].(*TSIG)
+		}
+	}
+	return nil
+}
+
+// IsEdns0 checks if the message has an EDNS0 (OPT) record; any EDNS0
+// record in the additional section will do. It returns the OPT record
+// found or nil.
+func (dns *Msg) IsEdns0() *OPT {
+	// EDNS0 is at the end of the additional section, start there.
+	// We might want to change this to *only* look at the last two
+	// records. So we see TSIG and/or OPT - this is a slightly bigger
+	// change though.
+	for i := len(dns.Extra) - 1; i >= 0; i-- {
+		if dns.Extra[i].Header().Rrtype == TypeOPT {
+			return dns.Extra[i].(*OPT)
+		}
+	}
+	return nil
+}
+
+// IsDomainName checks if s is a valid domain name; it returns the number of
+// labels and true when the domain name is valid. Note that a non-fully-qualified
+// domain name is considered valid; in this case the last label is counted in
+// the number of labels. When false is returned the number of labels is not
+// defined.
Also note that this function is extremely liberal; almost any
+// string is a valid domain name as the DNS is an 8-bit protocol. It checks if each
+// label fits in 63 characters, but there is no length check for the entire
+// string s. I.e. a domain name longer than 255 characters is considered valid.
+func IsDomainName(s string) (labels int, ok bool) {
+	_, labels, err := packDomainName(s, nil, 0, nil, false)
+	return labels, err == nil
+}
+
+// IsSubDomain checks if child is indeed a child of the parent. If child and parent
+// are the same domain true is returned as well.
+func IsSubDomain(parent, child string) bool {
+	// Entire child is contained in parent
+	return CompareDomainName(parent, child) == CountLabel(parent)
+}
+
+// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet.
+// The checking is performed on the binary payload.
+func IsMsg(buf []byte) error {
+	// Header
+	if len(buf) < 12 {
+		return errors.New("dns: bad message header")
+	}
+	// Header: Opcode
+	// TODO(miek): more checks here, e.g. check all header bits.
+	return nil
+}
+
+// IsFqdn checks if a domain name is fully qualified.
+func IsFqdn(s string) bool {
+	l := len(s)
+	if l == 0 {
+		return false
+	}
+	return s[l-1] == '.'
+}
+
+// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
+// This means the RRs need to have the same type, name, and class. Returns true
+// if the RR set is valid, otherwise false.
+func IsRRset(rrset []RR) bool {
+	if len(rrset) == 0 {
+		return false
+	}
+	if len(rrset) == 1 {
+		return true
+	}
+	rrHeader := rrset[0].Header()
+	rrType := rrHeader.Rrtype
+	rrClass := rrHeader.Class
+	rrName := rrHeader.Name
+
+	for _, rr := range rrset[1:] {
+		curRRHeader := rr.Header()
+		if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName {
+			// Mismatch between the records, so this is not a valid rrset for
+			// signing/verifying.
+			return false
+		}
+	}
+
+	return true
+}
+
+// Fqdn returns the fully qualified domain name from s.
+// If s is already fully qualified, it behaves as the identity function.
+func Fqdn(s string) string {
+	if IsFqdn(s) {
+		return s
+	}
+	return s + "."
+}
+
+// Copied from the official Go code.
+
+// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP
+// address suitable for reverse DNS (PTR) record lookups or an error if it fails
+// to parse the IP address.
+func ReverseAddr(addr string) (arpa string, err error) {
+	ip := net.ParseIP(addr)
+	if ip == nil {
+		return "", &Error{err: "unrecognized address: " + addr}
+	}
+	if ip.To4() != nil {
+		return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." +
+			strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil
+	}
+	// Must be IPv6
+	buf := make([]byte, 0, len(ip)*4+len("ip6.arpa."))
+	// Add it, in reverse, to the buffer
+	for i := len(ip) - 1; i >= 0; i-- {
+		v := ip[i]
+		buf = append(buf, hexDigit[v&0xF])
+		buf = append(buf, '.')
+		buf = append(buf, hexDigit[v>>4])
+		buf = append(buf, '.')
+	}
+	// Append "ip6.arpa." and return (buf already has the final .)
+	buf = append(buf, "ip6.arpa."...)
+	return string(buf), nil
+}
+
+// String returns the string representation for the type t.
+func (t Type) String() string {
+	if t1, ok := TypeToString[uint16(t)]; ok {
+		return t1
+	}
+	return "TYPE" + strconv.Itoa(int(t))
+}
+
+// String returns the string representation for the class c.
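+// For example, Class(ClassINET).String() returns "IN", while Class(ClassANY)
+// stringifies as "CLASS255" because the mnemonic ANY also names a type.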
+func (c Class) String() string {
+	if s, ok := ClassToString[uint16(c)]; ok {
+		// Only emit mnemonics when they are unambiguous; specifically ANY is in both.
+		if _, ok := StringToType[s]; !ok {
+			return s
+		}
+	}
+	return "CLASS" + strconv.Itoa(int(c))
+}
+
+// String returns the string representation for the name n.
+func (n Name) String() string {
+	return sprintName(string(n))
+}
diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go
new file mode 100644
index 0000000000..5133eac727
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dns.go
@@ -0,0 +1,107 @@
+package dns
+
+import "strconv"
+
+const (
+	year68     = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits.
+	defaultTtl = 3600    // Default internal TTL.
+
+	// DefaultMsgSize is the standard default for messages larger than 512 bytes.
+	DefaultMsgSize = 4096
+	// MinMsgSize is the minimal size of a DNS packet.
+	MinMsgSize = 512
+	// MaxMsgSize is the largest possible DNS packet.
+	MaxMsgSize = 65535
+)
+
+// Error represents a DNS error.
+type Error struct{ err string }
+
+func (e *Error) Error() string {
+	if e == nil {
+		return "dns: <nil>"
+	}
+	return "dns: " + e.err
+}
+
+// An RR represents a resource record.
+type RR interface {
+	// Header returns the header of a resource record. The header contains
+	// everything up to the rdata.
+	Header() *RR_Header
+	// String returns the text representation of the resource record.
+	String() string
+
+	// copy returns a copy of the RR
+	copy() RR
+	// len returns the length (in octets) of the uncompressed RR in wire format.
+	len() int
+	// pack packs an RR into wire format.
+	pack([]byte, int, map[string]int, bool) (int, error)
+}
+
+// RR_Header is the header all DNS resource records share.
+type RR_Header struct {
+	Name     string `dns:"cdomain-name"`
+	Rrtype   uint16
+	Class    uint16
+	Ttl      uint32
+	Rdlength uint16 // Length of data after header.
+}
+
+// Header returns itself. This is here to make RR_Header implement the RR interface.
+func (h *RR_Header) Header() *RR_Header { return h }
+
+// Just to implement the RR interface.
+func (h *RR_Header) copy() RR { return nil }
+
+func (h *RR_Header) copyHeader() *RR_Header {
+	r := new(RR_Header)
+	r.Name = h.Name
+	r.Rrtype = h.Rrtype
+	r.Class = h.Class
+	r.Ttl = h.Ttl
+	r.Rdlength = h.Rdlength
+	return r
+}
+
+func (h *RR_Header) String() string {
+	var s string
+
+	if h.Rrtype == TypeOPT {
+		s = ";"
+		// and maybe other things
+	}
+
+	s += sprintName(h.Name) + "\t"
+	s += strconv.FormatInt(int64(h.Ttl), 10) + "\t"
+	s += Class(h.Class).String() + "\t"
+	s += Type(h.Rrtype).String() + "\t"
+	return s
+}
+
+func (h *RR_Header) len() int {
+	l := len(h.Name) + 1
+	l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
+	return l
+}
+
+// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
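+//
+// Sketch of use (the A record is illustrative):
+//
+//	rr, _ := NewRR("example.org. 3600 IN A 192.0.2.1")
+//	unknown := new(RFC3597)
+//	if err := unknown.ToRFC3597(rr); err == nil {
+//		// unknown.Rdata now holds "c0000201", the 4 octets of the A rdata.
+//	}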
+func (rr *RFC3597) ToRFC3597(r RR) error { + buf := make([]byte, r.len()*2) + off, err := PackRR(r, buf, 0, nil, false) + if err != nil { + return err + } + buf = buf[:off] + if int(r.Header().Rdlength) > off { + return ErrBuf + } + + rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) + if err != nil { + return err + } + *rr = *rfc3597.(*RFC3597) + return nil +} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go new file mode 100644 index 0000000000..ac9fdd45ee --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -0,0 +1,784 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + _ "crypto/md5" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "encoding/asn1" + "encoding/binary" + "encoding/hex" + "math/big" + "sort" + "strings" + "time" + + "golang.org/x/crypto/ed25519" +) + +// DNSSEC encryption algorithm codes. +const ( + _ uint8 = iota + RSAMD5 + DH + DSA + _ // Skip 4, RFC 6725, section 2.1 + RSASHA1 + DSANSEC3SHA1 + RSASHA1NSEC3SHA1 + RSASHA256 + _ // Skip 9, RFC 6725, section 2.1 + RSASHA512 + _ // Skip 11, RFC 6725, section 2.1 + ECCGOST + ECDSAP256SHA256 + ECDSAP384SHA384 + ED25519 + ED448 + INDIRECT uint8 = 252 + PRIVATEDNS uint8 = 253 // Private (experimental keys) + PRIVATEOID uint8 = 254 +) + +// AlgorithmToString is a map of algorithm IDs to algorithm names. +var AlgorithmToString = map[uint8]string{ + RSAMD5: "RSAMD5", + DH: "DH", + DSA: "DSA", + RSASHA1: "RSASHA1", + DSANSEC3SHA1: "DSA-NSEC3-SHA1", + RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", + RSASHA256: "RSASHA256", + RSASHA512: "RSASHA512", + ECCGOST: "ECC-GOST", + ECDSAP256SHA256: "ECDSAP256SHA256", + ECDSAP384SHA384: "ECDSAP384SHA384", + ED25519: "ED25519", + ED448: "ED448", + INDIRECT: "INDIRECT", + PRIVATEDNS: "PRIVATEDNS", + PRIVATEOID: "PRIVATEOID", +} + +// StringToAlgorithm is the reverse of AlgorithmToString. +var StringToAlgorithm = reverseInt8(AlgorithmToString) + +// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. +var AlgorithmToHash = map[uint8]crypto.Hash{ + RSAMD5: crypto.MD5, // Deprecated in RFC 6725 + RSASHA1: crypto.SHA1, + RSASHA1NSEC3SHA1: crypto.SHA1, + RSASHA256: crypto.SHA256, + ECDSAP256SHA256: crypto.SHA256, + ECDSAP384SHA384: crypto.SHA384, + RSASHA512: crypto.SHA512, + ED25519: crypto.Hash(0), +} + +// DNSSEC hashing algorithm codes. +const ( + _ uint8 = iota + SHA1 // RFC 4034 + SHA256 // RFC 4509 + GOST94 // RFC 5933 + SHA384 // Experimental + SHA512 // Experimental +) + +// HashToString is a map of hash IDs to names. +var HashToString = map[uint8]string{ + SHA1: "SHA1", + SHA256: "SHA256", + GOST94: "GOST94", + SHA384: "SHA384", + SHA512: "SHA512", +} + +// StringToHash is a map of names to hash IDs. +var StringToHash = reverseInt8(HashToString) + +// DNSKEY flag values. +const ( + SEP = 1 + REVOKE = 1 << 7 + ZONE = 1 << 8 +) + +// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. +type rrsigWireFmt struct { + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + /* No Signature */ +} + +// Used for converting DNSKEY's rdata to wirefmt. 
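+// The packed form (Flags|Protocol|Algorithm|PublicKey) is the exact byte
+// sequence that KeyTag checksums and that ToDS hashes together with the
+// owner name.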
+type dnskeyWireFmt struct {
+	Flags     uint16
+	Protocol  uint8
+	Algorithm uint8
+	PublicKey string `dns:"base64"`
+	/* Nothing is left out */
+}
+
+func divRoundUp(a, b int) int {
+	return (a + b - 1) / b
+}
+
+// KeyTag calculates the keytag (or key-id) of the DNSKEY.
+func (k *DNSKEY) KeyTag() uint16 {
+	if k == nil {
+		return 0
+	}
+	var keytag int
+	switch k.Algorithm {
+	case RSAMD5:
+		// Look at the bottom two bytes of the modulus, which is the last
+		// item in the pubkey. We could do this faster by looking directly
+		// at the base64 values. But I'm lazy.
+		modulus, _ := fromBase64([]byte(k.PublicKey))
+		if len(modulus) > 1 {
+			x := binary.BigEndian.Uint16(modulus[len(modulus)-2:])
+			keytag = int(x)
+		}
+	default:
+		keywire := new(dnskeyWireFmt)
+		keywire.Flags = k.Flags
+		keywire.Protocol = k.Protocol
+		keywire.Algorithm = k.Algorithm
+		keywire.PublicKey = k.PublicKey
+		wire := make([]byte, DefaultMsgSize)
+		n, err := packKeyWire(keywire, wire)
+		if err != nil {
+			return 0
+		}
+		wire = wire[:n]
+		for i, v := range wire {
+			if i&1 != 0 {
+				keytag += int(v) // must be larger than uint32
+			} else {
+				keytag += int(v) << 8
+			}
+		}
+		keytag += (keytag >> 16) & 0xFFFF
+		keytag &= 0xFFFF
+	}
+	return uint16(keytag)
+}
+
+// ToDS converts a DNSKEY record to a DS record.
+func (k *DNSKEY) ToDS(h uint8) *DS {
+	if k == nil {
+		return nil
+	}
+	ds := new(DS)
+	ds.Hdr.Name = k.Hdr.Name
+	ds.Hdr.Class = k.Hdr.Class
+	ds.Hdr.Rrtype = TypeDS
+	ds.Hdr.Ttl = k.Hdr.Ttl
+	ds.Algorithm = k.Algorithm
+	ds.DigestType = h
+	ds.KeyTag = k.KeyTag()
+
+	keywire := new(dnskeyWireFmt)
+	keywire.Flags = k.Flags
+	keywire.Protocol = k.Protocol
+	keywire.Algorithm = k.Algorithm
+	keywire.PublicKey = k.PublicKey
+	wire := make([]byte, DefaultMsgSize)
+	n, err := packKeyWire(keywire, wire)
+	if err != nil {
+		return nil
+	}
+	wire = wire[:n]
+
+	owner := make([]byte, 255)
+	off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false)
+	if err1 != nil {
+		return nil
+	}
+	owner = owner[:off]
+	// RFC4034:
+	// digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA);
+	// "|" denotes concatenation
+	// DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key.
+
+	var hash crypto.Hash
+	switch h {
+	case SHA1:
+		hash = crypto.SHA1
+	case SHA256:
+		hash = crypto.SHA256
+	case SHA384:
+		hash = crypto.SHA384
+	case SHA512:
+		hash = crypto.SHA512
+	default:
+		return nil
+	}
+
+	s := hash.New()
+	s.Write(owner)
+	s.Write(wire)
+	ds.Digest = hex.EncodeToString(s.Sum(nil))
+	return ds
+}
+
+// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record.
+func (k *DNSKEY) ToCDNSKEY() *CDNSKEY {
+	c := &CDNSKEY{DNSKEY: *k}
+	c.Hdr = *k.Hdr.copyHeader()
+	c.Hdr.Rrtype = TypeCDNSKEY
+	return c
+}
+
+// ToCDS converts a DS record to a CDS record.
+func (d *DS) ToCDS() *CDS {
+	c := &CDS{DS: *d}
+	c.Hdr = *d.Hdr.copyHeader()
+	c.Hdr.Rrtype = TypeCDS
+	return c
+}
+
+// Sign signs an RRSet. The signature needs to be filled in with the values:
+// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied
+// from the RRset. Sign returns a non-nil error when the signing failed; nil
+// when it went OK.
+// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non-zero,
+// it is used as-is, otherwise the TTL of the RRset is used as the
+// OrigTTL.
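+//
+// Typical fill-in before calling Sign (all values are illustrative; key is a
+// matching *DNSKEY and signer its crypto.Signer):
+//
+//	sig := new(RRSIG)
+//	sig.KeyTag = key.KeyTag()
+//	sig.SignerName = key.Hdr.Name
+//	sig.Algorithm = key.Algorithm
+//	sig.Inception = uint32(time.Now().Unix())
+//	sig.Expiration = uint32(time.Now().Add(14 * 24 * time.Hour).Unix())
+//	err := sig.Sign(signer, rrset)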
+func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { + if k == nil { + return ErrPrivKey + } + // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + rr.Hdr.Rrtype = TypeRRSIG + rr.Hdr.Name = rrset[0].Header().Name + rr.Hdr.Class = rrset[0].Header().Class + if rr.OrigTtl == 0 { // If set don't override + rr.OrigTtl = rrset[0].Header().Ttl + } + rr.TypeCovered = rrset[0].Header().Rrtype + rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) + + if strings.HasPrefix(rrset[0].Header().Name, "*") { + rr.Labels-- // wildcard, remove from label count + } + + sigwire := new(rrsigWireFmt) + sigwire.TypeCovered = rr.TypeCovered + sigwire.Algorithm = rr.Algorithm + sigwire.Labels = rr.Labels + sigwire.OrigTtl = rr.OrigTtl + sigwire.Expiration = rr.Expiration + sigwire.Inception = rr.Inception + sigwire.KeyTag = rr.KeyTag + // For signing, lowercase this name + sigwire.SignerName = strings.ToLower(rr.SignerName) + + // Create the desired binary blob + signdata := make([]byte, DefaultMsgSize) + n, err := packSigWire(sigwire, signdata) + if err != nil { + return err + } + signdata = signdata[:n] + wire, err := rawSignatureData(rrset, rr) + if err != nil { + return err + } + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return ErrAlg + } + + switch rr.Algorithm { + case ED25519: + // ed25519 signs the raw message and performs hashing internally. + // All other supported signature schemes operate over the pre-hashed + // message, and thus ed25519 must be handled separately here. + // + // The raw message is passed directly into sign and crypto.Hash(0) is + // used to signal to the crypto.Signer that the data has not been hashed. + signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + default: + h := hash.New() + h.Write(signdata) + h.Write(wire) + + signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) + if err != nil { + return err + } + + rr.Signature = toBase64(signature) + } + + return nil +} + +func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { + signature, err := k.Sign(rand.Reader, hashed, hash) + if err != nil { + return nil, err + } + + switch alg { + case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: + return signature, nil + + case ECDSAP256SHA256, ECDSAP384SHA384: + ecdsaSignature := &struct { + R, S *big.Int + }{} + if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { + return nil, err + } + + var intlen int + switch alg { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + + signature := intToBytes(ecdsaSignature.R, intlen) + signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) + return signature, nil + + // There is no defined interface for what a DSA backed crypto.Signer returns + case DSA, DSANSEC3SHA1: + // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) + // signature := []byte{byte(t)} + // signature = append(signature, intToBytes(r1, 20)...) + // signature = append(signature, intToBytes(s1, 20)...) + // rr.Signature = signature + + case ED25519: + return signature, nil + } + + return nil, ErrAlg +} + +// Verify validates an RRSet with the signature and key. This is only the +// cryptographic test, the signature validity period must be checked separately. 
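+// Use ValidityPeriod to check the signature's inception and expiration window.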
+// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work.
+func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
+	// First the easy checks
+	if !IsRRset(rrset) {
+		return ErrRRset
+	}
+	if rr.KeyTag != k.KeyTag() {
+		return ErrKey
+	}
+	if rr.Hdr.Class != k.Hdr.Class {
+		return ErrKey
+	}
+	if rr.Algorithm != k.Algorithm {
+		return ErrKey
+	}
+	if strings.ToLower(rr.SignerName) != strings.ToLower(k.Hdr.Name) {
+		return ErrKey
+	}
+	if k.Protocol != 3 {
+		return ErrKey
+	}
+
+	// IsRRset checked that we have at least one RR and that the RRs in
+	// the set have consistent type, class, and name. Also check that type and
+	// class match the RRSIG record.
+	if rrset[0].Header().Class != rr.Hdr.Class {
+		return ErrRRset
+	}
+	if rrset[0].Header().Rrtype != rr.TypeCovered {
+		return ErrRRset
+	}
+
+	// RFC 4035 5.3.2. Reconstructing the Signed Data
+	// Copy the sig, except the rrsig data
+	sigwire := new(rrsigWireFmt)
+	sigwire.TypeCovered = rr.TypeCovered
+	sigwire.Algorithm = rr.Algorithm
+	sigwire.Labels = rr.Labels
+	sigwire.OrigTtl = rr.OrigTtl
+	sigwire.Expiration = rr.Expiration
+	sigwire.Inception = rr.Inception
+	sigwire.KeyTag = rr.KeyTag
+	sigwire.SignerName = strings.ToLower(rr.SignerName)
+	// Create the desired binary blob
+	signeddata := make([]byte, DefaultMsgSize)
+	n, err := packSigWire(sigwire, signeddata)
+	if err != nil {
+		return err
+	}
+	signeddata = signeddata[:n]
+	wire, err := rawSignatureData(rrset, rr)
+	if err != nil {
+		return err
+	}
+
+	sigbuf := rr.sigBuf() // Get the binary signature data
+	if rr.Algorithm == PRIVATEDNS { // PRIVATEOID
+		// TODO(miek)
+		// remove the domain name and assume its ours?
+	}
+
+	hash, ok := AlgorithmToHash[rr.Algorithm]
+	if !ok {
+		return ErrAlg
+	}
+
+	switch rr.Algorithm {
+	case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5:
+		// TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere??
+		pubkey := k.publicKeyRSA() // Get the key
+		if pubkey == nil {
+			return ErrKey
+		}
+
+		h := hash.New()
+		h.Write(signeddata)
+		h.Write(wire)
+		return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf)
+
+	case ECDSAP256SHA256, ECDSAP384SHA384:
+		pubkey := k.publicKeyECDSA()
+		if pubkey == nil {
+			return ErrKey
+		}
+
+		// Split sigbuf into the r and s coordinates
+		r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2])
+		s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:])
+
+		h := hash.New()
+		h.Write(signeddata)
+		h.Write(wire)
+		if ecdsa.Verify(pubkey, h.Sum(nil), r, s) {
+			return nil
+		}
+		return ErrSig
+
+	case ED25519:
+		pubkey := k.publicKeyED25519()
+		if pubkey == nil {
+			return ErrKey
+		}
+
+		if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) {
+			return nil
+		}
+		return ErrSig
+
+	default:
+		return ErrAlg
+	}
+}
+
+// ValidityPeriod uses RFC1982 serial arithmetic to calculate
+// if a signature period is valid. If t is the zero time, the
+// current time is taken; otherwise t is used. Returns true if the signature
+// is valid at the given time, otherwise returns false.
+func (rr *RRSIG) ValidityPeriod(t time.Time) bool {
+	var utc int64
+	if t.IsZero() {
+		utc = time.Now().UTC().Unix()
+	} else {
+		utc = t.UTC().Unix()
+	}
+	modi := (int64(rr.Inception) - utc) / year68
+	mode := (int64(rr.Expiration) - utc) / year68
+	ti := int64(rr.Inception) + (modi * year68)
+	te := int64(rr.Expiration) + (mode * year68)
+	return ti <= utc && utc <= te
+}
+
+// Return the signature's sigdata, decoded from base64, as a byte slice.
+func (rr *RRSIG) sigBuf() []byte {
+	sigbuf, err := fromBase64([]byte(rr.Signature))
+	if err != nil {
+		return nil
+	}
+	return sigbuf
+}
+
+// publicKeyRSA returns the RSA public key from a DNSKEY record.
+func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
+	keybuf, err := fromBase64([]byte(k.PublicKey))
+	if err != nil {
+		return nil
+	}
+
+	// RFC 2537/3110, section 2. RSA Public KEY Resource Records
+	// Length is in the 0th byte, unless it's zero; then it is
+	// in bytes 1 and 2 and it's a 16-bit number.
+	explen := uint16(keybuf[0])
+	keyoff := 1
+	if explen == 0 {
+		explen = uint16(keybuf[1])<<8 | uint16(keybuf[2])
+		keyoff = 3
+	}
+	pubkey := new(rsa.PublicKey)
+
+	pubkey.N = big.NewInt(0)
+	shift := uint64((explen - 1) * 8)
+	expo := uint64(0)
+	for i := int(explen - 1); i > 0; i-- {
+		expo += uint64(keybuf[keyoff+i]) << shift
+		shift -= 8
+	}
+	// Remainder
+	expo += uint64(keybuf[keyoff])
+	if expo > (2<<31)+1 {
+		// Larger expo than supported.
+		// println("dns: F5 primes (or larger) are not supported")
+		return nil
+	}
+	pubkey.E = int(expo)
+
+	pubkey.N.SetBytes(keybuf[keyoff+int(explen):])
+	return pubkey
+}
+
+// publicKeyECDSA returns the Curve public key from the DNSKEY record.
+func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
+	keybuf, err := fromBase64([]byte(k.PublicKey))
+	if err != nil {
+		return nil
+	}
+	pubkey := new(ecdsa.PublicKey)
+	switch k.Algorithm {
+	case ECDSAP256SHA256:
+		pubkey.Curve = elliptic.P256()
+		if len(keybuf) != 64 {
+			// Wrongly encoded key
+			return nil
+		}
+	case ECDSAP384SHA384:
+		pubkey.Curve = elliptic.P384()
+		if len(keybuf) != 96 {
+			// Wrongly encoded key
+			return nil
+		}
+	}
+	pubkey.X = big.NewInt(0)
+	pubkey.X.SetBytes(keybuf[:len(keybuf)/2])
+	pubkey.Y = big.NewInt(0)
+	pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
+	return pubkey
+}
+
+func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
+	keybuf, err := fromBase64([]byte(k.PublicKey))
+	if err != nil {
+		return nil
+	}
+	if len(keybuf) < 22 {
+		return nil
+	}
+	t, keybuf := int(keybuf[0]), keybuf[1:]
+	size := 64 + t*8
+	q, keybuf := keybuf[:20], keybuf[20:]
+	if len(keybuf) != 3*size {
+		return nil
+	}
+	p, keybuf := keybuf[:size], keybuf[size:]
+	g, y := keybuf[:size], keybuf[size:]
+	pubkey := new(dsa.PublicKey)
+	pubkey.Parameters.Q = big.NewInt(0).SetBytes(q)
+	pubkey.Parameters.P = big.NewInt(0).SetBytes(p)
+	pubkey.Parameters.G = big.NewInt(0).SetBytes(g)
+	pubkey.Y = big.NewInt(0).SetBytes(y)
+	return pubkey
+}
+
+func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey {
+	keybuf, err := fromBase64([]byte(k.PublicKey))
+	if err != nil {
+		return nil
+	}
+	if len(keybuf) != ed25519.PublicKeySize {
+		return nil
+	}
+	return keybuf
+}
+
+type wireSlice [][]byte
+
+func (p wireSlice) Len() int      { return len(p) }
+func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p wireSlice) Less(i, j int) bool {
+	_, ioff, _ := UnpackDomainName(p[i], 0)
+	_, joff, _ := UnpackDomainName(p[j], 0)
+	return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0
+}
+
+// Return the raw signature data.
+func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
+	wires := make(wireSlice, len(rrset))
+	for i, r := range rrset {
+		r1 := r.copy()
+		r1.Header().Ttl = s.OrigTtl
+		labels := SplitDomainName(r1.Header().Name)
+		// 6.2. Canonical RR Form. (4) - wildcards
+		if len(labels) > int(s.Labels) {
+			// Wildcard
+			r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
+		}
+		// RFC 4034: 6.2. Canonical RR Form.
(2) - domain name to lowercase + r1.Header().Name = strings.ToLower(r1.Header().Name) + // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. + // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, + // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, + // SRV, DNAME, A6 + // + // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): + // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record + // that needs conversion to lowercase, and twice at that. Since HINFO + // records contain no domain names, they are not subject to case + // conversion. + switch x := r1.(type) { + case *NS: + x.Ns = strings.ToLower(x.Ns) + case *MD: + x.Md = strings.ToLower(x.Md) + case *MF: + x.Mf = strings.ToLower(x.Mf) + case *CNAME: + x.Target = strings.ToLower(x.Target) + case *SOA: + x.Ns = strings.ToLower(x.Ns) + x.Mbox = strings.ToLower(x.Mbox) + case *MB: + x.Mb = strings.ToLower(x.Mb) + case *MG: + x.Mg = strings.ToLower(x.Mg) + case *MR: + x.Mr = strings.ToLower(x.Mr) + case *PTR: + x.Ptr = strings.ToLower(x.Ptr) + case *MINFO: + x.Rmail = strings.ToLower(x.Rmail) + x.Email = strings.ToLower(x.Email) + case *MX: + x.Mx = strings.ToLower(x.Mx) + case *RP: + x.Mbox = strings.ToLower(x.Mbox) + x.Txt = strings.ToLower(x.Txt) + case *AFSDB: + x.Hostname = strings.ToLower(x.Hostname) + case *RT: + x.Host = strings.ToLower(x.Host) + case *SIG: + x.SignerName = strings.ToLower(x.SignerName) + case *PX: + x.Map822 = strings.ToLower(x.Map822) + x.Mapx400 = strings.ToLower(x.Mapx400) + case *NAPTR: + x.Replacement = strings.ToLower(x.Replacement) + case *KX: + x.Exchanger = strings.ToLower(x.Exchanger) + case *SRV: + x.Target = strings.ToLower(x.Target) + case *DNAME: + x.Target = strings.ToLower(x.Target) + } + // 6.2. Canonical RR Form. (5) - origTTL + wire := make([]byte, r1.len()+1) // +1 to be safe(r) + off, err1 := PackRR(r1, wire, 0, nil, false) + if err1 != nil { + return nil, err1 + } + wire = wire[:off] + wires[i] = wire + } + sort.Sort(wires) + for i, wire := range wires { + if i > 0 && bytes.Equal(wire, wires[i-1]) { + continue + } + buf = append(buf, wire...) 
+ } + return buf, nil +} + +func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go RRSIG packing + off, err := packUint16(sw.TypeCovered, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(sw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(sw.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(sw.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(sw.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(sw.SignerName, msg, off, nil, false) + if err != nil { + return off, err + } + return off, nil +} + +func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { + // copied from zmsg.go DNSKEY packing + off, err := packUint16(dw.Flags, msg, 0) + if err != nil { + return off, err + } + off, err = packUint8(dw.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(dw.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(dw.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go new file mode 100644 index 0000000000..33e913ac52 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keygen.go @@ -0,0 +1,178 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "math/big" + + "golang.org/x/crypto/ed25519" +) + +// Generate generates a DNSKEY of the given bit size. +// The public part is put inside the DNSKEY record. +// The Algorithm in the key must be set as this will define +// what kind of DNSKEY will be generated. +// The ECDSA algorithms imply a fixed keysize, in that case +// bits should be set to the size of the algorithm. 
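+//
+// A sketch of generating a P-256 key (names and values are illustrative):
+//
+//	key := new(DNSKEY)
+//	key.Hdr = RR_Header{Name: "example.org.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600}
+//	key.Flags = ZONE
+//	key.Protocol = 3
+//	key.Algorithm = ECDSAP256SHA256
+//	priv, err := key.Generate(256)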
+func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + if bits != 1024 { + return nil, ErrKeySize + } + case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: + if bits < 512 || bits > 4096 { + return nil, ErrKeySize + } + case RSASHA512: + if bits < 1024 || bits > 4096 { + return nil, ErrKeySize + } + case ECDSAP256SHA256: + if bits != 256 { + return nil, ErrKeySize + } + case ECDSAP384SHA384: + if bits != 384 { + return nil, ErrKeySize + } + case ED25519: + if bits != 256 { + return nil, ErrKeySize + } + } + + switch k.Algorithm { + case DSA, DSANSEC3SHA1: + params := new(dsa.Parameters) + if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { + return nil, err + } + priv := new(dsa.PrivateKey) + priv.PublicKey.Parameters = *params + err := dsa.GenerateKey(priv, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) + return priv, nil + case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: + priv, err := rsa.GenerateKey(rand.Reader, bits) + if err != nil { + return nil, err + } + k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) + return priv, nil + case ECDSAP256SHA256, ECDSAP384SHA384: + var c elliptic.Curve + switch k.Algorithm { + case ECDSAP256SHA256: + c = elliptic.P256() + case ECDSAP384SHA384: + c = elliptic.P384() + } + priv, err := ecdsa.GenerateKey(c, rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) + return priv, nil + case ED25519: + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + return nil, err + } + k.setPublicKeyED25519(pub) + return priv, nil + default: + return nil, ErrAlg + } +} + +// Set the public key (the value E and N) +func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { + if _E == 0 || _N == nil { + return false + } + buf := exponentToBuf(_E) + buf = append(buf, _N.Bytes()...) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Elliptic Curves +func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { + if _X == nil || _Y == nil { + return false + } + var intlen int + switch k.Algorithm { + case ECDSAP256SHA256: + intlen = 32 + case ECDSAP384SHA384: + intlen = 48 + } + k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) + return true +} + +// Set the public key for DSA +func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { + if _Q == nil || _P == nil || _G == nil || _Y == nil { + return false + } + buf := dsaToBuf(_Q, _P, _G, _Y) + k.PublicKey = toBase64(buf) + return true +} + +// Set the public key for Ed25519 +func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { + if _K == nil { + return false + } + k.PublicKey = toBase64(_K) + return true +} + +// Set the public key (the values E and N) for RSA +// RFC 3110: Section 2. RSA Public KEY Resource Records +func exponentToBuf(_E int) []byte { + var buf []byte + i := big.NewInt(int64(_E)).Bytes() + if len(i) < 256 { + buf = make([]byte, 1, 1+len(i)) + buf[0] = uint8(len(i)) + } else { + buf = make([]byte, 3, 3+len(i)) + buf[0] = 0 + buf[1] = uint8(len(i) >> 8) + buf[2] = uint8(len(i)) + } + buf = append(buf, i...) + return buf +} + +// Set the public key for X and Y for Curve. The two +// values are just concatenated. +func curveToBuf(_X, _Y *big.Int, intlen int) []byte { + buf := intToBytes(_X, intlen) + buf = append(buf, intToBytes(_Y, intlen)...) + return buf +} + +// Set the public key for X and Y for Curve. 
The two +// values are just concatenated. +func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { + t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) + buf := []byte{byte(t)} + buf = append(buf, intToBytes(_Q, 20)...) + buf = append(buf, intToBytes(_P, 64+t*8)...) + buf = append(buf, intToBytes(_G, 64+t*8)...) + buf = append(buf, intToBytes(_Y, 64+t*8)...) + return buf +} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go new file mode 100644 index 0000000000..e2d9d8f924 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -0,0 +1,297 @@ +package dns + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "io" + "math/big" + "strconv" + "strings" + + "golang.org/x/crypto/ed25519" +) + +// NewPrivateKey returns a PrivateKey by parsing the string s. +// s should be in the same form of the BIND private key files. +func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { + if s == "" || s[len(s)-1] != '\n' { // We need a closing newline + return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") + } + return k.ReadPrivateKey(strings.NewReader(s), "") +} + +// ReadPrivateKey reads a private key from the io.Reader q. The string file is +// only used in error reporting. +// The public key must be known, because some cryptographic algorithms embed +// the public inside the privatekey. +func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { + m, err := parseKey(q, file) + if m == nil { + return nil, err + } + if _, ok := m["private-key-format"]; !ok { + return nil, ErrPrivKey + } + if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { + return nil, ErrPrivKey + } + // TODO(mg): check if the pubkey matches the private key + algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + if err != nil { + return nil, ErrPrivKey + } + switch uint8(algo) { + case DSA: + priv, err := readPrivateKeyDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case RSAMD5: + fallthrough + case RSASHA1: + fallthrough + case RSASHA1NSEC3SHA1: + fallthrough + case RSASHA256: + fallthrough + case RSASHA512: + priv, err := readPrivateKeyRSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyRSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ECCGOST: + return nil, ErrPrivKey + case ECDSAP256SHA256: + fallthrough + case ECDSAP384SHA384: + priv, err := readPrivateKeyECDSA(m) + if err != nil { + return nil, err + } + pub := k.publicKeyECDSA() + if pub == nil { + return nil, ErrKey + } + priv.PublicKey = *pub + return priv, nil + case ED25519: + return readPrivateKeyED25519(m) + default: + return nil, ErrPrivKey + } +} + +// Read a private key (file) string and create a public key. Return the private key. 
+func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { + p := new(rsa.PrivateKey) + p.Primes = []*big.Int{nil, nil} + for k, v := range m { + switch k { + case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + switch k { + case "modulus": + p.PublicKey.N = big.NewInt(0) + p.PublicKey.N.SetBytes(v1) + case "publicexponent": + i := big.NewInt(0) + i.SetBytes(v1) + p.PublicKey.E = int(i.Int64()) // int64 should be large enough + case "privateexponent": + p.D = big.NewInt(0) + p.D.SetBytes(v1) + case "prime1": + p.Primes[0] = big.NewInt(0) + p.Primes[0].SetBytes(v1) + case "prime2": + p.Primes[1] = big.NewInt(0) + p.Primes[1].SetBytes(v1) + } + case "exponent1", "exponent2", "coefficient": + // not used in Go (yet) + case "created", "publish", "activate": + // not used in Go (yet) + } + } + return p, nil +} + +func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { + p := new(dsa.PrivateKey) + p.X = big.NewInt(0) + for k, v := range m { + switch k { + case "private_value(x)": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.X.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { + p := new(ecdsa.PrivateKey) + p.D = big.NewInt(0) + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + v1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + p.D.SetBytes(v1) + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { + var p ed25519.PrivateKey + // TODO: validate that the required flags are present + for k, v := range m { + switch k { + case "privatekey": + p1, err := fromBase64([]byte(v)) + if err != nil { + return nil, err + } + if len(p1) != 32 { + return nil, ErrPrivKey + } + // RFC 8080 and Golang's x/crypto/ed25519 differ as to how the + // private keys are represented. RFC 8080 specifies that private + // keys be stored solely as the seed value (p1 above) while the + // ed25519 package represents them as the seed value concatenated + // to the public key, which is derived from the seed value. + // + // ed25519.GenerateKey reads exactly 32 bytes from the passed in + // io.Reader and uses them as the seed. It also derives the + // public key and produces a compatible private key. + _, p, err = ed25519.GenerateKey(bytes.NewReader(p1)) + if err != nil { + return nil, err + } + case "created", "publish", "activate": + /* not used in Go (yet) */ + } + } + return p, nil +} + +// parseKey reads a private key from r. It returns a map[string]string, +// with the key-value pairs, or an error when the file is not correct. +func parseKey(r io.Reader, file string) (map[string]string, error) { + s, cancel := scanInit(r) + m := make(map[string]string) + c := make(chan lex) + k := "" + defer func() { + cancel() + // zlexer can send up to two tokens, the next one and possibly 1 remainders. + // Do a non-blocking read. 
+ _, ok := <-c + _, ok = <-c + if !ok { + // too bad + } + }() + // Start the lexer + go klexer(s, c) + for l := range c { + // It should alternate + switch l.value { + case zKey: + k = l.token + case zValue: + if k == "" { + return nil, &ParseError{file, "no private key seen", l} + } + //println("Setting", strings.ToLower(k), "to", l.token, "b") + m[strings.ToLower(k)] = l.token + k = "" + } + } + return m, nil +} + +// klexer scans the sourcefile and returns tokens on the channel c. +func klexer(s *scan, c chan lex) { + var l lex + str := "" // Hold the current read text + commt := false + key := true + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + switch x { + case ':': + if commt { + break + } + l.token = str + if key { + l.value = zKey + c <- l + // Next token is a space, eat it + s.tokenText() + key = false + str = "" + } else { + l.value = zValue + } + case ';': + commt = true + case '\n': + if commt { + // Reset a comment + commt = false + } + l.value = zValue + l.token = str + c <- l + str = "" + commt = false + key = true + default: + if commt { + break + } + str += string(x) + } + x, err = s.tokenText() + } + if len(str) > 0 { + // Send remainder + l.token = str + l.value = zValue + c <- l + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go new file mode 100644 index 0000000000..46f3215c8f --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_privkey.go @@ -0,0 +1,93 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "math/big" + "strconv" + + "golang.org/x/crypto/ed25519" +) + +const format = "Private-key-format: v1.3\n" + +// PrivateKeyString converts a PrivateKey to a string. This string has the same +// format as the private-key-file of BIND9 (Private-key-format: v1.3). 
+// It needs some info from the key (the algorithm), so it's a method of DNSKEY.
+// It supports *rsa.PrivateKey, *ecdsa.PrivateKey, *dsa.PrivateKey and ed25519.PrivateKey.
+func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
+	algorithm := strconv.Itoa(int(r.Algorithm))
+	algorithm += " (" + AlgorithmToString[r.Algorithm] + ")"
+
+	switch p := p.(type) {
+	case *rsa.PrivateKey:
+		modulus := toBase64(p.PublicKey.N.Bytes())
+		e := big.NewInt(int64(p.PublicKey.E))
+		publicExponent := toBase64(e.Bytes())
+		privateExponent := toBase64(p.D.Bytes())
+		prime1 := toBase64(p.Primes[0].Bytes())
+		prime2 := toBase64(p.Primes[1].Bytes())
+		// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
+		// and from: http://code.google.com/p/go/issues/detail?id=987
+		one := big.NewInt(1)
+		p1 := big.NewInt(0).Sub(p.Primes[0], one)
+		q1 := big.NewInt(0).Sub(p.Primes[1], one)
+		exp1 := big.NewInt(0).Mod(p.D, p1)
+		exp2 := big.NewInt(0).Mod(p.D, q1)
+		coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
+
+		exponent1 := toBase64(exp1.Bytes())
+		exponent2 := toBase64(exp2.Bytes())
+		coefficient := toBase64(coeff.Bytes())
+
+		return format +
+			"Algorithm: " + algorithm + "\n" +
+			"Modulus: " + modulus + "\n" +
+			"PublicExponent: " + publicExponent + "\n" +
+			"PrivateExponent: " + privateExponent + "\n" +
+			"Prime1: " + prime1 + "\n" +
+			"Prime2: " + prime2 + "\n" +
+			"Exponent1: " + exponent1 + "\n" +
+			"Exponent2: " + exponent2 + "\n" +
+			"Coefficient: " + coefficient + "\n"
+
+	case *ecdsa.PrivateKey:
+		var intlen int
+		switch r.Algorithm {
+		case ECDSAP256SHA256:
+			intlen = 32
+		case ECDSAP384SHA384:
+			intlen = 48
+		}
+		private := toBase64(intToBytes(p.D, intlen))
+		return format +
+			"Algorithm: " + algorithm + "\n" +
+			"PrivateKey: " + private + "\n"
+
+	case *dsa.PrivateKey:
+		T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8)
+		prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8))
+		subprime := toBase64(intToBytes(p.PublicKey.Parameters.Q, 20))
+		base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8))
+		priv := toBase64(intToBytes(p.X, 20))
+		pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8))
+		return format +
+			"Algorithm: " + algorithm + "\n" +
+			"Prime(p): " + prime + "\n" +
+			"Subprime(q): " + subprime + "\n" +
+			"Base(g): " + base + "\n" +
+			"Private_value(x): " + priv + "\n" +
+			"Public_value(y): " + pub + "\n"
+
+	case ed25519.PrivateKey:
+		private := toBase64(p[:32])
+		return format +
+			"Algorithm: " + algorithm + "\n" +
+			"PrivateKey: " + private + "\n"
+
+	default:
+		return ""
+	}
+}
diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go
new file mode 100644
index 0000000000..1d8114744f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/doc.go
@@ -0,0 +1,272 @@
+/*
+Package dns implements a full-featured interface to the Domain Name System.
+Server- and client-side programming is supported.
+The package allows complete control over what is sent out to the DNS. The package
+API follows the less-is-more principle, by presenting a small, clean interface.
+
+The package dns supports (asynchronous) querying/replying, incoming/outgoing zone transfers,
+TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing.
+Note that domain names MUST be fully qualified before sending them; unqualified
+names in a message will result in a packing failure.
+
+Resource records are native types. They are not stored in wire format.
+Basic usage pattern for creating a new resource record:
+
+	r := new(dns.MX)
+	r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX,
+		Class: dns.ClassINET, Ttl: 3600}
+	r.Preference = 10
+	r.Mx = "mx.miek.nl."
+
+Or directly from a string:
+
+	mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.")
+
+Or when the default origin (.) and TTL (3600) and class (IN) suit you:
+
+	mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl")
+
+Or even:
+
+	mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek")
+
+In the DNS, messages are exchanged; these messages contain resource
+records (sets). Basic use pattern for creating a message:
+
+	m := new(dns.Msg)
+	m.SetQuestion("miek.nl.", dns.TypeMX)
+
+Or when not certain if the domain name is fully qualified:
+
+	m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX)
+
+The message m is now a message with the question section set to ask
+the MX records for the miek.nl. zone.
+
+The following is slightly more verbose, but more flexible:
+
+	m1 := new(dns.Msg)
+	m1.Id = dns.Id()
+	m1.RecursionDesired = true
+	m1.Question = make([]dns.Question, 1)
+	m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET}
+
+After creating a message it can be sent.
+Basic use pattern for synchronously querying the DNS at a
+server configured on 127.0.0.1 and port 53:
+
+	c := new(dns.Client)
+	in, rtt, err := c.Exchange(m1, "127.0.0.1:53")
+
+Suppressing multiple outstanding queries (with the same question, type and
+class) is as easy as setting:
+
+	c.SingleInflight = true
+
+More advanced options are available using a net.Dialer and the corresponding API.
+For example, it is possible to set a timeout, or to specify a source IP address
+and port to use for the connection:
+
+	c := new(dns.Client)
+	laddr := net.UDPAddr{
+		IP:   net.ParseIP("::1"),
+		Port: 12345,
+		Zone: "",
+	}
+	d := net.Dialer{
+		Timeout:   200 * time.Millisecond,
+		LocalAddr: &laddr,
+	}
+	in, rtt, err := c.ExchangeWithDialer(&d, m1, "8.8.8.8:53")
+
+If these "advanced" features are not needed, a simple UDP query can be sent
+with:
+
+	in, err := dns.Exchange(m1, "127.0.0.1:53")
+
+When this function returns you will get a DNS message. A DNS message consists
+of four sections.
+The question section: in.Question, the answer section: in.Answer,
+the authority section: in.Ns and the additional section: in.Extra.
+
+Each of these sections (except the Question section) contains a []RR. Basic
+use pattern for accessing the rdata of a TXT RR as the first RR in
+the Answer section:
+
+	if t, ok := in.Answer[0].(*dns.TXT); ok {
+		// do something with t.Txt
+	}
+
+Domain Name and TXT Character String Representations
+
+Both domain names and TXT character strings are converted to presentation
+form both when unpacked and when converted to strings.
+
+For TXT character strings, tabs, carriage returns and line feeds will be
+converted to \t, \r and \n respectively. Backslashes and quotation marks
+will be escaped. Bytes below 32 and above 127 will be converted to \DDD
+form.
+
+For domain names, in addition to the above rules, brackets, periods,
+spaces, semicolons and the at symbol are escaped.
+
+DNSSEC
+
+DNSSEC (DNS Security Extensions) adds a layer of security to the DNS. It
+uses public key cryptography to sign resource records. The
+public keys are stored in DNSKEY records and the signatures in RRSIG records.
+
+Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit
+to a request.
+
+	m := new(dns.Msg)
+	m.SetEdns0(4096, true)
+
+Signature generation, signature verification and key generation are all supported.
+
+DYNAMIC UPDATES
+
+Dynamic updates reuse the DNS message format, but rename three of
+the sections: Question is Zone, Answer is Prerequisite, Authority is
+Update; only the Additional section is not renamed. See RFC 2136 for the gory details.
+
+You can set a rather complex set of rules for the existence or absence of
+certain resource records or names in a zone to specify if resource records
+should be added or removed. The table from RFC 2136, supplemented with the Go
+DNS functions, shows which functions exist to specify the prerequisites.
+
+	3.2.4 - Table Of Metavalues Used In Prerequisite Section
+
+	CLASS    TYPE     RDATA    Meaning                    Function
+	--------------------------------------------------------------
+	ANY      ANY      empty    Name is in use             dns.NameUsed
+	ANY      rrset    empty    RRset exists (value indep) dns.RRsetUsed
+	NONE     ANY      empty    Name is not in use         dns.NameNotUsed
+	NONE     rrset    empty    RRset does not exist       dns.RRsetNotUsed
+	zone     rrset    rr       RRset exists (value dep)   dns.Used
+
+The prerequisite section can also be left empty.
+If you have decided on the prerequisites, you can tell what RRs should
+be added or deleted. The next table shows the options you have and
+what functions to call.
+
+	3.4.2.6 - Table Of Metavalues Used In Update Section
+
+	CLASS    TYPE     RDATA    Meaning                     Function
+	---------------------------------------------------------------
+	ANY      ANY      empty    Delete all RRsets from name dns.RemoveName
+	ANY      rrset    empty    Delete an RRset             dns.RemoveRRset
+	NONE     rrset    rr       Delete an RR from RRset     dns.Remove
+	zone     rrset    rr       Add to an RRset             dns.Insert
+
+TRANSACTION SIGNATURE
+
+A TSIG, or transaction signature, adds an HMAC TSIG record to each message sent.
+The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512.
+
+Basic use pattern when querying with a TSIG name "axfr." (note that these key names
+must be fully qualified - as they are domain names) and the base64 secret
+"so6ZGir4GPAqINNh9U5c3A==":
+
+	c := new(dns.Client)
+	c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+	m := new(dns.Msg)
+	m.SetQuestion("miek.nl.", dns.TypeMX)
+	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+	...
+	// When sending, the TSIG RR is calculated and filled in automatically
+
+If an incoming message contains a TSIG record it MUST be the last record in
+the additional section (RFC 2845 3.2). This means that you should make the
+call to SetTsig last, right before executing the query. If you make any
+changes to the RRset after calling SetTsig() the signature will be incorrect.
+
+When requesting a zone transfer with TSIG (almost all TSIG usage is when
+requesting zone transfers), this is the basic use pattern. In this example we request an AXFR for
+miek.nl. with TSIG key named "axfr." and secret "so6ZGir4GPAqINNh9U5c3A=="
+and using the server 176.58.119.54:
+
+	t := new(dns.Transfer)
+	m := new(dns.Msg)
+	t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+	m.SetAxfr("miek.nl.")
+	m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+	c, err := t.In(m, "176.58.119.54:53")
+	for r := range c { ... }
+
+You can now read the records from the transfer as they come in. Each envelope is checked with TSIG.
+If something is not correct, an error is returned.
+
+Basic use pattern validating and replying to a message that has TSIG set.
+
+	server := &dns.Server{Addr: ":53", Net: "udp"}
+	server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="}
+	go server.ListenAndServe()
+	dns.HandleFunc(".", handleRequest)
+
+	func handleRequest(w dns.ResponseWriter, r *dns.Msg) {
+		m := new(dns.Msg)
+		m.SetReply(r)
+		if r.IsTsig() != nil {
+			if w.TsigStatus() == nil {
+				// *Msg r has a TSIG record and it was validated
+				m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix())
+			} else {
+				// *Msg r has a TSIG record and it was not validated
+			}
+		}
+		w.WriteMsg(m)
+	}
+
+PRIVATE RRS
+
+RFC 6895 sets aside a range of type codes for private use. This range
+is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new resource records, these
+can be used before requesting an official type code from IANA.
+
+See http://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more
+information.
+
+EDNS0
+
+EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated
+by RFC 6891. It defines a new RR type, the OPT RR, which is then completely
+abused.
+Basic use pattern for creating an (empty) OPT RR:
+
+	o := new(dns.OPT)
+	o.Hdr.Name = "." // MUST be the root zone, per definition.
+	o.Hdr.Rrtype = dns.TypeOPT
+
+The rdata of an OPT RR consists of a slice of EDNS0 (RFC 6891)
+interfaces. Currently only a few have been standardized: EDNS0_NSID
+(RFC 5001) and EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note
+that these options may be combined in an OPT RR.
+Basic use pattern for a server to check if (and which) options are set:
+
+	// o is a dns.OPT
+	for _, s := range o.Option {
+		switch e := s.(type) {
+		case *dns.EDNS0_NSID:
+			// do stuff with e.Nsid
+		case *dns.EDNS0_SUBNET:
+			// access e.Family, e.Address, etc.
+		}
+	}
+
+SIG(0)
+
+From RFC 2931:
+
+	SIG(0) provides protection for DNS transactions and requests ....
+	... protection for glue records, DNS requests, protection for message headers
+	on requests and responses, and protection of the overall integrity of a response.
+
+It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared
+secret approach in TSIG.
+Supported algorithms: DSA, ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256 and
+RSASHA512.
+
+Signing subsequent messages in multi-message sessions is not implemented.
+*/
+package dns
diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go
new file mode 100644
index 0000000000..6f9d2ea393
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns.go
@@ -0,0 +1,627 @@
+package dns
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+)
+
+// EDNS0 Option codes.
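+// The numeric values below match the IANA "DNS EDNS0 Option Codes (OPT)"
+// registry for the options implemented in this file.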
+const ( + EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 + EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt + EDNS0NSID = 0x3 // nsid (See RFC 5001) + EDNS0DAU = 0x5 // DNSSEC Algorithm Understood + EDNS0DHU = 0x6 // DS Hash Understood + EDNS0N3U = 0x7 // NSEC3 Hash Understood + EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) + EDNS0EXPIRE = 0x9 // EDNS0 expire + EDNS0COOKIE = 0xa // EDNS0 Cookie + EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) + EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) + EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) + EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) + _DO = 1 << 15 // DNSSEC OK +) + +// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. +// See RFC 6891. +type OPT struct { + Hdr RR_Header + Option []EDNS0 `dns:"opt"` +} + +func (rr *OPT) String() string { + s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " + if rr.Do() { + s += "flags: do; " + } else { + s += "flags: ; " + } + s += "udp: " + strconv.Itoa(int(rr.UDPSize())) + + for _, o := range rr.Option { + switch o.(type) { + case *EDNS0_NSID: + s += "\n; NSID: " + o.String() + h, e := o.pack() + var r string + if e == nil { + for _, c := range h { + r += "(" + string(c) + ")" + } + s += " " + r + } + case *EDNS0_SUBNET: + s += "\n; SUBNET: " + o.String() + case *EDNS0_COOKIE: + s += "\n; COOKIE: " + o.String() + case *EDNS0_UL: + s += "\n; UPDATE LEASE: " + o.String() + case *EDNS0_LLQ: + s += "\n; LONG LIVED QUERIES: " + o.String() + case *EDNS0_DAU: + s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() + case *EDNS0_DHU: + s += "\n; DS HASH UNDERSTOOD: " + o.String() + case *EDNS0_N3U: + s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() + case *EDNS0_LOCAL: + s += "\n; LOCAL OPT: " + o.String() + case *EDNS0_PADDING: + s += "\n; PADDING: " + o.String() + } + } + return s +} + +func (rr *OPT) len() int { + l := rr.Hdr.len() + for i := 0; i < len(rr.Option); i++ { + l += 4 // Account for 2-byte option code and 2-byte option length. + lo, _ := rr.Option[i].pack() + l += len(lo) + } + return l +} + +// return the old value -> delete SetVersion? + +// Version returns the EDNS version used. Only zero is defined. +func (rr *OPT) Version() uint8 { + return uint8((rr.Hdr.Ttl & 0x00FF0000) >> 16) +} + +// SetVersion sets the version of EDNS. This is usually zero. +func (rr *OPT) SetVersion(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | (uint32(v) << 16) +} + +// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). +func (rr *OPT) ExtendedRcode() int { + return int((rr.Hdr.Ttl & 0xFF000000) >> 24) +} + +// SetExtendedRcode sets the EDNS extended RCODE field. +func (rr *OPT) SetExtendedRcode(v uint8) { + rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | (uint32(v) << 24) +} + +// UDPSize returns the UDP buffer size. +func (rr *OPT) UDPSize() uint16 { + return rr.Hdr.Class +} + +// SetUDPSize sets the UDP buffer size. +func (rr *OPT) SetUDPSize(size uint16) { + rr.Hdr.Class = size +} + +// Do returns the value of the DO (DNSSEC OK) bit. +func (rr *OPT) Do() bool { + return rr.Hdr.Ttl&_DO == _DO +} + +// SetDo sets the DO (DNSSEC OK) bit. +// If we pass an argument, set the DO bit to that value. +// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. 
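+//
+// A minimal usage sketch (assuming o is an *OPT):
+//
+//	o.SetDo()      // sets the DO bit
+//	o.SetDo(false) // clears it again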
+func (rr *OPT) SetDo(do ...bool) { + if len(do) == 1 { + if do[0] { + rr.Hdr.Ttl |= _DO + } else { + rr.Hdr.Ttl &^= _DO + } + } else { + rr.Hdr.Ttl |= _DO + } +} + +// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. +type EDNS0 interface { + // Option returns the option code for the option. + Option() uint16 + // pack returns the bytes of the option data. + pack() ([]byte, error) + // unpack sets the data as found in the buffer. Is also sets + // the length of the slice as the length of the option data. + unpack([]byte) error + // String returns the string representation of the option. + String() string +} + +// EDNS0_NSID option is used to retrieve a nameserver +// identifier. When sending a request Nsid must be set to the empty string +// The identifier is an opaque string encoded as hex. +// Basic use pattern for creating an nsid option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_NSID) +// e.Code = dns.EDNS0NSID +// e.Nsid = "AA" +// o.Option = append(o.Option, e) +type EDNS0_NSID struct { + Code uint16 // Always EDNS0NSID + Nsid string // This string needs to be hex encoded +} + +func (e *EDNS0_NSID) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Nsid) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. +func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } +func (e *EDNS0_NSID) String() string { return string(e.Nsid) } + +// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver +// an idea of where the client lives. See RFC 7871. It can then give back a different +// answer depending on the location or network topology. +// Basic use pattern for creating an subnet option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_SUBNET) +// e.Code = dns.EDNS0SUBNET +// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 +// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 +// e.SourceScope = 0 +// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 +// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 +// o.Option = append(o.Option, e) +// +// This code will parse all the available bits when unpacking (up to optlen). +// When packing it will apply SourceNetmask. If you need more advanced logic, +// patches welcome and good luck. +type EDNS0_SUBNET struct { + Code uint16 // Always EDNS0SUBNET + Family uint16 // 1 for IP, 2 for IP6 + SourceNetmask uint8 + SourceScope uint8 + Address net.IP +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } + +func (e *EDNS0_SUBNET) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint16(b[0:], e.Family) + b[2] = e.SourceNetmask + b[3] = e.SourceScope + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // We might don't need to complain either + if e.SourceNetmask != 0 { + return nil, errors.New("dns: bad address family") + } + case 1: + if e.SourceNetmask > net.IPv4len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address.To4()) != net.IPv4len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + case 2: + if e.SourceNetmask > net.IPv6len*8 { + return nil, errors.New("dns: bad netmask") + } + if len(e.Address) != net.IPv6len { + return nil, errors.New("dns: bad address") + } + ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) + needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up + b = append(b, ip[:needLength]...) + default: + return nil, errors.New("dns: bad address family") + } + return b, nil +} + +func (e *EDNS0_SUBNET) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Family = binary.BigEndian.Uint16(b) + e.SourceNetmask = b[2] + e.SourceScope = b[3] + switch e.Family { + case 0: + // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 + // It's okay to accept such a packet + if e.SourceNetmask != 0 { + return errors.New("dns: bad address family") + } + e.Address = net.IPv4(0, 0, 0, 0) + case 1: + if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv4len) + for i := 0; i < net.IPv4len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IPv4(addr[0], addr[1], addr[2], addr[3]) + case 2: + if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { + return errors.New("dns: bad netmask") + } + addr := make([]byte, net.IPv6len) + for i := 0; i < net.IPv6len && 4+i < len(b); i++ { + addr[i] = b[4+i] + } + e.Address = net.IP{addr[0], addr[1], addr[2], addr[3], addr[4], + addr[5], addr[6], addr[7], addr[8], addr[9], addr[10], + addr[11], addr[12], addr[13], addr[14], addr[15]} + default: + return errors.New("dns: bad address family") + } + return nil +} + +func (e *EDNS0_SUBNET) String() (s string) { + if e.Address == nil { + s = "" + } else if e.Address.To4() != nil { + s = e.Address.String() + } else { + s = "[" + e.Address.String() + "]" + } + s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) + return +} + +// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_COOKIE) +// e.Code = dns.EDNS0COOKIE +// e.Cookie = "24a5ac.." +// o.Option = append(o.Option, e) +// +// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is +// always 8 bytes. It may then optionally be followed by the server cookie. The server +// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: +// +// cCookie := o.Cookie[:16] +// sCookie := o.Cookie[16:] +// +// There is no guarantee that the Cookie string has a specific length. 
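+//
+// A hypothetical sketch for filling in a random 8-byte client cookie; it
+// assumes crypto/rand and encoding/hex are imported by the caller:
+//
+//	buf := make([]byte, 8)
+//	if _, err := rand.Read(buf); err == nil {
+//		e.Cookie = hex.EncodeToString(buf) // 16 hex characters
+//	}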
+type EDNS0_COOKIE struct { + Code uint16 // Always EDNS0COOKIE + Cookie string // Hex-encoded cookie data +} + +func (e *EDNS0_COOKIE) pack() ([]byte, error) { + h, err := hex.DecodeString(e.Cookie) + if err != nil { + return nil, err + } + return h, nil +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } +func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } +func (e *EDNS0_COOKIE) String() string { return e.Cookie } + +// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set +// an expiration on an update RR. This is helpful for clients that cannot clean +// up after themselves. This is a draft RFC and more information can be found at +// http://files.dns-sd.org/draft-sekar-dns-ul.txt +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_UL) +// e.Code = dns.EDNS0UL +// e.Lease = 120 // in seconds +// o.Option = append(o.Option, e) +type EDNS0_UL struct { + Code uint16 // Always EDNS0UL + Lease uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } +func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } + +// Copied: http://golang.org/src/pkg/net/dnsmsg.go +func (e *EDNS0_UL) pack() ([]byte, error) { + b := make([]byte, 4) + binary.BigEndian.PutUint32(b, e.Lease) + return b, nil +} + +func (e *EDNS0_UL) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Lease = binary.BigEndian.Uint32(b) + return nil +} + +// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 +// Implemented for completeness, as the EDNS0 type code is assigned. +type EDNS0_LLQ struct { + Code uint16 // Always EDNS0LLQ + Version uint16 + Opcode uint16 + Error uint16 + Id uint64 + LeaseLife uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } + +func (e *EDNS0_LLQ) pack() ([]byte, error) { + b := make([]byte, 18) + binary.BigEndian.PutUint16(b[0:], e.Version) + binary.BigEndian.PutUint16(b[2:], e.Opcode) + binary.BigEndian.PutUint16(b[4:], e.Error) + binary.BigEndian.PutUint64(b[6:], e.Id) + binary.BigEndian.PutUint32(b[14:], e.LeaseLife) + return b, nil +} + +func (e *EDNS0_LLQ) unpack(b []byte) error { + if len(b) < 18 { + return ErrBuf + } + e.Version = binary.BigEndian.Uint16(b[0:]) + e.Opcode = binary.BigEndian.Uint16(b[2:]) + e.Error = binary.BigEndian.Uint16(b[4:]) + e.Id = binary.BigEndian.Uint64(b[6:]) + e.LeaseLife = binary.BigEndian.Uint32(b[14:]) + return nil +} + +func (e *EDNS0_LLQ) String() string { + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + + " " + strconv.FormatUint(uint64(e.LeaseLife), 10) + return s +} + +// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. +type EDNS0_DAU struct { + Code uint16 // Always EDNS0DAU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } +func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DAU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. +type EDNS0_DHU struct { + Code uint16 // Always EDNS0DHU + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } +func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_DHU) String() string { + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. +type EDNS0_N3U struct { + Code uint16 // Always EDNS0N3U + AlgCode []uint8 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } +func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } + +func (e *EDNS0_N3U) String() string { + // Re-use the hash map + s := "" + for i := 0; i < len(e.AlgCode); i++ { + if a, ok := HashToString[e.AlgCode[i]]; ok { + s += " " + a + } else { + s += " " + strconv.Itoa(int(e.AlgCode[i])) + } + } + return s +} + +// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. +type EDNS0_EXPIRE struct { + Code uint16 // Always EDNS0EXPIRE + Expire uint32 +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } +func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } + +func (e *EDNS0_EXPIRE) pack() ([]byte, error) { + b := make([]byte, 4) + b[0] = byte(e.Expire >> 24) + b[1] = byte(e.Expire >> 16) + b[2] = byte(e.Expire >> 8) + b[3] = byte(e.Expire) + return b, nil +} + +func (e *EDNS0_EXPIRE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Expire = binary.BigEndian.Uint32(b) + return nil +} + +// The EDNS0_LOCAL option is used for local/experimental purposes. The option +// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] +// (RFC6891), although any unassigned code can actually be used. The content of +// the option is made available in Data, unaltered. +// Basic use pattern for creating a local option: +// +// o := new(dns.OPT) +// o.Hdr.Name = "." +// o.Hdr.Rrtype = dns.TypeOPT +// e := new(dns.EDNS0_LOCAL) +// e.Code = dns.EDNS0LOCALSTART +// e.Data = []byte{72, 82, 74} +// o.Option = append(o.Option, e) +type EDNS0_LOCAL struct { + Code uint16 + Data []byte +} + +// Option implements the EDNS0 interface. 
+func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } +func (e *EDNS0_LOCAL) String() string { + return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) +} + +func (e *EDNS0_LOCAL) pack() ([]byte, error) { + b := make([]byte, len(e.Data)) + copied := copy(b, e.Data) + if copied != len(e.Data) { + return nil, ErrBuf + } + return b, nil +} + +func (e *EDNS0_LOCAL) unpack(b []byte) error { + e.Data = make([]byte, len(b)) + copied := copy(e.Data, b) + if copied != len(b) { + return ErrBuf + } + return nil +} + +// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep +// the TCP connection alive. See RFC 7828. +type EDNS0_TCP_KEEPALIVE struct { + Code uint16 // Always EDNSTCPKEEPALIVE + Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; + Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } + +func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { + if e.Timeout != 0 && e.Length != 2 { + return nil, errors.New("dns: timeout specified but length is not 2") + } + if e.Timeout == 0 && e.Length != 0 { + return nil, errors.New("dns: timeout not specified but length is not 0") + } + b := make([]byte, 4+e.Length) + binary.BigEndian.PutUint16(b[0:], e.Code) + binary.BigEndian.PutUint16(b[2:], e.Length) + if e.Length == 2 { + binary.BigEndian.PutUint16(b[4:], e.Timeout) + } + return b, nil +} + +func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { + if len(b) < 4 { + return ErrBuf + } + e.Length = binary.BigEndian.Uint16(b[2:4]) + if e.Length != 0 && e.Length != 2 { + return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) + } + if e.Length == 2 { + if len(b) < 6 { + return ErrBuf + } + e.Timeout = binary.BigEndian.Uint16(b[4:6]) + } + return nil +} + +func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { + s = "use tcp keep-alive" + if e.Length == 0 { + s += ", timeout omitted" + } else { + s += fmt.Sprintf(", timeout %dms", e.Timeout*100) + } + return +} + +// EDNS0_PADDING option is used to add padding to a request/response. The default +// value of padding SHOULD be 0x0 but other values MAY be used, for instance if +// compression is applied before encryption which may break signatures. +type EDNS0_PADDING struct { + Padding []byte +} + +// Option implements the EDNS0 interface. +func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go new file mode 100644 index 0000000000..3f5303c201 --- /dev/null +++ b/vendor/github.com/miekg/dns/format.go @@ -0,0 +1,87 @@ +package dns + +import ( + "net" + "reflect" + "strconv" +) + +// NumField returns the number of rdata fields r has. +func NumField(r RR) int { + return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header +} + +// Field returns the rdata field i as a string. Fields are indexed starting from 1. +// RR types that holds slice data, for instance the NSEC type bitmap will return a single +// string where the types are concatenated using a space. 
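+// For example, for a hypothetical *MX record mx, Field(mx, 1) returns the
+// preference as a decimal string and Field(mx, 2) returns the exchanger name.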
+// Accessing non-existent fields will cause a panic.
+func Field(r RR, i int) string {
+	if i == 0 {
+		return ""
+	}
+	d := reflect.ValueOf(r).Elem().Field(i)
+	switch k := d.Kind(); k {
+	case reflect.String:
+		return d.String()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return strconv.FormatInt(d.Int(), 10)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return strconv.FormatUint(d.Uint(), 10)
+	case reflect.Slice:
+		switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
+		case `dns:"a"`:
+			// TODO(miek): Hmm store this as 16 bytes
+			if d.Len() < net.IPv6len {
+				return net.IPv4(byte(d.Index(0).Uint()),
+					byte(d.Index(1).Uint()),
+					byte(d.Index(2).Uint()),
+					byte(d.Index(3).Uint())).String()
+			}
+			return net.IPv4(byte(d.Index(12).Uint()),
+				byte(d.Index(13).Uint()),
+				byte(d.Index(14).Uint()),
+				byte(d.Index(15).Uint())).String()
+		case `dns:"aaaa"`:
+			return net.IP{
+				byte(d.Index(0).Uint()),
+				byte(d.Index(1).Uint()),
+				byte(d.Index(2).Uint()),
+				byte(d.Index(3).Uint()),
+				byte(d.Index(4).Uint()),
+				byte(d.Index(5).Uint()),
+				byte(d.Index(6).Uint()),
+				byte(d.Index(7).Uint()),
+				byte(d.Index(8).Uint()),
+				byte(d.Index(9).Uint()),
+				byte(d.Index(10).Uint()),
+				byte(d.Index(11).Uint()),
+				byte(d.Index(12).Uint()),
+				byte(d.Index(13).Uint()),
+				byte(d.Index(14).Uint()),
+				byte(d.Index(15).Uint()),
+			}.String()
+		case `dns:"nsec"`:
+			if d.Len() == 0 {
+				return ""
+			}
+			s := Type(d.Index(0).Uint()).String()
+			for i := 1; i < d.Len(); i++ {
+				s += " " + Type(d.Index(i).Uint()).String()
+			}
+			return s
+		default:
+			// if it does not have a tag it's a string slice
+			fallthrough
+		case `dns:"txt"`:
+			if d.Len() == 0 {
+				return ""
+			}
+			s := d.Index(0).String()
+			for i := 1; i < d.Len(); i++ {
+				s += " " + d.Index(i).String()
+			}
+			return s
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go
new file mode 100644
index 0000000000..a8a09184d4
--- /dev/null
+++ b/vendor/github.com/miekg/dns/fuzz.go
@@ -0,0 +1,23 @@
+// +build fuzz
+
+package dns
+
+func Fuzz(data []byte) int {
+	msg := new(Msg)
+
+	if err := msg.Unpack(data); err != nil {
+		return 0
+	}
+	if _, err := msg.Pack(); err != nil {
+		return 0
+	}
+
+	return 1
+}
+
+func FuzzNewRR(data []byte) int {
+	if _, err := NewRR(string(data)); err != nil {
+		return 0
+	}
+	return 1
+}
diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
new file mode 100644
index 0000000000..e4481a4b0d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/generate.go
@@ -0,0 +1,159 @@
+package dns
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Parse the $GENERATE statement as used in BIND9 zones.
+// See http://www.zytrax.com/books/dns/ch8/generate.html for instance.
+// We are called after '$GENERATE '. After which we expect:
+// * the range (12-24/2)
+// * lhs (ownername)
+// * [[ttl][class]]
+// * type
+// * rhs (rdata)
+// But we are lazy here: only the range is parsed; *all* occurrences
+// of $ after that are interpreted.
+// Any errors are returned as a string value; the empty string signals
+// "no error".
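+//
+// An illustrative statement (not taken from this codebase):
+//
+//	$GENERATE 1-4 host$ A 10.0.0.$
+//
+// would expand to the A records host1 ... host4 pointing at 10.0.0.1 ... 10.0.0.4.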
+func generate(l lex, c chan lex, t chan *Token, o string) string { + step := 1 + if i := strings.IndexAny(l.token, "/"); i != -1 { + if i+1 == len(l.token) { + return "bad step in $GENERATE range" + } + if s, err := strconv.Atoi(l.token[i+1:]); err == nil { + if s < 0 { + return "bad step in $GENERATE range" + } + step = s + } else { + return "bad step in $GENERATE range" + } + l.token = l.token[:i] + } + sx := strings.SplitN(l.token, "-", 2) + if len(sx) != 2 { + return "bad start-stop in $GENERATE range" + } + start, err := strconv.Atoi(sx[0]) + if err != nil { + return "bad start in $GENERATE range" + } + end, err := strconv.Atoi(sx[1]) + if err != nil { + return "bad stop in $GENERATE range" + } + if end < 0 || start < 0 || end < start { + return "bad range in $GENERATE range" + } + + <-c // _BLANK + // Create a complete new string, which we then parse again. + s := "" +BuildRR: + l = <-c + if l.value != zNewline && l.value != zEOF { + s += l.token + goto BuildRR + } + for i := start; i <= end; i += step { + var ( + escape bool + dom bytes.Buffer + mod string + err error + offset int + ) + + for j := 0; j < len(s); j++ { // No 'range' because we need to jump around + switch s[j] { + case '\\': + if escape { + dom.WriteByte('\\') + escape = false + continue + } + escape = true + case '$': + mod = "%d" + offset = 0 + if escape { + dom.WriteByte('$') + escape = false + continue + } + escape = false + if j+1 >= len(s) { // End of the string + dom.WriteString(fmt.Sprintf(mod, i+offset)) + continue + } else { + if s[j+1] == '$' { + dom.WriteByte('$') + j++ + continue + } + } + // Search for { and } + if s[j+1] == '{' { // Modifier block + sep := strings.Index(s[j+2:], "}") + if sep == -1 { + return "bad modifier in $GENERATE" + } + mod, offset, err = modToPrintf(s[j+2 : j+2+sep]) + if err != nil { + return err.Error() + } + j += 2 + sep // Jump to it + } + dom.WriteString(fmt.Sprintf(mod, i+offset)) + default: + if escape { // Pretty useless here + escape = false + continue + } + dom.WriteByte(s[j]) + } + } + // Re-parse the RR and send it on the current channel t + rx, err := NewRR("$ORIGIN " + o + "\n" + dom.String()) + if err != nil { + return err.Error() + } + t <- &Token{RR: rx} + // Its more efficient to first built the rrlist and then parse it in + // one go! But is this a problem? + } + return "" +} + +// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. +func modToPrintf(s string) (string, int, error) { + xs := strings.SplitN(s, ",", 3) + if len(xs) != 3 { + return "", 0, errors.New("bad modifier in $GENERATE") + } + // xs[0] is offset, xs[1] is width, xs[2] is base + if xs[2] != "o" && xs[2] != "d" && xs[2] != "x" && xs[2] != "X" { + return "", 0, errors.New("bad base in $GENERATE") + } + offset, err := strconv.Atoi(xs[0]) + if err != nil || offset > 255 { + return "", 0, errors.New("bad offset in $GENERATE") + } + width, err := strconv.Atoi(xs[1]) + if err != nil || width > 255 { + return "", offset, errors.New("bad width in $GENERATE") + } + switch { + case width < 0: + return "", offset, errors.New("bad width in $GENERATE") + case width == 0: + return "%" + xs[1] + xs[2], offset, nil + } + return "%0" + xs[1] + xs[2], offset, nil +} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go new file mode 100644 index 0000000000..760b89e711 --- /dev/null +++ b/vendor/github.com/miekg/dns/labels.go @@ -0,0 +1,191 @@ +package dns + +// Holds a bunch of helper functions for dealing with labels. 
+
+// SplitDomainName splits a name string into its labels.
+// www.miek.nl. returns []string{"www", "miek", "nl"}.
+// .www.miek.nl. returns []string{"", "www", "miek", "nl"}.
+// The root label (.) returns nil. Note that using
+// strings.Split(s, ".") will work in most cases, but does not handle
+// escaped dots (\.) for instance.
+// s must be a syntactically valid domain name, see IsDomainName.
+func SplitDomainName(s string) (labels []string) {
+	if len(s) == 0 {
+		return nil
+	}
+	fqdnEnd := 0 // offset of the final '.' or the length of the name
+	idx := Split(s)
+	begin := 0
+	if s[len(s)-1] == '.' {
+		fqdnEnd = len(s) - 1
+	} else {
+		fqdnEnd = len(s)
+	}
+
+	switch len(idx) {
+	case 0:
+		return nil
+	case 1:
+		// no-op
+	default:
+		end := 0
+		for i := 1; i < len(idx); i++ {
+			end = idx[i]
+			labels = append(labels, s[begin:end-1])
+			begin = end
+		}
+	}
+
+	labels = append(labels, s[begin:fqdnEnd])
+	return labels
+}
+
+// CompareDomainName compares the names s1 and s2 and
+// returns how many labels they have in common starting from the *right*.
+// The comparison stops at the first inequality. The names are downcased
+// before the comparison.
+//
+// www.miek.nl. and miek.nl. have two labels in common: miek and nl
+// www.miek.nl. and www.bla.nl. have one label in common: nl
+//
+// s1 and s2 must be syntactically valid domain names.
+func CompareDomainName(s1, s2 string) (n int) {
+	// the first check: root label
+	if s1 == "." || s2 == "." {
+		return 0
+	}
+
+	l1 := Split(s1)
+	l2 := Split(s2)
+
+	j1 := len(l1) - 1 // end
+	i1 := len(l1) - 2 // start
+	j2 := len(l2) - 1
+	i2 := len(l2) - 2
+	// the second check can be done here: last/only label
+	// before we fall through into the for-loop below
+	if equal(s1[l1[j1]:], s2[l2[j2]:]) {
+		n++
+	} else {
+		return
+	}
+	for {
+		if i1 < 0 || i2 < 0 {
+			break
+		}
+		if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) {
+			n++
+		} else {
+			break
+		}
+		j1--
+		i1--
+		j2--
+		i2--
+	}
+	return
+}
+
+// CountLabel counts the number of labels in the string s.
+// s must be a syntactically valid domain name.
+func CountLabel(s string) (labels int) {
+	if s == "." {
+		return
+	}
+	off := 0
+	end := false
+	for {
+		off, end = NextLabel(s, off)
+		labels++
+		if end {
+			return
+		}
+	}
+}
+
+// Split splits a name s into its label indexes.
+// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}.
+// The root name (.) returns nil. Also see SplitDomainName.
+// s must be a syntactically valid domain name.
+func Split(s string) []int {
+	if s == "." {
+		return nil
+	}
+	idx := make([]int, 1, 3)
+	off := 0
+	end := false
+
+	for {
+		off, end = NextLabel(s, off)
+		if end {
+			return idx
+		}
+		idx = append(idx, off)
+	}
+}
+
+// NextLabel returns the index of the start of the next label in the
+// string s starting at offset.
+// The bool end is true when the end of the string has been reached.
+// Also see PrevLabel.
+func NextLabel(s string, offset int) (i int, end bool) {
+	quote := false
+	for i = offset; i < len(s)-1; i++ {
+		switch s[i] {
+		case '\\':
+			quote = !quote
+		default:
+			quote = false
+		case '.':
+			if quote {
+				quote = !quote
+				continue
+			}
+			return i + 1, false
+		}
+	}
+	return i + 1, true
+}
+
+// PrevLabel returns the index of the label when starting from the right and
+// jumping n labels to the left.
+// The bool start is true when the start of the string has been overshot.
+// Also see NextLabel.
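+//
+// For example, PrevLabel("www.miek.nl.", 1) returns the index of the final
+// label "nl." (9 in this case).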
+func PrevLabel(s string, n int) (i int, start bool) { + if n == 0 { + return len(s), false + } + lab := Split(s) + if lab == nil { + return 0, true + } + if n > len(lab) { + return 0, true + } + return lab[len(lab)-n], false +} + +// equal compares a and b while ignoring case. It returns true when equal otherwise false. +func equal(a, b string) bool { + // might be lifted into API function. + la := len(a) + lb := len(b) + if la != lb { + return false + } + + for i := la - 1; i >= 0; i-- { + ai := a[i] + bi := b[i] + if ai >= 'A' && ai <= 'Z' { + ai |= ('a' - 'A') + } + if bi >= 'A' && bi <= 'Z' { + bi |= ('a' - 'A') + } + if ai != bi { + return false + } + } + return true +} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go new file mode 100644 index 0000000000..276e6b0271 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg.go @@ -0,0 +1,1177 @@ +// DNS packet assembly, see RFC 1035. Converting from - Unpack() - +// and to - Pack() - wire format. +// All the packers and unpackers take a (msg []byte, off int) +// and return (off1 int, ok bool). If they return ok==false, they +// also return off1==len(msg), so that the next unpacker will +// also fail. This lets us avoid checks of ok until the end of a +// packing sequence. + +package dns + +//go:generate go run msg_generate.go +//go:generate go run compress_generate.go + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "math/rand" + "strconv" + "sync" +) + +const ( + maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer + maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 +) + +// Errors defined in this package. +var ( + ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. + ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. + ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. + ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. + ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... + ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. + ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. + ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. + ErrKey error = &Error{err: "bad key"} + ErrKeySize error = &Error{err: "bad key size"} + ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} + ErrNoSig error = &Error{err: "no signature found"} + ErrPrivKey error = &Error{err: "bad private key"} + ErrRcode error = &Error{err: "bad rcode"} + ErrRdata error = &Error{err: "bad rdata"} + ErrRRset error = &Error{err: "bad rrset"} + ErrSecret error = &Error{err: "no secrets defined"} + ErrShortRead error = &Error{err: "short read"} + ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. + ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. + ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. 
+	ErrTruncated     error = &Error{err: "failed to unpack truncated message"} // ErrTruncated indicates that we failed to unpack a truncated message. We unpacked as much as we had so Msg can still be used, if desired.
+)
+
+// Id, by default, returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough. This being a
+// variable, the function can be reassigned to a custom function.
+// For instance, to make it return a static value:
+//
+//	dns.Id = func() uint16 { return 3 }
+var Id = id
+
+var (
+	idLock sync.Mutex
+	idRand *rand.Rand
+)
+
+// id returns a 16-bit random number to be used as a
+// message id. The randomness provided should be good enough.
+func id() uint16 {
+	idLock.Lock()
+
+	if idRand == nil {
+		// This (partially) works around
+		// https://github.com/golang/go/issues/11833 by only
+		// seeding idRand upon the first call to id.
+
+		var seed int64
+		var buf [8]byte
+
+		if _, err := crand.Read(buf[:]); err == nil {
+			seed = int64(binary.LittleEndian.Uint64(buf[:]))
+		} else {
+			seed = rand.Int63()
+		}
+
+		idRand = rand.New(rand.NewSource(seed))
+	}
+
+	// The call to idRand.Uint32 must be within the
+	// mutex lock because *rand.Rand is not safe for
+	// concurrent use.
+	//
+	// There is no added performance overhead to calling
+	// idRand.Uint32 inside a mutex lock over just
+	// calling rand.Uint32 as the global math/rand rng
+	// is internally protected by a sync.Mutex.
+	id := uint16(idRand.Uint32())
+
+	idLock.Unlock()
+	return id
+}
+
+// MsgHdr is a manually-unpacked version of (id, bits).
+type MsgHdr struct {
+	Id                 uint16
+	Response           bool
+	Opcode             int
+	Authoritative      bool
+	Truncated          bool
+	RecursionDesired   bool
+	RecursionAvailable bool
+	Zero               bool
+	AuthenticatedData  bool
+	CheckingDisabled   bool
+	Rcode              int
+}
+
+// Msg contains the layout of a DNS message.
+type Msg struct {
+	MsgHdr
+	Compress bool       `json:"-"` // If true, the message will be compressed when converted to wire format.
+	Question []Question // Holds the RR(s) of the question section.
+	Answer   []RR       // Holds the RR(s) of the answer section.
+	Ns       []RR       // Holds the RR(s) of the authority section.
+	Extra    []RR       // Holds the RR(s) of the additional section.
+}
+
+// ClassToString maps Classes to strings for each CLASS wire type.
+var ClassToString = map[uint16]string{
+	ClassINET:   "IN",
+	ClassCSNET:  "CS",
+	ClassCHAOS:  "CH",
+	ClassHESIOD: "HS",
+	ClassNONE:   "NONE",
+	ClassANY:    "ANY",
+}
+
+// OpcodeToString maps Opcodes to strings.
+var OpcodeToString = map[int]string{
+	OpcodeQuery:  "QUERY",
+	OpcodeIQuery: "IQUERY",
+	OpcodeStatus: "STATUS",
+	OpcodeNotify: "NOTIFY",
+	OpcodeUpdate: "UPDATE",
+}
+
+// RcodeToString maps Rcodes to strings.
+var RcodeToString = map[int]string{
+	RcodeSuccess:        "NOERROR",
+	RcodeFormatError:    "FORMERR",
+	RcodeServerFailure:  "SERVFAIL",
+	RcodeNameError:      "NXDOMAIN",
+	RcodeNotImplemented: "NOTIMPL",
+	RcodeRefused:        "REFUSED",
+	RcodeYXDomain:       "YXDOMAIN", // See RFC 2136
+	RcodeYXRrset:        "YXRRSET",
+	RcodeNXRrset:        "NXRRSET",
+	RcodeNotAuth:        "NOTAUTH",
+	RcodeNotZone:        "NOTZONE",
+	RcodeBadSig:         "BADSIG", // Also known as RcodeBadVers, see RFC 6891
+	// RcodeBadVers:        "BADVERS",
+	RcodeBadKey:    "BADKEY",
+	RcodeBadTime:   "BADTIME",
+	RcodeBadMode:   "BADMODE",
+	RcodeBadName:   "BADNAME",
+	RcodeBadAlg:    "BADALG",
+	RcodeBadTrunc:  "BADTRUNC",
+	RcodeBadCookie: "BADCOOKIE",
+}
+
+// Domain names are a sequence of counted strings
+// split at the dots. They end with a zero-length string.
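+// For example, "www.miek.nl." is encoded on the wire as
+//
+//	3 w w w 4 m i e k 2 n l 0
+//
+// with each label preceded by its length and the name terminated by a
+// zero-length label.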
+ +// PackDomainName packs a domain name s into msg[off:]. +// If compression is wanted compress must be true and the compression +// map needs to hold a mapping between domain names and offsets +// pointing into msg. +func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + off1, _, err = packDomainName(s, msg, off, compression, compress) + return +} + +func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { + // special case if msg == nil + lenmsg := 256 + if msg != nil { + lenmsg = len(msg) + } + ls := len(s) + if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. + return off, 0, nil + } + // If not fully qualified, error out, but only if msg == nil #ugly + switch { + case msg == nil: + if s[ls-1] != '.' { + s += "." + ls++ + } + case msg != nil: + if s[ls-1] != '.' { + return lenmsg, 0, ErrFqdn + } + } + // Each dot ends a segment of the name. + // We trade each dot byte for a length byte. + // Except for escaped dots (\.), which are normal dots. + // There is also a trailing zero. + + // Compression + nameoffset := -1 + pointer := -1 + // Emit sequence of counted strings, chopping at dots. + begin := 0 + bs := []byte(s) + roBs, bsFresh, escapedDot := s, true, false + for i := 0; i < ls; i++ { + if bs[i] == '\\' { + for j := i; j < ls-1; j++ { + bs[j] = bs[j+1] + } + ls-- + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + // check for \DDD + if i+2 < ls && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + bs[i] = dddToByte(bs[i:]) + for j := i + 1; j < ls-2; j++ { + bs[j] = bs[j+2] + } + ls -= 2 + } + escapedDot = bs[i] == '.' + bsFresh = false + continue + } + + if bs[i] == '.' { + if i > 0 && bs[i-1] == '.' && !escapedDot { + // two dots back to back is not legal + return lenmsg, labels, ErrRdata + } + if i-begin >= 1<<6 { // top two bits of length must be clear + return lenmsg, labels, ErrRdata + } + // off can already (we're in a loop) be bigger than len(msg) + // this happens when a name isn't fully qualified + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = byte(i - begin) + } + offset := off + off++ + for j := begin; j < i; j++ { + if off+1 > lenmsg { + return lenmsg, labels, ErrBuf + } + if msg != nil { + msg[off] = bs[j] + } + off++ + } + if compress && !bsFresh { + roBs = string(bs) + bsFresh = true + } + // Don't try to compress '.' + // We should only compress when compress it true, but we should also still pick + // up names that can be used for *future* compression(s). + if compression != nil && roBs[begin:] != "." { + if p, ok := compression[roBs[begin:]]; !ok { + // Only offsets smaller than this can be used. + if offset < maxCompressionOffset { + compression[roBs[begin:]] = offset + } + } else { + // The first hit is the longest matching dname + // keep the pointer offset we get back and store + // the offset of the current name, because that's + // where we need to insert the pointer later + + // If compress is true, we're allowed to compress this dname + if pointer == -1 && compress { + pointer = p // Where to point to + nameoffset = offset // Where to point from + break + } + } + } + labels++ + begin = i + 1 + } + escapedDot = false + } + // Root label is special + if len(bs) == 1 && bs[0] == '.' 
{ + return off, labels, nil + } + // If we did compression and we find something add the pointer here + if pointer != -1 { + // We have two bytes (14 bits) to put the pointer in + // if msg == nil, we will never do compression + binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000)) + off = nameoffset + 1 + goto End + } + if msg != nil && off < len(msg) { + msg[off] = 0 + } +End: + off++ + return off, labels, nil +} + +// Unpack a domain name. +// In addition to the simple sequences of counted strings above, +// domain names are allowed to refer to strings elsewhere in the +// packet, to avoid repeating common suffixes when returning +// many entries in a single domain. The pointers are marked +// by a length byte with the top two bits set. Ignoring those +// two bits, that byte and the next give a 14 bit offset from msg[0] +// where we should pick up the trail. +// Note that if we jump elsewhere in the packet, +// we return off1 == the offset after the first pointer we found, +// which is where the next record will start. +// In theory, the pointers are only allowed to jump backward. +// We let them jump anywhere and stop jumping after a while. + +// UnpackDomainName unpacks a domain name into a string. +func UnpackDomainName(msg []byte, off int) (string, int, error) { + s := make([]byte, 0, 64) + off1 := 0 + lenmsg := len(msg) + maxLen := maxDomainNameWireOctets + ptr := 0 // number of pointers followed +Loop: + for { + if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // end of name + break Loop + } + // literal string + if off+c > lenmsg { + return "", lenmsg, ErrBuf + } + for j := off; j < off+c; j++ { + switch b := msg[j]; b { + case '.', '(', ')', ';', ' ', '@': + fallthrough + case '"', '\\': + s = append(s, '\\', b) + // presentation-format \X escapes add an extra byte + maxLen++ + default: + if b < 32 || b >= 127 { // unprintable, use \DDD + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + // presentation-format \DDD escapes add 3 extra bytes + maxLen += 3 + } else { + s = append(s, b) + } + } + } + s = append(s, '.') + off += c + case 0xC0: + // pointer to somewhere else in msg. + // remember location after first ptr, + // since that's how many bytes we consumed. + // also, don't follow too many pointers -- + // maybe there's a loop. 
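+			// At most 10 pointer jumps are allowed; see the check on ptr below.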
+ if off >= lenmsg { + return "", lenmsg, ErrBuf + } + c1 := msg[off] + off++ + if ptr == 0 { + off1 = off + } + if ptr++; ptr > 10 { + return "", lenmsg, &Error{err: "too many compression pointers"} + } + // pointer should guarantee that it advances and points forwards at least + // but the condition on previous three lines guarantees that it's + // at least loop-free + off = (c^0xC0)<<8 | int(c1) + default: + // 0x80 and 0x40 are reserved + return "", lenmsg, ErrRdata + } + } + if ptr == 0 { + off1 = off + } + if len(s) == 0 { + s = []byte(".") + } else if len(s) >= maxLen { + // error if the name is too long, but don't throw it away + return string(s), lenmsg, ErrLongDomain + } + return string(s), off1, nil +} + +func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { + if len(txt) == 0 { + if offset >= len(msg) { + return offset, ErrBuf + } + msg[offset] = 0 + return offset, nil + } + var err error + for i := range txt { + if len(txt[i]) > len(tmp) { + return offset, ErrBuf + } + offset, err = packTxtString(txt[i], msg, offset, tmp) + if err != nil { + return offset, err + } + } + return offset, nil +} + +func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { + lenByteOffset := offset + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + offset++ + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + l := offset - lenByteOffset - 1 + if l > 255 { + return offset, &Error{err: "string exceeded 255 bytes in txt"} + } + msg[lenByteOffset] = byte(l) + return offset, nil +} + +func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { + if offset >= len(msg) || len(s) > len(tmp) { + return offset, ErrBuf + } + bs := tmp[:len(s)] + copy(bs, s) + for i := 0; i < len(bs); i++ { + if len(msg) <= offset { + return offset, ErrBuf + } + if bs[i] == '\\' { + i++ + if i == len(bs) { + break + } + // check for \DDD + if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { + msg[offset] = dddToByte(bs[i:]) + i += 2 + } else { + msg[offset] = bs[i] + } + } else { + msg[offset] = bs[i] + } + offset++ + } + return offset, nil +} + +func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { + off = off0 + var s string + for off < len(msg) && err == nil { + s, off, err = unpackTxtString(msg, off) + if err == nil { + ss = append(ss, s) + } + } + return +} + +func unpackTxtString(msg []byte, offset int) (string, int, error) { + if offset+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + l := int(msg[offset]) + if offset+l+1 > len(msg) { + return "", offset, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[offset+1 : offset+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + offset += 1 + l + return string(s), offset, nil +} + +// Helpers for dealing with 
escaped bytes
+func isDigit(b byte) bool { return b >= '0' && b <= '9' }
+
+func dddToByte(s []byte) byte {
+	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
+// Helper function for packing and unpacking
+func intToBytes(i *big.Int, length int) []byte {
+	buf := i.Bytes()
+	if len(buf) < length {
+		b := make([]byte, length)
+		copy(b[length-len(buf):], buf)
+		return b
+	}
+	return buf
+}
+
+// PackRR packs a resource record rr into msg[off:].
+// See PackDomainName for documentation about the compression.
+func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
+	if rr == nil {
+		return len(msg), &Error{err: "nil rr"}
+	}
+
+	off1, err = rr.pack(msg, off, compression, compress)
+	if err != nil {
+		return len(msg), err
+	}
+	// TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well.
+	if rawSetRdlength(msg, off, off1) {
+		return off1, nil
+	}
+	return off, ErrRdata
+}
+
+// UnpackRR unpacks msg[off:] into an RR.
+func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
+	h, off, msg, err := unpackHeader(msg, off)
+	if err != nil {
+		return nil, len(msg), err
+	}
+
+	return UnpackRRWithHeader(h, msg, off)
+}
+
+// UnpackRRWithHeader unpacks the record type specific payload given an existing
+// RR_Header.
+func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
+	end := off + int(h.Rdlength)
+
+	if fn, known := typeToUnpack[h.Rrtype]; !known {
+		rr, off, err = unpackRFC3597(h, msg, off)
+	} else {
+		rr, off, err = fn(h, msg, off)
+	}
+	if off != end {
+		return &h, end, &Error{err: "bad rdlength"}
+	}
+	return rr, off, err
+}
+
+// unpackRRslice unpacks msg[off:] into an []RR.
+// If we cannot unpack the whole array, then it will return nil
+func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) {
+	var r RR
+	// Don't pre-allocate, l may be under attacker control
+	var dst []RR
+	for i := 0; i < l; i++ {
+		off1 := off
+		r, off, err = UnpackRR(msg, off)
+		if err != nil {
+			off = len(msg)
+			break
+		}
+		// If offset does not increase anymore, l is a lie
+		if off1 == off {
+			l = i
+			break
+		}
+		dst = append(dst, r)
+	}
+	if err != nil && off == len(msg) {
+		dst = nil
+	}
+	return dst, off, err
+}
+
+// Convert a MsgHdr to a string, with dig-like headers:
+//
+//;; opcode: QUERY, status: NOERROR, id: 48404
+//
+//;; flags: qr aa rd ra;
+func (h *MsgHdr) String() string {
+	if h == nil {
+		return "<nil> MsgHdr"
+	}
+
+	s := ";; opcode: " + OpcodeToString[h.Opcode]
+	s += ", status: " + RcodeToString[h.Rcode]
+	s += ", id: " + strconv.Itoa(int(h.Id)) + "\n"
+
+	s += ";; flags:"
+	if h.Response {
+		s += " qr"
+	}
+	if h.Authoritative {
+		s += " aa"
+	}
+	if h.Truncated {
+		s += " tc"
+	}
+	if h.RecursionDesired {
+		s += " rd"
+	}
+	if h.RecursionAvailable {
+		s += " ra"
+	}
+	if h.Zero { // Hmm
+		s += " z"
+	}
+	if h.AuthenticatedData {
+		s += " ad"
+	}
+	if h.CheckingDisabled {
+		s += " cd"
+	}
+
+	s += ";"
+	return s
+}
+
+// Pack packs a Msg: it is converted to wire format.
+// If dns.Compress is true, the message will be in compressed wire format.
+func (dns *Msg) Pack() (msg []byte, err error) {
+	return dns.PackBuffer(nil)
+}
+
+// PackBuffer packs a Msg, using the given buffer buf. If buf is too small
+// a new buffer is allocated.
+func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
+	// We use a similar function in tsig.go's stripTsig.
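
Stepping back to the escape helpers above: presentation format writes any unprintable byte as a three-digit decimal escape, and dddToByte reverses that. A small self-contained example:

package main

import "fmt"

// dddToByte mirrors the helper above: it turns a three-digit
// decimal escape such as "065" back into the byte it names.
func dddToByte(s []byte) byte {
	return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}

func main() {
	fmt.Printf("%c\n", dddToByte([]byte("065"))) // A
	fmt.Println(dddToByte([]byte("010")))        // 10, a newline byte
}
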
+ var ( + dh Header + compression map[string]int + ) + + if dns.Compress { + compression = make(map[string]int) // Compression pointer mappings + } + + if dns.Rcode < 0 || dns.Rcode > 0xFFF { + return nil, ErrRcode + } + if dns.Rcode > 0xF { + // Regular RCODE field is 4 bits + opt := dns.IsEdns0() + if opt == nil { + return nil, ErrExtendedRcode + } + opt.SetExtendedRcode(uint8(dns.Rcode >> 4)) + dns.Rcode &= 0xF + } + + // Convert convenient Msg into wire-like Header. + dh.Id = dns.Id + dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode) + if dns.Response { + dh.Bits |= _QR + } + if dns.Authoritative { + dh.Bits |= _AA + } + if dns.Truncated { + dh.Bits |= _TC + } + if dns.RecursionDesired { + dh.Bits |= _RD + } + if dns.RecursionAvailable { + dh.Bits |= _RA + } + if dns.Zero { + dh.Bits |= _Z + } + if dns.AuthenticatedData { + dh.Bits |= _AD + } + if dns.CheckingDisabled { + dh.Bits |= _CD + } + + // Prepare variable sized arrays. + question := dns.Question + answer := dns.Answer + ns := dns.Ns + extra := dns.Extra + + dh.Qdcount = uint16(len(question)) + dh.Ancount = uint16(len(answer)) + dh.Nscount = uint16(len(ns)) + dh.Arcount = uint16(len(extra)) + + // We need the uncompressed length here, because we first pack it and then compress it. + msg = buf + uncompressedLen := compressedLen(dns, false) + if packLen := uncompressedLen + 1; len(msg) < packLen { + msg = make([]byte, packLen) + } + + // Pack it in: header and then the pieces. + off := 0 + off, err = dh.pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + for i := 0; i < len(question); i++ { + off, err = question[i].pack(msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(answer); i++ { + off, err = PackRR(answer[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(ns); i++ { + off, err = PackRR(ns[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + for i := 0; i < len(extra); i++ { + off, err = PackRR(extra[i], msg, off, compression, dns.Compress) + if err != nil { + return nil, err + } + } + return msg[:off], nil +} + +// Unpack unpacks a binary message to a Msg structure. +func (dns *Msg) Unpack(msg []byte) (err error) { + var ( + dh Header + off int + ) + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return err + } + + dns.Id = dh.Id + dns.Response = (dh.Bits & _QR) != 0 + dns.Opcode = int(dh.Bits>>11) & 0xF + dns.Authoritative = (dh.Bits & _AA) != 0 + dns.Truncated = (dh.Bits & _TC) != 0 + dns.RecursionDesired = (dh.Bits & _RD) != 0 + dns.RecursionAvailable = (dh.Bits & _RA) != 0 + dns.Zero = (dh.Bits & _Z) != 0 + dns.AuthenticatedData = (dh.Bits & _AD) != 0 + dns.CheckingDisabled = (dh.Bits & _CD) != 0 + dns.Rcode = int(dh.Bits & 0xF) + + // If we are at the end of the message we should return *just* the + // header. This can still be useful to the caller. 9.9.9.9 sends these + // when responding with REFUSED for instance. + if off == len(msg) { + // reset sections before returning + dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil + return nil + } + + // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are + // attacker controlled. This means we can't use them to pre-allocate + // slices. 
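
The Bits word packed and unpacked here follows the RFC 1035 header layout: QR, a 4-bit opcode, AA, TC, RD, RA, Z, AD, CD, and a 4-bit rcode. A sketch with the mask values written out (the package's private _QR-style constants are assumed to match these):

// Flags word: QR | Opcode(4) | AA | TC | RD | RA | Z | AD | CD | Rcode(4)
const (
	qr = 1 << 15 // response flag
	aa = 1 << 10 // authoritative answer
	tc = 1 << 9  // truncated
	rd = 1 << 8  // recursion desired
	ra = 1 << 7  // recursion available
)

// decodeBits extracts the fields the same way Unpack does here.
func decodeBits(bits uint16) (opcode, rcode int, response bool) {
	return int(bits>>11) & 0xF, int(bits & 0xF), bits&qr != 0
}
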
+	dns.Question = nil
+	for i := 0; i < int(dh.Qdcount); i++ {
+		off1 := off
+		var q Question
+		q, off, err = unpackQuestion(msg, off)
+		if err != nil {
+			// Even if Truncated is set, we will only set ErrTruncated if we
+			// actually got the questions
+			return err
+		}
+		if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie!
+			dh.Qdcount = uint16(i)
+			break
+		}
+		dns.Question = append(dns.Question, q)
+	}
+
+	dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off)
+	// The header counts might have been wrong, so we need to update it
+	dh.Ancount = uint16(len(dns.Answer))
+	if err == nil {
+		dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off)
+	}
+	// The header counts might have been wrong, so we need to update it
+	dh.Nscount = uint16(len(dns.Ns))
+	if err == nil {
+		dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off)
+	}
+	// The header counts might have been wrong, so we need to update it
+	dh.Arcount = uint16(len(dns.Extra))
+
+	if off != len(msg) {
+		// TODO(miek) make this an error?
+		// use PackOpt to let people tell how detailed the error reporting should be?
+		// println("dns: extra bytes in dns packet", off, "<", len(msg))
+	} else if dns.Truncated {
+		// Whether we ran into an error or not, we want to return that it
+		// was truncated
+		err = ErrTruncated
+	}
+	return err
+}
+
+// Convert a complete message to a string with dig-like output.
+func (dns *Msg) String() string {
+	if dns == nil {
+		return "<nil> MsgHdr"
+	}
+	s := dns.MsgHdr.String() + " "
+	s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", "
+	s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", "
+	s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", "
+	s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
+	if len(dns.Question) > 0 {
+		s += "\n;; QUESTION SECTION:\n"
+		for i := 0; i < len(dns.Question); i++ {
+			s += dns.Question[i].String() + "\n"
+		}
+	}
+	if len(dns.Answer) > 0 {
+		s += "\n;; ANSWER SECTION:\n"
+		for i := 0; i < len(dns.Answer); i++ {
+			if dns.Answer[i] != nil {
+				s += dns.Answer[i].String() + "\n"
+			}
+		}
+	}
+	if len(dns.Ns) > 0 {
+		s += "\n;; AUTHORITY SECTION:\n"
+		for i := 0; i < len(dns.Ns); i++ {
+			if dns.Ns[i] != nil {
+				s += dns.Ns[i].String() + "\n"
+			}
+		}
+	}
+	if len(dns.Extra) > 0 {
+		s += "\n;; ADDITIONAL SECTION:\n"
+		for i := 0; i < len(dns.Extra); i++ {
+			if dns.Extra[i] != nil {
+				s += dns.Extra[i].String() + "\n"
+			}
+		}
+	}
+	return s
+}
+
+// Len returns the message length when in (un)compressed wire format.
+// If dns.Compress is true, compression is taken into account. Len()
+// is provided as a faster way to get the size of the resulting packet
+// than packing it, measuring the size and discarding the buffer.
+func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
+
+// compressedLen returns the message length in compressed wire format when
+// compress is true; otherwise the uncompressed length is returned.
+func compressedLen(dns *Msg, compress bool) int {
+	// We always return one more than needed.
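
Before compressedLen's bookkeeping continues, a usage sketch of the Pack/Unpack pair defined above (import path as vendored here; a round trip through wire format):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.Id = 42
	m.RecursionDesired = true
	m.Question = []dns.Question{{Name: "example.org.", Qtype: dns.TypeA, Qclass: dns.ClassINET}}

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}

	var m2 dns.Msg
	if err := m2.Unpack(wire); err != nil {
		panic(err)
	}
	fmt.Println(m2.Question[0].Name, m2.RecursionDesired) // example.org. true
}
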
+ l := 12 // Message header is always 12 bytes + if compress { + compression := map[string]int{} + for _, r := range dns.Question { + l += r.len() + compressionLenHelper(compression, r.Name) + } + l += compressionLenSlice(l, compression, dns.Answer) + l += compressionLenSlice(l, compression, dns.Ns) + l += compressionLenSlice(l, compression, dns.Extra) + + return l + } + + for _, r := range dns.Question { + l += r.len() + } + for _, r := range dns.Answer { + if r != nil { + l += r.len() + } + } + for _, r := range dns.Ns { + if r != nil { + l += r.len() + } + } + for _, r := range dns.Extra { + if r != nil { + l += r.len() + } + } + + return l +} + +func compressionLenSlice(len int, c map[string]int, rs []RR) int { + var l int + for _, r := range rs { + if r == nil { + continue + } + // track this length, and the global length in len, while taking compression into account for both. + x := r.len() + l += x + len += x + + k, ok := compressionLenSearch(c, r.Header().Name) + if ok { + l += 1 - k + len += 1 - k + } + + if len < maxCompressionOffset { + compressionLenHelper(c, r.Header().Name) + } + + k, ok = compressionLenSearchType(c, r) + if ok { + l += 1 - k + len += 1 - k + } + + if len < maxCompressionOffset { + compressionLenHelperType(c, r) + } + } + return l +} + +// Put the parts of the name in the compression map. +func compressionLenHelper(c map[string]int, s string) { + pref := "" + lbs := Split(s) + for j := len(lbs) - 1; j >= 0; j-- { + pref = s[lbs[j]:] + if _, ok := c[pref]; !ok { + c[pref] = len(pref) + } + } +} + +// Look for each part in the compression map and returns its length, +// keep on searching so we get the longest match. +func compressionLenSearch(c map[string]int, s string) (int, bool) { + off := 0 + end := false + if s == "" { // don't bork on bogus data + return 0, false + } + for { + if _, ok := c[s[off:]]; ok { + return len(s[off:]), true + } + if end { + break + } + off, end = NextLabel(s, off) + } + return 0, false +} + +// Copy returns a new RR which is a deep-copy of r. +func Copy(r RR) RR { r1 := r.copy(); return r1 } + +// Len returns the length (in octets) of the uncompressed RR in wire format. +func Len(r RR) int { return r.len() } + +// Copy returns a new *Msg which is a deep-copy of dns. +func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } + +// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. 
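
To make compressionLenHelper above concrete: it records every label suffix of a name, so that a later name can be priced as its unique prefix plus a two-byte pointer to a shared tail. A short illustration (Split is this package's exported label splitter):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	s := "www.example.org."
	lbs := dns.Split(s) // label start offsets: [0 4 12]
	for j := len(lbs) - 1; j >= 0; j-- {
		fmt.Println(s[lbs[j]:]) // org. / example.org. / www.example.org.
	}
}
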
+func (dns *Msg) CopyTo(r1 *Msg) *Msg { + r1.MsgHdr = dns.MsgHdr + r1.Compress = dns.Compress + + if len(dns.Question) > 0 { + r1.Question = make([]Question, len(dns.Question)) + copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + } + + rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) + var rri int + + if len(dns.Answer) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Answer); i++ { + rrArr[rri] = dns.Answer[i].copy() + rri++ + } + r1.Answer = rrArr[rrbegin:rri:rri] + } + + if len(dns.Ns) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Ns); i++ { + rrArr[rri] = dns.Ns[i].copy() + rri++ + } + r1.Ns = rrArr[rrbegin:rri:rri] + } + + if len(dns.Extra) > 0 { + rrbegin := rri + for i := 0; i < len(dns.Extra); i++ { + rrArr[rri] = dns.Extra[i].copy() + rri++ + } + r1.Extra = rrArr[rrbegin:rri:rri] + } + + return r1 +} + +func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := PackDomainName(q.Name, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint16(q.Qtype, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(q.Qclass, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func unpackQuestion(msg []byte, off int) (Question, int, error) { + var ( + q Question + err error + ) + q.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qtype, off, err = unpackUint16(msg, off) + if err != nil { + return q, off, err + } + if off == len(msg) { + return q, off, nil + } + q.Qclass, off, err = unpackUint16(msg, off) + if off == len(msg) { + return q, off, nil + } + return q, off, err +} + +func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := packUint16(dh.Id, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Bits, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Qdcount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Ancount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Nscount, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(dh.Arcount, msg, off) + return off, err +} + +func unpackMsgHdr(msg []byte, off int) (Header, int, error) { + var ( + dh Header + err error + ) + dh.Id, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Bits, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Qdcount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Ancount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Nscount, off, err = unpackUint16(msg, off) + if err != nil { + return dh, off, err + } + dh.Arcount, off, err = unpackUint16(msg, off) + return dh, off, err +} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go new file mode 100644 index 0000000000..8ba609f726 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_generate.go @@ -0,0 +1,348 @@ +//+build ignore + +// msg_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate pack/unpack methods based on the struct tags. 
The generated source is +// written to zmsg.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" +) + +var packageHdr = ` +// Code generated by "go run msg_generate.go"; DO NOT EDIT. + +package dns + +` + +// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved. +func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + fmt.Fprint(b, "// pack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) + fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress) +if err != nil { + return off, err +} +headerEnd := off +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return off, err +} +`) + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("off, err = packStringTxt(rr.%s, msg, off)\n") + case `dns:"opt"`: + o("off, err = packDataOpt(rr.%s, msg, off)\n") + case `dns:"nsec"`: + o("off, err = packDataNsec(rr.%s, msg, off)\n") + case `dns:"domain-name"`: + o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: // ignored + case st.Tag(i) == `dns:"cdomain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") + case st.Tag(i) == `dns:"domain-name"`: + o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") + case st.Tag(i) == `dns:"a"`: + o("off, err = packDataA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"aaaa"`: + o("off, err = packDataAAAA(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"uint48"`: + o("off, err = packUint48(rr.%s, msg, off)\n") + case st.Tag(i) == `dns:"txt"`: + o("off, err = packString(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 + fallthrough + case 
st.Tag(i) == `dns:"base32"`: + o("off, err = packStringBase32(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("off, err = packStringBase64(rr.%s, msg, off)\n") + + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): + // directly write instead of using o() so we get the error check in the correct place + field := st.Field(i).Name() + fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. empty +if rr.%s != "-" { + off, err = packStringHex(rr.%s, msg, off) + if err != nil { + return off, err + } +} +`, field, field) + continue + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("off, err = packStringHex(rr.%s, msg, off)\n") + + case st.Tag(i) == `dns:"octet"`: + o("off, err = packStringOctet(rr.%s, msg, off)\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("off, err = packUint8(rr.%s, msg, off)\n") + case types.Uint16: + o("off, err = packUint16(rr.%s, msg, off)\n") + case types.Uint32: + o("off, err = packUint32(rr.%s, msg, off)\n") + case types.Uint64: + o("off, err = packUint64(rr.%s, msg, off)\n") + case types.String: + o("off, err = packString(rr.%s, msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + // We have packed everything, only now we know the rdlength of this RR + fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)") + fmt.Fprintln(b, "return off, nil }\n") + } + + fmt.Fprint(b, "// unpack*() functions\n\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, _ := getTypeStruct(o.Type(), scope) + + fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) + fmt.Fprintf(b, "rr := new(%s)\n", name) + fmt.Fprint(b, "rr.Hdr = h\n") + fmt.Fprint(b, `if noRdata(h) { +return rr, off, nil + } +var err error +rdStart := off +_ = rdStart + +`) + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { + fmt.Fprintf(b, s, st.Field(i).Name()) + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + } + + // size-* are special, because they reference a struct member we should use for the length. 
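
For orientation, this is roughly the pack method the loop above emits for a simple record; the EXAMPLE struct is hypothetical and the body is paraphrased from the templates, not copied from a real zmsg.go:

// type EXAMPLE struct {
//	Hdr    RR_Header
//	Target string `dns:"domain-name"`
//	Pref   uint16
// }
func (rr *EXAMPLE) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
	off, err := rr.Hdr.pack(msg, off, compression, compress)
	if err != nil {
		return off, err
	}
	headerEnd := off
	off, err = PackDomainName(rr.Target, msg, off, compression, false) // domain-name is never compressed
	if err != nil {
		return off, err
	}
	off, err = packUint16(rr.Pref, msg, off)
	if err != nil {
		return off, err
	}
	rr.Header().Rdlength = uint16(off - headerEnd)
	return off, nil
}
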
+ if strings.HasPrefix(st.Tag(i), `dns:"size-`) { + structMember := structMember(st.Tag(i)) + structTag := structTag(st.Tag(i)) + switch structTag { + case "hex": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base32": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + case "base64": + fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + fmt.Fprint(b, `if err != nil { +return rr, off, err +} +`) + continue + } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"txt"`: + o("rr.%s, off, err = unpackStringTxt(msg, off)\n") + case `dns:"opt"`: + o("rr.%s, off, err = unpackDataOpt(msg, off)\n") + case `dns:"nsec"`: + o("rr.%s, off, err = unpackDataNsec(msg, off)\n") + case `dns:"domain-name"`: + o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch st.Tag(i) { + case `dns:"-"`: // ignored + case `dns:"cdomain-name"`: + fallthrough + case `dns:"domain-name"`: + o("rr.%s, off, err = UnpackDomainName(msg, off)\n") + case `dns:"a"`: + o("rr.%s, off, err = unpackDataA(msg, off)\n") + case `dns:"aaaa"`: + o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") + case `dns:"uint48"`: + o("rr.%s, off, err = unpackUint48(msg, off)\n") + case `dns:"txt"`: + o("rr.%s, off, err = unpackString(msg, off)\n") + case `dns:"base32"`: + o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"base64"`: + o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"hex"`: + o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") + case `dns:"octet"`: + o("rr.%s, off, err = unpackStringOctet(msg, off)\n") + case "": + switch st.Field(i).Type().(*types.Basic).Kind() { + case types.Uint8: + o("rr.%s, off, err = unpackUint8(msg, off)\n") + case types.Uint16: + o("rr.%s, off, err = unpackUint16(msg, off)\n") + case types.Uint32: + o("rr.%s, off, err = unpackUint32(msg, off)\n") + case types.Uint64: + o("rr.%s, off, err = unpackUint64(msg, off)\n") + case types.String: + o("rr.%s, off, err = unpackString(msg, off)\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + // If we've hit len(msg) we return without error. + if i < st.NumFields()-1 { + fmt.Fprintf(b, `if off == len(msg) { +return rr, off, nil + } +`) + } + } + fmt.Fprintf(b, "return rr, off, err }\n\n") + } + // Generate typeToUnpack map + fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") + for _, name := range namedTypes { + if name == "RFC3597" { + continue + } + fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) + } + fmt.Fprintln(b, "}\n") + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("zmsg.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. 
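
Concretely, for the tag `dns:"size-base32:SaltLength"` (note that the closing quote is part of the tag string the two helpers below receive):

// structMember(`dns:"size-base32:SaltLength"`) == "SaltLength"
// structTag(`dns:"size-base32:SaltLength"`)    == "base32"
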
+func structMember(s string) string { + fields := strings.Split(s, ":") + if len(fields) == 0 { + return "" + } + f := fields[len(fields)-1] + // f should have a closing " + if len(f) > 1 { + return f[:len(f)-1] + } + return f +} + +// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. +func structTag(s string) string { + fields := strings.Split(s, ":") + if len(fields) < 2 { + return "" + } + return fields[1][len("\"size-"):] +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go new file mode 100644 index 0000000000..946d5acbf0 --- /dev/null +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -0,0 +1,637 @@ +package dns + +import ( + "encoding/base32" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "net" + "strconv" +) + +// helper functions called from the generated zmsg.go + +// These function are named after the tag to help pack/unpack, if there is no tag it is the name +// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or +// packDataDomainName. + +func unpackDataA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv4len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking a"} + } + a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) + off += net.IPv4len + return a, off, nil +} + +func packDataA(a net.IP, msg []byte, off int) (int, error) { + // It must be a slice of 4, even if it is 16, we encode only the first 4 + if off+net.IPv4len > len(msg) { + return len(msg), &Error{err: "overflow packing a"} + } + switch len(a) { + case net.IPv4len, net.IPv6len: + copy(msg[off:], a.To4()) + off += net.IPv4len + case 0: + // Allowed, for dynamic updates. + default: + return len(msg), &Error{err: "overflow packing a"} + } + return off, nil +} + +func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { + if off+net.IPv6len > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking aaaa"} + } + aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) + off += net.IPv6len + return aaaa, off, nil +} + +func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { + if off+net.IPv6len > len(msg) { + return len(msg), &Error{err: "overflow packing aaaa"} + } + + switch len(aaaa) { + case net.IPv6len: + copy(msg[off:], aaaa) + off += net.IPv6len + case 0: + // Allowed, dynamic updates. + default: + return len(msg), &Error{err: "overflow packing aaaa"} + } + return off, nil +} + +// unpackHeader unpacks an RR header, returning the offset to the end of the header and a +// re-sliced msg according to the expected length of the RR. 
+func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { + hdr := RR_Header{} + if off == len(msg) { + return hdr, off, msg, nil + } + + hdr.Name, off, err = UnpackDomainName(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rrtype, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Class, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Ttl, off, err = unpackUint32(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + hdr.Rdlength, off, err = unpackUint16(msg, off) + if err != nil { + return hdr, len(msg), msg, err + } + msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) + return hdr, off, msg, err +} + +// pack packs an RR header, returning the offset to the end of the header. +// See PackDomainName for documentation about the compression. +func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { + if off == len(msg) { + return off, nil + } + + off, err = PackDomainName(hdr.Name, msg, off, compression, compress) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rrtype, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Class, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint32(hdr.Ttl, msg, off) + if err != nil { + return len(msg), err + } + off, err = packUint16(hdr.Rdlength, msg, off) + if err != nil { + return len(msg), err + } + return off, nil +} + +// helper helper functions. + +// truncateMsgFromRdLength truncates msg to match the expected length of the RR. +// Returns an error if msg is smaller than the expected size. +func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { + lenrd := off + int(rdlength) + if lenrd > len(msg) { + return msg, &Error{err: "overflowing header size"} + } + return msg[:lenrd], nil +} + +func fromBase32(s []byte) (buf []byte, err error) { + for i, b := range s { + if b >= 'a' && b <= 'z' { + s[i] = b - 32 + } + } + buflen := base32.HexEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base32.HexEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase32(b []byte) string { return base32.HexEncoding.EncodeToString(b) } + +func fromBase64(s []byte) (buf []byte, err error) { + buflen := base64.StdEncoding.DecodedLen(len(s)) + buf = make([]byte, buflen) + n, err := base64.StdEncoding.Decode(buf, s) + buf = buf[:n] + return +} + +func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } + +// dynamicUpdate returns true if the Rdlength is zero. 
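
A side note grounded in fromBase32/toBase32 above: they use base32.HexEncoding, the RFC 4648 "extended hex" alphabet (0-9, A-V), which NSEC3 relies on because it preserves the sort order of the raw digests. A round-trip example:

package main

import (
	"bytes"
	"encoding/base32"
	"fmt"
)

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}
	enc := base32.HexEncoding.EncodeToString(raw)
	dec, err := base32.HexEncoding.DecodeString(enc)
	fmt.Println(enc, bytes.Equal(raw, dec), err) // RQMRTRO= true <nil>
}
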
+func noRdata(h RR_Header) bool { return h.Rdlength == 0 } + +func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { + if off+1 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint8"} + } + return uint8(msg[off]), off + 1, nil +} + +func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { + if off+1 > len(msg) { + return len(msg), &Error{err: "overflow packing uint8"} + } + msg[off] = byte(i) + return off + 1, nil +} + +func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { + if off+2 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint16"} + } + return binary.BigEndian.Uint16(msg[off:]), off + 2, nil +} + +func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { + if off+2 > len(msg) { + return len(msg), &Error{err: "overflow packing uint16"} + } + binary.BigEndian.PutUint16(msg[off:], i) + return off + 2, nil +} + +func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { + if off+4 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint32"} + } + return binary.BigEndian.Uint32(msg[off:]), off + 4, nil +} + +func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { + if off+4 > len(msg) { + return len(msg), &Error{err: "overflow packing uint32"} + } + binary.BigEndian.PutUint32(msg[off:], i) + return off + 4, nil +} + +func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { + if off+6 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} + } + // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) + i = (uint64(uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | + uint64(msg[off+4])<<8 | uint64(msg[off+5]))) + off += 6 + return i, off, nil +} + +func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { + if off+6 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64 as uint48"} + } + msg[off] = byte(i >> 40) + msg[off+1] = byte(i >> 32) + msg[off+2] = byte(i >> 24) + msg[off+3] = byte(i >> 16) + msg[off+4] = byte(i >> 8) + msg[off+5] = byte(i) + off += 6 + return off, nil +} + +func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { + if off+8 > len(msg) { + return 0, len(msg), &Error{err: "overflow unpacking uint64"} + } + return binary.BigEndian.Uint64(msg[off:]), off + 8, nil +} + +func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { + if off+8 > len(msg) { + return len(msg), &Error{err: "overflow packing uint64"} + } + binary.BigEndian.PutUint64(msg[off:], i) + off += 8 + return off, nil +} + +func unpackString(msg []byte, off int) (string, int, error) { + if off+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + l := int(msg[off]) + if off+l+1 > len(msg) { + return "", off, &Error{err: "overflow unpacking txt"} + } + s := make([]byte, 0, l) + for _, b := range msg[off+1 : off+1+l] { + switch b { + case '"', '\\': + s = append(s, '\\', b) + default: + if b < 32 || b > 127 { // unprintable + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + } else { + s = append(s, b) + } + } + } + off += 1 + l + return string(s), off, nil +} + +func packString(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packTxtString(s, msg, off, txtTmp) + if err 
!= nil { + return len(msg), err + } + return off, nil +} + +func unpackStringBase32(msg []byte, off, end int) (string, int, error) { + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base32"} + } + s := toBase32(msg[off:end]) + return s, end, nil +} + +func packStringBase32(s string, msg []byte, off int) (int, error) { + b32, err := fromBase32([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b32) > len(msg) { + return len(msg), &Error{err: "overflow packing base32"} + } + copy(msg[off:off+len(b32)], b32) + off += len(b32) + return off, nil +} + +func unpackStringBase64(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is base64 encoded value, so we don't need an explicit length + // to be set. Thus far all RR's that have base64 encoded fields have those as their + // last one. What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking base64"} + } + s := toBase64(msg[off:end]) + return s, end, nil +} + +func packStringBase64(s string, msg []byte, off int) (int, error) { + b64, err := fromBase64([]byte(s)) + if err != nil { + return len(msg), err + } + if off+len(b64) > len(msg) { + return len(msg), &Error{err: "overflow packing base64"} + } + copy(msg[off:off+len(b64)], b64) + off += len(b64) + return off, nil +} + +func unpackStringHex(msg []byte, off, end int) (string, int, error) { + // Rest of the RR is hex encoded value, so we don't need an explicit length + // to be set. NSEC and TSIG have hex fields with a length field. + // What we do need is the end of the RR! + if end > len(msg) { + return "", len(msg), &Error{err: "overflow unpacking hex"} + } + + s := hex.EncodeToString(msg[off:end]) + return s, end, nil +} + +func packStringHex(s string, msg []byte, off int) (int, error) { + h, err := hex.DecodeString(s) + if err != nil { + return len(msg), err + } + if off+(len(h)) > len(msg) { + return len(msg), &Error{err: "overflow packing hex"} + } + copy(msg[off:off+len(h)], h) + off += len(h) + return off, nil +} + +func unpackStringTxt(msg []byte, off int) ([]string, int, error) { + txt, off, err := unpackTxt(msg, off) + if err != nil { + return nil, len(msg), err + } + return txt, off, nil +} + +func packStringTxt(s []string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
+ off, err := packTxt(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { + var edns []EDNS0 +Option: + code := uint16(0) + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code = binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + switch code { + case EDNS0NSID: + e := new(EDNS0_NSID) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0SUBNET: + e := new(EDNS0_SUBNET) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0COOKIE: + e := new(EDNS0_COOKIE) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0UL: + e := new(EDNS0_UL) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0LLQ: + e := new(EDNS0_LLQ) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DAU: + e := new(EDNS0_DAU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0DHU: + e := new(EDNS0_DHU) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0N3U: + e := new(EDNS0_N3U) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + case EDNS0PADDING: + e := new(EDNS0_PADDING) + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + default: + e := new(EDNS0_LOCAL) + e.Code = code + if err := e.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, e) + off += int(optlen) + } + + if off < len(msg) { + goto Option + } + + return edns, off, nil +} + +func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { + for _, el := range options { + b, err := el.pack() + if err != nil || off+3 > len(msg) { + return len(msg), &Error{err: "overflow packing opt"} + } + binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code + binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length + off += 4 + if off+len(b) > len(msg) { + copy(msg[off:], b) + off = len(msg) + continue + } + // Actual data + copy(msg[off:off+len(b)], b) + off += len(b) + } + return off, nil +} + +func unpackStringOctet(msg []byte, off int) (string, int, error) { + s := string(msg[off:]) + return s, len(msg), nil +} + +func packStringOctet(s string, msg []byte, off int) (int, error) { + txtTmp := make([]byte, 256*4+1) + off, err := packOctetString(s, msg, off, txtTmp) + if err != nil { + return len(msg), err + } + return off, nil +} + +func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { + var nsec []uint16 + length, window, lastwindow := 0, 0, -1 + for off < len(msg) { + if off+2 > len(msg) { + return nsec, len(msg), &Error{err: 
"overflow unpacking nsecx"} + } + window = int(msg[off]) + length = int(msg[off+1]) + off += 2 + if window <= lastwindow { + // RFC 4034: Blocks are present in the NSEC RR RDATA in + // increasing numerical order. + return nsec, len(msg), &Error{err: "out of order NSEC block"} + } + if length == 0 { + // RFC 4034: Blocks with no types present MUST NOT be included. + return nsec, len(msg), &Error{err: "empty NSEC block"} + } + if length > 32 { + return nsec, len(msg), &Error{err: "NSEC block too long"} + } + if off+length > len(msg) { + return nsec, len(msg), &Error{err: "overflowing NSEC block"} + } + + // Walk the bytes in the window and extract the type bits + for j := 0; j < length; j++ { + b := msg[off+j] + // Check the bits one by one, and set the type + if b&0x80 == 0x80 { + nsec = append(nsec, uint16(window*256+j*8+0)) + } + if b&0x40 == 0x40 { + nsec = append(nsec, uint16(window*256+j*8+1)) + } + if b&0x20 == 0x20 { + nsec = append(nsec, uint16(window*256+j*8+2)) + } + if b&0x10 == 0x10 { + nsec = append(nsec, uint16(window*256+j*8+3)) + } + if b&0x8 == 0x8 { + nsec = append(nsec, uint16(window*256+j*8+4)) + } + if b&0x4 == 0x4 { + nsec = append(nsec, uint16(window*256+j*8+5)) + } + if b&0x2 == 0x2 { + nsec = append(nsec, uint16(window*256+j*8+6)) + } + if b&0x1 == 0x1 { + nsec = append(nsec, uint16(window*256+j*8+7)) + } + } + off += length + lastwindow = window + } + return nsec, off, nil +} + +func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { + if len(bitmap) == 0 { + return off, nil + } + var lastwindow, lastlength uint16 + for j := 0; j < len(bitmap); j++ { + t := bitmap[j] + window := t / 256 + length := (t-window*256)/8 + 1 + if window > lastwindow && lastlength != 0 { // New window, jump to the new offset + off += int(lastlength) + 2 + lastlength = 0 + } + if window < lastwindow || length < lastlength { + return len(msg), &Error{err: "nsec bits out of order"} + } + if off+2+int(length) > len(msg) { + return len(msg), &Error{err: "overflow packing nsec"} + } + // Setting the window # + msg[off] = byte(window) + // Setting the octets length + msg[off+1] = byte(length) + // Setting the bit value for the type in the right octet + msg[off+1+int(length)] |= byte(1 << (7 - (t % 8))) + lastwindow, lastlength = window, length + } + off += int(lastlength) + 2 + return off, nil +} + +func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { + var ( + servers []string + s string + err error + ) + if end > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking domain names"} + } + for off < end { + s, off, err = UnpackDomainName(msg, off) + if err != nil { + return servers, len(msg), err + } + servers = append(servers, s) + } + return servers, off, nil +} + +func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { + var err error + for j := 0; j < len(names); j++ { + off, err = PackDomainName(names[j], msg, off, compression, false && compress) + if err != nil { + return len(msg), err + } + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go new file mode 100644 index 0000000000..9b908c4478 --- /dev/null +++ b/vendor/github.com/miekg/dns/nsecx.go @@ -0,0 +1,106 @@ +package dns + +import ( + "crypto/sha1" + "hash" + "strings" +) + +type saltWireFmt struct { + Salt string `dns:"size-hex"` +} + +// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
+func HashName(label string, ha uint8, iter uint16, salt string) string { + saltwire := new(saltWireFmt) + saltwire.Salt = salt + wire := make([]byte, DefaultMsgSize) + n, err := packSaltWire(saltwire, wire) + if err != nil { + return "" + } + wire = wire[:n] + name := make([]byte, 255) + off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) + if err != nil { + return "" + } + name = name[:off] + var s hash.Hash + switch ha { + case SHA1: + s = sha1.New() + default: + return "" + } + + // k = 0 + s.Write(name) + s.Write(wire) + nsec3 := s.Sum(nil) + // k > 0 + for k := uint16(0); k < iter; k++ { + s.Reset() + s.Write(nsec3) + s.Write(wire) + nsec3 = s.Sum(nsec3[:0]) + } + return toBase32(nsec3) +} + +// Cover returns true if a name is covered by the NSEC3 record +func (rr *NSEC3) Cover(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + + nextHash := rr.NextDomain + if ownerHash == nextHash { // empty interval + return false + } + if ownerHash > nextHash { // end of zone + if nameHash > ownerHash { // covered since there is nothing after ownerHash + return true + } + return nameHash < nextHash // if nameHash is before beginning of zone it is covered + } + if nameHash < ownerHash { // nameHash is before ownerHash, not covered + return false + } + return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) +} + +// Match returns true if a name matches the NSEC3 record +func (rr *NSEC3) Match(name string) bool { + nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) + owner := strings.ToUpper(rr.Hdr.Name) + labelIndices := Split(owner) + if len(labelIndices) < 2 { + return false + } + ownerHash := owner[:labelIndices[1]-1] + ownerZone := owner[labelIndices[1]:] + if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone + return false + } + if ownerHash == nameHash { + return true + } + return false +} + +func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) { + off, err := packStringHex(sw.Salt, msg, 0) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go new file mode 100644 index 0000000000..6b08e6e959 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -0,0 +1,149 @@ +package dns + +import ( + "fmt" + "strings" +) + +// PrivateRdata is an interface used for implementing "Private Use" RR types, see +// RFC 6895. This allows one to experiment with new RR types, without requesting an +// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. +type PrivateRdata interface { + // String returns the text presentaton of the Rdata of the Private RR. + String() string + // Parse parses the Rdata of the private RR. + Parse([]string) error + // Pack is used when packing a private RR into a buffer. + Pack([]byte) (int, error) + // Unpack is used when unpacking a private RR from a buffer. + // TODO(miek): diff. signature than Pack, see edns0.go for instance. + Unpack([]byte) (int, error) + // Copy copies the Rdata. + Copy(PrivateRdata) error + // Len returns the length in octets of the Rdata. 
+ Len() int +} + +// PrivateRR represents an RR that uses a PrivateRdata user-defined type. +// It mocks normal RRs and implements dns.RR interface. +type PrivateRR struct { + Hdr RR_Header + Data PrivateRdata +} + +func mkPrivateRR(rrtype uint16) *PrivateRR { + // Panics if RR is not an instance of PrivateRR. + rrfunc, ok := TypeToRR[rrtype] + if !ok { + panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) + } + + anyrr := rrfunc() + switch rr := anyrr.(type) { + case *PrivateRR: + return rr + } + panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) +} + +// Header return the RR header of r. +func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } + +func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } + +// Private len and copy parts to satisfy RR interface. +func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } +func (r *PrivateRR) copy() RR { + // make new RR like this: + rr := mkPrivateRR(r.Hdr.Rrtype) + newh := r.Hdr.copyHeader() + rr.Hdr = *newh + + err := r.Data.Copy(rr.Data) + if err != nil { + panic("dns: got value that could not be used to copy Private rdata") + } + return rr +} +func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := r.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + n, err := r.Data.Pack(msg[off:]) + if err != nil { + return len(msg), err + } + off += n + r.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// PrivateHandle registers a private resource record type. It requires +// string and numeric representation of private RR type and generator function as argument. +func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { + rtypestr = strings.ToUpper(rtypestr) + + TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } + TypeToString[rtype] = rtypestr + StringToType[rtypestr] = rtype + + typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { + if noRdata(h) { + return &h, off, nil + } + var err error + + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + off1, err := rr.Data.Unpack(msg[off:]) + off += off1 + if err != nil { + return rr, off, err + } + return rr, off, err + } + + setPrivateRR := func(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := mkPrivateRR(h.Rrtype) + rr.Hdr = h + + var l lex + text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 + Fetch: + for { + // TODO(miek): we could also be returning _QUOTE, this might or might not + // be an issue (basically parsing TXT becomes hard) + switch l = <-c; l.value { + case zNewline, zEOF: + break Fetch + case zString: + text = append(text, l.token) + } + } + + err := rr.Data.Parse(text) + if err != nil { + return nil, &ParseError{f, err.Error(), l}, "" + } + + return rr, nil, "" + } + + typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} +} + +// PrivateHandleRemove removes defenitions required to support private RR type. 
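
A sketch of how PrivateHandle above is meant to be used: define a PrivateRdata implementation, register it under a code from the RFC 6895 private-use range (0xFF00-0xFFFE), and zone parsing and packing pick it up. The TOKEN type here is made up:

package main

import (
	"strings"

	"github.com/miekg/dns"
)

// TOKEN is a hypothetical private rdata holding a single string.
type TOKEN struct{ Value string }

func (t *TOKEN) String() string                 { return t.Value }
func (t *TOKEN) Parse(txt []string) error       { t.Value = strings.Join(txt, " "); return nil }
func (t *TOKEN) Pack(buf []byte) (int, error)   { return copy(buf, t.Value), nil }
func (t *TOKEN) Unpack(buf []byte) (int, error) { t.Value = string(buf); return len(buf), nil }
func (t *TOKEN) Len() int                       { return len(t.Value) }
func (t *TOKEN) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*TOKEN)
	if !ok {
		return dns.ErrRdata
	}
	d.Value = t.Value
	return nil
}

func main() {
	const typeTOKEN = 0xFF01
	dns.PrivateHandle("TOKEN", typeTOKEN, func() dns.PrivateRdata { return new(TOKEN) })
	defer dns.PrivateHandleRemove(typeTOKEN)

	rr, err := dns.NewRR("example.org. 3600 IN TOKEN hello")
	_, _ = rr, err // rr is a *dns.PrivateRR wrapping a TOKEN
}
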
+func PrivateHandleRemove(rtype uint16) { + rtypestr, ok := TypeToString[rtype] + if ok { + delete(TypeToRR, rtype) + delete(TypeToString, rtype) + delete(typeToparserFunc, rtype) + delete(StringToType, rtypestr) + delete(typeToUnpack, rtype) + } + return +} diff --git a/vendor/github.com/miekg/dns/rawmsg.go b/vendor/github.com/miekg/dns/rawmsg.go new file mode 100644 index 0000000000..6e21fba7e1 --- /dev/null +++ b/vendor/github.com/miekg/dns/rawmsg.go @@ -0,0 +1,49 @@ +package dns + +import "encoding/binary" + +// rawSetRdlength sets the rdlength in the header of +// the RR. The offset 'off' must be positioned at the +// start of the header of the RR, 'end' must be the +// end of the RR. +func rawSetRdlength(msg []byte, off, end int) bool { + l := len(msg) +Loop: + for { + if off+1 > l { + return false + } + c := int(msg[off]) + off++ + switch c & 0xC0 { + case 0x00: + if c == 0x00 { + // End of the domainname + break Loop + } + if off+c > l { + return false + } + off += c + + case 0xC0: + // pointer, next byte included, ends domainname + off++ + break Loop + } + } + // The domainname has been seen, we at the start of the fixed part in the header. + // Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length. + off += 2 + 2 + 4 + if off+2 > l { + return false + } + //off+1 is the end of the header, 'end' is the end of the rr + //so 'end' - 'off+2' is the length of the rdata + rdatalen := end - (off + 2) + if rdatalen > 0xFFFF { + return false + } + binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen)) + return true +} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go new file mode 100644 index 0000000000..f6e7a47a6e --- /dev/null +++ b/vendor/github.com/miekg/dns/reverse.go @@ -0,0 +1,38 @@ +package dns + +// StringToType is the reverse of TypeToString, needed for string parsing. +var StringToType = reverseInt16(TypeToString) + +// StringToClass is the reverse of ClassToString, needed for string parsing. +var StringToClass = reverseInt16(ClassToString) + +// StringToOpcode is a map of opcodes to strings. +var StringToOpcode = reverseInt(OpcodeToString) + +// StringToRcode is a map of rcodes to strings. +var StringToRcode = reverseInt(RcodeToString) + +// Reverse a map +func reverseInt8(m map[uint8]string) map[string]uint8 { + n := make(map[string]uint8, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt16(m map[uint16]string) map[string]uint16 { + n := make(map[string]uint16, len(m)) + for u, s := range m { + n[s] = u + } + return n +} + +func reverseInt(m map[int]string) map[string]int { + n := make(map[string]int, len(m)) + for u, s := range m { + n[s] = u + } + return n +} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go new file mode 100644 index 0000000000..c415bdd6c3 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize.go @@ -0,0 +1,84 @@ +package dns + +// Dedup removes identical RRs from rrs. It preserves the original ordering. +// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies +// rrs. +// m is used to store the RRs temporary. If it is nil a new map will be allocated. +func Dedup(rrs []RR, m map[string]RR) []RR { + if m == nil { + m = make(map[string]RR) + } + // Save the keys, so we don't have to call normalizedString twice. + keys := make([]*string, 0, len(rrs)) + + for _, r := range rrs { + key := normalizedString(r) + keys = append(keys, &key) + if _, ok := m[key]; ok { + // Shortest TTL wins. 
+	if m[key].Header().Ttl > r.Header().Ttl {
+		m[key].Header().Ttl = r.Header().Ttl
+	}
+	continue
+	}
+
+	m[key] = r
+	}
+	// If the length of the result map equals the amount of RRs we got,
+	// it means they were all different. We can then just return the original rrset.
+	if len(m) == len(rrs) {
+		return rrs
+	}
+
+	j := 0
+	for i, r := range rrs {
+		// If keys[i] lives in the map, we should copy and remove it.
+		if _, ok := m[*keys[i]]; ok {
+			delete(m, *keys[i])
+			rrs[j] = r
+			j++
+		}
+
+		if len(m) == 0 {
+			break
+		}
+	}
+
+	return rrs[:j]
+}
+
+// normalizedString returns a normalized string from r. The TTL
+// is removed and the domain name is lowercased. We go from this:
+// DomainName TTL CLASS TYPE RDATA to:
+// lowercasename CLASS TYPE...
+func normalizedString(r RR) string {
+	// A string Go DNS makes has: domainname TTL ...
+	b := []byte(r.String())
+
+	// find the first non-escaped tab, then another, so we capture where the TTL lives.
+	esc := false
+	ttlStart, ttlEnd := 0, 0
+	for i := 0; i < len(b) && ttlEnd == 0; i++ {
+		switch {
+		case b[i] == '\\':
+			esc = !esc
+		case b[i] == '\t' && !esc:
+			if ttlStart == 0 {
+				ttlStart = i
+				continue
+			}
+			if ttlEnd == 0 {
+				ttlEnd = i
+			}
+		case b[i] >= 'A' && b[i] <= 'Z' && !esc:
+			b[i] += 32
+		default:
+			esc = false
+		}
+	}
+
+	// remove TTL.
+	copy(b[ttlStart:], b[ttlEnd:])
+	cut := ttlEnd - ttlStart
+	return string(b[:len(b)-cut])
+}
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
new file mode 100644
index 0000000000..f9cd47401d
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -0,0 +1,1007 @@
+package dns
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+const maxTok = 2048 // Largest token we can return.
+const maxUint16 = 1<<16 - 1
+
+// Tokenize an RFC 1035 zone file. The tokenizer will normalize it:
+// * Add ownernames if they are left blank;
+// * Suppress sequences of spaces;
+// * Make each RR fit on one line (_NEWLINE is sent as last)
+// * Handle comments: ;
+// * Handle braces - anywhere.
+const (
+	// Zonefile
+	zEOF = iota
+	zString
+	zBlank
+	zQuote
+	zNewline
+	zRrtpe
+	zOwner
+	zClass
+	zDirOrigin   // $ORIGIN
+	zDirTTL      // $TTL
+	zDirInclude  // $INCLUDE
+	zDirGenerate // $GENERATE
+
+	// Privatekey file
+	zValue
+	zKey
+
+	zExpectOwnerDir      // Ownername
+	zExpectOwnerBl       // Whitespace after the ownername
+	zExpectAny           // Expect rrtype, ttl or class
+	zExpectAnyNoClass    // Expect rrtype or ttl
+	zExpectAnyNoClassBl  // The whitespace after _EXPECT_ANY_NOCLASS
+	zExpectAnyNoTTL      // Expect rrtype or class
+	zExpectAnyNoTTLBl    // Whitespace after _EXPECT_ANY_NOTTL
+	zExpectRrtype        // Expect rrtype
+	zExpectRrtypeBl      // Whitespace BEFORE rrtype
+	zExpectRdata         // The first element of the rdata
+	zExpectDirTTLBl      // Space after directive $TTL
+	zExpectDirTTL        // Directive $TTL
+	zExpectDirOriginBl   // Space after directive $ORIGIN
+	zExpectDirOrigin     // Directive $ORIGIN
+	zExpectDirIncludeBl  // Space after directive $INCLUDE
+	zExpectDirInclude    // Directive $INCLUDE
+	zExpectDirGenerate   // Directive $GENERATE
+	zExpectDirGenerateBl // Space after directive $GENERATE
+)
+
+// ParseError is a parsing error. It contains the parse error and the location in the io.Reader
+// where the error occurred.
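
Before the scanner types, one illustration of Dedup from sanitize.go above; NewRR is defined just below. Since normalizedString lowercases the owner name and drops the TTL, these two records collide and the lower TTL survives:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	a1, _ := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	a2, _ := dns.NewRR("EXAMPLE.ORG. 60 IN A 192.0.2.1") // duplicate, different case and TTL

	rrs := dns.Dedup([]dns.RR{a1, a2}, nil)
	fmt.Println(len(rrs), rrs[0].Header().Ttl) // 1 60
}
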
+type ParseError struct { + file string + err string + lex lex +} + +func (e *ParseError) Error() (s string) { + if e.file != "" { + s = e.file + ": " + } + s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) + return +} + +type lex struct { + token string // text of the token + tokenUpper string // uppercase text of the token + length int // length of the token + err bool // when true, token text has lexer error + value uint8 // value: zString, zBlank, etc. + line int // line in the file + column int // column in the file + torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar + comment string // any comment text seen +} + +// Token holds a token, as returned when a zone file is parsed. +type Token struct { + // The scanned resource record; it is only set when Error is nil. + RR + // When an error occurred, this has the error specifics. + Error *ParseError + // A potential comment positioned after the RR and on the same line. + Comment string +} + +// ttlState describes the state necessary to fill in an omitted RR TTL +type ttlState struct { + ttl uint32 // ttl is the current default TTL + isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive +} + +// NewRR reads the RR contained in the string s. Only the first RR is +// returned. If s contains no RR, return nil with no error. The class +// defaults to IN and TTL defaults to 3600. The full zone file syntax +// like $TTL, $ORIGIN, etc. is supported. All fields of the returned +// RR are set, except RR.Header().Rdlength which is set to 0. +func NewRR(s string) (RR, error) { + if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline + return ReadRR(strings.NewReader(s+"\n"), "") + } + return ReadRR(strings.NewReader(s), "") +} + +// ReadRR reads the RR contained in q. +// See NewRR for more documentation. +func ReadRR(q io.Reader, filename string) (RR, error) { + defttl := &ttlState{defaultTtl, false} + r := <-parseZoneHelper(q, ".", filename, defttl, 1) + if r == nil { + return nil, nil + } + + if r.Error != nil { + return nil, r.Error + } + return r.RR, nil +} + +// ParseZone reads an RFC 1035 style zonefile from r. It returns *Tokens on the +// returned channel, each consisting of either a parsed RR and optional comment +// or a nil RR and an error. The string file is only used +// in error reporting. The string origin is used as the initial origin, as +// if the file started with an $ORIGIN directive. +// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are supported. +// The channel t is closed by ParseZone when the end of r is reached. +// +// Basic usage pattern when reading from a string (z) containing the +// zone data: +// +// for x := range dns.ParseZone(strings.NewReader(z), "", "") { +// if x.Error != nil { +// // log.Println(x.Error) +// } else { +// // Do something with x.RR +// } +// } +// +// Comments specified after an RR (and on the same line!) are returned too: +// +// foo. IN A 10.0.0.1 ; this is a comment +// +// The text "; this is a comment" is returned in Token.Comment. Comments inside the +// RR are discarded. Comments on a line by themselves are discarded too.
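+// Editor's note: as the parser below enforces, $INCLUDE files may nest at most +// seven levels deep; anything deeper is reported as a "too deeply nested" error.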
+func ParseZone(r io.Reader, origin, file string) chan *Token { + return parseZoneHelper(r, origin, file, nil, 10000) +} + +func parseZoneHelper(r io.Reader, origin, file string, defttl *ttlState, chansize int) chan *Token { + t := make(chan *Token, chansize) + go parseZone(r, origin, file, defttl, t, 0) + return t +} + +func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, include int) { + defer func() { + if include == 0 { + close(t) + } + }() + s, cancel := scanInit(r) + c := make(chan lex) + // Start the lexer + go zlexer(s, c) + + defer func() { + cancel() + // zlexer can send up to three tokens, the next one and possibly 2 remainders. + // Do a non-blocking read. + _, ok := <-c + _, ok = <-c + _, ok = <-c + if !ok { + // too bad + } + }() + // 6 possible beginnings of a line, _ is a space + // 0. zRRTYPE -> all omitted until the rrtype + // 1. zOwner _ zRrtype -> class/ttl omitted + // 2. zOwner _ zString _ zRrtype -> class omitted + // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class + // 4. zOwner _ zClass _ zRrtype -> ttl omitted + // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) + // After detecting these, we know the zRrtype so we can jump to functions + // handling the rdata for each of these types. + + if origin != "" { + origin = Fqdn(origin) + if _, ok := IsDomainName(origin); !ok { + t <- &Token{Error: &ParseError{f, "bad initial origin name", lex{}}} + return + } + } + + st := zExpectOwnerDir // initial state + var h RR_Header + var prevName string + for l := range c { + // Lexer spotted an error already + if l.err == true { + t <- &Token{Error: &ParseError{f, l.token, l}} + return + + } + switch st { + case zExpectOwnerDir: + // We can also expect a directive, like $TTL or $ORIGIN + if defttl != nil { + h.Ttl = defttl.ttl + } + h.Class = ClassINET + switch l.value { + case zNewline: + st = zExpectOwnerDir + case zOwner: + h.Name = l.token + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad owner name", l}} + return + } + h.Name = name + prevName = h.Name + st = zExpectOwnerBl + case zDirTTL: + st = zExpectDirTTLBl + case zDirOrigin: + st = zExpectDirOriginBl + case zDirInclude: + st = zExpectDirIncludeBl + case zDirGenerate: + st = zExpectDirGenerateBl + case zRrtpe: + h.Name = prevName + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Name = prevName + h.Class = l.torc + st = zExpectAnyNoClassBl + case zBlank: + // Discard, can happen when there is nothing on the + // line except the RR type + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectAnyNoTTLBl + + default: + t <- &Token{Error: &ParseError{f, "syntax error at beginning", l}} + return + } + case zExpectDirIncludeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $INCLUDE-directive", l}} + return + } + st = zExpectDirInclude + case zExpectDirInclude: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $INCLUDE value, not this...", l}} + return + } + neworigin := origin // There may be optionally a new origin set after the filename, if not use current one + switch l := <-c; l.value { + case zBlank: + l := <-c + if l.value == zString { + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + neworigin = name + } 
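+ // Editor's note: after the optional new origin, only a newline or + // EOF may follow; anything else is rejected as garbage below.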
+ case zNewline, zEOF: + // Ok + default: + t <- &Token{Error: &ParseError{f, "garbage after $INCLUDE", l}} + return + } + // Start with the new file + includePath := l.token + if !filepath.IsAbs(includePath) { + includePath = filepath.Join(filepath.Dir(f), includePath) + } + r1, e1 := os.Open(includePath) + if e1 != nil { + msg := fmt.Sprintf("failed to open `%s'", l.token) + if !filepath.IsAbs(l.token) { + msg += fmt.Sprintf(" as `%s'", includePath) + } + t <- &Token{Error: &ParseError{f, msg, l}} + return + } + if include+1 > 7 { + t <- &Token{Error: &ParseError{f, "too deeply nested $INCLUDE", l}} + return + } + parseZone(r1, neworigin, includePath, defttl, t, include+1) + st = zExpectOwnerDir + case zExpectDirTTLBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $TTL-directive", l}} + return + } + st = zExpectDirTTL + case zExpectDirTTL: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + return + } + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "expecting $TTL value, not this...", l}} + return + } + defttl = &ttlState{ttl, true} + st = zExpectOwnerDir + case zExpectDirOriginBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $ORIGIN-directive", l}} + return + } + st = zExpectDirOrigin + case zExpectDirOrigin: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $ORIGIN value, not this...", l}} + return + } + if e, _ := slurpRemainder(c, f); e != nil { + t <- &Token{Error: e} + } + name, ok := toAbsoluteName(l.token, origin) + if !ok { + t <- &Token{Error: &ParseError{f, "bad origin name", l}} + return + } + origin = name + st = zExpectOwnerDir + case zExpectDirGenerateBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after $GENERATE-directive", l}} + return + } + st = zExpectDirGenerate + case zExpectDirGenerate: + if l.value != zString { + t <- &Token{Error: &ParseError{f, "expecting $GENERATE value, not this...", l}} + return + } + if errMsg := generate(l, c, t, origin); errMsg != "" { + t <- &Token{Error: &ParseError{f, errMsg, l}} + return + } + st = zExpectOwnerDir + case zExpectOwnerBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank after owner", l}} + return + } + st = zExpectAny + case zExpectAny: + switch l.value { + case zRrtpe: + if defttl == nil { + t <- &Token{Error: &ParseError{f, "missing TTL with no previous value", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zClass: + h.Class = l.torc + st = zExpectAnyNoClassBl + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectAnyNoTTLBl + default: + t <- &Token{Error: &ParseError{f, "expecting RR type, TTL or class, not this...", l}} + return + } + case zExpectAnyNoClassBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before class", l}} + return + } + st = zExpectAnyNoClass + case zExpectAnyNoTTLBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before TTL", l}} + return + } + st = zExpectAnyNoTTL + case zExpectAnyNoTTL: + switch l.value { + case zClass: + h.Class = l.torc + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: 
&ParseError{f, "expecting RR type or class, not this...", l}} + return + } + case zExpectAnyNoClass: + switch l.value { + case zString: + ttl, ok := stringToTTL(l.token) + if !ok { + t <- &Token{Error: &ParseError{f, "not a TTL", l}} + return + } + h.Ttl = ttl + if defttl == nil || !defttl.isByDirective { + defttl = &ttlState{ttl, false} + } + st = zExpectRrtypeBl + case zRrtpe: + h.Rrtype = l.torc + st = zExpectRdata + default: + t <- &Token{Error: &ParseError{f, "expecting RR type or TTL, not this...", l}} + return + } + case zExpectRrtypeBl: + if l.value != zBlank { + t <- &Token{Error: &ParseError{f, "no blank before RR type", l}} + return + } + st = zExpectRrtype + case zExpectRrtype: + if l.value != zRrtpe { + t <- &Token{Error: &ParseError{f, "unknown RR type", l}} + return + } + h.Rrtype = l.torc + st = zExpectRdata + case zExpectRdata: + r, e, c1 := setRR(h, c, origin, f) + if e != nil { + // If e.lex is nil than we have encounter a unknown RR type + // in that case we substitute our current lex token + if e.lex.token == "" && e.lex.value == 0 { + e.lex = l // Uh, dirty + } + t <- &Token{Error: e} + return + } + t <- &Token{RR: r, Comment: c1} + st = zExpectOwnerDir + } + } + // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this + // is not an error, because an empty zone file is still a zone file. +} + +// zlexer scans the sourcefile and returns tokens on the channel c. +func zlexer(s *scan, c chan lex) { + var l lex + str := make([]byte, maxTok) // Should be enough for any token + stri := 0 // Offset in str (0 means empty) + com := make([]byte, maxTok) // Hold comment text + comi := 0 + quote := false + escape := false + space := false + commt := false + rrtype := false + owner := true + brace := 0 + x, err := s.tokenText() + defer close(c) + for err == nil { + l.column = s.position.Column + l.line = s.position.Line + if stri >= maxTok { + l.token = "token length insufficient for parsing" + l.err = true + c <- l + return + } + if comi >= maxTok { + l.token = "comment length insufficient for parsing" + l.err = true + c <- l + return + } + + switch x { + case ' ', '\t': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if commt { + com[comi] = x + comi++ + break + } + if stri == 0 { + // Space directly in the beginning, handled in the grammar + } else if owner { + // If we have a string and its the first, make it an owner + l.value = zOwner + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + // escape $... 
start with a \ not a $, so this will work + switch l.tokenUpper { + case "$TTL": + l.value = zDirTTL + case "$ORIGIN": + l.value = zDirOrigin + case "$INCLUDE": + l.value = zDirInclude + case "$GENERATE": + l.value = zDirGenerate + } + c <- l + } else { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } else { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok := typeToInt(l.token) + if !ok { + l.token = "unknown RR type" + l.err = true + c <- l + return + } + l.value = zRrtpe + rrtype = true + l.torc = t + } + } + if t, ok := StringToClass[l.tokenUpper]; ok { + l.value = zClass + l.torc = t + } else { + if strings.HasPrefix(l.tokenUpper, "CLASS") { + t, ok := classToInt(l.token) + if !ok { + l.token = "unknown class" + l.err = true + c <- l + return + } + l.value = zClass + l.torc = t + } + } + } + c <- l + } + stri = 0 + + if !space && !commt { + l.value = zBlank + l.token = " " + l.length = 1 + c <- l + } + owner = false + space = true + case ';': + if escape { + escape = false + str[stri] = x + stri++ + break + } + if quote { + // Inside quotes this is legal + str[stri] = x + stri++ + break + } + if stri > 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + c <- l + stri = 0 + } + commt = true + com[comi] = ';' + comi++ + case '\r': + escape = false + if quote { + str[stri] = x + stri++ + break + } + // discard if outside of quotes + case '\n': + escape = false + // Escaped newline + if quote { + str[stri] = x + stri++ + break + } + // inside quotes this is legal + if commt { + // Reset a comment + commt = false + rrtype = false + stri = 0 + // If not in a brace this ends the comment AND the RR + if brace == 0 { + owner = true + owner = true + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + l.comment = string(com[:comi]) + c <- l + l.comment = "" + comi = 0 + break + } + com[comi] = ' ' // convert newline to space + comi++ + break + } + + if brace == 0 { + // If there is previous text, we should output it here + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + + l.length = stri + if !rrtype { + if t, ok := StringToType[l.tokenUpper]; ok { + l.value = zRrtpe + l.torc = t + rrtype = true + } + } + c <- l + } + l.value = zNewline + l.token = "\n" + l.tokenUpper = l.token + l.length = 1 + c <- l + stri = 0 + commt = false + rrtype = false + owner = true + comi = 0 + } + case '\\': + // comments do not get escaped chars, everything is copied + if commt { + com[comi] = x + comi++ + break + } + // something already escaped must be in string + if escape { + str[stri] = x + stri++ + escape = false + break + } + // something escaped outside of string gets added to string + str[stri] = x + stri++ + escape = true + case '"': + if commt { + com[comi] = x + comi++ + break + } + if escape { + str[stri] = x + stri++ + escape = false + break + } + space = false + // send previous gathered text and the quote + if stri != 0 { + l.value = zString + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + + c <- l + stri = 0 + } + + // send quote itself as separate token + l.value = zQuote + l.token = "\"" + l.tokenUpper = l.token + l.length = 1 + c <- l + quote = !quote + case '(', ')': + if commt { + com[comi] = x + comi++ + break + } + if 
escape { + str[stri] = x + stri++ + escape = false + break + } + if quote { + str[stri] = x + stri++ + break + } + switch x { + case ')': + brace-- + if brace < 0 { + l.token = "extra closing brace" + l.tokenUpper = l.token + l.err = true + c <- l + return + } + case '(': + brace++ + } + default: + escape = false + if commt { + com[comi] = x + comi++ + break + } + str[stri] = x + stri++ + space = false + } + x, err = s.tokenText() + } + if stri > 0 { + // Send remainder + l.token = string(str[:stri]) + l.tokenUpper = strings.ToUpper(l.token) + l.length = stri + l.value = zString + c <- l + } + if brace != 0 { + l.token = "unbalanced brace" + l.tokenUpper = l.token + l.err = true + c <- l + } +} + +// Extract the class number from CLASSxx +func classToInt(token string) (uint16, bool) { + offset := 5 + if len(token) < offset+1 { + return 0, false + } + class, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(class), true +} + +// Extract the rr number from TYPExxx +func typeToInt(token string) (uint16, bool) { + offset := 4 + if len(token) < offset+1 { + return 0, false + } + typ, err := strconv.ParseUint(token[offset:], 10, 16) + if err != nil { + return 0, false + } + return uint16(typ), true +} + +// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. +func stringToTTL(token string) (uint32, bool) { + s := uint32(0) + i := uint32(0) + for _, c := range token { + switch c { + case 's', 'S': + s += i + i = 0 + case 'm', 'M': + s += i * 60 + i = 0 + case 'h', 'H': + s += i * 60 * 60 + i = 0 + case 'd', 'D': + s += i * 60 * 60 * 24 + i = 0 + case 'w', 'W': + s += i * 60 * 60 * 24 * 7 + i = 0 + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i *= 10 + i += uint32(c) - '0' + default: + return 0, false + } + } + return s + i, true +} + +// Parse LOC records' [.][mM] into a +// mantissa exponent format. Token should contain the entire +// string (i.e. no spaces allowed) +func stringToCm(token string) (e, m uint8, ok bool) { + if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { + token = token[0 : len(token)-1] + } + s := strings.SplitN(token, ".", 2) + var meters, cmeters, val int + var err error + switch len(s) { + case 2: + if cmeters, err = strconv.Atoi(s[1]); err != nil { + return + } + fallthrough + case 1: + if meters, err = strconv.Atoi(s[0]); err != nil { + return + } + case 0: + // huh? + return 0, 0, false + } + ok = true + if meters > 0 { + e = 2 + val = meters + } else { + e = 0 + val = cmeters + } + for val > 10 { + e++ + val /= 10 + } + if e > 9 { + ok = false + } + m = uint8(val) + return +} + +func toAbsoluteName(name, origin string) (absolute string, ok bool) { + // check for an explicit origin reference + if name == "@" { + // require a nonempty origin + if origin == "" { + return "", false + } + return origin, true + } + + // require a valid domain name + _, ok = IsDomainName(name) + if !ok || name == "" { + return "", false + } + + // check if name is already absolute + if name[len(name)-1] == '.' { + return name, true + } + + // require a nonempty origin + if origin == "" { + return "", false + } + return appendOrigin(name, origin), true +} + +func appendOrigin(name, origin string) string { + if origin == "." { + return name + origin + } + return name + "." 
+ origin +} + +// LOC record helper function +func locCheckNorth(token string, latitude uint32) (uint32, bool) { + switch token { + case "n", "N": + return LOC_EQUATOR + latitude, true + case "s", "S": + return LOC_EQUATOR - latitude, true + } + return latitude, false +} + +// LOC record helper function +func locCheckEast(token string, longitude uint32) (uint32, bool) { + switch token { + case "e", "E": + return LOC_EQUATOR + longitude, true + case "w", "W": + return LOC_EQUATOR - longitude, true + } + return longitude, false +} + +// "Eat" the rest of the "line". Return potential comments +func slurpRemainder(c chan lex, f string) (*ParseError, string) { + l := <-c + com := "" + switch l.value { + case zBlank: + l = <-c + com = l.comment + if l.value != zNewline && l.value != zEOF { + return &ParseError{f, "garbage after rdata", l}, "" + } + case zNewline: + com = l.comment + case zEOF: + default: + return &ParseError{f, "garbage after rdata", l}, "" + } + return nil, com +} + +// Parse a 64-bit-like IPv6 address: "0014:4fff:ff20:ee64". +// Used for the NID and L64 records. +func stringToNodeID(l lex) (uint64, *ParseError) { + if len(l.token) < 19 { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + // There must be three colons at fixed positions, if not it's a parse error + if l.token[4] != ':' || l.token[9] != ':' || l.token[14] != ':' { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] + u, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + } + return u, nil +} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go new file mode 100644 index 0000000000..f4ccc84246 --- /dev/null +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -0,0 +1,2199 @@ +package dns + +import ( + "encoding/base64" + "net" + "strconv" + "strings" +) + +type parserFunc struct { + // Func defines the function that parses the tokens and returns the RR + // or an error. The last string contains any comments in the line as + // they are returned by the lexer as well. + Func func(h RR_Header, c chan lex, origin string, file string) (RR, *ParseError, string) + // Signals if the RR ending is of variable length, like TXT or records + // that have hexadecimal or base64 as their last element in the rdata, as + // opposed to records with a fixed ending, for instance A, AAAA and SOA. + Variable bool +} + +// Parse the rdata of each rrtype. +// All data from the channel c is either zString or zBlank. +// After the rdata there may come a zBlank and then a zNewline +// or immediately a zNewline. If this is not the case we flag +// a *ParseError: garbage after rdata.
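+// Editor's sketch (illustration, not upstream text): for a zone line such as +// "www.example.org. 3600 IN A 192.0.2.1" the grammar in scan.go fills in the +// RR_Header, and setRR below dispatches on h.Rrtype via typeToparserFunc to +// the matching parser (setA for this example).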
+func setRR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + parserfunc, ok := typeToparserFunc[h.Rrtype] + if ok { + r, e, cm := parserfunc.Func(h, c, o, f) + if parserfunc.Variable { + return r, e, cm + } + if e != nil { + return nil, e, "" + } + e, cm = slurpRemainder(c, f) + if e != nil { + return nil, e, "" + } + return r, nil, cm + } + // RFC 3597 RR (unknown RR handling) + return setRFC3597(h, c, o, f) +} + +// Parse a remainder of the rdata with embedded spaces and return the parsed +// string (sans the spaces) or an error +func endingToString(c chan lex, errstr, f string) (string, *ParseError, string) { + s := "" + l := <-c // zString + for l.value != zNewline && l.value != zEOF { + if l.err { + return s, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + s += l.token + case zBlank: // Ok + default: + return "", &ParseError{f, errstr, l}, "" + } + l = <-c + } + return s, nil, l.comment +} + +// Parse a remainder of the rdata with embedded spaces, split on unquoted whitespace, +// and return the parsed string slice or an error +func endingToTxtSlice(c chan lex, errstr, f string) ([]string, *ParseError, string) { + // Get the remaining data until we see a zNewline + l := <-c + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + + // Build the slice + s := make([]string, 0) + quote := false + empty := false + for l.value != zNewline && l.value != zEOF { + if l.err { + return nil, &ParseError{f, errstr, l}, "" + } + switch l.value { + case zString: + empty = false + if len(l.token) > 255 { + // split up tokens that are larger than 255 into 255-chunks + sx := []string{} + p, i := 0, 255 + for { + if i <= len(l.token) { + sx = append(sx, l.token[p:i]) + } else { + sx = append(sx, l.token[p:]) + break + } + p, i = p+255, i+255 + } + s = append(s, sx...) + break + } + + s = append(s, l.token) + case zBlank: + if quote { + // zBlank can only be seen in between txt parts. + return nil, &ParseError{f, errstr, l}, "" + } + case zQuote: + if empty && quote { + s = append(s, "") + } + quote = !quote + empty = true + default: + return nil, &ParseError{f, errstr, l}, "" + } + l = <-c + } + if quote { + return nil, &ParseError{f, errstr, l}, "" + } + return s, nil, l.comment +} + +func setA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(A) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + rr.A = net.ParseIP(l.token) + if rr.A == nil || l.err { + return nil, &ParseError{f, "bad A A", l}, "" + } + return rr, nil, "" +} + +func setAAAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AAAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + rr.AAAA = net.ParseIP(l.token) + if rr.AAAA == nil || l.err { + return nil, &ParseError{f, "bad AAAA AAAA", l}, "" + } + return rr, nil, "" +} + +func setNS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NS) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NS Ns", l}, "" + } + rr.Ns = name + return rr, nil, "" +} + +func setPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr.
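+ // Editor's note: a zero-length token means the rdata was omitted, which + // is legal in dynamic update (RFC 2136) messages; the setters in this + // file all return a header-only RR in that case.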
+ return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad PTR Ptr", l}, "" + } + rr.Ptr = name + return rr, nil, "" +} + +func setNSAPPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSAPPTR) + rr.Hdr = h + + l := <-c + rr.Ptr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" + } + rr.Ptr = name + return rr, nil, "" +} + +func setRP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RP) + rr.Hdr = h + + l := <-c + rr.Mbox = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return nil, &ParseError{f, "bad RP Mbox", l}, "" + } + rr.Mbox = mbox + + <-c // zBlank + l = <-c + rr.Txt = l.token + + txt, txtOk := toAbsoluteName(l.token, o) + if l.err || !txtOk { + return nil, &ParseError{f, "bad RP Txt", l}, "" + } + rr.Txt = txt + + return rr, nil, "" +} + +func setMR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MR) + rr.Hdr = h + + l := <-c + rr.Mr = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MR Mr", l}, "" + } + rr.Mr = name + return rr, nil, "" +} + +func setMB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MB) + rr.Hdr = h + + l := <-c + rr.Mb = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MB Mb", l}, "" + } + rr.Mb = name + return rr, nil, "" +} + +func setMG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MG) + rr.Hdr = h + + l := <-c + rr.Mg = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MG Mg", l}, "" + } + rr.Mg = name + return rr, nil, "" +} + +func setHINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HINFO) + rr.Hdr = h + + chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) + if e != nil { + return nil, e, c1 + } + + if ln := len(chunks); ln == 0 { + return rr, nil, "" + } else if ln == 1 { + // Can we split it? + if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Cpu = chunks[0] + rr.Os = strings.Join(chunks[1:], " ") + + return rr, nil, "" +} + +func setMINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MINFO) + rr.Hdr = h + + l := <-c + rr.Rmail = l.token + if l.length == 0 { // dynamic update rr. 
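+ // Editor's note (illustrative): toAbsoluteName below qualifies relative + // names against the origin, e.g. "rmail" with origin "example.org." + // becomes "rmail.example.org.", while "@" yields the origin itself.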
+ return rr, nil, "" + } + + rmail, rmailOk := toAbsoluteName(l.token, o) + if l.err || !rmailOk { + return nil, &ParseError{f, "bad MINFO Rmail", l}, "" + } + rr.Rmail = rmail + + <-c // zBlank + l = <-c + rr.Email = l.token + + email, emailOk := toAbsoluteName(l.token, o) + if l.err || !emailOk { + return nil, &ParseError{f, "bad MINFO Email", l}, "" + } + rr.Email = email + + return rr, nil, "" +} + +func setMF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MF) + rr.Hdr = h + + l := <-c + rr.Mf = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MF Mf", l}, "" + } + rr.Mf = name + return rr, nil, "" +} + +func setMD(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MD) + rr.Hdr = h + + l := <-c + rr.Md = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MD Md", l}, "" + } + rr.Md = name + return rr, nil, "" +} + +func setMX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(MX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad MX Pref", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Mx = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad MX Mx", l}, "" + } + rr.Mx = name + + return rr, nil, "" +} + +func setRT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RT) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil { + return nil, &ParseError{f, "bad RT Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Host = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad RT Host", l}, "" + } + rr.Host = name + + return rr, nil, "" +} + +func setAFSDB(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(AFSDB) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" + } + rr.Subtype = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Hostname = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" + } + rr.Hostname = name + return rr, nil, "" +} + +func setX25(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(X25) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + if l.err { + return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" + } + rr.PSDNAddress = l.token + return rr, nil, "" +} + +func setKX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(KX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
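+ // Editor's sketch (assumed example, not upstream text): KX rdata is a + // preference followed by an exchanger, as in + // "example.org. IN KX 10 kx.example.org.", mirroring the MX parser above.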
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad KX Pref", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Exchanger = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad KX Exchanger", l}, "" + } + rr.Exchanger = name + return rr, nil, "" +} + +func setCNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad CNAME Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setDNAME(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(DNAME) + rr.Hdr = h + + l := <-c + rr.Target = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad DNAME Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setSOA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SOA) + rr.Hdr = h + + l := <-c + rr.Ns = l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + ns, nsOk := toAbsoluteName(l.token, o) + if l.err || !nsOk { + return nil, &ParseError{f, "bad SOA Ns", l}, "" + } + rr.Ns = ns + + <-c // zBlank + l = <-c + rr.Mbox = l.token + + mbox, mboxOk := toAbsoluteName(l.token, o) + if l.err || !mboxOk { + return nil, &ParseError{f, "bad SOA Mbox", l}, "" + } + rr.Mbox = mbox + + <-c // zBlank + + var ( + v uint32 + ok bool + ) + for i := 0; i < 5; i++ { + l = <-c + if l.err { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { + if i == 0 { + // Serial must be a number + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + } + // We allow other fields to be unitful duration strings + if v, ok = stringToTTL(l.token); !ok { + return nil, &ParseError{f, "bad SOA zone parameter", l}, "" + + } + } else { + v = uint32(j) + } + switch i { + case 0: + rr.Serial = v + <-c // zBlank + case 1: + rr.Refresh = v + <-c // zBlank + case 2: + rr.Retry = v + <-c // zBlank + case 3: + rr.Expire = v + <-c // zBlank + case 4: + rr.Minttl = v + } + } + return rr, nil, "" +} + +func setSRV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SRV) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
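+ // Editor's sketch (assumed example): a line such as + // "_sip._tcp.example.org. IN SRV 0 5 5060 sipserver.example.org." + // carries priority, weight, port and target, parsed below in that order.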
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Priority", l}, "" + } + rr.Priority = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad SRV Port", l}, "" + } + rr.Port = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Target = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad SRV Target", l}, "" + } + rr.Target = name + return rr, nil, "" +} + +func setNAPTR(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NAPTR) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Order", l}, "" + } + rr.Order = uint16(i) + + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NAPTR Preference", l}, "" + } + rr.Preference = uint16(i) + + // Flags + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Flags = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + } else if l.value == zQuote { + rr.Flags = "" + } else { + return nil, &ParseError{f, "bad NAPTR Flags", l}, "" + } + + // Service + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Service = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + } else if l.value == zQuote { + rr.Service = "" + } else { + return nil, &ParseError{f, "bad NAPTR Service", l}, "" + } + + // Regexp + <-c // zBlank + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + l = <-c // Either String or Quote + if l.value == zString { + rr.Regexp = l.token + l = <-c // _QUOTE + if l.value != zQuote { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + } else if l.value == zQuote { + rr.Regexp = "" + } else { + return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" + } + + // After quote no space?? + <-c // zBlank + l = <-c // zString + rr.Replacement = l.token + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" + } + rr.Replacement = name + return rr, nil, "" +} + +func setTALINK(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TALINK) + rr.Hdr = h + + l := <-c + rr.PreviousName = l.token + if l.length == 0 { // dynamic update rr. 
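+ // Editor's note: TALINK rdata is two domain names (the previous and the + // next trust anchor), both resolved against the origin like the other + // name fields in this file.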
+ return rr, nil, "" + } + + previousName, previousNameOk := toAbsoluteName(l.token, o) + if l.err || !previousNameOk { + return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" + } + rr.PreviousName = previousName + + <-c // zBlank + l = <-c + rr.NextName = l.token + + nextName, nextNameOk := toAbsoluteName(l.token, o) + if l.err || !nextNameOk { + return nil, &ParseError{f, "bad TALINK NextName", l}, "" + } + rr.NextName = nextName + + return rr, nil, "" +} + +func setLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LOC) + rr.Hdr = h + // Non zero defaults for LOC record, see RFC 1876, Section 3. + rr.HorizPre = 165 // 10000 + rr.VertPre = 162 // 10 + rr.Size = 18 // 1 + ok := false + + // North + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude", l}, "" + } + rr.Latitude = 1000 * 60 * 60 * uint32(i) + + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + i, e = strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" + } + rr.Latitude += 1000 * 60 * uint32(i) + + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" + } else { + rr.Latitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'N' or 'S' + l = <-c + if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { + goto East + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" + +East: + // East + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude", l}, "" + } else { + rr.Longitude = 1000 * 60 * 60 * uint32(i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" + } else { + rr.Longitude += 1000 * 60 * uint32(i) + } + <-c // zBlank + l = <-c + if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { + return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" + } else { + rr.Longitude += uint32(1000 * i) + } + <-c // zBlank + // Either number, 'E' or 'W' + l = <-c + if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { + goto Altitude + } + // If still alive, flag an error + return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" + +Altitude: + <-c // zBlank + l = <-c + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } + if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { + l.token = l.token[0 : len(l.token)-1] + } + if i, e := strconv.ParseFloat(l.token, 32); e != nil { + return nil, &ParseError{f, "bad LOC Altitude", l}, "" + } else { + rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) + } + + // And now optionally the other values + l = <-c + count := 0 + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + switch count { + case 0: // Size + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC Size", l}, "" + } + rr.Size = (e & 0x0f) | (m << 4 & 0xf0) + case 1: // HorizPre + e, m, 
ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC HorizPre", l}, "" + } + rr.HorizPre = (e & 0x0f) | (m << 4 & 0xf0) + case 2: // VertPre + e, m, ok := stringToCm(l.token) + if !ok { + return nil, &ParseError{f, "bad LOC VertPre", l}, "" + } + rr.VertPre = (e & 0x0f) | (m << 4 & 0xf0) + } + count++ + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" + } + l = <-c + } + return rr, nil, "" +} + +func setHIP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(HIP) + rr.Hdr = h + + // HitLength is not represented + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" + } + rr.PublicKeyAlgorithm = uint8(i) + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP Hit", l}, "" + } + rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. + rr.HitLength = uint8(len(rr.Hit)) / 2 + + <-c // zBlank + l = <-c // zString + if l.length == 0 || l.err { + return nil, &ParseError{f, "bad HIP PublicKey", l}, "" + } + rr.PublicKey = l.token // This cannot contain spaces + rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) + + // RendezvousServers (if any) + l = <-c + var xs []string + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zString: + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + xs = append(xs, name) + case zBlank: + // Ok + default: + return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" + } + l = <-c + } + rr.RendezvousServers = xs + return rr, nil, l.comment +} + +func setCERT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CERT) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + if v, ok := StringToCertType[l.token]; ok { + rr.Type = v + } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { + return nil, &ParseError{f, "bad CERT Type", l}, "" + } else { + rr.Type = uint16(i) + } + <-c // zBlank + l = <-c // zString + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad CERT KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c // zString + if v, ok := StringToAlgorithm[l.token]; ok { + rr.Algorithm = v + } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + return nil, &ParseError{f, "bad CERT Algorithm", l}, "" + } else { + rr.Algorithm = uint8(i) + } + s, e1, c1 := endingToString(c, "bad CERT Certificate", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Certificate = s + return rr, nil, c1 +} + +func setOPENPGPKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(OPENPGPKEY) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) + if e != nil { + return nil, e, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setCSYNC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CSYNC) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
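+ // Editor's sketch (assumed example): "example.org. IN CSYNC 1278082 3 A NS AAAA" + // carries the SOA serial, the flags and a type bit map, parsed below in + // that order.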
+ return rr, nil, l.comment + } + j, e := strconv.ParseUint(l.token, 10, 32) + if e != nil { + // Serial must be a number + return nil, &ParseError{f, "bad CSYNC serial", l}, "" + } + rr.Serial = uint32(j) + + <-c // zBlank + + l = <-c + j, e = strconv.ParseUint(l.token, 10, 16) + if e != nil { + // Serial must be a number + return nil, &ParseError{f, "bad CSYNC flags", l}, "" + } + rr.Flags = uint16(j) + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setRRSIG(h, c, o, f) + if r != nil { + return &SIG{*r.(*RRSIG)}, e, s + } + return nil, e, s +} + +func setRRSIG(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RRSIG) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + if t, ok := StringToType[l.tokenUpper]; !ok { + if strings.HasPrefix(l.tokenUpper, "TYPE") { + t, ok = typeToInt(l.tokenUpper) + if !ok { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + rr.TypeCovered = t + } else { + return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" + } + } else { + rr.TypeCovered = t + } + + <-c // zBlank + l = <-c + i, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG Labels", l}, "" + } + rr.Labels = uint8(i) + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 32) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" + } + rr.OrigTtl = uint32(i) + + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + // Try to see if all numeric and use it as epoch + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + // TODO(miek): error out on > MAX_UINT32, same below + rr.Expiration = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" + } + } else { + rr.Expiration = i + } + + <-c // zBlank + l = <-c + if i, err := StringToTime(l.token); err != nil { + if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { + rr.Inception = uint32(i) + } else { + return nil, &ParseError{f, "bad RRSIG Inception", l}, "" + } + } else { + rr.Inception = i + } + + <-c // zBlank + l = <-c + i, err = strconv.ParseUint(l.token, 10, 16) + if err != nil || l.err { + return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + + <-c // zBlank + l = <-c + rr.SignerName = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" + } + rr.SignerName = name + + s, e, c1 := endingToString(c, "bad RRSIG Signature", f) + if e != nil { + return nil, e, c1 + } + rr.Signature = s + + return rr, nil, c1 +} + +func setNSEC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC) + rr.Hdr = h + + l := <-c + rr.NextDomain = 
l.token + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" + } + rr.NextDomain = name + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" + } + rr.SaltLength = uint8(len(l.token)) / 2 + rr.Salt = l.token + + <-c + l = <-c + if len(l.token) == 0 || l.err { + return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" + } + rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) + rr.NextDomain = l.token + + rr.TypeBitMap = make([]uint16, 0) + var ( + k uint16 + ok bool + ) + l = <-c + for l.value != zNewline && l.value != zEOF { + switch l.value { + case zBlank: + // Ok + case zString: + if k, ok = StringToType[l.tokenUpper]; !ok { + if k, ok = typeToInt(l.tokenUpper); !ok { + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + } + rr.TypeBitMap = append(rr.TypeBitMap, k) + default: + return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" + } + l = <-c + } + return rr, nil, l.comment +} + +func setNSEC3PARAM(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NSEC3PARAM) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" + } + rr.Hash = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" + } + rr.Flags = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" + } + rr.Iterations = uint16(i) + <-c + l = <-c + rr.SaltLength = uint8(len(l.token)) + rr.Salt = l.token + return rr, nil, "" +} + +func setEUI48(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI48) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
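+ // Editor's note (illustrative): an EUI48 address is six dash-separated + // hex octets such as "00-00-5e-00-53-2a"; the loop below checks and + // strips the dashes before parsing the 48-bit value.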
+ return rr, nil, "" + } + + if l.length != 17 || l.err { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + addr := make([]byte, 12) + dash := 0 + for i := 0; i < 10; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + } + addr[10] = l.token[15] + addr[11] = l.token[16] + + i, e := strconv.ParseUint(string(addr), 16, 48) + if e != nil { + return nil, &ParseError{f, "bad EUI48 Address", l}, "" + } + rr.Address = i + return rr, nil, "" +} + +func setEUI64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EUI64) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + if l.length != 23 || l.err { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + addr := make([]byte, 16) + dash := 0 + for i := 0; i < 14; i += 2 { + addr[i] = l.token[i+dash] + addr[i+1] = l.token[i+1+dash] + dash++ + if l.token[i+1+dash] != '-' { + return nil, &ParseError{f, "bad EUI64 Address", l}, "" + } + } + addr[14] = l.token[21] + addr[15] = l.token[22] + + i, e := strconv.ParseUint(string(addr), 16, 64) + if e != nil { + return nil, &ParseError{f, "bad EUI68 Address", l}, "" + } + rr.Address = uint64(i) + return rr, nil, "" +} + +func setSSHFP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(SSHFP) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad SSHFP Type", l}, "" + } + rr.Type = uint8(i) + <-c // zBlank + s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) + if e1 != nil { + return nil, e1, c1 + } + rr.FingerPrint = s + return rr, nil, "" +} + +func setDNSKEYs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DNSKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
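+ // Editor's sketch (assumed example): "example.org. IN DNSKEY 256 3 8 AwEAAc..." + // carries flags, protocol and algorithm followed by a base64 public key; + // this shared parser also backs the KEY and CDNSKEY types.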
+ return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "KEY") + if r != nil { + return &KEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") + return r, e, s +} + +func setCDNSKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") + if r != nil { + return &CDNSKEY{*r.(*DNSKEY)}, e, s + } + return nil, e, s +} + +func setRKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(RKEY) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Flags", l}, "" + } + rr.Flags = uint16(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Protocol", l}, "" + } + rr.Protocol = uint8(i) + <-c // zBlank + l = <-c // zString + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" + } + rr.Algorithm = uint8(i) + s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) + if e1 != nil { + return nil, e1, c1 + } + rr.PublicKey = s + return rr, nil, c1 +} + +func setEID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(EID) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad EID Endpoint", f) + if e != nil { + return nil, e, c1 + } + rr.Endpoint = s + return rr, nil, c1 +} + +func setNIMLOC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NIMLOC) + rr.Hdr = h + s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) + if e != nil { + return nil, e, c1 + } + rr.Locator = s + return rr, nil, c1 +} + +func setGPOS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GPOS) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
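+ // Editor's note: GPOS (RFC 1712) carries longitude, latitude and altitude + // as decimal strings; they are validated as floats below but stored as + // the original text.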
+ return rr, nil, "" + } + + _, e := strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Longitude", l}, "" + } + rr.Longitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Latitude", l}, "" + } + rr.Latitude = l.token + <-c // zBlank + l = <-c + _, e = strconv.ParseFloat(l.token, 64) + if e != nil || l.err { + return nil, &ParseError{f, "bad GPOS Altitude", l}, "" + } + rr.Altitude = l.token + return rr, nil, "" +} + +func setDSs(h RR_Header, c chan lex, o, f, typ string) (RR, *ParseError, string) { + rr := new(DS) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) + if e1 != nil { + return nil, e1, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DS") + return r, e, s +} + +func setDLV(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "DLV") + if r != nil { + return &DLV{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setCDS(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + r, e, s := setDSs(h, c, o, f, "CDS") + if r != nil { + return &CDS{*r.(*DS)}, e, s + } + return nil, e, s +} + +func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, l.comment + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA KeyTag", l}, "" + } + rr.KeyTag = uint16(i) + <-c // zBlank + l = <-c + if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { + i, ok := StringToAlgorithm[l.tokenUpper] + if !ok || l.err { + return nil, &ParseError{f, "bad TA Algorithm", l}, "" + } + rr.Algorithm = i + } else { + rr.Algorithm = uint8(i) + } + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 8) + if e != nil || l.err { + return nil, &ParseError{f, "bad TA DigestType", l}, "" + } + rr.DigestType = uint8(i) + s, e, c1 := endingToString(c, "bad TA Digest", f) + if e != nil { + return nil, e.(*ParseError), c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setTLSA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(TLSA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment
+ }
+
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Usage", l}, ""
+ }
+ rr.Usage = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA Selector", l}, ""
+ }
+ rr.Selector = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad TLSA MatchingType", l}, ""
+ }
+ rr.MatchingType = uint8(i)
+ // e2 must be a fresh variable: reusing e (an error) would not satisfy the *ParseError return type.
+ s, e2, c1 := endingToString(c, "bad TLSA Certificate", f)
+ if e2 != nil {
+ return nil, e2, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setSMIMEA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SMIMEA)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // dynamic update rr.
+ return rr, nil, l.comment
+ }
+
+ i, e := strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SMIMEA Usage", l}, ""
+ }
+ rr.Usage = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SMIMEA Selector", l}, ""
+ }
+ rr.Selector = uint8(i)
+ <-c // zBlank
+ l = <-c
+ i, e = strconv.ParseUint(l.token, 10, 8)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, ""
+ }
+ rr.MatchingType = uint8(i)
+ // As above, e2 must be a fresh variable to satisfy the *ParseError return type.
+ s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f)
+ if e2 != nil {
+ return nil, e2, c1
+ }
+ rr.Certificate = s
+ return rr, nil, c1
+}
+
+func setRFC3597(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(RFC3597)
+ rr.Hdr = h
+
+ l := <-c
+ if l.token != "\\#" {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+
+ <-c // zBlank
+ l = <-c
+ rdlength, e := strconv.Atoi(l.token)
+ if e != nil || l.err {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+
+ s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f)
+ if e1 != nil {
+ return nil, e1, c1
+ }
+ if rdlength*2 != len(s) {
+ return nil, &ParseError{f, "bad RFC3597 Rdata", l}, ""
+ }
+ rr.Rdata = s
+ return rr, nil, c1
+}
+
+func setSPF(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(SPF)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+func setAVC(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(AVC)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+func setTXT(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TXT)
+ rr.Hdr = h
+
+ // no zBlank reading here, because all this rdata is TXT
+ s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.Txt = s
+ return rr, nil, c1
+}
+
+// identical to setTXT
+func setNINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(NINFO)
+ rr.Hdr = h
+
+ s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ rr.ZSData = s
+ return rr, nil, c1
+}
+
+func setURI(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(URI)
+ rr.Hdr = h
+
+ l := <-c
+ if l.length == 0 { // dynamic update rr.
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Priority", l}, "" + } + rr.Priority = uint16(i) + <-c // zBlank + l = <-c + i, e = strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad URI Weight", l}, "" + } + rr.Weight = uint16(i) + + <-c // zBlank + s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) + if err != nil { + return nil, err, "" + } + if len(s) != 1 { + return nil, &ParseError{f, "bad URI Target", l}, "" + } + rr.Target = s[0] + return rr, nil, c1 +} + +func setDHCID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + // awesome record to parse! + rr := new(DHCID) + rr.Hdr = h + + s, e, c1 := endingToString(c, "bad DHCID Digest", f) + if e != nil { + return nil, e, c1 + } + rr.Digest = s + return rr, nil, c1 +} + +func setNID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(NID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad NID Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.NodeID = u + return rr, nil, "" +} + +func setL32(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L32) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L32 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + rr.Locator32 = net.ParseIP(l.token) + if rr.Locator32 == nil || l.err { + return nil, &ParseError{f, "bad L32 Locator", l}, "" + } + return rr, nil, "" +} + +func setLP(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(LP) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad LP Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Fqdn = l.token + name, nameOk := toAbsoluteName(l.token, o) + if l.err || !nameOk { + return nil, &ParseError{f, "bad LP Fqdn", l}, "" + } + rr.Fqdn = name + + return rr, nil, "" +} + +func setL64(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(L64) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad L64 Preference", l}, "" + } + rr.Preference = uint16(i) + <-c // zBlank + l = <-c // zString + u, err := stringToNodeID(l) + if err != nil || l.err { + return nil, err, "" + } + rr.Locator64 = u + return rr, nil, "" +} + +func setUID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad UID Uid", l}, "" + } + rr.Uid = uint32(i) + return rr, nil, "" +} + +func setGID(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(GID) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 32) + if e != nil || l.err { + return nil, &ParseError{f, "bad GID Gid", l}, "" + } + rr.Gid = uint32(i) + return rr, nil, "" +} + +func setUINFO(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(UINFO) + rr.Hdr = h + + s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) + if e != nil { + return nil, e, c1 + } + if ln := len(s); ln == 0 { + return rr, nil, c1 + } + rr.Uinfo = s[0] // silently discard anything after the first character-string + return rr, nil, c1 +} + +func setPX(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(PX) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. + return rr, nil, "" + } + + i, e := strconv.ParseUint(l.token, 10, 16) + if e != nil || l.err { + return nil, &ParseError{f, "bad PX Preference", l}, "" + } + rr.Preference = uint16(i) + + <-c // zBlank + l = <-c // zString + rr.Map822 = l.token + map822, map822Ok := toAbsoluteName(l.token, o) + if l.err || !map822Ok { + return nil, &ParseError{f, "bad PX Map822", l}, "" + } + rr.Map822 = map822 + + <-c // zBlank + l = <-c // zString + rr.Mapx400 = l.token + mapx400, mapx400Ok := toAbsoluteName(l.token, o) + if l.err || !mapx400Ok { + return nil, &ParseError{f, "bad PX Mapx400", l}, "" + } + rr.Mapx400 = mapx400 + + return rr, nil, "" +} + +func setCAA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) { + rr := new(CAA) + rr.Hdr = h + + l := <-c + if l.length == 0 { // dynamic update rr. 
+ return rr, nil, l.comment
+ }
+
+ i, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad CAA Flag", l}, ""
+ }
+ rr.Flag = uint8(i)
+
+ <-c // zBlank
+ l = <-c // zString
+ if l.value != zString {
+ return nil, &ParseError{f, "bad CAA Tag", l}, ""
+ }
+ rr.Tag = l.token
+
+ <-c // zBlank
+ s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f)
+ if e != nil {
+ return nil, e, ""
+ }
+ if len(s) != 1 {
+ return nil, &ParseError{f, "bad CAA Value", l}, ""
+ }
+ rr.Value = s[0]
+ return rr, nil, c1
+}
+
+func setTKEY(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
+ rr := new(TKEY)
+ rr.Hdr = h
+
+ l := <-c
+
+ // Algorithm
+ if l.value != zString {
+ return nil, &ParseError{f, "bad TKEY algorithm", l}, ""
+ }
+ rr.Algorithm = l.token
+ <-c // zBlank
+
+ // Get the key length and key values
+ l = <-c
+ i, err := strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad TKEY key length", l}, ""
+ }
+ rr.KeySize = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if l.value != zString {
+ return nil, &ParseError{f, "bad TKEY key", l}, ""
+ }
+ rr.Key = l.token
+ <-c // zBlank
+
+ // Get the otherdata length and string data
+ l = <-c
+ i, err = strconv.ParseUint(l.token, 10, 8)
+ if err != nil || l.err {
+ return nil, &ParseError{f, "bad TKEY otherdata length", l}, ""
+ }
+ rr.OtherLen = uint16(i)
+ <-c // zBlank
+ l = <-c
+ if l.value != zString {
+ return nil, &ParseError{f, "bad TKEY otherdata", l}, ""
+ }
+ rr.OtherData = l.token
+
+ return rr, nil, ""
+}
+
+var typeToparserFunc = map[uint16]parserFunc{
+ TypeAAAA: {setAAAA, false},
+ TypeAFSDB: {setAFSDB, false},
+ TypeA: {setA, false},
+ TypeCAA: {setCAA, true},
+ TypeCDS: {setCDS, true},
+ TypeCDNSKEY: {setCDNSKEY, true},
+ TypeCERT: {setCERT, true},
+ TypeCNAME: {setCNAME, false},
+ TypeCSYNC: {setCSYNC, true},
+ TypeDHCID: {setDHCID, true},
+ TypeDLV: {setDLV, true},
+ TypeDNAME: {setDNAME, false},
+ TypeKEY: {setKEY, true},
+ TypeDNSKEY: {setDNSKEY, true},
+ TypeDS: {setDS, true},
+ TypeEID: {setEID, true},
+ TypeEUI48: {setEUI48, false},
+ TypeEUI64: {setEUI64, false},
+ TypeGID: {setGID, false},
+ TypeGPOS: {setGPOS, false},
+ TypeHINFO: {setHINFO, true},
+ TypeHIP: {setHIP, true},
+ TypeKX: {setKX, false},
+ TypeL32: {setL32, false},
+ TypeL64: {setL64, false},
+ TypeLOC: {setLOC, true},
+ TypeLP: {setLP, false},
+ TypeMB: {setMB, false},
+ TypeMD: {setMD, false},
+ TypeMF: {setMF, false},
+ TypeMG: {setMG, false},
+ TypeMINFO: {setMINFO, false},
+ TypeMR: {setMR, false},
+ TypeMX: {setMX, false},
+ TypeNAPTR: {setNAPTR, false},
+ TypeNID: {setNID, false},
+ TypeNIMLOC: {setNIMLOC, true},
+ TypeNINFO: {setNINFO, true},
+ TypeNSAPPTR: {setNSAPPTR, false},
+ TypeNSEC3PARAM: {setNSEC3PARAM, false},
+ TypeNSEC3: {setNSEC3, true},
+ TypeNSEC: {setNSEC, true},
+ TypeNS: {setNS, false},
+ TypeOPENPGPKEY: {setOPENPGPKEY, true},
+ TypePTR: {setPTR, false},
+ TypePX: {setPX, false},
+ TypeSIG: {setSIG, true},
+ TypeRKEY: {setRKEY, true},
+ TypeRP: {setRP, false},
+ TypeRRSIG: {setRRSIG, true},
+ TypeRT: {setRT, false},
+ TypeSMIMEA: {setSMIMEA, true},
+ TypeSOA: {setSOA, false},
+ TypeSPF: {setSPF, true},
+ TypeAVC: {setAVC, true},
+ TypeSRV: {setSRV, false},
+ TypeSSHFP: {setSSHFP, true},
+ TypeTALINK: {setTALINK, false},
+ TypeTA: {setTA, true},
+ TypeTLSA: {setTLSA, true},
+ TypeTXT: {setTXT, true},
+ TypeUID: {setUID, false},
+ TypeUINFO: {setUINFO, true},
+ TypeURI: {setURI, true},
+ TypeX25: {setX25, false},
+ TypeTKEY: {setTKEY, true},
+}
diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go
new file mode 100644
index 0000000000..424e5af9f5
--- /dev/null
+++ b/vendor/github.com/miekg/dns/scanner.go
@@ -0,0 +1,56 @@
+package dns
+
+// Implement a simple scanner, return a byte stream from an io.Reader.
+
+import (
+ "bufio"
+ "context"
+ "io"
+ "text/scanner"
+)
+
+type scan struct {
+ src *bufio.Reader
+ position scanner.Position
+ eof bool // Have we just seen an eof
+ ctx context.Context
+}
+
+func scanInit(r io.Reader) (*scan, context.CancelFunc) {
+ s := new(scan)
+ s.src = bufio.NewReader(r)
+ s.position.Line = 1
+
+ ctx, cancel := context.WithCancel(context.Background())
+ s.ctx = ctx
+
+ return s, cancel
+}
+
+// tokenText returns the next byte from the input.
+func (s *scan) tokenText() (byte, error) {
+ c, err := s.src.ReadByte()
+ if err != nil {
+ return c, err
+ }
+ select {
+ case <-s.ctx.Done():
+ return c, context.Canceled
+ default:
+ break
+ }
+
+ // Delay the newline handling until the next token is delivered;
+ // this fixes off-by-one errors when reporting a parse error.
+ if s.eof {
+ s.position.Line++
+ s.position.Column = 0
+ s.eof = false
+ }
+ if c == '\n' {
+ s.eof = true
+ return c, nil
+ }
+ s.position.Column++
+ return c, nil
+}
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
new file mode 100644
index 0000000000..685753f43c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/server.go
@@ -0,0 +1,719 @@
+// DNS server implementation.
+
+package dns
+
+import (
+ "bytes"
+ "crypto/tls"
+ "encoding/binary"
+ "io"
+ "net"
+ "sync"
+ "time"
+)
+
+// Maximum number of TCP queries before we close the socket.
+const maxTCPQueries = 128
+
+// Handler is implemented by any value that implements ServeDNS.
+type Handler interface {
+ ServeDNS(w ResponseWriter, r *Msg)
+}
+
+// A ResponseWriter interface is used by a DNS handler to
+// construct a DNS response.
+type ResponseWriter interface {
+ // LocalAddr returns the net.Addr of the server
+ LocalAddr() net.Addr
+ // RemoteAddr returns the net.Addr of the client that sent the current request.
+ RemoteAddr() net.Addr
+ // WriteMsg writes a reply back to the client.
+ WriteMsg(*Msg) error
+ // Write writes a raw buffer back to the client.
+ Write([]byte) (int, error)
+ // Close closes the connection.
+ Close() error
+ // TsigStatus returns the status of the Tsig.
+ TsigStatus() error
+ // TsigTimersOnly sets the tsig timers only boolean.
+ TsigTimersOnly(bool)
+ // Hijack lets the caller take over the connection.
+ // After a call to Hijack(), the DNS package will not do anything with the connection.
+ Hijack()
+}
+
+type response struct {
+ hijacked bool // connection has been hijacked by handler
+ tsigStatus error
+ tsigTimersOnly bool
+ tsigRequestMAC string
+ tsigSecret map[string]string // the tsig secrets
+ udp *net.UDPConn // i/o connection if UDP was used
+ tcp net.Conn // i/o connection if TCP was used
+ udpSession *SessionUDP // oob data to get egress interface right
+ remoteAddr net.Addr // address of the client
+ writer Writer // writer to output the raw DNS bits
+}
+
+// ServeMux is a DNS request multiplexer. It matches the
+// zone name of each incoming request against a list of
+// registered patterns and calls the handler for the pattern
+// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
+// that queries for the DS record are redirected to the parent zone (if that
+// is also registered), otherwise the child gets the query.
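+//
+// A minimal usage sketch (an editor's illustration, not part of the upstream
+// file; the zone name and handler body are assumptions):
+//
+//	mux := NewServeMux()
+//	mux.HandleFunc("example.org.", func(w ResponseWriter, r *Msg) {
+//		m := new(Msg)
+//		m.SetReply(r)
+//		w.WriteMsg(m)
+//	})
+//	ListenAndServe(":5353", "udp", mux)
+//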
+// ServeMux is also safe for concurrent access from multiple goroutines.
+type ServeMux struct {
+ z map[string]Handler
+ m *sync.RWMutex
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
+
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as DNS handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Msg)
+
+// ServeDNS calls f(w, r).
+func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
+ f(w, r)
+}
+
+// HandleFailed is a HandlerFunc that returns SERVFAIL for every request it gets.
+func HandleFailed(w ResponseWriter, r *Msg) {
+ m := new(Msg)
+ m.SetRcode(r, RcodeServerFailure)
+ // does not matter if this write fails
+ w.WriteMsg(m)
+}
+
+func failedHandler() Handler { return HandlerFunc(HandleFailed) }
+
+// ListenAndServe starts a server on the address and network specified and invokes
+// handler for incoming queries.
+func ListenAndServe(addr string, network string, handler Handler) error {
+ server := &Server{Addr: addr, Net: network, Handler: handler}
+ return server.ListenAndServe()
+}
+
+// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in
+// http://golang.org/pkg/net/http/#ListenAndServeTLS
+func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return err
+ }
+
+ config := tls.Config{
+ Certificates: []tls.Certificate{cert},
+ }
+
+ server := &Server{
+ Addr: addr,
+ Net: "tcp-tls",
+ TLSConfig: &config,
+ Handler: handler,
+ }
+
+ return server.ListenAndServe()
+}
+
+// ActivateAndServe activates a server with a listener from systemd;
+// l and p should not both be non-nil.
+// If both l and p are non-nil, only p is used.
+// Invoke handler for incoming queries.
+func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
+ server := &Server{Listener: l, PacketConn: p, Handler: handler}
+ return server.ActivateAndServe()
+}
+
+func (mux *ServeMux) match(q string, t uint16) Handler {
+ mux.m.RLock()
+ defer mux.m.RUnlock()
+ var handler Handler
+ b := make([]byte, len(q)) // worst case, one label of length q
+ off := 0
+ end := false
+ for {
+ l := len(q[off:])
+ for i := 0; i < l; i++ {
+ b[i] = q[off+i]
+ if b[i] >= 'A' && b[i] <= 'Z' {
+ b[i] |= ('a' - 'A')
+ }
+ }
+ if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key
+ if t != TypeDS {
+ return h
+ }
+ // Continue for DS to see if we have a parent too, if so delegate to the parent
+ handler = h
+ }
+ off, end = NextLabel(q, off)
+ if end {
+ break
+ }
+ }
+ // Wildcard match, if we have found nothing try the root zone as a last resort.
+ if h, ok := mux.z["."]; ok {
+ return h
+ }
+ return handler
+}
+
+// Handle adds a handler to the ServeMux for pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ mux.z[Fqdn(pattern)] = handler
+ mux.m.Unlock()
+}
+
+// HandleFunc adds a handler function to the ServeMux for pattern.
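+// The pattern is a zone name; it is stored in fully qualified form (see Fqdn).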
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler specific to pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose
+// pattern most closely matches the request message. If DefaultServeMux
+// is used the correct thing for DS queries is done: a possible parent
+// is sought.
+// If no handler is found a standard SERVFAIL message is returned.
+// If the request message does not have exactly one question in the
+// question section a SERVFAIL is returned, unless Unsafe is true.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
+ var h Handler
+ if len(request.Question) < 1 { // allow more than one question, but not zero
+ h = failedHandler()
+ } else {
+ if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
+ h = failedHandler()
+ }
+ }
+ h.ServeDNS(w, request)
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handler with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
+
+// Writer writes raw DNS messages; each call to Write should send an entire message.
+type Writer interface {
+ io.Writer
+}
+
+// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message.
+type Reader interface {
+ // ReadTCP reads a raw message from a TCP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
+ // ReadUDP reads a raw message from a UDP connection. Implementations may alter
+ // connection properties, for example the read-deadline.
+ ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error)
+}
+
+// defaultReader is an adapter for the Server struct that implements the Reader interface
+// using the readTCP and readUDP func of the embedded Server.
+type defaultReader struct {
+ *Server
+}
+
+func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
+ return dr.readTCP(conn, timeout)
+}
+
+func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
+ return dr.readUDP(conn, timeout)
+}
+
+// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader.
+// Implementations should never return a nil Reader.
+type DecorateReader func(Reader) Reader
+
+// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer.
+// Implementations should never return a nil Writer.
+type DecorateWriter func(Writer) Writer
+
+// A Server defines parameters for running a DNS server.
+type Server struct {
+ // Address to listen on, ":domain" if empty.
+ Addr string
+ // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise a UDP one
+ Net string
+ // TCP Listener to use, this is to aid in systemd's socket activation.
+ Listener net.Listener
+ // TLS connection configuration
+ TLSConfig *tls.Config
+ // UDP "Listener" to use, this is to aid in systemd's socket activation.
+ PacketConn net.PacketConn
+ // Handler to invoke, dns.DefaultServeMux if nil.
+ Handler Handler
+ // Default buffer size to use to read incoming UDP messages. If not set
+ // it defaults to MinMsgSize (512 B).
+ UDPSize int
+ // The read deadline (net.Conn.SetReadDeadline) for new connections, defaults to 2 * time.Second.
+ ReadTimeout time.Duration
+ // The write deadline (net.Conn.SetWriteDeadline) for new connections, defaults to 2 * time.Second.
+ WriteTimeout time.Duration
+ // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966).
+ IdleTimeout func() time.Duration
+ // Secret(s) for Tsig, keyed by zone name. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2).
+ TsigSecret map[string]string
+ // Unsafe instructs the server to disregard any sanity checks and directly hand the message to
+ // the handler. Specifically, it will not check that the query has the QR bit unset.
+ Unsafe bool
+ // If NotifyStartedFunc is set it is called once the server has started listening.
+ NotifyStartedFunc func()
+ // DecorateReader is optional, allows customization of the process that reads raw DNS messages.
+ DecorateReader DecorateReader
+ // DecorateWriter is optional, allows customization of the process that writes raw DNS messages.
+ DecorateWriter DecorateWriter
+
+ // Shutdown handling
+ lock sync.RWMutex
+ started bool
+}
+
+// ListenAndServe starts a nameserver on the configured address in *Server.
+func (srv *Server) ListenAndServe() error {
+ srv.lock.Lock()
+ defer srv.lock.Unlock()
+ if srv.started {
+ return &Error{err: "server already started"}
+ }
+ addr := srv.Addr
+ if addr == "" {
+ addr = ":domain"
+ }
+ if srv.UDPSize == 0 {
+ srv.UDPSize = MinMsgSize
+ }
+ switch srv.Net {
+ case "tcp", "tcp4", "tcp6":
+ a, err := net.ResolveTCPAddr(srv.Net, addr)
+ if err != nil {
+ return err
+ }
+ l, err := net.ListenTCP(srv.Net, a)
+ if err != nil {
+ return err
+ }
+ srv.Listener = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveTCP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ case "tcp-tls", "tcp4-tls", "tcp6-tls":
+ network := "tcp"
+ if srv.Net == "tcp4-tls" {
+ network = "tcp4"
+ } else if srv.Net == "tcp6-tls" {
+ network = "tcp6"
+ }
+
+ l, err := tls.Listen(network, addr, srv.TLSConfig)
+ if err != nil {
+ return err
+ }
+ srv.Listener = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveTCP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ case "udp", "udp4", "udp6":
+ a, err := net.ResolveUDPAddr(srv.Net, addr)
+ if err != nil {
+ return err
+ }
+ l, err := net.ListenUDP(srv.Net, a)
+ if err != nil {
+ return err
+ }
+ if e := setUDPSocketOptions(l); e != nil {
+ return e
+ }
+ srv.PacketConn = l
+ srv.started = true
+ srv.lock.Unlock()
+ err = srv.serveUDP(l)
+ srv.lock.Lock() // to satisfy the defer at the top
+ return err
+ }
+ return &Error{err: "bad network"}
+}
+
+// ActivateAndServe starts a nameserver with the PacketConn or Listener
+// configured in *Server. Its main use is to start a server from systemd.
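+//
+// A sketch of direct use without systemd (an editor's illustration; the
+// address is an assumption):
+//
+//	pc, err := net.ListenPacket("udp", ":5353")
+//	if err != nil {
+//		// handle the error
+//	}
+//	srv := &Server{PacketConn: pc, Handler: DefaultServeMux}
+//	err = srv.ActivateAndServe()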
+func (srv *Server) ActivateAndServe() error { + srv.lock.Lock() + defer srv.lock.Unlock() + if srv.started { + return &Error{err: "server already started"} + } + pConn := srv.PacketConn + l := srv.Listener + if pConn != nil { + if srv.UDPSize == 0 { + srv.UDPSize = MinMsgSize + } + // Check PacketConn interface's type is valid and value + // is not nil + if t, ok := pConn.(*net.UDPConn); ok && t != nil { + if e := setUDPSocketOptions(t); e != nil { + return e + } + srv.started = true + srv.lock.Unlock() + e := srv.serveUDP(t) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + } + if l != nil { + srv.started = true + srv.lock.Unlock() + e := srv.serveTCP(l) + srv.lock.Lock() // to satisfy the defer at the top + return e + } + return &Error{err: "bad listeners"} +} + +// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and +// ActivateAndServe will return. +func (srv *Server) Shutdown() error { + srv.lock.Lock() + if !srv.started { + srv.lock.Unlock() + return &Error{err: "server not started"} + } + srv.started = false + srv.lock.Unlock() + + if srv.PacketConn != nil { + srv.PacketConn.Close() + } + if srv.Listener != nil { + srv.Listener.Close() + } + return nil +} + +// getReadTimeout is a helper func to use system timeout if server did not intend to change it. +func (srv *Server) getReadTimeout() time.Duration { + rtimeout := dnsTimeout + if srv.ReadTimeout != 0 { + rtimeout = srv.ReadTimeout + } + return rtimeout +} + +// serveTCP starts a TCP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveTCP(l net.Listener) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + rw, err := l.Accept() + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + if neterr, ok := err.(net.Error); ok && neterr.Temporary() { + continue + } + return err + } + go func() { + m, err := reader.ReadTCP(rw, rtimeout) + if err != nil { + rw.Close() + return + } + srv.serve(rw.RemoteAddr(), handler, m, nil, nil, rw) + }() + } +} + +// serveUDP starts a UDP listener for the server. +// Each request is handled in a separate goroutine. +func (srv *Server) serveUDP(l *net.UDPConn) error { + defer l.Close() + + if srv.NotifyStartedFunc != nil { + srv.NotifyStartedFunc() + } + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } + + handler := srv.Handler + if handler == nil { + handler = DefaultServeMux + } + rtimeout := srv.getReadTimeout() + // deadline is not used here + for { + m, s, err := reader.ReadUDP(l, rtimeout) + srv.lock.RLock() + if !srv.started { + srv.lock.RUnlock() + return nil + } + srv.lock.RUnlock() + if err != nil { + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + continue + } + return err + } + if len(m) < headerSize { + continue + } + go srv.serve(s.RemoteAddr(), handler, m, l, s, nil) + } +} + +// Serve a new connection. 
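+// For TCP the connection is reused for up to maxTCPQueries queries; for UDP
+// a single message is handled and the listening socket is left open.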
+func (srv *Server) serve(a net.Addr, h Handler, m []byte, u *net.UDPConn, s *SessionUDP, t net.Conn) { + w := &response{tsigSecret: srv.TsigSecret, udp: u, tcp: t, remoteAddr: a, udpSession: s} + if srv.DecorateWriter != nil { + w.writer = srv.DecorateWriter(w) + } else { + w.writer = w + } + + q := 0 // counter for the amount of TCP queries we get + + reader := Reader(&defaultReader{srv}) + if srv.DecorateReader != nil { + reader = srv.DecorateReader(reader) + } +Redo: + req := new(Msg) + err := req.Unpack(m) + if err != nil { // Send a FormatError back + x := new(Msg) + x.SetRcodeFormatError(req) + w.WriteMsg(x) + goto Exit + } + if !srv.Unsafe && req.Response { + goto Exit + } + + w.tsigStatus = nil + if w.tsigSecret != nil { + if t := req.IsTsig(); t != nil { + secret := t.Hdr.Name + if _, ok := w.tsigSecret[secret]; !ok { + w.tsigStatus = ErrKeyAlg + } + w.tsigStatus = TsigVerify(m, w.tsigSecret[secret], "", false) + w.tsigTimersOnly = false + w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC + } + } + h.ServeDNS(w, req) // Writes back to the client + +Exit: + if w.tcp == nil { + return + } + // TODO(miek): make this number configurable? + if q > maxTCPQueries { // close socket after this many queries + w.Close() + return + } + + if w.hijacked { + return // client calls Close() + } + if u != nil { // UDP, "close" and return + w.Close() + return + } + idleTimeout := tcpIdleTimeout + if srv.IdleTimeout != nil { + idleTimeout = srv.IdleTimeout() + } + m, err = reader.ReadTCP(w.tcp, idleTimeout) + if err == nil { + q++ + goto Redo + } + w.Close() + return +} + +func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + l := make([]byte, 2) + n, err := conn.Read(l) + if err != nil || n != 2 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + length := binary.BigEndian.Uint16(l) + if length == 0 { + return nil, ErrShortRead + } + m := make([]byte, int(length)) + n, err = conn.Read(m[:int(length)]) + if err != nil || n == 0 { + if err != nil { + return nil, err + } + return nil, ErrShortRead + } + i := n + for i < int(length) { + j, err := conn.Read(m[i:int(length)]) + if err != nil { + return nil, err + } + i += j + } + n = i + m = m[:n] + return m, nil +} + +func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { + conn.SetReadDeadline(time.Now().Add(timeout)) + m := make([]byte, srv.UDPSize) + n, s, err := ReadFromSessionUDP(conn, m) + if err != nil { + return nil, nil, err + } + m = m[:n] + return m, s, nil +} + +// WriteMsg implements the ResponseWriter.WriteMsg method. +func (w *response) WriteMsg(m *Msg) (err error) { + var data []byte + if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) + if t := m.IsTsig(); t != nil { + data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err + } + } + data, err = m.Pack() + if err != nil { + return err + } + _, err = w.writer.Write(data) + return err +} + +// Write implements the ResponseWriter.Write method. 
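+// For TCP the message is prefixed with the two octet length field required by
+// RFC 1035 (Section 4.2.2) before being written; UDP messages are written as-is.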
+func (w *response) Write(m []byte) (int, error) { + switch { + case w.udp != nil: + n, err := WriteToSessionUDP(w.udp, m, w.udpSession) + return n, err + case w.tcp != nil: + lm := len(m) + if lm < 2 { + return 0, io.ErrShortBuffer + } + if lm > MaxMsgSize { + return 0, &Error{err: "message too large"} + } + l := make([]byte, 2, 2+lm) + binary.BigEndian.PutUint16(l, uint16(lm)) + m = append(l, m...) + + n, err := io.Copy(w.tcp, bytes.NewReader(m)) + return int(n), err + } + panic("not reached") +} + +// LocalAddr implements the ResponseWriter.LocalAddr method. +func (w *response) LocalAddr() net.Addr { + if w.tcp != nil { + return w.tcp.LocalAddr() + } + return w.udp.LocalAddr() +} + +// RemoteAddr implements the ResponseWriter.RemoteAddr method. +func (w *response) RemoteAddr() net.Addr { return w.remoteAddr } + +// TsigStatus implements the ResponseWriter.TsigStatus method. +func (w *response) TsigStatus() error { return w.tsigStatus } + +// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. +func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } + +// Hijack implements the ResponseWriter.Hijack method. +func (w *response) Hijack() { w.hijacked = true } + +// Close implements the ResponseWriter.Close method +func (w *response) Close() error { + // Can't close the udp conn, as that is actually the listener. + if w.tcp != nil { + e := w.tcp.Close() + w.tcp = nil + return e + } + return nil +} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go new file mode 100644 index 0000000000..f31e9e6843 --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0.go @@ -0,0 +1,218 @@ +package dns + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "encoding/binary" + "math/big" + "strings" + "time" +) + +// Sign signs a dns.Msg. It fills the signature with the appropriate data. +// The SIG record should have the SignerName, KeyTag, Algorithm, Inception +// and Expiration set. +func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { + if k == nil { + return nil, ErrPrivKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return nil, ErrKey + } + rr.Header().Rrtype = TypeSIG + rr.Header().Class = ClassANY + rr.Header().Ttl = 0 + rr.Header().Name = "." + rr.OrigTtl = 0 + rr.TypeCovered = 0 + rr.Labels = 0 + + buf := make([]byte, m.Len()+rr.len()) + mbuf, err := m.PackBuffer(buf) + if err != nil { + return nil, err + } + if &buf[0] != &mbuf[0] { + return nil, ErrBuf + } + off, err := PackRR(rr, buf, len(mbuf), nil, false) + if err != nil { + return nil, err + } + buf = buf[:off:cap(buf)] + + hash, ok := AlgorithmToHash[rr.Algorithm] + if !ok { + return nil, ErrAlg + } + + hasher := hash.New() + // Write SIG rdata + hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) + // Write message + hasher.Write(buf[:len(mbuf)]) + + signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) + if err != nil { + return nil, err + } + + rr.Signature = toBase64(signature) + + buf = append(buf, signature...) + if len(buf) > int(^uint16(0)) { + return nil, ErrBuf + } + // Adjust sig data length + rdoff := len(mbuf) + 1 + 2 + 2 + 4 + rdlen := binary.BigEndian.Uint16(buf[rdoff:]) + rdlen += uint16(len(signature)) + binary.BigEndian.PutUint16(buf[rdoff:], rdlen) + // Adjust additional count + adc := binary.BigEndian.Uint16(buf[10:]) + adc++ + binary.BigEndian.PutUint16(buf[10:], adc) + return buf, nil +} + +// Verify validates the message buf using the key k. 
+// It's assumed that buf is a valid message from which rr was unpacked. +func (rr *SIG) Verify(k *KEY, buf []byte) error { + if k == nil { + return ErrKey + } + if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { + return ErrKey + } + + var hash crypto.Hash + switch rr.Algorithm { + case DSA, RSASHA1: + hash = crypto.SHA1 + case RSASHA256, ECDSAP256SHA256: + hash = crypto.SHA256 + case ECDSAP384SHA384: + hash = crypto.SHA384 + case RSASHA512: + hash = crypto.SHA512 + default: + return ErrAlg + } + hasher := hash.New() + + buflen := len(buf) + qdc := binary.BigEndian.Uint16(buf[4:]) + anc := binary.BigEndian.Uint16(buf[6:]) + auc := binary.BigEndian.Uint16(buf[8:]) + adc := binary.BigEndian.Uint16(buf[10:]) + offset := 12 + var err error + for i := uint16(0); i < qdc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type and Class + offset += 2 + 2 + } + for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip past Type, Class and TTL + offset += 2 + 2 + 4 + if offset+1 >= buflen { + continue + } + var rdlen uint16 + rdlen = binary.BigEndian.Uint16(buf[offset:]) + offset += 2 + offset += int(rdlen) + } + if offset >= buflen { + return &Error{err: "overflowing unpacking signed message"} + } + + // offset should be just prior to SIG + bodyend := offset + // owner name SHOULD be root + _, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // Skip Type, Class, TTL, RDLen + offset += 2 + 2 + 4 + 2 + sigstart := offset + // Skip Type Covered, Algorithm, Labels, Original TTL + offset += 2 + 1 + 1 + 4 + if offset+4+4 >= buflen { + return &Error{err: "overflow unpacking signed message"} + } + expire := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + incept := binary.BigEndian.Uint32(buf[offset:]) + offset += 4 + now := uint32(time.Now().Unix()) + if now < incept || now > expire { + return ErrTime + } + // Skip key tag + offset += 2 + var signername string + signername, offset, err = UnpackDomainName(buf, offset) + if err != nil { + return err + } + // If key has come from the DNS name compression might + // have mangled the case of the name + if strings.ToLower(signername) != strings.ToLower(k.Header().Name) { + return &Error{err: "signer name doesn't match key name"} + } + sigend := offset + hasher.Write(buf[sigstart:sigend]) + hasher.Write(buf[:10]) + hasher.Write([]byte{ + byte((adc - 1) << 8), + byte(adc - 1), + }) + hasher.Write(buf[12:bodyend]) + + hashed := hasher.Sum(nil) + sig := buf[sigend:] + switch k.Algorithm { + case DSA: + pk := k.publicKeyDSA() + sig = sig[1:] + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if dsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + case RSASHA1, RSASHA256, RSASHA512: + pk := k.publicKeyRSA() + if pk != nil { + return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) + } + case ECDSAP256SHA256, ECDSAP384SHA384: + pk := k.publicKeyECDSA() + r := big.NewInt(0) + r.SetBytes(sig[:len(sig)/2]) + s := big.NewInt(0) + s.SetBytes(sig[len(sig)/2:]) + if pk != nil { + if ecdsa.Verify(pk, hashed, r, s) { + return nil + } + return ErrSig + } + } + return ErrKeyAlg +} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go new file mode 100644 index 0000000000..9573c7d0b8 --- /dev/null +++ 
b/vendor/github.com/miekg/dns/singleinflight.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Adapted for dns package usage by Miek Gieben. + +package dns + +import "sync" +import "time" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + val *Msg + rtt time.Duration + err error + dups int +} + +// singleflight represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type singleflight struct { + sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { + g.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.Unlock() + c.wg.Wait() + return c.val, c.rtt, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.Unlock() + + c.val, c.rtt, c.err = fn() + c.wg.Done() + + g.Lock() + delete(g.m, key) + g.Unlock() + + return c.val, c.rtt, c.err, c.dups > 0 +} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go new file mode 100644 index 0000000000..4e7ded4b38 --- /dev/null +++ b/vendor/github.com/miekg/dns/smimea.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" +) + +// Sign creates a SMIMEA record from an SSL certificate. +func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { + r.Hdr.Rrtype = TypeSMIMEA + r.Usage = uint8(usage) + r.Selector = uint8(selector) + r.MatchingType = uint8(matchingType) + + r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err + } + return nil +} + +// Verify verifies a SMIMEA record against an SSL certificate. If it is OK +// a nil error is returned. +func (r *SMIMEA) Verify(cert *x509.Certificate) error { + c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) + if err != nil { + return err // Not also ErrSig? + } + if r.Certificate == c { + return nil + } + return ErrSig // ErrSig, really? +} + +// SMIMEAName returns the ownername of a SMIMEA resource record as per the +// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 +func SMIMEAName(email, domain string) (string, error) { + hasher := sha256.New() + hasher.Write([]byte(email)) + + // RFC Section 3: "The local-part is hashed using the SHA2-256 + // algorithm with the hash truncated to 28 octets and + // represented in its hexadecimal representation to become the + // left-most label in the prepared domain name" + return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil +} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go new file mode 100644 index 0000000000..431e2fb5af --- /dev/null +++ b/vendor/github.com/miekg/dns/tlsa.go @@ -0,0 +1,47 @@ +package dns + +import ( + "crypto/x509" + "net" + "strconv" +) + +// Sign creates a TLSA record from an SSL certificate. 
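+// A minimal sketch (an editor's illustration; cert is assumed to be the
+// service's parsed *x509.Certificate):
+//
+//	r := new(TLSA)
+//	if err := r.Sign(3, 1, 1, cert); err != nil {
+//		// handle the error
+//	}
+//
+// 3, 1, 1 selects the DANE-EE usage, the SubjectPublicKeyInfo selector and
+// the SHA2-256 matching type (RFC 6698).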
+func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) {
+ r.Hdr.Rrtype = TypeTLSA
+ r.Usage = uint8(usage)
+ r.Selector = uint8(selector)
+ r.MatchingType = uint8(matchingType)
+
+ r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// Verify verifies a TLSA record against an SSL certificate. If it is OK
+// a nil error is returned.
+func (r *TLSA) Verify(cert *x509.Certificate) error {
+ c, err := CertificateToDANE(r.Selector, r.MatchingType, cert)
+ if err != nil {
+ return err // Not also ErrSig?
+ }
+ if r.Certificate == c {
+ return nil
+ }
+ return ErrSig // ErrSig, really?
+}
+
+// TLSAName returns the ownername of a TLSA resource record as per the
+// rules specified in RFC 6698, Section 3.
+func TLSAName(name, service, network string) (string, error) {
+ if !IsFqdn(name) {
+ return "", ErrFqdn
+ }
+ p, err := net.LookupPort(network, service)
+ if err != nil {
+ return "", err
+ }
+ return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil
+}
diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go
new file mode 100644
index 0000000000..4837b4ab1f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/tsig.go
@@ -0,0 +1,386 @@
+package dns
+
+import (
+ "crypto/hmac"
+ "crypto/md5"
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "encoding/binary"
+ "encoding/hex"
+ "hash"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// HMAC hashing codes. These are transmitted as domain names.
+const (
+ HmacMD5 = "hmac-md5.sig-alg.reg.int."
+ HmacSHA1 = "hmac-sha1."
+ HmacSHA256 = "hmac-sha256."
+ HmacSHA512 = "hmac-sha512."
+)
+
+// TSIG is the RR that holds the transaction signature of a message.
+// See RFC 2845 and RFC 4635.
+type TSIG struct {
+ Hdr RR_Header
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+ OrigId uint16
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// TSIG has no official presentation format, but this will suffice.
+
+func (rr *TSIG) String() string {
+ s := "\n;; TSIG PSEUDOSECTION:\n"
+ s += rr.Hdr.String() +
+ " " + rr.Algorithm +
+ " " + tsigTimeToString(rr.TimeSigned) +
+ " " + strconv.Itoa(int(rr.Fudge)) +
+ " " + strconv.Itoa(int(rr.MACSize)) +
+ " " + strings.ToUpper(rr.MAC) +
+ " " + strconv.Itoa(int(rr.OrigId)) +
+ " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR
+ " " + strconv.Itoa(int(rr.OtherLen)) +
+ " " + rr.OtherData
+ return s
+}
+
+// The following values must be put in wireformat, so that the MAC can be calculated.
+// RFC 2845, section 3.4.2. TSIG Variables.
+type tsigWireFmt struct {
+ // From RR_Header
+ Name string `dns:"domain-name"`
+ Class uint16
+ Ttl uint32
+ // Rdata of the TSIG
+ Algorithm string `dns:"domain-name"`
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+ // MACSize, MAC and OrigId excluded
+ Error uint16
+ OtherLen uint16
+ OtherData string `dns:"size-hex:OtherLen"`
+}
+
+// If we have the MAC, use this type to convert it to wiredata. Section 3.4.3. Request MAC
+type macWireFmt struct {
+ MACSize uint16
+ MAC string `dns:"size-hex:MACSize"`
+}
+
+// 3.3. Time values used in TSIG calculations
+type timerWireFmt struct {
+ TimeSigned uint64 `dns:"uint48"`
+ Fudge uint16
+}
+
+// TsigGenerate fills out the TSIG record attached to the message.
+// The message should contain +// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), +// time fudge (defaults to 300 seconds) and the current time +// The TSIG MAC is saved in that Tsig RR. +// When TsigGenerate is called for the first time requestMAC is set to the empty string and +// timersOnly is false. +// If something goes wrong an error is returned, otherwise it is nil. +func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { + if m.IsTsig() == nil { + panic("dns: TSIG not last RR in additional") + } + // If we barf here, the caller is to blame + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return nil, "", err + } + + rr := m.Extra[len(m.Extra)-1].(*TSIG) + m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg + mbuf, err := m.Pack() + if err != nil { + return nil, "", err + } + buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) + + t := new(TSIG) + var h hash.Hash + switch strings.ToLower(rr.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, []byte(rawsecret)) + case HmacSHA1: + h = hmac.New(sha1.New, []byte(rawsecret)) + case HmacSHA256: + h = hmac.New(sha256.New, []byte(rawsecret)) + case HmacSHA512: + h = hmac.New(sha512.New, []byte(rawsecret)) + default: + return nil, "", ErrKeyAlg + } + h.Write(buf) + t.MAC = hex.EncodeToString(h.Sum(nil)) + t.MACSize = uint16(len(t.MAC) / 2) // Size is half! + + t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} + t.Fudge = rr.Fudge + t.TimeSigned = rr.TimeSigned + t.Algorithm = rr.Algorithm + t.OrigId = m.Id + + tbuf := make([]byte, t.len()) + if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { + tbuf = tbuf[:off] // reset to actual size used + } else { + return nil, "", err + } + mbuf = append(mbuf, tbuf...) + // Update the ArCount directly in the buffer. + binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) + + return mbuf, t.MAC, nil +} + +// TsigVerify verifies the TSIG on a message. +// If the signature does not validate err contains the +// error, otherwise it is nil. +func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { + rawsecret, err := fromBase64([]byte(secret)) + if err != nil { + return err + } + // Strip the TSIG from the incoming msg + stripped, tsig, err := stripTsig(msg) + if err != nil { + return err + } + + msgMAC, err := hex.DecodeString(tsig.MAC) + if err != nil { + return err + } + + buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) + + // Fudge factor works both ways. A message can arrive before it was signed because + // of clock skew. + now := uint64(time.Now().Unix()) + ti := now - tsig.TimeSigned + if now < tsig.TimeSigned { + ti = tsig.TimeSigned - now + } + if uint64(tsig.Fudge) < ti { + return ErrTime + } + + var h hash.Hash + switch strings.ToLower(tsig.Algorithm) { + case HmacMD5: + h = hmac.New(md5.New, rawsecret) + case HmacSHA1: + h = hmac.New(sha1.New, rawsecret) + case HmacSHA256: + h = hmac.New(sha256.New, rawsecret) + case HmacSHA512: + h = hmac.New(sha512.New, rawsecret) + default: + return ErrKeyAlg + } + h.Write(buf) + if !hmac.Equal(h.Sum(nil), msgMAC) { + return ErrSig + } + return nil +} + +// Create a wiredata buffer for the MAC calculation. +func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { + var buf []byte + if rr.TimeSigned == 0 { + rr.TimeSigned = uint64(time.Now().Unix()) + } + if rr.Fudge == 0 { + rr.Fudge = 300 // Standard (RFC) default. 
+ } + + // Replace message ID in header with original ID from TSIG + binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) + + if requestMAC != "" { + m := new(macWireFmt) + m.MACSize = uint16(len(requestMAC) / 2) + m.MAC = requestMAC + buf = make([]byte, len(requestMAC)) // long enough + n, _ := packMacWire(m, buf) + buf = buf[:n] + } + + tsigvar := make([]byte, DefaultMsgSize) + if timersOnly { + tsig := new(timerWireFmt) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + n, _ := packTimerWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } else { + tsig := new(tsigWireFmt) + tsig.Name = strings.ToLower(rr.Hdr.Name) + tsig.Class = ClassANY + tsig.Ttl = rr.Hdr.Ttl + tsig.Algorithm = strings.ToLower(rr.Algorithm) + tsig.TimeSigned = rr.TimeSigned + tsig.Fudge = rr.Fudge + tsig.Error = rr.Error + tsig.OtherLen = rr.OtherLen + tsig.OtherData = rr.OtherData + n, _ := packTsigWire(tsig, tsigvar) + tsigvar = tsigvar[:n] + } + + if requestMAC != "" { + x := append(buf, msgbuf...) + buf = append(x, tsigvar...) + } else { + buf = append(msgbuf, tsigvar...) + } + return buf +} + +// Strip the TSIG from the raw message. +func stripTsig(msg []byte) ([]byte, *TSIG, error) { + // Copied from msg.go's Unpack() Header, but modified. + var ( + dh Header + err error + ) + off, tsigoff := 0, 0 + + if dh, off, err = unpackMsgHdr(msg, off); err != nil { + return nil, nil, err + } + if dh.Arcount == 0 { + return nil, nil, ErrNoSig + } + + // Rcode, see msg.go Unpack() + if int(dh.Bits&0xF) == RcodeNotAuth { + return nil, nil, ErrAuth + } + + for i := 0; i < int(dh.Qdcount); i++ { + _, off, err = unpackQuestion(msg, off) + if err != nil { + return nil, nil, err + } + } + + _, off, err = unpackRRslice(int(dh.Ancount), msg, off) + if err != nil { + return nil, nil, err + } + _, off, err = unpackRRslice(int(dh.Nscount), msg, off) + if err != nil { + return nil, nil, err + } + + rr := new(TSIG) + var extra RR + for i := 0; i < int(dh.Arcount); i++ { + tsigoff = off + extra, off, err = UnpackRR(msg, off) + if err != nil { + return nil, nil, err + } + if extra.Header().Rrtype == TypeTSIG { + rr = extra.(*TSIG) + // Adjust Arcount. + arcount := binary.BigEndian.Uint16(msg[10:]) + binary.BigEndian.PutUint16(msg[10:], arcount-1) + break + } + } + if rr == nil { + return nil, nil, ErrNoSig + } + return msg[:tsigoff], rr, nil +} + +// Translate the TSIG time signed into a date. There is no +// need for RFC1982 calculations as this date is 48 bits. 
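+// For example, a time signed of 0 formats as "19700101000000" (UTC).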
+func tsigTimeToString(t uint64) string { + ti := time.Unix(int64(t), 0).UTC() + return ti.Format("20060102150405") +} + +func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { + // copied from zmsg.go TSIG packing + // RR_Header + off, err := PackDomainName(tw.Name, msg, 0, nil, false) + if err != nil { + return off, err + } + off, err = packUint16(tw.Class, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(tw.Ttl, msg, off) + if err != nil { + return off, err + } + + off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) + if err != nil { + return off, err + } + off, err = packUint48(tw.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + + off, err = packUint16(tw.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(tw.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(tw.OtherData, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packMacWire(mw *macWireFmt, msg []byte) (int, error) { + off, err := packUint16(mw.MACSize, msg, 0) + if err != nil { + return off, err + } + off, err = packStringHex(mw.MAC, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { + off, err := packUint48(tw.TimeSigned, msg, 0) + if err != nil { + return off, err + } + off, err = packUint16(tw.Fudge, msg, off) + if err != nil { + return off, err + } + return off, nil +} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go new file mode 100644 index 0000000000..a779ca8abc --- /dev/null +++ b/vendor/github.com/miekg/dns/types.go @@ -0,0 +1,1381 @@ +package dns + +import ( + "fmt" + "net" + "strconv" + "strings" + "time" +) + +type ( + // Type is a DNS type. + Type uint16 + // Class is a DNS class. + Class uint16 + // Name is a DNS domain name. + Name string +) + +// Packet formats + +// Wire constants and supported types. 
+const ( + // valid RR_Header.Rrtype and Question.qtype + + TypeNone uint16 = 0 + TypeA uint16 = 1 + TypeNS uint16 = 2 + TypeMD uint16 = 3 + TypeMF uint16 = 4 + TypeCNAME uint16 = 5 + TypeSOA uint16 = 6 + TypeMB uint16 = 7 + TypeMG uint16 = 8 + TypeMR uint16 = 9 + TypeNULL uint16 = 10 + TypePTR uint16 = 12 + TypeHINFO uint16 = 13 + TypeMINFO uint16 = 14 + TypeMX uint16 = 15 + TypeTXT uint16 = 16 + TypeRP uint16 = 17 + TypeAFSDB uint16 = 18 + TypeX25 uint16 = 19 + TypeISDN uint16 = 20 + TypeRT uint16 = 21 + TypeNSAPPTR uint16 = 23 + TypeSIG uint16 = 24 + TypeKEY uint16 = 25 + TypePX uint16 = 26 + TypeGPOS uint16 = 27 + TypeAAAA uint16 = 28 + TypeLOC uint16 = 29 + TypeNXT uint16 = 30 + TypeEID uint16 = 31 + TypeNIMLOC uint16 = 32 + TypeSRV uint16 = 33 + TypeATMA uint16 = 34 + TypeNAPTR uint16 = 35 + TypeKX uint16 = 36 + TypeCERT uint16 = 37 + TypeDNAME uint16 = 39 + TypeOPT uint16 = 41 // EDNS + TypeDS uint16 = 43 + TypeSSHFP uint16 = 44 + TypeRRSIG uint16 = 46 + TypeNSEC uint16 = 47 + TypeDNSKEY uint16 = 48 + TypeDHCID uint16 = 49 + TypeNSEC3 uint16 = 50 + TypeNSEC3PARAM uint16 = 51 + TypeTLSA uint16 = 52 + TypeSMIMEA uint16 = 53 + TypeHIP uint16 = 55 + TypeNINFO uint16 = 56 + TypeRKEY uint16 = 57 + TypeTALINK uint16 = 58 + TypeCDS uint16 = 59 + TypeCDNSKEY uint16 = 60 + TypeOPENPGPKEY uint16 = 61 + TypeCSYNC uint16 = 62 + TypeSPF uint16 = 99 + TypeUINFO uint16 = 100 + TypeUID uint16 = 101 + TypeGID uint16 = 102 + TypeUNSPEC uint16 = 103 + TypeNID uint16 = 104 + TypeL32 uint16 = 105 + TypeL64 uint16 = 106 + TypeLP uint16 = 107 + TypeEUI48 uint16 = 108 + TypeEUI64 uint16 = 109 + TypeURI uint16 = 256 + TypeCAA uint16 = 257 + TypeAVC uint16 = 258 + + TypeTKEY uint16 = 249 + TypeTSIG uint16 = 250 + + // valid Question.Qtype only + TypeIXFR uint16 = 251 + TypeAXFR uint16 = 252 + TypeMAILB uint16 = 253 + TypeMAILA uint16 = 254 + TypeANY uint16 = 255 + + TypeTA uint16 = 32768 + TypeDLV uint16 = 32769 + TypeReserved uint16 = 65535 + + // valid Question.Qclass + ClassINET = 1 + ClassCSNET = 2 + ClassCHAOS = 3 + ClassHESIOD = 4 + ClassNONE = 254 + ClassANY = 255 + + // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml + RcodeSuccess = 0 // NoError - No Error [DNS] + RcodeFormatError = 1 // FormErr - Format Error [DNS] + RcodeServerFailure = 2 // ServFail - Server Failure [DNS] + RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] + RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] + RcodeRefused = 5 // Refused - Query Refused [DNS] + RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] + RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] + RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] + RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] + RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] + RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] + RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] + RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] + RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] + RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] + RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] + + // Message Opcodes. There is no 3. 
+ OpcodeQuery = 0 + OpcodeIQuery = 1 + OpcodeStatus = 2 + OpcodeNotify = 4 + OpcodeUpdate = 5 +) + +// Header is the wire format for the DNS packet header. +type Header struct { + Id uint16 + Bits uint16 + Qdcount, Ancount, Nscount, Arcount uint16 +} + +const ( + headerSize = 12 + + // Header.Bits + _QR = 1 << 15 // query/response (response=1) + _AA = 1 << 10 // authoritative + _TC = 1 << 9 // truncated + _RD = 1 << 8 // recursion desired + _RA = 1 << 7 // recursion available + _Z = 1 << 6 // Z + _AD = 1 << 5 // authenticated data + _CD = 1 << 4 // checking disabled +) + +// Various constants used in the LOC RR, See RFC 1876. +const ( + LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. + LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. + LOC_HOURS = 60 * 1000 + LOC_DEGREES = 60 * LOC_HOURS + LOC_ALTITUDEBASE = 100000 +) + +// Different Certificate Types, see RFC 4398, Section 2.1 +const ( + CertPKIX = 1 + iota + CertSPKI + CertPGP + CertIPIX + CertISPKI + CertIPGP + CertACPKIX + CertIACPKIX + CertURI = 253 + CertOID = 254 +) + +// CertTypeToString converts the Cert Type to its string representation. +// See RFC 4398 and RFC 6944. +var CertTypeToString = map[uint16]string{ + CertPKIX: "PKIX", + CertSPKI: "SPKI", + CertPGP: "PGP", + CertIPIX: "IPIX", + CertISPKI: "ISPKI", + CertIPGP: "IPGP", + CertACPKIX: "ACPKIX", + CertIACPKIX: "IACPKIX", + CertURI: "URI", + CertOID: "OID", +} + +// StringToCertType is the reverse of CertTypeToString. +var StringToCertType = reverseInt16(CertTypeToString) + +//go:generate go run types_generate.go + +// Question holds a DNS question. There can be multiple questions in the +// question section of a message. Usually there is just one. +type Question struct { + Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) + Qtype uint16 + Qclass uint16 +} + +func (q *Question) len() int { + return len(q.Name) + 1 + 2 + 2 +} + +func (q *Question) String() (s string) { + // prefix with ; (as in dig) + s = ";" + sprintName(q.Name) + "\t" + s += Class(q.Qclass).String() + "\t" + s += " " + Type(q.Qtype).String() + return s +} + +// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY +// is named "*" there. +type ANY struct { + Hdr RR_Header + // Does not have any rdata +} + +func (rr *ANY) String() string { return rr.Hdr.String() } + +// CNAME RR. See RFC 1034. +type CNAME struct { + Hdr RR_Header + Target string `dns:"cdomain-name"` +} + +func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } + +// HINFO RR. See RFC 1034. +type HINFO struct { + Hdr RR_Header + Cpu string + Os string +} + +func (rr *HINFO) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) +} + +// MB RR. See RFC 1035. +type MB struct { + Hdr RR_Header + Mb string `dns:"cdomain-name"` +} + +func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } + +// MG RR. See RFC 1035. +type MG struct { + Hdr RR_Header + Mg string `dns:"cdomain-name"` +} + +func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } + +// MINFO RR. See RFC 1035. +type MINFO struct { + Hdr RR_Header + Rmail string `dns:"cdomain-name"` + Email string `dns:"cdomain-name"` +} + +func (rr *MINFO) String() string { + return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) +} + +// MR RR. See RFC 1035. +type MR struct { + Hdr RR_Header + Mr string `dns:"cdomain-name"` +} + +func (rr *MR) String() string { + return rr.Hdr.String() + sprintName(rr.Mr) +}
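// Two equivalent ways to construct the record types defined in this file:
// parse the zone-file presentation format with NewRR, or fill in the struct
// directly. A short sketch using MX (names and values are placeholders):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.com. 3600 IN MX 10 mail.example.com.")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr) // printed via the (*MX).String() method shown in this file

	mx := &dns.MX{
		Hdr:        dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600},
		Preference: 10,
		Mx:         "mail.example.com.",
	}
	fmt.Println(mx)
}

+// MF RR. See RFC 1035.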
+type MF struct { + Hdr RR_Header + Mf string `dns:"cdomain-name"` +} + +func (rr *MF) String() string { + return rr.Hdr.String() + sprintName(rr.Mf) +} + +// MD RR. See RFC 1035. +type MD struct { + Hdr RR_Header + Md string `dns:"cdomain-name"` +} + +func (rr *MD) String() string { + return rr.Hdr.String() + sprintName(rr.Md) +} + +// MX RR. See RFC 1035. +type MX struct { + Hdr RR_Header + Preference uint16 + Mx string `dns:"cdomain-name"` +} + +func (rr *MX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) +} + +// AFSDB RR. See RFC 1183. +type AFSDB struct { + Hdr RR_Header + Subtype uint16 + Hostname string `dns:"cdomain-name"` +} + +func (rr *AFSDB) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) +} + +// X25 RR. See RFC 1183, Section 3.1. +type X25 struct { + Hdr RR_Header + PSDNAddress string +} + +func (rr *X25) String() string { + return rr.Hdr.String() + rr.PSDNAddress +} + +// RT RR. See RFC 1183, Section 3.3. +type RT struct { + Hdr RR_Header + Preference uint16 + Host string `dns:"cdomain-name"` +} + +func (rr *RT) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) +} + +// NS RR. See RFC 1035. +type NS struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` +} + +func (rr *NS) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) +} + +// PTR RR. See RFC 1035. +type PTR struct { + Hdr RR_Header + Ptr string `dns:"cdomain-name"` +} + +func (rr *PTR) String() string { + return rr.Hdr.String() + sprintName(rr.Ptr) +} + +// RP RR. See RFC 1183, Section 2.2. +type RP struct { + Hdr RR_Header + Mbox string `dns:"domain-name"` + Txt string `dns:"domain-name"` +} + +func (rr *RP) String() string { + return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) +} + +// SOA RR. See RFC 1035. +type SOA struct { + Hdr RR_Header + Ns string `dns:"cdomain-name"` + Mbox string `dns:"cdomain-name"` + Serial uint32 + Refresh uint32 + Retry uint32 + Expire uint32 + Minttl uint32 +} + +func (rr *SOA) String() string { + return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + + " " + strconv.FormatInt(int64(rr.Serial), 10) + + " " + strconv.FormatInt(int64(rr.Refresh), 10) + + " " + strconv.FormatInt(int64(rr.Retry), 10) + + " " + strconv.FormatInt(int64(rr.Expire), 10) + + " " + strconv.FormatInt(int64(rr.Minttl), 10) +} + +// TXT RR. See RFC 1035. +type TXT struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +func sprintName(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.' { + dst = append(dst, b) + } else { + dst = appendDomainNameByte(dst, b) + } + i += n + } + } + return string(dst) +} + +func sprintTxtOctet(s string) string { + src := []byte(s) + dst := make([]byte, 0, len(src)) + dst = append(dst, '"') + for i := 0; i < len(src); { + if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' { + dst = append(dst, src[i:i+2]...) + i += 2 + } else { + b, n := nextByte(src, i) + if n == 0 { + i++ // dangling back slash + } else if b == '.'
{ + dst = append(dst, b) + } else { + if b < ' ' || b > '~' { + dst = appendByte(dst, b) + } else { + dst = append(dst, b) + } + } + i += n + } + } + dst = append(dst, '"') + return string(dst) +} + +func sprintTxt(txt []string) string { + var out []byte + for i, s := range txt { + if i > 0 { + out = append(out, ` "`...) + } else { + out = append(out, '"') + } + bs := []byte(s) + for j := 0; j < len(bs); { + b, n := nextByte(bs, j) + if n == 0 { + break + } + out = appendTXTStringByte(out, b) + j += n + } + out = append(out, '"') + } + return string(out) +} + +func appendDomainNameByte(s []byte, b byte) []byte { + switch b { + case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape + return append(s, '\\', b) + } + return appendTXTStringByte(s, b) +} + +func appendTXTStringByte(s []byte, b byte) []byte { + switch b { + case '"', '\\': + return append(s, '\\', b) + } + if b < ' ' || b > '~' { + return appendByte(s, b) + } + return append(s, b) +} + +func appendByte(s []byte, b byte) []byte { + var buf [3]byte + bufs := strconv.AppendInt(buf[:0], int64(b), 10) + s = append(s, '\\') + for i := 0; i < 3-len(bufs); i++ { + s = append(s, '0') + } + for _, r := range bufs { + s = append(s, r) + } + return s +} + +func nextByte(b []byte, offset int) (byte, int) { + if offset >= len(b) { + return 0, 0 + } + if b[offset] != '\\' { + // not an escape sequence + return b[offset], 1 + } + switch len(b) - offset { + case 1: // dangling escape + return 0, 0 + case 2, 3: // too short to be \ddd + default: // maybe \ddd + if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) { + return dddToByte(b[offset+1:]), 4 + } + } + // not \ddd, just an RFC 1035 "quoted" character + return b[offset+1], 2 +} + +// SPF RR. See RFC 4408, Section 3.1.1. +type SPF struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. +type AVC struct { + Hdr RR_Header + Txt []string `dns:"txt"` +} + +func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } + +// SRV RR. See RFC 2782. +type SRV struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Port uint16 + Target string `dns:"domain-name"` +} + +func (rr *SRV) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Priority)) + " " + + strconv.Itoa(int(rr.Weight)) + " " + + strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) +} + +// NAPTR RR. See RFC 2915. +type NAPTR struct { + Hdr RR_Header + Order uint16 + Preference uint16 + Flags string + Service string + Regexp string + Replacement string `dns:"domain-name"` +} + +func (rr *NAPTR) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Order)) + " " + + strconv.Itoa(int(rr.Preference)) + " " + + "\"" + rr.Flags + "\" " + + "\"" + rr.Service + "\" " + + "\"" + rr.Regexp + "\" " + + rr.Replacement +} + +// CERT RR. See RFC 4398. 
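// The sprint*/append* helpers above implement RFC 1035 \DDD escaping for
// presentation-format output. The effect is easiest to see on a TXT record
// carrying bytes outside the printable range. A minimal sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	txt := &dns.TXT{
		Hdr: dns.RR_Header{Name: "example.com.", Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 300},
		Txt: []string{"plain", "tab\there", "quote\"here"},
	}
	// The tab comes out as \009 and the literal quote as \" in the output.
	fmt.Println(txt)
}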
+type CERT struct { + Hdr RR_Header + Type uint16 + KeyTag uint16 + Algorithm uint8 + Certificate string `dns:"base64"` +} + +func (rr *CERT) String() string { + var ( + ok bool + certtype, algorithm string + ) + if certtype, ok = CertTypeToString[rr.Type]; !ok { + certtype = strconv.Itoa(int(rr.Type)) + } + if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { + algorithm = strconv.Itoa(int(rr.Algorithm)) + } + return rr.Hdr.String() + certtype + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + algorithm + + " " + rr.Certificate +} + +// DNAME RR. See RFC 2672. +type DNAME struct { + Hdr RR_Header + Target string `dns:"domain-name"` +} + +func (rr *DNAME) String() string { + return rr.Hdr.String() + sprintName(rr.Target) +} + +// A RR. See RFC 1035. +type A struct { + Hdr RR_Header + A net.IP `dns:"a"` +} + +func (rr *A) String() string { + if rr.A == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.A.String() +} + +// AAAA RR. See RFC 3596. +type AAAA struct { + Hdr RR_Header + AAAA net.IP `dns:"aaaa"` +} + +func (rr *AAAA) String() string { + if rr.AAAA == nil { + return rr.Hdr.String() + } + return rr.Hdr.String() + rr.AAAA.String() +} + +// PX RR. See RFC 2163. +type PX struct { + Hdr RR_Header + Preference uint16 + Map822 string `dns:"domain-name"` + Mapx400 string `dns:"domain-name"` +} + +func (rr *PX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) +} + +// GPOS RR. See RFC 1712. +type GPOS struct { + Hdr RR_Header + Longitude string + Latitude string + Altitude string +} + +func (rr *GPOS) String() string { + return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude +} + +// LOC RR. See RFC 1876. +type LOC struct { + Hdr RR_Header + Version uint8 + Size uint8 + HorizPre uint8 + VertPre uint8 + Latitude uint32 + Longitude uint32 + Altitude uint32 +} + +// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent +// format and returns a string in m (two decimals for the cm) +func cmToM(m, e uint8) string { + if e < 2 { + if e == 1 { + m *= 10 + } + + return fmt.Sprintf("0.%02d", m) + } + + s := fmt.Sprintf("%d", m) + for e > 2 { + s += "0" + e-- + } + return s +} + +func (rr *LOC) String() string { + s := rr.Hdr.String() + + lat := rr.Latitude + ns := "N" + if lat > LOC_EQUATOR { + lat = lat - LOC_EQUATOR + } else { + ns = "S" + lat = LOC_EQUATOR - lat + } + h := lat / LOC_DEGREES + lat = lat % LOC_DEGREES + m := lat / LOC_HOURS + lat = lat % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lat) / 1000), ns) + + lon := rr.Longitude + ew := "E" + if lon > LOC_PRIMEMERIDIAN { + lon = lon - LOC_PRIMEMERIDIAN + } else { + ew = "W" + lon = LOC_PRIMEMERIDIAN - lon + } + h = lon / LOC_DEGREES + lon = lon % LOC_DEGREES + m = lon / LOC_HOURS + lon = lon % LOC_HOURS + s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, (float64(lon) / 1000), ew) + + var alt = float64(rr.Altitude) / 100 + alt -= LOC_ALTITUDEBASE + if rr.Altitude%100 != 0 { + s += fmt.Sprintf("%.2fm ", alt) + } else { + s += fmt.Sprintf("%.0fm ", alt) + } + + s += cmToM((rr.Size&0xf0)>>4, rr.Size&0x0f) + "m " + s += cmToM((rr.HorizPre&0xf0)>>4, rr.HorizPre&0x0f) + "m " + s += cmToM((rr.VertPre&0xf0)>>4, rr.VertPre&0x0f) + "m" + + return s +} + +// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. +type SIG struct { + RRSIG +}
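// The Size/HorizPre/VertPre octets in LOC pack a decimal mantissa in the high
// nibble and a power-of-ten exponent (in centimeters) in the low nibble,
// which is what cmToM above renders. A standalone re-derivation of the same
// rule (decodeLocSize is a hypothetical name, not part of the package):

package main

import "fmt"

// decodeLocSize returns the value in centimeters encoded by one LOC octet.
func decodeLocSize(b uint8) uint64 {
	mantissa := uint64(b >> 4) // 0..9 per RFC 1876
	exponent := uint(b & 0x0f) // power of ten
	v := mantissa
	for i := uint(0); i < exponent; i++ {
		v *= 10
	}
	return v
}

func main() {
	fmt.Println(decodeLocSize(0x12)) // 1 * 10^2 cm = 1 m, the RFC 1876 default SIZE
	fmt.Println(decodeLocSize(0x35)) // 3 * 10^5 cm = 3 km
}

+// RRSIG RR. See RFC 4034 and RFC 3755.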
+type RRSIG struct { + Hdr RR_Header + TypeCovered uint16 + Algorithm uint8 + Labels uint8 + OrigTtl uint32 + Expiration uint32 + Inception uint32 + KeyTag uint16 + SignerName string `dns:"domain-name"` + Signature string `dns:"base64"` +} + +func (rr *RRSIG) String() string { + s := rr.Hdr.String() + s += Type(rr.TypeCovered).String() + s += " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Labels)) + + " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + + " " + TimeToString(rr.Expiration) + + " " + TimeToString(rr.Inception) + + " " + strconv.Itoa(int(rr.KeyTag)) + + " " + sprintName(rr.SignerName) + + " " + rr.Signature + return s +} + +// NSEC RR. See RFC 4034 and RFC 3755. +type NSEC struct { + Hdr RR_Header + NextDomain string `dns:"domain-name"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC) String() string { + s := rr.Hdr.String() + sprintName(rr.NextDomain) + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC) len() int { + l := rr.Hdr.len() + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// DLV RR. See RFC 4431. +type DLV struct{ DS } + +// CDS RR. See RFC 7344. +type CDS struct{ DS } + +// DS RR. See RFC 4034 and RFC 3658. +type DS struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *DS) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// KX RR. See RFC 2230. +type KX struct { + Hdr RR_Header + Preference uint16 + Exchanger string `dns:"domain-name"` +} + +func (rr *KX) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + sprintName(rr.Exchanger) +} + +// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. +type TA struct { + Hdr RR_Header + KeyTag uint16 + Algorithm uint8 + DigestType uint8 + Digest string `dns:"hex"` +} + +func (rr *TA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.DigestType)) + + " " + strings.ToUpper(rr.Digest) +} + +// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. +type TALINK struct { + Hdr RR_Header + PreviousName string `dns:"domain-name"` + NextName string `dns:"domain-name"` +} + +func (rr *TALINK) String() string { + return rr.Hdr.String() + + sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) +} + +// SSHFP RR. See RFC 4255. +type SSHFP struct { + Hdr RR_Header + Algorithm uint8 + Type uint8 + FingerPrint string `dns:"hex"` +} + +func (rr *SSHFP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + + " " + strconv.Itoa(int(rr.Type)) + + " " + strings.ToUpper(rr.FingerPrint) +} + +// KEY RR. See RFC 2535. +type KEY struct { + DNSKEY +} + +// CDNSKEY RR. See RFC 7344. +type CDNSKEY struct { + DNSKEY +} + +// DNSKEY RR. See RFC 4034 and RFC 3755.
+type DNSKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *DNSKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. +type RKEY struct { + Hdr RR_Header + Flags uint16 + Protocol uint8 + Algorithm uint8 + PublicKey string `dns:"base64"` +} + +func (rr *RKEY) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Protocol)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + rr.PublicKey +} + +// NSAPPTR RR. See RFC 1348. +type NSAPPTR struct { + Hdr RR_Header + Ptr string `dns:"domain-name"` +} + +func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } + +// NSEC3 RR. See RFC 5155. +type NSEC3 struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` + HashLength uint8 + NextDomain string `dns:"size-base32:HashLength"` + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *NSEC3) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + + " " + rr.NextDomain + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *NSEC3) len() int { + l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// NSEC3PARAM RR. See RFC 5155. +type NSEC3PARAM struct { + Hdr RR_Header + Hash uint8 + Flags uint8 + Iterations uint16 + SaltLength uint8 + Salt string `dns:"size-hex:SaltLength"` +} + +func (rr *NSEC3PARAM) String() string { + s := rr.Hdr.String() + s += strconv.Itoa(int(rr.Hash)) + + " " + strconv.Itoa(int(rr.Flags)) + + " " + strconv.Itoa(int(rr.Iterations)) + + " " + saltToString(rr.Salt) + return s +} + +// TKEY RR. See RFC 2930. +type TKEY struct { + Hdr RR_Header + Algorithm string `dns:"domain-name"` + Inception uint32 + Expiration uint32 + Mode uint16 + Error uint16 + KeySize uint16 + Key string `dns:"size-hex:KeySize"` + OtherLen uint16 + OtherData string `dns:"size-hex:OtherLen"` +} + +// TKEY has no official presentation format, but this will suffice. +func (rr *TKEY) String() string { + s := "\n;; TKEY PSEUDOSECTION:\n" + s += rr.Hdr.String() + " " + rr.Algorithm + " " + + strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + + strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData + return s +} + +// RFC3597 represents an unknown/generic RR. See RFC 3597. +type RFC3597 struct { + Hdr RR_Header + Rdata string `dns:"hex"` +} + +func (rr *RFC3597) String() string { + // Let's call it a hack + s := rfc3597Header(rr.Hdr) + + s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata + return s +} + +func rfc3597Header(h RR_Header) string { + var s string + + s += sprintName(h.Name) + "\t" + s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" + s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" + s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" + return s +} + +// URI RR. See RFC 7553. 
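// An NSEC3 owner name is the base32-encoded, salted, iterated SHA-1 hash of
// the original name. A minimal sketch, assuming the package's exported
// HashName helper (defined in nsecx.go elsewhere in this vendor drop):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// 10 extra iterations, salt "AABB" (hex, as stored in NSEC3.Salt).
	h := dns.HashName("example.org.", dns.SHA1, 10, "AABB")
	fmt.Println(h) // the base32 label prepended to the zone name
}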
+type URI struct { + Hdr RR_Header + Priority uint16 + Weight uint16 + Target string `dns:"octet"` +} + +func (rr *URI) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + + " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) +} + +// DHCID RR. See RFC 4701. +type DHCID struct { + Hdr RR_Header + Digest string `dns:"base64"` +} + +func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } + +// TLSA RR. See RFC 6698. +type TLSA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *TLSA) String() string { + return rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + " " + rr.Certificate +} + +// SMIMEA RR. See RFC 8162. +type SMIMEA struct { + Hdr RR_Header + Usage uint8 + Selector uint8 + MatchingType uint8 + Certificate string `dns:"hex"` +} + +func (rr *SMIMEA) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.Usage)) + + " " + strconv.Itoa(int(rr.Selector)) + + " " + strconv.Itoa(int(rr.MatchingType)) + + // Every Nth char needs a space on this output. If we output + // this as one giant line, we can't read it back in because in some cases + // the cert length overflows scan.maxTok (2048). + sx := splitN(rr.Certificate, 1024) // conservative value here + s += " " + strings.Join(sx, " ") + return s +} + +// HIP RR. See RFC 8005. +type HIP struct { + Hdr RR_Header + HitLength uint8 + PublicKeyAlgorithm uint8 + PublicKeyLength uint16 + Hit string `dns:"size-hex:HitLength"` + PublicKey string `dns:"size-base64:PublicKeyLength"` + RendezvousServers []string `dns:"domain-name"` +} + +func (rr *HIP) String() string { + s := rr.Hdr.String() + + strconv.Itoa(int(rr.PublicKeyAlgorithm)) + + " " + rr.Hit + + " " + rr.PublicKey + for _, d := range rr.RendezvousServers { + s += " " + sprintName(d) + } + return s +} + +// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. +type NINFO struct { + Hdr RR_Header + ZSData []string `dns:"txt"` +} + +func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } + +// NID RR. See RFC 6742. +type NID struct { + Hdr RR_Header + Preference uint16 + NodeID uint64 +} + +func (rr *NID) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16x", rr.NodeID) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// L32 RR. See RFC 6742. +type L32 struct { + Hdr RR_Header + Preference uint16 + Locator32 net.IP `dns:"a"` +} + +func (rr *L32) String() string { + if rr.Locator32 == nil { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + } + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + + " " + rr.Locator32.String() +} + +// L64 RR. See RFC 6742. +type L64 struct { + Hdr RR_Header + Preference uint16 + Locator64 uint64 +} + +func (rr *L64) String() string { + s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + node := fmt.Sprintf("%0.16X", rr.Locator64) + s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] + return s +} + +// LP RR. See RFC 6742. +type LP struct { + Hdr RR_Header + Preference uint16 + Fqdn string `dns:"domain-name"` +} + +func (rr *LP) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) +}
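// NID and L64 render their 64-bit values as four colon-separated groups of
// hex digits, like an IPv6 interface identifier. The formatting trick used
// by NID.String above in miniature (groupNodeID is a hypothetical name):

package main

import "fmt"

func groupNodeID(id uint64) string {
	n := fmt.Sprintf("%016x", id) // zero-pad to 16 hex digits
	return n[0:4] + ":" + n[4:8] + ":" + n[8:12] + ":" + n[12:16]
}

func main() {
	fmt.Println(groupNodeID(0x00144fff7f8b2306)) // "0014:4fff:7f8b:2306"
}

+// EUI48 RR. See RFC 7043.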
+type EUI48 struct { + Hdr RR_Header + Address uint64 `dns:"uint48"` +} + +func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } + +// EUI64 RR. See RFC 7043. +type EUI64 struct { + Hdr RR_Header + Address uint64 +} + +func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } + +// CAA RR. See RFC 6844. +type CAA struct { + Hdr RR_Header + Flag uint8 + Tag string + Value string `dns:"octet"` +} + +func (rr *CAA) String() string { + return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) +} + +// UID RR. Deprecated, IANA-Reserved. +type UID struct { + Hdr RR_Header + Uid uint32 +} + +func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } + +// GID RR. Deprecated, IANA-Reserved. +type GID struct { + Hdr RR_Header + Gid uint32 +} + +func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } + +// UINFO RR. Deprecated, IANA-Reserved. +type UINFO struct { + Hdr RR_Header + Uinfo string +} + +func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } + +// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type EID struct { + Hdr RR_Header + Endpoint string `dns:"hex"` +} + +func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } + +// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. +type NIMLOC struct { + Hdr RR_Header + Locator string `dns:"hex"` +} + +func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } + +// OPENPGPKEY RR. See RFC 7929. +type OPENPGPKEY struct { + Hdr RR_Header + PublicKey string `dns:"base64"` +} + +func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } + +// CSYNC RR. See RFC 7477. +type CSYNC struct { + Hdr RR_Header + Serial uint32 + Flags uint16 + TypeBitMap []uint16 `dns:"nsec"` +} + +func (rr *CSYNC) String() string { + s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) + + for i := 0; i < len(rr.TypeBitMap); i++ { + s += " " + Type(rr.TypeBitMap[i]).String() + } + return s +} + +func (rr *CSYNC) len() int { + l := rr.Hdr.len() + 4 + 2 + lastwindow := uint32(2 ^ 32 + 1) + for _, t := range rr.TypeBitMap { + window := t / 256 + if uint32(window) != lastwindow { + l += 1 + 32 + } + lastwindow = uint32(window) + } + return l +} + +// TimeToString translates the RRSIG's incep. and expir. times to the +// string representation used when printing the record. +// It takes serial arithmetic (RFC 1982) into account. +func TimeToString(t uint32) string { + mod := ((int64(t) - time.Now().Unix()) / year68) - 1 + if mod < 0 { + mod = 0 + } + ti := time.Unix(int64(t)-(mod*year68), 0).UTC() + return ti.Format("20060102150405") +} + +// StringToTime translates the RRSIG's incep. and expir. times from +// string values like "20110403154150" to a 32 bit integer. +// It takes serial arithmetic (RFC 1982) into account. +func StringToTime(s string) (uint32, error) { + t, err := time.Parse("20060102150405", s) + if err != nil { + return 0, err + } + mod := (t.Unix() / year68) - 1 + if mod < 0 { + mod = 0 + } + return uint32(t.Unix() - (mod * year68)), nil +}
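// TimeToString and StringToTime above deal in RFC 4034 YYYYMMDDHHmmSS
// timestamps, interpreting the 32-bit value relative to the current time per
// RFC 1982, so values that have wrapped past 2106 still print correctly.
// A minimal round-trip sketch:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	t, err := dns.StringToTime("20110403154150")
	if err != nil {
		panic(err)
	}
	fmt.Println(dns.TimeToString(t)) // prints "20110403154150" again
}

+// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty.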
+func saltToString(s string) string { + if len(s) == 0 { + return "-" + } + return strings.ToUpper(s) +} + +func euiToString(eui uint64, bits int) (hex string) { + switch bits { + case 64: + hex = fmt.Sprintf("%16.16x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] + case 48: + hex = fmt.Sprintf("%12.12x", eui) + hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + + "-" + hex[8:10] + "-" + hex[10:12] + } + return +} + +// copyIP returns a copy of ip. +func copyIP(ip net.IP) net.IP { + p := make(net.IP, len(ip)) + copy(p, ip) + return p +} + +// splitN splits a string into N sized string chunks. +// This might become an exported function at some point. +func splitN(s string, n int) []string { + if len(s) < n { + return []string{s} + } + sx := []string{} + p, i := 0, n + for { + if i <= len(s) { + sx = append(sx, s[p:i]) + } else { + sx = append(sx, s[p:]) + break + + } + p, i = p+n, i+n + } + + return sx +} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go new file mode 100644 index 0000000000..8703cce647 --- /dev/null +++ b/vendor/github.com/miekg/dns/types_generate.go @@ -0,0 +1,272 @@ +//+build ignore + +// types_generate.go is meant to run with go generate. It will use +// go/{importer,types} to track down all the RR struct types. Then for each type +// it will generate conversion tables (TypeToRR and TypeToString) and banal +// methods (len, Header, copy) based on the struct tags. The generated source is +// written to ztypes.go, and is meant to be checked into git. +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/importer" + "go/types" + "log" + "os" + "strings" + "text/template" +) + +var skipLen = map[string]struct{}{ + "NSEC": {}, + "NSEC3": {}, + "OPT": {}, + "CSYNC": {}, +} + +var packageHdr = ` +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +` + +var TypeToRR = template.Must(template.New("TypeToRR").Parse(` +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ +{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, +{{end}}{{end}} } + +`)) + +var typeToString = template.Must(template.New("typeToString").Parse(` +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ +{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", +{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", +} + +`)) + +var headerFunc = template.Must(template.New("headerFunc").Parse(` +{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } +{{end}} + +`))
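// What the templates above emit into ztypes.go: constructor and name tables
// keyed by the numeric type codes. Downstream code consumes them like this
// (a minimal sketch):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr := dns.TypeToRR[dns.TypeMX]() // a fresh RR from the generated table
	fmt.Printf("%T\n", rr)           // *dns.MX

	fmt.Println(dns.TypeToString[dns.TypeNSAPPTR]) // "NSAP-PTR", the one irregular name
}

+// getTypeStruct will take a type and the package scope, and return the +// (innermost) struct if the type is considered a RR type (currently defined as +// those structs beginning with a RR_Header, could be redefined as implementing +// the RR interface). The bool return value indicates if embedded structs were +// resolved.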
+func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { + st, ok := t.Underlying().(*types.Struct) + if !ok { + return nil, false + } + if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { + return st, false + } + if st.Field(0).Anonymous() { + st, _ := getTypeStruct(st.Field(0).Type(), scope) + return st, true + } + return nil, false +} + +func main() { + // Import and type-check the package + pkg, err := importer.Default().Import("github.com/miekg/dns") + fatalIfErr(err) + scope := pkg.Scope() + + // Collect constants like TypeX + var numberedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + b, ok := o.Type().(*types.Basic) + if !ok || b.Kind() != types.Uint16 { + continue + } + if !strings.HasPrefix(o.Name(), "Type") { + continue + } + name := strings.TrimPrefix(o.Name(), "Type") + if name == "PrivateRR" { + continue + } + numberedTypes = append(numberedTypes, name) + } + + // Collect actual types (*X) + var namedTypes []string + for _, name := range scope.Names() { + o := scope.Lookup(name) + if o == nil || !o.Exported() { + continue + } + if st, _ := getTypeStruct(o.Type(), scope); st == nil { + continue + } + if name == "PrivateRR" { + continue + } + + // Check if corresponding TypeX exists + if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { + log.Fatalf("Constant Type%s does not exist.", o.Name()) + } + + namedTypes = append(namedTypes, o.Name()) + } + + b := &bytes.Buffer{} + b.WriteString(packageHdr) + + // Generate TypeToRR + fatalIfErr(TypeToRR.Execute(b, namedTypes)) + + // Generate typeToString + fatalIfErr(typeToString.Execute(b, numberedTypes)) + + // Generate headerFunc + fatalIfErr(headerFunc.Execute(b, namedTypes)) + + // Generate len() + fmt.Fprint(b, "// len() functions\n") + for _, name := range namedTypes { + if _, ok := skipLen[name]; ok { + continue + } + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) + fmt.Fprintf(b, "l := rr.Hdr.len()\n") + for i := 1; i < st.NumFields(); i++ { + o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } + + if _, ok := st.Field(i).Type().(*types.Slice); ok { + switch st.Tag(i) { + case `dns:"-"`: + // ignored + case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: + o("for _, x := range rr.%s { l += len(x) + 1 }\n") + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + continue + } + + switch { + case st.Tag(i) == `dns:"-"`: + // ignored + case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: + o("l += len(rr.%s) + 1\n") + case st.Tag(i) == `dns:"octet"`: + o("l += len(rr.%s)\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): + fallthrough + case st.Tag(i) == `dns:"base64"`: + o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored + o("l += len(rr.%s)/2\n") + case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): + fallthrough + case st.Tag(i) == `dns:"hex"`: + o("l += len(rr.%s)/2 + 1\n") + case st.Tag(i) == `dns:"a"`: + o("l += net.IPv4len // %s\n") + case st.Tag(i) == `dns:"aaaa"`: + o("l += net.IPv6len // %s\n") + case st.Tag(i) == `dns:"txt"`: + o("for _, t := range rr.%s { l += len(t) + 1 }\n") + case st.Tag(i) == `dns:"uint48"`: + o("l += 6 // %s\n") + case st.Tag(i) == "": + switch st.Field(i).Type().(*types.Basic).Kind() { + 
case types.Uint8: + o("l++ // %s\n") + case types.Uint16: + o("l += 2 // %s\n") + case types.Uint32: + o("l += 4 // %s\n") + case types.Uint64: + o("l += 8 // %s\n") + case types.String: + o("l += len(rr.%s) + 1\n") + default: + log.Fatalln(name, st.Field(i).Name()) + } + default: + log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) + } + } + fmt.Fprintf(b, "return l }\n") + } + + // Generate copy() + fmt.Fprint(b, "// copy() functions\n") + for _, name := range namedTypes { + o := scope.Lookup(name) + st, isEmbedded := getTypeStruct(o.Type(), scope) + if isEmbedded { + continue + } + fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) + fields := []string{"*rr.Hdr.copyHeader()"} + for i := 1; i < st.NumFields(); i++ { + f := st.Field(i).Name() + if sl, ok := st.Field(i).Type().(*types.Slice); ok { + t := sl.Underlying().String() + t = strings.TrimPrefix(t, "[]") + if strings.Contains(t, ".") { + splits := strings.Split(t, ".") + t = splits[len(splits)-1] + } + fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", + f, t, f, f, f) + fields = append(fields, f) + continue + } + if st.Field(i).Type().String() == "net.IP" { + fields = append(fields, "copyIP(rr."+f+")") + continue + } + fields = append(fields, "rr."+f) + } + fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) + fmt.Fprintf(b, "}\n") + } + + // gofmt + res, err := format.Source(b.Bytes()) + if err != nil { + b.WriteTo(os.Stderr) + log.Fatal(err) + } + + // write result + f, err := os.Create("ztypes.go") + fatalIfErr(err) + defer f.Close() + f.Write(res) +} + +func fatalIfErr(err error) { + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go new file mode 100644 index 0000000000..f3f31a7ac9 --- /dev/null +++ b/vendor/github.com/miekg/dns/udp.go @@ -0,0 +1,89 @@ +// +build !windows + +package dns + +import ( + "net" + + "golang.org/x/net/ipv4" + "golang.org/x/net/ipv6" +) + +// SessionUDP holds the remote address and the associated +// out-of-band data. +type SessionUDP struct { + raddr *net.UDPAddr + context []byte +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + oob := make([]byte, 40) + n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) + if err != nil { + return n, nil, err + } + return n, &SessionUDP{raddr, oob[:oobn]}, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + oob := correctSource(session.context) + n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) + return n, err +} + +func setUDPSocketOptions(conn *net.UDPConn) error { + // Try setting the flags for both families and ignore the errors unless they + // both error. + err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) + err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) + if err6 != nil && err4 != nil { + return err4 + } + return nil +} + +// parseDstFromOOB takes oob data and returns the destination IP. +func parseDstFromOOB(oob []byte) net.IP { + // Start with IPv6 and then fallback to IPv4 + // TODO(fastest963): Figure out a way to prefer one or the other. 
Looking at + // the lvl of the header for a 0 or 41 isn't cross-platform. + var dst net.IP + cm6 := new(ipv6.ControlMessage) + if cm6.Parse(oob) == nil { + dst = cm6.Dst + } + if dst == nil { + cm4 := new(ipv4.ControlMessage) + if cm4.Parse(oob) == nil { + dst = cm4.Dst + } + } + return dst +} + +// correctSource takes oob data and returns new oob data with the Src equal to the Dst +func correctSource(oob []byte) []byte { + dst := parseDstFromOOB(oob) + if dst == nil { + return nil + } + // If the dst is definitely an IPv6, then use ipv6's ControlMessage to + // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 + // addresses. + if dst.To4() == nil { + cm := new(ipv6.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } else { + cm := new(ipv4.ControlMessage) + cm.Src = dst + oob = cm.Marshal() + } + return oob +} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go new file mode 100644 index 0000000000..6778c3c6cf --- /dev/null +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -0,0 +1,37 @@ +// +build windows + +package dns + +import "net" + +// SessionUDP holds the remote address +type SessionUDP struct { + raddr *net.UDPAddr +} + +// RemoteAddr returns the remote network address. +func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } + +// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a +// net.UDPAddr. +// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. +func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { + n, raddr, err := conn.ReadFrom(b) + if err != nil { + return n, nil, err + } + session := &SessionUDP{raddr.(*net.UDPAddr)} + return n, session, err +} + +// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. +// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. +func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { + n, err := conn.WriteTo(b, session.raddr) + return n, err +} + +// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods +// use the standard method in udp.go for these. +func setUDPSocketOptions(*net.UDPConn) error { return nil } +func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go new file mode 100644 index 0000000000..e90c5c968e --- /dev/null +++ b/vendor/github.com/miekg/dns/update.go @@ -0,0 +1,106 @@ +package dns + +// NameUsed sets the RRs in the prereq section to +// "Name is in use" RRs. RFC 2136 section 2.4.4. +func (u *Msg) NameUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// NameNotUsed sets the RRs in the prereq section to +// "Name is not in use" RRs. RFC 2136 section 2.4.5. +func (u *Msg) NameNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) + } +} + +// Used sets the RRs in the prereq section to +// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2.
+func (u *Msg) Used(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Answer = append(u.Answer, r) + } +} + +// RRsetUsed sets the RRs in the prereq section to +// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. +func (u *Msg) RRsetUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RRsetNotUsed sets the RRs in the prereq section to +// "RRset does not exist" RRs. RFC 2136 section 2.4.3. +func (u *Msg) RRsetNotUsed(rr []RR) { + if u.Answer == nil { + u.Answer = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) + } +} + +// Insert creates a dynamic update packet that adds a complete RRset, see RFC 2136 section 2.5.1. +func (u *Msg) Insert(rr []RR) { + if len(u.Question) == 0 { + panic("dns: empty question section") + } + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = u.Question[0].Qclass + u.Ns = append(u.Ns, r) + } +} + +// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. +func (u *Msg) RemoveRRset(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) + } +} + +// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3. +func (u *Msg) RemoveName(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) + } +} + +// Remove creates a dynamic update packet that deletes RR from an RRset, see RFC 2136 section 2.5.4. +func (u *Msg) Remove(rr []RR) { + if u.Ns == nil { + u.Ns = make([]RR, 0, len(rr)) + } + for _, r := range rr { + r.Header().Class = ClassNONE + r.Header().Ttl = 0 + u.Ns = append(u.Ns, r) + } +} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go new file mode 100644 index 0000000000..e746fee5b0 --- /dev/null +++ b/vendor/github.com/miekg/dns/version.go @@ -0,0 +1,15 @@ +package dns + +import "fmt" + +// Version is the current version of this library. +var Version = V{1, 0, 5} + +// V holds the version of this library. +type V struct { + Major, Minor, Patch int +} + +func (v V) String() string { + return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) +} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go new file mode 100644 index 0000000000..5d0ff5c8a2 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr.go @@ -0,0 +1,260 @@ +package dns + +import ( + "fmt" + "time" +) + +// Envelope is used when doing a zone transfer with a remote server. +type Envelope struct { + RR []RR // The set of RRs in the answer section of the xfr reply message. + Error error // If something went wrong, this contains the error. +}
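// Putting the RFC 2136 helpers from update.go together: SetUpdate names the
// zone in the question section, prerequisites go to Answer, changes to Ns.
// A minimal sketch (zone, record, and server address are placeholders):

package main

import "github.com/miekg/dns"

func main() {
	m := new(dns.Msg)
	m.SetUpdate("example.org.")

	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}
	m.NameNotUsed([]dns.RR{rr}) // prereq: host.example.org must not exist yet
	m.Insert([]dns.RR{rr})      // then add the A record

	_, _ = dns.Exchange(m, "192.0.2.53:53")
}

+// A Transfer defines parameters that are used during a zone transfer.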
+type Transfer struct { + *Conn + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds + TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + tsigTimersOnly bool +} + +// Think we need a way to stop the transfer + +// In performs an incoming transfer with the server at address a. +// If you would like to set the source IP, or some other attribute +// of a Dialer for a Transfer, you can do so by specifying the attributes +// in the Transfer.Conn: +// +// d := net.Dialer{LocalAddr: transfer_source} +// con, err := d.Dial("tcp", master) +// dnscon := &dns.Conn{Conn:con} +// transfer = &dns.Transfer{Conn: dnscon} +// channel, err := transfer.In(message, master) +// +func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { + timeout := dnsTimeout + if t.DialTimeout != 0 { + timeout = t.DialTimeout + } + if t.Conn == nil { + t.Conn, err = DialTimeout("tcp", a, timeout) + if err != nil { + return nil, err + } + } + if err := t.WriteMsg(q); err != nil { + return nil, err + } + env = make(chan *Envelope) + go func() { + if q.Question[0].Qtype == TypeAXFR { + go t.inAxfr(q, env) + return + } + if q.Question[0].Qtype == TypeIXFR { + go t.inIxfr(q, env) + return + } + }() + return env, nil +} + +func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { + first := true + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.Conn.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if first { + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + first = !first + // only one answer that is SOA, receive more + if len(in.Answer) == 1 { + t.tsigTimersOnly = true + c <- &Envelope{in.Answer, nil} + continue + } + } + + if !first { + t.tsigTimersOnly = true // Subsequent envelopes use this.
+ if isSOALast(in) { + c <- &Envelope{in.Answer, nil} + return + } + c <- &Envelope{in.Answer, nil} + } + } +} + +func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { + serial := uint32(0) // The first serial seen is the current server serial + axfr := true + n := 0 + qser := q.Ns[0].(*SOA).Serial + defer t.Close() + defer close(c) + timeout := dnsTimeout + if t.ReadTimeout != 0 { + timeout = t.ReadTimeout + } + for { + t.SetReadDeadline(time.Now().Add(timeout)) + in, err := t.ReadMsg() + if err != nil { + c <- &Envelope{nil, err} + return + } + if q.Id != in.Id { + c <- &Envelope{in.Answer, ErrId} + return + } + if in.Rcode != RcodeSuccess { + c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} + return + } + if n == 0 { + // Check if the returned answer is ok + if !isSOAFirst(in) { + c <- &Envelope{in.Answer, ErrSoa} + return + } + // This serial is important + serial = in.Answer[0].(*SOA).Serial + // Check if there are no changes in zone + if qser >= serial { + c <- &Envelope{in.Answer, nil} + return + } + } + // Now we need to check each message for SOA records, to see what we need to do + t.tsigTimersOnly = true + for _, rr := range in.Answer { + if v, ok := rr.(*SOA); ok { + if v.Serial == serial { + n++ + // quit if it's a full axfr or the server's SOA is repeated a third time + if axfr && n == 2 || n == 3 { + c <- &Envelope{in.Answer, nil} + return + } + } else if axfr { + // it's an ixfr + axfr = false + } + } + } + c <- &Envelope{in.Answer, nil} + } +} + +// Out performs an outgoing transfer with the client connecting in w. +// Basic use pattern: +// +// ch := make(chan *dns.Envelope) +// tr := new(dns.Transfer) +// go tr.Out(w, r, ch) +// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} +// close(ch) +// w.Hijack() +// // w.Close() // Client closes connection +// +// The server is responsible for sending the correct sequence of RRs through the +// channel ch. +func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { + for x := range ch { + r := new(Msg) + // Compress? + r.SetReply(q) + r.Authoritative = true + // assume it fits TODO(miek): fix + r.Answer = append(r.Answer, x.RR...) + if err := w.WriteMsg(r); err != nil { + return err + } + } + w.TsigTimersOnly(true) + return nil +} + +// ReadMsg reads a message from the transfer connection t. +func (t *Transfer) ReadMsg() (*Msg, error) { + m := new(Msg) + p := make([]byte, MaxMsgSize) + n, err := t.Read(p) + if err != nil && n == 0 { + return nil, err + } + p = p[:n] + if err := m.Unpack(p); err != nil { + return nil, err + } + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return m, ErrSecret + } + // Need to work on the original message p, as that was used to calculate the tsig. + err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + t.tsigRequestMAC = ts.MAC + } + return m, err +}
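// Driving the transfer code above from the client side: SetAxfr builds the
// question, In returns a channel of envelopes until the closing SOA arrives.
// A minimal sketch (zone and server address are placeholders):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	tr := new(dns.Transfer)
	ch, err := tr.In(m, "192.0.2.1:53")
	if err != nil {
		panic(err)
	}
	for env := range ch {
		if env.Error != nil {
			fmt.Println("xfr failed:", env.Error)
			break
		}
		for _, rr := range env.RR {
			fmt.Println(rr)
		}
	}
}

+// WriteMsg writes a message through the transfer connection t.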
+func (t *Transfer) WriteMsg(m *Msg) (err error) { + var out []byte + if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { + if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { + return ErrSecret + } + out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) + } else { + out, err = m.Pack() + } + if err != nil { + return err + } + if _, err = t.Write(out); err != nil { + return err + } + return nil +} + +func isSOAFirst(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[0].Header().Rrtype == TypeSOA + } + return false +} + +func isSOALast(in *Msg) bool { + if len(in.Answer) > 0 { + return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA + } + return false +} + +const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zcompress.go b/vendor/github.com/miekg/dns/zcompress.go new file mode 100644 index 0000000000..c2503204dd --- /dev/null +++ b/vendor/github.com/miekg/dns/zcompress.go @@ -0,0 +1,118 @@ +// Code generated by "go run compress_generate.go"; DO NOT EDIT. + +package dns + +func compressionLenHelperType(c map[string]int, r RR) { + switch x := r.(type) { + case *AFSDB: + compressionLenHelper(c, x.Hostname) + case *CNAME: + compressionLenHelper(c, x.Target) + case *DNAME: + compressionLenHelper(c, x.Target) + case *HIP: + for i := range x.RendezvousServers { + compressionLenHelper(c, x.RendezvousServers[i]) + } + case *KX: + compressionLenHelper(c, x.Exchanger) + case *LP: + compressionLenHelper(c, x.Fqdn) + case *MB: + compressionLenHelper(c, x.Mb) + case *MD: + compressionLenHelper(c, x.Md) + case *MF: + compressionLenHelper(c, x.Mf) + case *MG: + compressionLenHelper(c, x.Mg) + case *MINFO: + compressionLenHelper(c, x.Rmail) + compressionLenHelper(c, x.Email) + case *MR: + compressionLenHelper(c, x.Mr) + case *MX: + compressionLenHelper(c, x.Mx) + case *NAPTR: + compressionLenHelper(c, x.Replacement) + case *NS: + compressionLenHelper(c, x.Ns) + case *NSAPPTR: + compressionLenHelper(c, x.Ptr) + case *NSEC: + compressionLenHelper(c, x.NextDomain) + case *PTR: + compressionLenHelper(c, x.Ptr) + case *PX: + compressionLenHelper(c, x.Map822) + compressionLenHelper(c, x.Mapx400) + case *RP: + compressionLenHelper(c, x.Mbox) + compressionLenHelper(c, x.Txt) + case *RRSIG: + compressionLenHelper(c, x.SignerName) + case *RT: + compressionLenHelper(c, x.Host) + case *SIG: + compressionLenHelper(c, x.SignerName) + case *SOA: + compressionLenHelper(c, x.Ns) + compressionLenHelper(c, x.Mbox) + case *SRV: + compressionLenHelper(c, x.Target) + case *TALINK: + compressionLenHelper(c, x.PreviousName) + compressionLenHelper(c, x.NextName) + case *TKEY: + compressionLenHelper(c, x.Algorithm) + case *TSIG: + compressionLenHelper(c, x.Algorithm) + } +} + +func compressionLenSearchType(c map[string]int, r RR) (int, bool) { + switch x := r.(type) { + case *AFSDB: + k1, ok1 := compressionLenSearch(c, x.Hostname) + return k1, ok1 + case *CNAME: + k1, ok1 := compressionLenSearch(c, x.Target) + return k1, ok1 + case *MB: + k1, ok1 := compressionLenSearch(c, x.Mb) + return k1, ok1 + case *MD: + k1, ok1 := compressionLenSearch(c, x.Md) + return k1, ok1 + case *MF: + k1, ok1 := compressionLenSearch(c, x.Mf) + return k1, ok1 + case *MG: + k1, ok1 := compressionLenSearch(c, x.Mg) + return k1, ok1 + case *MINFO: + k1, ok1 := compressionLenSearch(c, x.Rmail) + k2, ok2 := compressionLenSearch(c, x.Email) + return k1 + k2, ok1 && ok2 + case *MR: + k1, ok1 := compressionLenSearch(c, x.Mr) + return k1, ok1 + case *MX: + k1, ok1 := 
compressionLenSearch(c, x.Mx) + return k1, ok1 + case *NS: + k1, ok1 := compressionLenSearch(c, x.Ns) + return k1, ok1 + case *PTR: + k1, ok1 := compressionLenSearch(c, x.Ptr) + return k1, ok1 + case *RT: + k1, ok1 := compressionLenSearch(c, x.Host) + return k1, ok1 + case *SOA: + k1, ok1 := compressionLenSearch(c, x.Ns) + k2, ok2 := compressionLenSearch(c, x.Mbox) + return k1 + k2, ok1 && ok2 + } + return 0, false +} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go new file mode 100644 index 0000000000..0d1f6f4daa --- /dev/null +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -0,0 +1,3615 @@ +// Code generated by "go run msg_generate.go"; DO NOT EDIT. + +package dns + +// pack*() functions + +func (rr *A) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataA(rr.A, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AAAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataAAAA(rr.AAAA, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AFSDB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Subtype, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Hostname, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *ANY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *AVC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CAA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Flag, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Tag, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Value, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + 
return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CDS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CERT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CNAME) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *CSYNC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DHCID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DLV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNAME) 
pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DNSKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *DS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Endpoint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI48) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint48(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *EUI64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint64(rr.Address, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Gid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *GPOS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Latitude, msg, off) + if err != nil { + return 
off, err + } + off, err = packString(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Cpu, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Os, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *HIP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.HitLength, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.PublicKeyLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Hit, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *KX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Exchanger, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L32) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packDataA(rr.Locator32, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *L64) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.Locator64, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return 
off, nil +} + +func (rr *LOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Version, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Size, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.HorizPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.VertPre, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Latitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Longitude, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Altitude, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *LP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Fqdn, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MB) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mb, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MD) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Md, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mf, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mg, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Rmail, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Email, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MR) pack(msg []byte, off int, compression 
map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *MX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mx, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NAPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Order, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Service, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.Regexp, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Replacement, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = packUint64(rr.NodeID, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NIMLOC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Locator, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.ZSData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NS) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSAPPTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, 
compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + off, err = packUint8(rr.HashLength, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase32(rr.NextDomain, msg, off) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *NSEC3PARAM) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Hash, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Iterations, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.SaltLength, msg, off) + if err != nil { + return off, err + } + // Only pack salt if value is not "-", i.e. 
empty + if rr.Salt != "-" { + off, err = packStringHex(rr.Salt, msg, off) + if err != nil { + return off, err + } + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPENPGPKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *OPT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packDataOpt(rr.Option, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PTR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ptr, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *PX) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Map822, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mapx400, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RFC3597) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringHex(rr.Rdata, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Flags, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Protocol, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Mbox, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Txt, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RRSIG) pack(msg []byte, off int, compression map[string]int, 
compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *RT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Preference, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Host, msg, off, compression, compress) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.TypeCovered, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Labels, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.OrigTtl, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.SignerName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.Signature, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SMIMEA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SOA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Ns, msg, off, compression, 
compress) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Mbox, msg, off, compression, compress) + if err != nil { + return off, err + } + off, err = packUint32(rr.Serial, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Refresh, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Retry, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expire, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Minttl, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SPF) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SRV) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Port, msg, off) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.Target, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *SSHFP) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Type, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.FingerPrint, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.KeyTag, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.DigestType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Digest, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TALINK) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.PreviousName, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = PackDomainName(rr.NextName, msg, off, compression, false) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TKEY) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, 
compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint32(rr.Inception, msg, off) + if err != nil { + return off, err + } + off, err = packUint32(rr.Expiration, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Mode, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.KeySize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Key, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TLSA) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint8(rr.Usage, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Selector, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.MatchingType, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.Certificate, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TSIG) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = PackDomainName(rr.Algorithm, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packUint48(rr.TimeSigned, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Fudge, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.MACSize, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.MAC, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OrigId, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Error, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.OtherLen, msg, off) + if err != nil { + return off, err + } + off, err = packStringHex(rr.OtherData, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *TXT) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packStringTxt(rr.Txt, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UID) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint32(rr.Uid, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *UINFO) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := 
rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.Uinfo, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *URI) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packUint16(rr.Priority, msg, off) + if err != nil { + return off, err + } + off, err = packUint16(rr.Weight, msg, off) + if err != nil { + return off, err + } + off, err = packStringOctet(rr.Target, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +func (rr *X25) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { + off, err := rr.Hdr.pack(msg, off, compression, compress) + if err != nil { + return off, err + } + headerEnd := off + off, err = packString(rr.PSDNAddress, msg, off) + if err != nil { + return off, err + } + rr.Header().Rdlength = uint16(off - headerEnd) + return off, nil +} + +// unpack*() functions + +func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(A) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.A, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAAAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AAAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.AAAA, off, err = unpackDataAAAA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AFSDB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Subtype, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hostname, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(ANY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + return rr, off, err +} + +func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(AVC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CAA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flag, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Tag, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Value, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil 
+ } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CDS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CERT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Type, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(CSYNC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DHCID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DLV) + rr.Hdr = h + if noRdata(h) 
{ + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNAME) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DNSKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(DS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI48) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(EUI64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGID(h RR_Header, msg []byte, off int) (RR, int, 
error) { + rr := new(GID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Gid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(GPOS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Longitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Cpu, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Os, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(HIP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.HitLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKeyLength, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) + if err != nil { + return rr, off, err + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) + if err != nil { + return rr, off, err + } + rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(KX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Exchanger, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, 
off, err +} + +func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L32) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator32, off, err = unpackDataA(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(L64) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Locator64, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Version, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Size, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.HorizPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.VertPre, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Latitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Longitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Altitude, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(LP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fqdn, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MB) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mb, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MD) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Md, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mf, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MG) + rr.Hdr = 
h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mg, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rmail, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Email, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(MX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mx, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NAPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Order, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Service, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Regexp, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Replacement, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NodeID, off, err = unpackUint64(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NIMLOC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.ZSData, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + 
return rr, off, err +} + +func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NS) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSAPPTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + rr.HashLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextDomain, off, err = unpackStringBase32(msg, off, off+int(rr.HashLength)) + if err != nil { + return rr, off, err + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(NSEC3PARAM) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Hash, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Flags, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Iterations, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SaltLength, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPENPGPKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PublicKey, off, err = unpackStringBase64(msg, 
off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(OPT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Option, off, err = unpackDataOpt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PTR) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ptr, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(PX) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Map822, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mapx400, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RFC3597) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Flags, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Protocol, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Txt, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RRSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = 
unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(RT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Preference, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Host, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.TypeCovered, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Labels, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OrigTtl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.SignerName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SMIMEA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return 
rr, off, err +} + +func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SOA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Ns, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mbox, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Serial, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Refresh, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Retry, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expire, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Minttl, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SPF) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSRV(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SRV) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Port, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(SSHFP) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Type, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.KeyTag, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.DigestType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != 
nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TALINK) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PreviousName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.NextName, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TKEY) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Inception, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Expiration, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Mode, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.KeySize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) + if err != nil { + return rr, off, err + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TLSA) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Usage, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Selector, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MatchingType, off, err = unpackUint8(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TSIG) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Algorithm, off, err = UnpackDomainName(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.TimeSigned, off, err = unpackUint48(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Fudge, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MACSize, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.MAC, off, err = 
unpackStringHex(msg, off, off+int(rr.MACSize)) + if err != nil { + return rr, off, err + } + rr.OrigId, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Error, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherLen, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(TXT) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Txt, off, err = unpackStringTxt(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UID) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uid, off, err = unpackUint32(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(UINFO) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Uinfo, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(URI) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.Priority, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Weight, off, err = unpackUint16(msg, off) + if err != nil { + return rr, off, err + } + if off == len(msg) { + return rr, off, nil + } + rr.Target, off, err = unpackStringOctet(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { + rr := new(X25) + rr.Hdr = h + if noRdata(h) { + return rr, off, nil + } + var err error + rdStart := off + _ = rdStart + + rr.PSDNAddress, off, err = unpackString(msg, off) + if err != nil { + return rr, off, err + } + return rr, off, err +} + +var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ + TypeA: unpackA, + TypeAAAA: unpackAAAA, + TypeAFSDB: unpackAFSDB, + TypeANY: unpackANY, + TypeAVC: unpackAVC, + TypeCAA: unpackCAA, + TypeCDNSKEY: unpackCDNSKEY, + TypeCDS: unpackCDS, + TypeCERT: unpackCERT, + TypeCNAME: unpackCNAME, + TypeCSYNC: unpackCSYNC, + TypeDHCID: unpackDHCID, + TypeDLV: unpackDLV, + TypeDNAME: unpackDNAME, + TypeDNSKEY: unpackDNSKEY, + TypeDS: unpackDS, + TypeEID: unpackEID, + TypeEUI48: unpackEUI48, + TypeEUI64: unpackEUI64, + TypeGID: unpackGID, + TypeGPOS: unpackGPOS, + TypeHINFO: unpackHINFO, + TypeHIP: unpackHIP, + TypeKEY: unpackKEY, + TypeKX: unpackKX, + TypeL32: unpackL32, + TypeL64: unpackL64, + TypeLOC: unpackLOC, + TypeLP: unpackLP, + TypeMB: unpackMB, + TypeMD: unpackMD, + TypeMF: unpackMF, + TypeMG: unpackMG, + TypeMINFO: unpackMINFO, + TypeMR: unpackMR, + TypeMX: unpackMX, + TypeNAPTR: unpackNAPTR, + TypeNID: unpackNID, + TypeNIMLOC: unpackNIMLOC, + TypeNINFO: unpackNINFO, + TypeNS: unpackNS, + TypeNSAPPTR: 
unpackNSAPPTR, + TypeNSEC: unpackNSEC, + TypeNSEC3: unpackNSEC3, + TypeNSEC3PARAM: unpackNSEC3PARAM, + TypeOPENPGPKEY: unpackOPENPGPKEY, + TypeOPT: unpackOPT, + TypePTR: unpackPTR, + TypePX: unpackPX, + TypeRKEY: unpackRKEY, + TypeRP: unpackRP, + TypeRRSIG: unpackRRSIG, + TypeRT: unpackRT, + TypeSIG: unpackSIG, + TypeSMIMEA: unpackSMIMEA, + TypeSOA: unpackSOA, + TypeSPF: unpackSPF, + TypeSRV: unpackSRV, + TypeSSHFP: unpackSSHFP, + TypeTA: unpackTA, + TypeTALINK: unpackTALINK, + TypeTKEY: unpackTKEY, + TypeTLSA: unpackTLSA, + TypeTSIG: unpackTSIG, + TypeTXT: unpackTXT, + TypeUID: unpackUID, + TypeUINFO: unpackUINFO, + TypeURI: unpackURI, + TypeX25: unpackX25, +} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go new file mode 100644 index 0000000000..abd75dd918 --- /dev/null +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -0,0 +1,863 @@ +// Code generated by "go run types_generate.go"; DO NOT EDIT. + +package dns + +import ( + "encoding/base64" + "net" +) + +// TypeToRR is a map of constructors for each RR type. +var TypeToRR = map[uint16]func() RR{ + TypeA: func() RR { return new(A) }, + TypeAAAA: func() RR { return new(AAAA) }, + TypeAFSDB: func() RR { return new(AFSDB) }, + TypeANY: func() RR { return new(ANY) }, + TypeAVC: func() RR { return new(AVC) }, + TypeCAA: func() RR { return new(CAA) }, + TypeCDNSKEY: func() RR { return new(CDNSKEY) }, + TypeCDS: func() RR { return new(CDS) }, + TypeCERT: func() RR { return new(CERT) }, + TypeCNAME: func() RR { return new(CNAME) }, + TypeCSYNC: func() RR { return new(CSYNC) }, + TypeDHCID: func() RR { return new(DHCID) }, + TypeDLV: func() RR { return new(DLV) }, + TypeDNAME: func() RR { return new(DNAME) }, + TypeDNSKEY: func() RR { return new(DNSKEY) }, + TypeDS: func() RR { return new(DS) }, + TypeEID: func() RR { return new(EID) }, + TypeEUI48: func() RR { return new(EUI48) }, + TypeEUI64: func() RR { return new(EUI64) }, + TypeGID: func() RR { return new(GID) }, + TypeGPOS: func() RR { return new(GPOS) }, + TypeHINFO: func() RR { return new(HINFO) }, + TypeHIP: func() RR { return new(HIP) }, + TypeKEY: func() RR { return new(KEY) }, + TypeKX: func() RR { return new(KX) }, + TypeL32: func() RR { return new(L32) }, + TypeL64: func() RR { return new(L64) }, + TypeLOC: func() RR { return new(LOC) }, + TypeLP: func() RR { return new(LP) }, + TypeMB: func() RR { return new(MB) }, + TypeMD: func() RR { return new(MD) }, + TypeMF: func() RR { return new(MF) }, + TypeMG: func() RR { return new(MG) }, + TypeMINFO: func() RR { return new(MINFO) }, + TypeMR: func() RR { return new(MR) }, + TypeMX: func() RR { return new(MX) }, + TypeNAPTR: func() RR { return new(NAPTR) }, + TypeNID: func() RR { return new(NID) }, + TypeNIMLOC: func() RR { return new(NIMLOC) }, + TypeNINFO: func() RR { return new(NINFO) }, + TypeNS: func() RR { return new(NS) }, + TypeNSAPPTR: func() RR { return new(NSAPPTR) }, + TypeNSEC: func() RR { return new(NSEC) }, + TypeNSEC3: func() RR { return new(NSEC3) }, + TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, + TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, + TypeOPT: func() RR { return new(OPT) }, + TypePTR: func() RR { return new(PTR) }, + TypePX: func() RR { return new(PX) }, + TypeRKEY: func() RR { return new(RKEY) }, + TypeRP: func() RR { return new(RP) }, + TypeRRSIG: func() RR { return new(RRSIG) }, + TypeRT: func() RR { return new(RT) }, + TypeSIG: func() RR { return new(SIG) }, + TypeSMIMEA: func() RR { return new(SMIMEA) }, + TypeSOA: func() RR { return new(SOA) }, + 
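The typeToUnpack table closes out the wire-decoding half of the generated code, and the TypeToRR map beginning here (it continues below) is its constructor counterpart: one zero-value factory per RR type, keyed by the same wire-format type codes. Types with no typeToUnpack entry — ISDN, NXT, NULL, and the other meta or obsolete codes that appear only in TypeToString — are presumably routed through the unpackRFC3597 catch-all shown earlier, which keeps their rdata as opaque hex. A short sketch of the exported tables in use (expected output in comments):

    package main

    import (
    	"fmt"

    	"github.com/miekg/dns"
    )

    func main() {
    	// Constructor table: build a fresh, zero-valued record for a type code.
    	rr := dns.TypeToRR[dns.TypeMX]()
    	fmt.Printf("%T\n", rr) // *dns.MX

    	// Presentation table: type code to the name used in zone files.
    	fmt.Println(dns.TypeToString[dns.TypeNSAPPTR]) // NSAP-PTR
    }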
TypeSPF: func() RR { return new(SPF) }, + TypeSRV: func() RR { return new(SRV) }, + TypeSSHFP: func() RR { return new(SSHFP) }, + TypeTA: func() RR { return new(TA) }, + TypeTALINK: func() RR { return new(TALINK) }, + TypeTKEY: func() RR { return new(TKEY) }, + TypeTLSA: func() RR { return new(TLSA) }, + TypeTSIG: func() RR { return new(TSIG) }, + TypeTXT: func() RR { return new(TXT) }, + TypeUID: func() RR { return new(UID) }, + TypeUINFO: func() RR { return new(UINFO) }, + TypeURI: func() RR { return new(URI) }, + TypeX25: func() RR { return new(X25) }, +} + +// TypeToString is a map of strings for each RR type. +var TypeToString = map[uint16]string{ + TypeA: "A", + TypeAAAA: "AAAA", + TypeAFSDB: "AFSDB", + TypeANY: "ANY", + TypeATMA: "ATMA", + TypeAVC: "AVC", + TypeAXFR: "AXFR", + TypeCAA: "CAA", + TypeCDNSKEY: "CDNSKEY", + TypeCDS: "CDS", + TypeCERT: "CERT", + TypeCNAME: "CNAME", + TypeCSYNC: "CSYNC", + TypeDHCID: "DHCID", + TypeDLV: "DLV", + TypeDNAME: "DNAME", + TypeDNSKEY: "DNSKEY", + TypeDS: "DS", + TypeEID: "EID", + TypeEUI48: "EUI48", + TypeEUI64: "EUI64", + TypeGID: "GID", + TypeGPOS: "GPOS", + TypeHINFO: "HINFO", + TypeHIP: "HIP", + TypeISDN: "ISDN", + TypeIXFR: "IXFR", + TypeKEY: "KEY", + TypeKX: "KX", + TypeL32: "L32", + TypeL64: "L64", + TypeLOC: "LOC", + TypeLP: "LP", + TypeMAILA: "MAILA", + TypeMAILB: "MAILB", + TypeMB: "MB", + TypeMD: "MD", + TypeMF: "MF", + TypeMG: "MG", + TypeMINFO: "MINFO", + TypeMR: "MR", + TypeMX: "MX", + TypeNAPTR: "NAPTR", + TypeNID: "NID", + TypeNIMLOC: "NIMLOC", + TypeNINFO: "NINFO", + TypeNS: "NS", + TypeNSEC: "NSEC", + TypeNSEC3: "NSEC3", + TypeNSEC3PARAM: "NSEC3PARAM", + TypeNULL: "NULL", + TypeNXT: "NXT", + TypeNone: "None", + TypeOPENPGPKEY: "OPENPGPKEY", + TypeOPT: "OPT", + TypePTR: "PTR", + TypePX: "PX", + TypeRKEY: "RKEY", + TypeRP: "RP", + TypeRRSIG: "RRSIG", + TypeRT: "RT", + TypeReserved: "Reserved", + TypeSIG: "SIG", + TypeSMIMEA: "SMIMEA", + TypeSOA: "SOA", + TypeSPF: "SPF", + TypeSRV: "SRV", + TypeSSHFP: "SSHFP", + TypeTA: "TA", + TypeTALINK: "TALINK", + TypeTKEY: "TKEY", + TypeTLSA: "TLSA", + TypeTSIG: "TSIG", + TypeTXT: "TXT", + TypeUID: "UID", + TypeUINFO: "UINFO", + TypeUNSPEC: "UNSPEC", + TypeURI: "URI", + TypeX25: "X25", + TypeNSAPPTR: "NSAP-PTR", +} + +func (rr *A) Header() *RR_Header { return &rr.Hdr } +func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } +func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } +func (rr *ANY) Header() *RR_Header { return &rr.Hdr } +func (rr *AVC) Header() *RR_Header { return &rr.Hdr } +func (rr *CAA) Header() *RR_Header { return &rr.Hdr } +func (rr *CDNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *CDS) Header() *RR_Header { return &rr.Hdr } +func (rr *CERT) Header() *RR_Header { return &rr.Hdr } +func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } +func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } +func (rr *DLV) Header() *RR_Header { return &rr.Hdr } +func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } +func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *DS) Header() *RR_Header { return &rr.Hdr } +func (rr *EID) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } +func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } +func (rr *GID) Header() *RR_Header { return &rr.Hdr } +func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } +func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *HIP) Header() *RR_Header { return &rr.Hdr } +func (rr 
*KEY) Header() *RR_Header { return &rr.Hdr } +func (rr *KX) Header() *RR_Header { return &rr.Hdr } +func (rr *L32) Header() *RR_Header { return &rr.Hdr } +func (rr *L64) Header() *RR_Header { return &rr.Hdr } +func (rr *LOC) Header() *RR_Header { return &rr.Hdr } +func (rr *LP) Header() *RR_Header { return &rr.Hdr } +func (rr *MB) Header() *RR_Header { return &rr.Hdr } +func (rr *MD) Header() *RR_Header { return &rr.Hdr } +func (rr *MF) Header() *RR_Header { return &rr.Hdr } +func (rr *MG) Header() *RR_Header { return &rr.Hdr } +func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *MR) Header() *RR_Header { return &rr.Hdr } +func (rr *MX) Header() *RR_Header { return &rr.Hdr } +func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NID) Header() *RR_Header { return &rr.Hdr } +func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } +func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *NS) Header() *RR_Header { return &rr.Hdr } +func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } +func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } +func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *OPT) Header() *RR_Header { return &rr.Hdr } +func (rr *PTR) Header() *RR_Header { return &rr.Hdr } +func (rr *PX) Header() *RR_Header { return &rr.Hdr } +func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } +func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *RP) Header() *RR_Header { return &rr.Hdr } +func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *RT) Header() *RR_Header { return &rr.Hdr } +func (rr *SIG) Header() *RR_Header { return &rr.Hdr } +func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } +func (rr *SOA) Header() *RR_Header { return &rr.Hdr } +func (rr *SPF) Header() *RR_Header { return &rr.Hdr } +func (rr *SRV) Header() *RR_Header { return &rr.Hdr } +func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } +func (rr *TA) Header() *RR_Header { return &rr.Hdr } +func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } +func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } +func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } +func (rr *TXT) Header() *RR_Header { return &rr.Hdr } +func (rr *UID) Header() *RR_Header { return &rr.Hdr } +func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } +func (rr *URI) Header() *RR_Header { return &rr.Hdr } +func (rr *X25) Header() *RR_Header { return &rr.Hdr } + +// len() functions +func (rr *A) len() int { + l := rr.Hdr.len() + l += net.IPv4len // A + return l +} +func (rr *AAAA) len() int { + l := rr.Hdr.len() + l += net.IPv6len // AAAA + return l +} +func (rr *AFSDB) len() int { + l := rr.Hdr.len() + l += 2 // Subtype + l += len(rr.Hostname) + 1 + return l +} +func (rr *ANY) len() int { + l := rr.Hdr.len() + return l +} +func (rr *AVC) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *CAA) len() int { + l := rr.Hdr.len() + l++ // Flag + l += len(rr.Tag) + 1 + l += len(rr.Value) + return l +} +func (rr *CERT) len() int { + l := rr.Hdr.len() + l += 2 // Type + l += 2 // KeyTag + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) + return l +} +func (rr *CNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DHCID) len() int { + l := rr.Hdr.len() + l += 
base64.StdEncoding.DecodedLen(len(rr.Digest)) + return l +} +func (rr *DNAME) len() int { + l := rr.Hdr.len() + l += len(rr.Target) + 1 + return l +} +func (rr *DNSKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *DS) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *EID) len() int { + l := rr.Hdr.len() + l += len(rr.Endpoint)/2 + 1 + return l +} +func (rr *EUI48) len() int { + l := rr.Hdr.len() + l += 6 // Address + return l +} +func (rr *EUI64) len() int { + l := rr.Hdr.len() + l += 8 // Address + return l +} +func (rr *GID) len() int { + l := rr.Hdr.len() + l += 4 // Gid + return l +} +func (rr *GPOS) len() int { + l := rr.Hdr.len() + l += len(rr.Longitude) + 1 + l += len(rr.Latitude) + 1 + l += len(rr.Altitude) + 1 + return l +} +func (rr *HINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Cpu) + 1 + l += len(rr.Os) + 1 + return l +} +func (rr *HIP) len() int { + l := rr.Hdr.len() + l++ // HitLength + l++ // PublicKeyAlgorithm + l += 2 // PublicKeyLength + l += len(rr.Hit) / 2 + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + for _, x := range rr.RendezvousServers { + l += len(x) + 1 + } + return l +} +func (rr *KX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Exchanger) + 1 + return l +} +func (rr *L32) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += net.IPv4len // Locator32 + return l +} +func (rr *L64) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // Locator64 + return l +} +func (rr *LOC) len() int { + l := rr.Hdr.len() + l++ // Version + l++ // Size + l++ // HorizPre + l++ // VertPre + l += 4 // Latitude + l += 4 // Longitude + l += 4 // Altitude + return l +} +func (rr *LP) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Fqdn) + 1 + return l +} +func (rr *MB) len() int { + l := rr.Hdr.len() + l += len(rr.Mb) + 1 + return l +} +func (rr *MD) len() int { + l := rr.Hdr.len() + l += len(rr.Md) + 1 + return l +} +func (rr *MF) len() int { + l := rr.Hdr.len() + l += len(rr.Mf) + 1 + return l +} +func (rr *MG) len() int { + l := rr.Hdr.len() + l += len(rr.Mg) + 1 + return l +} +func (rr *MINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Rmail) + 1 + l += len(rr.Email) + 1 + return l +} +func (rr *MR) len() int { + l := rr.Hdr.len() + l += len(rr.Mr) + 1 + return l +} +func (rr *MX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Mx) + 1 + return l +} +func (rr *NAPTR) len() int { + l := rr.Hdr.len() + l += 2 // Order + l += 2 // Preference + l += len(rr.Flags) + 1 + l += len(rr.Service) + 1 + l += len(rr.Regexp) + 1 + l += len(rr.Replacement) + 1 + return l +} +func (rr *NID) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += 8 // NodeID + return l +} +func (rr *NIMLOC) len() int { + l := rr.Hdr.len() + l += len(rr.Locator)/2 + 1 + return l +} +func (rr *NINFO) len() int { + l := rr.Hdr.len() + for _, x := range rr.ZSData { + l += len(x) + 1 + } + return l +} +func (rr *NS) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + return l +} +func (rr *NSAPPTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *NSEC3PARAM) len() int { + l := rr.Hdr.len() + l++ // Hash + l++ // Flags + l += 2 // Iterations + l++ // SaltLength + l += len(rr.Salt) / 2 + return l +} +func (rr *OPENPGPKEY) len() int { + l := rr.Hdr.len() + l += 
base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *PTR) len() int { + l := rr.Hdr.len() + l += len(rr.Ptr) + 1 + return l +} +func (rr *PX) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Map822) + 1 + l += len(rr.Mapx400) + 1 + return l +} +func (rr *RFC3597) len() int { + l := rr.Hdr.len() + l += len(rr.Rdata)/2 + 1 + return l +} +func (rr *RKEY) len() int { + l := rr.Hdr.len() + l += 2 // Flags + l++ // Protocol + l++ // Algorithm + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} +func (rr *RP) len() int { + l := rr.Hdr.len() + l += len(rr.Mbox) + 1 + l += len(rr.Txt) + 1 + return l +} +func (rr *RRSIG) len() int { + l := rr.Hdr.len() + l += 2 // TypeCovered + l++ // Algorithm + l++ // Labels + l += 4 // OrigTtl + l += 4 // Expiration + l += 4 // Inception + l += 2 // KeyTag + l += len(rr.SignerName) + 1 + l += base64.StdEncoding.DecodedLen(len(rr.Signature)) + return l +} +func (rr *RT) len() int { + l := rr.Hdr.len() + l += 2 // Preference + l += len(rr.Host) + 1 + return l +} +func (rr *SMIMEA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *SOA) len() int { + l := rr.Hdr.len() + l += len(rr.Ns) + 1 + l += len(rr.Mbox) + 1 + l += 4 // Serial + l += 4 // Refresh + l += 4 // Retry + l += 4 // Expire + l += 4 // Minttl + return l +} +func (rr *SPF) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *SRV) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += 2 // Port + l += len(rr.Target) + 1 + return l +} +func (rr *SSHFP) len() int { + l := rr.Hdr.len() + l++ // Algorithm + l++ // Type + l += len(rr.FingerPrint)/2 + 1 + return l +} +func (rr *TA) len() int { + l := rr.Hdr.len() + l += 2 // KeyTag + l++ // Algorithm + l++ // DigestType + l += len(rr.Digest)/2 + 1 + return l +} +func (rr *TALINK) len() int { + l := rr.Hdr.len() + l += len(rr.PreviousName) + 1 + l += len(rr.NextName) + 1 + return l +} +func (rr *TKEY) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 4 // Inception + l += 4 // Expiration + l += 2 // Mode + l += 2 // Error + l += 2 // KeySize + l += len(rr.Key) / 2 + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TLSA) len() int { + l := rr.Hdr.len() + l++ // Usage + l++ // Selector + l++ // MatchingType + l += len(rr.Certificate)/2 + 1 + return l +} +func (rr *TSIG) len() int { + l := rr.Hdr.len() + l += len(rr.Algorithm) + 1 + l += 6 // TimeSigned + l += 2 // Fudge + l += 2 // MACSize + l += len(rr.MAC) / 2 + l += 2 // OrigId + l += 2 // Error + l += 2 // OtherLen + l += len(rr.OtherData) / 2 + return l +} +func (rr *TXT) len() int { + l := rr.Hdr.len() + for _, x := range rr.Txt { + l += len(x) + 1 + } + return l +} +func (rr *UID) len() int { + l := rr.Hdr.len() + l += 4 // Uid + return l +} +func (rr *UINFO) len() int { + l := rr.Hdr.len() + l += len(rr.Uinfo) + 1 + return l +} +func (rr *URI) len() int { + l := rr.Hdr.len() + l += 2 // Priority + l += 2 // Weight + l += len(rr.Target) + return l +} +func (rr *X25) len() int { + l := rr.Hdr.len() + l += len(rr.PSDNAddress) + 1 + return l +} + +// copy() functions +func (rr *A) copy() RR { + return &A{*rr.Hdr.copyHeader(), copyIP(rr.A)} +} +func (rr *AAAA) copy() RR { + return &AAAA{*rr.Hdr.copyHeader(), copyIP(rr.AAAA)} +} +func (rr *AFSDB) copy() RR { + return &AFSDB{*rr.Hdr.copyHeader(), rr.Subtype, rr.Hostname} +} +func (rr *ANY) 
copy() RR { + return &ANY{*rr.Hdr.copyHeader()} +} +func (rr *AVC) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &AVC{*rr.Hdr.copyHeader(), Txt} +} +func (rr *CAA) copy() RR { + return &CAA{*rr.Hdr.copyHeader(), rr.Flag, rr.Tag, rr.Value} +} +func (rr *CERT) copy() RR { + return &CERT{*rr.Hdr.copyHeader(), rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} +} +func (rr *CNAME) copy() RR { + return &CNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *CSYNC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &CSYNC{*rr.Hdr.copyHeader(), rr.Serial, rr.Flags, TypeBitMap} +} +func (rr *DHCID) copy() RR { + return &DHCID{*rr.Hdr.copyHeader(), rr.Digest} +} +func (rr *DNAME) copy() RR { + return &DNAME{*rr.Hdr.copyHeader(), rr.Target} +} +func (rr *DNSKEY) copy() RR { + return &DNSKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *DS) copy() RR { + return &DS{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *EID) copy() RR { + return &EID{*rr.Hdr.copyHeader(), rr.Endpoint} +} +func (rr *EUI48) copy() RR { + return &EUI48{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *EUI64) copy() RR { + return &EUI64{*rr.Hdr.copyHeader(), rr.Address} +} +func (rr *GID) copy() RR { + return &GID{*rr.Hdr.copyHeader(), rr.Gid} +} +func (rr *GPOS) copy() RR { + return &GPOS{*rr.Hdr.copyHeader(), rr.Longitude, rr.Latitude, rr.Altitude} +} +func (rr *HINFO) copy() RR { + return &HINFO{*rr.Hdr.copyHeader(), rr.Cpu, rr.Os} +} +func (rr *HIP) copy() RR { + RendezvousServers := make([]string, len(rr.RendezvousServers)) + copy(RendezvousServers, rr.RendezvousServers) + return &HIP{*rr.Hdr.copyHeader(), rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} +} +func (rr *KX) copy() RR { + return &KX{*rr.Hdr.copyHeader(), rr.Preference, rr.Exchanger} +} +func (rr *L32) copy() RR { + return &L32{*rr.Hdr.copyHeader(), rr.Preference, copyIP(rr.Locator32)} +} +func (rr *L64) copy() RR { + return &L64{*rr.Hdr.copyHeader(), rr.Preference, rr.Locator64} +} +func (rr *LOC) copy() RR { + return &LOC{*rr.Hdr.copyHeader(), rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} +} +func (rr *LP) copy() RR { + return &LP{*rr.Hdr.copyHeader(), rr.Preference, rr.Fqdn} +} +func (rr *MB) copy() RR { + return &MB{*rr.Hdr.copyHeader(), rr.Mb} +} +func (rr *MD) copy() RR { + return &MD{*rr.Hdr.copyHeader(), rr.Md} +} +func (rr *MF) copy() RR { + return &MF{*rr.Hdr.copyHeader(), rr.Mf} +} +func (rr *MG) copy() RR { + return &MG{*rr.Hdr.copyHeader(), rr.Mg} +} +func (rr *MINFO) copy() RR { + return &MINFO{*rr.Hdr.copyHeader(), rr.Rmail, rr.Email} +} +func (rr *MR) copy() RR { + return &MR{*rr.Hdr.copyHeader(), rr.Mr} +} +func (rr *MX) copy() RR { + return &MX{*rr.Hdr.copyHeader(), rr.Preference, rr.Mx} +} +func (rr *NAPTR) copy() RR { + return &NAPTR{*rr.Hdr.copyHeader(), rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} +} +func (rr *NID) copy() RR { + return &NID{*rr.Hdr.copyHeader(), rr.Preference, rr.NodeID} +} +func (rr *NIMLOC) copy() RR { + return &NIMLOC{*rr.Hdr.copyHeader(), rr.Locator} +} +func (rr *NINFO) copy() RR { + ZSData := make([]string, len(rr.ZSData)) + copy(ZSData, rr.ZSData) + return &NINFO{*rr.Hdr.copyHeader(), ZSData} +} +func (rr *NS) copy() RR { + return &NS{*rr.Hdr.copyHeader(), rr.Ns} +} +func (rr *NSAPPTR) copy() RR { + return &NSAPPTR{*rr.Hdr.copyHeader(), 
rr.Ptr} +} +func (rr *NSEC) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC{*rr.Hdr.copyHeader(), rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3) copy() RR { + TypeBitMap := make([]uint16, len(rr.TypeBitMap)) + copy(TypeBitMap, rr.TypeBitMap) + return &NSEC3{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} +} +func (rr *NSEC3PARAM) copy() RR { + return &NSEC3PARAM{*rr.Hdr.copyHeader(), rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} +} +func (rr *OPENPGPKEY) copy() RR { + return &OPENPGPKEY{*rr.Hdr.copyHeader(), rr.PublicKey} +} +func (rr *OPT) copy() RR { + Option := make([]EDNS0, len(rr.Option)) + copy(Option, rr.Option) + return &OPT{*rr.Hdr.copyHeader(), Option} +} +func (rr *PTR) copy() RR { + return &PTR{*rr.Hdr.copyHeader(), rr.Ptr} +} +func (rr *PX) copy() RR { + return &PX{*rr.Hdr.copyHeader(), rr.Preference, rr.Map822, rr.Mapx400} +} +func (rr *RFC3597) copy() RR { + return &RFC3597{*rr.Hdr.copyHeader(), rr.Rdata} +} +func (rr *RKEY) copy() RR { + return &RKEY{*rr.Hdr.copyHeader(), rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} +} +func (rr *RP) copy() RR { + return &RP{*rr.Hdr.copyHeader(), rr.Mbox, rr.Txt} +} +func (rr *RRSIG) copy() RR { + return &RRSIG{*rr.Hdr.copyHeader(), rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} +} +func (rr *RT) copy() RR { + return &RT{*rr.Hdr.copyHeader(), rr.Preference, rr.Host} +} +func (rr *SMIMEA) copy() RR { + return &SMIMEA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *SOA) copy() RR { + return &SOA{*rr.Hdr.copyHeader(), rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} +} +func (rr *SPF) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &SPF{*rr.Hdr.copyHeader(), Txt} +} +func (rr *SRV) copy() RR { + return &SRV{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Port, rr.Target} +} +func (rr *SSHFP) copy() RR { + return &SSHFP{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Type, rr.FingerPrint} +} +func (rr *TA) copy() RR { + return &TA{*rr.Hdr.copyHeader(), rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} +} +func (rr *TALINK) copy() RR { + return &TALINK{*rr.Hdr.copyHeader(), rr.PreviousName, rr.NextName} +} +func (rr *TKEY) copy() RR { + return &TKEY{*rr.Hdr.copyHeader(), rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} +} +func (rr *TLSA) copy() RR { + return &TLSA{*rr.Hdr.copyHeader(), rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} +} +func (rr *TSIG) copy() RR { + return &TSIG{*rr.Hdr.copyHeader(), rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} +} +func (rr *TXT) copy() RR { + Txt := make([]string, len(rr.Txt)) + copy(Txt, rr.Txt) + return &TXT{*rr.Hdr.copyHeader(), Txt} +} +func (rr *UID) copy() RR { + return &UID{*rr.Hdr.copyHeader(), rr.Uid} +} +func (rr *UINFO) copy() RR { + return &UINFO{*rr.Hdr.copyHeader(), rr.Uinfo} +} +func (rr *URI) copy() RR { + return &URI{*rr.Hdr.copyHeader(), rr.Priority, rr.Weight, rr.Target} +} +func (rr *X25) copy() RR { + return &X25{*rr.Hdr.copyHeader(), rr.PSDNAddress} +} diff --git a/vendor/github.com/mistifyio/go-zfs/LICENSE b/vendor/github.com/mistifyio/go-zfs/LICENSE deleted file mode 100644 index f4c265cfec..0000000000 --- 
a/vendor/github.com/mistifyio/go-zfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright (c) 2014, OmniTI Computer Consulting, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/mistifyio/go-zfs/error.go b/vendor/github.com/mistifyio/go-zfs/error.go deleted file mode 100644 index 5408ccdb55..0000000000 --- a/vendor/github.com/mistifyio/go-zfs/error.go +++ /dev/null @@ -1,18 +0,0 @@ -package zfs - -import ( - "fmt" -) - -// Error is an error which is returned when the `zfs` or `zpool` shell -// commands return with a non-zero exit code. -type Error struct { - Err error - Debug string - Stderr string -} - -// Error returns the string representation of an Error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %q => %s", e.Err, e.Debug, e.Stderr) -} diff --git a/vendor/github.com/mistifyio/go-zfs/utils.go b/vendor/github.com/mistifyio/go-zfs/utils.go deleted file mode 100644 index d5b7353494..0000000000 --- a/vendor/github.com/mistifyio/go-zfs/utils.go +++ /dev/null @@ -1,320 +0,0 @@ -package zfs - -import ( - "bytes" - "fmt" - "io" - "os/exec" - "regexp" - "strconv" - "strings" -) - -type command struct { - Command string - Stdin io.Reader - Stdout io.Writer -} - -func (c *command) Run(arg ...string) ([][]string, error) { - - cmd := exec.Command(c.Command, arg...) - - var stdout, stderr bytes.Buffer - - if c.Stdout == nil { - cmd.Stdout = &stdout - } else { - cmd.Stdout = c.Stdout - } - - if c.Stdin != nil { - cmd.Stdin = c.Stdin - - } - cmd.Stderr = &stderr - - debug := strings.Join([]string{cmd.Path, strings.Join(cmd.Args, " ")}, " ") - if logger != nil { - logger.Log(cmd.Args) - } - err := cmd.Run() - - if err != nil { - return nil, &Error{ - Err: err, - Debug: debug, - Stderr: stderr.String(), - } - } - - // assume if you passed in something for stdout, that you know what to do with it - if c.Stdout != nil { - return nil, nil - } - - lines := strings.Split(stdout.String(), "\n") - - //last line is always blank - lines = lines[0 : len(lines)-1] - output := make([][]string, len(lines)) - - for i, l := range lines { - output[i] = strings.Fields(l) - } - - return output, nil -} - -func setString(field *string, value string) { - v := "" - if value != "-" { - v = value - } - *field = v -} - -func setUint(field *uint64, value string) error { - var v uint64 - if value != "-" { - var err error - v, err = strconv.ParseUint(value, 10, 64) - if err != nil { - return err - } - } - *field = v - return nil -} - -func (ds *Dataset) parseLine(line []string) error { - prop := line[1] - val := line[2] - - var err error - - switch prop { - case "available": - err = setUint(&ds.Avail, val) - case "compression": - setString(&ds.Compression, val) - case "mountpoint": - setString(&ds.Mountpoint, val) - case "quota": - err = setUint(&ds.Quota, val) - case "type": - setString(&ds.Type, val) - case "origin": - setString(&ds.Origin, val) - case "used": - err = setUint(&ds.Used, val) - case "volsize": - err = setUint(&ds.Volsize, val) - case "written": - err = setUint(&ds.Written, val) - case "logicalused": - err = setUint(&ds.Logicalused, val) - } - return err -} - -/* - * from zfs diff`s escape function: - * - * Prints a file name out a character at a time. 
If the character is - * not in the range of what we consider "printable" ASCII, display it - * as an escaped 3-digit octal value. ASCII values less than a space - * are all control characters and we declare the upper end as the - * DELete character. This also is the last 7-bit ASCII character. - * We choose to treat all 8-bit ASCII as not printable for this - * application. - */ -func unescapeFilepath(path string) (string, error) { - buf := make([]byte, 0, len(path)) - llen := len(path) - for i := 0; i < llen; { - if path[i] == '\\' { - if llen < i+4 { - return "", fmt.Errorf("Invalid octal code: too short") - } - octalCode := path[(i + 1):(i + 4)] - val, err := strconv.ParseUint(octalCode, 8, 8) - if err != nil { - return "", fmt.Errorf("Invalid octal code: %v", err) - } - buf = append(buf, byte(val)) - i += 4 - } else { - buf = append(buf, path[i]) - i++ - } - } - return string(buf), nil -} - -var changeTypeMap = map[string]ChangeType{ - "-": Removed, - "+": Created, - "M": Modified, - "R": Renamed, -} -var inodeTypeMap = map[string]InodeType{ - "B": BlockDevice, - "C": CharacterDevice, - "/": Directory, - ">": Door, - "|": NamedPipe, - "@": SymbolicLink, - "P": EventPort, - "=": Socket, - "F": File, -} - -// matches (+1) or (-1) -var referenceCountRegex = regexp.MustCompile("\\(([+-]\\d+?)\\)") - -func parseReferenceCount(field string) (int, error) { - matches := referenceCountRegex.FindStringSubmatch(field) - if matches == nil { - return 0, fmt.Errorf("Regexp does not match") - } - return strconv.Atoi(matches[1]) -} - -func parseInodeChange(line []string) (*InodeChange, error) { - llen := len(line) - if llen < 1 { - return nil, fmt.Errorf("Empty line passed") - } - - changeType := changeTypeMap[line[0]] - if changeType == 0 { - return nil, fmt.Errorf("Unknown change type '%s'", line[0]) - } - - switch changeType { - case Renamed: - if llen != 4 { - return nil, fmt.Errorf("Mismatching number of fields: expect 4, got: %d", llen) - } - case Modified: - if llen != 4 && llen != 3 { - return nil, fmt.Errorf("Mismatching number of fields: expect 3..4, got: %d", llen) - } - default: - if llen != 3 { - return nil, fmt.Errorf("Mismatching number of fields: expect 3, got: %d", llen) - } - } - - inodeType := inodeTypeMap[line[1]] - if inodeType == 0 { - return nil, fmt.Errorf("Unknown inode type '%s'", line[1]) - } - - path, err := unescapeFilepath(line[2]) - if err != nil { - return nil, fmt.Errorf("Failed to parse filename: %v", err) - } - - var newPath string - var referenceCount int - switch changeType { - case Renamed: - newPath, err = unescapeFilepath(line[3]) - if err != nil { - return nil, fmt.Errorf("Failed to parse filename: %v", err) - } - case Modified: - if llen == 4 { - referenceCount, err = parseReferenceCount(line[3]) - if err != nil { - return nil, fmt.Errorf("Failed to parse reference count: %v", err) - } - } - default: - newPath = "" - } - - return &InodeChange{ - Change: changeType, - Type: inodeType, - Path: path, - NewPath: newPath, - ReferenceCountChange: referenceCount, - }, nil -} - -// example input -//M / /testpool/bar/ -//+ F /testpool/bar/hello.txt -//M / /testpool/bar/hello.txt (+1) -//M / /testpool/bar/hello-hardlink -func parseInodeChanges(lines [][]string) ([]*InodeChange, error) { - changes := make([]*InodeChange, len(lines)) - - for i, line := range lines { - c, err := parseInodeChange(line) - if err != nil { - return nil, fmt.Errorf("Failed to parse line %d of zfs diff: %v, got: '%s'", i, err, line) - } - changes[i] = c - } - return changes, nil -} - -func 
listByType(t, filter string) ([]*Dataset, error) { - args := []string{"get", "-rHp", "-t", t, "all"} - if filter != "" { - args = append(args, filter) - } - out, err := zfs(args...) - if err != nil { - return nil, err - } - - var datasets []*Dataset - - name := "" - var ds *Dataset - for _, line := range out { - if name != line[0] { - name = line[0] - ds = &Dataset{Name: name} - datasets = append(datasets, ds) - } - if err := ds.parseLine(line); err != nil { - return nil, err - } - } - - return datasets, nil -} - -func propsSlice(properties map[string]string) []string { - args := make([]string, 0, len(properties)*3) - for k, v := range properties { - args = append(args, "-o") - args = append(args, fmt.Sprintf("%s=%s", k, v)) - } - return args -} - -func (z *Zpool) parseLine(line []string) error { - prop := line[1] - val := line[2] - - var err error - - switch prop { - case "health": - setString(&z.Health, val) - case "allocated": - err = setUint(&z.Allocated, val) - case "size": - err = setUint(&z.Size, val) - case "free": - err = setUint(&z.Free, val) - } - return err -} diff --git a/vendor/github.com/mistifyio/go-zfs/zfs.go b/vendor/github.com/mistifyio/go-zfs/zfs.go deleted file mode 100644 index a1d740e07e..0000000000 --- a/vendor/github.com/mistifyio/go-zfs/zfs.go +++ /dev/null @@ -1,382 +0,0 @@ -// Package zfs provides wrappers around the ZFS command line tools. -package zfs - -import ( - "errors" - "fmt" - "io" - "strconv" - "strings" -) - -// ZFS dataset types, which can indicate if a dataset is a filesystem, -// snapshot, or volume. -const ( - DatasetFilesystem = "filesystem" - DatasetSnapshot = "snapshot" - DatasetVolume = "volume" -) - -// Dataset is a ZFS dataset. A dataset could be a clone, filesystem, snapshot, -// or volume. The Type struct member can be used to determine a dataset's type. -// -// The field definitions can be found in the ZFS manual: -// http://www.freebsd.org/cgi/man.cgi?zfs(8). -type Dataset struct { - Name string - Origin string - Used uint64 - Avail uint64 - Mountpoint string - Compression string - Type string - Written uint64 - Volsize uint64 - Usedbydataset uint64 - Logicalused uint64 - Quota uint64 -} - -// InodeType is the type of inode as reported by Diff -type InodeType int - -// Types of Inodes -const ( - _ = iota // 0 == unknown type - BlockDevice InodeType = iota - CharacterDevice - Directory - Door - NamedPipe - SymbolicLink - EventPort - Socket - File -) - -// ChangeType is the type of inode change as reported by Diff -type ChangeType int - -// Types of Changes -const ( - _ = iota // 0 == unknown type - Removed ChangeType = iota - Created - Modified - Renamed -) - -// DestroyFlag is the options flag passed to Destroy -type DestroyFlag int - -// Valid destroy options -const ( - DestroyDefault DestroyFlag = 1 << iota - DestroyRecursive = 1 << iota - DestroyRecursiveClones = 1 << iota - DestroyDeferDeletion = 1 << iota - DestroyForceUmount = 1 << iota -) - -// InodeChange represents a change as reported by Diff -type InodeChange struct { - Change ChangeType - Type InodeType - Path string - NewPath string - ReferenceCountChange int -} - -// Logger can be used to log commands/actions -type Logger interface { - Log(cmd []string) -} - -var logger Logger - -// SetLogger set a log handler to log all commands including arguments before -// they are executed -func SetLogger(l Logger) { - logger = l -} - -// zfs is a helper function to wrap typical calls to zfs. 
-func zfs(arg ...string) ([][]string, error) { - c := command{Command: "zfs"} - return c.Run(arg...) -} - -// Datasets returns a slice of ZFS datasets, regardless of type. -// A filter argument may be passed to select a dataset with the matching name, -// or empty string ("") may be used to select all datasets. -func Datasets(filter string) ([]*Dataset, error) { - return listByType("all", filter) -} - -// Snapshots returns a slice of ZFS snapshots. -// A filter argument may be passed to select a snapshot with the matching name, -// or empty string ("") may be used to select all snapshots. -func Snapshots(filter string) ([]*Dataset, error) { - return listByType(DatasetSnapshot, filter) -} - -// Filesystems returns a slice of ZFS filesystems. -// A filter argument may be passed to select a filesystem with the matching name, -// or empty string ("") may be used to select all filesystems. -func Filesystems(filter string) ([]*Dataset, error) { - return listByType(DatasetFilesystem, filter) -} - -// Volumes returns a slice of ZFS volumes. -// A filter argument may be passed to select a volume with the matching name, -// or empty string ("") may be used to select all volumes. -func Volumes(filter string) ([]*Dataset, error) { - return listByType(DatasetVolume, filter) -} - -// GetDataset retrieves a single ZFS dataset by name. This dataset could be -// any valid ZFS dataset type, such as a clone, filesystem, snapshot, or volume. -func GetDataset(name string) (*Dataset, error) { - out, err := zfs("get", "-Hp", "all", name) - if err != nil { - return nil, err - } - - ds := &Dataset{Name: name} - for _, line := range out { - if err := ds.parseLine(line); err != nil { - return nil, err - } - } - - return ds, nil -} - -// Clone clones a ZFS snapshot and returns a clone dataset. -// An error will be returned if the input dataset is not of snapshot type. -func (d *Dataset) Clone(dest string, properties map[string]string) (*Dataset, error) { - if d.Type != DatasetSnapshot { - return nil, errors.New("can only clone snapshots") - } - args := make([]string, 2, 4) - args[0] = "clone" - args[1] = "-p" - if properties != nil { - args = append(args, propsSlice(properties)...) - } - args = append(args, []string{d.Name, dest}...) - _, err := zfs(args...) - if err != nil { - return nil, err - } - return GetDataset(dest) -} - -// ReceiveSnapshot receives a ZFS stream from the input io.Reader, creates a -// new snapshot with the specified name, and streams the input data into the -// newly-created snapshot. -func ReceiveSnapshot(input io.Reader, name string) (*Dataset, error) { - c := command{Command: "zfs", Stdin: input} - _, err := c.Run("receive", name) - if err != nil { - return nil, err - } - return GetDataset(name) -} - -// SendSnapshot sends a ZFS stream of a snapshot to the input io.Writer. -// An error will be returned if the input dataset is not of snapshot type. -func (d *Dataset) SendSnapshot(output io.Writer) error { - if d.Type != DatasetSnapshot { - return errors.New("can only send snapshots") - } - - c := command{Command: "zfs", Stdout: output} - _, err := c.Run("send", d.Name) - return err -} - -// CreateVolume creates a new ZFS volume with the specified name, size, and -// properties. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). 
-func CreateVolume(name string, size uint64, properties map[string]string) (*Dataset, error) { - args := make([]string, 4, 5) - args[0] = "create" - args[1] = "-p" - args[2] = "-V" - args[3] = strconv.FormatUint(size, 10) - if properties != nil { - args = append(args, propsSlice(properties)...) - } - args = append(args, name) - _, err := zfs(args...) - if err != nil { - return nil, err - } - return GetDataset(name) -} - -// Destroy destroys a ZFS dataset. If the destroy bit flag is set, any -// descendents of the dataset will be recursively destroyed, including snapshots. -// If the deferred bit flag is set, the snapshot is marked for deferred -// deletion. -func (d *Dataset) Destroy(flags DestroyFlag) error { - args := make([]string, 1, 3) - args[0] = "destroy" - if flags&DestroyRecursive != 0 { - args = append(args, "-r") - } - - if flags&DestroyRecursiveClones != 0 { - args = append(args, "-R") - } - - if flags&DestroyDeferDeletion != 0 { - args = append(args, "-d") - } - - if flags&DestroyForceUmount != 0 { - args = append(args, "-f") - } - - args = append(args, d.Name) - _, err := zfs(args...) - return err -} - -// SetProperty sets a ZFS property on the receiving dataset. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). -func (d *Dataset) SetProperty(key, val string) error { - prop := strings.Join([]string{key, val}, "=") - _, err := zfs("set", prop, d.Name) - return err -} - -// GetProperty returns the current value of a ZFS property from the -// receiving dataset. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). -func (d *Dataset) GetProperty(key string) (string, error) { - out, err := zfs("get", key, d.Name) - if err != nil { - return "", err - } - - return out[0][2], nil -} - -// Snapshots returns a slice of all ZFS snapshots of a given dataset. -func (d *Dataset) Snapshots() ([]*Dataset, error) { - return Snapshots(d.Name) -} - -// CreateFilesystem creates a new ZFS filesystem with the specified name and -// properties. -// A full list of available ZFS properties may be found here: -// https://www.freebsd.org/cgi/man.cgi?zfs(8). -func CreateFilesystem(name string, properties map[string]string) (*Dataset, error) { - args := make([]string, 1, 4) - args[0] = "create" - - if properties != nil { - args = append(args, propsSlice(properties)...) - } - - args = append(args, name) - _, err := zfs(args...) - if err != nil { - return nil, err - } - return GetDataset(name) -} - -// Snapshot creates a new ZFS snapshot of the receiving dataset, using the -// specified name. Optionally, the snapshot can be taken recursively, creating -// snapshots of all descendent filesystems in a single, atomic operation. -func (d *Dataset) Snapshot(name string, recursive bool) (*Dataset, error) { - args := make([]string, 1, 4) - args[0] = "snapshot" - if recursive { - args = append(args, "-r") - } - snapName := fmt.Sprintf("%s@%s", d.Name, name) - args = append(args, snapName) - _, err := zfs(args...) - if err != nil { - return nil, err - } - return GetDataset(snapName) -} - -// Rollback rolls back the receiving ZFS dataset to a previous snapshot. -// Optionally, intermediate snapshots can be destroyed. A ZFS snapshot -// rollback cannot be completed without this option, if more recent -// snapshots exist. -// An error will be returned if the input dataset is not of snapshot type. 
-func (d *Dataset) Rollback(destroyMoreRecent bool) error { - if d.Type != DatasetSnapshot { - return errors.New("can only rollback snapshots") - } - - args := make([]string, 1, 3) - args[0] = "rollback" - if destroyMoreRecent { - args = append(args, "-r") - } - args = append(args, d.Name) - - _, err := zfs(args...) - return err -} - -// Children returns a slice of children of the receiving ZFS dataset. -// A recursion depth may be specified, or a depth of 0 allows unlimited -// recursion. -func (d *Dataset) Children(depth uint64) ([]*Dataset, error) { - args := []string{"get", "-t", "all", "-Hp", "all"} - if depth > 0 { - args = append(args, "-d") - args = append(args, strconv.FormatUint(depth, 10)) - } else { - args = append(args, "-r") - } - args = append(args, d.Name) - - out, err := zfs(args...) - if err != nil { - return nil, err - } - - var datasets []*Dataset - name := "" - var ds *Dataset - for _, line := range out { - if name != line[0] { - name = line[0] - ds = &Dataset{Name: name} - datasets = append(datasets, ds) - } - if err := ds.parseLine(line); err != nil { - return nil, err - } - } - return datasets[1:], nil -} - -// Diff returns changes between a snapshot and the given ZFS dataset. -// The snapshot name must include the filesystem part as it is possible to -// compare clones with their origin snapshots. -func (d *Dataset) Diff(snapshot string) ([]*InodeChange, error) { - args := []string{"diff", "-FH", snapshot, d.Name}[:] - out, err := zfs(args...) - if err != nil { - return nil, err - } - inodeChanges, err := parseInodeChanges(out) - if err != nil { - return nil, err - } - return inodeChanges, nil -} diff --git a/vendor/github.com/mistifyio/go-zfs/zpool.go b/vendor/github.com/mistifyio/go-zfs/zpool.go deleted file mode 100644 index 6ba52d30cb..0000000000 --- a/vendor/github.com/mistifyio/go-zfs/zpool.go +++ /dev/null @@ -1,105 +0,0 @@ -package zfs - -// ZFS zpool states, which can indicate if a pool is online, offline, -// degraded, etc. More information regarding zpool states can be found here: -// https://docs.oracle.com/cd/E19253-01/819-5461/gamno/index.html. -const ( - ZpoolOnline = "ONLINE" - ZpoolDegraded = "DEGRADED" - ZpoolFaulted = "FAULTED" - ZpoolOffline = "OFFLINE" - ZpoolUnavail = "UNAVAIL" - ZpoolRemoved = "REMOVED" -) - -// Zpool is a ZFS zpool. A pool is a top-level structure in ZFS, and can -// contain many descendent datasets. -type Zpool struct { - Name string - Health string - Allocated uint64 - Size uint64 - Free uint64 -} - -// zpool is a helper function to wrap typical calls to zpool. -func zpool(arg ...string) ([][]string, error) { - c := command{Command: "zpool"} - return c.Run(arg...) -} - -// GetZpool retrieves a single ZFS zpool by name. -func GetZpool(name string) (*Zpool, error) { - out, err := zpool("get", "all", "-p", name) - if err != nil { - return nil, err - } - - // there is no -H - out = out[1:] - - z := &Zpool{Name: name} - for _, line := range out { - if err := z.parseLine(line); err != nil { - return nil, err - } - } - - return z, nil -} - -// Datasets returns a slice of all ZFS datasets in a zpool. -func (z *Zpool) Datasets() ([]*Dataset, error) { - return Datasets(z.Name) -} - -// Snapshots returns a slice of all ZFS snapshots in a zpool. -func (z *Zpool) Snapshots() ([]*Dataset, error) { - return Snapshots(z.Name) -} - -// CreateZpool creates a new ZFS zpool with the specified name, properties, -// and optional arguments. 
-// A full list of available ZFS properties and command-line arguments may be -// found here: https://www.freebsd.org/cgi/man.cgi?zfs(8). -func CreateZpool(name string, properties map[string]string, args ...string) (*Zpool, error) { - cli := make([]string, 1, 4) - cli[0] = "create" - if properties != nil { - cli = append(cli, propsSlice(properties)...) - } - cli = append(cli, name) - cli = append(cli, args...) - _, err := zpool(cli...) - if err != nil { - return nil, err - } - - return &Zpool{Name: name}, nil -} - -// Destroy destroys a ZFS zpool by name. -func (z *Zpool) Destroy() error { - _, err := zpool("destroy", z.Name) - return err -} - -// ListZpools list all ZFS zpools accessible on the current system. -func ListZpools() ([]*Zpool, error) { - args := []string{"list", "-Ho", "name"} - out, err := zpool(args...) - if err != nil { - return nil, err - } - - var pools []*Zpool - - for _, line := range out { - z, err := GetZpool(line[0]) - if err != nil { - return nil, err - } - pools = append(pools, z) - } - return pools, nil -} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..47e1f9ef8e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,137 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. 
+func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/moby/buildkit/LICENSE b/vendor/github.com/moby/buildkit/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/moby/buildkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
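Reviewer note, not part of the patch: the vendored randomid.go in the next hunk claims that 25 base-36 characters are exactly enough to hold 128 bits, citing floor(log(2^128 - 1, 36)) + 1 == 25 and the constant f5lxx1zz5pnorynqglhzmsp33. Below is a minimal, self-contained Go sketch that checks that arithmetic; it is illustrative only and independent of this diff:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// 2^128 - 1, the largest 128-bit value.
	max := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 128), big.NewInt(1))
	// Its base-36 rendering should be the 25-character string quoted in
	// the randomid.go comment: f5lxx1zz5pnorynqglhzmsp33.
	s := max.Text(36)
	fmt.Println(s, len(s)) // prints: f5lxx1zz5pnorynqglhzmsp33 25
}

Running it confirms why the generator reads one extra entropy byte (17 rather than 16) and sets the high bit: the result of big.Int.Text is then always 26 digits, and slicing off the first digit yields a fixed-length 25-character identifier.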
diff --git a/vendor/github.com/moby/buildkit/identity/randomid.go b/vendor/github.com/moby/buildkit/identity/randomid.go new file mode 100644 index 0000000000..0eb13527aa --- /dev/null +++ b/vendor/github.com/moby/buildkit/identity/randomid.go @@ -0,0 +1,53 @@ +package identity + +import ( + cryptorand "crypto/rand" + "fmt" + "io" + "math/big" +) + +var ( + // idReader is used for random id generation. This declaration allows us to + // replace it for testing. + idReader = cryptorand.Reader +) + +// parameters for random identifier generation. We can tweak this when there is +// time for further analysis. +const ( + randomIDEntropyBytes = 17 + randomIDBase = 36 + + // To ensure that all identifiers are fixed length, we make sure they + // get padded out or truncated to 25 characters. + // + // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value + // was calculated from floor(log(2^128-1, 36)) + 1. + // + // While 128 bits is the largest whole-byte size that fits into 25 + // base-36 characters, we generate an extra byte of entropy to fill + // in the high bits, which would otherwise be 0. This gives us a more + // even distribution of the first character. + // + // See http://mathworld.wolfram.com/NumberLength.html for more information. + maxRandomIDLength = 25 +) + +// NewID generates a new identifier for use where random identifiers with low +// collision probability are required. +// +// With the parameters in this package, the generated identifier will provide +// ~129 bits of entropy encoded with base36. Leading padding is added if the +// string is less 25 bytes. We do not intend to maintain this interface, so +// identifiers should be treated opaquely. +func NewID() string { + var p [randomIDEntropyBytes]byte + + if _, err := io.ReadFull(idReader, p[:]); err != nil { + panic(fmt.Errorf("failed to read random bytes: %v", err)) + } + + p[0] |= 0x80 // set high bit to avoid the need for padding + return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1] +} diff --git a/vendor/github.com/moby/buildkit/session/context.go b/vendor/github.com/moby/buildkit/session/context.go new file mode 100644 index 0000000000..31a29f0868 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/context.go @@ -0,0 +1,22 @@ +package session + +import "context" + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/session-id") + +func NewContext(ctx context.Context, id string) context.Context { + if id != "" { + return context.WithValue(ctx, contextKey, id) + } + return ctx +} + +func FromContext(ctx context.Context) string { + v := ctx.Value(contextKey) + if v == nil { + return "" + } + return v.(string) +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go new file mode 100644 index 0000000000..7f8bf3c0df --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -0,0 +1,113 @@ +package filesync + +import ( + "bufio" + io "io" + "os" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/tonistiigi/fsutil" + "google.golang.org/grpc" +) + +func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error { + return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{ + ExcludePatterns: excludes, + IncludePatterns: includes, + Map: _map, + }, progress) +} + +func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { + wc := 
&streamWriterCloser{ClientStream: stream} + return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc} +} + +type bufferedWriteCloser struct { + *bufio.Writer + io.Closer +} + +func (bwc *bufferedWriteCloser) Close() error { + if err := bwc.Writer.Flush(); err != nil { + return err + } + return bwc.Closer.Close() +} + +type streamWriterCloser struct { + grpc.ClientStream +} + +func (wc *streamWriterCloser) Write(dt []byte) (int, error) { + if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil { + return 0, err + } + return len(dt), nil +} + +func (wc *streamWriterCloser) Close() error { + if err := wc.ClientStream.CloseSend(); err != nil { + return err + } + // block until receiver is done + var bm BytesMessage + if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF { + return err + } + return nil +} + +func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error { + st := time.Now() + defer func() { + logrus.Debugf("diffcopy took: %v", time.Since(st)) + }() + var cf fsutil.ChangeFunc + var ch fsutil.ContentHasher + if cu != nil { + cu.MarkSupported(true) + cf = cu.HandleChange + ch = cu.ContentHasher() + } + return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + NotifyHashed: cf, + ContentHasher: ch, + ProgressCb: progress, + }) +} + +func syncTargetDiffCopy(ds grpc.Stream, dest string) error { + if err := os.MkdirAll(dest, 0700); err != nil { + return err + } + return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + Merge: true, + Filter: func() func(*fsutil.Stat) bool { + uid := os.Getuid() + gid := os.Getgid() + return func(st *fsutil.Stat) bool { + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return true + } + }(), + }) +} + +func writeTargetFile(ds grpc.Stream, wc io.WriteCloser) error { + for { + bm := BytesMessage{} + if err := ds.RecvMsg(&bm); err != nil { + if errors.Cause(err) == io.EOF { + return nil + } + return err + } + if _, err := wc.Write(bm.Data); err != nil { + return err + } + } +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go new file mode 100644 index 0000000000..232a696d73 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -0,0 +1,281 @@ +package filesync + +import ( + "context" + "fmt" + io "io" + "os" + "strings" + + "github.com/moby/buildkit/session" + "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const ( + keyOverrideExcludes = "override-excludes" + keyIncludePatterns = "include-patterns" + keyExcludePatterns = "exclude-patterns" + keyDirName = "dir-name" +) + +type fsSyncProvider struct { + dirs map[string]SyncedDir + p progressCb + doneCh chan error +} + +type SyncedDir struct { + Name string + Dir string + Excludes []string + Map func(*fsutil.Stat) bool +} + +// NewFSSyncProvider creates a new provider for sending files from client +func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { + p := &fsSyncProvider{ + dirs: map[string]SyncedDir{}, + } + for _, d := range dirs { + p.dirs[d.Name] = d + } + return p +} + +func (sp *fsSyncProvider) Register(server *grpc.Server) { + RegisterFileSyncServer(server, sp) +} + +func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { + return sp.handle("diffcopy", stream) +} +func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { + return sp.handle("tarstream", stream) +} + +func (sp 
*fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) { + var pr *protocol + for _, p := range supportedProtocols { + if method == p.name && isProtoSupported(p.name) { + pr = &p + break + } + } + if pr == nil { + return errors.New("failed to negotiate protocol") + } + + opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object + + dirName := "" + name, ok := opts[keyDirName] + if ok && len(name) > 0 { + dirName = name[0] + } + + dir, ok := sp.dirs[dirName] + if !ok { + return errors.Errorf("no access allowed to dir %q", dirName) + } + + excludes := opts[keyExcludePatterns] + if len(dir.Excludes) != 0 && (len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true") { + excludes = dir.Excludes + } + includes := opts[keyIncludePatterns] + + var progress progressCb + if sp.p != nil { + progress = sp.p + sp.p = nil + } + + var doneCh chan error + if sp.doneCh != nil { + doneCh = sp.doneCh + sp.doneCh = nil + } + err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map) + if doneCh != nil { + if err != nil { + doneCh <- err + } + close(doneCh) + } + return err +} + +func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { + sp.p = f + sp.doneCh = doneCh +} + +type progressCb func(int, bool) + +type protocol struct { + name string + sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error + recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error +} + +func isProtoSupported(p string) bool { + // TODO: this should be removed after testing if stability is confirmed + if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { + return strings.EqualFold(p, override) + } + return true +} + +var supportedProtocols = []protocol{ + { + name: "diffcopy", + sendFn: sendDiffCopy, + recvFn: recvDiffCopy, + }, +} + +// FSSendRequestOpt defines options for FSSend request +type FSSendRequestOpt struct { + Name string + IncludePatterns []string + ExcludePatterns []string + OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory + DestDir string + CacheUpdater CacheUpdater + ProgressCb func(int, bool) +} + +// CacheUpdater is an object capable of sending notifications for the cache hash changes +type CacheUpdater interface { + MarkSupported(bool) + HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error + ContentHasher() fsutil.ContentHasher +} + +// FSSync initializes a transfer of files +func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { + var pr *protocol + for _, p := range supportedProtocols { + if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { + pr = &p + break + } + } + if pr == nil { + return errors.New("no fssync handlers") + } + + opts := make(map[string][]string) + if opt.OverrideExcludes { + opts[keyOverrideExcludes] = []string{"true"} + } + + if opt.IncludePatterns != nil { + opts[keyIncludePatterns] = opt.IncludePatterns + } + + if opt.ExcludePatterns != nil { + opts[keyExcludePatterns] = opt.ExcludePatterns + } + + opts[keyDirName] = []string{opt.Name} + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := NewFileSyncClient(c.Conn()) + + var stream grpc.ClientStream + + ctx = metadata.NewOutgoingContext(ctx, opts) + + switch pr.name { + case "tarstream": + cc, err := 
client.TarStream(ctx) + if err != nil { + return err + } + stream = cc + case "diffcopy": + cc, err := client.DiffCopy(ctx) + if err != nil { + return err + } + stream = cc + default: + panic(fmt.Sprintf("invalid protocol: %q", pr.name)) + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb) +} + +// NewFSSyncTargetDir allows writing into a directory +func NewFSSyncTargetDir(outdir string) session.Attachable { + p := &fsSyncTarget{ + outdir: outdir, + } + return p +} + +// NewFSSyncTarget allows writing into an io.WriteCloser +func NewFSSyncTarget(w io.WriteCloser) session.Attachable { + p := &fsSyncTarget{ + outfile: w, + } + return p +} + +type fsSyncTarget struct { + outdir string + outfile io.WriteCloser +} + +func (sp *fsSyncTarget) Register(server *grpc.Server) { + RegisterFileSendServer(server, sp) +} + +func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error { + if sp.outdir != "" { + return syncTargetDiffCopy(stream, sp.outdir) + } + if sp.outfile == nil { + return errors.New("empty outfile and outdir") + } + defer sp.outfile.Close() + return writeTargetFile(stream, sp.outfile) +} + +func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error { + method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") + if !c.Supports(method) { + return errors.Errorf("method %s not supported by the client", method) + } + + client := NewFileSendClient(c.Conn()) + + cc, err := client.DiffCopy(ctx) + if err != nil { + return err + } + + return sendDiffCopy(cc, srcPath, nil, nil, progress, nil) +} + +func CopyFileWriter(ctx context.Context, c session.Caller) (io.WriteCloser, error) { + method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") + if !c.Supports(method) { + return nil, errors.Errorf("method %s not supported by the client", method) + } + + client := NewFileSendClient(c.Conn()) + + cc, err := client.DiffCopy(ctx) + if err != nil { + return nil, err + } + + return newStreamWriter(cc), nil +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go new file mode 100644 index 0000000000..30413db1e3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go @@ -0,0 +1,652 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: filesync.proto + +/* +Package filesync is a generated protocol buffer package. + +It is generated from these files: + filesync.proto + +It has these top-level messages: + BytesMessage +*/ +package filesync + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// BytesMessage contains a chunk of byte data +type BytesMessage struct { + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *BytesMessage) Reset() { *m = BytesMessage{} } +func (*BytesMessage) ProtoMessage() {} +func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} } + +func (m *BytesMessage) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") +} +func (this *BytesMessage) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*BytesMessage) + if !ok { + that2, ok := that.(BytesMessage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *BytesMessage) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&filesync.BytesMessage{") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFilesync(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for FileSync service + +type FileSyncClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) + TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) +} + +type fileSyncClient struct { + cc *grpc.ClientConn +} + +func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { + return &fileSyncClient{cc} +} + +func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...) + if err != nil { + return nil, err + } + x := &fileSyncDiffCopyClient{stream} + return x, nil +} + +type FileSync_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...) 
+ if err != nil { + return nil, err + } + x := &fileSyncTarStreamClient{stream} + return x, nil +} + +type FileSync_TarStreamClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSyncTarStreamClient struct { + grpc.ClientStream +} + +func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for FileSync service + +type FileSyncServer interface { + DiffCopy(FileSync_DiffCopyServer) error + TarStream(FileSync_TarStreamServer) error +} + +func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { + s.RegisterService(&_FileSync_serviceDesc, srv) +} + +func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) +} + +type FileSync_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) +} + +type FileSync_TarStreamServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSyncTarStreamServer struct { + grpc.ServerStream +} + +func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSync_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSync", + HandlerType: (*FileSyncServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSync_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "TarStream", + Handler: _FileSync_TarStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +// Client API for FileSend service + +type FileSendClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) +} + +type fileSendClient struct { + cc *grpc.ClientConn +} + +func NewFileSendClient(cc *grpc.ClientConn) FileSendClient { + return &fileSendClient{cc} +} + +func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...) 
+ if err != nil { + return nil, err + } + x := &fileSendDiffCopyClient{stream} + return x, nil +} + +type FileSend_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSendDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for FileSend service + +type FileSendServer interface { + DiffCopy(FileSend_DiffCopyServer) error +} + +func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) { + s.RegisterService(&_FileSend_serviceDesc, srv) +} + +func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream}) +} + +type FileSend_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSendDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSend", + HandlerType: (*FileSendServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSend_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + +func (m *BytesMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *BytesMessage) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovFilesync(uint64(l)) + } + return n +} + +func sovFilesync(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFilesync(x uint64) (n int) { + return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BytesMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesMessage{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringFilesync(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BytesMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilesync + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthFilesync + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilesync(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFilesync + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilesync(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFilesync + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilesync + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFilesync(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) } + +var fileDescriptorFilesync = []byte{ + // 208 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, + 0x2d, 0xae, 0xcc, 
0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, + 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, + 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a, + 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, + 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, + 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, + 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a, + 0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, + 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, + 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, + 0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/generate.go b/vendor/github.com/moby/buildkit/session/filesync/generate.go new file mode 100644 index 0000000000..261e876272 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/filesync/generate.go @@ -0,0 +1,3 @@ +package filesync + +//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go new file mode 100644 index 0000000000..2798b6abba --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -0,0 +1,81 @@ +package session + +import ( + "context" + "net" + "sync/atomic" + "time" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/http2" + "google.golang.org/grpc" + "google.golang.org/grpc/health/grpc_health_v1" +) + +func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { + go func() { + <-ctx.Done() + conn.Close() + }() + logrus.Debugf("serving grpc connection") + (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) +} + +func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { + var dialCount int64 + dialer := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) { + if c := atomic.AddInt64(&dialCount, 1); c > 1 { + return nil, errors.Errorf("only one connection allowed") + } + return conn, nil + }) + + dialOpts := []grpc.DialOption{ + dialer, + grpc.WithInsecure(), + } + + if span := opentracing.SpanFromContext(ctx); span != nil { + tracer := span.Tracer() + dialOpts = append(dialOpts, + grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())), + grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())), + ) + } + + cc, err := grpc.DialContext(ctx, "", dialOpts...) 
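+ // The empty dial target is deliberate: the custom dialer above ignores the + // address and returns the already-hijacked conn, and the dialCount guard + // turns any transparent gRPC reconnect attempt into a hard error instead of + // silently re-dialing a dead session.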
+ if err != nil { + return nil, nil, errors.Wrap(err, "failed to create grpc client") + } + + ctx, cancel := context.WithCancel(ctx) + go monitorHealth(ctx, cc, cancel) + + return ctx, cc, nil +} + +func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { + defer cancelConn() + defer cc.Close() + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + healthClient := grpc_health_v1.NewHealthClient(cc) + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) + cancel() + if err != nil { + return + } + } + } +} diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go new file mode 100644 index 0000000000..f401c7fb33 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -0,0 +1,218 @@ +package session + +import ( + "context" + "net" + "net/http" + "strings" + "sync" + + "github.com/pkg/errors" + "google.golang.org/grpc" +) + +// Caller can invoke requests on the session +type Caller interface { + Context() context.Context + Supports(method string) bool + Conn() *grpc.ClientConn + Name() string + SharedKey() string +} + +type client struct { + Session + cc *grpc.ClientConn + supported map[string]struct{} +} + +// Manager is a controller for accessing currently active sessions +type Manager struct { + sessions map[string]*client + mu sync.Mutex + updateCondition *sync.Cond +} + +// NewManager returns a new Manager +func NewManager() (*Manager, error) { + sm := &Manager{ + sessions: make(map[string]*client), + } + sm.updateCondition = sync.NewCond(&sm.mu) + return sm, nil +} + +// HandleHTTPRequest handles an incoming HTTP request +func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { + hijacker, ok := w.(http.Hijacker) + if !ok { + return errors.New("handler does not support hijack") + } + + id := r.Header.Get(headerSessionID) + + proto := r.Header.Get("Upgrade") + + sm.mu.Lock() + if _, ok := sm.sessions[id]; ok { + sm.mu.Unlock() + return errors.Errorf("session %s already exists", id) + } + + if proto == "" { + sm.mu.Unlock() + return errors.New("no upgrade proto in request") + } + + if proto != "h2c" { + sm.mu.Unlock() + return errors.Errorf("protocol %s not supported", proto) + } + + conn, _, err := hijacker.Hijack() + if err != nil { + sm.mu.Unlock() + return errors.Wrap(err, "failed to hijack connection") + } + + resp := &http.Response{ + StatusCode: http.StatusSwitchingProtocols, + ProtoMajor: 1, + ProtoMinor: 1, + Header: http.Header{}, + } + resp.Header.Set("Connection", "Upgrade") + resp.Header.Set("Upgrade", proto) + + // set raw mode + conn.Write([]byte{}) + resp.Write(conn) + + return sm.handleConn(ctx, conn, r.Header) +} + +// HandleConn handles an incoming raw connection +func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + sm.mu.Lock() + return sm.handleConn(ctx, conn, opts) +} + +// caller needs to take lock, this function will release it +func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + opts = canonicalHeaders(opts) + + h := http.Header(opts) + id := h.Get(headerSessionID) + name := h.Get(headerSessionName) + sharedKey := h.Get(headerSessionSharedKey) + + ctx, cc, err := grpcClientConn(ctx, conn) + if err != 
nil { + sm.mu.Unlock() + return err + } + + c := &client{ + Session: Session{ + id: id, + name: name, + sharedKey: sharedKey, + ctx: ctx, + cancelCtx: cancel, + done: make(chan struct{}), + }, + cc: cc, + supported: make(map[string]struct{}), + } + + for _, m := range opts[headerSessionMethod] { + c.supported[strings.ToLower(m)] = struct{}{} + } + sm.sessions[id] = c + sm.updateCondition.Broadcast() + sm.mu.Unlock() + + defer func() { + sm.mu.Lock() + delete(sm.sessions, id) + sm.mu.Unlock() + }() + + <-c.ctx.Done() + conn.Close() + close(c.done) + + return nil +} + +// Get returns a session by ID +func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { + // session prefix is used to identify vertexes with different contexts so + // they would not collide, but for lookup we don't need the prefix + if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 { + id = p[1] + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + select { + case <-ctx.Done(): + sm.updateCondition.Broadcast() + } + }() + + var c *client + + sm.mu.Lock() + for { + select { + case <-ctx.Done(): + sm.mu.Unlock() + return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id) + default: + } + var ok bool + c, ok = sm.sessions[id] + if !ok || c.closed() { + sm.updateCondition.Wait() + continue + } + sm.mu.Unlock() + break + } + + return c, nil +} + +func (c *client) Context() context.Context { + return c.context() +} + +func (c *client) Name() string { + return c.name +} + +func (c *client) SharedKey() string { + return c.sharedKey +} + +func (c *client) Supports(url string) bool { + _, ok := c.supported[strings.ToLower(url)] + return ok +} +func (c *client) Conn() *grpc.ClientConn { + return c.cc +} + +func canonicalHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[http.CanonicalHeaderKey(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go new file mode 100644 index 0000000000..47c9579633 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/session.go @@ -0,0 +1,143 @@ +package session + +import ( + "context" + "net" + "strings" + + "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/moby/buildkit/identity" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +const ( + headerSessionID = "X-Docker-Expose-Session-Uuid" + headerSessionName = "X-Docker-Expose-Session-Name" + headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey" + headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method" +) + +// Dialer returns a connection that can be used by the session +type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) + +// Attachable defines a feature that can be exposed on a session +type Attachable interface { + Register(*grpc.Server) +} + +// Session is a long-running connection between a client and a daemon +type Session struct { + id string + name string + sharedKey string + ctx context.Context + cancelCtx func() + done chan struct{} + grpcServer *grpc.Server + conn net.Conn +} + +// NewSession returns a new long-running session +func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { + id := identity.NewID() + + serverOpts := []grpc.ServerOption{} + if span :=
opentracing.SpanFromContext(ctx); span != nil { + tracer := span.Tracer() + serverOpts = []grpc.ServerOption{ + grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())), + grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())), + } + } + + s := &Session{ + id: id, + name: name, + sharedKey: sharedKey, + grpcServer: grpc.NewServer(serverOpts...), + } + + grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer()) + + return s, nil +} + +// Allow enables a given service to be reachable through the grpc session +func (s *Session) Allow(a Attachable) { + a.Register(s.grpcServer) +} + +// ID returns the unique identifier for the session +func (s *Session) ID() string { + return s.id +} + +// Run activates the session +func (s *Session) Run(ctx context.Context, dialer Dialer) error { + ctx, cancel := context.WithCancel(ctx) + s.cancelCtx = cancel + s.done = make(chan struct{}) + + defer cancel() + defer close(s.done) + + meta := make(map[string][]string) + meta[headerSessionID] = []string{s.id} + meta[headerSessionName] = []string{s.name} + meta[headerSessionSharedKey] = []string{s.sharedKey} + + for name, svc := range s.grpcServer.GetServiceInfo() { + for _, method := range svc.Methods { + meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name)) + } + } + conn, err := dialer(ctx, "h2c", meta) + if err != nil { + return errors.Wrap(err, "failed to dial gRPC") + } + s.conn = conn + serve(ctx, s.grpcServer, conn) + return nil +} + +// Close closes the session +func (s *Session) Close() error { + if s.cancelCtx != nil && s.done != nil { + if s.conn != nil { + s.conn.Close() + } + s.grpcServer.Stop() + <-s.done + } + return nil +} + +func (s *Session) context() context.Context { + return s.ctx +} + +func (s *Session) closed() bool { + select { + case <-s.context().Done(): + return true + default: + return false + } +} + +// MethodURL returns a gRPC method URL for service and method name +func MethodURL(s, m string) string { + return "/" + s + "/" + m +} + +func traceFilter() otgrpc.Option { + return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext, + method string, + req, resp interface{}) bool { + return !strings.HasSuffix(method, "Health/Check") + }) +} diff --git a/vendor/github.com/mtrmac/gpgme/LICENSE b/vendor/github.com/mtrmac/gpgme/LICENSE deleted file mode 100644 index 06d4ab7731..0000000000 --- a/vendor/github.com/mtrmac/gpgme/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2015, James Fargher -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
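Taken together, the vendored session.go and manager.go above invert the usual gRPC roles: the client-side Session serves its gRPC server over the hijacked connection, while the daemon-side Manager dials back across that same connection as a client and exposes the result as a Caller. The following is a rough end-to-end sketch of that lifecycle, not part of the vendored code: it assumes in-process wiring over net.Pipe (a real client instead performs the h2c upgrade that HandleHTTPRequest expects), and the session name, shared key, and panics are placeholders.

package main

import (
    "context"
    "net"

    "github.com/moby/buildkit/session"
)

func main() {
    ctx := context.Background()

    s, err := session.NewSession(ctx, "example", "shared-key")
    if err != nil {
        panic(err)
    }
    // s.Allow(...) would register an Attachable here, e.g. a provider
    // wrapping the FileSend service generated from filesync.proto.

    m, err := session.NewManager()
    if err != nil {
        panic(err)
    }

    clientSide, daemonSide := net.Pipe()

    // Run assembles the session metadata (id, name, shared key, method URLs)
    // and hands it to the dialer; forward conn and metadata to the manager.
    dialer := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
        go m.HandleConn(ctx, daemonSide, meta)
        return clientSide, nil
    }
    go s.Run(ctx, dialer) // Run blocks while serving, so start it in a goroutine

    // Daemon side: Get blocks until the session has registered.
    caller, err := m.Get(ctx, s.ID())
    if err != nil {
        panic(err)
    }
    _ = caller.Conn() // a *grpc.ClientConn pointing back into the session
    s.Close()
}

The health loop in grpc.go then polices this pairing: once the Caller's connection stops answering grpc_health_v1 checks, monitorHealth cancels the context and the manager drops the session.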
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mtrmac/gpgme/callbacks.go b/vendor/github.com/mtrmac/gpgme/callbacks.go deleted file mode 100644 index d1dc610d42..0000000000 --- a/vendor/github.com/mtrmac/gpgme/callbacks.go +++ /dev/null @@ -1,42 +0,0 @@ -package gpgme - -import ( - "sync" -) - -var callbacks struct { - sync.Mutex - m map[uintptr]interface{} - c uintptr -} - -func callbackAdd(v interface{}) uintptr { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m == nil { - callbacks.m = make(map[uintptr]interface{}) - } - callbacks.c++ - ret := callbacks.c - callbacks.m[ret] = v - return ret -} - -func callbackLookup(c uintptr) interface{} { - callbacks.Lock() - defer callbacks.Unlock() - ret := callbacks.m[c] - if ret == nil { - panic("callback pointer not found") - } - return ret -} - -func callbackDelete(c uintptr) { - callbacks.Lock() - defer callbacks.Unlock() - if callbacks.m[c] == nil { - panic("callback pointer not found") - } - delete(callbacks.m, c) -} diff --git a/vendor/github.com/mtrmac/gpgme/data.go b/vendor/github.com/mtrmac/gpgme/data.go deleted file mode 100644 index eebc972634..0000000000 --- a/vendor/github.com/mtrmac/gpgme/data.go +++ /dev/null @@ -1,191 +0,0 @@ -package gpgme - -// #include -// #include -// #include -// #include "go_gpgme.h" -import "C" - -import ( - "io" - "os" - "runtime" - "unsafe" -) - -const ( - SeekSet = C.SEEK_SET - SeekCur = C.SEEK_CUR - SeekEnd = C.SEEK_END -) - -//export gogpgme_readfunc -func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - n, err := d.r.Read(d.buf[:size]) - if err != nil && err != io.EOF { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n)) - return C.ssize_t(n) -} - -//export gogpgme_writefunc -func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t { - d := callbackLookup(uintptr(handle)).(*Data) - if len(d.buf) < int(size) { - d.buf = make([]byte, size) - } - C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size)) - n, err := d.w.Write(d.buf[:size]) - if err != nil && err != io.EOF { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - return C.ssize_t(n) -} - -//export gogpgme_seekfunc -func gogpgme_seekfunc(handle unsafe.Pointer, offset C.off_t, whence C.int) C.off_t { - d := callbackLookup(uintptr(handle)).(*Data) - n, err := d.s.Seek(int64(offset), int(whence)) - if err != nil { - C.gpgme_err_set_errno(C.EIO) - return -1 - } - return C.off_t(n) -} - -// The Data buffer used to communicate with GPGME -type Data struct { - dh C.gpgme_data_t - buf []byte - cbs C.struct_gpgme_data_cbs - r io.Reader - w io.Writer - s io.Seeker - cbc uintptr -} - -func newData() *Data { - d := &Data{} - runtime.SetFinalizer(d, (*Data).Close) - return d -} - -// NewData returns a new memory based data buffer -func NewData() (*Data, error) { - d := newData() - return d, handleError(C.gpgme_data_new(&d.dh)) -} - -// 
NewDataFile returns a new file based data buffer -func NewDataFile(f *os.File) (*Data, error) { - d := newData() - return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd()))) -} - -// NewDataBytes returns a new memory based data buffer that contains `b` bytes -func NewDataBytes(b []byte) (*Data, error) { - d := newData() - var cb *C.char - if len(b) != 0 { - cb = (*C.char)(unsafe.Pointer(&b[0])) - } - return d, handleError(C.gpgme_data_new_from_mem(&d.dh, cb, C.size_t(len(b)), 1)) -} - -// NewDataReader returns a new callback based data buffer -func NewDataReader(r io.Reader) (*Data, error) { - d := newData() - d.r = r - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataWriter returns a new callback based data buffer -func NewDataWriter(w io.Writer) (*Data, error) { - d := newData() - d.w = w - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataReadWriter returns a new callback based data buffer -func NewDataReadWriter(rw io.ReadWriter) (*Data, error) { - d := newData() - d.r = rw - d.w = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// NewDataReadWriteSeeker returns a new callback based data buffer -func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) { - d := newData() - d.r = rw - d.w = rw - d.s = rw - d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc) - d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc) - d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc) - cbc := callbackAdd(d) - d.cbc = cbc - return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc))) -} - -// Close releases any resources associated with the data buffer -func (d *Data) Close() error { - if d.dh == nil { - return nil - } - if d.cbc > 0 { - callbackDelete(d.cbc) - } - _, err := C.gpgme_data_release(d.dh) - d.dh = nil - return err -} - -func (d *Data) Write(p []byte) (int, error) { - n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) - if err != nil { - return 0, err - } - if n == 0 { - return 0, io.EOF - } - return int(n), nil -} - -func (d *Data) Read(p []byte) (int, error) { - n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p))) - if err != nil { - return 0, err - } - if n == 0 { - return 0, io.EOF - } - return int(n), nil -} - -func (d *Data) Seek(offset int64, whence int) (int64, error) { - n, err := C.gpgme_data_seek(d.dh, C.off_t(offset), C.int(whence)) - return int64(n), err -} - -// Name returns the associated filename if any -func (d *Data) Name() string { - return C.GoString(C.gpgme_data_get_file_name(d.dh)) -} diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.c b/vendor/github.com/mtrmac/gpgme/go_gpgme.c deleted file mode 100644 index b887574e0c..0000000000 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.c +++ /dev/null @@ -1,89 +0,0 @@ -#include "go_gpgme.h" - -gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) { - return gpgme_data_new_from_cbs(dh, cbs, (void *)handle); -} - -void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t 
handle) { - gpgme_set_passphrase_cb(ctx, cb, (void *)handle); -} - -unsigned int key_revoked(gpgme_key_t k) { - return k->revoked; -} - -unsigned int key_expired(gpgme_key_t k) { - return k->expired; -} - -unsigned int key_disabled(gpgme_key_t k) { - return k->disabled; -} - -unsigned int key_invalid(gpgme_key_t k) { - return k->invalid; -} - -unsigned int key_can_encrypt(gpgme_key_t k) { - return k->can_encrypt; -} - -unsigned int key_can_sign(gpgme_key_t k) { - return k->can_sign; -} - -unsigned int key_can_certify(gpgme_key_t k) { - return k->can_certify; -} - -unsigned int key_secret(gpgme_key_t k) { - return k->secret; -} - -unsigned int key_can_authenticate(gpgme_key_t k) { - return k->can_authenticate; -} - -unsigned int key_is_qualified(gpgme_key_t k) { - return k->is_qualified; -} - -unsigned int signature_wrong_key_usage(gpgme_signature_t s) { - return s->wrong_key_usage; -} - -unsigned int signature_pka_trust(gpgme_signature_t s) { - return s->pka_trust; -} - -unsigned int signature_chain_model(gpgme_signature_t s) { - return s->chain_model; -} - -unsigned int subkey_revoked(gpgme_subkey_t k) { - return k->revoked; -} - -unsigned int subkey_expired(gpgme_subkey_t k) { - return k->expired; -} - -unsigned int subkey_disabled(gpgme_subkey_t k) { - return k->disabled; -} - -unsigned int subkey_invalid(gpgme_subkey_t k) { - return k->invalid; -} - -unsigned int subkey_secret(gpgme_subkey_t k) { - return k->secret; -} - -unsigned int uid_revoked(gpgme_user_id_t u) { - return u->revoked; -} - -unsigned int uid_invalid(gpgme_user_id_t u) { - return u->invalid; -} diff --git a/vendor/github.com/mtrmac/gpgme/go_gpgme.h b/vendor/github.com/mtrmac/gpgme/go_gpgme.h deleted file mode 100644 index a3678b127a..0000000000 --- a/vendor/github.com/mtrmac/gpgme/go_gpgme.h +++ /dev/null @@ -1,37 +0,0 @@ -#ifndef GO_GPGME_H -#define GO_GPGME_H - -#define _FILE_OFFSET_BITS 64 -#include - -#include - -extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size); -extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size); -extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence); -extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd); -extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle); -extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle); - -extern unsigned int key_revoked(gpgme_key_t k); -extern unsigned int key_expired(gpgme_key_t k); -extern unsigned int key_disabled(gpgme_key_t k); -extern unsigned int key_invalid(gpgme_key_t k); -extern unsigned int key_can_encrypt(gpgme_key_t k); -extern unsigned int key_can_sign(gpgme_key_t k); -extern unsigned int key_can_certify(gpgme_key_t k); -extern unsigned int key_secret(gpgme_key_t k); -extern unsigned int key_can_authenticate(gpgme_key_t k); -extern unsigned int key_is_qualified(gpgme_key_t k); -extern unsigned int signature_wrong_key_usage(gpgme_signature_t s); -extern unsigned int signature_pka_trust(gpgme_signature_t s); -extern unsigned int signature_chain_model(gpgme_signature_t s); -extern unsigned int subkey_revoked(gpgme_subkey_t k); -extern unsigned int subkey_expired(gpgme_subkey_t k); -extern unsigned int subkey_disabled(gpgme_subkey_t k); -extern unsigned int subkey_invalid(gpgme_subkey_t k); -extern unsigned int subkey_secret(gpgme_subkey_t k); -extern unsigned int uid_revoked(gpgme_user_id_t u); -extern unsigned int 
uid_invalid(gpgme_user_id_t u); - -#endif diff --git a/vendor/github.com/mtrmac/gpgme/gpgme.go b/vendor/github.com/mtrmac/gpgme/gpgme.go deleted file mode 100644 index 20aad737c6..0000000000 --- a/vendor/github.com/mtrmac/gpgme/gpgme.go +++ /dev/null @@ -1,748 +0,0 @@ -// Package gpgme provides a Go wrapper for the GPGME library -package gpgme - -// #cgo LDFLAGS: -lgpgme -lassuan -lgpg-error -// #cgo CPPFLAGS: -D_FILE_OFFSET_BITS=64 -// #include -// #include -// #include "go_gpgme.h" -import "C" - -import ( - "fmt" - "io" - "os" - "runtime" - "time" - "unsafe" -) - -var Version string - -func init() { - Version = C.GoString(C.gpgme_check_version(nil)) -} - -// Callback is the function that is called when a passphrase is required -type Callback func(uidHint string, prevWasBad bool, f *os.File) error - -//export gogpgme_passfunc -func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t { - c := callbackLookup(uintptr(hook)).(*Context) - go_uid_hint := C.GoString(uid_hint) - f := os.NewFile(uintptr(fd), go_uid_hint) - defer f.Close() - err := c.callback(go_uid_hint, prev_was_bad != 0, f) - if err != nil { - return C.GPG_ERR_CANCELED - } - return 0 -} - -type Protocol int - -const ( - ProtocolOpenPGP Protocol = C.GPGME_PROTOCOL_OpenPGP - ProtocolCMS Protocol = C.GPGME_PROTOCOL_CMS - ProtocolGPGConf Protocol = C.GPGME_PROTOCOL_GPGCONF - ProtocolAssuan Protocol = C.GPGME_PROTOCOL_ASSUAN - ProtocolG13 Protocol = C.GPGME_PROTOCOL_G13 - ProtocolUIServer Protocol = C.GPGME_PROTOCOL_UISERVER - // ProtocolSpawn Protocol = C.GPGME_PROTOCOL_SPAWN // Unavailable in 1.4.3 - ProtocolDefault Protocol = C.GPGME_PROTOCOL_DEFAULT - ProtocolUnknown Protocol = C.GPGME_PROTOCOL_UNKNOWN -) - -type PinEntryMode int - -// const ( // Unavailable in 1.3.2 -// PinEntryDefault PinEntryMode = C.GPGME_PINENTRY_MODE_DEFAULT -// PinEntryAsk PinEntryMode = C.GPGME_PINENTRY_MODE_ASK -// PinEntryCancel PinEntryMode = C.GPGME_PINENTRY_MODE_CANCEL -// PinEntryError PinEntryMode = C.GPGME_PINENTRY_MODE_ERROR -// PinEntryLoopback PinEntryMode = C.GPGME_PINENTRY_MODE_LOOPBACK -// ) - -type EncryptFlag uint - -const ( - EncryptAlwaysTrust EncryptFlag = C.GPGME_ENCRYPT_ALWAYS_TRUST - EncryptNoEncryptTo EncryptFlag = C.GPGME_ENCRYPT_NO_ENCRYPT_TO - EncryptPrepare EncryptFlag = C.GPGME_ENCRYPT_PREPARE - EncryptExceptSign EncryptFlag = C.GPGME_ENCRYPT_EXPECT_SIGN - // EncryptNoCompress EncryptFlag = C.GPGME_ENCRYPT_NO_COMPRESS // Unavailable in 1.4.3 -) - -type HashAlgo int - -// const values for HashAlgo values should be added when necessary. - -type KeyListMode uint - -const ( - KeyListModeLocal KeyListMode = C.GPGME_KEYLIST_MODE_LOCAL - KeyListModeExtern KeyListMode = C.GPGME_KEYLIST_MODE_EXTERN - KeyListModeSigs KeyListMode = C.GPGME_KEYLIST_MODE_SIGS - KeyListModeSigNotations KeyListMode = C.GPGME_KEYLIST_MODE_SIG_NOTATIONS - // KeyListModeWithSecret KeyListMode = C.GPGME_KEYLIST_MODE_WITH_SECRET // Unavailable in 1.4.3 - KeyListModeEphemeral KeyListMode = C.GPGME_KEYLIST_MODE_EPHEMERAL - KeyListModeModeValidate KeyListMode = C.GPGME_KEYLIST_MODE_VALIDATE -) - -type PubkeyAlgo int - -// const values for PubkeyAlgo values should be added when necessary. 
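The Callback type above is the passphrase hook of the wrapper being removed; the gogpgme_passfunc trampoline turns the C callback's file descriptor into an *os.File before invoking it. A hypothetical sketch of wiring a fixed passphrase through Context.SetCallback (defined further down in this file); the passphrase string is a placeholder:

package main

import (
    "fmt"
    "os"

    "github.com/mtrmac/gpgme"
)

func main() {
    ctx, err := gpgme.New()
    if err != nil {
        panic(err)
    }
    defer ctx.Release()

    // gpg reads the passphrase from the fd wrapped by f; end it with a newline.
    err = ctx.SetCallback(func(uidHint string, prevWasBad bool, f *os.File) error {
        if prevWasBad {
            // returning an error surfaces as GPG_ERR_CANCELED via gogpgme_passfunc
            return fmt.Errorf("passphrase rejected for %s", uidHint)
        }
        _, werr := f.Write([]byte("placeholder-passphrase\n"))
        return werr
    })
    if err != nil {
        panic(err)
    }
}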
- -type SigMode int - -const ( - SigModeNormal SigMode = C.GPGME_SIG_MODE_NORMAL - SigModeDetach SigMode = C.GPGME_SIG_MODE_DETACH - SigModeClear SigMode = C.GPGME_SIG_MODE_CLEAR -) - -type SigSum int - -const ( - SigSumValid SigSum = C.GPGME_SIGSUM_VALID - SigSumGreen SigSum = C.GPGME_SIGSUM_GREEN - SigSumRed SigSum = C.GPGME_SIGSUM_RED - SigSumKeyRevoked SigSum = C.GPGME_SIGSUM_KEY_REVOKED - SigSumKeyExpired SigSum = C.GPGME_SIGSUM_KEY_EXPIRED - SigSumSigExpired SigSum = C.GPGME_SIGSUM_SIG_EXPIRED - SigSumKeyMissing SigSum = C.GPGME_SIGSUM_KEY_MISSING - SigSumCRLMissing SigSum = C.GPGME_SIGSUM_CRL_MISSING - SigSumCRLTooOld SigSum = C.GPGME_SIGSUM_CRL_TOO_OLD - SigSumBadPolicy SigSum = C.GPGME_SIGSUM_BAD_POLICY - SigSumSysError SigSum = C.GPGME_SIGSUM_SYS_ERROR -) - -type Validity int - -const ( - ValidityUnknown Validity = C.GPGME_VALIDITY_UNKNOWN - ValidityUndefined Validity = C.GPGME_VALIDITY_UNDEFINED - ValidityNever Validity = C.GPGME_VALIDITY_NEVER - ValidityMarginal Validity = C.GPGME_VALIDITY_MARGINAL - ValidityFull Validity = C.GPGME_VALIDITY_FULL - ValidityUltimate Validity = C.GPGME_VALIDITY_ULTIMATE -) - -type ErrorCode int - -const ( - ErrorNoError ErrorCode = C.GPG_ERR_NO_ERROR - ErrorEOF ErrorCode = C.GPG_ERR_EOF -) - -// Error is a wrapper for GPGME errors -type Error struct { - err C.gpgme_error_t -} - -func (e Error) Code() ErrorCode { - return ErrorCode(C.gpgme_err_code(e.err)) -} - -func (e Error) Error() string { - return C.GoString(C.gpgme_strerror(e.err)) -} - -func handleError(err C.gpgme_error_t) error { - e := Error{err: err} - if e.Code() == ErrorNoError { - return nil - } - return e -} - -func cbool(b bool) C.int { - if b { - return 1 - } - return 0 -} - -func EngineCheckVersion(p Protocol) error { - return handleError(C.gpgme_engine_check_version(C.gpgme_protocol_t(p))) -} - -type EngineInfo struct { - info C.gpgme_engine_info_t -} - -func (e *EngineInfo) Next() *EngineInfo { - if e.info.next == nil { - return nil - } - return &EngineInfo{info: e.info.next} -} - -func (e *EngineInfo) Protocol() Protocol { - return Protocol(e.info.protocol) -} - -func (e *EngineInfo) FileName() string { - return C.GoString(e.info.file_name) -} - -func (e *EngineInfo) Version() string { - return C.GoString(e.info.version) -} - -func (e *EngineInfo) RequiredVersion() string { - return C.GoString(e.info.req_version) -} - -func (e *EngineInfo) HomeDir() string { - return C.GoString(e.info.home_dir) -} - -func GetEngineInfo() (*EngineInfo, error) { - info := &EngineInfo{} - return info, handleError(C.gpgme_get_engine_info(&info.info)) -} - -func SetEngineInfo(proto Protocol, fileName, homeDir string) error { - var cfn, chome *C.char - if fileName != "" { - cfn = C.CString(fileName) - defer C.free(unsafe.Pointer(cfn)) - } - if homeDir != "" { - chome = C.CString(homeDir) - defer C.free(unsafe.Pointer(chome)) - } - return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome)) -} - -func FindKeys(pattern string, secretOnly bool) ([]*Key, error) { - var keys []*Key - ctx, err := New() - if err != nil { - return keys, err - } - defer ctx.Release() - if err := ctx.KeyListStart(pattern, secretOnly); err != nil { - return keys, err - } - defer ctx.KeyListEnd() - for ctx.KeyListNext() { - keys = append(keys, ctx.Key) - } - if ctx.KeyError != nil { - return keys, ctx.KeyError - } - return keys, nil -} - -func Decrypt(r io.Reader) (*Data, error) { - ctx, err := New() - if err != nil { - return nil, err - } - defer ctx.Release() - cipher, err := NewDataReader(r) - if err != 
nil { - return nil, err - } - defer cipher.Close() - plain, err := NewData() - if err != nil { - return nil, err - } - err = ctx.Decrypt(cipher, plain) - plain.Seek(0, SeekSet) - return plain, err -} - -type Context struct { - Key *Key - KeyError error - - callback Callback - cbc uintptr - - ctx C.gpgme_ctx_t -} - -func New() (*Context, error) { - c := &Context{} - err := C.gpgme_new(&c.ctx) - runtime.SetFinalizer(c, (*Context).Release) - return c, handleError(err) -} - -func (c *Context) Release() { - if c.ctx == nil { - return - } - if c.cbc > 0 { - callbackDelete(c.cbc) - } - C.gpgme_release(c.ctx) - c.ctx = nil -} - -func (c *Context) SetArmor(yes bool) { - C.gpgme_set_armor(c.ctx, cbool(yes)) -} - -func (c *Context) Armor() bool { - return C.gpgme_get_armor(c.ctx) != 0 -} - -func (c *Context) SetTextMode(yes bool) { - C.gpgme_set_textmode(c.ctx, cbool(yes)) -} - -func (c *Context) TextMode() bool { - return C.gpgme_get_textmode(c.ctx) != 0 -} - -func (c *Context) SetProtocol(p Protocol) error { - return handleError(C.gpgme_set_protocol(c.ctx, C.gpgme_protocol_t(p))) -} - -func (c *Context) Protocol() Protocol { - return Protocol(C.gpgme_get_protocol(c.ctx)) -} - -func (c *Context) SetKeyListMode(m KeyListMode) error { - return handleError(C.gpgme_set_keylist_mode(c.ctx, C.gpgme_keylist_mode_t(m))) -} - -func (c *Context) KeyListMode() KeyListMode { - return KeyListMode(C.gpgme_get_keylist_mode(c.ctx)) -} - -// Unavailable in 1.3.2: -// func (c *Context) SetPinEntryMode(m PinEntryMode) error { -// return handleError(C.gpgme_set_pinentry_mode(c.ctx, C.gpgme_pinentry_mode_t(m))) -// } - -// Unavailable in 1.3.2: -// func (c *Context) PinEntryMode() PinEntryMode { -// return PinEntryMode(C.gpgme_get_pinentry_mode(c.ctx)) -// } - -func (c *Context) SetCallback(callback Callback) error { - var err error - c.callback = callback - if c.cbc > 0 { - callbackDelete(c.cbc) - } - if callback != nil { - cbc := callbackAdd(c) - c.cbc = cbc - _, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc)) - } else { - c.cbc = 0 - _, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0) - } - return err -} - -func (c *Context) EngineInfo() *EngineInfo { - return &EngineInfo{info: C.gpgme_ctx_get_engine_info(c.ctx)} -} - -func (c *Context) SetEngineInfo(proto Protocol, fileName, homeDir string) error { - var cfn, chome *C.char - if fileName != "" { - cfn = C.CString(fileName) - defer C.free(unsafe.Pointer(cfn)) - } - if homeDir != "" { - chome = C.CString(homeDir) - defer C.free(unsafe.Pointer(chome)) - } - return handleError(C.gpgme_ctx_set_engine_info(c.ctx, C.gpgme_protocol_t(proto), cfn, chome)) -} - -func (c *Context) KeyListStart(pattern string, secretOnly bool) error { - cpattern := C.CString(pattern) - defer C.free(unsafe.Pointer(cpattern)) - err := C.gpgme_op_keylist_start(c.ctx, cpattern, cbool(secretOnly)) - return handleError(err) -} - -func (c *Context) KeyListNext() bool { - c.Key = newKey() - err := handleError(C.gpgme_op_keylist_next(c.ctx, &c.Key.k)) - if err != nil { - if e, ok := err.(Error); ok && e.Code() == ErrorEOF { - c.KeyError = nil - } else { - c.KeyError = err - } - return false - } - c.KeyError = nil - return true -} - -func (c *Context) KeyListEnd() error { - return handleError(C.gpgme_op_keylist_end(c.ctx)) -} - -func (c *Context) GetKey(fingerprint string, secret bool) (*Key, error) { - key := newKey() - cfpr := C.CString(fingerprint) - defer C.free(unsafe.Pointer(cfpr)) - err := handleError(C.gpgme_get_key(c.ctx, cfpr, &key.k, 
cbool(secret))) - if e, ok := err.(Error); key.k == nil && ok && e.Code() == ErrorEOF { - return nil, fmt.Errorf("key %q not found", fingerprint) - } - if err != nil { - return nil, err - } - return key, nil -} - -func (c *Context) Decrypt(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt(c.ctx, ciphertext.dh, plaintext.dh)) -} - -func (c *Context) DecryptVerify(ciphertext, plaintext *Data) error { - return handleError(C.gpgme_op_decrypt_verify(c.ctx, ciphertext.dh, plaintext.dh)) -} - -type Signature struct { - Summary SigSum - Fingerprint string - Status error - Timestamp time.Time - ExpTimestamp time.Time - WrongKeyUsage bool - PKATrust uint - ChainModel bool - Validity Validity - ValidityReason error - PubkeyAlgo PubkeyAlgo - HashAlgo HashAlgo -} - -func (c *Context) Verify(sig, signedText, plain *Data) (string, []Signature, error) { - var signedTextPtr, plainPtr C.gpgme_data_t = nil, nil - if signedText != nil { - signedTextPtr = signedText.dh - } - if plain != nil { - plainPtr = plain.dh - } - err := handleError(C.gpgme_op_verify(c.ctx, sig.dh, signedTextPtr, plainPtr)) - if err != nil { - return "", nil, err - } - res := C.gpgme_op_verify_result(c.ctx) - sigs := []Signature{} - for s := res.signatures; s != nil; s = s.next { - sig := Signature{ - Summary: SigSum(s.summary), - Fingerprint: C.GoString(s.fpr), - Status: handleError(s.status), - // s.notations not implemented - Timestamp: time.Unix(int64(s.timestamp), 0), - ExpTimestamp: time.Unix(int64(s.exp_timestamp), 0), - WrongKeyUsage: C.signature_wrong_key_usage(s) != 0, - PKATrust: uint(C.signature_pka_trust(s)), - ChainModel: C.signature_chain_model(s) != 0, - Validity: Validity(s.validity), - ValidityReason: handleError(s.validity_reason), - PubkeyAlgo: PubkeyAlgo(s.pubkey_algo), - HashAlgo: HashAlgo(s.hash_algo), - } - sigs = append(sigs, sig) - } - return C.GoString(res.file_name), sigs, nil -} - -func (c *Context) Encrypt(recipients []*Key, flags EncryptFlag, plaintext, ciphertext *Data) error { - size := unsafe.Sizeof(new(C.gpgme_key_t)) - recp := C.calloc(C.size_t(len(recipients)+1), C.size_t(size)) - defer C.free(recp) - for i := range recipients { - ptr := (*C.gpgme_key_t)(unsafe.Pointer(uintptr(recp) + size*uintptr(i))) - *ptr = recipients[i].k - } - err := C.gpgme_op_encrypt(c.ctx, (*C.gpgme_key_t)(recp), C.gpgme_encrypt_flags_t(flags), plaintext.dh, ciphertext.dh) - return handleError(err) -} - -func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error { - C.gpgme_signers_clear(c.ctx) - for _, k := range signers { - if err := handleError(C.gpgme_signers_add(c.ctx, k.k)); err != nil { - C.gpgme_signers_clear(c.ctx) - return err - } - } - return handleError(C.gpgme_op_sign(c.ctx, plain.dh, sig.dh, C.gpgme_sig_mode_t(mode))) -} - -// ImportStatusFlags describes the type of ImportStatus.Status. The C API in gpgme.h simply uses "unsigned". 
-type ImportStatusFlags uint - -const ( - ImportNew ImportStatusFlags = C.GPGME_IMPORT_NEW - ImportUID ImportStatusFlags = C.GPGME_IMPORT_UID - ImportSIG ImportStatusFlags = C.GPGME_IMPORT_SIG - ImportSubKey ImportStatusFlags = C.GPGME_IMPORT_SUBKEY - ImportSecret ImportStatusFlags = C.GPGME_IMPORT_SECRET -) - -type ImportStatus struct { - Fingerprint string - Result error - Status ImportStatusFlags -} - -type ImportResult struct { - Considered int - NoUserID int - Imported int - ImportedRSA int - Unchanged int - NewUserIDs int - NewSubKeys int - NewSignatures int - NewRevocations int - SecretRead int - SecretImported int - SecretUnchanged int - NotImported int - Imports []ImportStatus -} - -func (c *Context) Import(keyData *Data) (*ImportResult, error) { - err := handleError(C.gpgme_op_import(c.ctx, keyData.dh)) - if err != nil { - return nil, err - } - res := C.gpgme_op_import_result(c.ctx) - imports := []ImportStatus{} - for s := res.imports; s != nil; s = s.next { - imports = append(imports, ImportStatus{ - Fingerprint: C.GoString(s.fpr), - Result: handleError(s.result), - Status: ImportStatusFlags(s.status), - }) - } - return &ImportResult{ - Considered: int(res.considered), - NoUserID: int(res.no_user_id), - Imported: int(res.imported), - ImportedRSA: int(res.imported_rsa), - Unchanged: int(res.unchanged), - NewUserIDs: int(res.new_user_ids), - NewSubKeys: int(res.new_sub_keys), - NewSignatures: int(res.new_signatures), - NewRevocations: int(res.new_revocations), - SecretRead: int(res.secret_read), - SecretImported: int(res.secret_imported), - SecretUnchanged: int(res.secret_unchanged), - NotImported: int(res.not_imported), - Imports: imports, - }, nil -} - -type Key struct { - k C.gpgme_key_t -} - -func newKey() *Key { - k := &Key{} - runtime.SetFinalizer(k, (*Key).Release) - return k -} - -func (k *Key) Release() { - C.gpgme_key_release(k.k) - k.k = nil -} - -func (k *Key) Revoked() bool { - return C.key_revoked(k.k) != 0 -} - -func (k *Key) Expired() bool { - return C.key_expired(k.k) != 0 -} - -func (k *Key) Disabled() bool { - return C.key_disabled(k.k) != 0 -} - -func (k *Key) Invalid() bool { - return C.key_invalid(k.k) != 0 -} - -func (k *Key) CanEncrypt() bool { - return C.key_can_encrypt(k.k) != 0 -} - -func (k *Key) CanSign() bool { - return C.key_can_sign(k.k) != 0 -} - -func (k *Key) CanCertify() bool { - return C.key_can_certify(k.k) != 0 -} - -func (k *Key) Secret() bool { - return C.key_secret(k.k) != 0 -} - -func (k *Key) CanAuthenticate() bool { - return C.key_can_authenticate(k.k) != 0 -} - -func (k *Key) IsQualified() bool { - return C.key_is_qualified(k.k) != 0 -} - -func (k *Key) Protocol() Protocol { - return Protocol(k.k.protocol) -} - -func (k *Key) IssuerSerial() string { - return C.GoString(k.k.issuer_serial) -} - -func (k *Key) IssuerName() string { - return C.GoString(k.k.issuer_name) -} - -func (k *Key) ChainID() string { - return C.GoString(k.k.chain_id) -} - -func (k *Key) OwnerTrust() Validity { - return Validity(k.k.owner_trust) -} - -func (k *Key) SubKeys() *SubKey { - if k.k.subkeys == nil { - return nil - } - return &SubKey{k: k.k.subkeys, parent: k} -} - -func (k *Key) UserIDs() *UserID { - if k.k.uids == nil { - return nil - } - return &UserID{u: k.k.uids, parent: k} -} - -func (k *Key) KeyListMode() KeyListMode { - return KeyListMode(k.k.keylist_mode) -} - -type SubKey struct { - k C.gpgme_subkey_t - parent *Key // make sure the key is not released when we have a reference to a subkey -} - -func (k *SubKey) Next() *SubKey { - if k.k.next == nil 
{ - return nil - } - return &SubKey{k: k.k.next, parent: k.parent} -} - -func (k *SubKey) Revoked() bool { - return C.subkey_revoked(k.k) != 0 -} - -func (k *SubKey) Expired() bool { - return C.subkey_expired(k.k) != 0 -} - -func (k *SubKey) Disabled() bool { - return C.subkey_disabled(k.k) != 0 -} - -func (k *SubKey) Invalid() bool { - return C.subkey_invalid(k.k) != 0 -} - -func (k *SubKey) Secret() bool { - return C.subkey_secret(k.k) != 0 -} - -func (k *SubKey) KeyID() string { - return C.GoString(k.k.keyid) -} - -func (k *SubKey) Fingerprint() string { - return C.GoString(k.k.fpr) -} - -func (k *SubKey) Created() time.Time { - if k.k.timestamp <= 0 { - return time.Time{} - } - return time.Unix(int64(k.k.timestamp), 0) -} - -func (k *SubKey) Expires() time.Time { - if k.k.expires <= 0 { - return time.Time{} - } - return time.Unix(int64(k.k.expires), 0) -} - -func (k *SubKey) CardNumber() string { - return C.GoString(k.k.card_number) -} - -type UserID struct { - u C.gpgme_user_id_t - parent *Key // make sure the key is not released when we have a reference to a user ID -} - -func (u *UserID) Next() *UserID { - if u.u.next == nil { - return nil - } - return &UserID{u: u.u.next, parent: u.parent} -} - -func (u *UserID) Revoked() bool { - return C.uid_revoked(u.u) != 0 -} - -func (u *UserID) Invalid() bool { - return C.uid_invalid(u.u) != 0 -} - -func (u *UserID) Validity() Validity { - return Validity(u.u.validity) -} - -func (u *UserID) UID() string { - return C.GoString(u.u.uid) -} - -func (u *UserID) Name() string { - return C.GoString(u.u.name) -} - -func (u *UserID) Comment() string { - return C.GoString(u.u.comment) -} - -func (u *UserID) Email() string { - return C.GoString(u.u.email) -} - -// This is somewhat of a horrible hack. We need to unset GPG_AGENT_INFO so that gpgme does not pass --use-agent to GPG. -// os.Unsetenv should be enough, but that only calls the underlying C library (which gpgme uses) if cgo is involved -// - and cgo can't be used in tests. So, provide this helper for test initialization. -func unsetenvGPGAgentInfo() { - v := C.CString("GPG_AGENT_INFO") - defer C.free(unsafe.Pointer(v)) - C.unsetenv(v) -} diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f3..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE deleted file mode 100644 index 5515ccfb71..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/codegangsta/cli/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright (C) 2013 Jeremy Saenz -All Rights Reserved. - -MIT LICENSE - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. 
- -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE deleted file mode 100644 index c7a3f0cfd4..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE deleted file mode 100644 index 6e6f469ab9..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2015 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. 
- -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE deleted file mode 100644 index e67cdabd22..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 Honza Pokorny -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png deleted file mode 100644 index 8839723a37..0000000000 Binary files a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png and /dev/null differ diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE deleted file mode 100644 index ac74d8f049..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 9e4bd4dbee..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD deleted file mode 100644 index ac74d8f049..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE deleted file mode 100644 index b55b37bc31..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 1b1b1921ef..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,31 +0,0 @@ -Go support for Protocol Buffers - Google's data interchange format - -Copyright 2010 The Go Authors. All rights reserved. -https://github.com/golang/protobuf - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE deleted file mode 100644 index 405a49618b..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE +++ /dev/null @@ -1,8 +0,0 @@ -ffjson -Copyright (c) 2014, Paul Querna - -This product includes software developed by -Paul Querna (http://paul.querna.org/). - -Portions of this software were developed as -part of Go, Copyright (c) 2012 The Go Authors. \ No newline at end of file diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE deleted file mode 100644 index 81cf60de29..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015 Matthew Heon -Copyright (c) 2015 Paul Moore -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: -- Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE b/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE deleted file mode 100644 index 80dd96de77..0000000000 --- a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2013 Suryandaru Triandana -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go new file mode 100644 index 0000000000..25ff515893 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go @@ -0,0 +1,64 @@ +// +build linux + +package cgroups + +import ( + "fmt" + + "github.com/opencontainers/runc/libcontainer/configs" +) + +type Manager interface { + // Applies cgroup configuration to the process with the specified pid + Apply(pid int) error + + // Returns the PIDs inside the cgroup set + GetPids() ([]int, error) + + // Returns the PIDs inside the cgroup set & all sub-cgroups + GetAllPids() ([]int, error) + + // Returns statistics for the cgroup set + GetStats() (*Stats, error) + + // Toggles the freezer cgroup according to the specified state + Freeze(state configs.FreezerState) error + + // Destroys the cgroup set + Destroy() error + + // The option funcs SystemdCgroups() and Cgroupfs() require the following attributes: + // Paths map[string]string + // Cgroups *configs.Cgroup + // Paths maps cgroup subsystem to path at which it is mounted. + // Cgroups specifies the cgroup settings for the various subsystems + + // Returns cgroup paths to save in a state file and to be able to + // restore the object later. + GetPaths() map[string]string + + // Sets the cgroup as configured. + Set(container *configs.Config) error +} + +type NotFoundError struct { + Subsystem string +} + +func (e *NotFoundError) Error() string { + return fmt.Sprintf("mountpoint for %s not found", e.Subsystem) +} + +func NewNotFoundError(sub string) error { + return &NotFoundError{ + Subsystem: sub, + } +} + +func IsNotFound(err error) bool { + if err == nil { + return false + } + _, ok := err.(*NotFoundError) + return ok +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go new file mode 100644 index 0000000000..278d507e28 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go @@ -0,0 +1,3 @@ +// +build !linux + +package cgroups diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go new file mode 100644 index 0000000000..8eeedc55b0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -0,0 +1,108 @@ +// +build linux + +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit.
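+ // (This is nr_throttled in the kernel's cpu.stat file.)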
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go new file mode 100644 index 0000000000..8eeedc55b0 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/stats.go @@ -0,0 +1,108 @@ +// +build linux + +package cgroups + +type ThrottlingData struct { + // Number of periods with throttling active + Periods uint64 `json:"periods,omitempty"` + // Number of periods when the container hit its throttling limit. + ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` + // Aggregate time the container was throttled for, in nanoseconds. + ThrottledTime uint64 `json:"throttled_time,omitempty"` +} + +// CpuUsage denotes the usage of a CPU. +// All CPU stats are aggregate since container inception. +type CpuUsage struct { + // Total CPU time consumed. + // Units: nanoseconds. + TotalUsage uint64 `json:"total_usage,omitempty"` + // Total CPU time consumed per core. + // Units: nanoseconds. + PercpuUsage []uint64 `json:"percpu_usage,omitempty"` + // Time spent by tasks of the cgroup in kernel mode. + // Units: nanoseconds. + UsageInKernelmode uint64 `json:"usage_in_kernelmode"` + // Time spent by tasks of the cgroup in user mode. + // Units: nanoseconds. + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +type CpuStats struct { + CpuUsage CpuUsage `json:"cpu_usage,omitempty"` + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +type MemoryData struct { + Usage uint64 `json:"usage,omitempty"` + MaxUsage uint64 `json:"max_usage,omitempty"` + Failcnt uint64 `json:"failcnt"` + Limit uint64 `json:"limit"` +} + +type MemoryStats struct { + // memory used for cache + Cache uint64 `json:"cache,omitempty"` + // usage of memory + Usage MemoryData `json:"usage,omitempty"` + // usage of memory + swap + SwapUsage MemoryData `json:"swap_usage,omitempty"` + // usage of kernel memory + KernelUsage MemoryData `json:"kernel_usage,omitempty"` + // usage of kernel TCP memory + KernelTCPUsage MemoryData `json:"kernel_tcp_usage,omitempty"` + // if true, memory usage is accounted for throughout a hierarchy of cgroups. + UseHierarchy bool `json:"use_hierarchy"` + + Stats map[string]uint64 `json:"stats,omitempty"` +} + +type PidsStats struct { + // number of pids in the cgroup + Current uint64 `json:"current,omitempty"` + // active pids hard limit + Limit uint64 `json:"limit,omitempty"` +} + +type BlkioStatEntry struct { + Major uint64 `json:"major,omitempty"` + Minor uint64 `json:"minor,omitempty"` + Op string `json:"op,omitempty"` + Value uint64 `json:"value,omitempty"` +} + +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive,omitempty"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive,omitempty"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive,omitempty"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive,omitempty"` +}
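For orientation, a small illustrative sketch (not from the diff) of how the stats types above compose; the field values are invented. NewStats, defined further below in this file, pre-allocates the two maps so callers can assign into them directly.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	st := cgroups.NewStats()
	st.CpuStats.CpuUsage.TotalUsage = 1500000000 // 1.5s of CPU, in nanoseconds
	st.MemoryStats.Usage.Usage = 64 << 20        // 64 MiB currently in use
	st.MemoryStats.Stats["cache"] = 4 << 20      // raw memory.stat entries land in this map
	st.HugetlbStats["2MB"] = cgroups.HugetlbStats{Usage: 4 << 20} // two 2 MiB pages

	out, _ := json.MarshalIndent(st, "", "  ")
	fmt.Println(string(out))
}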
+ +type HugetlbStats struct { + // current res_counter usage for hugetlb + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // number of times hugetlb usage allocation has failed. + Failcnt uint64 `json:"failcnt"` +} + +type Stats struct { + CpuStats CpuStats `json:"cpu_stats,omitempty"` + MemoryStats MemoryStats `json:"memory_stats,omitempty"` + PidsStats PidsStats `json:"pids_stats,omitempty"` + BlkioStats BlkioStats `json:"blkio_stats,omitempty"` + // the map is in the format "size of hugepage: stats of the hugepage" + HugetlbStats map[string]HugetlbStats `json:"hugetlb_stats,omitempty"` +} + +func NewStats() *Stats { + memoryStats := MemoryStats{Stats: make(map[string]uint64)} + hugetlbStats := make(map[string]HugetlbStats) + return &Stats{MemoryStats: memoryStats, HugetlbStats: hugetlbStats} +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go new file mode 100644 index 0000000000..7c995efee5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/cgroups/utils.go @@ -0,0 +1,462 @@ +// +build linux + +package cgroups + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/docker/go-units" +) + +const ( + cgroupNamePrefix = "name=" + CgroupProcesses = "cgroup.procs" +) + +// https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt +func FindCgroupMountpoint(subsystem string) (string, error) { + mnt, _, err := FindCgroupMountpointAndRoot(subsystem) + return mnt, err +} + +func FindCgroupMountpointAndRoot(subsystem string) (string, string, error) { + // We are not using mount.GetMounts() because it is very inefficient; + // parsing /proc/self/mountinfo directly (without Sscanf) is roughly 10x + // faster, and this was one of two major performance drawbacks in + // container start. + if !isSubsystemAvailable(subsystem) { + return "", "", NewNotFoundError(subsystem) + } + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", "", err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + txt := scanner.Text() + fields := strings.Split(txt, " ") + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + if opt == subsystem { + return fields[4], fields[3], nil + } + } + } + if err := scanner.Err(); err != nil { + return "", "", err + } + + return "", "", NewNotFoundError(subsystem) +} + +func isSubsystemAvailable(subsystem string) bool { + cgroups, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return false + } + _, avail := cgroups[subsystem] + return avail +} + +func GetClosestMountpointAncestor(dir, mountinfo string) string { + deepestMountPoint := "" + for _, mountInfoEntry := range strings.Split(mountinfo, "\n") { + mountInfoParts := strings.Fields(mountInfoEntry) + if len(mountInfoParts) < 5 { + continue + } + mountPoint := mountInfoParts[4] + if strings.HasPrefix(mountPoint, deepestMountPoint) && strings.HasPrefix(dir, mountPoint) { + deepestMountPoint = mountPoint + } + } + return deepestMountPoint +}
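To make the field offsets these parsers depend on concrete, here is a standalone sketch (not in the diff) using one example /proc/self/mountinfo line: field 4 (zero-indexed) is the mount point, field 3 is the root, and the comma-separated super options in the last field name the cgroup subsystems.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// An example mountinfo line for a cgroup v1 mount (data made up):
	line := "34 25 0:29 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid - cgroup cgroup rw,cpu,cpuacct"
	fields := strings.Split(line, " ")
	fmt.Println("root:      ", fields[3]) // "/"
	fmt.Println("mountpoint:", fields[4]) // "/sys/fs/cgroup/cpu,cpuacct"
	// FindCgroupMountpointAndRoot scans the last field exactly like this:
	for _, opt := range strings.Split(fields[len(fields)-1], ",") {
		if opt == "cpu" {
			fmt.Println("cpu subsystem found on this mount")
		}
	}
}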
+ +func FindCgroupMountpointDir() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + text := scanner.Text() + fields := strings.Split(text, " ") + // Safe as mountinfo encodes mountpoints with spaces as \040. + index := strings.Index(text, " - ") + postSeparatorFields := strings.Fields(text[index+3:]) + numPostFields := len(postSeparatorFields) + + // This is an error as we can't detect if the mount is for "cgroup" + if numPostFields == 0 { + return "", fmt.Errorf("Found no fields post '-' in %q", text) + } + + if postSeparatorFields[0] == "cgroup" { + // Check that the mount is properly formatted. + if numPostFields < 3 { + return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) + } + + return filepath.Dir(fields[4]), nil + } + } + if err := scanner.Err(); err != nil { + return "", err + } + + return "", NewNotFoundError("cgroup") +} + +type Mount struct { + Mountpoint string + Root string + Subsystems []string +} + +func (m Mount) GetOwnCgroup(cgroups map[string]string) (string, error) { + if len(m.Subsystems) == 0 { + return "", fmt.Errorf("no subsystem for mount") + } + + return getControllerPath(m.Subsystems[0], cgroups) +} + +func getCgroupMountsHelper(ss map[string]bool, mi io.Reader, all bool) ([]Mount, error) { + res := make([]Mount, 0, len(ss)) + scanner := bufio.NewScanner(mi) + numFound := 0 + for scanner.Scan() && numFound < len(ss) { + txt := scanner.Text() + sepIdx := strings.Index(txt, " - ") + if sepIdx == -1 { + return nil, fmt.Errorf("invalid mountinfo format") + } + if txt[sepIdx+3:sepIdx+10] == "cgroup2" || txt[sepIdx+3:sepIdx+9] != "cgroup" { + continue + } + fields := strings.Split(txt, " ") + m := Mount{ + Mountpoint: fields[4], + Root: fields[3], + } + for _, opt := range strings.Split(fields[len(fields)-1], ",") { + if !ss[opt] { + continue + } + if strings.HasPrefix(opt, cgroupNamePrefix) { + m.Subsystems = append(m.Subsystems, opt[len(cgroupNamePrefix):]) + } else { + m.Subsystems = append(m.Subsystems, opt) + } + if !all { + numFound++ + } + } + res = append(res, m) + } + if err := scanner.Err(); err != nil { + return nil, err + } + return res, nil +} + +// GetCgroupMounts returns the mounts for the cgroup subsystems. +// all indicates whether to return just the first instance or all the mounts. +func GetCgroupMounts(all bool) ([]Mount, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return nil, err + } + defer f.Close() + + allSubsystems, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return nil, err + } + + allMap := make(map[string]bool) + for s := range allSubsystems { + allMap[s] = true + } + return getCgroupMountsHelper(allMap, f, all) +} + +// GetAllSubsystems returns all the cgroup subsystems supported by the kernel +func GetAllSubsystems() ([]string, error) { + f, err := os.Open("/proc/cgroups") + if err != nil { + return nil, err + } + defer f.Close() + + subsystems := []string{} + + s := bufio.NewScanner(f) + for s.Scan() { + text := s.Text() + if text[0] != '#' { + parts := strings.Fields(text) + if len(parts) >= 4 && parts[3] != "0" { + subsystems = append(subsystems, parts[0]) + } + } + } + if err := s.Err(); err != nil { + return nil, err + } + return subsystems, nil +} + +// GetOwnCgroup returns the relative path to the cgroup docker is running in.
+func GetOwnCgroup(subsystem string) (string, error) { + cgroups, err := ParseCgroupFile("/proc/self/cgroup") + if err != nil { + return "", err + } + + return getControllerPath(subsystem, cgroups) +} + +func GetOwnCgroupPath(subsystem string) (string, error) { + cgroup, err := GetOwnCgroup(subsystem) + if err != nil { + return "", err + } + + return getCgroupPathHelper(subsystem, cgroup) +} + +func GetInitCgroup(subsystem string) (string, error) { + cgroups, err := ParseCgroupFile("/proc/1/cgroup") + if err != nil { + return "", err + } + + return getControllerPath(subsystem, cgroups) +} + +func GetInitCgroupPath(subsystem string) (string, error) { + cgroup, err := GetInitCgroup(subsystem) + if err != nil { + return "", err + } + + return getCgroupPathHelper(subsystem, cgroup) +} + +func getCgroupPathHelper(subsystem, cgroup string) (string, error) { + mnt, root, err := FindCgroupMountpointAndRoot(subsystem) + if err != nil { + return "", err + } + + // This is needed for nested containers, because in /proc/self/cgroup we + // see paths from the host, which don't exist in the container. + relCgroup, err := filepath.Rel(root, cgroup) + if err != nil { + return "", err + } + + return filepath.Join(mnt, relCgroup), nil +} + +func readProcsFile(dir string) ([]int, error) { + f, err := os.Open(filepath.Join(dir, CgroupProcesses)) + if err != nil { + return nil, err + } + defer f.Close() + + var ( + s = bufio.NewScanner(f) + out = []int{} + ) + + for s.Scan() { + if t := s.Text(); t != "" { + pid, err := strconv.Atoi(t) + if err != nil { + return nil, err + } + out = append(out, pid) + } + } + return out, nil +} + +// ParseCgroupFile parses the given cgroup file, typically from +// /proc/<pid>/cgroup, into a map of subsystems to cgroup paths. +func ParseCgroupFile(path string) (map[string]string, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + return parseCgroupFromReader(f) +} + +// helper function for ParseCgroupFile to make testing easier +func parseCgroupFromReader(r io.Reader) (map[string]string, error) { + s := bufio.NewScanner(r) + cgroups := make(map[string]string) + + for s.Scan() { + text := s.Text() + // from cgroups(7): + // /proc/[pid]/cgroup + // ... + // For each cgroup hierarchy ... there is one entry + // containing three colon-separated fields of the form: + // hierarchy-ID:subsystem-list:cgroup-path + parts := strings.SplitN(text, ":", 3) + if len(parts) < 3 { + return nil, fmt.Errorf("invalid cgroup entry: must contain at least two colons: %v", text) + } + + for _, subs := range strings.Split(parts[1], ",") { + cgroups[subs] = parts[2] + } + } + if err := s.Err(); err != nil { + return nil, err + } + + return cgroups, nil +} + +func getControllerPath(subsystem string, cgroups map[string]string) (string, error) { + + if p, ok := cgroups[subsystem]; ok { + return p, nil + } + + if p, ok := cgroups[cgroupNamePrefix+subsystem]; ok { + return p, nil + } + + return "", NewNotFoundError(subsystem) +} + +func PathExists(path string) bool { + if _, err := os.Stat(path); err != nil { + return false + } + return true +} + +func EnterPid(cgroupPaths map[string]string, pid int) error { + for _, path := range cgroupPaths { + if PathExists(path) { + if err := WriteCgroupProc(path, pid); err != nil { + return err + } + } + } + return nil +}
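A short sketch (not part of the diff) of what ParseCgroupFile produces on a cgroup v1 system; the /docker/... path is an example. Linux-only, like the package.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	m, err := cgroups.ParseCgroupFile("/proc/self/cgroup")
	if err != nil {
		panic(err)
	}
	// A line such as "4:cpu,cpuacct:/docker/abc123" yields two entries:
	//   m["cpu"]     == "/docker/abc123"
	//   m["cpuacct"] == "/docker/abc123"
	for subsystem, path := range m {
		fmt.Printf("%-12s -> %s\n", subsystem, path)
	}
}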
+ +// RemovePaths iterates over the provided paths, removing them. +// We try to remove all paths five times, with an increasing delay between +// tries. If some cgroups have still not been removed after that, an +// appropriate error is returned. +func RemovePaths(paths map[string]string) (err error) { + delay := 10 * time.Millisecond + for i := 0; i < 5; i++ { + if i != 0 { + time.Sleep(delay) + delay *= 2 + } + for s, p := range paths { + os.RemoveAll(p) + // TODO: logging should probably happen here + _, err := os.Stat(p) + // We need this strange way of checking cgroup existence because + // RemoveAll almost always returns an error, even on already-removed + // cgroups + if os.IsNotExist(err) { + delete(paths, s) + } + } + if len(paths) == 0 { + return nil + } + } + return fmt.Errorf("Failed to remove paths: %v", paths) +} + +func GetHugePageSize() ([]string, error) { + var pageSizes []string + sizeList := []string{"B", "kB", "MB", "GB", "TB", "PB"} + files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") + if err != nil { + return pageSizes, err + } + for _, st := range files { + nameArray := strings.Split(st.Name(), "-") + pageSize, err := units.RAMInBytes(nameArray[1]) + if err != nil { + return []string{}, err + } + sizeString := units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList) + pageSizes = append(pageSizes, sizeString) + } + + return pageSizes, nil +} + +// GetPids returns all pids that were added to the cgroup at path. +func GetPids(path string) ([]int, error) { + return readProcsFile(path) +} + +// GetAllPids returns all pids that were added to the cgroup at path, and to +// all of its subcgroups. +func GetAllPids(path string) ([]int, error) { + var pids []int + // collect pids from all sub-cgroups + err := filepath.Walk(path, func(p string, info os.FileInfo, iErr error) error { + dir, file := filepath.Split(p) + if file != CgroupProcesses { + return nil + } + if iErr != nil { + return iErr + } + cPids, err := readProcsFile(dir) + if err != nil { + return err + } + pids = append(pids, cPids...) + return nil + }) + return pids, err +}
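A hedged usage sketch (not from the diff) of the two pid listers above; the cgroup path is an example and assumes a cgroup v1 layout.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/cgroups"
)

func main() {
	const path = "/sys/fs/cgroup/memory/mygroup" // example path
	direct, err := cgroups.GetPids(path) // members of mygroup only
	if err != nil {
		panic(err)
	}
	all, err := cgroups.GetAllPids(path) // mygroup plus every nested sub-cgroup
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d direct pids, %d total pids\n", len(direct), len(all))
}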
+ if dir == "" { + return fmt.Errorf("no such directory for %s", CgroupProcesses) + } + + // Dont attach any pid to the cgroup if -1 is specified as a pid + if pid != -1 { + if err := ioutil.WriteFile(filepath.Join(dir, CgroupProcesses), []byte(strconv.Itoa(pid)), 0700); err != nil { + return fmt.Errorf("failed to write %v to %v: %v", pid, CgroupProcesses, err) + } + } + return nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go new file mode 100644 index 0000000000..e0f3ca1653 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go @@ -0,0 +1,61 @@ +package configs + +import "fmt" + +// blockIODevice holds major:minor format supported in blkio cgroup +type blockIODevice struct { + // Major is the device's major number + Major int64 `json:"major"` + // Minor is the device's minor number + Minor int64 `json:"minor"` +} + +// WeightDevice struct holds a `major:minor weight`|`major:minor leaf_weight` pair +type WeightDevice struct { + blockIODevice + // Weight is the bandwidth rate for the device, range is from 10 to 1000 + Weight uint16 `json:"weight"` + // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only + LeafWeight uint16 `json:"leafWeight"` +} + +// NewWeightDevice returns a configured WeightDevice pointer +func NewWeightDevice(major, minor int64, weight, leafWeight uint16) *WeightDevice { + wd := &WeightDevice{} + wd.Major = major + wd.Minor = minor + wd.Weight = weight + wd.LeafWeight = leafWeight + return wd +} + +// WeightString formats the struct to be writable to the cgroup specific file +func (wd *WeightDevice) WeightString() string { + return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight) +} + +// LeafWeightString formats the struct to be writable to the cgroup specific file +func (wd *WeightDevice) LeafWeightString() string { + return fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight) +} + +// ThrottleDevice struct holds a `major:minor rate_per_second` pair +type ThrottleDevice struct { + blockIODevice + // Rate is the IO rate limit per cgroup per device + Rate uint64 `json:"rate"` +} + +// NewThrottleDevice returns a configured ThrottleDevice pointer +func NewThrottleDevice(major, minor int64, rate uint64) *ThrottleDevice { + td := &ThrottleDevice{} + td.Major = major + td.Minor = minor + td.Rate = rate + return td +} + +// String formats the struct to be writable to the cgroup specific file +func (td *ThrottleDevice) String() string { + return fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go new file mode 100644 index 0000000000..e15a662f52 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -0,0 +1,122 @@ +package configs + +type FreezerState string + +const ( + Undefined FreezerState = "" + Frozen FreezerState = "FROZEN" + Thawed FreezerState = "THAWED" +) + +type Cgroup struct { + // Deprecated, use Path instead + Name string `json:"name,omitempty"` + + // name of parent of cgroup or slice + // Deprecated, use Path instead + Parent string `json:"parent,omitempty"` + + // Path specifies the path to cgroups that are created and/or joined by the container. 
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go new file mode 100644 index 0000000000..e15a662f52 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_linux.go @@ -0,0 +1,122 @@ +package configs + +type FreezerState string + +const ( + Undefined FreezerState = "" + Frozen FreezerState = "FROZEN" + Thawed FreezerState = "THAWED" +) + +type Cgroup struct { + // Deprecated, use Path instead + Name string `json:"name,omitempty"` + + // name of parent of cgroup or slice + // Deprecated, use Path instead + Parent string `json:"parent,omitempty"` + + // Path specifies the path to cgroups that are created and/or joined by the container. + // The path is assumed to be relative to the host system cgroup mountpoint. + Path string `json:"path"` + + // ScopePrefix describes prefix for the scope name + ScopePrefix string `json:"scope_prefix"` + + // Paths represent the absolute cgroups paths to join. + // This takes precedence over Path. + Paths map[string]string + + // Resources contains various cgroups settings to apply + *Resources +} + +type Resources struct { + // If this is true allow access to any kind of device within the container. If false, allow access only to devices explicitly listed in the allowed_devices list. + // Deprecated + AllowAllDevices *bool `json:"allow_all_devices,omitempty"` + // Deprecated + AllowedDevices []*Device `json:"allowed_devices,omitempty"` + // Deprecated + DeniedDevices []*Device `json:"denied_devices,omitempty"` + + Devices []*Device `json:"devices"` + + // Memory limit (in bytes) + Memory int64 `json:"memory"` + + // Memory reservation or soft_limit (in bytes) + MemoryReservation int64 `json:"memory_reservation"` + + // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwap int64 `json:"memory_swap"` + + // Kernel memory limit (in bytes) + KernelMemory int64 `json:"kernel_memory"` + + // Kernel memory limit for TCP use (in bytes) + KernelMemoryTCP int64 `json:"kernel_memory_tcp"` + + // CPU shares (relative weight vs. other containers) + CpuShares uint64 `json:"cpu_shares"` + + // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + CpuQuota int64 `json:"cpu_quota"` + + // CPU period to be used for hardcapping (in usecs). 0 to use system default. + CpuPeriod uint64 `json:"cpu_period"` + + // How much CPU time can be used for realtime scheduling (in usecs). + CpuRtRuntime int64 `json:"cpu_rt_quota"` + + // CPU period to be used for realtime scheduling (in usecs). + CpuRtPeriod uint64 `json:"cpu_rt_period"` + + // CPUs to use (cpuset) + CpusetCpus string `json:"cpuset_cpus"` + + // Memory nodes to use (cpuset) + CpusetMems string `json:"cpuset_mems"` + + // Process limit; set to a value <= 0 to disable the limit. + PidsLimit int64 `json:"pids_limit"` + + // Specifies per cgroup weight, range is from 10 to 1000. + BlkioWeight uint16 `json:"blkio_weight"` + + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, range is from 10 to 1000, cfq scheduler only + BlkioLeafWeight uint16 `json:"blkio_leaf_weight"` + + // Weight per cgroup per device, can override BlkioWeight. + BlkioWeightDevice []*WeightDevice `json:"blkio_weight_device"` + + // IO read rate limit per cgroup per device, bytes per second. + BlkioThrottleReadBpsDevice []*ThrottleDevice `json:"blkio_throttle_read_bps_device"` + + // IO write rate limit per cgroup per device, bytes per second. + BlkioThrottleWriteBpsDevice []*ThrottleDevice `json:"blkio_throttle_write_bps_device"` + + // IO read rate limit per cgroup per device, IO per second. + BlkioThrottleReadIOPSDevice []*ThrottleDevice `json:"blkio_throttle_read_iops_device"` + + // IO write rate limit per cgroup per device, IO per second. + BlkioThrottleWriteIOPSDevice []*ThrottleDevice `json:"blkio_throttle_write_iops_device"` + + // Set the freezer state for the container's processes. + Freezer FreezerState `json:"freezer"` + + // Hugetlb limit (in bytes) + HugetlbLimit []*HugepageLimit `json:"hugetlb_limit"` + + // Whether to disable OOM Killer + OomKillDisable bool `json:"oom_kill_disable"` + + // Tuning swappiness behaviour per cgroup + MemorySwappiness *uint64 `json:"memory_swappiness"` + + // Set priority of network traffic for container + NetPrioIfpriomap []*IfPrioMap `json:"net_prio_ifpriomap"` + + // Set class identifier for container's network packets + NetClsClassid uint32 `json:"net_cls_classid_u"` +}
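An illustrative sketch (not part of the diff) of wiring a few Resources fields into a Cgroup config; all values are examples. A real caller would hand this to a cgroups Manager implementation, which this sketch does not do.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	swappiness := uint64(0)
	cg := &configs.Cgroup{
		Path: "/mycontainer", // relative to the host cgroup mountpoint
		Resources: &configs.Resources{
			Memory:            512 << 20, // 512 MiB hard limit
			MemoryReservation: 256 << 20, // soft limit
			CpuShares:         512,       // relative weight
			CpuQuota:          50000,     // half a CPU...
			CpuPeriod:         100000,    // ...per 100ms period
			PidsLimit:         100,
			MemorySwappiness:  &swappiness,
		},
	}
	fmt.Printf("%+v\n", cg.Resources)
}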
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go new file mode 100644 index 0000000000..d74847b0db --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go @@ -0,0 +1,6 @@ +package configs + +// TODO Windows: This can ultimately be entirely factored out on Windows as +// cgroups are a Unix-specific construct. +type Cgroup struct { +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go new file mode 100644 index 0000000000..3cae4fd8d9 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go @@ -0,0 +1,348 @@ +package configs + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "time" + + "github.com/opencontainers/runtime-spec/specs-go" + + "github.com/sirupsen/logrus" +) + +type Rlimit struct { + Type int `json:"type"` + Hard uint64 `json:"hard"` + Soft uint64 `json:"soft"` +} + +// IDMap represents UID/GID Mappings for User Namespaces. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +// Seccomp represents syscall restrictions. +// By default, only the native architecture of the kernel is allowed to be used +// for syscalls. Additional architectures can be added by specifying them in +// Architectures. +type Seccomp struct { + DefaultAction Action `json:"default_action"` + Architectures []string `json:"architectures"` + Syscalls []*Syscall `json:"syscalls"` +} + +// Action is taken upon rule match in Seccomp +type Action int + +const ( + Kill Action = iota + 1 + Errno + Trap + Allow + Trace +) + +// Operator is a comparison operator to be used when matching syscall arguments in Seccomp +type Operator int + +const ( + EqualTo Operator = iota + 1 + NotEqualTo + GreaterThan + GreaterThanOrEqualTo + LessThan + LessThanOrEqualTo + MaskEqualTo +) + +// Arg is a rule to match a specific syscall argument in Seccomp +type Arg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"value_two"` + Op Operator `json:"op"` +} + +// Syscall is a rule to match a syscall in Seccomp +type Syscall struct { + Name string `json:"name"` + Action Action `json:"action"` + Args []*Arg `json:"args"` +}
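Not from the diff: a small example of the Seccomp policy shape defined above, default-allow with one rule that makes personality(2) fail with an errno when called with argument 0x20000 (made-up example values).

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	policy := &configs.Seccomp{
		DefaultAction: configs.Allow,
		Architectures: []string{"amd64"},
		Syscalls: []*configs.Syscall{
			{
				Name:   "personality",
				Action: configs.Errno,
				Args: []*configs.Arg{
					{Index: 0, Value: 0x20000, Op: configs.EqualTo},
				},
			},
		},
	}
	fmt.Printf("default=%v rules=%d\n", policy.DefaultAction, len(policy.Syscalls))
}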
+ +// TODO Windows. Many of these fields should be factored out into those parts +// which are common across platforms, and those which are platform specific. + +// Config defines configuration options for executing a process inside a contained environment. +type Config struct { + // NoPivotRoot will use MS_MOVE and a chroot to jail the process into the container's rootfs. + // This is a common option when the container is running in ramdisk. + NoPivotRoot bool `json:"no_pivot_root"` + + // ParentDeathSignal specifies the signal that is sent to the container's process in the case + // that the parent process dies. + ParentDeathSignal int `json:"parent_death_signal"` + + // Path to a directory containing the container's root filesystem. + Rootfs string `json:"rootfs"` + + // Readonlyfs will remount the container's rootfs as readonly where only externally mounted + // bind mounts are writable. + Readonlyfs bool `json:"readonlyfs"` + + // Specifies the mount propagation flags to be applied to /. + RootPropagation int `json:"rootPropagation"` + + // Mounts specify additional source and destination paths that will be mounted inside the container's + // rootfs and mount namespace if specified + Mounts []*Mount `json:"mounts"` + + // The device nodes that should be automatically created within the container upon container start. Note, make sure that the node is marked as allowed in the cgroup as well! + Devices []*Device `json:"devices"` + + MountLabel string `json:"mount_label"` + + // Hostname optionally sets the container's hostname if provided + Hostname string `json:"hostname"` + + // Namespaces specifies the container's namespaces that it should set up when cloning the init process. + // If a namespace is not provided, that namespace is shared from the container's parent process. + Namespaces Namespaces `json:"namespaces"` + + // Capabilities specify the capabilities to keep when executing the process inside the container. + // All capabilities not specified will be dropped from the process's capability mask. + Capabilities *Capabilities `json:"capabilities"` + + // Networks specifies the container's network setup to be created + Networks []*Network `json:"networks"` + + // Routes can be specified to create entries in the route table as the container is started + Routes []*Route `json:"routes"` + + // Cgroups specifies specific cgroup settings for the various subsystems that the container is + // placed into to limit the resources the container has available + Cgroups *Cgroup `json:"cgroups"` + + // AppArmorProfile specifies the profile to apply to the process running in the container and is + // changed at the time the process is execed + AppArmorProfile string `json:"apparmor_profile,omitempty"` + + // ProcessLabel specifies the label to apply to the process running in the container. It is + // commonly used by selinux + ProcessLabel string `json:"process_label,omitempty"` + + // Rlimits specifies the resource limits, such as max open files, to set in the container. + // If Rlimits are not set, the container will inherit rlimits from the parent process. + Rlimits []Rlimit `json:"rlimits,omitempty"` + + // OomScoreAdj specifies the adjustment to be made by the kernel when calculating oom scores + // for a process. Valid values are in the range [-1000, 1000], where processes with + // higher scores are preferred for being killed.
+ // More information about kernel oom score calculation here: https://lwn.net/Articles/317814/ + OomScoreAdj int `json:"oom_score_adj"` + + // UidMappings is an array of User ID mappings for User Namespaces + UidMappings []IDMap `json:"uid_mappings"` + + // GidMappings is an array of Group ID mappings for User Namespaces + GidMappings []IDMap `json:"gid_mappings"` + + // MaskPaths specifies paths within the container's rootfs to mask over with a bind + // mount pointing to /dev/null, so as to prevent reads of the file. + MaskPaths []string `json:"mask_paths"` + + // ReadonlyPaths specifies paths within the container's rootfs to remount as read-only, + // so that writes to these paths are prevented. + ReadonlyPaths []string `json:"readonly_paths"` + + // Sysctl is a map of properties and their values. It is the equivalent of using + // sysctl -w my.property.name value in Linux. + Sysctl map[string]string `json:"sysctl"` + + // Seccomp allows actions to be taken whenever a syscall is made within the container. + // A number of rules are given, each having an action to be taken if a syscall matches it. + // A default action to be taken if no rules match is also given. + Seccomp *Seccomp `json:"seccomp"` + + // NoNewPrivileges controls whether processes in the container can gain additional privileges. + NoNewPrivileges bool `json:"no_new_privileges,omitempty"` + + // Hooks are a collection of actions to perform at various container lifecycle events. + // CommandHooks are serialized to JSON, but other hooks are not. + Hooks *Hooks + + // Version is the version of opencontainer specification that is supported. + Version string `json:"version"` + + // Labels are user defined metadata that is stored in the config and populated on the state + Labels []string `json:"labels"` + + // NoNewKeyring will not allocate a new session keyring for the container. It will use the + // caller's keyring in this case. + NoNewKeyring bool `json:"no_new_keyring"` + + // Rootless specifies whether the container is a rootless container. + Rootless bool `json:"rootless"` + + // IntelRdt specifies settings for the Intel RDT/CAT group that the container is placed into + // to limit the resources (e.g., L3 cache) the container has available + IntelRdt *IntelRdt `json:"intel_rdt,omitempty"` +} + +type Hooks struct { + // Prestart commands are executed after the container namespaces are created, + // but before the user supplied command is executed from init. + Prestart []Hook + + // Poststart commands are executed after the container init process starts. + Poststart []Hook + + // Poststop commands are executed after the container init process exits. + Poststop []Hook +} + +type Capabilities struct { + // Bounding is the set of capabilities checked by the kernel. + Bounding []string + // Effective is the set of capabilities checked by the kernel. + Effective []string + // Inheritable is the capabilities preserved across execve. + Inheritable []string + // Permitted is the limiting superset for effective capabilities. + Permitted []string + // Ambient is the ambient set of capabilities that are kept.
+ Ambient []string +} + +func (hooks *Hooks) UnmarshalJSON(b []byte) error { + var state struct { + Prestart []CommandHook + Poststart []CommandHook + Poststop []CommandHook + } + + if err := json.Unmarshal(b, &state); err != nil { + return err + } + + deserialize := func(shooks []CommandHook) (hooks []Hook) { + for _, shook := range shooks { + hooks = append(hooks, shook) + } + + return hooks + } + + hooks.Prestart = deserialize(state.Prestart) + hooks.Poststart = deserialize(state.Poststart) + hooks.Poststop = deserialize(state.Poststop) + return nil +} + +func (hooks Hooks) MarshalJSON() ([]byte, error) { + serialize := func(hooks []Hook) (serializableHooks []CommandHook) { + for _, hook := range hooks { + switch chook := hook.(type) { + case CommandHook: + serializableHooks = append(serializableHooks, chook) + default: + logrus.Warnf("cannot serialize hook of type %T, skipping", hook) + } + } + + return serializableHooks + } + + return json.Marshal(map[string]interface{}{ + "prestart": serialize(hooks.Prestart), + "poststart": serialize(hooks.Poststart), + "poststop": serialize(hooks.Poststop), + }) +} + +// HookState is the payload provided to a hook on execution. +type HookState specs.State + +type Hook interface { + // Run executes the hook with the provided state. + Run(HookState) error +} + +// NewFunctionHook will call the provided function when the hook is run. +func NewFunctionHook(f func(HookState) error) FuncHook { + return FuncHook{ + run: f, + } +} + +type FuncHook struct { + run func(HookState) error +} + +func (f FuncHook) Run(s HookState) error { + return f.run(s) +} + +type Command struct { + Path string `json:"path"` + Args []string `json:"args"` + Env []string `json:"env"` + Dir string `json:"dir"` + Timeout *time.Duration `json:"timeout"` +} + +// NewCommandHook will execute the provided command when the hook is run. +func NewCommandHook(cmd Command) CommandHook { + return CommandHook{ + Command: cmd, + } +} + +type CommandHook struct { + Command +} + +func (c Command) Run(s HookState) error { + b, err := json.Marshal(s) + if err != nil { + return err + } + var stdout, stderr bytes.Buffer + cmd := exec.Cmd{ + Path: c.Path, + Args: c.Args, + Env: c.Env, + Stdin: bytes.NewReader(b), + Stdout: &stdout, + Stderr: &stderr, + } + if err := cmd.Start(); err != nil { + return err + } + errC := make(chan error, 1) + go func() { + err := cmd.Wait() + if err != nil { + err = fmt.Errorf("error running hook: %v, stdout: %s, stderr: %s", err, stdout.String(), stderr.String()) + } + errC <- err + }() + var timerCh <-chan time.Time + if c.Timeout != nil { + timer := time.NewTimer(*c.Timeout) + defer timer.Stop() + timerCh = timer.C + } + select { + case err := <-errC: + return err + case <-timerCh: + cmd.Process.Kill() + cmd.Wait() + return fmt.Errorf("hook ran past specified timeout of %.1fs", c.Timeout.Seconds()) + } +}
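An illustrative sketch (not in the diff) of the two hook flavors above. CommandHooks survive a JSON round trip of the Config; FuncHooks are process-local and are skipped (with a warning) by MarshalJSON. /usr/bin/env is just an example command; it receives the HookState JSON on stdin.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	hooks := &configs.Hooks{
		Prestart: []configs.Hook{
			configs.NewFunctionHook(func(s configs.HookState) error {
				fmt.Println("prestart for container", s.ID)
				return nil
			}),
			configs.NewCommandHook(configs.Command{
				Path: "/usr/bin/env",
				Args: []string{"env"},
			}),
		},
	}
	fmt.Printf("%d prestart hooks registered\n", len(hooks.Prestart))
}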
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go new file mode 100644 index 0000000000..07da108045 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config_linux.go @@ -0,0 +1,61 @@ +package configs + +import "fmt" + +// HostUID gets the translated uid for the process on the host, which can be +// different when user namespaces are enabled. +func (c Config) HostUID(containerId int) (int, error) { + if c.Namespaces.Contains(NEWUSER) { + if c.UidMappings == nil { + return -1, fmt.Errorf("User namespaces enabled, but no uid mappings found.") + } + id, found := c.hostIDFromMapping(containerId, c.UidMappings) + if !found { + return -1, fmt.Errorf("User namespaces enabled, but no user mapping found.") + } + return id, nil + } + // Return unchanged id. + return containerId, nil +} + +// HostRootUID gets the root uid for the process on the host, which can be non-zero +// when user namespaces are enabled. +func (c Config) HostRootUID() (int, error) { + return c.HostUID(0) +} + +// HostGID gets the translated gid for the process on the host, which can be +// different when user namespaces are enabled. +func (c Config) HostGID(containerId int) (int, error) { + if c.Namespaces.Contains(NEWUSER) { + if c.GidMappings == nil { + return -1, fmt.Errorf("User namespaces enabled, but no gid mappings found.") + } + id, found := c.hostIDFromMapping(containerId, c.GidMappings) + if !found { + return -1, fmt.Errorf("User namespaces enabled, but no group mapping found.") + } + return id, nil + } + // Return unchanged id. + return containerId, nil +} + +// HostRootGID gets the root gid for the process on the host, which can be non-zero +// when user namespaces are enabled. +func (c Config) HostRootGID() (int, error) { + return c.HostGID(0) +} + +// Utility function that gets a host ID for a container ID from a user namespace map, +// if that ID is present in the map. +func (c Config) hostIDFromMapping(containerID int, uMap []IDMap) (int, bool) { + for _, m := range uMap { + if (containerID >= m.ContainerID) && (containerID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (containerID - m.ContainerID) + return hostID, true + } + } + return -1, false +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go new file mode 100644 index 0000000000..8701bb212d --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device.go @@ -0,0 +1,57 @@ +package configs + +import ( + "fmt" + "os" +) + +const ( + Wildcard = -1 +) + +// TODO Windows: This can be factored out in the future + +type Device struct { + // Device type, block, char, etc. + Type rune `json:"type"` + + // Path to the device. + Path string `json:"path"` + + // Major is the device's major number. + Major int64 `json:"major"` + + // Minor is the device's minor number. + Minor int64 `json:"minor"` + + // Cgroup permissions format, rwm. + Permissions string `json:"permissions"` + + // FileMode permission bits for the device. + FileMode os.FileMode `json:"file_mode"` + + // Uid of the device. + Uid uint32 `json:"uid"` + + // Gid of the device. + Gid uint32 `json:"gid"` + + // Write the file to the allowed list + Allow bool `json:"allow"` +} + +func (d *Device) CgroupString() string { + return fmt.Sprintf("%c %s:%s %s", d.Type, deviceNumberString(d.Major), deviceNumberString(d.Minor), d.Permissions) +} + +func (d *Device) Mkdev() int { + return int((d.Major << 8) | (d.Minor & 0xff) | ((d.Minor & 0xfff00) << 12)) +}
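Not part of the diff: the uid translation HostUID performs, spelled out with an example mapping. Container uid 0 maps to host uid 100000, uid 999 to 100999, and uid 1000 falls outside the single mapping and returns an error. Builds on Linux, where NEWUSER is defined.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	cfg := configs.Config{
		Namespaces: configs.Namespaces{{Type: configs.NEWUSER}},
		UidMappings: []configs.IDMap{
			{ContainerID: 0, HostID: 100000, Size: 1000},
		},
	}
	for _, uid := range []int{0, 999, 1000} {
		host, err := cfg.HostUID(uid)
		fmt.Println(uid, "->", host, err)
	}
}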
+ +// deviceNumberString converts the device number to a string; the Wildcard +// value is rendered as "*". +func deviceNumberString(number int64) string { + if number == Wildcard { + return "*" + } + return fmt.Sprint(number) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go new file mode 100644 index 0000000000..e4f423c523 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go @@ -0,0 +1,111 @@ +// +build linux + +package configs + +var ( + // DefaultSimpleDevices are devices that are to be both allowed and created. + DefaultSimpleDevices = []*Device{ + // /dev/null and zero + { + Path: "/dev/null", + Type: 'c', + Major: 1, + Minor: 3, + Permissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/zero", + Type: 'c', + Major: 1, + Minor: 5, + Permissions: "rwm", + FileMode: 0666, + }, + + { + Path: "/dev/full", + Type: 'c', + Major: 1, + Minor: 7, + Permissions: "rwm", + FileMode: 0666, + }, + + // consoles and ttys + { + Path: "/dev/tty", + Type: 'c', + Major: 5, + Minor: 0, + Permissions: "rwm", + FileMode: 0666, + }, + + // /dev/urandom,/dev/random + { + Path: "/dev/urandom", + Type: 'c', + Major: 1, + Minor: 9, + Permissions: "rwm", + FileMode: 0666, + }, + { + Path: "/dev/random", + Type: 'c', + Major: 1, + Minor: 8, + Permissions: "rwm", + FileMode: 0666, + }, + } + DefaultAllowedDevices = append([]*Device{ + // allow mknod for any device + { + Type: 'c', + Major: Wildcard, + Minor: Wildcard, + Permissions: "m", + }, + { + Type: 'b', + Major: Wildcard, + Minor: Wildcard, + Permissions: "m", + }, + + { + Path: "/dev/console", + Type: 'c', + Major: 5, + Minor: 1, + Permissions: "rwm", + }, + // /dev/pts/ - pts namespaces are "coming soon" + { + Path: "", + Type: 'c', + Major: 136, + Minor: Wildcard, + Permissions: "rwm", + }, + { + Path: "", + Type: 'c', + Major: 5, + Minor: 2, + Permissions: "rwm", + }, + + // tuntap + { + Path: "", + Type: 'c', + Major: 10, + Minor: 200, + Permissions: "rwm", + }, + }, DefaultSimpleDevices...) + DefaultAutoCreatedDevices = append([]*Device{}, DefaultSimpleDevices...) +) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go new file mode 100644 index 0000000000..d30216380b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go @@ -0,0 +1,9 @@ +package configs + +type HugepageLimit struct { + // Which type of hugepage to limit. + Pagesize string `json:"page_size"` + + // Usage limit for the hugepage. + Limit uint64 `json:"limit"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go new file mode 100644 index 0000000000..36bd5f96a1 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go @@ -0,0 +1,7 @@ +package configs + +type IntelRdt struct { + // The schema for L3 cache id and capacity bitmask (CBM) + // Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+ L3CacheSchema string `json:"l3_cache_schema,omitempty"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go new file mode 100644 index 0000000000..9a0395eaf5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go @@ -0,0 +1,14 @@ +package configs + +import ( + "fmt" +) + +type IfPrioMap struct { + Interface string `json:"interface"` + Priority int64 `json:"priority"` +} + +func (i *IfPrioMap) CgroupString() string { + return fmt.Sprintf("%s %d", i.Interface, i.Priority) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go new file mode 100644 index 0000000000..670757ddb5 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/mount.go @@ -0,0 +1,39 @@ +package configs + +const ( + // EXT_COPYUP is a directive to copy up the contents of a directory when + // a tmpfs is mounted over it. + EXT_COPYUP = 1 << iota +) + +type Mount struct { + // Source path for the mount. + Source string `json:"source"` + + // Destination path for the mount inside the container. + Destination string `json:"destination"` + + // Device the mount is for. + Device string `json:"device"` + + // Mount flags. + Flags int `json:"flags"` + + // Propagation Flags + PropagationFlags []int `json:"propagation_flags"` + + // Mount data applied to the mount. + Data string `json:"data"` + + // Relabel source if set, "z" indicates shared, "Z" indicates unshared. + Relabel string `json:"relabel"` + + // Extensions are additional flags that are specific to runc. + Extensions int `json:"extensions"` + + // Optional Command to be run before Source is mounted. + PremountCmds []Command `json:"premount_cmds"` + + // Optional Command to be run after Source is mounted. 
+ PostmountCmds []Command `json:"postmount_cmds"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go new file mode 100644 index 0000000000..a3329a31a9 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces.go @@ -0,0 +1,5 @@ +package configs + +type NamespaceType string + +type Namespaces []Namespace diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go new file mode 100644 index 0000000000..5fc171a57b --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_linux.go @@ -0,0 +1,122 @@ +package configs + +import ( + "fmt" + "os" + "sync" +) + +const ( + NEWNET NamespaceType = "NEWNET" + NEWPID NamespaceType = "NEWPID" + NEWNS NamespaceType = "NEWNS" + NEWUTS NamespaceType = "NEWUTS" + NEWIPC NamespaceType = "NEWIPC" + NEWUSER NamespaceType = "NEWUSER" +) + +var ( + nsLock sync.Mutex + supportedNamespaces = make(map[NamespaceType]bool) +) + +// NsName converts the namespace type to its filename +func NsName(ns NamespaceType) string { + switch ns { + case NEWNET: + return "net" + case NEWNS: + return "mnt" + case NEWPID: + return "pid" + case NEWIPC: + return "ipc" + case NEWUSER: + return "user" + case NEWUTS: + return "uts" + } + return "" +} + +// IsNamespaceSupported returns whether a namespace is available or +// not +func IsNamespaceSupported(ns NamespaceType) bool { + nsLock.Lock() + defer nsLock.Unlock() + supported, ok := supportedNamespaces[ns] + if ok { + return supported + } + nsFile := NsName(ns) + // if the namespace type is unknown, just return false + if nsFile == "" { + return false + } + _, err := os.Stat(fmt.Sprintf("/proc/self/ns/%s", nsFile)) + // a namespace is supported if it exists and we have permissions to read it + supported = err == nil + supportedNamespaces[ns] = supported + return supported +} + +func NamespaceTypes() []NamespaceType { + return []NamespaceType{ + NEWUSER, // Keep user NS always first, don't move it. + NEWIPC, + NEWUTS, + NEWNET, + NEWPID, + NEWNS, + } +} + +// Namespace defines configuration for each namespace. It specifies an +// alternate path that is able to be joined via setns. +type Namespace struct { + Type NamespaceType `json:"type"` + Path string `json:"path"` +} + +func (n *Namespace) GetPath(pid int) string { + return fmt.Sprintf("/proc/%d/ns/%s", pid, NsName(n.Type)) +} + +func (n *Namespaces) Remove(t NamespaceType) bool { + i := n.index(t) + if i == -1 { + return false + } + *n = append((*n)[:i], (*n)[i+1:]...) 
+ return true +} + +func (n *Namespaces) Add(t NamespaceType, path string) { + i := n.index(t) + if i == -1 { + *n = append(*n, Namespace{Type: t, Path: path}) + return + } + (*n)[i].Path = path +} + +func (n *Namespaces) index(t NamespaceType) int { + for i, ns := range *n { + if ns.Type == t { + return i + } + } + return -1 +} + +func (n *Namespaces) Contains(t NamespaceType) bool { + return n.index(t) != -1 +} + +func (n *Namespaces) PathOf(t NamespaceType) string { + i := n.index(t) + if i == -1 { + return "" + } + return (*n)[i].Path +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go new file mode 100644 index 0000000000..4ce6813d23 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go @@ -0,0 +1,31 @@ +// +build linux + +package configs + +import "golang.org/x/sys/unix" + +func (n *Namespace) Syscall() int { + return namespaceInfo[n.Type] +} + +var namespaceInfo = map[NamespaceType]int{ + NEWNET: unix.CLONE_NEWNET, + NEWNS: unix.CLONE_NEWNS, + NEWUSER: unix.CLONE_NEWUSER, + NEWIPC: unix.CLONE_NEWIPC, + NEWUTS: unix.CLONE_NEWUTS, + NEWPID: unix.CLONE_NEWPID, +} + +// CloneFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare. This function returns flags only for new namespaces. +func (n *Namespaces) CloneFlags() uintptr { + var flag int + for _, v := range *n { + if v.Path != "" { + continue + } + flag |= namespaceInfo[v.Type] + } + return uintptr(flag) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go new file mode 100644 index 0000000000..5d9a5c81f3 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux,!windows + +package configs + +func (n *Namespace) Syscall() int { + panic("No namespace syscall support") +} + +// CloneFlags parses the container's Namespaces options to set the correct +// flags on clone, unshare. This function returns flags only for new namespaces. +func (n *Namespaces) CloneFlags() uintptr { + panic("No namespace syscall support") +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go new file mode 100644 index 0000000000..19bf713de3 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux + +package configs + +// Namespace defines configuration for each namespace. It specifies an +// alternate path that is able to be joined via setns. 
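Not from the diff: what CloneFlags computes for a mixed namespace set. A namespace with a Path set is joined via setns later, so it contributes no clone flag; only the pathless mount namespace below produces one. Linux-only.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/configs"
)

func main() {
	ns := configs.Namespaces{
		{Type: configs.NEWNS},                             // new mount namespace: cloned
		{Type: configs.NEWNET, Path: "/proc/1234/ns/net"}, // existing netns: joined, not cloned
	}
	fmt.Printf("clone flags: %#x\n", ns.CloneFlags()) // only CLONE_NEWNS is set
}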
+type Namespace struct { +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go new file mode 100644 index 0000000000..ccdb228e14 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/network.go @@ -0,0 +1,72 @@ +package configs + +// Network defines configuration for a container's networking stack +// +// The network configuration can be omitted from a container, causing the +// container to be set up with the host's networking stack +type Network struct { + // Type sets the network's type, commonly veth or loopback + Type string `json:"type"` + + // Name of the network interface + Name string `json:"name"` + + // The bridge to use. + Bridge string `json:"bridge"` + + // MacAddress contains the MAC address to set on the network interface + MacAddress string `json:"mac_address"` + + // Address contains the IPv4 and mask to set on the network interface + Address string `json:"address"` + + // Gateway sets the gateway address that is used as the default for the interface + Gateway string `json:"gateway"` + + // IPv6Address contains the IPv6 and mask to set on the network interface + IPv6Address string `json:"ipv6_address"` + + // IPv6Gateway sets the ipv6 gateway address that is used as the default for the interface + IPv6Gateway string `json:"ipv6_gateway"` + + // Mtu sets the mtu value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + Mtu int `json:"mtu"` + + // TxQueueLen sets the tx_queuelen value for the interface and will be mirrored on both the host and + // container's interfaces if a pair is created, specifically in the case of type veth + // Note: This does not apply to loopback interfaces. + TxQueueLen int `json:"txqueuelen"` + + // HostInterfaceName is the unique name of the host-side interface of the + // container's veth pair. + HostInterfaceName string `json:"host_interface_name"` + + // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + // bridge port in the case of type veth + // Note: This is unsupported on some systems. + // Note: This does not apply to loopback interfaces. + HairpinMode bool `json:"hairpin_mode"` +} + +// Routes can be specified to create entries in the route table as the container is started +// +// All of destination, source, and gateway should be either IPv4 or IPv6. +// One of the three options must be present, and omitted entries will use their +// IP family default for the route table. For IPv4 for example, setting the +// gateway to 1.2.3.4 and the interface to eth0 will set up a standard +// destination of 0.0.0.0 (or *) when viewed in the route table. +type Route struct { + // Sets the destination and mask, should be a CIDR. Accepts IPv4 and IPv6 + Destination string `json:"destination"` + + // Sets the source and mask, should be a CIDR. Accepts IPv4 and IPv6 + Source string `json:"source"` + + // Sets the gateway.
Accepts IPv4 and IPv6 + Gateway string `json:"gateway"` + + // The device to set this route up for, for example: eth0 + InterfaceName string `json:"interface_name"` +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go new file mode 100644 index 0000000000..3619258905 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/devices/devices.go @@ -0,0 +1,104 @@ +package devices + +import ( + "errors" + "io/ioutil" + "os" + "path/filepath" + + "github.com/opencontainers/runc/libcontainer/configs" + + "golang.org/x/sys/unix" +) + +var ( + ErrNotADevice = errors.New("not a device node") +) + +// Testing dependencies +var ( + unixLstat = unix.Lstat + ioutilReadDir = ioutil.ReadDir +) + +// Given the path to a device and its cgroup_permissions(which cannot be easily queried) look up the information about a linux device and return that information as a Device struct. +func DeviceFromPath(path, permissions string) (*configs.Device, error) { + var stat unix.Stat_t + err := unixLstat(path, &stat) + if err != nil { + return nil, err + } + + var ( + devNumber = stat.Rdev + major = unix.Major(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType rune + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = 'b' + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = 'c' + } + return &configs.Device{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(unix.Minor(devNumber)), + Permissions: permissions, + FileMode: os.FileMode(mode), + Uid: stat.Uid, + Gid: stat.Gid, + }, nil +} + +func HostDevices() ([]*configs.Device, error) { + return getDevices("/dev") +} + +func getDevices(path string) ([]*configs.Device, error) { + files, err := ioutilReadDir(path) + if err != nil { + return nil, err + } + out := []*configs.Device{} + for _, f := range files { + switch { + case f.IsDir(): + switch f.Name() { + // ".lxc" & ".lxd-mounts" added to address https://github.com/lxc/lxd/issues/2825 + case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts": + continue + default: + sub, err := getDevices(filepath.Join(path, f.Name())) + if err != nil { + return nil, err + } + + out = append(out, sub...) 
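A sketch (not in the diff) of looking up a single device node and rendering the cgroup rule string the Device type produces; Linux-only, like the package.

package main

import (
	"fmt"

	"github.com/opencontainers/runc/libcontainer/devices"
)

func main() {
	dev, err := devices.DeviceFromPath("/dev/null", "rwm")
	if err != nil {
		panic(err)
	}
	// For /dev/null this prints "c 1:3 rwm" (type, major:minor, permissions),
	// the format the devices.allow and devices.deny cgroup files expect.
	fmt.Println(dev.CgroupString())
}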
+ continue + } + case f.Name() == "console": + continue + } + device, err := DeviceFromPath(filepath.Join(path, f.Name()), "rwm") + if err != nil { + if err == ErrNotADevice { + continue + } + if os.IsNotExist(err) { + continue + } + return nil, err + } + out = append(out, device) + } + return out, nil +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 8b199d92ed..5f124cd8bb 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -7,8 +7,10 @@ import ( "fmt" "os" "os/exec" - "syscall" + "syscall" // only for exec "unsafe" + + "golang.org/x/sys/unix" ) // If arg2 is nonzero, set the "child subreaper" attribute of the @@ -53,8 +55,8 @@ func Execv(cmd string, args []string, env []string) error { return syscall.Exec(name, args, env) } -func Prlimit(pid, resource int, limit syscall.Rlimit) error { - _, _, err := syscall.RawSyscall6(syscall.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) +func Prlimit(pid, resource int, limit unix.Rlimit) error { + _, _, err := unix.RawSyscall6(unix.SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(&limit)), uintptr(unsafe.Pointer(&limit)), 0, 0) if err != 0 { return err } @@ -62,7 +64,7 @@ func Prlimit(pid, resource int, limit syscall.Rlimit) error { } func SetParentDeathSignal(sig uintptr) error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_PDEATHSIG, sig, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_PDEATHSIG, sig, 0, 0, 0); err != nil { return err } return nil @@ -70,15 +72,14 @@ func SetParentDeathSignal(sig uintptr) error { func GetParentDeathSignal() (ParentDeathSignal, error) { var sig int - _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0) - if err != 0 { + if err := unix.Prctl(unix.PR_GET_PDEATHSIG, uintptr(unsafe.Pointer(&sig)), 0, 0, 0); err != nil { return -1, err } return ParentDeathSignal(sig), nil } func SetKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 1, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 1, 0, 0, 0); err != nil { return err } @@ -86,7 +87,7 @@ func SetKeepCaps() error { } func ClearKeepCaps() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_KEEPCAPS, 0, 0); err != 0 { + if err := unix.Prctl(unix.PR_SET_KEEPCAPS, 0, 0, 0, 0); err != nil { return err } @@ -94,23 +95,18 @@ func ClearKeepCaps() error { } func Setctty() error { - if _, _, err := syscall.RawSyscall(syscall.SYS_IOCTL, 0, uintptr(syscall.TIOCSCTTY), 0); err != 0 { + if err := unix.IoctlSetInt(0, unix.TIOCSCTTY, 0); err != nil { return err } return nil } -/* - * Detect whether we are currently running in a user namespace. - * Copied from github.com/lxc/lxd/shared/util.go - */ +// RunningInUserNS detects whether we are currently running in a user namespace. 
+// Copied from github.com/lxc/lxd/shared/util.go func RunningInUserNS() bool { file, err := os.Open("/proc/self/uid_map") if err != nil { - /* - * This kernel-provided file only exists if user namespaces are - * supported - */ + // This kernel-provided file only exists if user namespaces are supported return false } defer file.Close() @@ -136,13 +132,16 @@ func RunningInUserNS() bool { // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { - return Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) + return unix.Prctl(PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } -func Prctl(option int, arg2, arg3, arg4, arg5 uintptr) (err error) { - _, _, e1 := syscall.Syscall6(syscall.SYS_PRCTL, uintptr(option), arg2, arg3, arg4, arg5, 0) - if e1 != 0 { - err = e1 +// GetSubreaper returns the subreaper setting for the calling process +func GetSubreaper() (int, error) { + var i uintptr + + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { + return -1, err } - return + + return int(i), nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go index 37808a29f6..79232a4371 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/proc.go @@ -1,27 +1,113 @@ package system import ( + "fmt" "io/ioutil" "path/filepath" "strconv" "strings" ) -// look in /proc to find the process start time so that we can verify -// that this pid has started after ourself +// State is the status of a process. +type State rune + +const ( // Only values for Linux 3.14 and later are listed here + Dead State = 'X' + DiskSleep State = 'D' + Running State = 'R' + Sleeping State = 'S' + Stopped State = 'T' + TracingStop State = 't' + Zombie State = 'Z' +) + +// String forms of the state from proc(5)'s documentation for +// /proc/[pid]/status' "State" field. +func (s State) String() string { + switch s { + case Dead: + return "dead" + case DiskSleep: + return "disk sleep" + case Running: + return "running" + case Sleeping: + return "sleeping" + case Stopped: + return "stopped" + case TracingStop: + return "tracing stop" + case Zombie: + return "zombie" + default: + return fmt.Sprintf("unknown (%c)", s) + } +} + +// Stat_t represents the information from /proc/[pid]/stat, as +// described in proc(5) with names based on the /proc/[pid]/status +// fields. +type Stat_t struct { + // PID is the process ID. + PID uint + + // Name is the command run by the process. + Name string + + // State is the state of the process. + State State + + // StartTime is the number of clock ticks after system boot (since + // Linux 2.6). + StartTime uint64 +} + +// Stat returns a Stat_t instance for the specified process. +func Stat(pid int) (stat Stat_t, err error) { + bytes, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + if err != nil { + return stat, err + } + return parseStat(string(bytes)) +} + +// GetProcessStartTime is deprecated. Use Stat(pid) and +// Stat_t.StartTime instead. 
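Illustrative sketch, not part of the vendored diff: consuming the replacement Stat API that the deprecation notice above points to. The use of os.Getpid and the printed format are assumptions for the example.

package main

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/system"
)

func main() {
	// Stat parses /proc/<pid>/stat for the given process.
	stat, err := system.Stat(os.Getpid())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// StartTime is expressed in clock ticks since boot; divide by
	// sysconf(_SC_CLK_TCK) to convert it to seconds.
	fmt.Printf("%s (pid %d) is %s, started at tick %d\n",
		stat.Name, stat.PID, stat.State, stat.StartTime)
}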
func GetProcessStartTime(pid int) (string, error) { - data, err := ioutil.ReadFile(filepath.Join("/proc", strconv.Itoa(pid), "stat")) + stat, err := Stat(pid) if err != nil { return "", err } + return fmt.Sprintf("%d", stat.StartTime), nil +} + +func parseStat(data string) (stat Stat_t, err error) { + // From proc(5), field 2 could contain space and is inside `(` and `)`. + // The following is an example: + // 89653 (gunicorn: maste) S 89630 89653 89653 0 -1 4194560 29689 28896 0 3 146 32 76 19 20 0 1 0 2971844 52965376 3920 18446744073709551615 1 1 0 0 0 0 0 16781312 137447943 0 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 + i := strings.LastIndex(data, ")") + if i <= 2 || i >= len(data)-1 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + parts := strings.SplitN(data[:i], "(", 2) + if len(parts) != 2 { + return stat, fmt.Errorf("invalid stat data: %q", data) + } + + stat.Name = parts[1] + _, err = fmt.Sscanf(parts[0], "%d", &stat.PID) + if err != nil { + return stat, err + } - parts := strings.Split(string(data), " ") - // the starttime is located at pos 22 - // from the man page - // - // starttime %llu (was %lu before Linux 2.6) - // (22) The time the process started after system boot. In kernels before Linux 2.6, this - // value was expressed in jiffies. Since Linux 2.6, the value is expressed in clock ticks - // (divide by sysconf(_SC_CLK_TCK)). - return parts[22-1], nil // starts at 1 + // parts indexes should be offset by 3 from the field number given + // proc(5), because parts is zero-indexed and we've removed fields + // one (PID) and two (Name) in the paren-split. + parts = strings.Split(data[i+2:], " ") + var state int + fmt.Sscanf(parts[3-3], "%c", &state) + stat.State = State(state) + fmt.Sscanf(parts[22-3], "%d", &stat.StartTime) + return stat, nil } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go deleted file mode 100644 index 615ff4c827..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/setns_linux.go +++ /dev/null @@ -1,40 +0,0 @@ -package system - -import ( - "fmt" - "runtime" - "syscall" -) - -// Via http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=7b21fddd087678a70ad64afc0f632e0f1071b092 -// -// We need different setns values for the different platforms and arch -// We are declaring the macro here because the SETNS syscall does not exist in th stdlib -var setNsMap = map[string]uintptr{ - "linux/386": 346, - "linux/arm64": 268, - "linux/amd64": 308, - "linux/arm": 375, - "linux/ppc": 350, - "linux/ppc64": 350, - "linux/ppc64le": 350, - "linux/s390x": 339, -} - -var sysSetns = setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - -func SysSetns() uint32 { - return uint32(sysSetns) -} - -func Setns(fd uintptr, flags uintptr) error { - ns, exists := setNsMap[fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH)] - if !exists { - return fmt.Errorf("unsupported platform %s/%s", runtime.GOOS, runtime.GOARCH) - } - _, _, err := syscall.RawSyscall(ns, fd, flags, 0) - if err != 0 { - return err - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go new file mode 100644 index 0000000000..c5ca5d8623 --- /dev/null +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_32.go @@ -0,0 +1,26 @@ +// +build linux +// +build 386 arm + +package system + +import 
( + "golang.org/x/sys/unix" +) + +// Setuid sets the uid of the calling thread to the specified uid. +func Setuid(uid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID32, uintptr(uid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + +// Setgid sets the gid of the calling thread to the specified gid. +func Setgid(gid int) (err error) { + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID32, uintptr(gid), 0, 0) + if e1 != 0 { + err = e1 + } + return +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go deleted file mode 100644 index c990065189..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,386 - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. -func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go index 0816bf8281..11c3faafbf 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go @@ -1,14 +1,15 @@ -// +build linux,arm64 linux,amd64 linux,ppc linux,ppc64 linux,ppc64le linux,s390x +// +build linux +// +build arm64 amd64 mips mipsle mips64 mips64le ppc ppc64 ppc64le s390x package system import ( - "syscall" + "golang.org/x/sys/unix" ) // Setuid sets the uid of the calling thread to the specified uid. func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(uid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETUID, uintptr(uid), 0, 0) if e1 != 0 { err = e1 } @@ -17,7 +18,7 @@ func Setuid(uid int) (err error) { // Setgid sets the gid of the calling thread to the specified gid. func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(gid), 0, 0) + _, _, e1 := unix.RawSyscall(unix.SYS_SETGID, uintptr(gid), 0, 0) if e1 != 0 { err = e1 } diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go b/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go deleted file mode 100644 index 3f780f312b..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux,arm - -package system - -import ( - "syscall" -) - -// Setuid sets the uid of the calling thread to the specified uid. -func Setuid(uid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETUID32, uintptr(uid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} - -// Setgid sets the gid of the calling thread to the specified gid. 
-func Setgid(gid int) (err error) { - _, _, e1 := syscall.RawSyscall(syscall.SYS_SETGID32, uintptr(gid), 0, 0) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go index b3a07cba3e..b8434f1050 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/sysconfig.go @@ -1,4 +1,4 @@ -// +build cgo,linux cgo,freebsd +// +build cgo,linux package system diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go index 30f74dfb1b..a6823fc99b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go @@ -1,99 +1,35 @@ package system -import ( - "syscall" - "unsafe" -) - -var _zero uintptr - -// Returns the size of xattrs and nil error -// Requires path, takes allocated []byte or nil as last argument -func Llistxattr(path string, dest []byte) (size int, err error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return -1, err - } - var newpathBytes unsafe.Pointer - if len(dest) > 0 { - newpathBytes = unsafe.Pointer(&dest[0]) - } else { - newpathBytes = unsafe.Pointer(&_zero) - } - - _size, _, errno := syscall.Syscall6(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(newpathBytes), uintptr(len(dest)), 0, 0, 0) - size = int(_size) - if errno != 0 { - return -1, errno - } - - return size, nil -} +import "golang.org/x/sys/unix" // Returns a []byte slice if the xattr is set and nil otherwise // Requires path and its attribute as arguments func Lgetxattr(path string, attr string) ([]byte, error) { var sz int - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - // Start with a 128 length byte array - sz = 128 - dest := make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) switch { - case errno == syscall.ENODATA: + case errno == unix.ENODATA: return nil, errno - case errno == syscall.ENOTSUP: + case errno == unix.ENOTSUP: return nil, errno - case errno == syscall.ERANGE: + case errno == unix.ERANGE: // 128 byte array might just not be good enough, - // A dummy buffer is used ``uintptr(0)`` to get real size + // A dummy buffer is used to get the real size // of the xattrs on disk - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(unsafe.Pointer(nil)), uintptr(0), 0, 0) - sz = int(_sz) - if sz < 0 { + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { return nil, errno } dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - _sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno != 0 { + sz, errno = unix.Lgetxattr(path, attr, dest) + if errno != nil { return nil, errno } - case errno != 0: + case errno 
!= nil: return nil, errno } - sz = int(_sz) return dest[:sz], nil } - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go index ab1439f361..95e9eebc0b 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go @@ -2,7 +2,6 @@ package user import ( "errors" - "syscall" ) var ( @@ -36,13 +35,6 @@ func lookupUser(filter func(u User) bool) (User, error) { return users[0], nil } -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - // LookupUser looks up a user by their username in /etc/passwd. If the user // cannot be found (or there is no /etc/passwd file on the filesystem), then // LookupUser returns an error. @@ -84,13 +76,6 @@ func lookupGroup(filter func(g Group) bool) (Group, error) { return groups[0], nil } -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - // LookupGroup looks up a group by its name in /etc/group. If the group cannot // be found (or there is no /etc/group file on the filesystem), then LookupGroup // returns an error. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go index 758b734c22..c2bb9ec90d 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go @@ -5,6 +5,8 @@ package user import ( "io" "os" + + "golang.org/x/sys/unix" ) // Unix-specific path to the passwd and group formatted files. @@ -28,3 +30,17 @@ func GetGroupPath() (string, error) { func GetGroup() (io.ReadCloser, error) { return os.Open(unixGroupPath) } + +// CurrentUser looks up the current user by their user id in /etc/passwd. If the +// user cannot be found (or there is no /etc/passwd file on the filesystem), +// then CurrentUser returns an error. +func CurrentUser() (User, error) { + return LookupUid(unix.Getuid()) +} + +// CurrentGroup looks up the current user's group by their primary group id's +// entry in /etc/passwd. If the group cannot be found (or there is no +// /etc/group file on the filesystem), then CurrentGroup returns an error. 
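Illustrative sketch, not part of the vendored diff: the relocated CurrentUser/CurrentGroup helpers from this hunk in use. It assumes readable /etc/passwd and /etc/group files on the host.

package main

import (
	"fmt"
	"log"

	"github.com/opencontainers/runc/libcontainer/user"
)

func main() {
	u, err := user.CurrentUser()
	if err != nil {
		log.Fatal(err) // e.g. no /etc/passwd entry for this uid
	}
	g, err := user.CurrentGroup()
	if err != nil {
		log.Fatal(err) // e.g. no /etc/group entry for this gid
	}
	fmt.Printf("uid=%d(%s) gid=%d(%s)\n", u.Uid, u.Name, g.Gid, g.Name)
}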
+func CurrentGroup() (Group, error) { + return LookupGid(unix.Getgid()) +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 7217948870..0000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go index 43fd39ef54..8962cab331 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go @@ -199,18 +199,16 @@ type ExecUser struct { // files cannot be opened for any reason, the error is ignored and a nil // io.Reader is passed instead. func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() + var passwd, group io.Reader + + if passwdFile, err := os.Open(passwdPath); err == nil { + passwd = passwdFile + defer passwdFile.Close() } - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() } return GetExecUser(userSpec, defaults, passwd, group) @@ -343,7 +341,7 @@ func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) ( if len(groups) > 0 { // First match wins, even if there's more than one matching entry. user.Gid = groups[0].Gid - } else if groupArg != "" { + } else { // If we can't find a group with the given name, the only other valid // option is if it's a numeric group name with no associated entry in group. @@ -433,9 +431,11 @@ func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, err // that opens the groupPath given and gives it as an argument to // GetAdditionalGroups. 
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err == nil { - defer group.Close() + var group io.Reader + + if groupFile, err := os.Open(groupPath); err == nil { + group = groupFile + defer groupFile.Close() } return GetAdditionalGroups(additionalGroups, group) } diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE b/vendor/github.com/opencontainers/runtime-spec/LICENSE similarity index 100% rename from vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE rename to vendor/github.com/opencontainers/runtime-spec/LICENSE diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go new file mode 100644 index 0000000000..71c9fa7731 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -0,0 +1,570 @@ +package specs + +import "os" + +// Spec is the base configuration for the container. +type Spec struct { + // Version of the Open Container Initiative Runtime Specification with which the bundle complies. + Version string `json:"ociVersion"` + // Process configures the container process. + Process *Process `json:"process,omitempty"` + // Root configures the container's root filesystem. + Root *Root `json:"root,omitempty"` + // Hostname configures the container's hostname. + Hostname string `json:"hostname,omitempty"` + // Mounts configures additional mounts (on top of Root). + Mounts []Mount `json:"mounts,omitempty"` + // Hooks configures callbacks for container lifecycle events. + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + // Annotations contains arbitrary metadata for the container. + Annotations map[string]string `json:"annotations,omitempty"` + + // Linux is platform-specific configuration for Linux based containers. + Linux *Linux `json:"linux,omitempty" platform:"linux"` + // Solaris is platform-specific configuration for Solaris based containers. + Solaris *Solaris `json:"solaris,omitempty" platform:"solaris"` + // Windows is platform-specific configuration for Windows based containers. + Windows *Windows `json:"windows,omitempty" platform:"windows"` +} + +// Process contains information to start a specific application inside the container. +type Process struct { + // Terminal creates an interactive terminal for the container. + Terminal bool `json:"terminal,omitempty"` + // ConsoleSize specifies the size of the console. + ConsoleSize *Box `json:"consoleSize,omitempty"` + // User specifies user information for the process. + User User `json:"user"` + // Args specifies the binary and arguments for the application to execute. + Args []string `json:"args"` + // Env populates the process environment for the process. + Env []string `json:"env,omitempty"` + // Cwd is the current working directory for the process and must be + // relative to the container's root. + Cwd string `json:"cwd"` + // Capabilities are Linux capabilities that are kept for the process. + Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` + // Rlimits specifies rlimit options to apply to the process. + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. 
+ NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + // ApparmorProfile specifies the apparmor profile for the container. + ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` + // Specify an oom_score_adj for the container. + OOMScoreAdj *int `json:"oomScoreAdj,omitempty" platform:"linux"` + // SelinuxLabel specifies the selinux context that the container process is run as. + SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` +} + +// LinuxCapabilities specifies the whitelist of capabilities that are kept for a process. +// http://man7.org/linux/man-pages/man7/capabilities.7.html +type LinuxCapabilities struct { + // Bounding is the set of capabilities checked by the kernel. + Bounding []string `json:"bounding,omitempty" platform:"linux"` + // Effective is the set of capabilities checked by the kernel. + Effective []string `json:"effective,omitempty" platform:"linux"` + // Inheritable is the capabilities preserved across execve. + Inheritable []string `json:"inheritable,omitempty" platform:"linux"` + // Permitted is the limiting superset for effective capabilities. + Permitted []string `json:"permitted,omitempty" platform:"linux"` + // Ambient is the ambient set of capabilities that are kept. + Ambient []string `json:"ambient,omitempty" platform:"linux"` +} + +// Box specifies dimensions of a rectangle. Used for specifying the size of a console. +type Box struct { + // Height is the vertical dimension of a box. + Height uint `json:"height"` + // Width is the horizontal dimension of a box. + Width uint `json:"width"` +} + +// User specifies specific user (and group) information for the container process. +type User struct { + // UID is the user id. + UID uint32 `json:"uid" platform:"linux,solaris"` + // GID is the group id. + GID uint32 `json:"gid" platform:"linux,solaris"` + // AdditionalGids are additional group ids set for the container's process. + AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` + // Username is the user name. + Username string `json:"username,omitempty" platform:"windows"` +} + +// Root contains information about the container's root filesystem on the host. +type Root struct { + // Path is the absolute path to the container's root filesystem. + Path string `json:"path"` + // Readonly makes the root filesystem for the container readonly before the process is executed. + Readonly bool `json:"readonly,omitempty"` +} + +// Mount specifies a mount for a container. +type Mount struct { + // Destination is the absolute path where the mount will be placed in the container. + Destination string `json:"destination"` + // Type specifies the mount kind. + Type string `json:"type,omitempty" platform:"linux,solaris"` + // Source specifies the source path of the mount. + Source string `json:"source,omitempty"` + // Options are fstab style mount options. + Options []string `json:"options,omitempty"` +} + +// Hook specifies a command that is run at a particular event in the lifecycle of a container +type Hook struct { + Path string `json:"path"` + Args []string `json:"args,omitempty"` + Env []string `json:"env,omitempty"` + Timeout *int `json:"timeout,omitempty"` +} + +// Hooks for container setup and teardown +type Hooks struct { + // Prestart is a list of hooks to be run before the container process is executed. + Prestart []Hook `json:"prestart,omitempty"` + // Poststart is a list of hooks to be run after the container process is started. 
+ Poststart []Hook `json:"poststart,omitempty"` + // Poststop is a list of hooks to be run after the container process exits. + Poststop []Hook `json:"poststop,omitempty"` +} + +// Linux contains platform-specific configuration for Linux based containers. +type Linux struct { + // UIDMapping specifies user mappings for supporting user namespaces. + UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty"` + // GIDMapping specifies group mappings for supporting user namespaces. + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty"` + // Sysctl are a set of key value pairs that are set for the container on start + Sysctl map[string]string `json:"sysctl,omitempty"` + // Resources contain cgroup information for handling resource constraints + // for the container + Resources *LinuxResources `json:"resources,omitempty"` + // CgroupsPath specifies the path to cgroups that are created and/or joined by the container. + // The path is expected to be relative to the cgroups mountpoint. + // If resources are specified, the cgroups at CgroupsPath will be updated based on resources. + CgroupsPath string `json:"cgroupsPath,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []LinuxNamespace `json:"namespaces,omitempty"` + // Devices are a list of device nodes that are created for the container + Devices []LinuxDevice `json:"devices,omitempty"` + // Seccomp specifies the seccomp security settings for the container. + Seccomp *LinuxSeccomp `json:"seccomp,omitempty"` + // RootfsPropagation is the rootfs mount propagation mode for the container. + RootfsPropagation string `json:"rootfsPropagation,omitempty"` + // MaskedPaths masks over the provided paths inside the container. + MaskedPaths []string `json:"maskedPaths,omitempty"` + // ReadonlyPaths sets the provided paths as RO inside the container. + ReadonlyPaths []string `json:"readonlyPaths,omitempty"` + // MountLabel specifies the selinux context for the mounts in the container. 
+ MountLabel string `json:"mountLabel,omitempty"` + // IntelRdt contains Intel Resource Director Technology (RDT) information + // for handling resource constraints (e.g., L3 cache) for the container + IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` +} + +// LinuxNamespace is the configuration for a Linux namespace +type LinuxNamespace struct { + // Type is the type of namespace + Type LinuxNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` +} + +// LinuxNamespaceType is one of the Linux namespaces +type LinuxNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + PIDNamespace LinuxNamespaceType = "pid" + // NetworkNamespace for isolating network devices, stacks, ports, etc + NetworkNamespace = "network" + // MountNamespace for isolating mount points + MountNamespace = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + IPCNamespace = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + UTSNamespace = "uts" + // UserNamespace for isolating user and group IDs + UserNamespace = "user" + // CgroupNamespace for isolating cgroup hierarchies + CgroupNamespace = "cgroup" +) + +// LinuxIDMapping specifies UID/GID mappings +type LinuxIDMapping struct { + // HostID is the starting UID/GID on the host to be mapped to 'ContainerID' + HostID uint32 `json:"hostID"` + // ContainerID is the starting UID/GID in the container + ContainerID uint32 `json:"containerID"` + // Size is the number of IDs to be mapped + Size uint32 `json:"size"` +} + +// POSIXRlimit type and restrictions +type POSIXRlimit struct { + // Type of the rlimit to set + Type string `json:"type"` + // Hard is the hard limit for the specified type + Hard uint64 `json:"hard"` + // Soft is the soft limit for the specified type + Soft uint64 `json:"soft"` +} + +// LinuxHugepageLimit structure corresponds to limiting kernel hugepages +type LinuxHugepageLimit struct { + // Pagesize is the hugepage size + Pagesize string `json:"pageSize"` + // Limit is the limit of "hugepagesize" hugetlb usage + Limit uint64 `json:"limit"` +} + +// LinuxInterfacePriority for network interfaces +type LinuxInterfacePriority struct { + // Name is the name of the network interface + Name string `json:"name"` + // Priority for the interface + Priority uint32 `json:"priority"` +} + +// linuxBlockIODevice holds major:minor format supported in blkio cgroup +type linuxBlockIODevice struct { + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` +} + +// LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice +type LinuxWeightDevice struct { + linuxBlockIODevice + // Weight is the bandwidth rate for the device. 
+ Weight *uint16 `json:"weight,omitempty"` + // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"leafWeight,omitempty"` +} + +// LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair +type LinuxThrottleDevice struct { + linuxBlockIODevice + // Rate is the IO rate limit per cgroup per device + Rate uint64 `json:"rate"` +} + +// LinuxBlockIO for Linux cgroup 'blkio' resource management +type LinuxBlockIO struct { + // Specifies per cgroup weight + Weight *uint16 `json:"weight,omitempty"` + // Specifies tasks' weight in the given cgroup while competing with the cgroup's child cgroups, CFQ scheduler only + LeafWeight *uint16 `json:"leafWeight,omitempty"` + // Weight per cgroup per device, can override BlkioWeight + WeightDevice []LinuxWeightDevice `json:"weightDevice,omitempty"` + // IO read rate limit per cgroup per device, bytes per second + ThrottleReadBpsDevice []LinuxThrottleDevice `json:"throttleReadBpsDevice,omitempty"` + // IO write rate limit per cgroup per device, bytes per second + ThrottleWriteBpsDevice []LinuxThrottleDevice `json:"throttleWriteBpsDevice,omitempty"` + // IO read rate limit per cgroup per device, IO per second + ThrottleReadIOPSDevice []LinuxThrottleDevice `json:"throttleReadIOPSDevice,omitempty"` + // IO write rate limit per cgroup per device, IO per second + ThrottleWriteIOPSDevice []LinuxThrottleDevice `json:"throttleWriteIOPSDevice,omitempty"` +} + +// LinuxMemory for Linux cgroup 'memory' resource management +type LinuxMemory struct { + // Memory limit (in bytes). + Limit *int64 `json:"limit,omitempty"` + // Memory reservation or soft_limit (in bytes). + Reservation *int64 `json:"reservation,omitempty"` + // Total memory limit (memory + swap). + Swap *int64 `json:"swap,omitempty"` + // Kernel memory limit (in bytes). + Kernel *int64 `json:"kernel,omitempty"` + // Kernel memory limit for tcp (in bytes) + KernelTCP *int64 `json:"kernelTCP,omitempty"` + // How aggressive the kernel will swap memory pages. + Swappiness *uint64 `json:"swappiness,omitempty"` + // DisableOOMKiller disables the OOM killer for out of memory conditions + DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` +} + +// LinuxCPU for Linux cgroup 'cpu' resource management +type LinuxCPU struct { + // CPU shares (relative weight (ratio) vs. other cgroups with cpu shares). + Shares *uint64 `json:"shares,omitempty"` + // CPU hardcap limit (in usecs). Allowed cpu time in a given period. + Quota *int64 `json:"quota,omitempty"` + // CPU period to be used for hardcapping (in usecs). + Period *uint64 `json:"period,omitempty"` + // How much time realtime scheduling may use (in usecs). + RealtimeRuntime *int64 `json:"realtimeRuntime,omitempty"` + // CPU period to be used for realtime scheduling (in usecs). + RealtimePeriod *uint64 `json:"realtimePeriod,omitempty"` + // CPUs to use within the cpuset. Default is to use any CPU available. + Cpus string `json:"cpus,omitempty"` + // List of memory nodes in the cpuset. Default is to use any available memory node. + Mems string `json:"mems,omitempty"` +} + +// LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) +type LinuxPids struct { + // Maximum number of PIDs. Default is "no limit". 
+ Limit int64 `json:"limit"` +} + +// LinuxNetwork identification and priority configuration +type LinuxNetwork struct { + // Set class identifier for container's network packets + ClassID *uint32 `json:"classID,omitempty"` + // Set priority of network traffic for container + Priorities []LinuxInterfacePriority `json:"priorities,omitempty"` +} + +// LinuxResources has container runtime resource constraints +type LinuxResources struct { + // Devices configures the device whitelist. + Devices []LinuxDeviceCgroup `json:"devices,omitempty"` + // Memory restriction configuration + Memory *LinuxMemory `json:"memory,omitempty"` + // CPU resource restriction configuration + CPU *LinuxCPU `json:"cpu,omitempty"` + // Task resource restriction configuration. + Pids *LinuxPids `json:"pids,omitempty"` + // BlockIO restriction configuration + BlockIO *LinuxBlockIO `json:"blockIO,omitempty"` + // Hugetlb limit (in bytes) + HugepageLimits []LinuxHugepageLimit `json:"hugepageLimits,omitempty"` + // Network restriction configuration + Network *LinuxNetwork `json:"network,omitempty"` +} + +// LinuxDevice represents the mknod information for a Linux special device file +type LinuxDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` +} + +// LinuxDeviceCgroup represents a device rule for the whitelist controller +type LinuxDeviceCgroup struct { + // Allow or deny + Allow bool `json:"allow"` + // Device type, block, char, etc. + Type string `json:"type,omitempty"` + // Major is the device's major number. + Major *int64 `json:"major,omitempty"` + // Minor is the device's minor number. + Minor *int64 `json:"minor,omitempty"` + // Cgroup access permissions format, rwm. + Access string `json:"access,omitempty"` +} + +// Solaris contains platform-specific configuration for Solaris application containers. +type Solaris struct { + // SMF FMRI which should go "online" before we start the container process. + Milestone string `json:"milestone,omitempty"` + // Maximum set of privileges any process in this container can obtain. + LimitPriv string `json:"limitpriv,omitempty"` + // The maximum amount of shared memory allowed for this container. + MaxShmMemory string `json:"maxShmMemory,omitempty"` + // Specification for automatic creation of network resources for this container. + Anet []SolarisAnet `json:"anet,omitempty"` + // Set limit on the amount of CPU time that can be used by container. + CappedCPU *SolarisCappedCPU `json:"cappedCPU,omitempty"` + // The physical and swap caps on the memory that can be used by this container. + CappedMemory *SolarisCappedMemory `json:"cappedMemory,omitempty"` +} + +// SolarisCappedCPU allows users to set limit on the amount of CPU time that can be used by container. +type SolarisCappedCPU struct { + Ncpus string `json:"ncpus,omitempty"` +} + +// SolarisCappedMemory allows users to set the physical and swap caps on the memory that can be used by this container. 
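A brief editorial aside, not part of the vendored diff: a minimal sketch of how the cgroup resource types defined above compose into a LinuxResources value. The concrete limits are arbitrary example values.

package main

import (
	"encoding/json"
	"fmt"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	memLimit := int64(256 << 20) // 256 MiB, arbitrary example value
	cpuShares := uint64(512)
	res := specs.LinuxResources{
		// Deny all device access by default ("rwm" = read/write/mknod).
		Devices: []specs.LinuxDeviceCgroup{{Allow: false, Access: "rwm"}},
		Memory:  &specs.LinuxMemory{Limit: &memLimit},
		CPU:     &specs.LinuxCPU{Shares: &cpuShares},
		Pids:    &specs.LinuxPids{Limit: 100},
	}
	out, err := json.MarshalIndent(res, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}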
+type SolarisCappedMemory struct { + Physical string `json:"physical,omitempty"` + Swap string `json:"swap,omitempty"` +} + +// SolarisAnet provides the specification for automatic creation of network resources for this container. +type SolarisAnet struct { + // Specify a name for the automatically created VNIC datalink. + Linkname string `json:"linkname,omitempty"` + // Specify the link over which the VNIC will be created. + Lowerlink string `json:"lowerLink,omitempty"` + // The set of IP addresses that the container can use. + Allowedaddr string `json:"allowedAddress,omitempty"` + // Specifies whether allowedAddress limitation is to be applied to the VNIC. + Configallowedaddr string `json:"configureAllowedAddress,omitempty"` + // The value of the optional default router. + Defrouter string `json:"defrouter,omitempty"` + // Enable one or more types of link protection. + Linkprotection string `json:"linkProtection,omitempty"` + // Set the VNIC's macAddress + Macaddress string `json:"macAddress,omitempty"` +} + +// Windows defines the runtime configuration for Windows based containers, including Hyper-V containers. +type Windows struct { + // LayerFolders contains a list of absolute paths to directories containing image layers. + LayerFolders []string `json:"layerFolders"` + // Resources contains information for handling resource constraints for the container. + Resources *WindowsResources `json:"resources,omitempty"` + // CredentialSpec contains a JSON object describing a group Managed Service Account (gMSA) specification. + CredentialSpec interface{} `json:"credentialSpec,omitempty"` + // Servicing indicates if the container is being started in a mode to apply a Windows Update servicing operation. + Servicing bool `json:"servicing,omitempty"` + // IgnoreFlushesDuringBoot indicates if the container is being started in a mode where disk writes are not flushed during its boot process. + IgnoreFlushesDuringBoot bool `json:"ignoreFlushesDuringBoot,omitempty"` + // HyperV contains information for running a container with Hyper-V isolation. + HyperV *WindowsHyperV `json:"hyperv,omitempty"` + // Network restriction configuration. + Network *WindowsNetwork `json:"network,omitempty"` +} + +// WindowsResources has container runtime resource constraints for containers running on Windows. +type WindowsResources struct { + // Memory restriction configuration. + Memory *WindowsMemoryResources `json:"memory,omitempty"` + // CPU resource restriction configuration. + CPU *WindowsCPUResources `json:"cpu,omitempty"` + // Storage restriction configuration. + Storage *WindowsStorageResources `json:"storage,omitempty"` +} + +// WindowsMemoryResources contains memory resource management settings. +type WindowsMemoryResources struct { + // Memory limit in bytes. + Limit *uint64 `json:"limit,omitempty"` +} + +// WindowsCPUResources contains CPU resource management settings. +type WindowsCPUResources struct { + // Number of CPUs available to the container. + Count *uint64 `json:"count,omitempty"` + // CPU shares (relative weight to other containers with cpu shares). + Shares *uint16 `json:"shares,omitempty"` + // Specifies the portion of processor cycles that this container can use as a percentage times 100. + Maximum *uint16 `json:"maximum,omitempty"` +} + +// WindowsStorageResources contains storage resource management settings. +type WindowsStorageResources struct { + // Specifies maximum Iops for the system drive. 
+ Iops *uint64 `json:"iops,omitempty"` + // Specifies maximum bytes per second for the system drive. + Bps *uint64 `json:"bps,omitempty"` + // Sandbox size specifies the minimum size of the system drive in bytes. + SandboxSize *uint64 `json:"sandboxSize,omitempty"` +} + +// WindowsNetwork contains network settings for Windows containers. +type WindowsNetwork struct { + // List of HNS endpoints that the container should connect to. + EndpointList []string `json:"endpointList,omitempty"` + // Specifies if unqualified DNS name resolution is allowed. + AllowUnqualifiedDNSQuery bool `json:"allowUnqualifiedDNSQuery,omitempty"` + // Comma separated list of DNS suffixes to use for name resolution. + DNSSearchList []string `json:"DNSSearchList,omitempty"` + // Name (ID) of the container that we will share with the network stack. + NetworkSharedContainerName string `json:"networkSharedContainerName,omitempty"` +} + +// WindowsHyperV contains information for configuring a container to run with Hyper-V isolation. +type WindowsHyperV struct { + // UtilityVMPath is an optional path to the image used for the Utility VM. + UtilityVMPath string `json:"utilityVMPath,omitempty"` +} + +// LinuxSeccomp represents syscall restrictions +type LinuxSeccomp struct { + DefaultAction LinuxSeccompAction `json:"defaultAction"` + Architectures []Arch `json:"architectures,omitempty"` + Syscalls []LinuxSyscall `json:"syscalls,omitempty"` +} + +// Arch used for additional architectures +type Arch string + +// Additional architectures permitted to be used for system calls +// By default only the native architecture of the kernel is permitted +const ( + ArchX86 Arch = "SCMP_ARCH_X86" + ArchX86_64 Arch = "SCMP_ARCH_X86_64" + ArchX32 Arch = "SCMP_ARCH_X32" + ArchARM Arch = "SCMP_ARCH_ARM" + ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" + ArchMIPS Arch = "SCMP_ARCH_MIPS" + ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" + ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" + ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" + ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" + ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" + ArchPPC Arch = "SCMP_ARCH_PPC" + ArchPPC64 Arch = "SCMP_ARCH_PPC64" + ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" + ArchS390 Arch = "SCMP_ARCH_S390" + ArchS390X Arch = "SCMP_ARCH_S390X" + ArchPARISC Arch = "SCMP_ARCH_PARISC" + ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" +) + +// LinuxSeccompAction taken upon Seccomp rule match +type LinuxSeccompAction string + +// Define actions for Seccomp rules +const ( + ActKill LinuxSeccompAction = "SCMP_ACT_KILL" + ActTrap LinuxSeccompAction = "SCMP_ACT_TRAP" + ActErrno LinuxSeccompAction = "SCMP_ACT_ERRNO" + ActTrace LinuxSeccompAction = "SCMP_ACT_TRACE" + ActAllow LinuxSeccompAction = "SCMP_ACT_ALLOW" +) + +// LinuxSeccompOperator used to match syscall arguments in Seccomp +type LinuxSeccompOperator string + +// Define operators for syscall arguments in Seccomp +const ( + OpNotEqual LinuxSeccompOperator = "SCMP_CMP_NE" + OpLessThan LinuxSeccompOperator = "SCMP_CMP_LT" + OpLessEqual LinuxSeccompOperator = "SCMP_CMP_LE" + OpEqualTo LinuxSeccompOperator = "SCMP_CMP_EQ" + OpGreaterEqual LinuxSeccompOperator = "SCMP_CMP_GE" + OpGreaterThan LinuxSeccompOperator = "SCMP_CMP_GT" + OpMaskedEqual LinuxSeccompOperator = "SCMP_CMP_MASKED_EQ" +) + +// LinuxSeccompArg used for matching specific syscall arguments in Seccomp +type LinuxSeccompArg struct { + Index uint `json:"index"` + Value uint64 `json:"value"` + ValueTwo uint64 `json:"valueTwo,omitempty"` + Op LinuxSeccompOperator `json:"op"` +} + +// LinuxSyscall is used to match a 
syscall in Seccomp
+type LinuxSyscall struct {
+	Names  []string           `json:"names"`
+	Action LinuxSeccompAction `json:"action"`
+	Args   []LinuxSeccompArg  `json:"args,omitempty"`
+}
+
+// LinuxIntelRdt has container runtime resource constraints
+// for Intel RDT/CAT, which was introduced in the Linux 4.10 kernel
+type LinuxIntelRdt struct {
+	// The schema for L3 cache id and capacity bitmask (CBM)
+	// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+	L3CacheSchema string `json:"l3CacheSchema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
new file mode 100644
index 0000000000..89dce34be2
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
@@ -0,0 +1,17 @@
+package specs

+// State holds information about the runtime state of the container.
+type State struct {
+	// Version is the version of the specification that is supported.
+	Version string `json:"ociVersion"`
+	// ID is the container ID.
+	ID string `json:"id"`
+	// Status is the runtime status of the container.
+	Status string `json:"status"`
+	// Pid is the process ID for the container process.
+	Pid int `json:"pid,omitempty"`
+	// Bundle is the path to the container's bundle directory.
+	Bundle string `json:"bundle"`
+	// Annotations are key values associated with the container.
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
new file mode 100644
index 0000000000..ff0cb6a80e
--- /dev/null
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -0,0 +1,18 @@
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes
+	VersionPatch = 1
+
+	// VersionDev indicates a development branch. Releases will be the empty string.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 0000000000..148509a403
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 The OpenTracing Authors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 0000000000..c67ab5eef5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,198 @@
+package ext
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+//     span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+//     TagName.Set(span, value)
+//
+var (
+	//////////////////////////////////////////////////////////////////////
+	// SpanKind (client/server or producer/consumer)
+	//////////////////////////////////////////////////////////////////////
+
+	// SpanKind hints at relationship between spans, e.g. client/server
+	SpanKind = spanKindTagName("span.kind")
+
+	// SpanKindRPCClient marks a span representing the client-side of an RPC
+	// or other remote call
+	SpanKindRPCClientEnum = SpanKindEnum("client")
+	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+	// SpanKindRPCServer marks a span representing the server-side of an RPC
+	// or other remote call
+	SpanKindRPCServerEnum = SpanKindEnum("server")
+	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+	// SpanKindProducer marks a span representing the producer-side of a
+	// message bus
+	SpanKindProducerEnum = SpanKindEnum("producer")
+	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+	// SpanKindConsumer marks a span representing the consumer-side of a
+	// message bus
+	SpanKindConsumerEnum = SpanKindEnum("consumer")
+	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+	//////////////////////////////////////////////////////////////////////
+	// Component name
+	//////////////////////////////////////////////////////////////////////
+
+	// Component is a low-cardinality identifier of the module, library,
+	// or package that is generating a span.
+	Component = stringTagName("component")
+
+	//////////////////////////////////////////////////////////////////////
+	// Sampling hint
+	//////////////////////////////////////////////////////////////////////
+
+	// SamplingPriority determines the priority of sampling this Span.
+	SamplingPriority = uint16TagName("sampling.priority")
+
+	//////////////////////////////////////////////////////////////////////
+	// Peer tags. These tags can be emitted by either the client side or
+	// the server side to describe the other side/service in peer-to-peer
+	// communications, like an RPC call.
+	//////////////////////////////////////////////////////////////////////
+
+	// PeerService records the service name of the peer.
+	PeerService = stringTagName("peer.service")
+
+	// PeerAddress records the address name of the peer.
This may be a "ip:port", + // a bare "hostname", a FQDN or even a database DSN substring + // like "mysql://username@127.0.0.1:3306/dbname" + PeerAddress = stringTagName("peer.address") + + // PeerHostname records the host name of the peer + PeerHostname = stringTagName("peer.hostname") + + // PeerHostIPv4 records IP v4 host address of the peer + PeerHostIPv4 = uint32TagName("peer.ipv4") + + // PeerHostIPv6 records IP v6 host address of the peer + PeerHostIPv6 = stringTagName("peer.ipv6") + + // PeerPort records port number of the peer + PeerPort = uint16TagName("peer.port") + + ////////////////////////////////////////////////////////////////////// + // HTTP Tags + ////////////////////////////////////////////////////////////////////// + + // HTTPUrl should be the URL of the request being handled in this segment + // of the trace, in standard URI format. The protocol is optional. + HTTPUrl = stringTagName("http.url") + + // HTTPMethod is the HTTP method of the request, and is case-insensitive. + HTTPMethod = stringTagName("http.method") + + // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the + // HTTP response. + HTTPStatusCode = uint16TagName("http.status_code") + + ////////////////////////////////////////////////////////////////////// + // DB Tags + ////////////////////////////////////////////////////////////////////// + + // DBInstance is database instance name. + DBInstance = stringTagName("db.instance") + + // DBStatement is a database statement for the given database type. + // It can be a query or a prepared statement (i.e., before substitution). + DBStatement = stringTagName("db.statement") + + // DBType is a database type. For any SQL database, "sql". + // For others, the lower-case database category, e.g. "redis" + DBType = stringTagName("db.type") + + // DBUser is a username for accessing database. + DBUser = stringTagName("db.user") + + ////////////////////////////////////////////////////////////////////// + // Message Bus Tag + ////////////////////////////////////////////////////////////////////// + + // MessageBusDestination is an address at which messages can be exchanged + MessageBusDestination = stringTagName("message_bus.destination") + + ////////////////////////////////////////////////////////////////////// + // Error Tag + ////////////////////////////////////////////////////////////////////// + + // Error indicates that operation represented by the span resulted in an error. + Error = boolTagName("error") +) + +// --- + +// SpanKindEnum represents common span types +type SpanKindEnum string + +type spanKindTagName string + +// Set adds a string tag to the `span` +func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { + span.SetTag(string(tag), value) +} + +type rpcServerOption struct { + clientContext opentracing.SpanContext +} + +func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { + if r.clientContext != nil { + opentracing.ChildOf(r.clientContext).Apply(o) + } + SpanKindRPCServer.Apply(o) +} + +// RPCServerOption returns a StartSpanOption appropriate for an RPC server span +// with `client` representing the metadata for the remote peer Span if available. +// In case client == nil, due to the client not being instrumented, this RPC +// server span will be a root span. 
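Illustrative sketch, not part of the vendored diff: server-side usage of RPCServerOption as described in the comment above, extracting the client's SpanContext from incoming HTTP headers with the stock HTTPHeaders propagator. The handler name and port are invented for the example.

package main

import (
	"log"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/ext"
)

func handler(w http.ResponseWriter, r *http.Request) {
	tracer := opentracing.GlobalTracer()
	// Extract fails when the client is not instrumented; the resulting
	// nil SpanContext simply makes the server span a root span.
	clientCtx, _ := tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(r.Header))
	span := tracer.StartSpan("handle-request", ext.RPCServerOption(clientCtx))
	defer span.Finish()
	// ... serve the request under span ...
}

func main() {
	http.HandleFunc("/", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}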
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { + return rpcServerOption{client} +} + +// --- + +type stringTagName string + +// Set adds a string tag to the `span` +func (tag stringTagName) Set(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} + +// --- + +type uint32TagName string + +// Set adds a uint32 tag to the `span` +func (tag uint32TagName) Set(span opentracing.Span, value uint32) { + span.SetTag(string(tag), value) +} + +// --- + +type uint16TagName string + +// Set adds a uint16 tag to the `span` +func (tag uint16TagName) Set(span opentracing.Span, value uint16) { + span.SetTag(string(tag), value) +} + +// --- + +type boolTagName string + +// Add adds a bool tag to the `span` +func (tag boolTagName) Set(span opentracing.Span, value bool) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 0000000000..8c8e793ff2 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,32 @@ +package opentracing + +var ( + globalTracer Tracer = NoopTracer{} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = tracer +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 0000000000..222a65202f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,57 @@ +package opentracing + +import "golang.org/x/net/context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. 
If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// startSpanFromContextWithTracer is factored out for testing purposes. +func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + var span Span + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + span = tracer.StartSpan(operationName, opts...) + } else { + span = tracer.StartSpan(operationName, opts...) + } + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 0000000000..d2cd39a16e --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,245 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key 
string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "<nil>") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}.
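A short sketch (illustrative, not part of the file) of these constructors in use, including a Lazy field whose evaluation is deferred to the tracer; the span name and field keys are made up:

```go
package main

import (
	"runtime"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	span := opentracing.StartSpan("checkout")
	defer span.Finish()

	span.LogFields(
		log.String("event", "charge"),
		log.Float64("amount", 19.99),
		log.Bool("retry", false),
		// The closure runs only if and when the tracer consumes the record.
		log.Lazy(func(fv log.Encoder) {
			fv.EmitInt("goroutines", runtime.NumGoroutine())
		}),
	)
}
```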
+func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 0000000000..3832feb5ce --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import "fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 0000000000..0d32f692c4 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. 
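To make the InterleavedKVToFields contract above concrete, a sketch (illustrative only) of normalizing LogKV-style input into typed fields; the key/value pairs are made up:

```go
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

func main() {
	fields, err := log.InterleavedKVToFields(
		"event", "cache miss",
		"key", "user:42",
		"attempt", 3,
	)
	if err != nil {
		// Odd-length input or a non-string key is reported here.
		panic(err)
	}
	for _, f := range fields {
		fmt.Println(f.String()) // e.g. "event:cache miss"
	}
}
```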
+type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 0000000000..9583fc53ab --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. 
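The sentinel errors declared below are meant to be compared against directly. A sketch of the usual server-side Extract error handling (hypothetical handler; any Tracer implementation):

```go
package main

import (
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
)

func handler(tracer opentracing.Tracer, r *http.Request) {
	wireCtx, err := tracer.Extract(
		opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(r.Header))

	var span opentracing.Span
	switch err {
	case nil:
		// Resume the remote trace.
		span = tracer.StartSpan("handle", opentracing.ChildOf(wireCtx))
	case opentracing.ErrSpanContextNotFound:
		// No incoming trace; start a fresh root span.
		span = tracer.StartSpan("handle")
	default:
		// ErrSpanContextCorrupted, ErrInvalidCarrier, ...: proceed untraced.
		return
	}
	defer span.Finish()
	// ... handle the request ...
}

func main() {}
```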
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // spanContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs.
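For the TextMap format, the TextMapCarrier defined just below is the simplest carrier. A sketch (illustrative only) of injecting into a plain map; with the default NoopTracer nothing is written, a real tracer would populate its own keys:

```go
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	tracer := opentracing.GlobalTracer() // NoopTracer unless one was installed
	span := tracer.StartSpan("publish")
	defer span.Finish()

	carrier := opentracing.TextMapCarrier{}
	if err := tracer.Inject(span.Context(), opentracing.TextMap, carrier); err != nil {
		fmt.Println("inject failed:", err)
	}
	// With a real tracer, carrier now holds its trace/span identifiers.
	fmt.Println(carrier)
}
```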
+ // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HttpHeadersCarrier(httpReq.Header) +// spanContext, err := tracer.Extract(opentracing.HttpHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HttpHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Add(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. +func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 0000000000..f6c3234acc --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,185 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). 
+ Context() SpanContext + + // Sets or changes the operation name. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. +type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. 
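Pulling the Span methods documented above together, a small usage sketch (illustrative; the tag, log, and baggage keys are made up):

```go
package main

import (
	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	span := opentracing.StartSpan("handle.request")
	defer span.Finish()

	span.SetTag("user.id", 42)

	// Type-checked fields, or the looser key/value form:
	span.LogFields(log.String("event", "soft error"), log.Int("waited.millis", 1500))
	span.LogKV("event", "retry", "attempt", 2)

	// Baggage propagates to all future descendants, local and remote.
	span.SetBaggageItem("request.id", "abc123")
	_ = span.BaggageItem("request.id")
}
```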
+type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 0000000000..fd77c1df32 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,305 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag("user_agent", loggedReq.UserAgent), + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). 
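A sketch of FinishWithOptions with explicit timestamps (illustrative only); note the log record timestamp falls inside the [start, finish] window, as the contract below requires:

```go
package main

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
	"github.com/opentracing/opentracing-go/log"
)

func main() {
	start := time.Now()
	span := opentracing.StartSpan("batch", opentracing.StartTime(start))

	span.FinishWithOptions(opentracing.FinishOptions{
		// Both timestamps are explicit; the log record sits between
		// the span's start time and its finish time.
		FinishTime: start.Add(250 * time.Millisecond),
		LogRecords: []opentracing.LogRecord{{
			Timestamp: start.Add(100 * time.Millisecond),
			Fields:    []log.Field{log.String("event", "halfway")},
		}},
	})
}
```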
+ // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). + References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. 
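On the implementation side, the doc comment above shows the canonical way to fold options into a StartSpanOptions value. A slightly fuller sketch, with a hypothetical toyTracer shell standing in for a real implementation:

```go
package main

import (
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

// toyTracer is hypothetical; embedding NoopTracer supplies Inject/Extract.
type toyTracer struct{ opentracing.NoopTracer }

func (t *toyTracer) StartSpan(name string, opts ...opentracing.StartSpanOption) opentracing.Span {
	sso := opentracing.StartSpanOptions{}
	for _, o := range opts {
		o.Apply(&sso)
	}
	if sso.StartTime.IsZero() {
		sso.StartTime = time.Now()
	}
	// A real tracer would build its span from name, sso.References,
	// sso.Tags, and sso.StartTime; here we fall back to the no-op span.
	return t.NoopTracer.StartSpan(name)
}

func main() {}
```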
+ // + // If specified, the caller hands off ownership of Tags at + // StartSpan() invocation time. + Tags map[string]interface{} +} + +// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. +// +// StartSpanOption borrows from the "functional options" pattern, per +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type StartSpanOption interface { + Apply(*StartSpanOptions) +} + +// SpanReferenceType is an enum type describing different categories of +// relationships between two Spans. If Span-2 refers to Span-1, the +// SpanReferenceType describes Span-1 from Span-2's perspective. For example, +// ChildOfRef means that Span-1 created Span-2. +// +// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for +// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, +// or Span-2 may be sitting in a distributed queue behind Span-1. +type SpanReferenceType int + +const ( + // ChildOfRef refers to a parent Span that caused *and* somehow depends + // upon the new child Span. Often (but not always), the parent Span cannot + // finish until the child Span does. + // + // A timing diagram for a ChildOfRef that's blocked on the new Span: + // + // [-Parent Span---------] + // [-Child Span----] + // + // See http://opentracing.io/spec/ + // + // See opentracing.ChildOf() + ChildOfRef SpanReferenceType = iota + + // FollowsFromRef refers to a parent Span that does not depend in any way + // on the result of the new child Span. For instance, one might use + // FollowsFromRefs to describe pipeline stages separated by queues, + // or a fire-and-forget cache insert at the tail end of a web request. + // + // A FollowsFromRef Span is part of the same logical trace as the new Span: + // i.e., the new Span is somehow caused by the work of its FollowsFromRef. + // + // All of the following could be valid timing diagrams for children that + // "FollowFrom" a parent. + // + // [-Parent Span-] [-Child Span-] + // + // + // [-Parent Span--] + // [-Child Span-] + // + // + // [-Parent Span-] + // [-Child Span-] + // + // See http://opentracing.io/spec/ + // + // See opentracing.FollowsFrom() + FollowsFromRef +) + +// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a +// referenced SpanContext. See the SpanReferenceType documentation for +// supported relationships. If SpanReference is created with +// ReferencedContext==nil, it has no effect. Thus it allows for a more concise +// syntax for starting spans: +// +// sc, _ := tracer.Extract(someFormat, someCarrier) +// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) +// +// The `ChildOf(sc)` option above will not panic if sc == nil; it will just +// not add the parent span reference to the options. +type SpanReference struct { + Type SpanReferenceType + ReferencedContext SpanContext +} + +// Apply satisfies the StartSpanOption interface. +func (r SpanReference) Apply(o *StartSpanOptions) { + if r.ReferencedContext != nil { + o.References = append(o.References, r) + } +} + +// ChildOf returns a StartSpanOption pointing to a dependent parent span. +// If sc == nil, the option has no effect. +// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect.
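The practical difference between the two reference types, as a sketch using ChildOf above and FollowsFrom defined just below (parent is any in-flight span; the operation names are made up):

```go
package main

import opentracing "github.com/opentracing/opentracing-go"

func spawn(tracer opentracing.Tracer, parent opentracing.Span) {
	// ChildOf: the parent typically waits on this work.
	fetch := tracer.StartSpan("fetch.user", opentracing.ChildOf(parent.Context()))
	fetch.Finish()

	// FollowsFrom: fire-and-forget work the parent does not wait on,
	// e.g. a cache fill enqueued at the tail of a request.
	fill := tracer.StartSpan("cache.fill", opentracing.FollowsFrom(parent.Context()))
	fill.Finish()
}

func main() {}
```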
+// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/ostreedev/ostree-go/LICENSE b/vendor/github.com/ostreedev/ostree-go/LICENSE deleted file mode 100644 index aa93b4dab9..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/LICENSE +++ /dev/null @@ -1,17 +0,0 @@ -Portions of this code are derived from: - -https://github.com/dradtke/gotk3 - -Copyright (c) 2013 Conformal Systems LLC. - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go deleted file mode 100644 index a4ad0f0005..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gboolean.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GBoolean - */ - -// GBoolean is a Go representation of glib's gboolean -type GBoolean C.gboolean - -func NewGBoolean() GBoolean { - return GBoolean(0) -} - -func GBool(b bool) GBoolean { - if b { - return GBoolean(1) - } - return GBoolean(0) -} - -func (b GBoolean) Ptr() unsafe.Pointer { - return unsafe.Pointer(&b) -} - -func GoBool(b GBoolean) bool { - if b != 0 { - return true - } - return false -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go deleted file mode 100644 index 537db4720d..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gcancellable.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -import ( - "unsafe" -) - -// GIO types - -type GCancellable struct { - *GObject -} - -func (self *GCancellable) native() *C.GCancellable { - return (*C.GCancellable)(unsafe.Pointer(self)) -} - -func (self *GCancellable) Ptr() unsafe.Pointer { - return unsafe.Pointer(self) -} - -// At the moment, no cancellable API, just pass nil diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go deleted file mode 100644 index 714b15d0bf..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gerror.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "errors" - "unsafe" -) - -/* - * GError - */ - -// GError is a representation of GLib's GError -type GError struct { - ptr unsafe.Pointer -} - -func NewGError() GError { - return GError{nil} -} - -func (e GError) Ptr() unsafe.Pointer { - if e.ptr == nil { - return nil - } - return e.ptr -} - -func (e GError) Nil() { - e.ptr = nil -} - -func (e *GError) native() *C.GError { - if e == nil || e.ptr == nil { - return nil - } - return (*C.GError)(e.ptr) -} - -func ToGError(ptr unsafe.Pointer) GError { - return GError{ptr} -} - -func ConvertGError(e GError) error { - defer C.g_error_free(e.native()) - return errors.New(C.GoString((*C.char)(C._g_error_get_message(e.native())))) -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go deleted file mode 100644 index babe705096..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfile.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GFile - */ - -type GFile struct { - ptr unsafe.Pointer -} - -func (f GFile) Ptr() unsafe.Pointer { - return f.ptr -} - -func NewGFile() *GFile { - return &GFile{nil} -} - -func ToGFile(ptr unsafe.Pointer) *GFile { - gf := NewGFile() - gf.ptr = ptr - return gf -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go deleted file mode 100644 index 9c155834a8..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gfileinfo.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GFileInfo - */ - -type GFileInfo struct { - ptr unsafe.Pointer -} - -func (fi GFileInfo) Ptr() unsafe.Pointer { - return fi.ptr -} - -func NewGFileInfo() GFileInfo { - var fi GFileInfo = GFileInfo{nil} - return fi -} - -func ToGFileInfo(p unsafe.Pointer) *GFileInfo { - var fi *GFileInfo = &GFileInfo{} - fi.ptr = p - return fi -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go deleted file mode 100644 index 20cc321cbb..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtable.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GHashTable - */ -type GHashTable struct { - ptr unsafe.Pointer -} - -func (ht *GHashTable) Ptr() unsafe.Pointer { - return ht.ptr -} - -func (ht *GHashTable) native() *C.GHashTable { - return (*C.GHashTable)(ht.ptr) -} - -func ToGHashTable(ptr unsafe.Pointer) *GHashTable { - return &GHashTable{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go deleted file mode 100644 index 1657edf5fc..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/ghashtableiter.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GHashTableIter - */ -type GHashTableIter struct { - ptr unsafe.Pointer -} - -func (ht *GHashTableIter) Ptr() unsafe.Pointer { - return ht.ptr -} - -func (ht *GHashTableIter) native() *C.GHashTableIter { - return (*C.GHashTableIter)(ht.ptr) -} - -func ToGHashTableIter(ptr unsafe.Pointer) *GHashTableIter { - return &GHashTableIter{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go deleted file mode 100644 index f3d3aa5266..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h deleted file mode 100644 index a55bd242f9..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/glibobject.go.h +++ /dev/null @@ -1,17 +0,0 @@ -#include - -static char * -_g_error_get_message (GError *error) -{ - g_assert (error != NULL); - return error->message; -} - -static const char * -_g_variant_lookup_string (GVariant *v, const char *key) -{ - const char *r; - if (g_variant_lookup (v, key, "&s", &r)) - return r; - return NULL; -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go deleted file mode 100644 index dedbe749a4..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gobject.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "unsafe" -) - -/* - * GObject - */ - -// IObject is an interface type implemented by Object and all types which embed -// an Object. It is meant to be used as a type for function arguments which -// require GObjects or any subclasses thereof. -type IObject interface { - toGObject() *C.GObject - ToObject() *GObject -} - -// GObject is a representation of GLib's GObject. -type GObject struct { - ptr unsafe.Pointer -} - -func (v *GObject) Ptr() unsafe.Pointer { - return v.ptr -} - -func (v *GObject) native() *C.GObject { - if v == nil { - return nil - } - return (*C.GObject)(v.ptr) -} - -func (v *GObject) Ref() { - C.g_object_ref(C.gpointer(v.Ptr())) -} - -func (v *GObject) Unref() { - C.g_object_unref(C.gpointer(v.Ptr())) -} - -func (v *GObject) RefSink() { - C.g_object_ref_sink(C.gpointer(v.native())) -} - -func (v *GObject) IsFloating() bool { - c := C.g_object_is_floating(C.gpointer(v.native())) - return GoBool(GBoolean(c)) -} - -func (v *GObject) ForceFloating() { - C.g_object_force_floating(v.native()) -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go deleted file mode 100644 index 05fd54a1a4..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/goptioncontext.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package glibobject - -import ( - "unsafe" -) - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" - -/* - * GOptionContext - */ - -type GOptionContext struct { - ptr unsafe.Pointer -} - -func (oc *GOptionContext) Ptr() unsafe.Pointer { - return oc.ptr -} - -func (oc *GOptionContext) native() *C.GOptionContext { - return (*C.GOptionContext)(oc.ptr) -} - -func ToGOptionContext(ptr unsafe.Pointer) GOptionContext { - return GOptionContext{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go b/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go deleted file mode 100644 index 30572ea87f..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/glibobject/gvariant.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright (c) 2013 Conformal Systems - * - * This file originated from: http://opensource.conformal.com/ - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package glibobject - -// #cgo pkg-config: glib-2.0 gobject-2.0 -// #include -// #include -// #include -// #include "glibobject.go.h" -// #include -import "C" -import ( - "fmt" - "unsafe" -) - -/* - * GVariant - */ - -type GVariant struct { - ptr unsafe.Pointer -} - -//func GVariantNew(p unsafe.Pointer) *GVariant { -//o := &GVariant{p} -//runtime.SetFinalizer(o, (*GVariant).Unref) -//return o; -//} - -//func GVariantNewSink(p unsafe.Pointer) *GVariant { -//o := &GVariant{p} -//runtime.SetFinalizer(o, (*GVariant).Unref) -//o.RefSink() -//return o; -//} - -func (v *GVariant) native() *C.GVariant { - return (*C.GVariant)(v.ptr) -} - -func (v *GVariant) Ptr() unsafe.Pointer { - return v.ptr -} - -func (v *GVariant) Ref() { - C.g_variant_ref(v.native()) -} - -func (v *GVariant) Unref() { - C.g_variant_unref(v.native()) -} - -func (v *GVariant) RefSink() { - C.g_variant_ref_sink(v.native()) -} - -func (v *GVariant) TypeString() string { - cs := (*C.char)(C.g_variant_get_type_string(v.native())) - return C.GoString(cs) -} - -func (v *GVariant) GetChildValue(i int) *GVariant { - cchild := C.g_variant_get_child_value(v.native(), C.gsize(i)) - return (*GVariant)(unsafe.Pointer(cchild)) -} - -func (v *GVariant) LookupString(key string) (string, error) { - ckey := C.CString(key) - defer C.free(unsafe.Pointer(ckey)) - // TODO: Find a way to have constant C strings in golang - cstr := C._g_variant_lookup_string(v.native(), ckey) - if cstr == nil { - return "", fmt.Errorf("No such key: %s", key) - } - return C.GoString(cstr), nil -} - -func ToGVariant(ptr unsafe.Pointer) *GVariant { - return &GVariant{ptr} -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go deleted file mode 100644 index d3a8ae5fde..0000000000 --- 
a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go +++ /dev/null @@ -1,93 +0,0 @@ -// Package otbuiltin contains all of the basic commands for creating and -// interacting with an ostree repository -package otbuiltin - -import ( - "errors" - "fmt" - "runtime" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -type Repo struct { - //*glib.GObject - ptr unsafe.Pointer -} - -// Converts an ostree repo struct to its C equivalent -func (r *Repo) native() *C.OstreeRepo { - //return (*C.OstreeRepo)(r.Ptr()) - return (*C.OstreeRepo)(r.ptr) -} - -// Takes a C ostree repo and converts it to a Go struct -func repoFromNative(p *C.OstreeRepo) *Repo { - if p == nil { - return nil - } - //o := (*glib.GObject)(unsafe.Pointer(p)) - //r := &Repo{o} - r := &Repo{unsafe.Pointer(p)} - return r -} - -// Checks if the repo has been initialized -func (r *Repo) isInitialized() bool { - if r.ptr != nil { - return true - } - return false -} - -// Attempts to open the repo at the given path -func OpenRepo(path string) (*Repo, error) { - var cerr *C.GError = nil - cpath := C.CString(path) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - crepo := C.ostree_repo_new(pathc) - repo := repoFromNative(crepo) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, nil, &cerr))) - if !r { - return nil, generateError(cerr) - } - return repo, nil -} - -// Enable support for tombstone commits, which allow the repo to distinguish between -// commits that were intentionally deleted and commits that were removed accidentally -func enableTombstoneCommits(repo *Repo) error { - var tombstoneCommits bool - var config *C.GKeyFile = C.ostree_repo_get_config(repo.native()) - var cerr *C.GError - - tombstoneCommits = glib.GoBool(glib.GBoolean(C.g_key_file_get_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), nil))) - - //tombstoneCommits is false only if it really is false or if it is set to FALSE in the config file - if !tombstoneCommits { - C.g_key_file_set_boolean(config, (*C.gchar)(C.CString("core")), (*C.gchar)(C.CString("tombstone-commits")), C.TRUE) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_config(repo.native(), config, &cerr))) { - return generateError(cerr) - } - } - return nil -} - -func generateError(err *C.GError) error { - goErr := glib.ConvertGError(glib.ToGError(unsafe.Pointer(err))) - _, file, line, ok := runtime.Caller(1) - if ok { - return errors.New(fmt.Sprintf("%s:%d - %s", file, line, goErr)) - } else { - return goErr - } -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h deleted file mode 100644 index 734de98219..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/builtin.go.h +++ /dev/null @@ -1,191 +0,0 @@ -#ifndef BUILTIN_GO_H -#define BUILTIN_GO_H - -#include -#include -#include -#include - -static guint32 owner_uid; -static guint32 owner_gid; - -static void -_ostree_repo_append_modifier_flags(OstreeRepoCommitModifierFlags *flags, int flag) { - *flags |= flag; -} - -struct CommitFilterData { - GHashTable *mode_adds; - GHashTable *skip_list; -}; - -typedef struct CommitFilterData CommitFilterData; - -static char* _gptr_to_str(gpointer p) -{ - return (char*)p; -} - -// The following 3 functions are wrapper functions for macros since CGO can't parse macros -static 
OstreeRepoFile* -_ostree_repo_file(GFile *file) -{ - return OSTREE_REPO_FILE (file); -} - -static guint -_gpointer_to_uint (gpointer ptr) -{ - return GPOINTER_TO_UINT (ptr); -} - -static gpointer -_guint_to_pointer (guint u) -{ - return GUINT_TO_POINTER (u); -} - -static void -_g_clear_object (volatile GObject **object_ptr) -{ - g_clear_object(object_ptr); -} - -static const GVariantType* -_g_variant_type (char *type) -{ - return G_VARIANT_TYPE (type); -} - -static int -_at_fdcwd () -{ - return AT_FDCWD; -} - -static guint64 -_guint64_from_be (guint64 val) -{ - return GUINT64_FROM_BE (val); -} - - - -// These functions are wrappers for variadic functions since CGO can't parse variadic functions -static void -_g_printerr_onearg (char* msg, - char* arg) -{ - g_printerr("%s %s\n", msg, arg); -} - -static void -_g_set_error_onearg (GError *err, - char* msg, - char* arg) -{ - g_set_error(&err, G_IO_ERROR, G_IO_ERROR_FAILED, "%s %s", msg, arg); -} - -static void -_g_variant_builder_add_twoargs (GVariantBuilder* builder, - const char *format_string, - char *arg1, - GVariant *arg2) -{ - g_variant_builder_add(builder, format_string, arg1, arg2); -} - -static GHashTable* -_g_hash_table_new_full () -{ - return g_hash_table_new_full(g_str_hash, g_str_equal, g_free, NULL); -} - -static void -_g_variant_get_commit_dump (GVariant *variant, - const char *format, - char **subject, - char **body, - guint64 *timestamp) -{ - return g_variant_get (variant, format, NULL, NULL, NULL, subject, body, timestamp, NULL, NULL); -} - -static guint32 -_binary_or (guint32 a, guint32 b) -{ - return a | b; -} - -static void -_cleanup (OstreeRepo *self, - OstreeRepoCommitModifier *modifier, - GCancellable *cancellable, - GError **out_error) -{ - if (self) - ostree_repo_abort_transaction(self, cancellable, out_error); - if (modifier) - ostree_repo_commit_modifier_unref (modifier); -} - -// The following functions make up a commit_filter function that gets passed into -// another C function (and thus can't be a go function) as well as its helpers -static OstreeRepoCommitFilterResult -_commit_filter (OstreeRepo *self, - const char *path, - GFileInfo *file_info, - gpointer user_data) -{ - struct CommitFilterData *data = user_data; - GHashTable *mode_adds = data->mode_adds; - GHashTable *skip_list = data->skip_list; - gpointer value; - - if (owner_uid >= 0) - g_file_info_set_attribute_uint32 (file_info, "unix::uid", owner_uid); - if (owner_gid >= 0) - g_file_info_set_attribute_uint32 (file_info, "unix::gid", owner_gid); - - if (mode_adds && g_hash_table_lookup_extended (mode_adds, path, NULL, &value)) - { - guint current_mode = g_file_info_get_attribute_uint32 (file_info, "unix::mode"); - guint mode_add = GPOINTER_TO_UINT (value); - g_file_info_set_attribute_uint32 (file_info, "unix::mode", - current_mode | mode_add); - g_hash_table_remove (mode_adds, path); - } - - if (skip_list && g_hash_table_contains (skip_list, path)) - { - g_hash_table_remove (skip_list, path); - return OSTREE_REPO_COMMIT_FILTER_SKIP; - } - - return OSTREE_REPO_COMMIT_FILTER_ALLOW; -} - - -static void -_set_owner_uid (guint32 uid) -{ - owner_uid = uid; -} - -static void _set_owner_gid (guint32 gid) -{ - owner_gid = gid; -} - -// Wrapper function for a function that takes a C function as a parameter. 
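
builtin.go.h exists because cgo cannot expand function-like C macros and cannot call variadic C functions, so each one gets a fixed-signature C wrapper. A self-contained toy showing the same trick follows; _max and _print_onearg are invented for this example and are not names from the deleted header.

    package main

    /*
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    // cgo cannot expand function-like macros or call variadic functions such
    // as printf with a variable argument list, so each gets a fixed-signature
    // wrapper, just as builtin.go.h wraps GLib/OSTree macros and g_printerr.
    static int _max(int a, int b) { return MAX(a, b); }
    static void _print_onearg(const char *msg, const char *arg) { printf("%s %s\n", msg, arg); }
    */
    import "C"

    import (
    	"fmt"
    	"unsafe"
    )

    func main() {
    	fmt.Println(int(C._max(3, 7))) // prints 7

    	msg := C.CString("hello")
    	arg := C.CString("world")
    	defer C.free(unsafe.Pointer(msg))
    	defer C.free(unsafe.Pointer(arg))
    	C._print_onearg(msg, arg) // prints "hello world"
    }
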
-// That translation doesn't work in go -static OstreeRepoCommitModifier* -_ostree_repo_commit_modifier_new_wrapper (OstreeRepoCommitModifierFlags flags, - gpointer user_data, - GDestroyNotify destroy_notify) -{ - return ostree_repo_commit_modifier_new(flags, _commit_filter, user_data, destroy_notify); -} - -#endif diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/cat.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go deleted file mode 100644 index 55b51bfbd0..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checkout.go +++ /dev/null @@ -1,102 +0,0 @@ -package otbuiltin - -import ( - "strings" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// Global variable for options -var checkoutOpts checkoutOptions - -// Contains all of the options for checking commits out of -// an ostree repo -type checkoutOptions struct { - UserMode bool // Do not change file ownership or initialize extended attributes - Union bool // Keep existing directories and unchanged files, overwriting existing filesystem - AllowNoent bool // Do nothing if the specified filepath does not exist - DisableCache bool // Do not update or use the internal repository uncompressed object cache - Whiteouts bool // Process 'whiteout' (docker style) entries - RequireHardlinks bool // Do not fall back to full copies if hard linking fails - Subpath string // Checkout sub-directory path - FromFile string // Process many checkouts from the given file -} - -// Instantiates and returns a checkoutOptions struct with default values set -func NewCheckoutOptions() checkoutOptions { - return checkoutOptions{} -} - -// Checks out a commit with the given ref from a repository at the location of repo path to the destination.
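
For context, the checkout API being deleted here was driven roughly as in the sketch below. The signatures come from the deleted file itself; the repo path, destination, and ref are hypothetical, and building this requires the ostree-1 development libraries.

    package main

    import (
    	"log"

    	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
    	opts := otbuiltin.NewCheckoutOptions()
    	opts.UserMode = true // do not try to chown files as a regular user
    	opts.Union = true    // overlay onto an existing directory tree

    	// Repo path, destination dir, and ref are example values.
    	if err := otbuiltin.Checkout("/srv/ostree/repo", "/tmp/rootfs", "mybranch", opts); err != nil {
    		log.Fatal(err)
    	}
    }
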
Returns an error if the checkout could not be processed -func Checkout(repoPath, destination, commit string, opts checkoutOptions) error { - checkoutOpts = opts - - var cancellable *glib.GCancellable - ccommit := C.CString(commit) - defer C.free(unsafe.Pointer(ccommit)) - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - repoPathc := C.g_file_new_for_path(C.CString(repoPath)) - defer C.g_object_unref(C.gpointer(repoPathc)) - crepo := C.ostree_repo_new(repoPathc) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_open(crepo, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - if strings.Compare(checkoutOpts.FromFile, "") != 0 { - err := processManyCheckouts(crepo, destination, cancellable) - if err != nil { - return err - } - } else { - var resolvedCommit *C.char - defer C.free(unsafe.Pointer(resolvedCommit)) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(crepo, ccommit, C.FALSE, &resolvedCommit, &cerr))) { - return generateError(cerr) - } - err := processOneCheckout(crepo, resolvedCommit, checkoutOpts.Subpath, destination, cancellable) - if err != nil { - return err - } - } - return nil -} - -// Processes one checkout from the repo -func processOneCheckout(crepo *C.OstreeRepo, resolvedCommit *C.char, subpath, destination string, cancellable *glib.GCancellable) error { - cdest := C.CString(destination) - defer C.free(unsafe.Pointer(cdest)) - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - var repoCheckoutAtOptions C.OstreeRepoCheckoutAtOptions - - if checkoutOpts.UserMode { - repoCheckoutAtOptions.mode = C.OSTREE_REPO_CHECKOUT_MODE_USER - } - if checkoutOpts.Union { - repoCheckoutAtOptions.overwrite_mode = C.OSTREE_REPO_CHECKOUT_OVERWRITE_UNION_FILES - } - - checkedOut := glib.GoBool(glib.GBoolean(C.ostree_repo_checkout_at(crepo, &repoCheckoutAtOptions, C._at_fdcwd(), cdest, resolvedCommit, nil, &cerr))) - if !checkedOut { - return generateError(cerr) - } - - return nil -} - -// process many checkouts -func processManyCheckouts(crepo *C.OstreeRepo, target string, cancellable *glib.GCancellable) error { - return nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/checksum.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go deleted file mode 100644 index 9550f802c8..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/commit.go +++ /dev/null @@ -1,482 +0,0 @@ -package otbuiltin - -import ( - "bytes" - "errors" - "fmt" - "strings" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// Declare global variable to store commitOptions -var options commitOptions - -// Declare a function prototype for being passed into another function -type handleLineFunc func(string, *glib.GHashTable) error - -// Contains all of the options for committing to an ostree repo.
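
The commit API that follows was normally used inside a transaction. A sketch of that flow appears below; OpenRepo, PrepareTransaction, Commit, and CommitTransaction are the functions from the deleted files, while the paths, branch, and subject are hypothetical.

    package main

    import (
    	"log"

    	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
    	repo, err := otbuiltin.OpenRepo("/srv/ostree/repo") // example path
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := repo.PrepareTransaction(); err != nil {
    		log.Fatal(err)
    	}

    	opts := otbuiltin.NewCommitOptions()
    	opts.Subject = "example commit subject"

    	// Commit the tree at the given path onto the given branch.
    	checksum, err := repo.Commit("/path/to/tree", "mybranch", opts)
    	if err != nil {
    		log.Fatal(err)
    	}
    	if _, err := repo.CommitTransaction(); err != nil {
    		log.Fatal(err)
    	}
    	log.Println("committed", checksum)
    }
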
Initialize -// with NewCommitOptions() -type commitOptions struct { - Subject string // One line subject - Body string // Full description - Parent string // Parent of the commit - Tree []string // 'dir=PATH' or 'tar=TARFILE' or 'ref=COMMIT': overlay the given argument as a tree - AddMetadataString []string // Add a key/value pair to metadata - AddDetachedMetadataString []string // Add a key/value pair to detached metadata - OwnerUID int // Set file ownership to user id - OwnerGID int // Set file ownership to group id - NoXattrs bool // Do not import extended attributes - LinkCheckoutSpeedup bool // Optimize for commits of trees composed of hardlinks in the repository - TarAutoCreateParents bool // When loading tar archives, automatically create parent directories as needed - SkipIfUnchanged bool // If the contents are unchanged from a previous commit, do nothing - StatOverrideFile string // File containing list of modifications to make permissions - SkipListFile string // File containing list of file paths to skip - GenerateSizes bool // Generate size information along with commit metadata - GpgSign []string // GPG Key ID with which to sign the commit (if you have GPGME - GNU Privacy Guard Made Easy) - GpgHomedir string // GPG home directory to use when looking for keyrings (if you have GPGME - GNU Privacy Guard Made Easy) - Timestamp time.Time // Override the timestamp of the commit - Orphan bool // Commit does not belong to a branch - Fsync bool // Specify whether fsync should be used or not. Default to true -} - -// Initializes a commitOptions struct and sets default values -func NewCommitOptions() commitOptions { - var co commitOptions - co.OwnerUID = -1 - co.OwnerGID = -1 - co.Fsync = true - return co -} - -type OstreeRepoTransactionStats struct { - metadata_objects_total int32 - metadata_objects_written int32 - content_objects_total int32 - content_objects_written int32 - content_bytes_written uint64 -} - -func (repo *Repo) PrepareTransaction() (bool, error) { - var cerr *C.GError = nil - var resume C.gboolean - - r := glib.GoBool(glib.GBoolean(C.ostree_repo_prepare_transaction(repo.native(), &resume, nil, &cerr))) - if !r { - return false, generateError(cerr) - } - return glib.GoBool(glib.GBoolean(resume)), nil -} - -func (repo *Repo) CommitTransaction() (*OstreeRepoTransactionStats, error) { - var cerr *C.GError = nil - var stats OstreeRepoTransactionStats = OstreeRepoTransactionStats{} - statsPtr := (*C.OstreeRepoTransactionStats)(unsafe.Pointer(&stats)) - r := glib.GoBool(glib.GBoolean(C.ostree_repo_commit_transaction(repo.native(), statsPtr, nil, &cerr))) - if !r { - return nil, generateError(cerr) - } - return &stats, nil -} - -func (repo *Repo) TransactionSetRef(remote string, ref string, checksum string) { - var cRemote *C.char = nil - var cRef *C.char = nil - var cChecksum *C.char = nil - - if remote != "" { - cRemote = C.CString(remote) - } - if ref != "" { - cRef = C.CString(ref) - } - if checksum != "" { - cChecksum = C.CString(checksum) - } - C.ostree_repo_transaction_set_ref(repo.native(), cRemote, cRef, cChecksum) -} - -func (repo *Repo) AbortTransaction() error { - var cerr *C.GError = nil - r := glib.GoBool(glib.GBoolean(C.ostree_repo_abort_transaction(repo.native(), nil, &cerr))) - if !r { - return generateError(cerr) - } - return nil -} - -func (repo *Repo) RegenerateSummary() error { - var cerr *C.GError = nil - r := glib.GoBool(glib.GBoolean(C.ostree_repo_regenerate_summary(repo.native(), nil, nil, &cerr))) - if !r { - return generateError(cerr) - } - return nil 
-} - -// Commits a directory, specified by commitPath, to an ostree repo as a given branch -func (repo *Repo) Commit(commitPath, branch string, opts commitOptions) (string, error) { - options = opts - - var err error - var modeAdds *glib.GHashTable - var skipList *glib.GHashTable - var objectToCommit *glib.GFile - var skipCommit bool = false - var ccommitChecksum *C.char - defer C.free(unsafe.Pointer(ccommitChecksum)) - var flags C.OstreeRepoCommitModifierFlags = 0 - var filter_data C.CommitFilterData - - var cerr *C.GError - defer C.free(unsafe.Pointer(cerr)) - var metadata *C.GVariant = nil - defer func(){ - if metadata != nil { - defer C.g_variant_unref(metadata) - } - }() - - var detachedMetadata *C.GVariant = nil - defer C.free(unsafe.Pointer(detachedMetadata)) - var mtree *C.OstreeMutableTree - defer C.free(unsafe.Pointer(mtree)) - var root *C.GFile - defer C.free(unsafe.Pointer(root)) - var modifier *C.OstreeRepoCommitModifier - defer C.free(unsafe.Pointer(modifier)) - var cancellable *C.GCancellable - defer C.free(unsafe.Pointer(cancellable)) - - cpath := C.CString(commitPath) - defer C.free(unsafe.Pointer(cpath)) - csubject := C.CString(options.Subject) - defer C.free(unsafe.Pointer(csubject)) - cbody := C.CString(options.Body) - defer C.free(unsafe.Pointer(cbody)) - cbranch := C.CString(branch) - defer C.free(unsafe.Pointer(cbranch)) - cparent := C.CString(options.Parent) - defer C.free(unsafe.Pointer(cparent)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { - goto out - } - - // If the user provided a stat override file - if strings.Compare(options.StatOverrideFile, "") != 0 { - modeAdds = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) - if err = parseFileByLine(options.StatOverrideFile, handleStatOverrideLine, modeAdds, cancellable); err != nil { - goto out - } - } - - // If the user provided a skiplist file - if strings.Compare(options.SkipListFile, "") != 0 { - skipList = glib.ToGHashTable(unsafe.Pointer(C._g_hash_table_new_full())) - if err = parseFileByLine(options.SkipListFile, handleSkipListline, skipList, cancellable); err != nil { - goto out - } - } - - if options.AddMetadataString != nil { - metadata, err = parseKeyValueStrings(options.AddMetadataString) - if err != nil { - goto out - } - } - - if options.AddDetachedMetadataString != nil { - _, err := parseKeyValueStrings(options.AddDetachedMetadataString) - if err != nil { - goto out - } - } - - if strings.Compare(branch, "") == 0 && !options.Orphan { - err = errors.New("A branch must be specified or use commitOptions.Orphan") - goto out - } - - if options.NoXattrs { - C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_SKIP_XATTRS) - } - if options.GenerateSizes { - C._ostree_repo_append_modifier_flags(&flags, C.OSTREE_REPO_COMMIT_MODIFIER_FLAGS_GENERATE_SIZES) - } - if !options.Fsync { - C.ostree_repo_set_disable_fsync(repo.native(), C.TRUE) - } - - if flags != 0 || options.OwnerUID >= 0 || options.OwnerGID >= 0 || strings.Compare(options.StatOverrideFile, "") != 0 || options.NoXattrs { - filter_data.mode_adds = (*C.GHashTable)(modeAdds.Ptr()) - filter_data.skip_list = (*C.GHashTable)(skipList.Ptr()) - C._set_owner_uid((C.guint32)(options.OwnerUID)) - C._set_owner_gid((C.guint32)(options.OwnerGID)) - modifier = C._ostree_repo_commit_modifier_new_wrapper(flags, C.gpointer(&filter_data), nil) - } - - if strings.Compare(options.Parent, "") != 0 { - if strings.Compare(options.Parent, "none") == 0 { - options.Parent = "" - } - } else if 
!options.Orphan { - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.TRUE, &cparent, &cerr))) { - goto out - } - } - - if options.LinkCheckoutSpeedup && !glib.GoBool(glib.GBoolean(C.ostree_repo_scan_hardlinks(repo.native(), cancellable, &cerr))) { - goto out - } - - mtree = C.ostree_mutable_tree_new() - - if len(commitPath) == 0 && (len(options.Tree) == 0 || len(options.Tree[0]) == 0) { - currentDir := (*C.char)(C.g_get_current_dir()) - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(currentDir))) - C.g_free(C.gpointer(currentDir)) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else if len(options.Tree) != 0 { - var eq int = -1 - cerr = nil - for tree := range options.Tree { - eq = strings.Index(options.Tree[tree], "=") - if eq == -1 { - C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(options.Tree[tree])) - goto out - } - treeType := options.Tree[tree][:eq] - treeVal := options.Tree[tree][eq+1:] - - if strings.Compare(treeType, "dir") == 0 { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else if strings.Compare(treeType, "tar") == 0 { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(treeVal)))) - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_archive_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, (C.gboolean)(glib.GBool(opts.TarAutoCreateParents)), cancellable, &cerr))) { - fmt.Println("error 1") - goto out - } - } else if strings.Compare(treeType, "ref") == 0 { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), C.CString(treeVal), (**C.GFile)(objectToCommit.Ptr()), nil, cancellable, &cerr))) { - goto out - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } else { - C._g_set_error_onearg(cerr, C.CString("Missing type in tree specification"), C.CString(treeVal)) - goto out - } - } - } else { - objectToCommit = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(cpath))) - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_directory_to_mtree(repo.native(), (*C.GFile)(objectToCommit.Ptr()), mtree, modifier, cancellable, &cerr))) { - goto out - } - } - - if modeAdds != nil && C.g_hash_table_size((*C.GHashTable)(modeAdds.Ptr())) > 0 { - var hashIter *C.GHashTableIter - - var key, value C.gpointer - - C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(modeAdds.Ptr())) - - for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { - C._g_printerr_onearg(C.CString("Unmatched StatOverride path: "), C._gptr_to_str(key)) - } - err = errors.New("Unmatched StatOverride paths") - C.free(unsafe.Pointer(hashIter)) - C.free(unsafe.Pointer(key)) - C.free(unsafe.Pointer(value)) - goto out - } - - if skipList != nil && C.g_hash_table_size((*C.GHashTable)(skipList.Ptr())) > 0 { - var hashIter *C.GHashTableIter - var key, value C.gpointer - - C.g_hash_table_iter_init(hashIter, (*C.GHashTable)(skipList.Ptr())) - - for glib.GoBool(glib.GBoolean(C.g_hash_table_iter_next(hashIter, &key, &value))) { - 
C._g_printerr_onearg(C.CString("Unmatched SkipList path: "), C._gptr_to_str(key)) - } - err = errors.New("Unmatched SkipList paths") - C.free(unsafe.Pointer(hashIter)) - C.free(unsafe.Pointer(key)) - C.free(unsafe.Pointer(value)) - goto out - } - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_mtree(repo.native(), mtree, &root, cancellable, &cerr))) { - goto out - } - - if options.SkipIfUnchanged && strings.Compare(options.Parent, "") != 0 { - var parentRoot *C.GFile - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo.native(), cparent, &parentRoot, nil, cancellable, &cerr))) { - C.free(unsafe.Pointer(parentRoot)) - goto out - } - - if glib.GoBool(glib.GBoolean(C.g_file_equal(root, parentRoot))) { - skipCommit = true - } - C.free(unsafe.Pointer(parentRoot)) - } - - if !skipCommit { - var timestamp C.guint64 - - if options.Timestamp.IsZero() { - var now *C.GDateTime = C.g_date_time_new_now_utc() - timestamp = (C.guint64)(C.g_date_time_to_unix(now)) - C.g_date_time_unref(now) - - cerr = nil - ret := C.ostree_repo_write_commit(repo.native(), cparent, csubject, cbody, metadata, C._ostree_repo_file(root), &ccommitChecksum, cancellable, &cerr) - if !glib.GoBool(glib.GBoolean(ret)) { - goto out - } - } else { - timestamp = (C.guint64)(options.Timestamp.Unix()) - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_write_commit_with_time(repo.native(), cparent, csubject, cbody, - metadata, C._ostree_repo_file(root), timestamp, &ccommitChecksum, cancellable, &cerr))) { - goto out - } - } - - if detachedMetadata != nil { - C.ostree_repo_write_commit_detached_metadata(repo.native(), ccommitChecksum, detachedMetadata, cancellable, &cerr) - } - - if len(options.GpgSign) != 0 { - for key := range options.GpgSign { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_sign_commit(repo.native(), (*C.gchar)(ccommitChecksum), (*C.gchar)(C.CString(options.GpgSign[key])), (*C.gchar)(C.CString(options.GpgHomedir)), cancellable, &cerr))) { - goto out - } - } - } - - if strings.Compare(branch, "") != 0 { - C.ostree_repo_transaction_set_ref(repo.native(), nil, cbranch, ccommitChecksum) - } else if !options.Orphan { - goto out - } else { - // TODO: Looks like I forgot to implement this. 
- } else { - ccommitChecksum = C.CString(options.Parent) - } - - return C.GoString(ccommitChecksum), nil -out: - if repo.native() != nil { - C.ostree_repo_abort_transaction(repo.native(), cancellable, nil) - //C.free(unsafe.Pointer(repo.native())) - } - if modifier != nil { - C.ostree_repo_commit_modifier_unref(modifier) - } - if err != nil { - return "", err - } - return "", generateError(cerr) -} - -// Parse an array of key value pairs of the format KEY=VALUE and add them to a GVariant -func parseKeyValueStrings(pairs []string) (*C.GVariant, error) { - builder := C.g_variant_builder_new(C._g_variant_type(C.CString("a{sv}"))) - defer C.g_variant_builder_unref(builder) - - for iter := range pairs { - index := strings.Index(pairs[iter], "=") - if index <= 0 { - var buffer bytes.Buffer - buffer.WriteString("Missing '=' in KEY=VALUE metadata '%s'") - buffer.WriteString(pairs[iter]) - return nil, errors.New(buffer.String()) - } - - key := C.CString(pairs[iter][:index]) - value := C.CString(pairs[iter][index+1:]) - - valueVariant := C.g_variant_new_string((*C.gchar)(value)) - - C._g_variant_builder_add_twoargs(builder, C.CString("{sv}"), key, valueVariant) - } - - metadata := C.g_variant_builder_end(builder) - return C.g_variant_ref_sink(metadata), nil -} - -// Parse a file line by line and handle the line with the handleLineFunc -func parseFileByLine(path string, fn handleLineFunc, table *glib.GHashTable, cancellable *C.GCancellable) error { - var contents *C.char - var file *glib.GFile - var lines []string - var gerr = glib.NewGError() - cerr := (*C.GError)(gerr.Ptr()) - - file = glib.ToGFile(unsafe.Pointer(C.g_file_new_for_path(C.CString(path)))) - if !glib.GoBool(glib.GBoolean(C.g_file_load_contents((*C.GFile)(file.Ptr()), cancellable, &contents, nil, nil, &cerr))) { - return generateError(cerr) - } - - lines = strings.Split(C.GoString(contents), "\n") - for line := range lines { - if strings.Compare(lines[line], "") == 0 { - continue - } - - if err := fn(lines[line], table); err != nil { - return generateError(cerr) - } - } - return nil -} - -// Handle an individual line from a StatOverride file -func handleStatOverrideLine(line string, table *glib.GHashTable) error { - var space int - var modeAdd C.guint - - if space = strings.IndexRune(line, ' '); space == -1 { - return errors.New("Malformed StatOverrideFile (no space found)") - } - - modeAdd = (C.guint)(C.g_ascii_strtod((*C.gchar)(C.CString(line)), nil)) - C.g_hash_table_insert((*C.GHashTable)(table.Ptr()), C.gpointer(C.g_strdup((*C.gchar)(C.CString(line[space+1:])))), C._guint_to_pointer(modeAdd)) - - return nil -} - -// Handle an individual line from a Skiplist file -func handleSkipListline(line string, table *glib.GHashTable) error { - C.g_hash_table_add((*C.GHashTable)(table.Ptr()), C.gpointer( C.g_strdup((*C.gchar)(C.CString(line))))) - - return nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/config.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/diff.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go
b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/export.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/fsck.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/gpgsign.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go deleted file mode 100644 index 9f2527927f..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/init.go +++ /dev/null @@ -1,91 +0,0 @@ -package otbuiltin - -import ( - "errors" - "strings" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// Declare variables for options -var initOpts initOptions - -// Contains all of the options for initializing an ostree repo -type initOptions struct { - Mode string // either bare, archive-z2, or bare-user - - repoMode C.OstreeRepoMode -} - -// Instantiates and returns an initOptions struct with default values set -func NewInitOptions() initOptions { - io := initOptions{} - io.Mode = "bare" - io.repoMode = C.OSTREE_REPO_MODE_BARE - return io -} - -// Initializes a new ostree repository at the given path. Returns true -// if the repo exists at the location, regardless of whether it was initialized -// by the function or if it already existed. 
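
A plausible use of this Init API, per the signatures in the deleted file; the repo path is hypothetical. Note that Init reports true together with a "File exists" error when the repository was already there.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/ostreedev/ostree-go/pkg/otbuiltin"
    )

    func main() {
    	opts := otbuiltin.NewInitOptions()
    	opts.Mode = "archive-z2" // the deleted code also accepts "bare" (default) and "bare-user"

    	exists, err := otbuiltin.Init("/srv/ostree/repo", opts) // example path
    	if err != nil && !exists {
    		log.Fatal(err) // exists==true with an error just means the repo already existed
    	}
    	fmt.Println("repo ready:", exists)
    }
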
Returns an error if the repo could -// not be initialized -func Init(path string, options initOptions) (bool, error) { - initOpts = options - err := parseMode() - if err != nil { - return false, err - } - - // Create a repo struct from the path - var cerr *C.GError - defer C.free(unsafe.Pointer(cerr)) - cpath := C.CString(path) - defer C.free(unsafe.Pointer(cpath)) - pathc := C.g_file_new_for_path(cpath) - defer C.g_object_unref(C.gpointer(pathc)) - crepo := C.ostree_repo_new(pathc) - - // If the repo exists in the filesystem, return an error but set exists to true - /* var exists C.gboolean = 0 - success := glib.GoBool(glib.GBoolean(C.ostree_repo_exists(crepo, &exists, &cerr))) - if exists != 0 { - err = errors.New("repository already exists") - return true, err - } else if !success { - return false, generateError(cerr) - }*/ - - cerr = nil - created := glib.GoBool(glib.GBoolean(C.ostree_repo_create(crepo, initOpts.repoMode, nil, &cerr))) - if !created { - err := generateError(cerr) - errString := err.Error() - if strings.Contains(errString, "File exists") { - return true, err - } - return false, err - } - return true, nil -} - -// Converts the mode string to a C.OSTREE_REPO_MODE enum value -func parseMode() error { - if strings.EqualFold(initOpts.Mode, "bare") { - initOpts.repoMode = C.OSTREE_REPO_MODE_BARE - } else if strings.EqualFold(initOpts.Mode, "bare-user") { - initOpts.repoMode = C.OSTREE_REPO_MODE_BARE_USER - } else if strings.EqualFold(initOpts.Mode, "archive-z2") { - initOpts.repoMode = C.OSTREE_REPO_MODE_ARCHIVE_Z2 - } else { - return errors.New("Invalid option for mode") - } - return nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go deleted file mode 100644 index 2ceea09257..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/log.go +++ /dev/null @@ -1,167 +0,0 @@ -package otbuiltin - -import ( - "fmt" - "strings" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// Declare variables for options -var logOpts logOptions - -// Set the format of the strings in the log -const formatString = "2006-01-02 03:04;05 -0700" - -// Struct for the various pieces of data in a log entry -type LogEntry struct { - Checksum []byte - Variant []byte - Timestamp time.Time - Subject string - Body string -} - -// Convert the log entry to a string -func (l LogEntry) String() string { - if len(l.Variant) == 0 { - return fmt.Sprintf("%s\n%s\n\n\t%s\n\n\t%s\n\n", l.Checksum, l.Timestamp, l.Subject, l.Body) - } - return fmt.Sprintf("%s\n%s\n\n", l.Checksum, l.Variant) -} - -type OstreeDumpFlags uint - -const ( - OSTREE_DUMP_NONE OstreeDumpFlags = 0 - OSTREE_DUMP_RAW OstreeDumpFlags = 1 << iota -) - -// Contains all of the options for initializing an ostree repo -type logOptions struct { - Raw bool // Show raw variant data -} - -//Instantiates and returns a logOptions struct with default values set -func NewLogOptions() logOptions { - return logOptions{} -} - -// Show the logs of a branch starting with a given commit or ref. 
Returns a -// slice of log entries on success and an error otherwise -func Log(repoPath, branch string, options logOptions) ([]LogEntry, error) { - // attempt to open the repository - repo, err := OpenRepo(repoPath) - if err != nil { - return nil, err - } - - cbranch := C.CString(branch) - defer C.free(unsafe.Pointer(cbranch)) - var checksum *C.char - defer C.free(unsafe.Pointer(checksum)) - var flags OstreeDumpFlags = OSTREE_DUMP_NONE - var cerr *C.GError - defer C.free(unsafe.Pointer(cerr)) - - if logOpts.Raw { - flags |= OSTREE_DUMP_RAW - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo.native(), cbranch, C.FALSE, &checksum, &cerr))) { - return nil, generateError(cerr) - } - - return logCommit(repo, checksum, false, flags) -} - -func logCommit(repo *Repo, checksum *C.char, isRecursive bool, flags OstreeDumpFlags) ([]LogEntry, error) { - var variant *C.GVariant - var parent *C.char - defer C.free(unsafe.Pointer(parent)) - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - entries := make([]LogEntry, 0, 1) - var err error - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, &variant, &cerr))) { - if isRecursive && glib.GoBool(glib.GBoolean(C.g_error_matches(cerr, C.g_io_error_quark(), C.G_IO_ERROR_NOT_FOUND))) { - return nil, nil - } - return entries, generateError(cerr) - } - - nextLogEntry := dumpLogObject(C.OSTREE_OBJECT_TYPE_COMMIT, checksum, variant, flags) - - // get the parent of this commit - parent = (*C.char)(C.ostree_commit_get_parent(variant)) - defer C.free(unsafe.Pointer(parent)) - if parent != nil { - entries, err = logCommit(repo, parent, true, flags) - if err != nil { - return nil, err - } - } - entries = append(entries, *nextLogEntry) - return entries, nil -} - -func dumpLogObject(objectType C.OstreeObjectType, checksum *C.char, variant *C.GVariant, flags OstreeDumpFlags) *LogEntry { - objLog := new(LogEntry) - objLog.Checksum = []byte(C.GoString(checksum)) - - if (flags & OSTREE_DUMP_RAW) != 0 { - dumpVariant(objLog, variant) - return objLog - } - - switch objectType { - case C.OSTREE_OBJECT_TYPE_COMMIT: - dumpCommit(objLog, variant, flags) - return objLog - default: - return objLog - } -} - -func dumpVariant(log *LogEntry, variant *C.GVariant) { - var byteswappedVariant *C.GVariant - - if C.G_BYTE_ORDER != C.G_BIG_ENDIAN { - byteswappedVariant = C.g_variant_byteswap(variant) - log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) - } else { - log.Variant = []byte(C.GoString((*C.char)(C.g_variant_print(byteswappedVariant, C.TRUE)))) - } -} - -func dumpCommit(log *LogEntry, variant *C.GVariant, flags OstreeDumpFlags) { - var subject, body *C.char - defer C.free(unsafe.Pointer(subject)) - defer C.free(unsafe.Pointer(body)) - var timestamp C.guint64 - - C._g_variant_get_commit_dump(variant, C.CString("(a{sv}aya(say)&s&stayay)"), &subject, &body, ×tamp) - - // Timestamp is now a Unix formatted timestamp as a guint64 - timestamp = C._guint64_from_be(timestamp) - log.Timestamp = time.Unix((int64)(timestamp), 0) - - if strings.Compare(C.GoString(subject), "") != 0 { - log.Subject = C.GoString(subject) - } - - if strings.Compare(C.GoString(body), "") != 0 { - log.Body = C.GoString(body) - } -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go deleted file mode 100644 index d43ea07c74..0000000000 --- 
a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/ls.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go deleted file mode 100644 index 8dfa40a55b..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/prune.go +++ /dev/null @@ -1,217 +0,0 @@ -package otbuiltin - -import ( - "bytes" - "errors" - "strconv" - "strings" - "time" - "unsafe" - - glib "github.com/ostreedev/ostree-go/pkg/glibobject" -) - -// #cgo pkg-config: ostree-1 -// #include -// #include -// #include -// #include "builtin.go.h" -import "C" - -// Declare global variable for options -var pruneOpts pruneOptions - -// Contains all of the options for pruning an ostree repo. Use -// NewPruneOptions() to initialize -type pruneOptions struct { - NoPrune bool // Only display unreachable objects; don't delete - RefsOnly bool // Only compute reachability via refs - DeleteCommit string // Specify a commit to delete - KeepYoungerThan time.Time // All commits older than this date will be pruned - Depth int // Only traverse depths (integer) parents for each commit (default: -1=infinite) - StaticDeltasOnly int // Change the behavior of --keep-younger-than and --delete-commit to prune only the static delta files -} - -// Instantiates and returns a pruneOptions struct with default values set -func NewPruneOptions() pruneOptions { - po := new(pruneOptions) - po.Depth = -1 - return *po -} - -// Search for unreachable objects in the repository given by repoPath. Removes the -// objects unless pruneOptions.NoPrune is specified -func Prune(repoPath string, options pruneOptions) (string, error) { - pruneOpts = options - // attempt to open the repository - repo, err := OpenRepo(repoPath) - if err != nil { - return "", err - } - - var pruneFlags C.OstreeRepoPruneFlags - var numObjectsTotal int - var numObjectsPruned int - var objSizeTotal uint64 - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - var cancellable *glib.GCancellable - - if !pruneOpts.NoPrune && !glib.GoBool(glib.GBoolean(C.ostree_repo_is_writable(repo.native(), &cerr))) { - return "", generateError(cerr) - } - - cerr = nil - if strings.Compare(pruneOpts.DeleteCommit, "") != 0 { - if pruneOpts.NoPrune { - return "", errors.New("Cannot specify both pruneOptions.DeleteCommit and pruneOptions.NoPrune") - } - - if pruneOpts.StaticDeltasOnly > 0 { - if glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), C.CString(pruneOpts.DeleteCommit), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return "", generateError(cerr) - } - } else if err = deleteCommit(repo, pruneOpts.DeleteCommit, cancellable); err != nil { - return "", err - } - } - - if !pruneOpts.KeepYoungerThan.IsZero() { - if pruneOpts.NoPrune { - return "", errors.New("Cannot specify both pruneOptions.KeepYoungerThan and pruneOptions.NoPrune") - } - - if err = pruneCommitsKeepYoungerThanDate(repo, pruneOpts.KeepYoungerThan, cancellable); err != nil { - return "", err - } - } - - if pruneOpts.RefsOnly { - pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_REFS_ONLY - } - if pruneOpts.NoPrune { - pruneFlags |= C.OSTREE_REPO_PRUNE_FLAGS_NO_PRUNE - } - - formattedFreedSize := C.GoString((*C.char)(C.g_format_size_full((C.guint64)(objSizeTotal), 0))) - - var buffer bytes.Buffer - - buffer.WriteString("Total objects: ") - buffer.WriteString(strconv.Itoa(numObjectsTotal)) - if numObjectsPruned == 0 { - buffer.WriteString("\nNo
unreachable objects") - } else if pruneOpts.NoPrune { - buffer.WriteString("\nWould delete: ") - buffer.WriteString(strconv.Itoa(numObjectsPruned)) - buffer.WriteString(" objects, freeing ") - buffer.WriteString(formattedFreedSize) - } else { - buffer.WriteString("\nDeleted ") - buffer.WriteString(strconv.Itoa(numObjectsPruned)) - buffer.WriteString(" objects, ") - buffer.WriteString(formattedFreedSize) - buffer.WriteString(" freed") - } - - return buffer.String(), nil -} - -// Delete an unreachable commit from the repo -func deleteCommit(repo *Repo, commitToDelete string, cancellable *glib.GCancellable) error { - var refs *glib.GHashTable - var hashIter glib.GHashTableIter - var hashkey, hashvalue C.gpointer - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - if glib.GoBool(glib.GBoolean(C.ostree_repo_list_refs(repo.native(), nil, (**C.GHashTable)(refs.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(refs.Ptr())) - for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &hashkey, &hashvalue) != 0 { - var ref string = C.GoString((*C.char)(hashkey)) - var commit string = C.GoString((*C.char)(hashvalue)) - if strings.Compare(commitToDelete, commit) == 0 { - var buffer bytes.Buffer - buffer.WriteString("Commit ") - buffer.WriteString(commitToDelete) - buffer.WriteString(" is referenced by ") - buffer.WriteString(ref) - return errors.New(buffer.String()) - } - } - - if err := enableTombstoneCommits(repo); err != nil { - return err - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, C.CString(commitToDelete), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - return nil -} - -// Prune commits but keep any younger than the given date regardless of whether they -// are reachable -func pruneCommitsKeepYoungerThanDate(repo *Repo, date time.Time, cancellable *glib.GCancellable) error { - var objects *glib.GHashTable - defer C.free(unsafe.Pointer(objects)) - var hashIter glib.GHashTableIter - var key, value C.gpointer - defer C.free(unsafe.Pointer(key)) - defer C.free(unsafe.Pointer(value)) - var gerr = glib.NewGError() - var cerr = (*C.GError)(gerr.Ptr()) - defer C.free(unsafe.Pointer(cerr)) - - if err := enableTombstoneCommits(repo); err != nil { - return err - } - - if !glib.GoBool(glib.GBoolean(C.ostree_repo_list_objects(repo.native(), C.OSTREE_REPO_LIST_OBJECTS_ALL, (**C.GHashTable)(objects.Ptr()), (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - - C.g_hash_table_iter_init((*C.GHashTableIter)(hashIter.Ptr()), (*C.GHashTable)(objects.Ptr())) - for C.g_hash_table_iter_next((*C.GHashTableIter)(hashIter.Ptr()), &key, &value) != 0 { - var serializedKey *glib.GVariant - defer C.free(unsafe.Pointer(serializedKey)) - var checksum *C.char - defer C.free(unsafe.Pointer(checksum)) - var objType C.OstreeObjectType - var commitTimestamp uint64 - var commit *glib.GVariant = nil - - C.ostree_object_name_deserialize((*C.GVariant)(serializedKey.Ptr()), &checksum, &objType) - - if objType != C.OSTREE_OBJECT_TYPE_COMMIT { - continue - } - - cerr = nil - if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (**C.GVariant)(commit.Ptr()), &cerr))) { - return generateError(cerr) - } - - commitTimestamp = 
(uint64)(C.ostree_commit_get_timestamp((*C.GVariant)(commit.Ptr()))) - if commitTimestamp < (uint64)(date.Unix()) { - cerr = nil - if pruneOpts.StaticDeltasOnly != 0 { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_prune_static_deltas(repo.native(), checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - } else { - if !glib.GoBool(glib.GBoolean(C.ostree_repo_delete_object(repo.native(), C.OSTREE_OBJECT_TYPE_COMMIT, checksum, (*C.GCancellable)(cancellable.Ptr()), &cerr))) { - return generateError(cerr) - } - } - } - } - - return nil -} diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pull.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/pulllocal.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/refs.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/remote.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/reset.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/revparse.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/show.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/staticdelta.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go deleted file mode 100644 index d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/summary.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go b/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go deleted file mode 100644 index 
d43ea07c74..0000000000 --- a/vendor/github.com/ostreedev/ostree-go/pkg/otbuiltin/trivialhttpd.go +++ /dev/null @@ -1 +0,0 @@ -package otbuiltin diff --git a/vendor/github.com/pquerna/ffjson/LICENSE b/vendor/github.com/pquerna/ffjson/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/github.com/pquerna/ffjson/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/pquerna/ffjson/NOTICE b/vendor/github.com/pquerna/ffjson/NOTICE deleted file mode 100644 index 405a49618b..0000000000 --- a/vendor/github.com/pquerna/ffjson/NOTICE +++ /dev/null @@ -1,8 +0,0 @@ -ffjson -Copyright (c) 2014, Paul Querna - -This product includes software developed by -Paul Querna (http://paul.querna.org/). - -Portions of this software were developed as -part of Go, Copyright (c) 2012 The Go Authors. \ No newline at end of file diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go deleted file mode 100644 index 7f63a8582d..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer.go +++ /dev/null @@ -1,421 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package v1 - -// Simple byte buffer for marshaling data. - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "unicode/utf8" -) - -type grower interface { - Grow(n int) -} - -type truncater interface { - Truncate(n int) - Reset() -} - -type bytesReader interface { - Bytes() []byte - String() string -} - -type runeWriter interface { - WriteRune(r rune) (n int, err error) -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -type lener interface { - Len() int -} - -type rewinder interface { - Rewind(n int) (err error) -} - -type encoder interface { - Encode(interface{}) error -} - -// TODO(pquerna): continue to reduce these interfaces - -type EncodingBuffer interface { - io.Writer - io.WriterTo - io.ByteWriter - stringWriter - truncater - grower - rewinder - encoder -} - -type DecodingBuffer interface { - io.ReadWriter - io.ByteWriter - stringWriter - runeWriter - truncater - grower - bytesReader - lener -} - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - runeBytes [utf8.UTFMax]byte // avoid allocation of slice on each WriteByte or Rune - encoder *json.Encoder - skipTrailingByte bool -} - -// ErrTooLarge is passed to panic if memory cannot be allocated to store data in a buffer. -var ErrTooLarge = errors.New("fflib.v1.Buffer: too large") - -// Bytes returns a slice of the contents of the unread portion of the buffer; -// len(b.Bytes()) == b.Len(). If the caller changes the contents of the -// returned slice, the contents of the buffer will change provided there -// are no intervening method calls on the Buffer. 
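The Buffer type being removed here follows the classic bytes.Buffer layout: one backing slice plus a read offset, with the live contents always buf[off:len(buf)]. Below is a minimal sketch of that invariant, using a hypothetical miniBuffer rather than the fflib API:

    package main

    import "fmt"

    // miniBuffer is an illustrative, stripped-down version of the invariant
    // the deleted fflib Buffer maintains: live contents are buf[off:len(buf)],
    // reads advance off, writes append at len(buf).
    type miniBuffer struct {
        buf []byte // contents are buf[off:len(buf)]
        off int    // next read position
    }

    func (b *miniBuffer) Write(p []byte) (int, error) {
        b.buf = append(b.buf, p...) // write at len(buf)
        return len(p), nil
    }

    func (b *miniBuffer) Read(p []byte) (int, error) {
        n := copy(p, b.buf[b.off:]) // read at off
        b.off += n
        return n, nil
    }

    func (b *miniBuffer) Len() int { return len(b.buf) - b.off }

    func main() {
        var b miniBuffer
        b.Write([]byte("hello"))
        tmp := make([]byte, 2)
        b.Read(tmp)
        fmt.Println(b.Len()) // 3: two bytes consumed, three unread
    }

Reads only advance off, so nothing is copied until the buffer decides to reclaim the dead prefix; that reclamation is what the grow and Truncate logic in the deleted file manages.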
-func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). -func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Truncate discards all but the first n unread bytes from the buffer. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - if n == 0 { - b.off = 0 - b.buf = b.buf[0:0] - } else { - b.buf = b.buf[0 : b.off+n] - } -} - -// Reset resets the buffer so it has no content. -// b.Reset() is the same as b.Truncate(0). -func (b *Buffer) Reset() { b.Truncate(0) } - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with ErrTooLarge. -func (b *Buffer) grow(n int) int { - // If we have no buffer, get one from the pool - m := b.Len() - if m == 0 { - if b.buf == nil { - b.buf = makeSlice(2 * n) - b.off = 0 - } else if b.off != 0 { - // If buffer is empty, reset to recover space. - b.Truncate(0) - } - } - if len(b.buf)+n > cap(b.buf) { - var buf []byte - if m+n <= cap(b.buf)/2 { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= cap(b.buf) to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf[:], b.buf[b.off:]) - buf = b.buf[:m] - } else { - // not enough space anywhere - buf = makeSlice(2*cap(b.buf) + n) - copy(buf, b.buf[b.off:]) - Pool(b.buf) - b.buf = buf - } - b.off = 0 - } - b.buf = b.buf[0 : b.off+m+n] - return b.off + m -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("bytes.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[0:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - if b.skipTrailingByte { - p = p[:len(p)-1] - } - m := b.grow(len(p)) - return copy(b.buf[m:], p), nil -} - -// WriteString appends the contents of s to the buffer, growing the buffer as -// needed. The return value n is the length of s; err is always nil. If the -// buffer becomes too large, WriteString will panic with ErrTooLarge. -func (b *Buffer) WriteString(s string) (n int, err error) { - m := b.grow(len(s)) - return copy(b.buf[m:], s), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const minRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. 
If the -// buffer becomes too large, ReadFrom will panic with ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - // If buffer is empty, reset to recover space. - if b.off >= len(b.buf) { - b.Truncate(0) - } - for { - if free := cap(b.buf) - len(b.buf); free < minRead { - // not enough space at end - newBuf := b.buf - if b.off+free < minRead { - // not enough space using beginning of buffer; - // double buffer capacity - newBuf = makeSlice(2*cap(b.buf) + minRead) - } - copy(newBuf, b.buf[b.off:]) - Pool(b.buf) - b.buf = newBuf[:len(b.buf)-b.off] - b.off = 0 - } - m, e := r.Read(b.buf[len(b.buf):cap(b.buf)]) - b.buf = b.buf[0 : len(b.buf)+m] - n += int64(m) - if e == io.EOF { - break - } - if e != nil { - return n, e - } - } - return n, nil // err is EOF, so return nil explicitly -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - if b.off < len(b.buf) { - nBytes := b.Len() - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("bytes.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Truncate(0) - return -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - m := b.grow(1) - b.buf[m] = c - return nil -} - -func (b *Buffer) Rewind(n int) error { - b.buf = b.buf[:len(b.buf)-n] - return nil -} - -func (b *Buffer) Encode(v interface{}) error { - if b.encoder == nil { - b.encoder = json.NewEncoder(b) - } - b.skipTrailingByte = true - err := b.encoder.Encode(v) - b.skipTrailingByte = false - return err -} - -// WriteRune appends the UTF-8 encoding of Unicode code point r to the -// buffer, returning its length and an error, which is always nil but is -// included to match bufio.Writer's WriteRune. The buffer is grown as needed; -// if it becomes too large, WriteRune will panic with ErrTooLarge. -func (b *Buffer) WriteRune(r rune) (n int, err error) { - if r < utf8.RuneSelf { - b.WriteByte(byte(r)) - return 1, nil - } - n = utf8.EncodeRune(b.runeBytes[0:], r) - b.Write(b.runeBytes[0:n]) - return n, nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. -func (b *Buffer) Read(p []byte) (n int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - if len(p) == 0 { - return - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - return -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. 
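The Encode method above leans on a quirk of encoding/json: json.Encoder.Encode always appends a newline after each value, so the deleted Buffer sets skipTrailingByte and lets its own Write drop the final byte. A standalone illustration of the quirk, using only the stdlib (not the deleted type):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        var buf bytes.Buffer
        enc := json.NewEncoder(&buf)
        enc.Encode(map[string]int{"a": 1}) // Encoder.Encode always appends '\n'
        fmt.Printf("%q\n", buf.String())   // "{\"a\":1}\n"

        // The deleted Buffer.Encode instead drops the trailing byte inside
        // Write while skipTrailingByte is set, so generated marshalers can
        // keep appending without stray newlines. Trimming after the fact
        // gives the same result:
        out := bytes.TrimSuffix(buf.Bytes(), []byte("\n"))
        fmt.Printf("%q\n", out) // "{\"a\":1}"
    }

The in-Write trick appears to assume the encoder flushes each value in a single Write call, which the stdlib encoder does; hence the flag is set only for the duration of Encode.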
-func (b *Buffer) Next(n int) []byte { - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (c byte, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, io.EOF - } - c = b.buf[b.off] - b.off++ - return c, nil -} - -// ReadRune reads and returns the next UTF-8-encoded -// Unicode code point from the buffer. -// If no bytes are available, the error returned is io.EOF. -// If the bytes are an erroneous UTF-8 encoding, it -// consumes one byte and returns U+FFFD, 1. -func (b *Buffer) ReadRune() (r rune, size int, err error) { - if b.off >= len(b.buf) { - // Buffer is empty, reset to recover space. - b.Truncate(0) - return 0, 0, io.EOF - } - c := b.buf[b.off] - if c < utf8.RuneSelf { - b.off++ - return rune(c), 1, nil - } - r, n := utf8.DecodeRune(b.buf[b.off:]) - b.off += n - return r, n, nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) - return -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - return line, err -} - -// ReadString reads until the first occurrence of delim in the input, -// returning a string containing the data up to and including the delimiter. -// If ReadString encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadString returns err != nil if and only if the returned data does not end -// in delim. -func (b *Buffer) ReadString(delim byte) (line string, err error) { - slice, err := b.readSlice(delim) - return string(slice), err -} - -// NewBuffer creates and initializes a new Buffer using buf as its initial -// contents. It is intended to prepare a Buffer to read existing data. It -// can also be used to size the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } - -// NewBufferString creates and initializes a new Buffer using string s as its -// initial contents. It is intended to prepare a buffer to read an existing -// string. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. 
-func NewBufferString(s string) *Buffer { - return &Buffer{buf: []byte(s)} -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go deleted file mode 100644 index b84af6ff96..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go1.3 - -package v1 - -// Stub version of buffer_pool.go for Go 1.2, which doesn't have sync.Pool. - -func Pool(b []byte) {} - -func makeSlice(n int) []byte { - return make([]byte, n) -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go b/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go deleted file mode 100644 index a021c57cf4..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.3 - -package v1 - -// Allocation pools for Buffers. - -import "sync" - -var pools [14]sync.Pool -var pool64 *sync.Pool - -func init() { - var i uint - // TODO(pquerna): add science here around actual pool sizes. - for i = 6; i < 20; i++ { - n := 1 << i - pools[poolNum(n)].New = func() interface{} { return make([]byte, 0, n) } - } - pool64 = &pools[0] -} - -// This returns the pool number that will give a buffer of -// at least 'i' bytes. -func poolNum(i int) int { - // TODO(pquerna): convert to log2 w/ bsr asm instruction: - // - if i <= 64 { - return 0 - } else if i <= 128 { - return 1 - } else if i <= 256 { - return 2 - } else if i <= 512 { - return 3 - } else if i <= 1024 { - return 4 - } else if i <= 2048 { - return 5 - } else if i <= 4096 { - return 6 - } else if i <= 8192 { - return 7 - } else if i <= 16384 { - return 8 - } else if i <= 32768 { - return 9 - } else if i <= 65536 { - return 10 - } else if i <= 131072 { - return 11 - } else if i <= 262144 { - return 12 - } else if i <= 524288 { - return 13 - } else { - return -1 - } -} - -// Send a buffer to the Pool to reuse for other instances. -// You may no longer utilize the content of the buffer, since it may be used -// by other goroutines. -func Pool(b []byte) { - if b == nil { - return - } - c := cap(b) - - // Our smallest buffer is 64 bytes, so we discard smaller buffers. - if c < 64 { - return - } - - // We need to put the incoming buffer into the NEXT buffer, - // since a buffer guarantees AT LEAST the number of bytes available - // that is the top of this buffer. - // That is the reason for dividing the cap by 2, so it gets into the NEXT bucket. - // We add 2 to avoid rounding down if size is exactly power of 2. - pn := poolNum((c + 2) >> 1) - if pn != -1 { - pools[pn].Put(b[0:0]) - } - // if we didn't have a slot for this []byte, we just drop it and let the GC - // take care of it. -} - -// makeSlice allocates a slice of size n -- it will attempt to use a pool'ed -// instance whenever possible. 
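poolNum above is a hand-unrolled bucket search: bucket 0 serves capacities up to 64 bytes (2^6), each later bucket doubles, and anything past 2^19 is unpooled. The same mapping can be computed with math/bits; a sketch, where bucketFor is an illustrative name and not part of the package:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // bucketFor mirrors the if/else ladder in poolNum: bucket 0 holds
    // capacities up to 64 (2^6), bucket 13 up to 524288 (2^19), -1 otherwise.
    func bucketFor(n int) int {
        if n <= 64 {
            return 0
        }
        if n > 1<<19 {
            return -1
        }
        return bits.Len(uint(n-1)) - 6 // smallest power of two >= n, offset by the 2^6 base
    }

    func main() {
        for _, n := range []int{1, 64, 65, 128, 129, 524288, 524289} {
            fmt.Println(n, bucketFor(n)) // 0, 0, 1, 1, 2, 13, -1
        }
    }

Pool's (c + 2) >> 1 dance is the inverse concern: a recycled slice must land in a bucket whose size guarantee it can actually satisfy, so its capacity is halved (rounding up) to file it one bucket down.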
-func makeSlice(n int) []byte {
-	if n <= 64 {
-		return pool64.Get().([]byte)[0:n]
-	}
-
-	pn := poolNum(n)
-
-	if pn != -1 {
-		return pools[pn].Get().([]byte)[0:n]
-	} else {
-		return make([]byte, n)
-	}
-}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go b/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
deleted file mode 100644
index 08477409ac..0000000000
--- a/vendor/github.com/pquerna/ffjson/fflib/v1/bytenum.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Copyright 2014 Paul Querna
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-/* Portions of this file are based on Go stdlib's strconv/iota.go */
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package v1
-
-import (
-	"github.com/pquerna/ffjson/fflib/v1/internal"
-)
-
-func ParseFloat(s []byte, bitSize int) (f float64, err error) {
-	return internal.ParseFloat(s, bitSize)
-}
-
-// ParseUint is like ParseInt but for unsigned numbers, and operating on []byte
-func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) {
-	if len(s) == 1 {
-		switch s[0] {
-		case '0':
-			return 0, nil
-		case '1':
-			return 1, nil
-		case '2':
-			return 2, nil
-		case '3':
-			return 3, nil
-		case '4':
-			return 4, nil
-		case '5':
-			return 5, nil
-		case '6':
-			return 6, nil
-		case '7':
-			return 7, nil
-		case '8':
-			return 8, nil
-		case '9':
-			return 9, nil
-		}
-	}
-	return internal.ParseUint(s, base, bitSize)
-}
-
-func ParseInt(s []byte, base int, bitSize int) (i int64, err error) {
-	if len(s) == 1 {
-		switch s[0] {
-		case '0':
-			return 0, nil
-		case '1':
-			return 1, nil
-		case '2':
-			return 2, nil
-		case '3':
-			return 3, nil
-		case '4':
-			return 4, nil
-		case '5':
-			return 5, nil
-		case '6':
-			return 6, nil
-		case '7':
-			return 7, nil
-		case '8':
-			return 8, nil
-		case '9':
-			return 9, nil
-		}
-	}
-	return internal.ParseInt(s, base, bitSize)
-}
diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go b/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
deleted file mode 100644
index 069df7a02a..0000000000
--- a/vendor/github.com/pquerna/ffjson/fflib/v1/decimal.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Multiprecision decimal numbers.
-// For floating-point formatting only; not general purpose.
-// Only operations are assign and (binary) left/right shift.
-// Can do binary floating point in multiprecision decimal precisely
-// because 2 divides 10; cannot do decimal floating point
-// in multiprecision binary precisely.
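The "2 divides 10" remark is the crux: every binary fraction m/2^k has a finite decimal expansion, so the fixed digit array in the decimal type below can hold any float64 value exactly, while the reverse direction necessarily rounds. A quick, stdlib-only demonstration with math/big:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // "2 divides 10": every binary fraction m/2^k terminates in decimal.
        x := new(big.Rat).SetFrac64(1, 1<<10) // 1/1024 = 1/2^10
        fmt.Println(x.FloatString(10))        // 0.0009765625 (exact)

        // The reverse is false: 1/10 has no finite binary expansion, so
        // parsing "0.1" into a 53-bit significand must round.
        f, _, _ := big.ParseFloat("0.1", 10, 53, big.ToNearestEven)
        fmt.Println(f.Text('g', 25)) // slightly above 0.1
    }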
-
-package v1
-
-type decimal struct {
-	d     [800]byte // digits
-	nd    int       // number of digits used
-	dp    int       // decimal point
-	neg   bool
-	trunc bool // discarded nonzero digits beyond d[:nd]
-}
-
-func (a *decimal) String() string {
-	n := 10 + a.nd
-	if a.dp > 0 {
-		n += a.dp
-	}
-	if a.dp < 0 {
-		n += -a.dp
-	}
-
-	buf := make([]byte, n)
-	w := 0
-	switch {
-	case a.nd == 0:
-		return "0"
-
-	case a.dp <= 0:
-		// zeros fill space between decimal point and digits
-		buf[w] = '0'
-		w++
-		buf[w] = '.'
-		w++
-		w += digitZero(buf[w : w+-a.dp])
-		w += copy(buf[w:], a.d[0:a.nd])
-
-	case a.dp < a.nd:
-		// decimal point in middle of digits
-		w += copy(buf[w:], a.d[0:a.dp])
-		buf[w] = '.'
-		w++
-		w += copy(buf[w:], a.d[a.dp:a.nd])
-
-	default:
-		// zeros fill space between digits and decimal point
-		w += copy(buf[w:], a.d[0:a.nd])
-		w += digitZero(buf[w : w+a.dp-a.nd])
-	}
-	return string(buf[0:w])
-}
-
-func digitZero(dst []byte) int {
-	for i := range dst {
-		dst[i] = '0'
-	}
-	return len(dst)
-}
-
-// trim trailing zeros from number.
-// (They are meaningless; the decimal point is tracked
-// independent of the number of digits.)
-func trim(a *decimal) {
-	for a.nd > 0 && a.d[a.nd-1] == '0' {
-		a.nd--
-	}
-	if a.nd == 0 {
-		a.dp = 0
-	}
-}
-
-// Assign v to a.
-func (a *decimal) Assign(v uint64) {
-	var buf [24]byte
-
-	// Write reversed decimal in buf.
-	n := 0
-	for v > 0 {
-		v1 := v / 10
-		v -= 10 * v1
-		buf[n] = byte(v + '0')
-		n++
-		v = v1
-	}
-
-	// Reverse again to produce forward decimal in a.d.
-	a.nd = 0
-	for n--; n >= 0; n-- {
-		a.d[a.nd] = buf[n]
-		a.nd++
-	}
-	a.dp = a.nd
-	trim(a)
-}
-
-// Maximum shift that we can do in one pass without overflow.
-// Signed int has 31 bits, and we have to be able to accommodate 9<<k.
-const maxShift = 27
-
-// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow.
-func rightShift(a *decimal, k uint) {
-	r := 0 // read pointer
-	w := 0 // write pointer
-
-	// Pick up enough leading digits to cover first shift.
-	n := 0
-	for ; n>>k == 0; r++ {
-		if r >= a.nd {
-			if n == 0 {
-				// a == 0; shouldn't get here, but handle anyway.
-				a.nd = 0
-				return
-			}
-			for n>>k == 0 {
-				n = n * 10
-				r++
-			}
-			break
-		}
-		c := int(a.d[r])
-		n = n*10 + c - '0'
-	}
-	a.dp -= r - 1
-
-	// Pick up a digit, put down a digit.
-	for ; r < a.nd; r++ {
-		c := int(a.d[r])
-		dig := n >> k
-		n -= dig << k
-		a.d[w] = byte(dig + '0')
-		w++
-		n = n*10 + c - '0'
-	}
-
-	// Put down extra digits.
-	for n > 0 {
-		dig := n >> k
-		n -= dig << k
-		if w < len(a.d) {
-			a.d[w] = byte(dig + '0')
-			w++
-		} else if dig > 0 {
-			a.trunc = true
-		}
-		n = n * 10
-	}
-
-	a.nd = w
-	trim(a)
-}
-
-// Cheat sheet for left shift: table indexed by shift count giving
-// number of new digits that will be introduced by that shift.
-//
-// For example, leftcheats[4] = {2, "625"}. That means that
-// if we are shifting by 4 (multiplying by 16), it will add 2 digits
-// when the string prefix is "625" through "999", and one fewer digit
-// if the string prefix is "000" through "624".
-//
-// Credit for this trick goes to Ken.
-
-type leftCheat struct {
-	delta  int    // number of new digits
-	cutoff string // minus one digit if original < a.
-}
-
-var leftcheats = []leftCheat{
-	// Leading digits of 1/2^i = 5^i.
-	// 5^23 is not an exact 64-bit floating point number,
-	// so have to use bc for the math.
- /* - seq 27 | sed 's/^/5^/' | bc | - awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," } - { - log2 = log(2)/log(10) - printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n", - int(log2*NR+1), $0, 2**NR) - }' - */ - {0, ""}, - {1, "5"}, // * 2 - {1, "25"}, // * 4 - {1, "125"}, // * 8 - {2, "625"}, // * 16 - {2, "3125"}, // * 32 - {2, "15625"}, // * 64 - {3, "78125"}, // * 128 - {3, "390625"}, // * 256 - {3, "1953125"}, // * 512 - {4, "9765625"}, // * 1024 - {4, "48828125"}, // * 2048 - {4, "244140625"}, // * 4096 - {4, "1220703125"}, // * 8192 - {5, "6103515625"}, // * 16384 - {5, "30517578125"}, // * 32768 - {5, "152587890625"}, // * 65536 - {6, "762939453125"}, // * 131072 - {6, "3814697265625"}, // * 262144 - {6, "19073486328125"}, // * 524288 - {7, "95367431640625"}, // * 1048576 - {7, "476837158203125"}, // * 2097152 - {7, "2384185791015625"}, // * 4194304 - {7, "11920928955078125"}, // * 8388608 - {8, "59604644775390625"}, // * 16777216 - {8, "298023223876953125"}, // * 33554432 - {8, "1490116119384765625"}, // * 67108864 - {9, "7450580596923828125"}, // * 134217728 -} - -// Is the leading prefix of b lexicographically less than s? -func prefixIsLessThan(b []byte, s string) bool { - for i := 0; i < len(s); i++ { - if i >= len(b) { - return true - } - if b[i] != s[i] { - return b[i] < s[i] - } - } - return false -} - -// Binary shift left (/ 2) by k bits. k <= maxShift to avoid overflow. -func leftShift(a *decimal, k uint) { - delta := leftcheats[k].delta - if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { - delta-- - } - - r := a.nd // read index - w := a.nd + delta // write index - n := 0 - - // Pick up a digit, put down a digit. - for r--; r >= 0; r-- { - n += (int(a.d[r]) - '0') << k - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - // Put down extra digits. - for n > 0 { - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - a.nd += delta - if a.nd >= len(a.d) { - a.nd = len(a.d) - } - a.dp += delta - trim(a) -} - -// Binary shift left (k > 0) or right (k < 0). -func (a *decimal) Shift(k int) { - switch { - case a.nd == 0: - // nothing to do: a == 0 - case k > 0: - for k > maxShift { - leftShift(a, maxShift) - k -= maxShift - } - leftShift(a, uint(k)) - case k < 0: - for k < -maxShift { - rightShift(a, maxShift) - k += maxShift - } - rightShift(a, uint(-k)) - } -} - -// If we chop a at nd digits, should we round up? -func shouldRoundUp(a *decimal, nd int) bool { - if nd < 0 || nd >= a.nd { - return false - } - if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even - // if we truncated, a little higher than what's recorded - always round up - if a.trunc { - return true - } - return nd > 0 && (a.d[nd-1]-'0')%2 != 0 - } - // not halfway - digit tells all - return a.d[nd] >= '5' -} - -// Round a to nd digits (or fewer). -// If nd is zero, it means we're rounding -// just to the left of the digits, as in -// 0.09 -> 0.1. -func (a *decimal) Round(nd int) { - if nd < 0 || nd >= a.nd { - return - } - if shouldRoundUp(a, nd) { - a.RoundUp(nd) - } else { - a.RoundDown(nd) - } -} - -// Round a down to nd digits (or fewer). -func (a *decimal) RoundDown(nd int) { - if nd < 0 || nd >= a.nd { - return - } - a.nd = nd - trim(a) -} - -// Round a up to nd digits (or fewer). 
-func (a *decimal) RoundUp(nd int) { - if nd < 0 || nd >= a.nd { - return - } - - // round up - for i := nd - 1; i >= 0; i-- { - c := a.d[i] - if c < '9' { // can stop after this digit - a.d[i]++ - a.nd = i + 1 - return - } - } - - // Number is all 9s. - // Change to single 1 with adjusted decimal point. - a.d[0] = '1' - a.nd = 1 - a.dp++ -} - -// Extract integer part, rounded appropriately. -// No guarantees about overflow. -func (a *decimal) RoundedInteger() uint64 { - if a.dp > 20 { - return 0xFFFFFFFFFFFFFFFF - } - var i int - n := uint64(0) - for i = 0; i < a.dp && i < a.nd; i++ { - n = n*10 + uint64(a.d[i]-'0') - } - for ; i < a.dp; i++ { - n *= 10 - } - if shouldRoundUp(a, a.dp) { - n++ - } - return n -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go deleted file mode 100644 index 508ddc6bed..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/extfloat.go +++ /dev/null @@ -1,668 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package v1 - -// An extFloat represents an extended floating-point number, with more -// precision than a float64. It does not try to save bits: the -// number represented by the structure is mant*(2^exp), with a negative -// sign if neg is true. -type extFloat struct { - mant uint64 - exp int - neg bool -} - -// Powers of ten taken from double-conversion library. -// http://code.google.com/p/double-conversion/ -const ( - firstPowerOfTen = -348 - stepPowerOfTen = 8 -) - -var smallPowersOfTen = [...]extFloat{ - {1 << 63, -63, false}, // 1 - {0xa << 60, -60, false}, // 1e1 - {0x64 << 57, -57, false}, // 1e2 - {0x3e8 << 54, -54, false}, // 1e3 - {0x2710 << 50, -50, false}, // 1e4 - {0x186a0 << 47, -47, false}, // 1e5 - {0xf4240 << 44, -44, false}, // 1e6 - {0x989680 << 40, -40, false}, // 1e7 -} - -var powersOfTen = [...]extFloat{ - {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 - {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 - {0x8b16fb203055ac76, -1166, false}, // 10^-332 - {0xcf42894a5dce35ea, -1140, false}, // 10^-324 - {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 - {0xe61acf033d1a45df, -1087, false}, // 10^-308 - {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 - {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 - {0xbe5691ef416bd60c, -1007, false}, // 10^-284 - {0x8dd01fad907ffc3c, -980, false}, // 10^-276 - {0xd3515c2831559a83, -954, false}, // 10^-268 - {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 - {0xea9c227723ee8bcb, -901, false}, // 10^-252 - {0xaecc49914078536d, -874, false}, // 10^-244 - {0x823c12795db6ce57, -847, false}, // 10^-236 - {0xc21094364dfb5637, -821, false}, // 10^-228 - {0x9096ea6f3848984f, -794, false}, // 10^-220 - {0xd77485cb25823ac7, -768, false}, // 10^-212 - {0xa086cfcd97bf97f4, -741, false}, // 10^-204 - {0xef340a98172aace5, -715, false}, // 10^-196 - {0xb23867fb2a35b28e, -688, false}, // 10^-188 - {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 - {0xc5dd44271ad3cdba, -635, false}, // 10^-172 - {0x936b9fcebb25c996, -608, false}, // 10^-164 - {0xdbac6c247d62a584, -582, false}, // 10^-156 - {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 - {0xf3e2f893dec3f126, -529, false}, // 10^-140 - {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 - {0x87625f056c7c4a8b, -475, false}, // 10^-124 - {0xc9bcff6034c13053, -449, false}, // 10^-116 - {0x964e858c91ba2655, -422, false}, // 10^-108 - {0xdff9772470297ebd, -396, false}, // 10^-100 - 
{0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92 - {0xf8a95fcf88747d94, -343, false}, // 10^-84 - {0xb94470938fa89bcf, -316, false}, // 10^-76 - {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 - {0xcdb02555653131b6, -263, false}, // 10^-60 - {0x993fe2c6d07b7fac, -236, false}, // 10^-52 - {0xe45c10c42a2b3b06, -210, false}, // 10^-44 - {0xaa242499697392d3, -183, false}, // 10^-36 - {0xfd87b5f28300ca0e, -157, false}, // 10^-28 - {0xbce5086492111aeb, -130, false}, // 10^-20 - {0x8cbccc096f5088cc, -103, false}, // 10^-12 - {0xd1b71758e219652c, -77, false}, // 10^-4 - {0x9c40000000000000, -50, false}, // 10^4 - {0xe8d4a51000000000, -24, false}, // 10^12 - {0xad78ebc5ac620000, 3, false}, // 10^20 - {0x813f3978f8940984, 30, false}, // 10^28 - {0xc097ce7bc90715b3, 56, false}, // 10^36 - {0x8f7e32ce7bea5c70, 83, false}, // 10^44 - {0xd5d238a4abe98068, 109, false}, // 10^52 - {0x9f4f2726179a2245, 136, false}, // 10^60 - {0xed63a231d4c4fb27, 162, false}, // 10^68 - {0xb0de65388cc8ada8, 189, false}, // 10^76 - {0x83c7088e1aab65db, 216, false}, // 10^84 - {0xc45d1df942711d9a, 242, false}, // 10^92 - {0x924d692ca61be758, 269, false}, // 10^100 - {0xda01ee641a708dea, 295, false}, // 10^108 - {0xa26da3999aef774a, 322, false}, // 10^116 - {0xf209787bb47d6b85, 348, false}, // 10^124 - {0xb454e4a179dd1877, 375, false}, // 10^132 - {0x865b86925b9bc5c2, 402, false}, // 10^140 - {0xc83553c5c8965d3d, 428, false}, // 10^148 - {0x952ab45cfa97a0b3, 455, false}, // 10^156 - {0xde469fbd99a05fe3, 481, false}, // 10^164 - {0xa59bc234db398c25, 508, false}, // 10^172 - {0xf6c69a72a3989f5c, 534, false}, // 10^180 - {0xb7dcbf5354e9bece, 561, false}, // 10^188 - {0x88fcf317f22241e2, 588, false}, // 10^196 - {0xcc20ce9bd35c78a5, 614, false}, // 10^204 - {0x98165af37b2153df, 641, false}, // 10^212 - {0xe2a0b5dc971f303a, 667, false}, // 10^220 - {0xa8d9d1535ce3b396, 694, false}, // 10^228 - {0xfb9b7cd9a4a7443c, 720, false}, // 10^236 - {0xbb764c4ca7a44410, 747, false}, // 10^244 - {0x8bab8eefb6409c1a, 774, false}, // 10^252 - {0xd01fef10a657842c, 800, false}, // 10^260 - {0x9b10a4e5e9913129, 827, false}, // 10^268 - {0xe7109bfba19c0c9d, 853, false}, // 10^276 - {0xac2820d9623bf429, 880, false}, // 10^284 - {0x80444b5e7aa7cf85, 907, false}, // 10^292 - {0xbf21e44003acdd2d, 933, false}, // 10^300 - {0x8e679c2f5e44ff8f, 960, false}, // 10^308 - {0xd433179d9c8cb841, 986, false}, // 10^316 - {0x9e19db92b4e31ba9, 1013, false}, // 10^324 - {0xeb96bf6ebadf77d9, 1039, false}, // 10^332 - {0xaf87023b9bf0ee6b, 1066, false}, // 10^340 -} - -// floatBits returns the bits of the float64 that best approximates -// the extFloat passed as receiver. Overflow is set to true if -// the resulting float64 is ±Inf. -func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { - f.Normalize() - - exp := f.exp + 63 - - // Exponent too small. - if exp < flt.bias+1 { - n := flt.bias + 1 - exp - f.mant >>= uint(n) - exp += n - } - - // Extract 1+flt.mantbits bits from the 64-bit mantissa. - mant := f.mant >> (63 - flt.mantbits) - if f.mant&(1<<(62-flt.mantbits)) != 0 { - // Round up. - mant += 1 - } - - // Rounding might have added a bit; shift down. - if mant == 2<>= 1 - exp++ - } - - // Infinities. 
- if exp-flt.bias >= 1<>uint(-f.exp))<>= uint(-f.exp) - f.exp = 0 - return *f, *f - } - expBiased := exp - flt.bias - - upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg} - if mant != 1<>(64-32) == 0 { - mant <<= 32 - exp -= 32 - } - if mant>>(64-16) == 0 { - mant <<= 16 - exp -= 16 - } - if mant>>(64-8) == 0 { - mant <<= 8 - exp -= 8 - } - if mant>>(64-4) == 0 { - mant <<= 4 - exp -= 4 - } - if mant>>(64-2) == 0 { - mant <<= 2 - exp -= 2 - } - if mant>>(64-1) == 0 { - mant <<= 1 - exp -= 1 - } - shift = uint(f.exp - exp) - f.mant, f.exp = mant, exp - return -} - -// Multiply sets f to the product f*g: the result is correctly rounded, -// but not normalized. -func (f *extFloat) Multiply(g extFloat) { - fhi, flo := f.mant>>32, uint64(uint32(f.mant)) - ghi, glo := g.mant>>32, uint64(uint32(g.mant)) - - // Cross products. - cross1 := fhi * glo - cross2 := flo * ghi - - // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo - f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) - rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) - // Round up. - rem += (1 << 31) - - f.mant += (rem >> 32) - f.exp = f.exp + g.exp + 64 -} - -var uint64pow10 = [...]uint64{ - 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} - -// AssignDecimal sets f to an approximate value mantissa*10^exp. It -// returns true if the value represented by f is guaranteed to be the -// best approximation of d after being rounded to a float64 or -// float32 depending on flt. -func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { - const uint64digits = 19 - const errorscale = 8 - errors := 0 // An upper bound for error, computed in errorscale*ulp. - if trunc { - // the decimal number was truncated. - errors += errorscale / 2 - } - - f.mant = mantissa - f.exp = 0 - f.neg = neg - - // Multiply by powers of ten. - i := (exp10 - firstPowerOfTen) / stepPowerOfTen - if exp10 < firstPowerOfTen || i >= len(powersOfTen) { - return false - } - adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen - - // We multiply by exp%step - if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { - // We can multiply the mantissa exactly. - f.mant *= uint64pow10[adjExp] - f.Normalize() - } else { - f.Normalize() - f.Multiply(smallPowersOfTen[adjExp]) - errors += errorscale / 2 - } - - // We multiply by 10 to the exp - exp%step. - f.Multiply(powersOfTen[i]) - if errors > 0 { - errors += 1 - } - errors += errorscale / 2 - - // Normalize - shift := f.Normalize() - errors <<= shift - - // Now f is a good approximation of the decimal. - // Check whether the error is too large: that is, if the mantissa - // is perturbated by the error, the resulting float64 will change. - // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. - // - // In many cases the approximation will be good enough. - denormalExp := flt.bias - 63 - var extrabits uint - if f.exp <= denormalExp { - // f.mant * 2^f.exp is smaller than 2^(flt.bias+1). - extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) - } else { - extrabits = uint(63 - flt.mantbits) - } - - halfway := uint64(1) << (extrabits - 1) - mant_extra := f.mant & (1< expMax: - i-- - default: - break Loop - } - } - // Apply the desired decimal shift on f. It will have exponent - // in the desired range. This is multiplication by 10^-exp10. 
- f.Multiply(powersOfTen[i]) - - return -(firstPowerOfTen + i*stepPowerOfTen), i -} - -// frexp10Many applies a common shift by a power of ten to a, b, c. -func frexp10Many(a, b, c *extFloat) (exp10 int) { - exp10, i := c.frexp10() - a.Multiply(powersOfTen[i]) - b.Multiply(powersOfTen[i]) - return -} - -// FixedDecimal stores in d the first n significant digits -// of the decimal representation of f. It returns false -// if it cannot be sure of the answer. -func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool { - if f.mant == 0 { - d.nd = 0 - d.dp = 0 - d.neg = f.neg - return true - } - if n == 0 { - panic("strconv: internal error: extFloat.FixedDecimal called with n == 0") - } - // Multiply by an appropriate power of ten to have a reasonable - // number to process. - f.Normalize() - exp10, _ := f.frexp10() - - shift := uint(-f.exp) - integer := uint32(f.mant >> shift) - fraction := f.mant - (uint64(integer) << shift) - ε := uint64(1) // ε is the uncertainty we have on the mantissa of f. - - // Write exactly n digits to d. - needed := n // how many digits are left to write. - integerDigits := 0 // the number of decimal digits of integer. - pow10 := uint64(1) // the power of ten by which f was scaled. - for i, pow := 0, uint64(1); i < 20; i++ { - if pow > uint64(integer) { - integerDigits = i - break - } - pow *= 10 - } - rest := integer - if integerDigits > needed { - // the integral part is already large, trim the last digits. - pow10 = uint64pow10[integerDigits-needed] - integer /= uint32(pow10) - rest -= integer * uint32(pow10) - } else { - rest = 0 - } - - // Write the digits of integer: the digits of rest are omitted. - var buf [32]byte - pos := len(buf) - for v := integer; v > 0; { - v1 := v / 10 - v -= 10 * v1 - pos-- - buf[pos] = byte(v + '0') - v = v1 - } - for i := pos; i < len(buf); i++ { - d.d[i-pos] = buf[i] - } - nd := len(buf) - pos - d.nd = nd - d.dp = integerDigits + exp10 - needed -= nd - - if needed > 0 { - if rest != 0 || pow10 != 1 { - panic("strconv: internal error, rest != 0 but needed > 0") - } - // Emit digits for the fractional part. Each time, 10*fraction - // fits in a uint64 without overflow. - for needed > 0 { - fraction *= 10 - ε *= 10 // the uncertainty scales as we multiply by ten. - if 2*ε > 1<> shift - d.d[nd] = byte(digit + '0') - fraction -= digit << shift - nd++ - needed-- - } - d.nd = nd - } - - // We have written a truncation of f (a numerator / 10^d.dp). The remaining part - // can be interpreted as a small number (< 1) to be added to the last digit of the - // numerator. - // - // If rest > 0, the amount is: - // (rest< 0 guarantees that pow10 << shift does not overflow a uint64. - // - // If rest = 0, pow10 == 1 and the amount is - // fraction / (1 << shift) - // fraction being known with a ±ε uncertainty. - // - // We pass this information to the rounding routine for adjustment. - - ok := adjustLastDigitFixed(d, uint64(rest)<= 0; i-- { - if d.d[i] != '0' { - d.nd = i + 1 - break - } - } - return true -} - -// adjustLastDigitFixed assumes d contains the representation of the integral part -// of some number, whose fractional part is num / (den << shift). The numerator -// num is only known up to an uncertainty of size ε, assumed to be less than -// (den << shift)/2. -// -// It will increase the last digit by one to account for correct rounding, typically -// when the fractional part is greater than 1/2, and will return false if ε is such -// that no correct answer can be given. 
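FixedDecimal above is the fixed-significant-digit formatter. In the stdlib the same behaviour surfaces through FormatFloat's precision argument; for orientation (stdlib calls, not the deleted API):

    package main

    import (
        "fmt"
        "strconv"
    )

    func main() {
        f := 1.0 / 3.0
        // 'g' with prec 4 keeps 4 significant digits, the FixedDecimal notion.
        fmt.Println(strconv.FormatFloat(f, 'g', 4, 64)) // 0.3333
        // 'e' with prec 3 keeps 1 + 3 = 4 significant digits as well.
        fmt.Println(strconv.FormatFloat(f, 'e', 3, 64)) // 3.333e-01
    }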
-func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool { - if num > den< den< den< (den< den<= 0; i-- { - if d.d[i] == '9' { - d.nd-- - } else { - break - } - } - if i < 0 { - d.d[0] = '1' - d.nd = 1 - d.dp++ - } else { - d.d[i]++ - } - return true - } - return false -} - -// ShortestDecimal stores in d the shortest decimal representation of f -// which belongs to the open interval (lower, upper), where f is supposed -// to lie. It returns false whenever the result is unsure. The implementation -// uses the Grisu3 algorithm. -func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool { - if f.mant == 0 { - d.nd = 0 - d.dp = 0 - d.neg = f.neg - return true - } - if f.exp == 0 && *lower == *f && *lower == *upper { - // an exact integer. - var buf [24]byte - n := len(buf) - 1 - for v := f.mant; v > 0; { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n-- - v = v1 - } - nd := len(buf) - n - 1 - for i := 0; i < nd; i++ { - d.d[i] = buf[n+1+i] - } - d.nd, d.dp = nd, nd - for d.nd > 0 && d.d[d.nd-1] == '0' { - d.nd-- - } - if d.nd == 0 { - d.dp = 0 - } - d.neg = f.neg - return true - } - upper.Normalize() - // Uniformize exponents. - if f.exp > upper.exp { - f.mant <<= uint(f.exp - upper.exp) - f.exp = upper.exp - } - if lower.exp > upper.exp { - lower.mant <<= uint(lower.exp - upper.exp) - lower.exp = upper.exp - } - - exp10 := frexp10Many(lower, f, upper) - // Take a safety margin due to rounding in frexp10Many, but we lose precision. - upper.mant++ - lower.mant-- - - // The shortest representation of f is either rounded up or down, but - // in any case, it is a truncation of upper. - shift := uint(-upper.exp) - integer := uint32(upper.mant >> shift) - fraction := upper.mant - (uint64(integer) << shift) - - // How far we can go down from upper until the result is wrong. - allowance := upper.mant - lower.mant - // How far we should go to get a very precise result. - targetDiff := upper.mant - f.mant - - // Count integral digits: there are at most 10. - var integerDigits int - for i, pow := 0, uint64(1); i < 20; i++ { - if pow > uint64(integer) { - integerDigits = i - break - } - pow *= 10 - } - for i := 0; i < integerDigits; i++ { - pow := uint64pow10[integerDigits-i-1] - digit := integer / uint32(pow) - d.d[i] = byte(digit + '0') - integer -= digit * uint32(pow) - // evaluate whether we should stop. - if currentDiff := uint64(integer)<> shift) - d.d[d.nd] = byte(digit + '0') - d.nd++ - fraction -= uint64(digit) << shift - if fraction < allowance*multiplier { - // We are in the admissible range. Note that if allowance is about to - // overflow, that is, allowance > 2^64/10, the condition is automatically - // true due to the limited range of fraction. - return adjustLastDigit(d, - fraction, targetDiff*multiplier, allowance*multiplier, - 1< maxDiff-ulpBinary { - // we went too far - return false - } - if d.nd == 1 && d.d[0] == '0' { - // the number has actually reached zero. - d.nd = 0 - d.dp = 0 - } - return true -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go b/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go deleted file mode 100644 index 4d33e6f77d..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/fold.go +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on Go stdlib's encoding/json/fold.go */ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package v1 - -import ( - "unicode/utf8" -) - -const ( - caseMask = ^byte(0x20) // Mask to ignore case in ASCII. - kelvin = '\u212a' - smallLongEss = '\u017f' -) - -// equalFoldRight is a specialization of bytes.EqualFold when s is -// known to be all ASCII (including punctuation), but contains an 's', -// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. -// See comments on foldFunc. -func EqualFoldRight(s, t []byte) bool { - for _, sb := range s { - if len(t) == 0 { - return false - } - tb := t[0] - if tb < utf8.RuneSelf { - if sb != tb { - sbUpper := sb & caseMask - if 'A' <= sbUpper && sbUpper <= 'Z' { - if sbUpper != tb&caseMask { - return false - } - } else { - return false - } - } - t = t[1:] - continue - } - // sb is ASCII and t is not. t must be either kelvin - // sign or long s; sb must be s, S, k, or K. - tr, size := utf8.DecodeRune(t) - switch sb { - case 's', 'S': - if tr != smallLongEss { - return false - } - case 'k', 'K': - if tr != kelvin { - return false - } - default: - return false - } - t = t[size:] - - } - if len(t) > 0 { - return false - } - return true -} - -// asciiEqualFold is a specialization of bytes.EqualFold for use when -// s is all ASCII (but may contain non-letters) and contains no -// special-folding letters. -// See comments on foldFunc. -func AsciiEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, sb := range s { - tb := t[i] - if sb == tb { - continue - } - if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { - if sb&caseMask != tb&caseMask { - return false - } - } else { - return false - } - } - return true -} - -// simpleLetterEqualFold is a specialization of bytes.EqualFold for -// use when s is all ASCII letters (no underscores, etc) and also -// doesn't contain 'k', 'K', 's', or 'S'. -// See comments on foldFunc. -func SimpleLetterEqualFold(s, t []byte) bool { - if len(s) != len(t) { - return false - } - for i, b := range s { - if b&caseMask != t[i]&caseMask { - return false - } - } - return true -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go deleted file mode 100644 index 360d6dbcf9..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/ftoa.go +++ /dev/null @@ -1,542 +0,0 @@ -package v1 - -/** - * Copyright 2015 Paul Querna, Klaus Post - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
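The three fold functions above share one trick: ASCII upper- and lower-case letters differ only in bit 0x20, so caseMask (^byte(0x20)) compares letters case-insensitively with a single AND, and the two Unicode characters that fold into ASCII, the Kelvin sign and the long s, are special-cased in EqualFoldRight. A quick check of the mask's reach:

    package main

    import "fmt"

    func main() {
        const caseMask = ^byte(0x20) // clears the ASCII case bit

        fmt.Println('a'&caseMask == 'A') // true: the mask folds letters
        fmt.Println('z'&caseMask == 'Z') // true

        // Non-letters are mangled by the mask ('1' & 0xDF == 0x11), which is
        // why the fold functions only trust it after an 'A' <= c <= 'Z' check.
        fmt.Println('1'&caseMask == '1') // false
    }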
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Most of this file are on Go stdlib's strconv/ftoa.go */ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -import "math" - -// TODO: move elsewhere? -type floatInfo struct { - mantbits uint - expbits uint - bias int -} - -var optimize = true // can change for testing - -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} - -// AppendFloat appends the string form of the floating-point number f, -// as generated by FormatFloat -func AppendFloat(dst EncodingBuffer, val float64, fmt byte, prec, bitSize int) { - var bits uint64 - var flt *floatInfo - switch bitSize { - case 32: - bits = uint64(math.Float32bits(float32(val))) - flt = &float32info - case 64: - bits = math.Float64bits(val) - flt = &float64info - default: - panic("strconv: illegal AppendFloat/FormatFloat bitSize") - } - - neg := bits>>(flt.expbits+flt.mantbits) != 0 - exp := int(bits>>flt.mantbits) & (1< digs.nd && digs.nd >= digs.dp { - eprec = digs.nd - } - // %e is used if the exponent from the conversion - // is less than -4 or greater than or equal to the precision. - // if precision was the shortest possible, use precision 6 for this decision. - if shortest { - eprec = 6 - } - exp := digs.dp - 1 - if exp < -4 || exp >= eprec { - if prec > digs.nd { - prec = digs.nd - } - fmtE(dst, neg, digs, prec-1, fmt+'e'-'g') - return - } - if prec > digs.dp { - prec = digs.nd - } - fmtF(dst, neg, digs, max(prec-digs.dp, 0)) - return - } - - // unknown format - dst.Write([]byte{'%', fmt}) - return -} - -// Round d (= mant * 2^exp) to the shortest number of digits -// that will let the original floating point value be precisely -// reconstructed. Size is original floating point size (64 or 32). -func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { - // If mantissa is zero, the number is zero; stop now. - if mant == 0 { - d.nd = 0 - return - } - - // Compute upper and lower such that any decimal number - // between upper and lower (possibly inclusive) - // will round to the original floating point number. - - // We may see at once that the number is already shortest. - // - // Suppose d is not denormal, so that 2^exp <= d < 10^dp. - // The closest shorter number is at least 10^(dp-nd) away. - // The lower/upper bounds computed below are at distance - // at most 2^(exp-mantbits). - // - // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), - // or equivalently log2(10)*(dp-nd) > exp-mantbits. - // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). - minexp := flt.bias + 1 // minimum possible exponent - if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { - // The number is already shortest. - return - } - - // d = mant << (exp - mantbits) - // Next highest floating point number is mant+1 << exp-mantbits. - // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. - upper := new(decimal) - upper.Assign(mant*2 + 1) - upper.Shift(exp - int(flt.mantbits) - 1) - - // d = mant << (exp - mantbits) - // Next lowest floating point number is mant-1 << exp-mantbits, - // unless mant-1 drops the significant bit and exp is not the minimum exp, - // in which case the next lowest is mant*2-1 << exp-mantbits-1. - // Either way, call it mantlo << explo-mantbits. 
- // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. - var mantlo uint64 - var explo int - if mant > 1< 0 { - dst.WriteByte('.') - i := 1 - m := min(d.nd, prec+1) - if i < m { - dst.Write(d.d[i:m]) - i = m - } - for i <= prec { - dst.WriteByte('0') - i++ - } - } - - // e± - dst.WriteByte(fmt) - exp := d.dp - 1 - if d.nd == 0 { // special case: 0 has exponent 0 - exp = 0 - } - if exp < 0 { - ch = '-' - exp = -exp - } else { - ch = '+' - } - dst.WriteByte(ch) - - // dd or ddd - switch { - case exp < 10: - dst.WriteByte('0') - dst.WriteByte(byte(exp) + '0') - case exp < 100: - dst.WriteByte(byte(exp/10) + '0') - dst.WriteByte(byte(exp%10) + '0') - default: - dst.WriteByte(byte(exp/100) + '0') - dst.WriteByte(byte(exp/10)%10 + '0') - dst.WriteByte(byte(exp%10) + '0') - } - - return -} - -// %f: -ddddddd.ddddd -func fmtF(dst EncodingBuffer, neg bool, d decimalSlice, prec int) { - // sign - if neg { - dst.WriteByte('-') - } - - // integer, padded with zeros as needed. - if d.dp > 0 { - m := min(d.nd, d.dp) - dst.Write(d.d[:m]) - for ; m < d.dp; m++ { - dst.WriteByte('0') - } - } else { - dst.WriteByte('0') - } - - // fraction - if prec > 0 { - dst.WriteByte('.') - for i := 0; i < prec; i++ { - ch := byte('0') - if j := d.dp + i; 0 <= j && j < d.nd { - ch = d.d[j] - } - dst.WriteByte(ch) - } - } - - return -} - -// %b: -ddddddddp±ddd -func fmtB(dst EncodingBuffer, neg bool, mant uint64, exp int, flt *floatInfo) { - // sign - if neg { - dst.WriteByte('-') - } - - // mantissa - formatBits(dst, mant, 10, false) - - // p - dst.WriteByte('p') - - // ±exponent - exp -= int(flt.mantbits) - if exp >= 0 { - dst.WriteByte('+') - } - formatBits(dst, uint64(exp), 10, exp < 0) - - return -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// formatBits computes the string representation of u in the given base. -// If neg is set, u is treated as negative int64 value. 
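fmtB above emits the %b shape, a decimal mantissa followed by a binary exponent, which round-trips floats without any decimal rounding. The stdlib's 'b' verb produces the same form; for instance 3.5 carries the 53-bit significand 7881299347898368 (that is, 3.5 * 2^51):

    package main

    import (
        "fmt"
        "math"
        "strconv"
    )

    func main() {
        fmt.Println(strconv.FormatFloat(3.5, 'b', -1, 64)) // 7881299347898368p-51

        // The mantissa is the IEEE significand with its implicit bit restored:
        bits := math.Float64bits(3.5)
        mant := bits&(1<<52-1) | 1<<52
        fmt.Println(mant) // 7881299347898368
    }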
-func formatBits(dst EncodingBuffer, u uint64, base int, neg bool) { - if base < 2 || base > len(digits) { - panic("strconv: illegal AppendInt/FormatInt base") - } - // 2 <= base && base <= len(digits) - - var a [64 + 1]byte // +1 for sign of 64bit value in base 2 - i := len(a) - - if neg { - u = -u - } - - // convert bits - if base == 10 { - // common case: use constants for / because - // the compiler can optimize it into a multiply+shift - - if ^uintptr(0)>>32 == 0 { - for u > uint64(^uintptr(0)) { - q := u / 1e9 - us := uintptr(u - q*1e9) // us % 1e9 fits into a uintptr - for j := 9; j > 0; j-- { - i-- - qs := us / 10 - a[i] = byte(us - qs*10 + '0') - us = qs - } - u = q - } - } - - // u guaranteed to fit into a uintptr - us := uintptr(u) - for us >= 10 { - i-- - q := us / 10 - a[i] = byte(us - q*10 + '0') - us = q - } - // u < 10 - i-- - a[i] = byte(us + '0') - - } else if s := shifts[base]; s > 0 { - // base is power of 2: use shifts and masks instead of / and % - b := uint64(base) - m := uintptr(b) - 1 // == 1<= b { - i-- - a[i] = digits[uintptr(u)&m] - u >>= s - } - // u < base - i-- - a[i] = digits[uintptr(u)] - - } else { - // general case - b := uint64(base) - for u >= b { - i-- - q := u / b - a[i] = digits[uintptr(u-q*b)] - u = q - } - // u < base - i-- - a[i] = digits[uintptr(u)] - } - - // add sign, if any - if neg { - i-- - a[i] = '-' - } - - dst.Write(a[i:]) -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go deleted file mode 100644 index 46c1289ec4..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atof.go +++ /dev/null @@ -1,936 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on Go stdlib's strconv/atof.go */ - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// decimal to binary floating point conversion. -// Algorithm: -// 1) Store input in multiprecision decimal. -// 2) Multiply/divide decimal by powers of two until in range [0.5, 1) -// 3) Multiply by 2^precision and round to get mantissa. 
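Those three steps are the classic correctly-rounded slow path. math/big can stand in for it as a cross-check: parsing at 53 bits of precision with round-to-nearest-even must agree with strconv's own result. A sanity-check sketch, stdlib only:

    package main

    import (
        "fmt"
        "math/big"
        "strconv"
    )

    func main() {
        // Multiprecision parse, rounded to a float64-sized significand.
        g, _, _ := big.ParseFloat("3.14159", 10, 53, big.ToNearestEven)
        want, _ := g.Float64()

        got, _ := strconv.ParseFloat("3.14159", 64)
        fmt.Println(got == want) // true: both are the nearest float64
    }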
- -import "math" - -var optimize = true // can change for testing - -func equalIgnoreCase(s1 []byte, s2 []byte) bool { - if len(s1) != len(s2) { - return false - } - for i := 0; i < len(s1); i++ { - c1 := s1[i] - if 'A' <= c1 && c1 <= 'Z' { - c1 += 'a' - 'A' - } - c2 := s2[i] - if 'A' <= c2 && c2 <= 'Z' { - c2 += 'a' - 'A' - } - if c1 != c2 { - return false - } - } - return true -} - -func special(s []byte) (f float64, ok bool) { - if len(s) == 0 { - return - } - switch s[0] { - default: - return - case '+': - if equalIgnoreCase(s, []byte("+inf")) || equalIgnoreCase(s, []byte("+infinity")) { - return math.Inf(1), true - } - case '-': - if equalIgnoreCase(s, []byte("-inf")) || equalIgnoreCase(s, []byte("-infinity")) { - return math.Inf(-1), true - } - case 'n', 'N': - if equalIgnoreCase(s, []byte("nan")) { - return math.NaN(), true - } - case 'i', 'I': - if equalIgnoreCase(s, []byte("inf")) || equalIgnoreCase(s, []byte("infinity")) { - return math.Inf(1), true - } - } - return -} - -func (b *decimal) set(s []byte) (ok bool) { - i := 0 - b.neg = false - b.trunc = false - - // optional sign - if i >= len(s) { - return - } - switch { - case s[i] == '+': - i++ - case s[i] == '-': - b.neg = true - i++ - } - - // digits - sawdot := false - sawdigits := false - for ; i < len(s); i++ { - switch { - case s[i] == '.': - if sawdot { - return - } - sawdot = true - b.dp = b.nd - continue - - case '0' <= s[i] && s[i] <= '9': - sawdigits = true - if s[i] == '0' && b.nd == 0 { // ignore leading zeros - b.dp-- - continue - } - if b.nd < len(b.d) { - b.d[b.nd] = s[i] - b.nd++ - } else if s[i] != '0' { - b.trunc = true - } - continue - } - break - } - if !sawdigits { - return - } - if !sawdot { - b.dp = b.nd - } - - // optional exponent moves decimal point. - // if we read a very large, very long number, - // just be sure to move the decimal point by - // a lot (say, 100000). it doesn't matter if it's - // not the exact number. - if i < len(s) && (s[i] == 'e' || s[i] == 'E') { - i++ - if i >= len(s) { - return - } - esign := 1 - if s[i] == '+' { - i++ - } else if s[i] == '-' { - i++ - esign = -1 - } - if i >= len(s) || s[i] < '0' || s[i] > '9' { - return - } - e := 0 - for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { - if e < 10000 { - e = e*10 + int(s[i]) - '0' - } - } - b.dp += e * esign - } - - if i != len(s) { - return - } - - ok = true - return -} - -// readFloat reads a decimal mantissa and exponent from a float -// string representation. It sets ok to false if the number could -// not fit return types or is invalid. -func readFloat(s []byte) (mantissa uint64, exp int, neg, trunc, ok bool) { - const uint64digits = 19 - i := 0 - - // optional sign - if i >= len(s) { - return - } - switch { - case s[i] == '+': - i++ - case s[i] == '-': - neg = true - i++ - } - - // digits - sawdot := false - sawdigits := false - nd := 0 - ndMant := 0 - dp := 0 - for ; i < len(s); i++ { - switch c := s[i]; true { - case c == '.': - if sawdot { - return - } - sawdot = true - dp = nd - continue - - case '0' <= c && c <= '9': - sawdigits = true - if c == '0' && nd == 0 { // ignore leading zeros - dp-- - continue - } - nd++ - if ndMant < uint64digits { - mantissa *= 10 - mantissa += uint64(c - '0') - ndMant++ - } else if s[i] != '0' { - trunc = true - } - continue - } - break - } - if !sawdigits { - return - } - if !sawdot { - dp = nd - } - - // optional exponent moves decimal point. - // if we read a very large, very long number, - // just be sure to move the decimal point by - // a lot (say, 100000). 
it doesn't matter if it's - // not the exact number. - if i < len(s) && (s[i] == 'e' || s[i] == 'E') { - i++ - if i >= len(s) { - return - } - esign := 1 - if s[i] == '+' { - i++ - } else if s[i] == '-' { - i++ - esign = -1 - } - if i >= len(s) || s[i] < '0' || s[i] > '9' { - return - } - e := 0 - for ; i < len(s) && '0' <= s[i] && s[i] <= '9'; i++ { - if e < 10000 { - e = e*10 + int(s[i]) - '0' - } - } - dp += e * esign - } - - if i != len(s) { - return - } - - exp = dp - ndMant - ok = true - return - -} - -// decimal power of ten to binary power of two. -var powtab = []int{1, 3, 6, 9, 13, 16, 19, 23, 26} - -func (d *decimal) floatBits(flt *floatInfo) (b uint64, overflow bool) { - var exp int - var mant uint64 - - // Zero is always a special case. - if d.nd == 0 { - mant = 0 - exp = flt.bias - goto out - } - - // Obvious overflow/underflow. - // These bounds are for 64-bit floats. - // Will have to change if we want to support 80-bit floats in the future. - if d.dp > 310 { - goto overflow - } - if d.dp < -330 { - // zero - mant = 0 - exp = flt.bias - goto out - } - - // Scale by powers of two until in range [0.5, 1.0) - exp = 0 - for d.dp > 0 { - var n int - if d.dp >= len(powtab) { - n = 27 - } else { - n = powtab[d.dp] - } - d.Shift(-n) - exp += n - } - for d.dp < 0 || d.dp == 0 && d.d[0] < '5' { - var n int - if -d.dp >= len(powtab) { - n = 27 - } else { - n = powtab[-d.dp] - } - d.Shift(n) - exp -= n - } - - // Our range is [0.5,1) but floating point range is [1,2). - exp-- - - // Minimum representable exponent is flt.bias+1. - // If the exponent is smaller, move it up and - // adjust d accordingly. - if exp < flt.bias+1 { - n := flt.bias + 1 - exp - d.Shift(-n) - exp += n - } - - if exp-flt.bias >= 1<>= 1 - exp++ - if exp-flt.bias >= 1<>float64info.mantbits != 0 { - return - } - f = float64(mantissa) - if neg { - f = -f - } - switch { - case exp == 0: - // an integer. - return f, true - // Exact integers are <= 10^15. - // Exact powers of ten are <= 10^22. - case exp > 0 && exp <= 15+22: // int * 10^k - // If exponent is big but number of digits is not, - // can move a few zeros into the integer part. - if exp > 22 { - f *= float64pow10[exp-22] - exp = 22 - } - if f > 1e15 || f < -1e15 { - // the exponent was really too large. - return - } - return f * float64pow10[exp], true - case exp < 0 && exp >= -22: // int / 10^k - return f / float64pow10[-exp], true - } - return -} - -// If possible to compute mantissa*10^exp to 32-bit float f exactly, -// entirely in floating-point math, do so, avoiding the machinery above. -func atof32exact(mantissa uint64, exp int, neg bool) (f float32, ok bool) { - if mantissa>>float32info.mantbits != 0 { - return - } - f = float32(mantissa) - if neg { - f = -f - } - switch { - case exp == 0: - return f, true - // Exact integers are <= 10^7. - // Exact powers of ten are <= 10^10. - case exp > 0 && exp <= 7+10: // int * 10^k - // If exponent is big but number of digits is not, - // can move a few zeros into the integer part. - if exp > 10 { - f *= float32pow10[exp-10] - exp = 10 - } - if f > 1e7 || f < -1e7 { - // the exponent was really too large. - return - } - return f * float32pow10[exp], true - case exp < 0 && exp >= -10: // int / 10^k - return f / float32pow10[-exp], true - } - return -} - -const fnParseFloat = "ParseFloat" - -func atof32(s []byte) (f float32, err error) { - if val, ok := special(s); ok { - return float32(val), nil - } - - if optimize { - // Parse mantissa and exponent. 
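atof64exact (below) relies on the premise that small integer mantissas and small powers of ten are both exact in float64, so a single multiply is already correctly rounded. A quick check of that premise against the slow path; pow10 here is a hand-rolled stand-in for the deleted float64pow10 table:

```go
package main

import (
	"fmt"
	"strconv"
)

// pow10 stands in for the deleted float64pow10 table (all entries exact).
var pow10 = [...]float64{1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7}

func main() {
	// "12345e3": mantissa 12345 and 10^3 are both exact in float64,
	// so one multiply equals the fully rounded parse.
	fast := float64(12345) * pow10[3]
	slow, _ := strconv.ParseFloat("12345e3", 64)
	fmt.Println(fast == slow) // true
}
```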
- mantissa, exp, neg, trunc, ok := readFloat(s) - if ok { - // Try pure floating-point arithmetic conversion. - if !trunc { - if f, ok := atof32exact(mantissa, exp, neg); ok { - return f, nil - } - } - // Try another fast path. - ext := new(extFloat) - if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float32info); ok { - b, ovf := ext.floatBits(&float32info) - f = math.Float32frombits(uint32(b)) - if ovf { - err = rangeError(fnParseFloat, string(s)) - } - return f, err - } - } - } - var d decimal - if !d.set(s) { - return 0, syntaxError(fnParseFloat, string(s)) - } - b, ovf := d.floatBits(&float32info) - f = math.Float32frombits(uint32(b)) - if ovf { - err = rangeError(fnParseFloat, string(s)) - } - return f, err -} - -func atof64(s []byte) (f float64, err error) { - if val, ok := special(s); ok { - return val, nil - } - - if optimize { - // Parse mantissa and exponent. - mantissa, exp, neg, trunc, ok := readFloat(s) - if ok { - // Try pure floating-point arithmetic conversion. - if !trunc { - if f, ok := atof64exact(mantissa, exp, neg); ok { - return f, nil - } - } - // Try another fast path. - ext := new(extFloat) - if ok := ext.AssignDecimal(mantissa, exp, neg, trunc, &float64info); ok { - b, ovf := ext.floatBits(&float64info) - f = math.Float64frombits(b) - if ovf { - err = rangeError(fnParseFloat, string(s)) - } - return f, err - } - } - } - var d decimal - if !d.set(s) { - return 0, syntaxError(fnParseFloat, string(s)) - } - b, ovf := d.floatBits(&float64info) - f = math.Float64frombits(b) - if ovf { - err = rangeError(fnParseFloat, string(s)) - } - return f, err -} - -// ParseFloat converts the string s to a floating-point number -// with the precision specified by bitSize: 32 for float32, or 64 for float64. -// When bitSize=32, the result still has type float64, but it will be -// convertible to float32 without changing its value. -// -// If s is well-formed and near a valid floating point number, -// ParseFloat returns the nearest floating point number rounded -// using IEEE754 unbiased rounding. -// -// The errors that ParseFloat returns have concrete type *NumError -// and include err.Num = s. -// -// If s is not syntactically well-formed, ParseFloat returns err.Err = ErrSyntax. -// -// If s is syntactically well-formed but is more than 1/2 ULP -// away from the largest floating point number of the given size, -// ParseFloat returns f = ±Inf, err.Err = ErrRange. -func ParseFloat(s []byte, bitSize int) (f float64, err error) { - if bitSize == 32 { - f1, err1 := atof32(s) - return float64(f1), err1 - } - f1, err1 := atof64(s) - return f1, err1 -} - -// oroginal: strconv/decimal.go, but not exported, and needed for PareFloat. - -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Multiprecision decimal numbers. -// For floating-point formatting only; not general purpose. -// Only operations are assign and (binary) left/right shift. -// Can do binary floating point in multiprecision decimal precisely -// because 2 divides 10; cannot do decimal floating point -// in multiprecision binary precisely. 
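The ParseFloat contract spelled out in the deleted doc comment matches strconv.ParseFloat: with bitSize 32 the result is typed float64 but already carries float32 rounding, so the narrowing conversion loses nothing:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	f, _ := strconv.ParseFloat("3.14159", 32)
	fmt.Printf("%T\n", f)                 // float64
	fmt.Println(float64(float32(f)) == f) // true: no value change on narrowing
}
```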
- -type decimal struct { - d [800]byte // digits - nd int // number of digits used - dp int // decimal point - neg bool - trunc bool // discarded nonzero digits beyond d[:nd] -} - -func (a *decimal) String() string { - n := 10 + a.nd - if a.dp > 0 { - n += a.dp - } - if a.dp < 0 { - n += -a.dp - } - - buf := make([]byte, n) - w := 0 - switch { - case a.nd == 0: - return "0" - - case a.dp <= 0: - // zeros fill space between decimal point and digits - buf[w] = '0' - w++ - buf[w] = '.' - w++ - w += digitZero(buf[w : w+-a.dp]) - w += copy(buf[w:], a.d[0:a.nd]) - - case a.dp < a.nd: - // decimal point in middle of digits - w += copy(buf[w:], a.d[0:a.dp]) - buf[w] = '.' - w++ - w += copy(buf[w:], a.d[a.dp:a.nd]) - - default: - // zeros fill space between digits and decimal point - w += copy(buf[w:], a.d[0:a.nd]) - w += digitZero(buf[w : w+a.dp-a.nd]) - } - return string(buf[0:w]) -} - -func digitZero(dst []byte) int { - for i := range dst { - dst[i] = '0' - } - return len(dst) -} - -// trim trailing zeros from number. -// (They are meaningless; the decimal point is tracked -// independent of the number of digits.) -func trim(a *decimal) { - for a.nd > 0 && a.d[a.nd-1] == '0' { - a.nd-- - } - if a.nd == 0 { - a.dp = 0 - } -} - -// Assign v to a. -func (a *decimal) Assign(v uint64) { - var buf [24]byte - - // Write reversed decimal in buf. - n := 0 - for v > 0 { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n++ - v = v1 - } - - // Reverse again to produce forward decimal in a.d. - a.nd = 0 - for n--; n >= 0; n-- { - a.d[a.nd] = buf[n] - a.nd++ - } - a.dp = a.nd - trim(a) -} - -// Maximum shift that we can do in one pass without overflow. -// Signed int has 31 bits, and we have to be able to accommodate 9<>k == 0; r++ { - if r >= a.nd { - if n == 0 { - // a == 0; shouldn't get here, but handle anyway. - a.nd = 0 - return - } - for n>>k == 0 { - n = n * 10 - r++ - } - break - } - c := int(a.d[r]) - n = n*10 + c - '0' - } - a.dp -= r - 1 - - // Pick up a digit, put down a digit. - for ; r < a.nd; r++ { - c := int(a.d[r]) - dig := n >> k - n -= dig << k - a.d[w] = byte(dig + '0') - w++ - n = n*10 + c - '0' - } - - // Put down extra digits. - for n > 0 { - dig := n >> k - n -= dig << k - if w < len(a.d) { - a.d[w] = byte(dig + '0') - w++ - } else if dig > 0 { - a.trunc = true - } - n = n * 10 - } - - a.nd = w - trim(a) -} - -// Cheat sheet for left shift: table indexed by shift count giving -// number of new digits that will be introduced by that shift. -// -// For example, leftcheats[4] = {2, "625"}. That means that -// if we are shifting by 4 (multiplying by 16), it will add 2 digits -// when the string prefix is "625" through "999", and one fewer digit -// if the string prefix is "000" through "624". -// -// Credit for this trick goes to Ken. - -type leftCheat struct { - delta int // number of new digits - cutoff string // minus one digit if original < a. -} - -var leftcheats = []leftCheat{ - // Leading digits of 1/2^i = 5^i. - // 5^23 is not an exact 64-bit floating point number, - // so have to use bc for the math. 
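rightShift above walks the digit string with its "pick up a digit, put down a digit" loop while the decimal point dp is tracked separately. A hypothetical standalone halve() shows the same idea for a single divide-by-two:

```go
package main

import "fmt"

// halve divides a decimal digit string by two; the decimal point is the
// caller's problem, mirroring how the decimal struct tracks dp apart
// from the digits. Assumes s holds only ASCII digits.
func halve(s string) string {
	var out []byte
	rem := 0
	for i := 0; i < len(s); i++ {
		n := rem*10 + int(s[i]-'0')
		out = append(out, byte(n/2)+'0')
		rem = n % 2
	}
	if rem != 0 {
		out = append(out, '5') // the carried half becomes a new digit
	}
	// A real implementation also trims leading zeros and adjusts dp.
	return string(out)
}

func main() {
	// digits "625" with dp=3 is 625; halved digits are "3125" with dp
	// still 3, i.e. 312.5.
	fmt.Println(halve("625")) // 3125
}
```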
- /* - seq 27 | sed 's/^/5^/' | bc | - awk 'BEGIN{ print "\tleftCheat{ 0, \"\" }," } - { - log2 = log(2)/log(10) - printf("\tleftCheat{ %d, \"%s\" },\t// * %d\n", - int(log2*NR+1), $0, 2**NR) - }' - */ - {0, ""}, - {1, "5"}, // * 2 - {1, "25"}, // * 4 - {1, "125"}, // * 8 - {2, "625"}, // * 16 - {2, "3125"}, // * 32 - {2, "15625"}, // * 64 - {3, "78125"}, // * 128 - {3, "390625"}, // * 256 - {3, "1953125"}, // * 512 - {4, "9765625"}, // * 1024 - {4, "48828125"}, // * 2048 - {4, "244140625"}, // * 4096 - {4, "1220703125"}, // * 8192 - {5, "6103515625"}, // * 16384 - {5, "30517578125"}, // * 32768 - {5, "152587890625"}, // * 65536 - {6, "762939453125"}, // * 131072 - {6, "3814697265625"}, // * 262144 - {6, "19073486328125"}, // * 524288 - {7, "95367431640625"}, // * 1048576 - {7, "476837158203125"}, // * 2097152 - {7, "2384185791015625"}, // * 4194304 - {7, "11920928955078125"}, // * 8388608 - {8, "59604644775390625"}, // * 16777216 - {8, "298023223876953125"}, // * 33554432 - {8, "1490116119384765625"}, // * 67108864 - {9, "7450580596923828125"}, // * 134217728 -} - -// Is the leading prefix of b lexicographically less than s? -func prefixIsLessThan(b []byte, s string) bool { - for i := 0; i < len(s); i++ { - if i >= len(b) { - return true - } - if b[i] != s[i] { - return b[i] < s[i] - } - } - return false -} - -// Binary shift left (/ 2) by k bits. k <= maxShift to avoid overflow. -func leftShift(a *decimal, k uint) { - delta := leftcheats[k].delta - if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { - delta-- - } - - r := a.nd // read index - w := a.nd + delta // write index - n := 0 - - // Pick up a digit, put down a digit. - for r--; r >= 0; r-- { - n += (int(a.d[r]) - '0') << k - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - // Put down extra digits. - for n > 0 { - quo := n / 10 - rem := n - 10*quo - w-- - if w < len(a.d) { - a.d[w] = byte(rem + '0') - } else if rem != 0 { - a.trunc = true - } - n = quo - } - - a.nd += delta - if a.nd >= len(a.d) { - a.nd = len(a.d) - } - a.dp += delta - trim(a) -} - -// Binary shift left (k > 0) or right (k < 0). -func (a *decimal) Shift(k int) { - switch { - case a.nd == 0: - // nothing to do: a == 0 - case k > 0: - for k > maxShift { - leftShift(a, maxShift) - k -= maxShift - } - leftShift(a, uint(k)) - case k < 0: - for k < -maxShift { - rightShift(a, maxShift) - k += maxShift - } - rightShift(a, uint(-k)) - } -} - -// If we chop a at nd digits, should we round up? -func shouldRoundUp(a *decimal, nd int) bool { - if nd < 0 || nd >= a.nd { - return false - } - if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even - // if we truncated, a little higher than what's recorded - always round up - if a.trunc { - return true - } - return nd > 0 && (a.d[nd-1]-'0')%2 != 0 - } - // not halfway - digit tells all - return a.d[nd] >= '5' -} - -// Round a to nd digits (or fewer). -// If nd is zero, it means we're rounding -// just to the left of the digits, as in -// 0.09 -> 0.1. -func (a *decimal) Round(nd int) { - if nd < 0 || nd >= a.nd { - return - } - if shouldRoundUp(a, nd) { - a.RoundUp(nd) - } else { - a.RoundDown(nd) - } -} - -// Round a down to nd digits (or fewer). -func (a *decimal) RoundDown(nd int) { - if nd < 0 || nd >= a.nd { - return - } - a.nd = nd - trim(a) -} - -// Round a up to nd digits (or fewer). 
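shouldRoundUp above implements round-half-to-even on the digit string, with the trunc flag forcing a round up when nonzero digits were discarded. A compact standalone mirror (roundUp is a hypothetical name, same logic):

```go
package main

import "fmt"

// roundUp mirrors shouldRoundUp: decide whether chopping d to nd digits
// must round up, with round-half-to-even at the exact midpoint.
func roundUp(d string, nd int, trunc bool) bool {
	if nd < 0 || nd >= len(d) {
		return false
	}
	if d[nd] == '5' && nd+1 == len(d) { // exactly halfway
		if trunc {
			return true // discarded nonzero digits: above halfway after all
		}
		return nd > 0 && (d[nd-1]-'0')%2 != 0 // round to even
	}
	return d[nd] >= '5' // not halfway: next digit decides
}

func main() {
	fmt.Println(roundUp("25", 1, false))  // false: 2.5 -> 2 (even)
	fmt.Println(roundUp("35", 1, false))  // true:  3.5 -> 4 (even)
	fmt.Println(roundUp("251", 1, false)) // true:  2.51 is past halfway
}
```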
-func (a *decimal) RoundUp(nd int) { - if nd < 0 || nd >= a.nd { - return - } - - // round up - for i := nd - 1; i >= 0; i-- { - c := a.d[i] - if c < '9' { // can stop after this digit - a.d[i]++ - a.nd = i + 1 - return - } - } - - // Number is all 9s. - // Change to single 1 with adjusted decimal point. - a.d[0] = '1' - a.nd = 1 - a.dp++ -} - -// Extract integer part, rounded appropriately. -// No guarantees about overflow. -func (a *decimal) RoundedInteger() uint64 { - if a.dp > 20 { - return 0xFFFFFFFFFFFFFFFF - } - var i int - n := uint64(0) - for i = 0; i < a.dp && i < a.nd; i++ { - n = n*10 + uint64(a.d[i]-'0') - } - for ; i < a.dp; i++ { - n *= 10 - } - if shouldRoundUp(a, a.dp) { - n++ - } - return n -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go deleted file mode 100644 index 06eb2ec29f..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go +++ /dev/null @@ -1,213 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on Go stdlib's strconv/atoi.go */ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "errors" - "strconv" -) - -// ErrRange indicates that a value is out of range for the target type. -var ErrRange = errors.New("value out of range") - -// ErrSyntax indicates that a value does not have the right syntax for the target type. -var ErrSyntax = errors.New("invalid syntax") - -// A NumError records a failed conversion. -type NumError struct { - Func string // the failing function (ParseBool, ParseInt, ParseUint, ParseFloat) - Num string // the input - Err error // the reason the conversion failed (ErrRange, ErrSyntax) -} - -func (e *NumError) Error() string { - return "strconv." + e.Func + ": " + "parsing " + strconv.Quote(e.Num) + ": " + e.Err.Error() -} - -func syntaxError(fn, str string) *NumError { - return &NumError{fn, str, ErrSyntax} -} - -func rangeError(fn, str string) *NumError { - return &NumError{fn, str, ErrRange} -} - -const intSize = 32 << uint(^uint(0)>>63) - -// IntSize is the size in bits of an int or uint value. -const IntSize = intSize - -// Return the first number n such that n*base >= 1<<64. -func cutoff64(base int) uint64 { - if base < 2 { - return 0 - } - return (1<<64-1)/uint64(base) + 1 -} - -// ParseUint is like ParseInt but for unsigned numbers, and oeprating on []byte -func ParseUint(s []byte, base int, bitSize int) (n uint64, err error) { - var cutoff, maxVal uint64 - - if bitSize == 0 { - bitSize = int(IntSize) - } - - s0 := s - switch { - case len(s) < 1: - err = ErrSyntax - goto Error - - case 2 <= base && base <= 36: - // valid base; nothing to do - - case base == 0: - // Look for octal, hex prefix. 
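In the atoi.go hunk that starts here, cutoff64 precomputes the smallest n for which n*base no longer fits in 64 bits, so overflow is caught before the multiply happens. A hypothetical parseSmall sketches the same guard; it assumes valid digits, and detects the add overflow via wraparound where the vendored ParseUint compares against an explicit maxVal:

```go
package main

import "fmt"

// parseSmall is a hypothetical digit loop showing the cutoff guard.
// Assumes s contains only '0'-'9' and base <= 10.
func parseSmall(s string, base uint64) (uint64, bool) {
	cutoff := (1<<64-1)/base + 1 // smallest n with n*base overflowing
	var n uint64
	for i := 0; i < len(s); i++ {
		v := uint64(s[i] - '0')
		if n >= cutoff {
			return 0, false // n*base would overflow
		}
		n *= base
		if n+v < n {
			return 0, false // n+v wrapped around
		}
		n += v
	}
	return n, true
}

func main() {
	fmt.Println(parseSmall("18446744073709551615", 10)) // max uint64, ok
	fmt.Println(parseSmall("18446744073709551616", 10)) // 0 false: overflow
}
```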
- switch { - case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): - base = 16 - s = s[2:] - if len(s) < 1 { - err = ErrSyntax - goto Error - } - case s[0] == '0': - base = 8 - default: - base = 10 - } - - default: - err = errors.New("invalid base " + strconv.Itoa(base)) - goto Error - } - - n = 0 - cutoff = cutoff64(base) - maxVal = 1<= base { - n = 0 - err = ErrSyntax - goto Error - } - - if n >= cutoff { - // n*base overflows - n = 1<<64 - 1 - err = ErrRange - goto Error - } - n *= uint64(base) - - n1 := n + uint64(v) - if n1 < n || n1 > maxVal { - // n+v overflows - n = 1<<64 - 1 - err = ErrRange - goto Error - } - n = n1 - } - - return n, nil - -Error: - return n, &NumError{"ParseUint", string(s0), err} -} - -// ParseInt interprets a string s in the given base (2 to 36) and -// returns the corresponding value i. If base == 0, the base is -// implied by the string's prefix: base 16 for "0x", base 8 for -// "0", and base 10 otherwise. -// -// The bitSize argument specifies the integer type -// that the result must fit into. Bit sizes 0, 8, 16, 32, and 64 -// correspond to int, int8, int16, int32, and int64. -// -// The errors that ParseInt returns have concrete type *NumError -// and include err.Num = s. If s is empty or contains invalid -// digits, err.Err = ErrSyntax and the returned value is 0; -// if the value corresponding to s cannot be represented by a -// signed integer of the given size, err.Err = ErrRange and the -// returned value is the maximum magnitude integer of the -// appropriate bitSize and sign. -func ParseInt(s []byte, base int, bitSize int) (i int64, err error) { - const fnParseInt = "ParseInt" - - if bitSize == 0 { - bitSize = int(IntSize) - } - - // Empty string bad. - if len(s) == 0 { - return 0, syntaxError(fnParseInt, string(s)) - } - - // Pick off leading sign. - s0 := s - neg := false - if s[0] == '+' { - s = s[1:] - } else if s[0] == '-' { - neg = true - s = s[1:] - } - - // Convert unsigned and check range. - var un uint64 - un, err = ParseUint(s, base, bitSize) - if err != nil && err.(*NumError).Err != ErrRange { - err.(*NumError).Func = fnParseInt - err.(*NumError).Num = string(s0) - return 0, err - } - cutoff := uint64(1 << uint(bitSize-1)) - if !neg && un >= cutoff { - return int64(cutoff - 1), rangeError(fnParseInt, string(s0)) - } - if neg && un > cutoff { - return -int64(cutoff), rangeError(fnParseInt, string(s0)) - } - n := int64(un) - if neg { - n = -n - } - return n, nil -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go deleted file mode 100644 index ab791085a4..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go +++ /dev/null @@ -1,668 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -// An extFloat represents an extended floating-point number, with more -// precision than a float64. It does not try to save bits: the -// number represented by the structure is mant*(2^exp), with a negative -// sign if neg is true. -type extFloat struct { - mant uint64 - exp int - neg bool -} - -// Powers of ten taken from double-conversion library. 
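The base == 0 prefix rules above (0x selects hex, a leading 0 selects octal, anything else decimal) are the classic strconv rules, still observable in the standard library:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	for _, s := range []string{"0x1f", "017", "17"} {
		n, _ := strconv.ParseInt(s, 0, 64)
		fmt.Printf("%5s -> %d\n", s, n) // 31, 15, 17
	}
}
```

(Current Go additionally accepts 0b/0o prefixes and digit underscores in base 0; the vendored copy predates those.)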
-// http://code.google.com/p/double-conversion/ -const ( - firstPowerOfTen = -348 - stepPowerOfTen = 8 -) - -var smallPowersOfTen = [...]extFloat{ - {1 << 63, -63, false}, // 1 - {0xa << 60, -60, false}, // 1e1 - {0x64 << 57, -57, false}, // 1e2 - {0x3e8 << 54, -54, false}, // 1e3 - {0x2710 << 50, -50, false}, // 1e4 - {0x186a0 << 47, -47, false}, // 1e5 - {0xf4240 << 44, -44, false}, // 1e6 - {0x989680 << 40, -40, false}, // 1e7 -} - -var powersOfTen = [...]extFloat{ - {0xfa8fd5a0081c0288, -1220, false}, // 10^-348 - {0xbaaee17fa23ebf76, -1193, false}, // 10^-340 - {0x8b16fb203055ac76, -1166, false}, // 10^-332 - {0xcf42894a5dce35ea, -1140, false}, // 10^-324 - {0x9a6bb0aa55653b2d, -1113, false}, // 10^-316 - {0xe61acf033d1a45df, -1087, false}, // 10^-308 - {0xab70fe17c79ac6ca, -1060, false}, // 10^-300 - {0xff77b1fcbebcdc4f, -1034, false}, // 10^-292 - {0xbe5691ef416bd60c, -1007, false}, // 10^-284 - {0x8dd01fad907ffc3c, -980, false}, // 10^-276 - {0xd3515c2831559a83, -954, false}, // 10^-268 - {0x9d71ac8fada6c9b5, -927, false}, // 10^-260 - {0xea9c227723ee8bcb, -901, false}, // 10^-252 - {0xaecc49914078536d, -874, false}, // 10^-244 - {0x823c12795db6ce57, -847, false}, // 10^-236 - {0xc21094364dfb5637, -821, false}, // 10^-228 - {0x9096ea6f3848984f, -794, false}, // 10^-220 - {0xd77485cb25823ac7, -768, false}, // 10^-212 - {0xa086cfcd97bf97f4, -741, false}, // 10^-204 - {0xef340a98172aace5, -715, false}, // 10^-196 - {0xb23867fb2a35b28e, -688, false}, // 10^-188 - {0x84c8d4dfd2c63f3b, -661, false}, // 10^-180 - {0xc5dd44271ad3cdba, -635, false}, // 10^-172 - {0x936b9fcebb25c996, -608, false}, // 10^-164 - {0xdbac6c247d62a584, -582, false}, // 10^-156 - {0xa3ab66580d5fdaf6, -555, false}, // 10^-148 - {0xf3e2f893dec3f126, -529, false}, // 10^-140 - {0xb5b5ada8aaff80b8, -502, false}, // 10^-132 - {0x87625f056c7c4a8b, -475, false}, // 10^-124 - {0xc9bcff6034c13053, -449, false}, // 10^-116 - {0x964e858c91ba2655, -422, false}, // 10^-108 - {0xdff9772470297ebd, -396, false}, // 10^-100 - {0xa6dfbd9fb8e5b88f, -369, false}, // 10^-92 - {0xf8a95fcf88747d94, -343, false}, // 10^-84 - {0xb94470938fa89bcf, -316, false}, // 10^-76 - {0x8a08f0f8bf0f156b, -289, false}, // 10^-68 - {0xcdb02555653131b6, -263, false}, // 10^-60 - {0x993fe2c6d07b7fac, -236, false}, // 10^-52 - {0xe45c10c42a2b3b06, -210, false}, // 10^-44 - {0xaa242499697392d3, -183, false}, // 10^-36 - {0xfd87b5f28300ca0e, -157, false}, // 10^-28 - {0xbce5086492111aeb, -130, false}, // 10^-20 - {0x8cbccc096f5088cc, -103, false}, // 10^-12 - {0xd1b71758e219652c, -77, false}, // 10^-4 - {0x9c40000000000000, -50, false}, // 10^4 - {0xe8d4a51000000000, -24, false}, // 10^12 - {0xad78ebc5ac620000, 3, false}, // 10^20 - {0x813f3978f8940984, 30, false}, // 10^28 - {0xc097ce7bc90715b3, 56, false}, // 10^36 - {0x8f7e32ce7bea5c70, 83, false}, // 10^44 - {0xd5d238a4abe98068, 109, false}, // 10^52 - {0x9f4f2726179a2245, 136, false}, // 10^60 - {0xed63a231d4c4fb27, 162, false}, // 10^68 - {0xb0de65388cc8ada8, 189, false}, // 10^76 - {0x83c7088e1aab65db, 216, false}, // 10^84 - {0xc45d1df942711d9a, 242, false}, // 10^92 - {0x924d692ca61be758, 269, false}, // 10^100 - {0xda01ee641a708dea, 295, false}, // 10^108 - {0xa26da3999aef774a, 322, false}, // 10^116 - {0xf209787bb47d6b85, 348, false}, // 10^124 - {0xb454e4a179dd1877, 375, false}, // 10^132 - {0x865b86925b9bc5c2, 402, false}, // 10^140 - {0xc83553c5c8965d3d, 428, false}, // 10^148 - {0x952ab45cfa97a0b3, 455, false}, // 10^156 - {0xde469fbd99a05fe3, 481, false}, // 10^164 - {0xa59bc234db398c25, 
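extFloat keeps a full 64-bit mantissa with a separate binary exponent; Normalize (further down in this hunk) shifts until bit 63 is set. With math/bits (available since Go 1.9) that search is a single call, and normalizing the value 10 reproduces the smallPowersOfTen entry {0xa << 60, -60} above. A sketch, assuming a nonzero mantissa:

```go
package main

import (
	"fmt"
	"math/bits"
)

type extFloat struct {
	mant uint64
	exp  int
}

// normalize shifts until bit 63 of the mantissa is set, preserving
// the value mant * 2^exp. Assumes mant != 0.
func (f *extFloat) normalize() {
	shift := uint(bits.LeadingZeros64(f.mant))
	f.mant <<= shift
	f.exp -= int(shift)
}

func main() {
	f := extFloat{mant: 10, exp: 0} // the value 10
	f.normalize()
	// 0xa000000000000000 * 2^-60: the smallPowersOfTen entry for 1e1.
	fmt.Printf("%#x * 2^%d\n", f.mant, f.exp)
}
```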
508, false}, // 10^172 - {0xf6c69a72a3989f5c, 534, false}, // 10^180 - {0xb7dcbf5354e9bece, 561, false}, // 10^188 - {0x88fcf317f22241e2, 588, false}, // 10^196 - {0xcc20ce9bd35c78a5, 614, false}, // 10^204 - {0x98165af37b2153df, 641, false}, // 10^212 - {0xe2a0b5dc971f303a, 667, false}, // 10^220 - {0xa8d9d1535ce3b396, 694, false}, // 10^228 - {0xfb9b7cd9a4a7443c, 720, false}, // 10^236 - {0xbb764c4ca7a44410, 747, false}, // 10^244 - {0x8bab8eefb6409c1a, 774, false}, // 10^252 - {0xd01fef10a657842c, 800, false}, // 10^260 - {0x9b10a4e5e9913129, 827, false}, // 10^268 - {0xe7109bfba19c0c9d, 853, false}, // 10^276 - {0xac2820d9623bf429, 880, false}, // 10^284 - {0x80444b5e7aa7cf85, 907, false}, // 10^292 - {0xbf21e44003acdd2d, 933, false}, // 10^300 - {0x8e679c2f5e44ff8f, 960, false}, // 10^308 - {0xd433179d9c8cb841, 986, false}, // 10^316 - {0x9e19db92b4e31ba9, 1013, false}, // 10^324 - {0xeb96bf6ebadf77d9, 1039, false}, // 10^332 - {0xaf87023b9bf0ee6b, 1066, false}, // 10^340 -} - -// floatBits returns the bits of the float64 that best approximates -// the extFloat passed as receiver. Overflow is set to true if -// the resulting float64 is ±Inf. -func (f *extFloat) floatBits(flt *floatInfo) (bits uint64, overflow bool) { - f.Normalize() - - exp := f.exp + 63 - - // Exponent too small. - if exp < flt.bias+1 { - n := flt.bias + 1 - exp - f.mant >>= uint(n) - exp += n - } - - // Extract 1+flt.mantbits bits from the 64-bit mantissa. - mant := f.mant >> (63 - flt.mantbits) - if f.mant&(1<<(62-flt.mantbits)) != 0 { - // Round up. - mant += 1 - } - - // Rounding might have added a bit; shift down. - if mant == 2<>= 1 - exp++ - } - - // Infinities. - if exp-flt.bias >= 1<>uint(-f.exp))<>= uint(-f.exp) - f.exp = 0 - return *f, *f - } - expBiased := exp - flt.bias - - upper = extFloat{mant: 2*f.mant + 1, exp: f.exp - 1, neg: f.neg} - if mant != 1<>(64-32) == 0 { - mant <<= 32 - exp -= 32 - } - if mant>>(64-16) == 0 { - mant <<= 16 - exp -= 16 - } - if mant>>(64-8) == 0 { - mant <<= 8 - exp -= 8 - } - if mant>>(64-4) == 0 { - mant <<= 4 - exp -= 4 - } - if mant>>(64-2) == 0 { - mant <<= 2 - exp -= 2 - } - if mant>>(64-1) == 0 { - mant <<= 1 - exp -= 1 - } - shift = uint(f.exp - exp) - f.mant, f.exp = mant, exp - return -} - -// Multiply sets f to the product f*g: the result is correctly rounded, -// but not normalized. -func (f *extFloat) Multiply(g extFloat) { - fhi, flo := f.mant>>32, uint64(uint32(f.mant)) - ghi, glo := g.mant>>32, uint64(uint32(g.mant)) - - // Cross products. - cross1 := fhi * glo - cross2 := flo * ghi - - // f.mant*g.mant is fhi*ghi << 64 + (cross1+cross2) << 32 + flo*glo - f.mant = fhi*ghi + (cross1 >> 32) + (cross2 >> 32) - rem := uint64(uint32(cross1)) + uint64(uint32(cross2)) + ((flo * glo) >> 32) - // Round up. - rem += (1 << 31) - - f.mant += (rem >> 32) - f.exp = f.exp + g.exp + 64 -} - -var uint64pow10 = [...]uint64{ - 1, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, - 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, -} - -// AssignDecimal sets f to an approximate value mantissa*10^exp. It -// returns true if the value represented by f is guaranteed to be the -// best approximation of d after being rounded to a float64 or -// float32 depending on flt. -func (f *extFloat) AssignDecimal(mantissa uint64, exp10 int, neg bool, trunc bool, flt *floatInfo) (ok bool) { - const uint64digits = 19 - const errorscale = 8 - errors := 0 // An upper bound for error, computed in errorscale*ulp. - if trunc { - // the decimal number was truncated. 
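Multiply above assembles a 64×64→128-bit product from four 32-bit cross products and rounds on the discarded half; on Go 1.12+ (newer than the Go 1.10 this repo pins) math/bits.Mul64 yields both words directly. A sketch with a hypothetical mulHiRounded; the vendored method additionally sets the exponent to f.exp + g.exp + 64:

```go
package main

import (
	"fmt"
	"math/bits"
)

// mulHiRounded returns the top 64 bits of x*y with the discarded low
// half rounded, the core step of multiplying two normalized mantissas.
func mulHiRounded(x, y uint64) uint64 {
	hi, lo := bits.Mul64(x, y)
	if lo >= 1<<63 {
		hi++ // round up on the discarded half
	}
	return hi
}

func main() {
	fmt.Println(mulHiRounded(1<<63, 4)) // 2, i.e. (2^63 * 4) >> 64
}
```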
- errors += errorscale / 2 - } - - f.mant = mantissa - f.exp = 0 - f.neg = neg - - // Multiply by powers of ten. - i := (exp10 - firstPowerOfTen) / stepPowerOfTen - if exp10 < firstPowerOfTen || i >= len(powersOfTen) { - return false - } - adjExp := (exp10 - firstPowerOfTen) % stepPowerOfTen - - // We multiply by exp%step - if adjExp < uint64digits && mantissa < uint64pow10[uint64digits-adjExp] { - // We can multiply the mantissa exactly. - f.mant *= uint64pow10[adjExp] - f.Normalize() - } else { - f.Normalize() - f.Multiply(smallPowersOfTen[adjExp]) - errors += errorscale / 2 - } - - // We multiply by 10 to the exp - exp%step. - f.Multiply(powersOfTen[i]) - if errors > 0 { - errors += 1 - } - errors += errorscale / 2 - - // Normalize - shift := f.Normalize() - errors <<= shift - - // Now f is a good approximation of the decimal. - // Check whether the error is too large: that is, if the mantissa - // is perturbated by the error, the resulting float64 will change. - // The 64 bits mantissa is 1 + 52 bits for float64 + 11 extra bits. - // - // In many cases the approximation will be good enough. - denormalExp := flt.bias - 63 - var extrabits uint - if f.exp <= denormalExp { - // f.mant * 2^f.exp is smaller than 2^(flt.bias+1). - extrabits = uint(63 - flt.mantbits + 1 + uint(denormalExp-f.exp)) - } else { - extrabits = uint(63 - flt.mantbits) - } - - halfway := uint64(1) << (extrabits - 1) - mant_extra := f.mant & (1< expMax: - i-- - default: - break Loop - } - } - // Apply the desired decimal shift on f. It will have exponent - // in the desired range. This is multiplication by 10^-exp10. - f.Multiply(powersOfTen[i]) - - return -(firstPowerOfTen + i*stepPowerOfTen), i -} - -// frexp10Many applies a common shift by a power of ten to a, b, c. -func frexp10Many(a, b, c *extFloat) (exp10 int) { - exp10, i := c.frexp10() - a.Multiply(powersOfTen[i]) - b.Multiply(powersOfTen[i]) - return -} - -// FixedDecimal stores in d the first n significant digits -// of the decimal representation of f. It returns false -// if it cannot be sure of the answer. -func (f *extFloat) FixedDecimal(d *decimalSlice, n int) bool { - if f.mant == 0 { - d.nd = 0 - d.dp = 0 - d.neg = f.neg - return true - } - if n == 0 { - panic("strconv: internal error: extFloat.FixedDecimal called with n == 0") - } - // Multiply by an appropriate power of ten to have a reasonable - // number to process. - f.Normalize() - exp10, _ := f.frexp10() - - shift := uint(-f.exp) - integer := uint32(f.mant >> shift) - fraction := f.mant - (uint64(integer) << shift) - ε := uint64(1) // ε is the uncertainty we have on the mantissa of f. - - // Write exactly n digits to d. - needed := n // how many digits are left to write. - integerDigits := 0 // the number of decimal digits of integer. - pow10 := uint64(1) // the power of ten by which f was scaled. - for i, pow := 0, uint64(1); i < 20; i++ { - if pow > uint64(integer) { - integerDigits = i - break - } - pow *= 10 - } - rest := integer - if integerDigits > needed { - // the integral part is already large, trim the last digits. - pow10 = uint64pow10[integerDigits-needed] - integer /= uint32(pow10) - rest -= integer * uint32(pow10) - } else { - rest = 0 - } - - // Write the digits of integer: the digits of rest are omitted. 
- var buf [32]byte - pos := len(buf) - for v := integer; v > 0; { - v1 := v / 10 - v -= 10 * v1 - pos-- - buf[pos] = byte(v + '0') - v = v1 - } - for i := pos; i < len(buf); i++ { - d.d[i-pos] = buf[i] - } - nd := len(buf) - pos - d.nd = nd - d.dp = integerDigits + exp10 - needed -= nd - - if needed > 0 { - if rest != 0 || pow10 != 1 { - panic("strconv: internal error, rest != 0 but needed > 0") - } - // Emit digits for the fractional part. Each time, 10*fraction - // fits in a uint64 without overflow. - for needed > 0 { - fraction *= 10 - ε *= 10 // the uncertainty scales as we multiply by ten. - if 2*ε > 1<> shift - d.d[nd] = byte(digit + '0') - fraction -= digit << shift - nd++ - needed-- - } - d.nd = nd - } - - // We have written a truncation of f (a numerator / 10^d.dp). The remaining part - // can be interpreted as a small number (< 1) to be added to the last digit of the - // numerator. - // - // If rest > 0, the amount is: - // (rest< 0 guarantees that pow10 << shift does not overflow a uint64. - // - // If rest = 0, pow10 == 1 and the amount is - // fraction / (1 << shift) - // fraction being known with a ±ε uncertainty. - // - // We pass this information to the rounding routine for adjustment. - - ok := adjustLastDigitFixed(d, uint64(rest)<= 0; i-- { - if d.d[i] != '0' { - d.nd = i + 1 - break - } - } - return true -} - -// adjustLastDigitFixed assumes d contains the representation of the integral part -// of some number, whose fractional part is num / (den << shift). The numerator -// num is only known up to an uncertainty of size ε, assumed to be less than -// (den << shift)/2. -// -// It will increase the last digit by one to account for correct rounding, typically -// when the fractional part is greater than 1/2, and will return false if ε is such -// that no correct answer can be given. -func adjustLastDigitFixed(d *decimalSlice, num, den uint64, shift uint, ε uint64) bool { - if num > den< den< den< (den< den<= 0; i-- { - if d.d[i] == '9' { - d.nd-- - } else { - break - } - } - if i < 0 { - d.d[0] = '1' - d.nd = 1 - d.dp++ - } else { - d.d[i]++ - } - return true - } - return false -} - -// ShortestDecimal stores in d the shortest decimal representation of f -// which belongs to the open interval (lower, upper), where f is supposed -// to lie. It returns false whenever the result is unsure. The implementation -// uses the Grisu3 algorithm. -func (f *extFloat) ShortestDecimal(d *decimalSlice, lower, upper *extFloat) bool { - if f.mant == 0 { - d.nd = 0 - d.dp = 0 - d.neg = f.neg - return true - } - if f.exp == 0 && *lower == *f && *lower == *upper { - // an exact integer. - var buf [24]byte - n := len(buf) - 1 - for v := f.mant; v > 0; { - v1 := v / 10 - v -= 10 * v1 - buf[n] = byte(v + '0') - n-- - v = v1 - } - nd := len(buf) - n - 1 - for i := 0; i < nd; i++ { - d.d[i] = buf[n+1+i] - } - d.nd, d.dp = nd, nd - for d.nd > 0 && d.d[d.nd-1] == '0' { - d.nd-- - } - if d.nd == 0 { - d.dp = 0 - } - d.neg = f.neg - return true - } - upper.Normalize() - // Uniformize exponents. - if f.exp > upper.exp { - f.mant <<= uint(f.exp - upper.exp) - f.exp = upper.exp - } - if lower.exp > upper.exp { - lower.mant <<= uint(lower.exp - upper.exp) - lower.exp = upper.exp - } - - exp10 := frexp10Many(lower, f, upper) - // Take a safety margin due to rounding in frexp10Many, but we lose precision. - upper.mant++ - lower.mant-- - - // The shortest representation of f is either rounded up or down, but - // in any case, it is a truncation of upper. 
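FixedDecimal plus adjustLastDigitFixed produce the first n significant digits, correctly rounded, and report false when the ±ε uncertainty could flip the last digit. Through the standard library the same fixed-digit behaviour is reachable via FormatFloat's precision argument:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 2.0 / 3.0
	fmt.Println(strconv.FormatFloat(v, 'e', 5, 64))  // 6.66667e-01: five digits, rounded
	fmt.Println(strconv.FormatFloat(v, 'e', -1, 64)) // shortest round-tripping form
}
```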
- shift := uint(-upper.exp) - integer := uint32(upper.mant >> shift) - fraction := upper.mant - (uint64(integer) << shift) - - // How far we can go down from upper until the result is wrong. - allowance := upper.mant - lower.mant - // How far we should go to get a very precise result. - targetDiff := upper.mant - f.mant - - // Count integral digits: there are at most 10. - var integerDigits int - for i, pow := 0, uint64(1); i < 20; i++ { - if pow > uint64(integer) { - integerDigits = i - break - } - pow *= 10 - } - for i := 0; i < integerDigits; i++ { - pow := uint64pow10[integerDigits-i-1] - digit := integer / uint32(pow) - d.d[i] = byte(digit + '0') - integer -= digit * uint32(pow) - // evaluate whether we should stop. - if currentDiff := uint64(integer)<> shift) - d.d[d.nd] = byte(digit + '0') - d.nd++ - fraction -= uint64(digit) << shift - if fraction < allowance*multiplier { - // We are in the admissible range. Note that if allowance is about to - // overflow, that is, allowance > 2^64/10, the condition is automatically - // true due to the limited range of fraction. - return adjustLastDigit(d, - fraction, targetDiff*multiplier, allowance*multiplier, - 1< maxDiff-ulpBinary { - // we went too far - return false - } - if d.nd == 1 && d.d[0] == '0' { - // the number has actually reached zero. - d.nd = 0 - d.dp = 0 - } - return true -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go b/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go deleted file mode 100644 index 253f83b45a..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Binary to decimal floating point conversion. -// Algorithm: -// 1) store mantissa in multiprecision decimal -// 2) shift decimal by exponent -// 3) read digits out & format - -package internal - -import "math" - -// TODO: move elsewhere? -type floatInfo struct { - mantbits uint - expbits uint - bias int -} - -var float32info = floatInfo{23, 8, -127} -var float64info = floatInfo{52, 11, -1023} - -// FormatFloat converts the floating-point number f to a string, -// according to the format fmt and precision prec. It rounds the -// result assuming that the original was obtained from a floating-point -// value of bitSize bits (32 for float32, 64 for float64). -// -// The format fmt is one of -// 'b' (-ddddp±ddd, a binary exponent), -// 'e' (-d.dddde±dd, a decimal exponent), -// 'E' (-d.ddddE±dd, a decimal exponent), -// 'f' (-ddd.dddd, no exponent), -// 'g' ('e' for large exponents, 'f' otherwise), or -// 'G' ('E' for large exponents, 'f' otherwise). -// -// The precision prec controls the number of digits -// (excluding the exponent) printed by the 'e', 'E', 'f', 'g', and 'G' formats. -// For 'e', 'E', and 'f' it is the number of digits after the decimal point. -// For 'g' and 'G' it is the total number of digits. -// The special precision -1 uses the smallest number of digits -// necessary such that ParseFloat will return f exactly. -func formatFloat(f float64, fmt byte, prec, bitSize int) string { - return string(genericFtoa(make([]byte, 0, max(prec+4, 24)), f, fmt, prec, bitSize)) -} - -// AppendFloat appends the string form of the floating-point number f, -// as generated by FormatFloat, to dst and returns the extended buffer. 
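ShortestDecimal above is the Grisu3 digit loop: it stops as soon as the emitted prefix already falls inside (lower, upper), so the output is the shortest string that still round-trips, and when it returns false the caller falls back to the slow multiprecision path. The guaranteed property, checked through the standard library:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 0.3 // stored as 0.299999999999999988897769...
	s := strconv.FormatFloat(v, 'g', -1, 64)
	back, _ := strconv.ParseFloat(s, 64)
	fmt.Println(s, back == v) // 0.3 true
}
```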
-func appendFloat(dst []byte, f float64, fmt byte, prec int, bitSize int) []byte { - return genericFtoa(dst, f, fmt, prec, bitSize) -} - -func genericFtoa(dst []byte, val float64, fmt byte, prec, bitSize int) []byte { - var bits uint64 - var flt *floatInfo - switch bitSize { - case 32: - bits = uint64(math.Float32bits(float32(val))) - flt = &float32info - case 64: - bits = math.Float64bits(val) - flt = &float64info - default: - panic("strconv: illegal AppendFloat/FormatFloat bitSize") - } - - neg := bits>>(flt.expbits+flt.mantbits) != 0 - exp := int(bits>>flt.mantbits) & (1< digs.nd && digs.nd >= digs.dp { - eprec = digs.nd - } - // %e is used if the exponent from the conversion - // is less than -4 or greater than or equal to the precision. - // if precision was the shortest possible, use precision 6 for this decision. - if shortest { - eprec = 6 - } - exp := digs.dp - 1 - if exp < -4 || exp >= eprec { - if prec > digs.nd { - prec = digs.nd - } - return fmtE(dst, neg, digs, prec-1, fmt+'e'-'g') - } - if prec > digs.dp { - prec = digs.nd - } - return fmtF(dst, neg, digs, max(prec-digs.dp, 0)) - } - - // unknown format - return append(dst, '%', fmt) -} - -// Round d (= mant * 2^exp) to the shortest number of digits -// that will let the original floating point value be precisely -// reconstructed. Size is original floating point size (64 or 32). -func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { - // If mantissa is zero, the number is zero; stop now. - if mant == 0 { - d.nd = 0 - return - } - - // Compute upper and lower such that any decimal number - // between upper and lower (possibly inclusive) - // will round to the original floating point number. - - // We may see at once that the number is already shortest. - // - // Suppose d is not denormal, so that 2^exp <= d < 10^dp. - // The closest shorter number is at least 10^(dp-nd) away. - // The lower/upper bounds computed below are at distance - // at most 2^(exp-mantbits). - // - // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), - // or equivalently log2(10)*(dp-nd) > exp-mantbits. - // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). - minexp := flt.bias + 1 // minimum possible exponent - if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { - // The number is already shortest. - return - } - - // d = mant << (exp - mantbits) - // Next highest floating point number is mant+1 << exp-mantbits. - // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. - upper := new(decimal) - upper.Assign(mant*2 + 1) - upper.Shift(exp - int(flt.mantbits) - 1) - - // d = mant << (exp - mantbits) - // Next lowest floating point number is mant-1 << exp-mantbits, - // unless mant-1 drops the significant bit and exp is not the minimum exp, - // in which case the next lowest is mant*2-1 << exp-mantbits-1. - // Either way, call it mantlo << explo-mantbits. - // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. 
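genericFtoa's fmt-byte dispatch matches strconv.FormatFloat's verbs, including the binary 'b' form and the 'g' switch between %e and %f:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	v := 1234.5678
	for _, verb := range []byte{'b', 'e', 'f', 'g'} {
		fmt.Printf("%c: %s\n", verb, strconv.FormatFloat(v, verb, -1, 64))
	}
}
```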
- var mantlo uint64 - var explo int - if mant > 1< 0 { - dst = append(dst, '.') - i := 1 - m := d.nd + prec + 1 - max(d.nd, prec+1) - for i < m { - dst = append(dst, d.d[i]) - i++ - } - for i <= prec { - dst = append(dst, '0') - i++ - } - } - - // e± - dst = append(dst, fmt) - exp := d.dp - 1 - if d.nd == 0 { // special case: 0 has exponent 0 - exp = 0 - } - if exp < 0 { - ch = '-' - exp = -exp - } else { - ch = '+' - } - dst = append(dst, ch) - - // dddd - var buf [3]byte - i := len(buf) - for exp >= 10 { - i-- - buf[i] = byte(exp%10 + '0') - exp /= 10 - } - // exp < 10 - i-- - buf[i] = byte(exp + '0') - - switch i { - case 0: - dst = append(dst, buf[0], buf[1], buf[2]) - case 1: - dst = append(dst, buf[1], buf[2]) - case 2: - // leading zeroes - dst = append(dst, '0', buf[2]) - } - return dst -} - -// %f: -ddddddd.ddddd -func fmtF(dst []byte, neg bool, d decimalSlice, prec int) []byte { - // sign - if neg { - dst = append(dst, '-') - } - - // integer, padded with zeros as needed. - if d.dp > 0 { - var i int - for i = 0; i < d.dp && i < d.nd; i++ { - dst = append(dst, d.d[i]) - } - for ; i < d.dp; i++ { - dst = append(dst, '0') - } - } else { - dst = append(dst, '0') - } - - // fraction - if prec > 0 { - dst = append(dst, '.') - for i := 0; i < prec; i++ { - ch := byte('0') - if j := d.dp + i; 0 <= j && j < d.nd { - ch = d.d[j] - } - dst = append(dst, ch) - } - } - - return dst -} - -// %b: -ddddddddp+ddd -func fmtB(dst []byte, neg bool, mant uint64, exp int, flt *floatInfo) []byte { - var buf [50]byte - w := len(buf) - exp -= int(flt.mantbits) - esign := byte('+') - if exp < 0 { - esign = '-' - exp = -exp - } - n := 0 - for exp > 0 || n < 1 { - n++ - w-- - buf[w] = byte(exp%10 + '0') - exp /= 10 - } - w-- - buf[w] = esign - w-- - buf[w] = 'p' - n = 0 - for mant > 0 || n < 1 { - n++ - w-- - buf[w] = byte(mant%10 + '0') - mant /= 10 - } - if neg { - w-- - buf[w] = '-' - } - return append(dst, buf[w:]...) -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go b/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go deleted file mode 100644 index 3e50f0c418..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/iota.go +++ /dev/null @@ -1,161 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on Go stdlib's strconv/iota.go */ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package v1 - -import ( - "io" -) - -const ( - digits = "0123456789abcdefghijklmnopqrstuvwxyz" - digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" - digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" -) - -var shifts = [len(digits) + 1]uint{ - 1 << 1: 1, - 1 << 2: 2, - 1 << 3: 3, - 1 << 4: 4, - 1 << 5: 5, -} - -var smallNumbers = [][]byte{ - []byte("0"), - []byte("1"), - []byte("2"), - []byte("3"), - []byte("4"), - []byte("5"), - []byte("6"), - []byte("7"), - []byte("8"), - []byte("9"), - []byte("10"), -} - -type FormatBitsWriter interface { - io.Writer - io.ByteWriter -} - -type FormatBitsScratch struct{} - -// -// DEPRECIATED: `scratch` is no longer used, FormatBits2 is available. -// -// FormatBits computes the string representation of u in the given base. -// If neg is set, u is treated as negative int64 value. If append_ is -// set, the string is appended to dst and the resulting byte slice is -// returned as the first result value; otherwise the string is returned -// as the second result value. -// -func FormatBits(scratch *FormatBitsScratch, dst FormatBitsWriter, u uint64, base int, neg bool) { - FormatBits2(dst, u, base, neg) -} - -// FormatBits2 computes the string representation of u in the given base. -// If neg is set, u is treated as negative int64 value. If append_ is -// set, the string is appended to dst and the resulting byte slice is -// returned as the first result value; otherwise the string is returned -// as the second result value. -// -func FormatBits2(dst FormatBitsWriter, u uint64, base int, neg bool) { - if base < 2 || base > len(digits) { - panic("strconv: illegal AppendInt/FormatInt base") - } - // fast path for small common numbers - if u <= 10 { - if neg { - dst.WriteByte('-') - } - dst.Write(smallNumbers[u]) - return - } - - // 2 <= base && base <= len(digits) - - var a = makeSlice(65) - // var a [64 + 1]byte // +1 for sign of 64bit value in base 2 - i := len(a) - - if neg { - u = -u - } - - // convert bits - if base == 10 { - // common case: use constants for / and % because - // the compiler can optimize it into a multiply+shift, - // and unroll loop - for u >= 100 { - i -= 2 - q := u / 100 - j := uintptr(u - q*100) - a[i+1] = digits01[j] - a[i+0] = digits10[j] - u = q - } - if u >= 10 { - i-- - q := u / 10 - a[i] = digits[uintptr(u-q*10)] - u = q - } - - } else if s := shifts[base]; s > 0 { - // base is power of 2: use shifts and masks instead of / and % - b := uint64(base) - m := uintptr(b) - 1 // == 1<= b { - i-- - a[i] = digits[uintptr(u)&m] - u >>= s - } - - } else { - // general case - b := uint64(base) - for u >= b { - i-- - a[i] = digits[uintptr(u%b)] - u /= b - } - } - - // u < base - i-- - a[i] = digits[uintptr(u)] - - // add sign, if any - if neg { - i-- - a[i] = '-' - } - - dst.Write(a[i:]) - - Pool(a) - - return -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go b/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go deleted file mode 100644 index 513b45d570..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/jsonstring.go +++ /dev/null @@ -1,512 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
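FormatBits2 above peels two base-10 digits per division using the paired digits01/digits10 tables: for an index j in 0..99, one table yields the ones digit and the other the tens digit. A standalone version of that loop (utoa is a hypothetical name):

```go
package main

import "fmt"

const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"
const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999"

// utoa formats u in base 10, two digits per division.
func utoa(u uint64) string {
	var a [20]byte // max uint64 has 20 digits
	i := len(a)
	for u >= 100 {
		q := u / 100
		j := u - q*100
		i -= 2
		a[i+1] = digits01[j] // ones digit of j
		a[i+0] = digits10[j] // tens digit of j
		u = q
	}
	// final one or two digits
	if u >= 10 {
		i -= 2
		a[i+1] = digits01[u]
		a[i+0] = digits10[u]
	} else {
		i--
		a[i] = byte(u + '0')
	}
	return string(a[i:])
}

func main() {
	fmt.Println(utoa(1234567890)) // 1234567890
}
```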
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on Go stdlib's encoding/json/encode.go */ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package v1 - -import ( - "io" - "unicode/utf8" - "strconv" - "unicode/utf16" - "unicode" -) - -const hex = "0123456789abcdef" - -type JsonStringWriter interface { - io.Writer - io.ByteWriter - stringWriter -} - -func WriteJsonString(buf JsonStringWriter, s string) { - WriteJson(buf, []byte(s)) -} - -/** - * Function ported from encoding/json: func (e *encodeState) string(s string) (int, error) - */ -func WriteJson(buf JsonStringWriter, s []byte) { - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - /* - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - */ - if lt[b] == true { - i++ - continue - } - - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - // This encodes bytes < 0x20 except for \n and \r, - // as well as < and >. The latter are escaped because they - // can lead to security holes when user-controlled strings - // are rendered into JSON and served to some browsers. - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. - // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} - -// UnquoteBytes will decode []byte containing json string to go string -// ported from encoding/json/decode.go -func UnquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. 
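The fast path above escapes '<', '>' and '&' (the false entries at indexes 60, 62 and 38 in the lt table further down) so output stays safe when embedded in HTML, the same trade-off encoding/json makes by default:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var safe, raw bytes.Buffer

	json.NewEncoder(&safe).Encode("<script>") // HTML-escaped by default

	enc := json.NewEncoder(&raw)
	enc.SetEscapeHTML(false)
	enc.Encode("<script>")

	fmt.Print(safe.String()) // "\u003cscript\u003e"
	fmt.Print(raw.String())  // "<script>"
}
```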
- r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// TODO(pquerna): consider combining wibth the normal byte mask. 
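In the \uXXXX branch above, getu4 reads four hex digits and utf16.DecodeRune stitches surrogate halves back into a single code point; the same decoding in isolation:

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// A JSON string "\uD83D\uDE00" carries one code point, U+1F600.
	r := utf16.DecodeRune(0xD83D, 0xDE00)
	fmt.Printf("%c = U+%X\n", r, r) // 😀 = U+1F600
}
```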
-var lt [256]bool = [256]bool{ - false, /* 0 */ - false, /* 1 */ - false, /* 2 */ - false, /* 3 */ - false, /* 4 */ - false, /* 5 */ - false, /* 6 */ - false, /* 7 */ - false, /* 8 */ - false, /* 9 */ - false, /* 10 */ - false, /* 11 */ - false, /* 12 */ - false, /* 13 */ - false, /* 14 */ - false, /* 15 */ - false, /* 16 */ - false, /* 17 */ - false, /* 18 */ - false, /* 19 */ - false, /* 20 */ - false, /* 21 */ - false, /* 22 */ - false, /* 23 */ - false, /* 24 */ - false, /* 25 */ - false, /* 26 */ - false, /* 27 */ - false, /* 28 */ - false, /* 29 */ - false, /* 30 */ - false, /* 31 */ - true, /* 32 */ - true, /* 33 */ - false, /* 34 */ - true, /* 35 */ - true, /* 36 */ - true, /* 37 */ - false, /* 38 */ - true, /* 39 */ - true, /* 40 */ - true, /* 41 */ - true, /* 42 */ - true, /* 43 */ - true, /* 44 */ - true, /* 45 */ - true, /* 46 */ - true, /* 47 */ - true, /* 48 */ - true, /* 49 */ - true, /* 50 */ - true, /* 51 */ - true, /* 52 */ - true, /* 53 */ - true, /* 54 */ - true, /* 55 */ - true, /* 56 */ - true, /* 57 */ - true, /* 58 */ - true, /* 59 */ - false, /* 60 */ - true, /* 61 */ - false, /* 62 */ - true, /* 63 */ - true, /* 64 */ - true, /* 65 */ - true, /* 66 */ - true, /* 67 */ - true, /* 68 */ - true, /* 69 */ - true, /* 70 */ - true, /* 71 */ - true, /* 72 */ - true, /* 73 */ - true, /* 74 */ - true, /* 75 */ - true, /* 76 */ - true, /* 77 */ - true, /* 78 */ - true, /* 79 */ - true, /* 80 */ - true, /* 81 */ - true, /* 82 */ - true, /* 83 */ - true, /* 84 */ - true, /* 85 */ - true, /* 86 */ - true, /* 87 */ - true, /* 88 */ - true, /* 89 */ - true, /* 90 */ - true, /* 91 */ - false, /* 92 */ - true, /* 93 */ - true, /* 94 */ - true, /* 95 */ - true, /* 96 */ - true, /* 97 */ - true, /* 98 */ - true, /* 99 */ - true, /* 100 */ - true, /* 101 */ - true, /* 102 */ - true, /* 103 */ - true, /* 104 */ - true, /* 105 */ - true, /* 106 */ - true, /* 107 */ - true, /* 108 */ - true, /* 109 */ - true, /* 110 */ - true, /* 111 */ - true, /* 112 */ - true, /* 113 */ - true, /* 114 */ - true, /* 115 */ - true, /* 116 */ - true, /* 117 */ - true, /* 118 */ - true, /* 119 */ - true, /* 120 */ - true, /* 121 */ - true, /* 122 */ - true, /* 123 */ - true, /* 124 */ - true, /* 125 */ - true, /* 126 */ - true, /* 127 */ - true, /* 128 */ - true, /* 129 */ - true, /* 130 */ - true, /* 131 */ - true, /* 132 */ - true, /* 133 */ - true, /* 134 */ - true, /* 135 */ - true, /* 136 */ - true, /* 137 */ - true, /* 138 */ - true, /* 139 */ - true, /* 140 */ - true, /* 141 */ - true, /* 142 */ - true, /* 143 */ - true, /* 144 */ - true, /* 145 */ - true, /* 146 */ - true, /* 147 */ - true, /* 148 */ - true, /* 149 */ - true, /* 150 */ - true, /* 151 */ - true, /* 152 */ - true, /* 153 */ - true, /* 154 */ - true, /* 155 */ - true, /* 156 */ - true, /* 157 */ - true, /* 158 */ - true, /* 159 */ - true, /* 160 */ - true, /* 161 */ - true, /* 162 */ - true, /* 163 */ - true, /* 164 */ - true, /* 165 */ - true, /* 166 */ - true, /* 167 */ - true, /* 168 */ - true, /* 169 */ - true, /* 170 */ - true, /* 171 */ - true, /* 172 */ - true, /* 173 */ - true, /* 174 */ - true, /* 175 */ - true, /* 176 */ - true, /* 177 */ - true, /* 178 */ - true, /* 179 */ - true, /* 180 */ - true, /* 181 */ - true, /* 182 */ - true, /* 183 */ - true, /* 184 */ - true, /* 185 */ - true, /* 186 */ - true, /* 187 */ - true, /* 188 */ - true, /* 189 */ - true, /* 190 */ - true, /* 191 */ - true, /* 192 */ - true, /* 193 */ - true, /* 194 */ - true, /* 195 */ - true, /* 196 */ - true, /* 197 */ - true, /* 198 */ - true, /* 199 
*/ - true, /* 200 */ - true, /* 201 */ - true, /* 202 */ - true, /* 203 */ - true, /* 204 */ - true, /* 205 */ - true, /* 206 */ - true, /* 207 */ - true, /* 208 */ - true, /* 209 */ - true, /* 210 */ - true, /* 211 */ - true, /* 212 */ - true, /* 213 */ - true, /* 214 */ - true, /* 215 */ - true, /* 216 */ - true, /* 217 */ - true, /* 218 */ - true, /* 219 */ - true, /* 220 */ - true, /* 221 */ - true, /* 222 */ - true, /* 223 */ - true, /* 224 */ - true, /* 225 */ - true, /* 226 */ - true, /* 227 */ - true, /* 228 */ - true, /* 229 */ - true, /* 230 */ - true, /* 231 */ - true, /* 232 */ - true, /* 233 */ - true, /* 234 */ - true, /* 235 */ - true, /* 236 */ - true, /* 237 */ - true, /* 238 */ - true, /* 239 */ - true, /* 240 */ - true, /* 241 */ - true, /* 242 */ - true, /* 243 */ - true, /* 244 */ - true, /* 245 */ - true, /* 246 */ - true, /* 247 */ - true, /* 248 */ - true, /* 249 */ - true, /* 250 */ - true, /* 251 */ - true, /* 252 */ - true, /* 253 */ - true, /* 254 */ - true, /* 255 */ -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go b/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go deleted file mode 100644 index 8ffd54be53..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/lexer.go +++ /dev/null @@ -1,937 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/* Portions of this file are on derived from yajl: */ -/* - * Copyright (c) 2007-2014, Lloyd Hilaiel - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -package v1 - -import ( - "errors" - "fmt" - "io" -) - -type FFParseState int - -const ( - FFParse_map_start FFParseState = iota - FFParse_want_key - FFParse_want_colon - FFParse_want_value - FFParse_after_value -) - -type FFTok int - -const ( - FFTok_init FFTok = iota - FFTok_bool FFTok = iota - FFTok_colon FFTok = iota - FFTok_comma FFTok = iota - FFTok_eof FFTok = iota - FFTok_error FFTok = iota - FFTok_left_brace FFTok = iota - FFTok_left_bracket FFTok = iota - FFTok_null FFTok = iota - FFTok_right_brace FFTok = iota - FFTok_right_bracket FFTok = iota - - /* we differentiate between integers and doubles to allow the - * parser to interpret the number without re-scanning */ - FFTok_integer FFTok = iota - FFTok_double FFTok = iota - - FFTok_string FFTok = iota - - /* comment tokens are not currently returned to the parser, ever */ - FFTok_comment FFTok = iota -) - -type FFErr int - -const ( - FFErr_e_ok FFErr = iota - FFErr_io FFErr = iota - FFErr_string_invalid_utf8 FFErr = iota - FFErr_string_invalid_escaped_char FFErr = iota - FFErr_string_invalid_json_char FFErr = iota - FFErr_string_invalid_hex_char FFErr = iota - FFErr_invalid_char FFErr = iota - FFErr_invalid_string FFErr = iota - FFErr_missing_integer_after_decimal FFErr = iota - FFErr_missing_integer_after_exponent FFErr = iota - FFErr_missing_integer_after_minus FFErr = iota - FFErr_unallowed_comment FFErr = iota - FFErr_incomplete_comment FFErr = iota - FFErr_unexpected_token_type FFErr = iota // TODO: improve this error -) - -type FFLexer struct { - reader *ffReader - Output DecodingBuffer - Token FFTok - Error FFErr - BigError error - // TODO: convert all of this to an interface - lastCurrentChar int - captureAll bool - buf Buffer -} - -func NewFFLexer(input []byte) *FFLexer { - fl := &FFLexer{ - Token: FFTok_init, - Error: FFErr_e_ok, - reader: newffReader(input), - Output: &Buffer{}, - } - // TODO: guess size? - //fl.Output.Grow(64) - return fl -} - -type LexerError struct { - offset int - line int - char int - err error -} - -// Reset the Lexer and add new input. 
-func (ffl *FFLexer) Reset(input []byte) { - ffl.Token = FFTok_init - ffl.Error = FFErr_e_ok - ffl.BigError = nil - ffl.reader.Reset(input) - ffl.lastCurrentChar = 0 - ffl.Output.Reset() -} - -func (le *LexerError) Error() string { - return fmt.Sprintf(`ffjson error: (%T)%s offset=%d line=%d char=%d`, - le.err, le.err.Error(), - le.offset, le.line, le.char) -} - -func (ffl *FFLexer) WrapErr(err error) error { - line, char := ffl.reader.PosWithLine() - // TOOD: calcualte lines/characters based on offset - return &LexerError{ - offset: ffl.reader.Pos(), - line: line, - char: char, - err: err, - } -} - -func (ffl *FFLexer) scanReadByte() (byte, error) { - var c byte - var err error - if ffl.captureAll { - c, err = ffl.reader.ReadByte() - } else { - c, err = ffl.reader.ReadByteNoWS() - } - - if err != nil { - ffl.Error = FFErr_io - ffl.BigError = err - return 0, err - } - - return c, nil -} - -func (ffl *FFLexer) readByte() (byte, error) { - - c, err := ffl.reader.ReadByte() - if err != nil { - ffl.Error = FFErr_io - ffl.BigError = err - return 0, err - } - - return c, nil -} - -func (ffl *FFLexer) unreadByte() { - ffl.reader.UnreadByte() -} - -func (ffl *FFLexer) wantBytes(want []byte, iftrue FFTok) FFTok { - startPos := ffl.reader.Pos() - for _, b := range want { - c, err := ffl.readByte() - - if err != nil { - return FFTok_error - } - - if c != b { - ffl.unreadByte() - // fmt.Printf("wanted bytes: %s\n", string(want)) - // TODO(pquerna): thsi is a bad error message - ffl.Error = FFErr_invalid_string - return FFTok_error - } - } - - endPos := ffl.reader.Pos() - ffl.Output.Write(ffl.reader.Slice(startPos, endPos)) - return iftrue -} - -func (ffl *FFLexer) lexComment() FFTok { - c, err := ffl.readByte() - if err != nil { - return FFTok_error - } - - if c == '/' { - // a // comment, scan until line ends. - for { - c, err := ffl.readByte() - if err != nil { - return FFTok_error - } - - if c == '\n' { - return FFTok_comment - } - } - } else if c == '*' { - // a /* */ comment, scan */ - for { - c, err := ffl.readByte() - if err != nil { - return FFTok_error - } - - if c == '*' { - c, err := ffl.readByte() - - if err != nil { - return FFTok_error - } - - if c == '/' { - return FFTok_comment - } - - ffl.Error = FFErr_incomplete_comment - return FFTok_error - } - } - } else { - ffl.Error = FFErr_incomplete_comment - return FFTok_error - } -} - -func (ffl *FFLexer) lexString() FFTok { - if ffl.captureAll { - ffl.buf.Reset() - err := ffl.reader.SliceString(&ffl.buf) - - if err != nil { - ffl.BigError = err - return FFTok_error - } - - WriteJson(ffl.Output, ffl.buf.Bytes()) - - return FFTok_string - } else { - err := ffl.reader.SliceString(ffl.Output) - - if err != nil { - ffl.BigError = err - return FFTok_error - } - - return FFTok_string - } -} - -func (ffl *FFLexer) lexNumber() FFTok { - var numRead int = 0 - tok := FFTok_integer - startPos := ffl.reader.Pos() - - c, err := ffl.readByte() - if err != nil { - return FFTok_error - } - - /* optional leading minus */ - if c == '-' { - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } - - /* a single zero, or a series of integers */ - if c == '0' { - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } else if c >= '1' && c <= '9' { - for c >= '0' && c <= '9' { - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } - } else { - ffl.unreadByte() - ffl.Error = FFErr_missing_integer_after_minus - return FFTok_error - } - - if c == '.' 
{ - numRead = 0 - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - - for c >= '0' && c <= '9' { - numRead++ - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } - - if numRead == 0 { - ffl.unreadByte() - - ffl.Error = FFErr_missing_integer_after_decimal - return FFTok_error - } - - tok = FFTok_double - } - - /* optional exponent (indicates this is floating point) */ - if c == 'e' || c == 'E' { - numRead = 0 - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - - /* optional sign */ - if c == '+' || c == '-' { - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } - - for c >= '0' && c <= '9' { - numRead++ - c, err = ffl.readByte() - if err != nil { - return FFTok_error - } - } - - if numRead == 0 { - ffl.Error = FFErr_missing_integer_after_exponent - return FFTok_error - } - - tok = FFTok_double - } - - ffl.unreadByte() - - endPos := ffl.reader.Pos() - ffl.Output.Write(ffl.reader.Slice(startPos, endPos)) - return tok -} - -var true_bytes = []byte{'r', 'u', 'e'} -var false_bytes = []byte{'a', 'l', 's', 'e'} -var null_bytes = []byte{'u', 'l', 'l'} - -func (ffl *FFLexer) Scan() FFTok { - tok := FFTok_error - if ffl.captureAll == false { - ffl.Output.Reset() - } - ffl.Token = FFTok_init - - for { - c, err := ffl.scanReadByte() - if err != nil { - if err == io.EOF { - return FFTok_eof - } else { - return FFTok_error - } - } - - switch c { - case '{': - tok = FFTok_left_bracket - if ffl.captureAll { - ffl.Output.WriteByte('{') - } - goto lexed - case '}': - tok = FFTok_right_bracket - if ffl.captureAll { - ffl.Output.WriteByte('}') - } - goto lexed - case '[': - tok = FFTok_left_brace - if ffl.captureAll { - ffl.Output.WriteByte('[') - } - goto lexed - case ']': - tok = FFTok_right_brace - if ffl.captureAll { - ffl.Output.WriteByte(']') - } - goto lexed - case ',': - tok = FFTok_comma - if ffl.captureAll { - ffl.Output.WriteByte(',') - } - goto lexed - case ':': - tok = FFTok_colon - if ffl.captureAll { - ffl.Output.WriteByte(':') - } - goto lexed - case '\t', '\n', '\v', '\f', '\r', ' ': - if ffl.captureAll { - ffl.Output.WriteByte(c) - } - break - case 't': - ffl.Output.WriteByte('t') - tok = ffl.wantBytes(true_bytes, FFTok_bool) - goto lexed - case 'f': - ffl.Output.WriteByte('f') - tok = ffl.wantBytes(false_bytes, FFTok_bool) - goto lexed - case 'n': - ffl.Output.WriteByte('n') - tok = ffl.wantBytes(null_bytes, FFTok_null) - goto lexed - case '"': - tok = ffl.lexString() - goto lexed - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - ffl.unreadByte() - tok = ffl.lexNumber() - goto lexed - case '/': - tok = ffl.lexComment() - goto lexed - default: - tok = FFTok_error - ffl.Error = FFErr_invalid_char - } - } - -lexed: - ffl.Token = tok - return tok -} - -func (ffl *FFLexer) scanField(start FFTok, capture bool) ([]byte, error) { - switch start { - case FFTok_left_brace, - FFTok_left_bracket: - { - end := FFTok_right_brace - if start == FFTok_left_bracket { - end = FFTok_right_bracket - if capture { - ffl.Output.WriteByte('{') - } - } else { - if capture { - ffl.Output.WriteByte('[') - } - } - - depth := 1 - if capture { - ffl.captureAll = true - } - // TODO: work. 
- scanloop: - for { - tok := ffl.Scan() - //fmt.Printf("capture-token: %v end: %v depth: %v\n", tok, end, depth) - switch tok { - case FFTok_eof: - return nil, errors.New("ffjson: unexpected EOF") - case FFTok_error: - if ffl.BigError != nil { - return nil, ffl.BigError - } - return nil, ffl.Error.ToError() - case end: - depth-- - if depth == 0 { - break scanloop - } - case start: - depth++ - } - } - - if capture { - ffl.captureAll = false - } - - if capture { - return ffl.Output.Bytes(), nil - } else { - return nil, nil - } - } - case FFTok_bool, - FFTok_integer, - FFTok_null, - FFTok_double: - // simple value, return it. - if capture { - return ffl.Output.Bytes(), nil - } else { - return nil, nil - } - - case FFTok_string: - //TODO(pquerna): so, other users expect this to be a quoted string :( - if capture { - ffl.buf.Reset() - WriteJson(&ffl.buf, ffl.Output.Bytes()) - return ffl.buf.Bytes(), nil - } else { - return nil, nil - } - } - - return nil, fmt.Errorf("ffjson: invalid capture type: %v", start) -} - -// Captures an entire field value, including recursive objects, -// and converts them to a []byte suitable to pass to a sub-object's -// UnmarshalJSON -func (ffl *FFLexer) CaptureField(start FFTok) ([]byte, error) { - return ffl.scanField(start, true) -} - -func (ffl *FFLexer) SkipField(start FFTok) error { - _, err := ffl.scanField(start, false) - return err -} - -// TODO(pquerna): return line number and offset. -func (err FFErr) ToError() error { - switch err { - case FFErr_e_ok: - return nil - case FFErr_io: - return errors.New("ffjson: IO error") - case FFErr_string_invalid_utf8: - return errors.New("ffjson: string with invalid UTF-8 sequence") - case FFErr_string_invalid_escaped_char: - return errors.New("ffjson: string with invalid escaped character") - case FFErr_string_invalid_json_char: - return errors.New("ffjson: string with invalid JSON character") - case FFErr_string_invalid_hex_char: - return errors.New("ffjson: string with invalid hex character") - case FFErr_invalid_char: - return errors.New("ffjson: invalid character") - case FFErr_invalid_string: - return errors.New("ffjson: invalid string") - case FFErr_missing_integer_after_decimal: - return errors.New("ffjson: missing integer after decimal") - case FFErr_missing_integer_after_exponent: - return errors.New("ffjson: missing integer after exponent") - case FFErr_missing_integer_after_minus: - return errors.New("ffjson: missing integer after minus") - case FFErr_unallowed_comment: - return errors.New("ffjson: unallowed comment") - case FFErr_incomplete_comment: - return errors.New("ffjson: incomplete comment") - case FFErr_unexpected_token_type: - return errors.New("ffjson: unexpected token sequence") - } - - panic(fmt.Sprintf("unknown error type: %v ", err)) -} - -func (state FFParseState) String() string { - switch state { - case FFParse_map_start: - return "map:start" - case FFParse_want_key: - return "want_key" - case FFParse_want_colon: - return "want_colon" - case FFParse_want_value: - return "want_value" - case FFParse_after_value: - return "after_value" - } - - panic(fmt.Sprintf("unknown parse state: %d", int(state))) -} - -func (tok FFTok) String() string { - switch tok { - case FFTok_init: - return "tok:init" - case FFTok_bool: - return "tok:bool" - case FFTok_colon: - return "tok:colon" - case FFTok_comma: - return "tok:comma" - case FFTok_eof: - return "tok:eof" - case FFTok_error: - return "tok:error" - case FFTok_left_brace: - return "tok:left_brace" - case FFTok_left_bracket: - return "tok:left_bracket" 
- case FFTok_null: - return "tok:null" - case FFTok_right_brace: - return "tok:right_brace" - case FFTok_right_bracket: - return "tok:right_bracket" - case FFTok_integer: - return "tok:integer" - case FFTok_double: - return "tok:double" - case FFTok_string: - return "tok:string" - case FFTok_comment: - return "comment" - } - - panic(fmt.Sprintf("unknown token: %d", int(tok))) -} - -/* a lookup table which lets us quickly determine three things: - * cVEC - valid escaped control char - * note. the solidus '/' may be escaped or not. - * cIJC - invalid json char - * cVHC - valid hex char - * cNFP - needs further processing (from a string scanning perspective) - * cNUC - needs utf8 checking when enabled (from a string scanning perspective) - */ - -const ( - cVEC int8 = 0x01 - cIJC int8 = 0x02 - cVHC int8 = 0x04 - cNFP int8 = 0x08 - cNUC int8 = 0x10 -) - -var byteLookupTable [256]int8 = [256]int8{ - cIJC, /* 0 */ - cIJC, /* 1 */ - cIJC, /* 2 */ - cIJC, /* 3 */ - cIJC, /* 4 */ - cIJC, /* 5 */ - cIJC, /* 6 */ - cIJC, /* 7 */ - cIJC, /* 8 */ - cIJC, /* 9 */ - cIJC, /* 10 */ - cIJC, /* 11 */ - cIJC, /* 12 */ - cIJC, /* 13 */ - cIJC, /* 14 */ - cIJC, /* 15 */ - cIJC, /* 16 */ - cIJC, /* 17 */ - cIJC, /* 18 */ - cIJC, /* 19 */ - cIJC, /* 20 */ - cIJC, /* 21 */ - cIJC, /* 22 */ - cIJC, /* 23 */ - cIJC, /* 24 */ - cIJC, /* 25 */ - cIJC, /* 26 */ - cIJC, /* 27 */ - cIJC, /* 28 */ - cIJC, /* 29 */ - cIJC, /* 30 */ - cIJC, /* 31 */ - 0, /* 32 */ - 0, /* 33 */ - cVEC | cIJC | cNFP, /* 34 */ - 0, /* 35 */ - 0, /* 36 */ - 0, /* 37 */ - 0, /* 38 */ - 0, /* 39 */ - 0, /* 40 */ - 0, /* 41 */ - 0, /* 42 */ - 0, /* 43 */ - 0, /* 44 */ - 0, /* 45 */ - 0, /* 46 */ - cVEC, /* 47 */ - cVHC, /* 48 */ - cVHC, /* 49 */ - cVHC, /* 50 */ - cVHC, /* 51 */ - cVHC, /* 52 */ - cVHC, /* 53 */ - cVHC, /* 54 */ - cVHC, /* 55 */ - cVHC, /* 56 */ - cVHC, /* 57 */ - 0, /* 58 */ - 0, /* 59 */ - 0, /* 60 */ - 0, /* 61 */ - 0, /* 62 */ - 0, /* 63 */ - 0, /* 64 */ - cVHC, /* 65 */ - cVHC, /* 66 */ - cVHC, /* 67 */ - cVHC, /* 68 */ - cVHC, /* 69 */ - cVHC, /* 70 */ - 0, /* 71 */ - 0, /* 72 */ - 0, /* 73 */ - 0, /* 74 */ - 0, /* 75 */ - 0, /* 76 */ - 0, /* 77 */ - 0, /* 78 */ - 0, /* 79 */ - 0, /* 80 */ - 0, /* 81 */ - 0, /* 82 */ - 0, /* 83 */ - 0, /* 84 */ - 0, /* 85 */ - 0, /* 86 */ - 0, /* 87 */ - 0, /* 88 */ - 0, /* 89 */ - 0, /* 90 */ - 0, /* 91 */ - cVEC | cIJC | cNFP, /* 92 */ - 0, /* 93 */ - 0, /* 94 */ - 0, /* 95 */ - 0, /* 96 */ - cVHC, /* 97 */ - cVEC | cVHC, /* 98 */ - cVHC, /* 99 */ - cVHC, /* 100 */ - cVHC, /* 101 */ - cVEC | cVHC, /* 102 */ - 0, /* 103 */ - 0, /* 104 */ - 0, /* 105 */ - 0, /* 106 */ - 0, /* 107 */ - 0, /* 108 */ - 0, /* 109 */ - cVEC, /* 110 */ - 0, /* 111 */ - 0, /* 112 */ - 0, /* 113 */ - cVEC, /* 114 */ - 0, /* 115 */ - cVEC, /* 116 */ - 0, /* 117 */ - 0, /* 118 */ - 0, /* 119 */ - 0, /* 120 */ - 0, /* 121 */ - 0, /* 122 */ - 0, /* 123 */ - 0, /* 124 */ - 0, /* 125 */ - 0, /* 126 */ - 0, /* 127 */ - cNUC, /* 128 */ - cNUC, /* 129 */ - cNUC, /* 130 */ - cNUC, /* 131 */ - cNUC, /* 132 */ - cNUC, /* 133 */ - cNUC, /* 134 */ - cNUC, /* 135 */ - cNUC, /* 136 */ - cNUC, /* 137 */ - cNUC, /* 138 */ - cNUC, /* 139 */ - cNUC, /* 140 */ - cNUC, /* 141 */ - cNUC, /* 142 */ - cNUC, /* 143 */ - cNUC, /* 144 */ - cNUC, /* 145 */ - cNUC, /* 146 */ - cNUC, /* 147 */ - cNUC, /* 148 */ - cNUC, /* 149 */ - cNUC, /* 150 */ - cNUC, /* 151 */ - cNUC, /* 152 */ - cNUC, /* 153 */ - cNUC, /* 154 */ - cNUC, /* 155 */ - cNUC, /* 156 */ - cNUC, /* 157 */ - cNUC, /* 158 */ - cNUC, /* 159 */ - cNUC, /* 160 */ - cNUC, /* 161 */ - 
cNUC, /* 162 */ - cNUC, /* 163 */ - cNUC, /* 164 */ - cNUC, /* 165 */ - cNUC, /* 166 */ - cNUC, /* 167 */ - cNUC, /* 168 */ - cNUC, /* 169 */ - cNUC, /* 170 */ - cNUC, /* 171 */ - cNUC, /* 172 */ - cNUC, /* 173 */ - cNUC, /* 174 */ - cNUC, /* 175 */ - cNUC, /* 176 */ - cNUC, /* 177 */ - cNUC, /* 178 */ - cNUC, /* 179 */ - cNUC, /* 180 */ - cNUC, /* 181 */ - cNUC, /* 182 */ - cNUC, /* 183 */ - cNUC, /* 184 */ - cNUC, /* 185 */ - cNUC, /* 186 */ - cNUC, /* 187 */ - cNUC, /* 188 */ - cNUC, /* 189 */ - cNUC, /* 190 */ - cNUC, /* 191 */ - cNUC, /* 192 */ - cNUC, /* 193 */ - cNUC, /* 194 */ - cNUC, /* 195 */ - cNUC, /* 196 */ - cNUC, /* 197 */ - cNUC, /* 198 */ - cNUC, /* 199 */ - cNUC, /* 200 */ - cNUC, /* 201 */ - cNUC, /* 202 */ - cNUC, /* 203 */ - cNUC, /* 204 */ - cNUC, /* 205 */ - cNUC, /* 206 */ - cNUC, /* 207 */ - cNUC, /* 208 */ - cNUC, /* 209 */ - cNUC, /* 210 */ - cNUC, /* 211 */ - cNUC, /* 212 */ - cNUC, /* 213 */ - cNUC, /* 214 */ - cNUC, /* 215 */ - cNUC, /* 216 */ - cNUC, /* 217 */ - cNUC, /* 218 */ - cNUC, /* 219 */ - cNUC, /* 220 */ - cNUC, /* 221 */ - cNUC, /* 222 */ - cNUC, /* 223 */ - cNUC, /* 224 */ - cNUC, /* 225 */ - cNUC, /* 226 */ - cNUC, /* 227 */ - cNUC, /* 228 */ - cNUC, /* 229 */ - cNUC, /* 230 */ - cNUC, /* 231 */ - cNUC, /* 232 */ - cNUC, /* 233 */ - cNUC, /* 234 */ - cNUC, /* 235 */ - cNUC, /* 236 */ - cNUC, /* 237 */ - cNUC, /* 238 */ - cNUC, /* 239 */ - cNUC, /* 240 */ - cNUC, /* 241 */ - cNUC, /* 242 */ - cNUC, /* 243 */ - cNUC, /* 244 */ - cNUC, /* 245 */ - cNUC, /* 246 */ - cNUC, /* 247 */ - cNUC, /* 248 */ - cNUC, /* 249 */ - cNUC, /* 250 */ - cNUC, /* 251 */ - cNUC, /* 252 */ - cNUC, /* 253 */ - cNUC, /* 254 */ - cNUC, /* 255 */ -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go deleted file mode 100644 index 0f22c469d6..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/reader.go +++ /dev/null @@ -1,512 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v1 - -import ( - "fmt" - "io" - "unicode" - "unicode/utf16" -) - -const sliceStringMask = cIJC | cNFP - -type ffReader struct { - s []byte - i int - l int -} - -func newffReader(d []byte) *ffReader { - return &ffReader{ - s: d, - i: 0, - l: len(d), - } -} - -func (r *ffReader) Slice(start, stop int) []byte { - return r.s[start:stop] -} - -func (r *ffReader) Pos() int { - return r.i -} - -// Reset the reader, and add new input. -func (r *ffReader) Reset(d []byte) { - r.s = d - r.i = 0 - r.l = len(d) -} - -// Calcuates the Position with line and line offset, -// because this isn't counted for performance reasons, -// it will iterate the buffer from the beginning, and should -// only be used in error-paths. 
-func (r *ffReader) PosWithLine() (int, int) { - currentLine := 1 - currentChar := 0 - - for i := 0; i < r.i; i++ { - c := r.s[i] - currentChar++ - if c == '\n' { - currentLine++ - currentChar = 0 - } - } - - return currentLine, currentChar -} - -func (r *ffReader) ReadByteNoWS() (byte, error) { - if r.i >= r.l { - return 0, io.EOF - } - - j := r.i - - for { - c := r.s[j] - j++ - - // inline whitespace parsing gives another ~8% performance boost - // for many kinds of nicely indented JSON. - // ... and using a [255]bool instead of multiple ifs, gives another 2% - /* - if c != '\t' && - c != '\n' && - c != '\v' && - c != '\f' && - c != '\r' && - c != ' ' { - r.i = j - return c, nil - } - */ - if whitespaceLookupTable[c] == false { - r.i = j - return c, nil - } - - if j >= r.l { - return 0, io.EOF - } - } -} - -func (r *ffReader) ReadByte() (byte, error) { - if r.i >= r.l { - return 0, io.EOF - } - - r.i++ - - return r.s[r.i-1], nil -} - -func (r *ffReader) UnreadByte() error { - if r.i <= 0 { - panic("ffReader.UnreadByte: at beginning of slice") - } - r.i-- - return nil -} - -func (r *ffReader) readU4(j int) (rune, error) { - - var u4 [4]byte - for i := 0; i < 4; i++ { - if j >= r.l { - return -1, io.EOF - } - c := r.s[j] - if byteLookupTable[c]&cVHC != 0 { - u4[i] = c - j++ - continue - } else { - // TODO(pquerna): handle errors better. layering violation. - return -1, fmt.Errorf("lex_string_invalid_hex_char: %v %v", c, string(u4[:])) - } - } - - // TODO(pquerna): utf16.IsSurrogate - rr, err := ParseUint(u4[:], 16, 64) - if err != nil { - return -1, err - } - return rune(rr), nil -} - -func (r *ffReader) handleEscaped(c byte, j int, out DecodingBuffer) (int, error) { - if j >= r.l { - return 0, io.EOF - } - - c = r.s[j] - j++ - - if c == 'u' { - ru, err := r.readU4(j) - if err != nil { - return 0, err - } - - if utf16.IsSurrogate(ru) { - ru2, err := r.readU4(j + 6) - if err != nil { - return 0, err - } - out.Write(r.s[r.i : j-2]) - r.i = j + 10 - j = r.i - rval := utf16.DecodeRune(ru, ru2) - if rval != unicode.ReplacementChar { - out.WriteRune(rval) - } else { - return 0, fmt.Errorf("lex_string_invalid_unicode_surrogate: %v %v", ru, ru2) - } - } else { - out.Write(r.s[r.i : j-2]) - r.i = j + 4 - j = r.i - out.WriteRune(ru) - } - return j, nil - } else if byteLookupTable[c]&cVEC == 0 { - return 0, fmt.Errorf("lex_string_invalid_escaped_char: %v", c) - } else { - out.Write(r.s[r.i : j-2]) - r.i = j - j = r.i - - switch c { - case '"': - out.WriteByte('"') - case '\\': - out.WriteByte('\\') - case '/': - out.WriteByte('/') - case 'b': - out.WriteByte('\b') - case 'f': - out.WriteByte('\f') - case 'n': - out.WriteByte('\n') - case 'r': - out.WriteByte('\r') - case 't': - out.WriteByte('\t') - } - } - - return j, nil -} - -func (r *ffReader) SliceString(out DecodingBuffer) error { - var c byte - // TODO(pquerna): string_with_escapes? de-escape here? - j := r.i - - for { - if j >= r.l { - return io.EOF - } - - j, c = scanString(r.s, j) - - if c == '"' { - if j != r.i { - out.Write(r.s[r.i : j-1]) - r.i = j - } - return nil - } else if c == '\\' { - var err error - j, err = r.handleEscaped(c, j, out) - if err != nil { - return err - } - } else if byteLookupTable[c]&cIJC != 0 { - return fmt.Errorf("lex_string_invalid_json_char: %v", c) - } - continue - } -} - -// TODO(pquerna): consider combining wibth the normal byte mask. 
-var whitespaceLookupTable [256]bool = [256]bool{ - false, /* 0 */ - false, /* 1 */ - false, /* 2 */ - false, /* 3 */ - false, /* 4 */ - false, /* 5 */ - false, /* 6 */ - false, /* 7 */ - false, /* 8 */ - true, /* 9 */ - true, /* 10 */ - true, /* 11 */ - true, /* 12 */ - true, /* 13 */ - false, /* 14 */ - false, /* 15 */ - false, /* 16 */ - false, /* 17 */ - false, /* 18 */ - false, /* 19 */ - false, /* 20 */ - false, /* 21 */ - false, /* 22 */ - false, /* 23 */ - false, /* 24 */ - false, /* 25 */ - false, /* 26 */ - false, /* 27 */ - false, /* 28 */ - false, /* 29 */ - false, /* 30 */ - false, /* 31 */ - true, /* 32 */ - false, /* 33 */ - false, /* 34 */ - false, /* 35 */ - false, /* 36 */ - false, /* 37 */ - false, /* 38 */ - false, /* 39 */ - false, /* 40 */ - false, /* 41 */ - false, /* 42 */ - false, /* 43 */ - false, /* 44 */ - false, /* 45 */ - false, /* 46 */ - false, /* 47 */ - false, /* 48 */ - false, /* 49 */ - false, /* 50 */ - false, /* 51 */ - false, /* 52 */ - false, /* 53 */ - false, /* 54 */ - false, /* 55 */ - false, /* 56 */ - false, /* 57 */ - false, /* 58 */ - false, /* 59 */ - false, /* 60 */ - false, /* 61 */ - false, /* 62 */ - false, /* 63 */ - false, /* 64 */ - false, /* 65 */ - false, /* 66 */ - false, /* 67 */ - false, /* 68 */ - false, /* 69 */ - false, /* 70 */ - false, /* 71 */ - false, /* 72 */ - false, /* 73 */ - false, /* 74 */ - false, /* 75 */ - false, /* 76 */ - false, /* 77 */ - false, /* 78 */ - false, /* 79 */ - false, /* 80 */ - false, /* 81 */ - false, /* 82 */ - false, /* 83 */ - false, /* 84 */ - false, /* 85 */ - false, /* 86 */ - false, /* 87 */ - false, /* 88 */ - false, /* 89 */ - false, /* 90 */ - false, /* 91 */ - false, /* 92 */ - false, /* 93 */ - false, /* 94 */ - false, /* 95 */ - false, /* 96 */ - false, /* 97 */ - false, /* 98 */ - false, /* 99 */ - false, /* 100 */ - false, /* 101 */ - false, /* 102 */ - false, /* 103 */ - false, /* 104 */ - false, /* 105 */ - false, /* 106 */ - false, /* 107 */ - false, /* 108 */ - false, /* 109 */ - false, /* 110 */ - false, /* 111 */ - false, /* 112 */ - false, /* 113 */ - false, /* 114 */ - false, /* 115 */ - false, /* 116 */ - false, /* 117 */ - false, /* 118 */ - false, /* 119 */ - false, /* 120 */ - false, /* 121 */ - false, /* 122 */ - false, /* 123 */ - false, /* 124 */ - false, /* 125 */ - false, /* 126 */ - false, /* 127 */ - false, /* 128 */ - false, /* 129 */ - false, /* 130 */ - false, /* 131 */ - false, /* 132 */ - false, /* 133 */ - false, /* 134 */ - false, /* 135 */ - false, /* 136 */ - false, /* 137 */ - false, /* 138 */ - false, /* 139 */ - false, /* 140 */ - false, /* 141 */ - false, /* 142 */ - false, /* 143 */ - false, /* 144 */ - false, /* 145 */ - false, /* 146 */ - false, /* 147 */ - false, /* 148 */ - false, /* 149 */ - false, /* 150 */ - false, /* 151 */ - false, /* 152 */ - false, /* 153 */ - false, /* 154 */ - false, /* 155 */ - false, /* 156 */ - false, /* 157 */ - false, /* 158 */ - false, /* 159 */ - false, /* 160 */ - false, /* 161 */ - false, /* 162 */ - false, /* 163 */ - false, /* 164 */ - false, /* 165 */ - false, /* 166 */ - false, /* 167 */ - false, /* 168 */ - false, /* 169 */ - false, /* 170 */ - false, /* 171 */ - false, /* 172 */ - false, /* 173 */ - false, /* 174 */ - false, /* 175 */ - false, /* 176 */ - false, /* 177 */ - false, /* 178 */ - false, /* 179 */ - false, /* 180 */ - false, /* 181 */ - false, /* 182 */ - false, /* 183 */ - false, /* 184 */ - false, /* 185 */ - false, /* 186 */ - false, /* 187 */ - false, /* 188 */ - false, /* 189 */ - false, 
/* 190 */ - false, /* 191 */ - false, /* 192 */ - false, /* 193 */ - false, /* 194 */ - false, /* 195 */ - false, /* 196 */ - false, /* 197 */ - false, /* 198 */ - false, /* 199 */ - false, /* 200 */ - false, /* 201 */ - false, /* 202 */ - false, /* 203 */ - false, /* 204 */ - false, /* 205 */ - false, /* 206 */ - false, /* 207 */ - false, /* 208 */ - false, /* 209 */ - false, /* 210 */ - false, /* 211 */ - false, /* 212 */ - false, /* 213 */ - false, /* 214 */ - false, /* 215 */ - false, /* 216 */ - false, /* 217 */ - false, /* 218 */ - false, /* 219 */ - false, /* 220 */ - false, /* 221 */ - false, /* 222 */ - false, /* 223 */ - false, /* 224 */ - false, /* 225 */ - false, /* 226 */ - false, /* 227 */ - false, /* 228 */ - false, /* 229 */ - false, /* 230 */ - false, /* 231 */ - false, /* 232 */ - false, /* 233 */ - false, /* 234 */ - false, /* 235 */ - false, /* 236 */ - false, /* 237 */ - false, /* 238 */ - false, /* 239 */ - false, /* 240 */ - false, /* 241 */ - false, /* 242 */ - false, /* 243 */ - false, /* 244 */ - false, /* 245 */ - false, /* 246 */ - false, /* 247 */ - false, /* 248 */ - false, /* 249 */ - false, /* 250 */ - false, /* 251 */ - false, /* 252 */ - false, /* 253 */ - false, /* 254 */ - false, /* 255 */ -} diff --git a/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go b/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go deleted file mode 100644 index 47c2607708..0000000000 --- a/vendor/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright 2014 Paul Querna - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package v1 - -func scanString(s []byte, j int) (int, byte) { - for { - if j >= len(s) { - return j, 0 - } - - c := s[j] - j++ - if byteLookupTable[c]&sliceStringMask == 0 { - continue - } - - return j, c - } -} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index a7a42d5ef4..c092723e84 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to +// families. If an error occurrs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last -// error that has occured. +// error that has occurred. 
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { var ( all model.Vector diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 54bcfde294..b86290afa3 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -556,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() { // byte considered is the byte already read (now in p.currentByte). The first // newline byte encountered is still copied into p.currentByte, but not into // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All -// other escape sequences are invalid and cause an error. +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.Reset() escaped := false diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 7538e29977..bb99889d2c 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -59,8 +59,8 @@ func (m *Matcher) Validate() error { return nil } -// Silence defines the representation of a silence definiton -// in the Prometheus eco-system. +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. type Silence struct { ID uint64 `json:"id,omitempty"` diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9ed3ffd82..c9d8fb1a28 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error { } // Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. func (s *SamplePair) Equal(o *SamplePair) bool { return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) } @@ -117,7 +117,7 @@ type Sample struct { } // Equal compares first the metrics, then the timestamp, then the value. The -// sematics of value equality is defined by SampleValue.Equal. +// semantics of value equality is defined by SampleValue.Equal. func (s *Sample) Equal(o *Sample) bool { if s == o { return true diff --git a/vendor/github.com/samuel/go-zookeeper/LICENSE b/vendor/github.com/samuel/go-zookeeper/LICENSE new file mode 100644 index 0000000000..bc00498c52 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2013, Samuel Stauffer +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+* Neither the name of the author nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/samuel/go-zookeeper/zk/conn.go b/vendor/github.com/samuel/go-zookeeper/zk/conn.go new file mode 100644 index 0000000000..f79a51b355 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/conn.go @@ -0,0 +1,1228 @@ +// Package zk is a native Go client library for the ZooKeeper orchestration service. +package zk + +/* +TODO: +* make sure a ping response comes back in a reasonable time + +Possible watcher events: +* Event{Type: EventNotWatching, State: StateDisconnected, Path: path, Err: err} +*/ + +import ( + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// ErrNoServer indicates that an operation cannot be completed +// because attempts to connect to all servers in the list failed. +var ErrNoServer = errors.New("zk: could not connect to a server") + +// ErrInvalidPath indicates that an operation was being attempted on +// an invalid path. (e.g. empty path) +var ErrInvalidPath = errors.New("zk: invalid path") + +// DefaultLogger uses the stdlib log package for logging. +var DefaultLogger Logger = defaultLogger{} + +const ( + bufferSize = 1536 * 1024 + eventChanSize = 6 + sendChanSize = 16 + protectedPrefix = "_c_" +) + +type watchType int + +const ( + watchTypeData = iota + watchTypeExist + watchTypeChild +) + +type watchPathType struct { + path string + wType watchType +} + +type Dialer func(network, address string, timeout time.Duration) (net.Conn, error) + +// Logger is an interface that can be implemented to provide custom log output. 
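+// The standard library's *log.Logger satisfies this interface. As a hedged
+// sketch (the wiring below is illustrative only; `servers` and the timeout
+// are placeholders, not part of this file):
+//
+//	l := log.New(os.Stderr, "zk: ", log.LstdFlags)
+//	c, _, err := Connect(servers, 5*time.Second, WithLogger(l))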
+type Logger interface {
+	Printf(string, ...interface{})
+}
+
+type authCreds struct {
+	scheme string
+	auth   []byte
+}
+
+type Conn struct {
+	lastZxid         int64
+	sessionID        int64
+	state            State // must be 32-bit aligned
+	xid              uint32
+	sessionTimeoutMs int32 // session timeout in milliseconds
+	passwd           []byte
+
+	dialer         Dialer
+	hostProvider   HostProvider
+	serverMu       sync.Mutex // protects server
+	server         string     // remember the address/port of the current server
+	conn           net.Conn
+	eventChan      chan Event
+	eventCallback  EventCallback // may be nil
+	shouldQuit     chan struct{}
+	pingInterval   time.Duration
+	recvTimeout    time.Duration
+	connectTimeout time.Duration
+	maxBufferSize  int
+
+	creds   []authCreds
+	credsMu sync.Mutex // protects creds
+
+	sendChan     chan *request
+	requests     map[int32]*request // Xid -> pending request
+	requestsLock sync.Mutex
+	watchers     map[watchPathType][]chan Event
+	watchersLock sync.Mutex
+	closeChan    chan struct{} // channel to tell send loop stop
+
+	// Debug (used by unit tests)
+	reconnectLatch   chan struct{}
+	setWatchLimit    int
+	setWatchCallback func([]*setWatchesRequest)
+	// Debug (for recurring re-auth hang)
+	debugCloseRecvLoop bool
+	debugReauthDone    chan struct{}
+
+	logger  Logger
+	logInfo bool // true if information messages are logged; false if only errors are logged
+
+	buf []byte
+}
+
+// connOption represents a connection option.
+type connOption func(c *Conn)
+
+type request struct {
+	xid        int32
+	opcode     int32
+	pkt        interface{}
+	recvStruct interface{}
+	recvChan   chan response
+
+	// Because sending and receiving happen in separate goroutines, there's
+	// a possible race condition when creating watches from outside the read
+	// loop. We must ensure that a watcher gets added to the list synchronously
+	// with the response from the server on any request that creates a watch.
+	// In order to not hard code the watch logic for each opcode in the recv
+	// loop the caller can use recvFunc to insert some synchronous code
+	// after a response.
+	recvFunc func(*request, *responseHeader, error)
+}
+
+type response struct {
+	zxid int64
+	err  error
+}
+
+type Event struct {
+	Type   EventType
+	State  State
+	Path   string // For non-session events, the path of the watched node.
+	Err    error
+	Server string // For connection events
+}
+
+// HostProvider is used to represent a set of hosts a ZooKeeper client should connect to.
+// It is an analog of the Java equivalent:
+// http://svn.apache.org/viewvc/zookeeper/trunk/src/java/main/org/apache/zookeeper/client/HostProvider.java?view=markup
+type HostProvider interface {
+	// Init is called first, with the servers specified in the connection string.
+	Init(servers []string) error
+	// Len returns the number of servers.
+	Len() int
+	// Next returns the next server to connect to. retryStart will be true if we've looped through
+	// all known servers without Connected() being called.
+	Next() (server string, retryStart bool)
+	// Notify the HostProvider of a successful connection.
+	Connected()
+}
+
+// ConnectWithDialer establishes a new connection to a pool of zookeeper servers
+// using a custom Dialer. See Connect for further information about session timeout.
+// This method is deprecated and provided for compatibility: use the WithDialer option instead.
+func ConnectWithDialer(servers []string, sessionTimeout time.Duration, dialer Dialer) (*Conn, <-chan Event, error) {
+	return Connect(servers, sessionTimeout, WithDialer(dialer))
+}
+
+// Connect establishes a new connection to a pool of zookeeper servers.
+// The provided session timeout sets the amount of time for which a session
+// is considered valid after losing connection to a server. Within the session
+// timeout it's possible to reestablish a connection to a different server and
+// keep the same session. This means any ephemeral nodes and watches are
+// maintained.
+func Connect(servers []string, sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) {
+	if len(servers) == 0 {
+		return nil, nil, errors.New("zk: server list must not be empty")
+	}
+
+	srvs := make([]string, len(servers))
+
+	for i, addr := range servers {
+		if strings.Contains(addr, ":") {
+			srvs[i] = addr
+		} else {
+			srvs[i] = addr + ":" + strconv.Itoa(DefaultPort)
+		}
+	}
+
+	// Randomize the order of the servers to avoid creating hotspots
+	stringShuffle(srvs)
+
+	ec := make(chan Event, eventChanSize)
+	conn := &Conn{
+		dialer:         net.DialTimeout,
+		hostProvider:   &DNSHostProvider{},
+		conn:           nil,
+		state:          StateDisconnected,
+		eventChan:      ec,
+		shouldQuit:     make(chan struct{}),
+		connectTimeout: 1 * time.Second,
+		sendChan:       make(chan *request, sendChanSize),
+		requests:       make(map[int32]*request),
+		watchers:       make(map[watchPathType][]chan Event),
+		passwd:         emptyPassword,
+		logger:         DefaultLogger,
+		logInfo:        true, // default is true for backwards compatibility
+		buf:            make([]byte, bufferSize),
+	}
+
+	// Set provided options.
+	for _, option := range options {
+		option(conn)
+	}
+
+	if err := conn.hostProvider.Init(srvs); err != nil {
+		return nil, nil, err
+	}
+
+	conn.setTimeouts(int32(sessionTimeout / time.Millisecond))
+
+	go func() {
+		conn.loop()
+		conn.flushRequests(ErrClosing)
+		conn.invalidateWatches(ErrClosing)
+		close(conn.eventChan)
+	}()
+	return conn, ec, nil
+}
+
+// WithDialer returns a connection option specifying a non-default Dialer.
+func WithDialer(dialer Dialer) connOption {
+	return func(c *Conn) {
+		c.dialer = dialer
+	}
+}
+
+// WithHostProvider returns a connection option specifying a non-default HostProvider.
+func WithHostProvider(hostProvider HostProvider) connOption {
+	return func(c *Conn) {
+		c.hostProvider = hostProvider
+	}
+}
+
+// WithLogger returns a connection option specifying a non-default Logger
+func WithLogger(logger Logger) connOption {
+	return func(c *Conn) {
+		c.logger = logger
+	}
+}
+
+// WithLogInfo returns a connection option specifying whether or not information messages
+// should be logged.
+func WithLogInfo(logInfo bool) connOption {
+	return func(c *Conn) {
+		c.logInfo = logInfo
+	}
+}
+
+// EventCallback is a function that is called when an Event occurs.
+type EventCallback func(Event)
+
+// WithEventCallback returns a connection option that specifies an event
+// callback.
+// The callback must not block - doing so would delay the ZK goroutines.
+func WithEventCallback(cb EventCallback) connOption {
+	return func(c *Conn) {
+		c.eventCallback = cb
+	}
+}
+
+// WithMaxBufferSize sets the maximum buffer size used to read and decode
+// packets received from the Zookeeper server. The standard Zookeeper client for
+// Java defaults to a limit of 1mb. For backwards compatibility, this Go client
+// defaults to unbounded unless overridden via this option. A value that is zero
+// or negative indicates that no limit is enforced.
+//
+// This is meant to prevent resource exhaustion in the face of potentially
+// malicious data in ZK. It should generally match the server setting (which
+// also defaults to 1mb) so that clients and servers agree on the limits for
+// things like the size of data in an individual znode and the total size of a
+// transaction.
+//
+// For production systems, this should be set to a reasonable value (ideally
+// that matches the server configuration). For ops tooling, it is handy to use a
+// much larger limit, in order to do things like clean-up problematic state in
+// the ZK tree. For example, if a single znode has a huge number of children, it
+// is possible for the response to a "list children" operation to exceed this
+// buffer size and cause errors in clients. The only way to subsequently clean
+// up the tree (by removing superfluous children) is to use a client configured
+// with a larger buffer size that can successfully query for all of the child
+// names and then remove them. (Note there are other tools that can list all of
+// the child names without an increased buffer size in the client, but they work
+// by inspecting the servers' transaction logs to enumerate children instead of
+// sending an online request to a server.)
+func WithMaxBufferSize(maxBufferSize int) connOption {
+	return func(c *Conn) {
+		c.maxBufferSize = maxBufferSize
+	}
+}
+
+// WithMaxConnBufferSize sets the maximum buffer size used to send and encode
+// packets to the Zookeeper server. The standard Zookeeper client for Java
+// defaults to a limit of 1mb. This option should be used for non-standard
+// server setups where a znode is bigger than the default 1mb.
+func WithMaxConnBufferSize(maxBufferSize int) connOption {
+	return func(c *Conn) {
+		c.buf = make([]byte, maxBufferSize)
+	}
+}
+
+func (c *Conn) Close() {
+	close(c.shouldQuit)
+
+	select {
+	case <-c.queueRequest(opClose, &closeRequest{}, &closeResponse{}, nil):
+	case <-time.After(time.Second):
+	}
+}
+
+// State returns the current state of the connection.
+func (c *Conn) State() State {
+	return State(atomic.LoadInt32((*int32)(&c.state)))
+}
+
+// SessionID returns the current session id of the connection.
+func (c *Conn) SessionID() int64 {
+	return atomic.LoadInt64(&c.sessionID)
+}
+
+// SetLogger sets the logger to be used for printing errors.
+// Logger is an interface provided by this package.
+func (c *Conn) SetLogger(l Logger) {
+	c.logger = l
+}
+
+func (c *Conn) setTimeouts(sessionTimeoutMs int32) {
+	c.sessionTimeoutMs = sessionTimeoutMs
+	sessionTimeout := time.Duration(sessionTimeoutMs) * time.Millisecond
+	c.recvTimeout = sessionTimeout * 2 / 3
+	c.pingInterval = c.recvTimeout / 2
+}
+
+func (c *Conn) setState(state State) {
+	atomic.StoreInt32((*int32)(&c.state), int32(state))
+	c.sendEvent(Event{Type: EventSession, State: state, Server: c.Server()})
+}
+
+func (c *Conn) sendEvent(evt Event) {
+	if c.eventCallback != nil {
+		c.eventCallback(evt)
+	}
+
+	select {
+	case c.eventChan <- evt:
+	default:
+		// panic("zk: event channel full - it must be monitored and never allowed to be full")
+	}
+}
+
+func (c *Conn) connect() error {
+	var retryStart bool
+	for {
+		c.serverMu.Lock()
+		c.server, retryStart = c.hostProvider.Next()
+		c.serverMu.Unlock()
+		c.setState(StateConnecting)
+		if retryStart {
+			c.flushUnsentRequests(ErrNoServer)
+			select {
+			case <-time.After(time.Second):
+				// pass
+			case <-c.shouldQuit:
+				c.setState(StateDisconnected)
+				c.flushUnsentRequests(ErrClosing)
+				return ErrClosing
+			}
+		}
+
+		zkConn, err := c.dialer("tcp", c.Server(), c.connectTimeout)
+		if err == nil {
+			c.conn = zkConn
+			c.setState(StateConnected)
+			if c.logInfo {
+				c.logger.Printf("Connected to %s", c.Server())
+			}
+			return nil
+		}
+
+		c.logger.Printf("Failed to connect to %s: %+v", c.Server(), err)
+	}
+}
+
+func (c *Conn) resendZkAuth(reauthReadyChan chan struct{}) {
+	shouldCancel := func() bool {
+		select {
+		case <-c.shouldQuit:
+			return true
+		case <-c.closeChan:
+			return true
+		default:
+			return false
+		}
+	}
+
+	c.credsMu.Lock()
+	defer c.credsMu.Unlock()
+
+	defer close(reauthReadyChan)
+
+	if c.logInfo {
+		c.logger.Printf("Re-submitting `%d` credentials after reconnect",
+			len(c.creds))
+	}
+
+	for _, cred := range c.creds {
+		if shouldCancel() {
+			c.logger.Printf("Cancel re-submitting credentials")
+			return
+		}
+		resChan, err := c.sendRequest(
+			opSetAuth,
+			&setAuthRequest{Type: 0,
+				Scheme: cred.scheme,
+				Auth:   cred.auth,
+			},
+			&setAuthResponse{},
+			nil)
+
+		if err != nil {
+			c.logger.Printf("Call to sendRequest failed during credential resubmit: %s", err)
+			// FIXME(prozlach): let's ignore errors for now
+			continue
+		}
+
+		var res response
+		select {
+		case res = <-resChan:
+		case <-c.closeChan:
+			c.logger.Printf("Recv closed, cancel re-submitting credentials")
+			return
+		case <-c.shouldQuit:
+			c.logger.Printf("Should quit, cancel re-submitting credentials")
+			return
+		}
+		if res.err != nil {
+			c.logger.Printf("Credential re-submit failed: %s", res.err)
+			// FIXME(prozlach): let's ignore errors for now
+			continue
+		}
+	}
+}
+
+func (c *Conn) sendRequest(
+	opcode int32,
+	req interface{},
+	res interface{},
+	recvFunc func(*request, *responseHeader, error),
+) (
+	<-chan response,
+	error,
+) {
+	rq := &request{
+		xid:        c.nextXid(),
+		opcode:     opcode,
+		pkt:        req,
+		recvStruct: res,
+		recvChan:   make(chan response, 1),
+		recvFunc:   recvFunc,
+	}
+
+	if err := c.sendData(rq); err != nil {
+		return nil, err
+	}
+
+	return rq.recvChan, nil
+}
+
+func (c *Conn) loop() {
+	for {
+		if err := c.connect(); err != nil {
+			// c.Close() was called
+			return
+		}
+
+		err := c.authenticate()
+		switch {
+		case err == ErrSessionExpired:
+			c.logger.Printf("Authentication failed: %s", err)
+			c.invalidateWatches(err)
+		case err != nil && c.conn != nil:
+			c.logger.Printf("Authentication failed: %s", err)
+			c.conn.Close()
+		case err == nil:
+			if c.logInfo {
+				c.logger.Printf("Authenticated: id=%d, timeout=%d", c.SessionID(), c.sessionTimeoutMs)
timeout=%d", c.SessionID(), c.sessionTimeoutMs) + } + c.hostProvider.Connected() // mark success + c.closeChan = make(chan struct{}) // channel to tell send loop stop + reauthChan := make(chan struct{}) // channel to tell send loop that authdata has been resubmitted + + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-reauthChan + if c.debugCloseRecvLoop { + close(c.debugReauthDone) + } + err := c.sendLoop() + if err != nil || c.logInfo { + c.logger.Printf("Send loop terminated: err=%v", err) + } + c.conn.Close() // causes recv loop to EOF/exit + wg.Done() + }() + + wg.Add(1) + go func() { + var err error + if c.debugCloseRecvLoop { + err = errors.New("DEBUG: close recv loop") + } else { + err = c.recvLoop(c.conn) + } + if err != io.EOF || c.logInfo { + c.logger.Printf("Recv loop terminated: err=%v", err) + } + if err == nil { + panic("zk: recvLoop should never return nil error") + } + close(c.closeChan) // tell send loop to exit + wg.Done() + }() + + c.resendZkAuth(reauthChan) + + c.sendSetWatches() + wg.Wait() + } + + c.setState(StateDisconnected) + + select { + case <-c.shouldQuit: + c.flushRequests(ErrClosing) + return + default: + } + + if err != ErrSessionExpired { + err = ErrConnectionClosed + } + c.flushRequests(err) + + if c.reconnectLatch != nil { + select { + case <-c.shouldQuit: + return + case <-c.reconnectLatch: + } + } + } +} + +func (c *Conn) flushUnsentRequests(err error) { + for { + select { + default: + return + case req := <-c.sendChan: + req.recvChan <- response{-1, err} + } + } +} + +// Send error to all pending requests and clear request map +func (c *Conn) flushRequests(err error) { + c.requestsLock.Lock() + for _, req := range c.requests { + req.recvChan <- response{-1, err} + } + c.requests = make(map[int32]*request) + c.requestsLock.Unlock() +} + +// Send error to all watchers and clear watchers map +func (c *Conn) invalidateWatches(err error) { + c.watchersLock.Lock() + defer c.watchersLock.Unlock() + + if len(c.watchers) >= 0 { + for pathType, watchers := range c.watchers { + ev := Event{Type: EventNotWatching, State: StateDisconnected, Path: pathType.path, Err: err} + for _, ch := range watchers { + ch <- ev + close(ch) + } + } + c.watchers = make(map[watchPathType][]chan Event) + } +} + +func (c *Conn) sendSetWatches() { + c.watchersLock.Lock() + defer c.watchersLock.Unlock() + + if len(c.watchers) == 0 { + return + } + + // NB: A ZK server, by default, rejects packets >1mb. So, if we have too + // many watches to reset, we need to break this up into multiple packets + // to avoid hitting that limit. Mirroring the Java client behavior: we are + // conservative in that we limit requests to 128kb (since server limit is + // is actually configurable and could conceivably be configured smaller + // than default of 1mb). 
+ limit := 128 * 1024 + if c.setWatchLimit > 0 { + limit = c.setWatchLimit + } + + var reqs []*setWatchesRequest + var req *setWatchesRequest + var sizeSoFar int + + n := 0 + for pathType, watchers := range c.watchers { + if len(watchers) == 0 { + continue + } + addlLen := 4 + len(pathType.path) + if req == nil || sizeSoFar+addlLen > limit { + if req != nil { + // add to set of requests that we'll send + reqs = append(reqs, req) + } + sizeSoFar = 28 // fixed overhead of a set-watches packet + req = &setWatchesRequest{ + RelativeZxid: c.lastZxid, + DataWatches: make([]string, 0), + ExistWatches: make([]string, 0), + ChildWatches: make([]string, 0), + } + } + sizeSoFar += addlLen + switch pathType.wType { + case watchTypeData: + req.DataWatches = append(req.DataWatches, pathType.path) + case watchTypeExist: + req.ExistWatches = append(req.ExistWatches, pathType.path) + case watchTypeChild: + req.ChildWatches = append(req.ChildWatches, pathType.path) + } + n++ + } + if n == 0 { + return + } + if req != nil { // don't forget any trailing packet we were building + reqs = append(reqs, req) + } + + if c.setWatchCallback != nil { + c.setWatchCallback(reqs) + } + + go func() { + res := &setWatchesResponse{} + // TODO: Pipeline these so queue all of them up before waiting on any + // response. That will require some investigation to make sure there + // aren't failure modes where a blocking write to the channel of requests + // could hang indefinitely and cause this goroutine to leak... + for _, req := range reqs { + _, err := c.request(opSetWatches, req, res, nil) + if err != nil { + c.logger.Printf("Failed to set previous watches: %s", err.Error()) + break + } + } + }() +} + +func (c *Conn) authenticate() error { + buf := make([]byte, 256) + + // Encode and send a connect request. + n, err := encodePacket(buf[4:], &connectRequest{ + ProtocolVersion: protocolVersion, + LastZxidSeen: c.lastZxid, + TimeOut: c.sessionTimeoutMs, + SessionID: c.SessionID(), + Passwd: c.passwd, + }) + if err != nil { + return err + } + + binary.BigEndian.PutUint32(buf[:4], uint32(n)) + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout * 10)) + _, err = c.conn.Write(buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + return err + } + + // Receive and decode a connect response. 
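+	// Framing note: like every ZooKeeper packet, the response is a 4-byte
+	// big-endian length prefix followed by the serialized connectResponse,
+	// which is what the ReadFull calls below consume.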
+ c.conn.SetReadDeadline(time.Now().Add(c.recvTimeout * 10)) + _, err = io.ReadFull(c.conn, buf[:4]) + c.conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + blen := int(binary.BigEndian.Uint32(buf[:4])) + if cap(buf) < blen { + buf = make([]byte, blen) + } + + _, err = io.ReadFull(c.conn, buf[:blen]) + if err != nil { + return err + } + + r := connectResponse{} + _, err = decodePacket(buf[:blen], &r) + if err != nil { + return err + } + if r.SessionID == 0 { + atomic.StoreInt64(&c.sessionID, int64(0)) + c.passwd = emptyPassword + c.lastZxid = 0 + c.setState(StateExpired) + return ErrSessionExpired + } + + atomic.StoreInt64(&c.sessionID, r.SessionID) + c.setTimeouts(r.TimeOut) + c.passwd = r.Passwd + c.setState(StateHasSession) + + return nil +} + +func (c *Conn) sendData(req *request) error { + header := &requestHeader{req.xid, req.opcode} + n, err := encodePacket(c.buf[4:], header) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } + + n2, err := encodePacket(c.buf[4+n:], req.pkt) + if err != nil { + req.recvChan <- response{-1, err} + return nil + } + + n += n2 + + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) + + c.requestsLock.Lock() + select { + case <-c.closeChan: + req.recvChan <- response{-1, ErrConnectionClosed} + c.requestsLock.Unlock() + return ErrConnectionClosed + default: + } + c.requests[req.xid] = req + c.requestsLock.Unlock() + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + req.recvChan <- response{-1, err} + c.conn.Close() + return err + } + + return nil +} + +func (c *Conn) sendLoop() error { + pingTicker := time.NewTicker(c.pingInterval) + defer pingTicker.Stop() + + for { + select { + case req := <-c.sendChan: + if err := c.sendData(req); err != nil { + return err + } + case <-pingTicker.C: + n, err := encodePacket(c.buf[4:], &requestHeader{Xid: -2, Opcode: opPing}) + if err != nil { + panic("zk: opPing should never fail to serialize") + } + + binary.BigEndian.PutUint32(c.buf[:4], uint32(n)) + + c.conn.SetWriteDeadline(time.Now().Add(c.recvTimeout)) + _, err = c.conn.Write(c.buf[:n+4]) + c.conn.SetWriteDeadline(time.Time{}) + if err != nil { + c.conn.Close() + return err + } + case <-c.closeChan: + return nil + } + } +} + +func (c *Conn) recvLoop(conn net.Conn) error { + sz := bufferSize + if c.maxBufferSize > 0 && sz > c.maxBufferSize { + sz = c.maxBufferSize + } + buf := make([]byte, sz) + for { + // package length + conn.SetReadDeadline(time.Now().Add(c.recvTimeout)) + _, err := io.ReadFull(conn, buf[:4]) + if err != nil { + return err + } + + blen := int(binary.BigEndian.Uint32(buf[:4])) + if cap(buf) < blen { + if c.maxBufferSize > 0 && blen > c.maxBufferSize { + return fmt.Errorf("received packet from server with length %d, which exceeds max buffer size %d", blen, c.maxBufferSize) + } + buf = make([]byte, blen) + } + + _, err = io.ReadFull(conn, buf[:blen]) + conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + res := responseHeader{} + _, err = decodePacket(buf[:16], &res) + if err != nil { + return err + } + + if res.Xid == -1 { + res := &watcherEvent{} + _, err := decodePacket(buf[16:blen], res) + if err != nil { + return err + } + ev := Event{ + Type: res.Type, + State: res.State, + Path: res.Path, + Err: nil, + } + c.sendEvent(ev) + wTypes := make([]watchType, 0, 2) + switch res.Type { + case EventNodeCreated: + wTypes = append(wTypes, watchTypeExist) + case EventNodeDeleted, 
EventNodeDataChanged:
+				wTypes = append(wTypes, watchTypeExist, watchTypeData, watchTypeChild)
+			case EventNodeChildrenChanged:
+				wTypes = append(wTypes, watchTypeChild)
+			}
+			c.watchersLock.Lock()
+			for _, t := range wTypes {
+				wpt := watchPathType{res.Path, t}
+				if watchers := c.watchers[wpt]; watchers != nil && len(watchers) > 0 {
+					for _, ch := range watchers {
+						ch <- ev
+						close(ch)
+					}
+					delete(c.watchers, wpt)
+				}
+			}
+			c.watchersLock.Unlock()
+		} else if res.Xid == -2 {
+			// Ping response. Ignore.
+		} else if res.Xid < 0 {
+			c.logger.Printf("Xid < 0 (%d) but not ping or watcher event", res.Xid)
+		} else {
+			if res.Zxid > 0 {
+				c.lastZxid = res.Zxid
+			}
+
+			c.requestsLock.Lock()
+			req, ok := c.requests[res.Xid]
+			if ok {
+				delete(c.requests, res.Xid)
+			}
+			c.requestsLock.Unlock()
+
+			if !ok {
+				c.logger.Printf("Response for unknown request with xid %d", res.Xid)
+			} else {
+				if res.Err != 0 {
+					err = res.Err.toError()
+				} else {
+					_, err = decodePacket(buf[16:blen], req.recvStruct)
+				}
+				if req.recvFunc != nil {
+					req.recvFunc(req, &res, err)
+				}
+				req.recvChan <- response{res.Zxid, err}
+				if req.opcode == opClose {
+					return io.EOF
+				}
+			}
+		}
+	}
+}
+
+func (c *Conn) nextXid() int32 {
+	return int32(atomic.AddUint32(&c.xid, 1) & 0x7fffffff)
+}
+
+func (c *Conn) addWatcher(path string, watchType watchType) <-chan Event {
+	c.watchersLock.Lock()
+	defer c.watchersLock.Unlock()
+
+	ch := make(chan Event, 1)
+	wpt := watchPathType{path, watchType}
+	c.watchers[wpt] = append(c.watchers[wpt], ch)
+	return ch
+}
+
+func (c *Conn) queueRequest(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) <-chan response {
+	rq := &request{
+		xid:        c.nextXid(),
+		opcode:     opcode,
+		pkt:        req,
+		recvStruct: res,
+		recvChan:   make(chan response, 1),
+		recvFunc:   recvFunc,
+	}
+	c.sendChan <- rq
+	return rq.recvChan
+}
+
+func (c *Conn) request(opcode int32, req interface{}, res interface{}, recvFunc func(*request, *responseHeader, error)) (int64, error) {
+	r := <-c.queueRequest(opcode, req, res, recvFunc)
+	return r.zxid, r.err
+}
+
+func (c *Conn) AddAuth(scheme string, auth []byte) error {
+	_, err := c.request(opSetAuth, &setAuthRequest{Type: 0, Scheme: scheme, Auth: auth}, &setAuthResponse{}, nil)
+
+	if err != nil {
+		return err
+	}
+
+	// Remember authdata so that it can be re-submitted on reconnect
+	//
+	// FIXME(prozlach): For now we treat "userfoo:passbar" and "userfoo:passbar2"
+	// as two different entries, which will be re-submitted on reconnect. Some
+	// research is needed on how ZK treats these cases and
+	// then maybe switch to something like "map[username] = password" to allow
+	// only a single password for a given user, with users being unique.
+ obj := authCreds{ + scheme: scheme, + auth: auth, + } + + c.credsMu.Lock() + c.creds = append(c.creds, obj) + c.credsMu.Unlock() + + return nil +} + +func (c *Conn) Children(path string) ([]string, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getChildren2Response{} + _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: false}, res, nil) + return res.Children, &res.Stat, err +} + +func (c *Conn) ChildrenW(path string) ([]string, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, nil, err + } + + var ech <-chan Event + res := &getChildren2Response{} + _, err := c.request(opGetChildren2, &getChildren2Request{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeChild) + } + }) + if err != nil { + return nil, nil, nil, err + } + return res.Children, &res.Stat, ech, err +} + +func (c *Conn) Get(path string) ([]byte, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getDataResponse{} + _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: false}, res, nil) + return res.Data, &res.Stat, err +} + +// GetW returns the contents of a znode and sets a watch +func (c *Conn) GetW(path string) ([]byte, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, nil, err + } + + var ech <-chan Event + res := &getDataResponse{} + _, err := c.request(opGetData, &getDataRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeData) + } + }) + if err != nil { + return nil, nil, nil, err + } + return res.Data, &res.Stat, ech, err +} + +func (c *Conn) Set(path string, data []byte, version int32) (*Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, err + } + + res := &setDataResponse{} + _, err := c.request(opSetData, &SetDataRequest{path, data, version}, res, nil) + return &res.Stat, err +} + +func (c *Conn) Create(path string, data []byte, flags int32, acl []ACL) (string, error) { + if err := validatePath(path, flags&FlagSequence == FlagSequence); err != nil { + return "", err + } + + res := &createResponse{} + _, err := c.request(opCreate, &CreateRequest{path, data, acl, flags}, res, nil) + return res.Path, err +} + +// CreateProtectedEphemeralSequential fixes a race condition if the server crashes +// after it creates the node. On reconnect the session may still be valid so the +// ephemeral node still exists. Therefore, on reconnect we need to check if a node +// with a GUID generated on create exists. 
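// A hedged usage sketch (names assumed): the last path element gains the
// protectedPrefix plus a 32-hex-character GUID, which is what lets the node
// be found again after a connection loss:
//
//	acl := zk.WorldACL(zk.PermAll)
//	path, err := conn.CreateProtectedEphemeralSequential("/election/candidate-", nil, acl)
//	// path resembles "/election/<protectedPrefix><guid>-candidate-0000000007"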
+func (c *Conn) CreateProtectedEphemeralSequential(path string, data []byte, acl []ACL) (string, error) { + if err := validatePath(path, true); err != nil { + return "", err + } + + var guid [16]byte + _, err := io.ReadFull(rand.Reader, guid[:16]) + if err != nil { + return "", err + } + guidStr := fmt.Sprintf("%x", guid) + + parts := strings.Split(path, "/") + parts[len(parts)-1] = fmt.Sprintf("%s%s-%s", protectedPrefix, guidStr, parts[len(parts)-1]) + rootPath := strings.Join(parts[:len(parts)-1], "/") + protectedPath := strings.Join(parts, "/") + + var newPath string + for i := 0; i < 3; i++ { + newPath, err = c.Create(protectedPath, data, FlagEphemeral|FlagSequence, acl) + switch err { + case ErrSessionExpired: + // No need to search for the node since it can't exist. Just try again. + case ErrConnectionClosed: + children, _, err := c.Children(rootPath) + if err != nil { + return "", err + } + for _, p := range children { + parts := strings.Split(p, "/") + if pth := parts[len(parts)-1]; strings.HasPrefix(pth, protectedPrefix) { + if g := pth[len(protectedPrefix) : len(protectedPrefix)+32]; g == guidStr { + return rootPath + "/" + p, nil + } + } + } + case nil: + return newPath, nil + default: + return "", err + } + } + return "", err +} + +func (c *Conn) Delete(path string, version int32) error { + if err := validatePath(path, false); err != nil { + return err + } + + _, err := c.request(opDelete, &DeleteRequest{path, version}, &deleteResponse{}, nil) + return err +} + +func (c *Conn) Exists(path string) (bool, *Stat, error) { + if err := validatePath(path, false); err != nil { + return false, nil, err + } + + res := &existsResponse{} + _, err := c.request(opExists, &existsRequest{Path: path, Watch: false}, res, nil) + exists := true + if err == ErrNoNode { + exists = false + err = nil + } + return exists, &res.Stat, err +} + +func (c *Conn) ExistsW(path string) (bool, *Stat, <-chan Event, error) { + if err := validatePath(path, false); err != nil { + return false, nil, nil, err + } + + var ech <-chan Event + res := &existsResponse{} + _, err := c.request(opExists, &existsRequest{Path: path, Watch: true}, res, func(req *request, res *responseHeader, err error) { + if err == nil { + ech = c.addWatcher(path, watchTypeData) + } else if err == ErrNoNode { + ech = c.addWatcher(path, watchTypeExist) + } + }) + exists := true + if err == ErrNoNode { + exists = false + err = nil + } + if err != nil { + return false, nil, nil, err + } + return exists, &res.Stat, ech, err +} + +func (c *Conn) GetACL(path string) ([]ACL, *Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, nil, err + } + + res := &getAclResponse{} + _, err := c.request(opGetAcl, &getAclRequest{Path: path}, res, nil) + return res.Acl, &res.Stat, err +} +func (c *Conn) SetACL(path string, acl []ACL, version int32) (*Stat, error) { + if err := validatePath(path, false); err != nil { + return nil, err + } + + res := &setAclResponse{} + _, err := c.request(opSetAcl, &setAclRequest{Path: path, Acl: acl, Version: version}, res, nil) + return &res.Stat, err +} + +func (c *Conn) Sync(path string) (string, error) { + if err := validatePath(path, false); err != nil { + return "", err + } + + res := &syncResponse{} + _, err := c.request(opSync, &syncRequest{Path: path}, res, nil) + return res.Path, err +} + +type MultiResponse struct { + Stat *Stat + String string + Error error +} + +// Multi executes multiple ZooKeeper operations or none of them. 
The provided +// ops must be one of *CreateRequest, *DeleteRequest, *SetDataRequest, or +// *CheckVersionRequest. +func (c *Conn) Multi(ops ...interface{}) ([]MultiResponse, error) { + req := &multiRequest{ + Ops: make([]multiRequestOp, 0, len(ops)), + DoneHeader: multiHeader{Type: -1, Done: true, Err: -1}, + } + for _, op := range ops { + var opCode int32 + switch op.(type) { + case *CreateRequest: + opCode = opCreate + case *SetDataRequest: + opCode = opSetData + case *DeleteRequest: + opCode = opDelete + case *CheckVersionRequest: + opCode = opCheck + default: + return nil, fmt.Errorf("unknown operation type %T", op) + } + req.Ops = append(req.Ops, multiRequestOp{multiHeader{opCode, false, -1}, op}) + } + res := &multiResponse{} + _, err := c.request(opMulti, req, res, nil) + mr := make([]MultiResponse, len(res.Ops)) + for i, op := range res.Ops { + mr[i] = MultiResponse{Stat: op.Stat, String: op.String, Error: op.Err.toError()} + } + return mr, err +} + +// Server returns the current or last-connected server name. +func (c *Conn) Server() string { + c.serverMu.Lock() + defer c.serverMu.Unlock() + return c.server +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/constants.go b/vendor/github.com/samuel/go-zookeeper/zk/constants.go new file mode 100644 index 0000000000..33b5563b9f --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/constants.go @@ -0,0 +1,240 @@ +package zk + +import ( + "errors" +) + +const ( + protocolVersion = 0 + + DefaultPort = 2181 +) + +const ( + opNotify = 0 + opCreate = 1 + opDelete = 2 + opExists = 3 + opGetData = 4 + opSetData = 5 + opGetAcl = 6 + opSetAcl = 7 + opGetChildren = 8 + opSync = 9 + opPing = 11 + opGetChildren2 = 12 + opCheck = 13 + opMulti = 14 + opClose = -11 + opSetAuth = 100 + opSetWatches = 101 + opError = -1 + // Not in protocol, used internally + opWatcherEvent = -2 +) + +const ( + EventNodeCreated EventType = 1 + EventNodeDeleted EventType = 2 + EventNodeDataChanged EventType = 3 + EventNodeChildrenChanged EventType = 4 + + EventSession EventType = -1 + EventNotWatching EventType = -2 +) + +var ( + eventNames = map[EventType]string{ + EventNodeCreated: "EventNodeCreated", + EventNodeDeleted: "EventNodeDeleted", + EventNodeDataChanged: "EventNodeDataChanged", + EventNodeChildrenChanged: "EventNodeChildrenChanged", + EventSession: "EventSession", + EventNotWatching: "EventNotWatching", + } +) + +const ( + StateUnknown State = -1 + StateDisconnected State = 0 + StateConnecting State = 1 + StateAuthFailed State = 4 + StateConnectedReadOnly State = 5 + StateSaslAuthenticated State = 6 + StateExpired State = -112 + + StateConnected = State(100) + StateHasSession = State(101) +) + +const ( + FlagEphemeral = 1 + FlagSequence = 2 +) + +var ( + stateNames = map[State]string{ + StateUnknown: "StateUnknown", + StateDisconnected: "StateDisconnected", + StateConnectedReadOnly: "StateConnectedReadOnly", + StateSaslAuthenticated: "StateSaslAuthenticated", + StateExpired: "StateExpired", + StateAuthFailed: "StateAuthFailed", + StateConnecting: "StateConnecting", + StateConnected: "StateConnected", + StateHasSession: "StateHasSession", + } +) + +type State int32 + +func (s State) String() string { + if name := stateNames[s]; name != "" { + return name + } + return "Unknown" +} + +type ErrCode int32 + +var ( + ErrConnectionClosed = errors.New("zk: connection closed") + ErrUnknown = errors.New("zk: unknown error") + ErrAPIError = errors.New("zk: api error") + ErrNoNode = errors.New("zk: node does not exist") + ErrNoAuth = errors.New("zk: not 
authenticated") + ErrBadVersion = errors.New("zk: version conflict") + ErrNoChildrenForEphemerals = errors.New("zk: ephemeral nodes may not have children") + ErrNodeExists = errors.New("zk: node already exists") + ErrNotEmpty = errors.New("zk: node has children") + ErrSessionExpired = errors.New("zk: session has been expired by the server") + ErrInvalidACL = errors.New("zk: invalid ACL specified") + ErrAuthFailed = errors.New("zk: client authentication failed") + ErrClosing = errors.New("zk: zookeeper is closing") + ErrNothing = errors.New("zk: no server responsees to process") + ErrSessionMoved = errors.New("zk: session moved to another server, so operation is ignored") + + // ErrInvalidCallback = errors.New("zk: invalid callback specified") + errCodeToError = map[ErrCode]error{ + 0: nil, + errAPIError: ErrAPIError, + errNoNode: ErrNoNode, + errNoAuth: ErrNoAuth, + errBadVersion: ErrBadVersion, + errNoChildrenForEphemerals: ErrNoChildrenForEphemerals, + errNodeExists: ErrNodeExists, + errNotEmpty: ErrNotEmpty, + errSessionExpired: ErrSessionExpired, + // errInvalidCallback: ErrInvalidCallback, + errInvalidAcl: ErrInvalidACL, + errAuthFailed: ErrAuthFailed, + errClosing: ErrClosing, + errNothing: ErrNothing, + errSessionMoved: ErrSessionMoved, + } +) + +func (e ErrCode) toError() error { + if err, ok := errCodeToError[e]; ok { + return err + } + return ErrUnknown +} + +const ( + errOk = 0 + // System and server-side errors + errSystemError = -1 + errRuntimeInconsistency = -2 + errDataInconsistency = -3 + errConnectionLoss = -4 + errMarshallingError = -5 + errUnimplemented = -6 + errOperationTimeout = -7 + errBadArguments = -8 + errInvalidState = -9 + // API errors + errAPIError ErrCode = -100 + errNoNode ErrCode = -101 // * + errNoAuth ErrCode = -102 + errBadVersion ErrCode = -103 // * + errNoChildrenForEphemerals ErrCode = -108 + errNodeExists ErrCode = -110 // * + errNotEmpty ErrCode = -111 + errSessionExpired ErrCode = -112 + errInvalidCallback ErrCode = -113 + errInvalidAcl ErrCode = -114 + errAuthFailed ErrCode = -115 + errClosing ErrCode = -116 + errNothing ErrCode = -117 + errSessionMoved ErrCode = -118 +) + +// Constants for ACL permissions +const ( + PermRead = 1 << iota + PermWrite + PermCreate + PermDelete + PermAdmin + PermAll = 0x1f +) + +var ( + emptyPassword = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + opNames = map[int32]string{ + opNotify: "notify", + opCreate: "create", + opDelete: "delete", + opExists: "exists", + opGetData: "getData", + opSetData: "setData", + opGetAcl: "getACL", + opSetAcl: "setACL", + opGetChildren: "getChildren", + opSync: "sync", + opPing: "ping", + opGetChildren2: "getChildren2", + opCheck: "check", + opMulti: "multi", + opClose: "close", + opSetAuth: "setAuth", + opSetWatches: "setWatches", + + opWatcherEvent: "watcherEvent", + } +) + +type EventType int32 + +func (t EventType) String() string { + if name := eventNames[t]; name != "" { + return name + } + return "Unknown" +} + +// Mode is used to build custom server modes (leader|follower|standalone). 
+type Mode uint8 + +func (m Mode) String() string { + if name := modeNames[m]; name != "" { + return name + } + return "unknown" +} + +const ( + ModeUnknown Mode = iota + ModeLeader Mode = iota + ModeFollower Mode = iota + ModeStandalone Mode = iota +) + +var ( + modeNames = map[Mode]string{ + ModeLeader: "leader", + ModeFollower: "follower", + ModeStandalone: "standalone", + } +) diff --git a/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go new file mode 100644 index 0000000000..f4bba8d0b5 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/dnshostprovider.go @@ -0,0 +1,88 @@ +package zk + +import ( + "fmt" + "net" + "sync" +) + +// DNSHostProvider is the default HostProvider. It currently matches +// the Java StaticHostProvider, resolving hosts from DNS once during +// the call to Init. It could be easily extended to re-query DNS +// periodically or if there is trouble connecting. +type DNSHostProvider struct { + mu sync.Mutex // Protects everything, so we can add asynchronous updates later. + servers []string + curr int + last int + lookupHost func(string) ([]string, error) // Override of net.LookupHost, for testing. +} + +// Init is called first, with the servers specified in the connection +// string. It uses DNS to look up addresses for each server, then +// shuffles them all together. +func (hp *DNSHostProvider) Init(servers []string) error { + hp.mu.Lock() + defer hp.mu.Unlock() + + lookupHost := hp.lookupHost + if lookupHost == nil { + lookupHost = net.LookupHost + } + + found := []string{} + for _, server := range servers { + host, port, err := net.SplitHostPort(server) + if err != nil { + return err + } + addrs, err := lookupHost(host) + if err != nil { + return err + } + for _, addr := range addrs { + found = append(found, net.JoinHostPort(addr, port)) + } + } + + if len(found) == 0 { + return fmt.Errorf("No hosts found for addresses %q", servers) + } + + // Randomize the order of the servers to avoid creating hotspots + stringShuffle(found) + + hp.servers = found + hp.curr = -1 + hp.last = -1 + + return nil +} + +// Len returns the number of servers available +func (hp *DNSHostProvider) Len() int { + hp.mu.Lock() + defer hp.mu.Unlock() + return len(hp.servers) +} + +// Next returns the next server to connect to. retryStart will be true +// if we've looped through all known servers without Connected() being +// called. +func (hp *DNSHostProvider) Next() (server string, retryStart bool) { + hp.mu.Lock() + defer hp.mu.Unlock() + hp.curr = (hp.curr + 1) % len(hp.servers) + retryStart = hp.curr == hp.last + if hp.last == -1 { + hp.last = 0 + } + return hp.servers[hp.curr], retryStart +} + +// Connected notifies the HostProvider of a successful connection. +func (hp *DNSHostProvider) Connected() { + hp.mu.Lock() + defer hp.mu.Unlock() + hp.last = hp.curr +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/flw.go b/vendor/github.com/samuel/go-zookeeper/zk/flw.go new file mode 100644 index 0000000000..3e97f96876 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/flw.go @@ -0,0 +1,266 @@ +package zk + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "net" + "regexp" + "strconv" + "strings" + "time" +) + +// FLWSrvr is a FourLetterWord helper function. In particular, this function pulls the srvr output +// from the zookeeper instances and parses the output. 
A slice of *ServerStats structs is returned +// as well as a boolean value to indicate whether this function processed successfully. +// +// If the boolean value is false there was a problem. If the *ServerStats slice is empty or nil, +// then the error happened before we started to obtain 'srvr' values. Otherwise, one of the +// servers had an issue and the "Error" value in the struct should be inspected to determine +// which server had the issue. +func FLWSrvr(servers []string, timeout time.Duration) ([]*ServerStats, bool) { + // different parts of the regular expression that are required to parse the srvr output + const ( + zrVer = `^Zookeeper version: ([A-Za-z0-9\.\-]+), built on (\d\d/\d\d/\d\d\d\d \d\d:\d\d [A-Za-z0-9:\+\-]+)` + zrLat = `^Latency min/avg/max: (\d+)/(\d+)/(\d+)` + zrNet = `^Received: (\d+).*\n^Sent: (\d+).*\n^Connections: (\d+).*\n^Outstanding: (\d+)` + zrState = `^Zxid: (0x[A-Za-z0-9]+).*\n^Mode: (\w+).*\n^Node count: (\d+)` + ) + + // build the regex from the pieces above + re, err := regexp.Compile(fmt.Sprintf(`(?m:\A%v.*\n%v.*\n%v.*\n%v)`, zrVer, zrLat, zrNet, zrState)) + if err != nil { + return nil, false + } + + imOk := true + servers = FormatServers(servers) + ss := make([]*ServerStats, len(servers)) + + for i := range ss { + response, err := fourLetterWord(servers[i], "srvr", timeout) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + matches := re.FindAllStringSubmatch(string(response), -1) + + if matches == nil { + err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + match := matches[0][1:] + + // determine current server + var srvrMode Mode + switch match[10] { + case "leader": + srvrMode = ModeLeader + case "follower": + srvrMode = ModeFollower + case "standalone": + srvrMode = ModeStandalone + default: + srvrMode = ModeUnknown + } + + buildTime, err := time.Parse("01/02/2006 15:04 MST", match[1]) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + parsedInt, err := strconv.ParseInt(match[9], 0, 64) + + if err != nil { + ss[i] = &ServerStats{Error: err} + imOk = false + continue + } + + // the ZxID value is an int64 with two int32s packed inside + // the high int32 is the epoch (i.e., number of leader elections) + // the low int32 is the counter + epoch := int32(parsedInt >> 32) + counter := int32(parsedInt & 0xFFFFFFFF) + + // within the regex above, these values must be numerical + // so we can avoid useless checking of the error return value + minLatency, _ := strconv.ParseInt(match[2], 0, 64) + avgLatency, _ := strconv.ParseInt(match[3], 0, 64) + maxLatency, _ := strconv.ParseInt(match[4], 0, 64) + recv, _ := strconv.ParseInt(match[5], 0, 64) + sent, _ := strconv.ParseInt(match[6], 0, 64) + cons, _ := strconv.ParseInt(match[7], 0, 64) + outs, _ := strconv.ParseInt(match[8], 0, 64) + ncnt, _ := strconv.ParseInt(match[11], 0, 64) + + ss[i] = &ServerStats{ + Sent: sent, + Received: recv, + NodeCount: ncnt, + MinLatency: minLatency, + AvgLatency: avgLatency, + MaxLatency: maxLatency, + Connections: cons, + Outstanding: outs, + Epoch: epoch, + Counter: counter, + BuildTime: buildTime, + Mode: srvrMode, + Version: match[0], + } + } + + return ss, imOk +} + +// FLWRuok is a FourLetterWord helper function. In particular, this function +// pulls the ruok output from each server.
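// An illustrative health probe (addresses assumed):
//
//	oks := zk.FLWRuok([]string{"10.0.0.1:2181", "10.0.0.2:2181"}, time.Second)
//	for i, ok := range oks {
//		fmt.Printf("server %d healthy: %v\n", i, ok)
//	}
//
// An element is true only when the corresponding server answered "imok".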
+func FLWRuok(servers []string, timeout time.Duration) []bool { + servers = FormatServers(servers) + oks := make([]bool, len(servers)) + + for i := range oks { + response, err := fourLetterWord(servers[i], "ruok", timeout) + + if err != nil { + continue + } + + if bytes.Equal(response[:4], []byte("imok")) { + oks[i] = true + } + } + return oks +} + +// FLWCons is a FourLetterWord helper function. In particular, this function +// pulls the cons output from each server. +// +// As with FLWSrvr, the boolean value indicates whether one of the requests had +// an issue. The Clients struct has an Error value that can be checked. +func FLWCons(servers []string, timeout time.Duration) ([]*ServerClients, bool) { + const ( + zrAddr = `^ /((?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?):(?:\d+))\[\d+\]` + zrPac = `\(queued=(\d+),recved=(\d+),sent=(\d+),sid=(0x[A-Za-z0-9]+),lop=(\w+),est=(\d+),to=(\d+),` + zrSesh = `lcxid=(0x[A-Za-z0-9]+),lzxid=(0x[A-Za-z0-9]+),lresp=(\d+),llat=(\d+),minlat=(\d+),avglat=(\d+),maxlat=(\d+)\)` + ) + + re, err := regexp.Compile(fmt.Sprintf("%v%v%v", zrAddr, zrPac, zrSesh)) + if err != nil { + return nil, false + } + + servers = FormatServers(servers) + sc := make([]*ServerClients, len(servers)) + imOk := true + + for i := range sc { + response, err := fourLetterWord(servers[i], "cons", timeout) + + if err != nil { + sc[i] = &ServerClients{Error: err} + imOk = false + continue + } + + scan := bufio.NewScanner(bytes.NewReader(response)) + + var clients []*ServerClient + + for scan.Scan() { + line := scan.Bytes() + + if len(line) == 0 { + continue + } + + m := re.FindAllStringSubmatch(string(line), -1) + + if m == nil { + err := fmt.Errorf("unable to parse fields from zookeeper response (no regex matches)") + sc[i] = &ServerClients{Error: err} + imOk = false + continue + } + + match := m[0][1:] + + queued, _ := strconv.ParseInt(match[1], 0, 64) + recvd, _ := strconv.ParseInt(match[2], 0, 64) + sent, _ := strconv.ParseInt(match[3], 0, 64) + sid, _ := strconv.ParseInt(match[4], 0, 64) + est, _ := strconv.ParseInt(match[6], 0, 64) + timeout, _ := strconv.ParseInt(match[7], 0, 32) + lcxid, _ := parseInt64(match[8]) + lzxid, _ := parseInt64(match[9]) + lresp, _ := strconv.ParseInt(match[10], 0, 64) + llat, _ := strconv.ParseInt(match[11], 0, 32) + minlat, _ := strconv.ParseInt(match[12], 0, 32) + avglat, _ := strconv.ParseInt(match[13], 0, 32) + maxlat, _ := strconv.ParseInt(match[14], 0, 32) + + clients = append(clients, &ServerClient{ + Queued: queued, + Received: recvd, + Sent: sent, + SessionID: sid, + Lcxid: int64(lcxid), + Lzxid: int64(lzxid), + Timeout: int32(timeout), + LastLatency: int32(llat), + MinLatency: int32(minlat), + AvgLatency: int32(avglat), + MaxLatency: int32(maxlat), + Established: time.Unix(est, 0), + LastResponse: time.Unix(lresp, 0), + Addr: match[0], + LastOperation: match[5], + }) + } + + sc[i] = &ServerClients{Clients: clients} + } + + return sc, imOk +} + +// parseInt64 is similar to strconv.ParseInt, but it also handles hex values that represent negative numbers +func parseInt64(s string) (int64, error) { + if strings.HasPrefix(s, "0x") { + i, err := strconv.ParseUint(s, 0, 64) + return int64(i), err + } + return strconv.ParseInt(s, 0, 64) +} + +func fourLetterWord(server, command string, timeout time.Duration) ([]byte, error) { + conn, err := net.DialTimeout("tcp", server, timeout) + if err != nil { + return nil, err + } + + // the zookeeper server should automatically close this socket + // once the
command has been processed, but better safe than sorry + defer conn.Close() + + conn.SetWriteDeadline(time.Now().Add(timeout)) + _, err = conn.Write([]byte(command)) + if err != nil { + return nil, err + } + + conn.SetReadDeadline(time.Now().Add(timeout)) + return ioutil.ReadAll(conn) +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/lock.go b/vendor/github.com/samuel/go-zookeeper/zk/lock.go new file mode 100644 index 0000000000..3c35a427c8 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/lock.go @@ -0,0 +1,150 @@ +package zk + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +var ( + // ErrDeadlock is returned by Lock when trying to lock twice without unlocking first + ErrDeadlock = errors.New("zk: trying to acquire a lock twice") + // ErrNotLocked is returned by Unlock when trying to release a lock that has not first been acquired. + ErrNotLocked = errors.New("zk: not locked") +) + +// Lock is a mutual exclusion lock. +type Lock struct { + c *Conn + path string + acl []ACL + lockPath string + seq int +} + +// NewLock creates a new lock instance using the provided connection, path, and acl. +// The path must be a node that is only used by this lock. A lock instance starts +// unlocked until Lock() is called. +func NewLock(c *Conn, path string, acl []ACL) *Lock { + return &Lock{ + c: c, + path: path, + acl: acl, + } +} + +func parseSeq(path string) (int, error) { + parts := strings.Split(path, "-") + return strconv.Atoi(parts[len(parts)-1]) +} + +// Lock attempts to acquire the lock. It will wait to return until the lock +// is acquired or an error occurs. If this instance already has the lock +// then ErrDeadlock is returned. +func (l *Lock) Lock() error { + if l.lockPath != "" { + return ErrDeadlock + } + + prefix := fmt.Sprintf("%s/lock-", l.path) + + path := "" + var err error + for i := 0; i < 3; i++ { + path, err = l.c.CreateProtectedEphemeralSequential(prefix, []byte{}, l.acl) + if err == ErrNoNode { + // Create parent node. + parts := strings.Split(l.path, "/") + pth := "" + for _, p := range parts[1:] { + var exists bool + pth += "/" + p + exists, _, err = l.c.Exists(pth) + if err != nil { + return err + } + if exists { + continue + } + _, err = l.c.Create(pth, []byte{}, 0, l.acl) + if err != nil && err != ErrNodeExists { + return err + } + } + } else if err == nil { + break + } else { + return err + } + } + if err != nil { + return err + } + + seq, err := parseSeq(path) + if err != nil { + return err + } + + for { + children, _, err := l.c.Children(l.path) + if err != nil { + return err + } + + lowestSeq := seq + prevSeq := -1 + prevSeqPath := "" + for _, p := range children { + s, err := parseSeq(p) + if err != nil { + return err + } + if s < lowestSeq { + lowestSeq = s + } + if s < seq && s > prevSeq { + prevSeq = s + prevSeqPath = p + } + } + + if seq == lowestSeq { + // Acquired the lock + break + } + + // Wait on the node next in line for the lock + _, _, ch, err := l.c.GetW(l.path + "/" + prevSeqPath) + if err != nil && err != ErrNoNode { + return err + } else if err == ErrNoNode { + // try again + continue + } + + ev := <-ch + if ev.Err != nil { + return ev.Err + } + } + + l.seq = seq + l.lockPath = path + return nil +} + +// Unlock releases an acquired lock. If the lock is not currently acquired by +// this Lock instance then ErrNotLocked is returned.
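// A typical pairing with Lock (illustrative; conn assumed to be an open *Conn):
//
//	l := zk.NewLock(conn, "/myapp/leader", zk.WorldACL(zk.PermAll))
//	if err := l.Lock(); err != nil {
//		return err
//	}
//	defer l.Unlock()
//	// ... the critical section runs while the ephemeral znode is held ...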
+func (l *Lock) Unlock() error { + if l.lockPath == "" { + return ErrNotLocked + } + if err := l.c.Delete(l.lockPath, -1); err != nil { + return err + } + l.lockPath = "" + l.seq = 0 + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_help.go b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go new file mode 100644 index 0000000000..3663064cae --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_help.go @@ -0,0 +1,216 @@ +package zk + +import ( + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +type TestServer struct { + Port int + Path string + Srv *Server +} + +type TestCluster struct { + Path string + Servers []TestServer +} + +func StartTestCluster(size int, stdout, stderr io.Writer) (*TestCluster, error) { + tmpPath, err := ioutil.TempDir("", "gozk") + if err != nil { + return nil, err + } + success := false + startPort := int(rand.Int31n(6000) + 10000) + cluster := &TestCluster{Path: tmpPath} + defer func() { + if !success { + cluster.Stop() + } + }() + for serverN := 0; serverN < size; serverN++ { + srvPath := filepath.Join(tmpPath, fmt.Sprintf("srv%d", serverN)) + if err := os.Mkdir(srvPath, 0700); err != nil { + return nil, err + } + port := startPort + serverN*3 + cfg := ServerConfig{ + ClientPort: port, + DataDir: srvPath, + } + for i := 0; i < size; i++ { + cfg.Servers = append(cfg.Servers, ServerConfigServer{ + ID: i + 1, + Host: "127.0.0.1", + PeerPort: startPort + i*3 + 1, + LeaderElectionPort: startPort + i*3 + 2, + }) + } + cfgPath := filepath.Join(srvPath, "zoo.cfg") + fi, err := os.Create(cfgPath) + if err != nil { + return nil, err + } + err = cfg.Marshall(fi) + fi.Close() + if err != nil { + return nil, err + } + + fi, err = os.Create(filepath.Join(srvPath, "myid")) + if err != nil { + return nil, err + } + _, err = fmt.Fprintf(fi, "%d\n", serverN+1) + fi.Close() + if err != nil { + return nil, err + } + + srv := &Server{ + ConfigPath: cfgPath, + Stdout: stdout, + Stderr: stderr, + } + if err := srv.Start(); err != nil { + return nil, err + } + cluster.Servers = append(cluster.Servers, TestServer{ + Path: srvPath, + Port: cfg.ClientPort, + Srv: srv, + }) + } + if err := cluster.waitForStart(10, time.Second); err != nil { + return nil, err + } + success = true + return cluster, nil +} + +func (tc *TestCluster) Connect(idx int) (*Conn, error) { + zk, _, err := Connect([]string{fmt.Sprintf("127.0.0.1:%d", tc.Servers[idx].Port)}, time.Second*15) + return zk, err +} + +func (tc *TestCluster) ConnectAll() (*Conn, <-chan Event, error) { + return tc.ConnectAllTimeout(time.Second * 15) +} + +func (tc *TestCluster) ConnectAllTimeout(sessionTimeout time.Duration) (*Conn, <-chan Event, error) { + return tc.ConnectWithOptions(sessionTimeout) +} + +func (tc *TestCluster) ConnectWithOptions(sessionTimeout time.Duration, options ...connOption) (*Conn, <-chan Event, error) { + hosts := make([]string, len(tc.Servers)) + for i, srv := range tc.Servers { + hosts[i] = fmt.Sprintf("127.0.0.1:%d", srv.Port) + } + zk, ch, err := Connect(hosts, sessionTimeout, options...) 
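// (Illustrative test wiring, assuming a local ZooKeeper fatjar is available:
//
//	tc, _ := zk.StartTestCluster(3, nil, nil)
//	defer tc.Stop()
//	conn, _, _ := tc.ConnectAll()
//
// ConnectAll funnels through this helper so every member is in the dial list.)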
+ return zk, ch, err +} + +func (tc *TestCluster) Stop() error { + for _, srv := range tc.Servers { + srv.Srv.Stop() + } + defer os.RemoveAll(tc.Path) + return tc.waitForStop(5, time.Second) +} + +// waitForStart blocks until the cluster is up +func (tc *TestCluster) waitForStart(maxRetry int, interval time.Duration) error { + // verify that the servers are up with SRVR + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + for i := 0; i < maxRetry; i++ { + _, ok := FLWSrvr(serverAddrs, time.Second) + if ok { + return nil + } + time.Sleep(interval) + } + return fmt.Errorf("unable to verify health of servers") +} + +// waitForStop blocks until the cluster is down +func (tc *TestCluster) waitForStop(maxRetry int, interval time.Duration) error { + // verify that the servers are down with RUOK + serverAddrs := make([]string, len(tc.Servers)) + for i, s := range tc.Servers { + serverAddrs[i] = fmt.Sprintf("127.0.0.1:%d", s.Port) + } + + var success bool + for i := 0; i < maxRetry && !success; i++ { + success = true + for _, ok := range FLWRuok(serverAddrs, time.Second) { + if ok { + success = false + } + } + if !success { + time.Sleep(interval) + } + } + if !success { + return fmt.Errorf("unable to verify servers are down") + } + return nil +} + +func (tc *TestCluster) StartServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Start() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StopServer(server string) { + for _, s := range tc.Servers { + if strings.HasSuffix(server, fmt.Sprintf(":%d", s.Port)) { + s.Srv.Stop() + return + } + } + panic(fmt.Sprintf("Unknown server: %s", server)) +} + +func (tc *TestCluster) StartAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Start(); err != nil { + return fmt.Errorf( + "Failed to start server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} + +func (tc *TestCluster) StopAllServers() error { + for _, s := range tc.Servers { + if err := s.Srv.Stop(); err != nil { + return fmt.Errorf( + "Failed to stop server listening on port `%d` : %+v", s.Port, err) + } + } + + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/server_java.go b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go new file mode 100644 index 0000000000..e553ec1d9f --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/server_java.go @@ -0,0 +1,136 @@ +package zk + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" +) + +type ErrMissingServerConfigField string + +func (e ErrMissingServerConfigField) Error() string { + return fmt.Sprintf("zk: missing server config field '%s'", string(e)) +} + +const ( + DefaultServerTickTime = 2000 + DefaultServerInitLimit = 10 + DefaultServerSyncLimit = 5 + DefaultServerAutoPurgeSnapRetainCount = 3 + DefaultPeerPort = 2888 + DefaultLeaderElectionPort = 3888 +) + +type ServerConfigServer struct { + ID int + Host string + PeerPort int + LeaderElectionPort int +} + +type ServerConfig struct { + TickTime int // Number of milliseconds of each tick + InitLimit int // Number of ticks that the initial synchronization phase can take + SyncLimit int // Number of ticks that can pass between sending a request and getting an acknowledgement + DataDir string // Directory where the snapshot is stored + ClientPort int // Port at which clients will connect + AutoPurgeSnapRetainCount int //
Number of snapshots to retain in dataDir + AutoPurgePurgeInterval int // Purge task interval in hours (0 to disable auto purge) + Servers []ServerConfigServer +} + +func (sc ServerConfig) Marshall(w io.Writer) error { + if sc.DataDir == "" { + return ErrMissingServerConfigField("dataDir") + } + fmt.Fprintf(w, "dataDir=%s\n", sc.DataDir) + if sc.TickTime <= 0 { + sc.TickTime = DefaultServerTickTime + } + fmt.Fprintf(w, "tickTime=%d\n", sc.TickTime) + if sc.InitLimit <= 0 { + sc.InitLimit = DefaultServerInitLimit + } + fmt.Fprintf(w, "initLimit=%d\n", sc.InitLimit) + if sc.SyncLimit <= 0 { + sc.SyncLimit = DefaultServerSyncLimit + } + fmt.Fprintf(w, "syncLimit=%d\n", sc.SyncLimit) + if sc.ClientPort <= 0 { + sc.ClientPort = DefaultPort + } + fmt.Fprintf(w, "clientPort=%d\n", sc.ClientPort) + if sc.AutoPurgePurgeInterval > 0 { + if sc.AutoPurgeSnapRetainCount <= 0 { + sc.AutoPurgeSnapRetainCount = DefaultServerAutoPurgeSnapRetainCount + } + fmt.Fprintf(w, "autopurge.snapRetainCount=%d\n", sc.AutoPurgeSnapRetainCount) + fmt.Fprintf(w, "autopurge.purgeInterval=%d\n", sc.AutoPurgePurgeInterval) + } + if len(sc.Servers) > 0 { + for _, srv := range sc.Servers { + if srv.PeerPort <= 0 { + srv.PeerPort = DefaultPeerPort + } + if srv.LeaderElectionPort <= 0 { + srv.LeaderElectionPort = DefaultLeaderElectionPort + } + fmt.Fprintf(w, "server.%d=%s:%d:%d\n", srv.ID, srv.Host, srv.PeerPort, srv.LeaderElectionPort) + } + } + return nil +} + +var jarSearchPaths = []string{ + "zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "../zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/share/java/zookeeper-*.jar", + "/usr/local/zookeeper-*/contrib/fatjar/zookeeper-*-fatjar.jar", + "/usr/local/Cellar/zookeeper/*/libexec/contrib/fatjar/zookeeper-*-fatjar.jar", +} + +func findZookeeperFatJar() string { + var paths []string + zkPath := os.Getenv("ZOOKEEPER_PATH") + if zkPath == "" { + paths = jarSearchPaths + } else { + paths = []string{filepath.Join(zkPath, "contrib/fatjar/zookeeper-*-fatjar.jar")} + } + for _, path := range paths { + matches, _ := filepath.Glob(path) + // TODO: could sort by version and pick latest + if len(matches) > 0 { + return matches[0] + } + } + return "" +} + +type Server struct { + JarPath string + ConfigPath string + Stdout, Stderr io.Writer + + cmd *exec.Cmd +} + +func (srv *Server) Start() error { + if srv.JarPath == "" { + srv.JarPath = findZookeeperFatJar() + if srv.JarPath == "" { + return fmt.Errorf("zk: unable to find server jar") + } + } + srv.cmd = exec.Command("java", "-jar", srv.JarPath, "server", srv.ConfigPath) + srv.cmd.Stdout = srv.Stdout + srv.cmd.Stderr = srv.Stderr + return srv.cmd.Start() +} + +func (srv *Server) Stop() error { + srv.cmd.Process.Signal(os.Kill) + return srv.cmd.Wait() +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/structs.go b/vendor/github.com/samuel/go-zookeeper/zk/structs.go new file mode 100644 index 0000000000..d4af27deaa --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/structs.go @@ -0,0 +1,609 @@ +package zk + +import ( + "encoding/binary" + "errors" + "log" + "reflect" + "runtime" + "time" +) + +var ( + ErrUnhandledFieldType = errors.New("zk: unhandled field type") + ErrPtrExpected = errors.New("zk: encode/decode expect a non-nil pointer to struct") + ErrShortBuffer = errors.New("zk: buffer too small") +) + +type defaultLogger struct{} + +func (defaultLogger) Printf(format string, a ...interface{}) { + log.Printf(format, a...)
+} + +type ACL struct { + Perms int32 + Scheme string + ID string +} + +type Stat struct { + Czxid int64 // The zxid of the change that caused this znode to be created. + Mzxid int64 // The zxid of the change that last modified this znode. + Ctime int64 // The time in milliseconds from epoch when this znode was created. + Mtime int64 // The time in milliseconds from epoch when this znode was last modified. + Version int32 // The number of changes to the data of this znode. + Cversion int32 // The number of changes to the children of this znode. + Aversion int32 // The number of changes to the ACL of this znode. + EphemeralOwner int64 // The session id of the owner of this znode if the znode is an ephemeral node. If it is not an ephemeral node, it will be zero. + DataLength int32 // The length of the data field of this znode. + NumChildren int32 // The number of children of this znode. + Pzxid int64 // last modified children +} + +// ServerClient is the information for a single Zookeeper client and its session. +// This is used to parse/extract the output of the `cons` command. +type ServerClient struct { + Queued int64 + Received int64 + Sent int64 + SessionID int64 + Lcxid int64 + Lzxid int64 + Timeout int32 + LastLatency int32 + MinLatency int32 + AvgLatency int32 + MaxLatency int32 + Established time.Time + LastResponse time.Time + Addr string + LastOperation string // maybe? + Error error +} + +// ServerClients is a struct for the FLWCons() function. It's used to provide +// the list of Clients. +// +// This is needed because FLWCons() takes multiple servers. +type ServerClients struct { + Clients []*ServerClient + Error error +} + +// ServerStats is the information pulled from the Zookeeper `stat` command. +type ServerStats struct { + Sent int64 + Received int64 + NodeCount int64 + MinLatency int64 + AvgLatency int64 + MaxLatency int64 + Connections int64 + Outstanding int64 + Epoch int32 + Counter int32 + BuildTime time.Time + Mode Mode + Version string + Error error +} + +type requestHeader struct { + Xid int32 + Opcode int32 +} + +type responseHeader struct { + Xid int32 + Zxid int64 + Err ErrCode +} + +type multiHeader struct { + Type int32 + Done bool + Err ErrCode +} + +type auth struct { + Type int32 + Scheme string + Auth []byte +} + +// Generic request structs + +type pathRequest struct { + Path string +} + +type PathVersionRequest struct { + Path string + Version int32 +} + +type pathWatchRequest struct { + Path string + Watch bool +} + +type pathResponse struct { + Path string +} + +type statResponse struct { + Stat Stat +} + +// + +type CheckVersionRequest PathVersionRequest +type closeRequest struct{} +type closeResponse struct{} + +type connectRequest struct { + ProtocolVersion int32 + LastZxidSeen int64 + TimeOut int32 + SessionID int64 + Passwd []byte +} + +type connectResponse struct { + ProtocolVersion int32 + TimeOut int32 + SessionID int64 + Passwd []byte +} + +type CreateRequest struct { + Path string + Data []byte + Acl []ACL + Flags int32 +} + +type createResponse pathResponse +type DeleteRequest PathVersionRequest +type deleteResponse struct{} + +type errorResponse struct { + Err int32 +} + +type existsRequest pathWatchRequest +type existsResponse statResponse +type getAclRequest pathRequest + +type getAclResponse struct { + Acl []ACL + Stat Stat +} + +type getChildrenRequest pathRequest + +type getChildrenResponse struct { + Children []string +} + +type getChildren2Request pathWatchRequest + +type getChildren2Response struct { + Children []string + Stat Stat
+} + +type getDataRequest pathWatchRequest + +type getDataResponse struct { + Data []byte + Stat Stat +} + +type getMaxChildrenRequest pathRequest + +type getMaxChildrenResponse struct { + Max int32 +} + +type getSaslRequest struct { + Token []byte +} + +type pingRequest struct{} +type pingResponse struct{} + +type setAclRequest struct { + Path string + Acl []ACL + Version int32 +} + +type setAclResponse statResponse + +type SetDataRequest struct { + Path string + Data []byte + Version int32 +} + +type setDataResponse statResponse + +type setMaxChildren struct { + Path string + Max int32 +} + +type setSaslRequest struct { + Token string +} + +type setSaslResponse struct { + Token string +} + +type setWatchesRequest struct { + RelativeZxid int64 + DataWatches []string + ExistWatches []string + ChildWatches []string +} + +type setWatchesResponse struct{} + +type syncRequest pathRequest +type syncResponse pathResponse + +type setAuthRequest auth +type setAuthResponse struct{} + +type multiRequestOp struct { + Header multiHeader + Op interface{} +} +type multiRequest struct { + Ops []multiRequestOp + DoneHeader multiHeader +} +type multiResponseOp struct { + Header multiHeader + String string + Stat *Stat + Err ErrCode +} +type multiResponse struct { + Ops []multiResponseOp + DoneHeader multiHeader +} + +func (r *multiRequest) Encode(buf []byte) (int, error) { + total := 0 + for _, op := range r.Ops { + op.Header.Done = false + n, err := encodePacketValue(buf[total:], reflect.ValueOf(op)) + if err != nil { + return total, err + } + total += n + } + r.DoneHeader.Done = true + n, err := encodePacketValue(buf[total:], reflect.ValueOf(r.DoneHeader)) + if err != nil { + return total, err + } + total += n + + return total, nil +} + +func (r *multiRequest) Decode(buf []byte) (int, error) { + r.Ops = make([]multiRequestOp, 0) + r.DoneHeader = multiHeader{-1, true, -1} + total := 0 + for { + header := &multiHeader{} + n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) + if err != nil { + return total, err + } + total += n + if header.Done { + r.DoneHeader = *header + break + } + + req := requestStructForOp(header.Type) + if req == nil { + return total, ErrAPIError + } + n, err = decodePacketValue(buf[total:], reflect.ValueOf(req)) + if err != nil { + return total, err + } + total += n + r.Ops = append(r.Ops, multiRequestOp{*header, req}) + } + return total, nil +} + +func (r *multiResponse) Decode(buf []byte) (int, error) { + var multiErr error + + r.Ops = make([]multiResponseOp, 0) + r.DoneHeader = multiHeader{-1, true, -1} + total := 0 + for { + header := &multiHeader{} + n, err := decodePacketValue(buf[total:], reflect.ValueOf(header)) + if err != nil { + return total, err + } + total += n + if header.Done { + r.DoneHeader = *header + break + } + + res := multiResponseOp{Header: *header} + var w reflect.Value + switch header.Type { + default: + return total, ErrAPIError + case opError: + w = reflect.ValueOf(&res.Err) + case opCreate: + w = reflect.ValueOf(&res.String) + case opSetData: + res.Stat = new(Stat) + w = reflect.ValueOf(res.Stat) + case opCheck, opDelete: + } + if w.IsValid() { + n, err := decodePacketValue(buf[total:], w) + if err != nil { + return total, err + } + total += n + } + r.Ops = append(r.Ops, res) + if multiErr == nil && res.Err != errOk { + // Use the first error as the error returned from Multi(). 
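// An illustrative caller-side view (hypothetical paths):
//
//	rs, err := conn.Multi(
//		&zk.CreateRequest{Path: "/a", Acl: zk.WorldACL(zk.PermAll)},
//		&zk.DeleteRequest{Path: "/b", Version: -1},
//	)
//	// err carries the first per-op failure; rs[i].Error has each op's result.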
+ multiErr = res.Err.toError() + } + } + return total, multiErr +} + +type watcherEvent struct { + Type EventType + State State + Path string +} + +type decoder interface { + Decode(buf []byte) (int, error) +} + +type encoder interface { + Encode(buf []byte) (int, error) +} + +func decodePacket(buf []byte, st interface{}) (n int, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { + err = ErrShortBuffer + } else { + panic(r) + } + } + }() + + v := reflect.ValueOf(st) + if v.Kind() != reflect.Ptr || v.IsNil() { + return 0, ErrPtrExpected + } + return decodePacketValue(buf, v) +} + +func decodePacketValue(buf []byte, v reflect.Value) (int, error) { + rv := v + kind := v.Kind() + if kind == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + kind = v.Kind() + } + + n := 0 + switch kind { + default: + return n, ErrUnhandledFieldType + case reflect.Struct: + if de, ok := rv.Interface().(decoder); ok { + return de.Decode(buf) + } else if de, ok := v.Interface().(decoder); ok { + return de.Decode(buf) + } else { + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + n2, err := decodePacketValue(buf[n:], field) + n += n2 + if err != nil { + return n, err + } + } + } + case reflect.Bool: + v.SetBool(buf[n] != 0) + n++ + case reflect.Int32: + v.SetInt(int64(binary.BigEndian.Uint32(buf[n : n+4]))) + n += 4 + case reflect.Int64: + v.SetInt(int64(binary.BigEndian.Uint64(buf[n : n+8]))) + n += 8 + case reflect.String: + ln := int(binary.BigEndian.Uint32(buf[n : n+4])) + v.SetString(string(buf[n+4 : n+4+ln])) + n += 4 + ln + case reflect.Slice: + switch v.Type().Elem().Kind() { + default: + count := int(binary.BigEndian.Uint32(buf[n : n+4])) + n += 4 + values := reflect.MakeSlice(v.Type(), count, count) + v.Set(values) + for i := 0; i < count; i++ { + n2, err := decodePacketValue(buf[n:], values.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + case reflect.Uint8: + ln := int(int32(binary.BigEndian.Uint32(buf[n : n+4]))) + if ln < 0 { + n += 4 + v.SetBytes(nil) + } else { + bytes := make([]byte, ln) + copy(bytes, buf[n+4:n+4+ln]) + v.SetBytes(bytes) + n += 4 + ln + } + } + } + return n, nil +} + +func encodePacket(buf []byte, st interface{}) (n int, err error) { + defer func() { + if r := recover(); r != nil { + if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: slice bounds out of range" { + err = ErrShortBuffer + } else { + panic(r) + } + } + }() + + v := reflect.ValueOf(st) + if v.Kind() != reflect.Ptr || v.IsNil() { + return 0, ErrPtrExpected + } + return encodePacketValue(buf, v) +} + +func encodePacketValue(buf []byte, v reflect.Value) (int, error) { + rv := v + for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + v = v.Elem() + } + + n := 0 + switch v.Kind() { + default: + return n, ErrUnhandledFieldType + case reflect.Struct: + if en, ok := rv.Interface().(encoder); ok { + return en.Encode(buf) + } else if en, ok := v.Interface().(encoder); ok { + return en.Encode(buf) + } else { + for i := 0; i < v.NumField(); i++ { + field := v.Field(i) + n2, err := encodePacketValue(buf[n:], field) + n += n2 + if err != nil { + return n, err + } + } + } + case reflect.Bool: + if v.Bool() { + buf[n] = 1 + } else { + buf[n] = 0 + } + n++ + case reflect.Int32: + binary.BigEndian.PutUint32(buf[n:n+4], uint32(v.Int())) + n += 4 + case reflect.Int64: + binary.BigEndian.PutUint64(buf[n:n+8], uint64(v.Int())) + n += 8 + case 
reflect.String: + str := v.String() + binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(str))) + copy(buf[n+4:n+4+len(str)], []byte(str)) + n += 4 + len(str) + case reflect.Slice: + switch v.Type().Elem().Kind() { + default: + count := v.Len() + startN := n + n += 4 + for i := 0; i < count; i++ { + n2, err := encodePacketValue(buf[n:], v.Index(i)) + n += n2 + if err != nil { + return n, err + } + } + binary.BigEndian.PutUint32(buf[startN:startN+4], uint32(count)) + case reflect.Uint8: + if v.IsNil() { + binary.BigEndian.PutUint32(buf[n:n+4], uint32(0xffffffff)) + n += 4 + } else { + bytes := v.Bytes() + binary.BigEndian.PutUint32(buf[n:n+4], uint32(len(bytes))) + copy(buf[n+4:n+4+len(bytes)], bytes) + n += 4 + len(bytes) + } + } + } + return n, nil +} + +func requestStructForOp(op int32) interface{} { + switch op { + case opClose: + return &closeRequest{} + case opCreate: + return &CreateRequest{} + case opDelete: + return &DeleteRequest{} + case opExists: + return &existsRequest{} + case opGetAcl: + return &getAclRequest{} + case opGetChildren: + return &getChildrenRequest{} + case opGetChildren2: + return &getChildren2Request{} + case opGetData: + return &getDataRequest{} + case opPing: + return &pingRequest{} + case opSetAcl: + return &setAclRequest{} + case opSetData: + return &SetDataRequest{} + case opSetWatches: + return &setWatchesRequest{} + case opSync: + return &syncRequest{} + case opSetAuth: + return &setAuthRequest{} + case opCheck: + return &CheckVersionRequest{} + case opMulti: + return &multiRequest{} + } + return nil +} diff --git a/vendor/github.com/samuel/go-zookeeper/zk/util.go b/vendor/github.com/samuel/go-zookeeper/zk/util.go new file mode 100644 index 0000000000..f40a5b1561 --- /dev/null +++ b/vendor/github.com/samuel/go-zookeeper/zk/util.go @@ -0,0 +1,116 @@ +package zk + +import ( + "crypto/sha1" + "encoding/base64" + "fmt" + "math/rand" + "strconv" + "strings" + "unicode/utf8" +) + +// AuthACL produces an ACL list containing a single ACL which uses the +// provided permissions, with the scheme "auth", and ID "", which is used +// by ZooKeeper to represent any authenticated user. +func AuthACL(perms int32) []ACL { + return []ACL{{perms, "auth", ""}} +} + +// WorldACL produces an ACL list containing a single ACL which uses the +// provided permissions, with the scheme "world", and ID "anyone", which +// is used by ZooKeeper to represent any user at all. +func WorldACL(perms int32) []ACL { + return []ACL{{perms, "world", "anyone"}} +} + +func DigestACL(perms int32, user, password string) []ACL { + userPass := []byte(fmt.Sprintf("%s:%s", user, password)) + h := sha1.New() + if n, err := h.Write(userPass); err != nil || n != len(userPass) { + panic("SHA1 failed") + } + digest := base64.StdEncoding.EncodeToString(h.Sum(nil)) + return []ACL{{perms, "digest", fmt.Sprintf("%s:%s", user, digest)}} +} + +// FormatServers takes a slice of addresses, and makes sure they are in a format +// that resembles <addr>:<port>. If the server has no port provided, the +// DefaultPort constant is added to the end.
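// For example (illustrative; DefaultPort is 2181):
//
//	zk.FormatServers([]string{"10.0.0.1", "10.0.0.2:2182"})
//	// returns []string{"10.0.0.1:2181", "10.0.0.2:2182"}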
+func FormatServers(servers []string) []string { + for i := range servers { + if !strings.Contains(servers[i], ":") { + servers[i] = servers[i] + ":" + strconv.Itoa(DefaultPort) + } + } + return servers +} + +// stringShuffle performs a Fisher-Yates shuffle on a slice of strings +func stringShuffle(s []string) { + for i := len(s) - 1; i > 0; i-- { + j := rand.Intn(i + 1) + s[i], s[j] = s[j], s[i] + } +} + +// validatePath will make sure a path is valid before sending the request +func validatePath(path string, isSequential bool) error { + if path == "" { + return ErrInvalidPath + } + + if path[0] != '/' { + return ErrInvalidPath + } + + n := len(path) + if n == 1 { + // path is just the root + return nil + } + + if !isSequential && path[n-1] == '/' { + return ErrInvalidPath + } + + // Start at rune 1 since we already know that the first character is + // a '/'. + for i, w := 1, 0; i < n; i += w { + r, width := utf8.DecodeRuneInString(path[i:]) + switch { + case r == '\u0000': + return ErrInvalidPath + case r == '/': + last, _ := utf8.DecodeLastRuneInString(path[:i]) + if last == '/' { + return ErrInvalidPath + } + case r == '.': + last, lastWidth := utf8.DecodeLastRuneInString(path[:i]) + + // Check for double dot + if last == '.' { + last, _ = utf8.DecodeLastRuneInString(path[:i-lastWidth]) + } + + if last == '/' { + if i+1 == n { + return ErrInvalidPath + } + + next, _ := utf8.DecodeRuneInString(path[i+w:]) + if next == '/' { + return ErrInvalidPath + } + } + case r >= '\u0000' && r <= '\u001f', + r >= '\u007f' && r <= '\u009f', + r >= '\uf000' && r <= '\uf8ff', + r >= '\ufff0' && r < '\uffff': + return ErrInvalidPath + } + w = width + } + return nil +} diff --git a/vendor/github.com/sean-/seed/LICENSE b/vendor/github.com/sean-/seed/LICENSE new file mode 100644 index 0000000000..33d326a371 --- /dev/null +++ b/vendor/github.com/sean-/seed/LICENSE @@ -0,0 +1,54 @@ +MIT License + +Copyright (c) 2017 Sean Chittenden +Copyright (c) 2016 Alex Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +===== + +Bits of Go-lang's `once.Do()` were cribbed and reused here, too. + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/sean-/seed/init.go b/vendor/github.com/sean-/seed/init.go new file mode 100644 index 0000000000..248d6b636c --- /dev/null +++ b/vendor/github.com/sean-/seed/init.go @@ -0,0 +1,84 @@ +package seed + +import ( + crand "crypto/rand" + "fmt" + "math" + "math/big" + "math/rand" + "sync" + "sync/atomic" + "time" +) + +var ( + m sync.Mutex + secure int32 + seeded int32 +) + +func cryptoSeed() error { + defer atomic.StoreInt32(&seeded, 1) + + var err error + var n *big.Int + n, err = crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + rand.Seed(time.Now().UTC().UnixNano()) + return err + } + rand.Seed(n.Int64()) + atomic.StoreInt32(&secure, 1) + return nil +} + +// Init provides best-effort seeding (which is better than running with Go's +// default seed of 1). If `/dev/urandom` is available, Init() will seed Go's +// runtime with entropy from `/dev/urandom` and return true because the runtime +// was securely seeded. If Init() has already initialized the random number or +// it had failed to securely initialize the random number generation, Init() +// will return false. See MustInit(). +func Init() (seededSecurely bool, err error) { + if atomic.LoadInt32(&seeded) == 1 { + return false, nil + } + + // Slow-path + m.Lock() + defer m.Unlock() + + if err := cryptoSeed(); err != nil { + return false, err + } + + return true, nil +} + +// MustInit provides guaranteed secure seeding. If `/dev/urandom` is not +// available, MustInit will panic() with an error indicating why reading from +// `/dev/urandom` failed. MustInit() will upgrade the seed if for some reason a +// call to Init() failed in the past. +func MustInit() { + if atomic.LoadInt32(&secure) == 1 { + return + } + + // Slow-path + m.Lock() + defer m.Unlock() + + if err := cryptoSeed(); err != nil { + panic(fmt.Sprintf("Unable to seed the random number generator: %v", err)) + } +} + +// Secure returns true if a cryptographically secure seed was used to +// initialize rand. +func Secure() bool { + return atomic.LoadInt32(&secure) == 1 +} + +// Seeded returns true if Init has seeded the random number generator. 
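// An illustrative program entry point (hypothetical):
//
//	if secure, err := seed.Init(); err != nil {
//		log.Printf("falling back to a time-based seed: %v", err)
//	} else if !secure {
//		// already seeded by an earlier call, or the crypto seed was skipped
//	}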
+func Seeded() bool { + return atomic.LoadInt32(&seeded) == 1 +} diff --git a/vendor/github.com/tchap/go-patricia/AUTHORS b/vendor/github.com/tchap/go-patricia/AUTHORS deleted file mode 100644 index e640b0bf51..0000000000 --- a/vendor/github.com/tchap/go-patricia/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -This is the complete list of go-patricia copyright holders: - -Ondřej Kupka diff --git a/vendor/github.com/tchap/go-patricia/LICENSE b/vendor/github.com/tchap/go-patricia/LICENSE deleted file mode 100644 index e50d398e98..0000000000 --- a/vendor/github.com/tchap/go-patricia/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 The AUTHORS - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/tchap/go-patricia/patricia/children.go b/vendor/github.com/tchap/go-patricia/patricia/children.go deleted file mode 100644 index a5677c3352..0000000000 --- a/vendor/github.com/tchap/go-patricia/patricia/children.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright (c) 2014 The go-patricia AUTHORS -// -// Use of this source code is governed by The MIT License -// that can be found in the LICENSE file. - -package patricia - -import ( - "fmt" - "io" - "sort" -) - -type childList interface { - length() int - head() *Trie - add(child *Trie) childList - remove(b byte) - replace(b byte, child *Trie) - next(b byte) *Trie - walk(prefix *Prefix, visitor VisitorFunc) error - print(w io.Writer, indent int) - total() int -} - -type tries []*Trie - -func (t tries) Len() int { - return len(t) -} - -func (t tries) Less(i, j int) bool { - strings := sort.StringSlice{string(t[i].prefix), string(t[j].prefix)} - return strings.Less(0, 1) -} - -func (t tries) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -type sparseChildList struct { - children tries -} - -func newSparseChildList(maxChildrenPerSparseNode int) childList { - return &sparseChildList{ - children: make(tries, 0, maxChildrenPerSparseNode), - } -} - -func (list *sparseChildList) length() int { - return len(list.children) -} - -func (list *sparseChildList) head() *Trie { - return list.children[0] -} - -func (list *sparseChildList) add(child *Trie) childList { - // Search for an empty spot and insert the child if possible. - if len(list.children) != cap(list.children) { - list.children = append(list.children, child) - return list - } - - // Otherwise we have to transform to the dense list type. 
- return newDenseChildList(list, child) -} - -func (list *sparseChildList) remove(b byte) { - for i, node := range list.children { - if node.prefix[0] == b { - list.children[i] = list.children[len(list.children)-1] - list.children[len(list.children)-1] = nil - list.children = list.children[:len(list.children)-1] - return - } - } - - // This is not supposed to be reached. - panic("removing non-existent child") -} - -func (list *sparseChildList) replace(b byte, child *Trie) { - // Make a consistency check. - if p0 := child.prefix[0]; p0 != b { - panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) - } - - // Seek the child and replace it. - for i, node := range list.children { - if node.prefix[0] == b { - list.children[i] = child - return - } - } -} - -func (list *sparseChildList) next(b byte) *Trie { - for _, child := range list.children { - if child.prefix[0] == b { - return child - } - } - return nil -} - -func (list *sparseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { - - sort.Sort(list.children) - - for _, child := range list.children { - *prefix = append(*prefix, child.prefix...) - if child.item != nil { - err := visitor(*prefix, child.item) - if err != nil { - if err == SkipSubtree { - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - continue - } - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - return err - } - } - - err := child.children.walk(prefix, visitor) - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - if err != nil { - return err - } - } - - return nil -} - -func (list *sparseChildList) total() int { - tot := 0 - for _, child := range list.children { - if child != nil { - tot = tot + child.total() - } - } - return tot -} - -func (list *sparseChildList) print(w io.Writer, indent int) { - for _, child := range list.children { - if child != nil { - child.print(w, indent) - } - } -} - -type denseChildList struct { - min int - max int - numChildren int - headIndex int - children []*Trie -} - -func newDenseChildList(list *sparseChildList, child *Trie) childList { - var ( - min int = 255 - max int = 0 - ) - for _, child := range list.children { - b := int(child.prefix[0]) - if b < min { - min = b - } - if b > max { - max = b - } - } - - b := int(child.prefix[0]) - if b < min { - min = b - } - if b > max { - max = b - } - - children := make([]*Trie, max-min+1) - for _, child := range list.children { - children[int(child.prefix[0])-min] = child - } - children[int(child.prefix[0])-min] = child - - return &denseChildList{ - min: min, - max: max, - numChildren: list.length() + 1, - headIndex: 0, - children: children, - } -} - -func (list *denseChildList) length() int { - return list.numChildren -} - -func (list *denseChildList) head() *Trie { - return list.children[list.headIndex] -} - -func (list *denseChildList) add(child *Trie) childList { - b := int(child.prefix[0]) - var i int - - switch { - case list.min <= b && b <= list.max: - if list.children[b-list.min] != nil { - panic("dense child list collision detected") - } - i = b - list.min - list.children[i] = child - - case b < list.min: - children := make([]*Trie, list.max-b+1) - i = 0 - children[i] = child - copy(children[list.min-b:], list.children) - list.children = children - list.min = b - - default: // b > list.max - children := make([]*Trie, b-list.min+1) - i = b - list.min - children[i] = child - copy(children, list.children) - list.children = children - list.max = b - } - - list.numChildren++ - if i < list.headIndex { - list.headIndex = i - } - return list -} - -func (list 
*denseChildList) remove(b byte) { - i := int(b) - list.min - if list.children[i] == nil { - // This is not supposed to be reached. - panic("removing non-existent child") - } - list.numChildren-- - list.children[i] = nil - - // Update head index. - if i == list.headIndex { - for ; i < len(list.children); i++ { - if list.children[i] != nil { - list.headIndex = i - return - } - } - } -} - -func (list *denseChildList) replace(b byte, child *Trie) { - // Make a consistency check. - if p0 := child.prefix[0]; p0 != b { - panic(fmt.Errorf("child prefix mismatch: %v != %v", p0, b)) - } - - // Replace the child. - list.children[int(b)-list.min] = child -} - -func (list *denseChildList) next(b byte) *Trie { - i := int(b) - if i < list.min || list.max < i { - return nil - } - return list.children[i-list.min] -} - -func (list *denseChildList) walk(prefix *Prefix, visitor VisitorFunc) error { - for _, child := range list.children { - if child == nil { - continue - } - *prefix = append(*prefix, child.prefix...) - if child.item != nil { - if err := visitor(*prefix, child.item); err != nil { - if err == SkipSubtree { - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - continue - } - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - return err - } - } - - err := child.children.walk(prefix, visitor) - *prefix = (*prefix)[:len(*prefix)-len(child.prefix)] - if err != nil { - return err - } - } - - return nil -} - -func (list *denseChildList) print(w io.Writer, indent int) { - for _, child := range list.children { - if child != nil { - child.print(w, indent) - } - } -} - -func (list *denseChildList) total() int { - tot := 0 - for _, child := range list.children { - if child != nil { - tot = tot + child.total() - } - } - return tot -} diff --git a/vendor/github.com/tchap/go-patricia/patricia/patricia.go b/vendor/github.com/tchap/go-patricia/patricia/patricia.go deleted file mode 100644 index a1fc53d5db..0000000000 --- a/vendor/github.com/tchap/go-patricia/patricia/patricia.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright (c) 2014 The go-patricia AUTHORS -// -// Use of this source code is governed by The MIT License -// that can be found in the LICENSE file. - -package patricia - -import ( - "bytes" - "errors" - "fmt" - "io" - "strings" -) - -//------------------------------------------------------------------------------ -// Trie -//------------------------------------------------------------------------------ - -const ( - DefaultMaxPrefixPerNode = 10 - DefaultMaxChildrenPerSparseNode = 8 -) - -type ( - Prefix []byte - Item interface{} - VisitorFunc func(prefix Prefix, item Item) error -) - -// Trie is a generic patricia trie that allows fast retrieval of items by prefix. -// and other funky stuff. -// -// Trie is not thread-safe. -type Trie struct { - prefix Prefix - item Item - - maxPrefixPerNode int - maxChildrenPerSparseNode int - - children childList -} - -// Public API ------------------------------------------------------------------ - -type Option func(*Trie) - -// Trie constructor. 
-func NewTrie(options ...Option) *Trie { - trie := &Trie{} - - for _, opt := range options { - opt(trie) - } - - if trie.maxPrefixPerNode <= 0 { - trie.maxPrefixPerNode = DefaultMaxPrefixPerNode - } - if trie.maxChildrenPerSparseNode <= 0 { - trie.maxChildrenPerSparseNode = DefaultMaxChildrenPerSparseNode - } - - trie.children = newSparseChildList(trie.maxChildrenPerSparseNode) - return trie -} - -func MaxPrefixPerNode(value int) Option { - return func(trie *Trie) { - trie.maxPrefixPerNode = value - } -} - -func MaxChildrenPerSparseNode(value int) Option { - return func(trie *Trie) { - trie.maxChildrenPerSparseNode = value - } -} - -// Item returns the item stored in the root of this trie. -func (trie *Trie) Item() Item { - return trie.item -} - -// Insert inserts a new item into the trie using the given prefix. Insert does -// not replace existing items. It returns false if an item was already in place. -func (trie *Trie) Insert(key Prefix, item Item) (inserted bool) { - return trie.put(key, item, false) -} - -// Set works much like Insert, but it always sets the item, possibly replacing -// the item previously inserted. -func (trie *Trie) Set(key Prefix, item Item) { - trie.put(key, item, true) -} - -// Get returns the item located at key. -// -// This method is a bit dangerous, because Get can as well end up in an internal -// node that is not really representing any user-defined value. So when nil is -// a valid value being used, it is not possible to tell if the value was inserted -// into the tree by the user or not. A possible workaround for this is not to use -// nil interface as a valid value, even using zero value of any type is enough -// to prevent this bad behaviour. -func (trie *Trie) Get(key Prefix) (item Item) { - _, node, found, leftover := trie.findSubtree(key) - if !found || len(leftover) != 0 { - return nil - } - return node.item -} - -// Match returns what Get(prefix) != nil would return. The same warning as for -// Get applies here as well. -func (trie *Trie) Match(prefix Prefix) (matchedExactly bool) { - return trie.Get(prefix) != nil -} - -// MatchSubtree returns true when there is a subtree representing extensions -// to key, that is if there are any keys in the tree which have key as prefix. -func (trie *Trie) MatchSubtree(key Prefix) (matched bool) { - _, _, matched, _ = trie.findSubtree(key) - return -} - -// Visit calls visitor on every node containing a non-nil item -// in alphabetical order. -// -// If an error is returned from visitor, the function stops visiting the tree -// and returns that error, unless it is a special error - SkipSubtree. In that -// case Visit skips the subtree represented by the current node and continues -// elsewhere. -func (trie *Trie) Visit(visitor VisitorFunc) error { - return trie.walk(nil, visitor) -} - -func (trie *Trie) size() int { - n := 0 - - trie.walk(nil, func(prefix Prefix, item Item) error { - n++ - return nil - }) - - return n -} - -func (trie *Trie) total() int { - return 1 + trie.children.total() -} - -// VisitSubtree works much like Visit, but it only visits nodes matching prefix. -func (trie *Trie) VisitSubtree(prefix Prefix, visitor VisitorFunc) error { - // Nil prefix not allowed. - if prefix == nil { - panic(ErrNilPrefix) - } - - // Empty trie must be handled explicitly. - if trie.prefix == nil { - return nil - } - - // Locate the relevant subtree. - _, root, found, leftover := trie.findSubtree(prefix) - if !found { - return nil - } - prefix = append(prefix, leftover...) - - // Visit it. 
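Before this copy disappears from vendor/, the public surface above is worth a capsule summary: keys are byte prefixes, items are arbitrary values, and visits run in alphabetical order. A short usage sketch of the upstream package (my own illustration, not code from this diff):

package main

import (
	"fmt"

	"github.com/tchap/go-patricia/patricia"
)

func main() {
	trie := patricia.NewTrie()
	trie.Insert(patricia.Prefix("/etc/hosts"), 1)
	trie.Insert(patricia.Prefix("/etc/passwd"), 2)

	fmt.Println(trie.Get(patricia.Prefix("/etc/hosts")))    // 1
	fmt.Println(trie.MatchSubtree(patricia.Prefix("/etc"))) // true

	// Visit every key under /etc, alphabetically.
	trie.VisitSubtree(patricia.Prefix("/etc"), func(prefix patricia.Prefix, item patricia.Item) error {
		fmt.Printf("%s=%v\n", prefix, item)
		return nil
	})
}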
- return root.walk(prefix, visitor) -} - -// VisitPrefixes visits only nodes that represent prefixes of key. -// To say the obvious, returning SkipSubtree from visitor makes no sense here. -func (trie *Trie) VisitPrefixes(key Prefix, visitor VisitorFunc) error { - // Nil key not allowed. - if key == nil { - panic(ErrNilPrefix) - } - - // Empty trie must be handled explicitly. - if trie.prefix == nil { - return nil - } - - // Walk the path matching key prefixes. - node := trie - prefix := key - offset := 0 - for { - // Compute what part of prefix matches. - common := node.longestCommonPrefixLength(key) - key = key[common:] - offset += common - - // Partial match means that there is no subtree matching prefix. - if common < len(node.prefix) { - return nil - } - - // Call the visitor. - if item := node.item; item != nil { - if err := visitor(prefix[:offset], item); err != nil { - return err - } - } - - if len(key) == 0 { - // This node represents key, we are finished. - return nil - } - - // There is some key suffix left, move to the children. - child := node.children.next(key[0]) - if child == nil { - // There is nowhere to continue, return. - return nil - } - - node = child - } -} - -// Delete deletes the item represented by the given prefix. -// -// True is returned if the matching node was found and deleted. -func (trie *Trie) Delete(key Prefix) (deleted bool) { - // Nil prefix not allowed. - if key == nil { - panic(ErrNilPrefix) - } - - // Empty trie must be handled explicitly. - if trie.prefix == nil { - return false - } - - // Find the relevant node. - path, found, _ := trie.findSubtreePath(key) - if !found { - return false - } - - node := path[len(path)-1] - var parent *Trie - if len(path) != 1 { - parent = path[len(path)-2] - } - - // If the item is already set to nil, there is nothing to do. - if node.item == nil { - return false - } - - // Delete the item. - node.item = nil - - // Initialise i before goto. - // Will be used later in a loop. - i := len(path) - 1 - - // In case there are some child nodes, we cannot drop the whole subtree. - // We can try to compact nodes, though. - if node.children.length() != 0 { - goto Compact - } - - // In case we are at the root, just reset it and we are done. - if parent == nil { - node.reset() - return true - } - - // We can drop a subtree. - // Find the first ancestor that has its value set or it has 2 or more child nodes. - // That will be the node where to drop the subtree at. - for ; i >= 0; i-- { - if current := path[i]; current.item != nil || current.children.length() >= 2 { - break - } - } - - // Handle the case when there is no such node. - // In other words, we can reset the whole tree. - if i == -1 { - path[0].reset() - return true - } - - // We can just remove the subtree here. - node = path[i] - if i == 0 { - parent = nil - } else { - parent = path[i-1] - } - // i+1 is always a valid index since i is never pointing to the last node. - // The loop above skips at least the last node since we are sure that the item - // is set to nil and it has no children, othewise we would be compacting instead. - node.children.remove(path[i+1].prefix[0]) - -Compact: - // The node is set to the first non-empty ancestor, - // so try to compact since that might be possible now. 
- if compacted := node.compact(); compacted != node { - if parent == nil { - *node = *compacted - } else { - parent.children.replace(node.prefix[0], compacted) - *parent = *parent.compact() - } - } - - return true -} - -// DeleteSubtree finds the subtree exactly matching prefix and deletes it. -// -// True is returned if the subtree was found and deleted. -func (trie *Trie) DeleteSubtree(prefix Prefix) (deleted bool) { - // Nil prefix not allowed. - if prefix == nil { - panic(ErrNilPrefix) - } - - // Empty trie must be handled explicitly. - if trie.prefix == nil { - return false - } - - // Locate the relevant subtree. - parent, root, found, _ := trie.findSubtree(prefix) - if !found { - return false - } - - // If we are in the root of the trie, reset the trie. - if parent == nil { - root.reset() - return true - } - - // Otherwise remove the root node from its parent. - parent.children.remove(root.prefix[0]) - return true -} - -// Internal helper methods ----------------------------------------------------- - -func (trie *Trie) empty() bool { - return trie.item == nil && trie.children.length() == 0 -} - -func (trie *Trie) reset() { - trie.prefix = nil - trie.children = newSparseChildList(trie.maxPrefixPerNode) -} - -func (trie *Trie) put(key Prefix, item Item, replace bool) (inserted bool) { - // Nil prefix not allowed. - if key == nil { - panic(ErrNilPrefix) - } - - var ( - common int - node *Trie = trie - child *Trie - ) - - if node.prefix == nil { - if len(key) <= trie.maxPrefixPerNode { - node.prefix = key - goto InsertItem - } - node.prefix = key[:trie.maxPrefixPerNode] - key = key[trie.maxPrefixPerNode:] - goto AppendChild - } - - for { - // Compute the longest common prefix length. - common = node.longestCommonPrefixLength(key) - key = key[common:] - - // Only a part matches, split. - if common < len(node.prefix) { - goto SplitPrefix - } - - // common == len(node.prefix) since never (common > len(node.prefix)) - // common == len(former key) <-> 0 == len(key) - // -> former key == node.prefix - if len(key) == 0 { - goto InsertItem - } - - // Check children for matching prefix. - child = node.children.next(key[0]) - if child == nil { - goto AppendChild - } - node = child - } - -SplitPrefix: - // Split the prefix if necessary. - child = new(Trie) - *child = *node - *node = *NewTrie() - node.prefix = child.prefix[:common] - child.prefix = child.prefix[common:] - child = child.compact() - node.children = node.children.add(child) - -AppendChild: - // Keep appending children until whole prefix is inserted. - // This loop starts with empty node.prefix that needs to be filled. - for len(key) != 0 { - child := NewTrie() - if len(key) <= trie.maxPrefixPerNode { - child.prefix = key - node.children = node.children.add(child) - node = child - goto InsertItem - } else { - child.prefix = key[:trie.maxPrefixPerNode] - key = key[trie.maxPrefixPerNode:] - node.children = node.children.add(child) - node = child - } - } - -InsertItem: - // Try to insert the item if possible. - if replace || node.item == nil { - node.item = item - return true - } - return false -} - -func (trie *Trie) compact() *Trie { - // Only a node with a single child can be compacted. - if trie.children.length() != 1 { - return trie - } - - child := trie.children.head() - - // If any item is set, we cannot compact since we want to retain - // the ability to do searching by key. This makes compaction less usable, - // but that simply cannot be avoided. 
- if trie.item != nil || child.item != nil { - return trie - } - - // Make sure the combined prefixes fit into a single node. - if len(trie.prefix)+len(child.prefix) > trie.maxPrefixPerNode { - return trie - } - - // Concatenate the prefixes, move the items. - child.prefix = append(trie.prefix, child.prefix...) - if trie.item != nil { - child.item = trie.item - } - - return child -} - -func (trie *Trie) findSubtree(prefix Prefix) (parent *Trie, root *Trie, found bool, leftover Prefix) { - // Find the subtree matching prefix. - root = trie - for { - // Compute what part of prefix matches. - common := root.longestCommonPrefixLength(prefix) - prefix = prefix[common:] - - // We used up the whole prefix, subtree found. - if len(prefix) == 0 { - found = true - leftover = root.prefix[common:] - return - } - - // Partial match means that there is no subtree matching prefix. - if common < len(root.prefix) { - leftover = root.prefix[common:] - return - } - - // There is some prefix left, move to the children. - child := root.children.next(prefix[0]) - if child == nil { - // There is nowhere to continue, there is no subtree matching prefix. - return - } - - parent = root - root = child - } -} - -func (trie *Trie) findSubtreePath(prefix Prefix) (path []*Trie, found bool, leftover Prefix) { - // Find the subtree matching prefix. - root := trie - var subtreePath []*Trie - for { - // Append the current root to the path. - subtreePath = append(subtreePath, root) - - // Compute what part of prefix matches. - common := root.longestCommonPrefixLength(prefix) - prefix = prefix[common:] - - // We used up the whole prefix, subtree found. - if len(prefix) == 0 { - path = subtreePath - found = true - leftover = root.prefix[common:] - return - } - - // Partial match means that there is no subtree matching prefix. - if common < len(root.prefix) { - leftover = root.prefix[common:] - return - } - - // There is some prefix left, move to the children. - child := root.children.next(prefix[0]) - if child == nil { - // There is nowhere to continue, there is no subtree matching prefix. - return - } - - root = child - } -} - -func (trie *Trie) walk(actualRootPrefix Prefix, visitor VisitorFunc) error { - var prefix Prefix - // Allocate a bit more space for prefix at the beginning. - if actualRootPrefix == nil { - prefix = make(Prefix, 32+len(trie.prefix)) - copy(prefix, trie.prefix) - prefix = prefix[:len(trie.prefix)] - } else { - prefix = make(Prefix, 32+len(actualRootPrefix)) - copy(prefix, actualRootPrefix) - prefix = prefix[:len(actualRootPrefix)] - } - - // Visit the root first. Not that this works for empty trie as well since - // in that case item == nil && len(children) == 0. - if trie.item != nil { - if err := visitor(prefix, trie.item); err != nil { - if err == SkipSubtree { - return nil - } - return err - } - } - - // Then continue to the children. 
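One contract above deserves emphasis: a visitor may return the sentinel SkipSubtree to prune the branch rooted at the current node without aborting the walk, while any other error stops the visit entirely. A sketch of that pruning (my own, reusing the trie from the previous example and assuming bytes and log imports):

err := trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error {
	if bytes.HasPrefix(prefix, []byte("/tmp")) {
		return patricia.SkipSubtree // ignore everything under /tmp
	}
	fmt.Printf("%s=%v\n", prefix, item)
	return nil
})
if err != nil {
	log.Fatal(err) // SkipSubtree itself is swallowed by the walk, never surfaced
}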
- return trie.children.walk(&prefix, visitor) -} - -func (trie *Trie) longestCommonPrefixLength(prefix Prefix) (i int) { - for ; i < len(prefix) && i < len(trie.prefix) && prefix[i] == trie.prefix[i]; i++ { - } - return -} - -func (trie *Trie) dump() string { - writer := &bytes.Buffer{} - trie.print(writer, 0) - return writer.String() -} - -func (trie *Trie) print(writer io.Writer, indent int) { - fmt.Fprintf(writer, "%s%s %v\n", strings.Repeat(" ", indent), string(trie.prefix), trie.item) - trie.children.print(writer, indent+2) -} - -// Errors ---------------------------------------------------------------------- - -var ( - SkipSubtree = errors.New("Skip this subtree") - ErrNilPrefix = errors.New("Nil prefix passed into a method call") -) diff --git a/vendor/github.com/tonistiigi/fsutil/LICENSE b/vendor/github.com/tonistiigi/fsutil/LICENSE new file mode 100644 index 0000000000..7df441d93e --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/LICENSE @@ -0,0 +1,22 @@ +MIT + +Copyright 2017 Tõnis Tiigi + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go new file mode 100644 index 0000000000..74f08a15ca --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go @@ -0,0 +1,20 @@ +// +build linux + +package fsutil + +import ( + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func chtimes(path string, un int64) error { + var utimes [2]unix.Timespec + utimes[0] = unix.NsecToTimespec(un) + utimes[1] = utimes[0] + + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrap(err, "failed call to UtimesNanoAt") + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go new file mode 100644 index 0000000000..39bfdfee5b --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go @@ -0,0 +1,13 @@ +// +build !linux + +package fsutil + +import ( + "os" + "time" +) + +func chtimes(path string, un int64) error { + mtime := time.Unix(0, un) + return os.Chtimes(path, mtime, mtime) +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go new file mode 100644 index 0000000000..6125ef73af --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff.go @@ -0,0 +1,44 @@ +package fsutil + +import ( + "hash" + "os" + + "golang.org/x/net/context" +) + +type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error + +func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error { + return nil +} + +type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error + +type ContentHasher func(*Stat) (hash.Hash, error) + +func GetWalkerFn(root string) walkerFn { + return func(ctx context.Context, pathC chan<- *currentPath) error { + return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + p := ¤tPath{ + path: path, + f: f, + } + + select { + case <-ctx.Done(): + return ctx.Err() + case pathC <- p: + return nil + } + }) + } +} + +func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error { + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go new file mode 100644 index 0000000000..c7c9788e85 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go @@ -0,0 +1,199 @@ +package fsutil + +import ( + "os" + "strings" + + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +// Everything below is copied from containerd/fs. TODO: remove duplication @dmcgowan + +// Const redefined because containerd/fs doesn't build on !linux + +// ChangeKind is the type of modification that +// a change is making. +type ChangeKind int + +const ( + // ChangeKindAdd represents an addition of + // a file + ChangeKindAdd ChangeKind = iota + + // ChangeKindModify represents a change to + // an existing file + ChangeKindModify + + // ChangeKindDelete represents a delete of + // a file + ChangeKindDelete +) + +// ChangeFunc is the type of function called for each change +// computed during a directory changes calculation. 
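GetWalkerFn above adapts a filesystem walk into the channel-fed walkerFn shape that the diff machinery consumes: each walker pushes *currentPath values until the walk ends or the context is cancelled. Driving one by hand looks roughly like this (in-package sketch, since currentPath and walkerFn are unexported; assumes context, fmt and log imports):

pathC := make(chan *currentPath, 128)
walk := GetWalkerFn("/some/root")

go func() {
	defer close(pathC)
	if err := walk(context.Background(), pathC); err != nil {
		log.Println("walk failed:", err)
	}
}()
for p := range pathC {
	fmt.Println(p.path, p.f.Mode())
}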
+type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error + +type currentPath struct { + path string + f os.FileInfo + // fullPath string +} + +// doubleWalkDiff walks both directories to create a diff +func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b walkerFn) (err error) { + g, ctx := errgroup.WithContext(ctx) + + var ( + c1 = make(chan *currentPath, 128) + c2 = make(chan *currentPath, 128) + + f1, f2 *currentPath + rmdir string + ) + g.Go(func() error { + defer close(c1) + return a(ctx, c1) + }) + g.Go(func() error { + defer close(c2) + return b(ctx, c2) + }) + g.Go(func() error { + loop0: + for c1 != nil || c2 != nil { + if f1 == nil && c1 != nil { + f1, err = nextPath(ctx, c1) + if err != nil { + return err + } + if f1 == nil { + c1 = nil + } + } + + if f2 == nil && c2 != nil { + f2, err = nextPath(ctx, c2) + if err != nil { + return err + } + if f2 == nil { + c2 = nil + } + } + if f1 == nil && f2 == nil { + continue + } + + var f os.FileInfo + k, p := pathChange(f1, f2) + switch k { + case ChangeKindAdd: + if rmdir != "" { + rmdir = "" + } + f = f2.f + f2 = nil + case ChangeKindDelete: + // Check if this file is already removed by being + // under of a removed directory + if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { + f1 = nil + continue + } else if rmdir == "" && f1.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f1 = nil + case ChangeKindModify: + same, err := sameFile(f1, f2) + if err != nil { + return err + } + if f1.f.IsDir() && !f2.f.IsDir() { + rmdir = f1.path + string(os.PathSeparator) + } else if rmdir != "" { + rmdir = "" + } + f = f2.f + f1 = nil + f2 = nil + if same { + continue loop0 + } + } + if err := changeFn(k, p, f, nil); err != nil { + return err + } + } + return nil + }) + + return g.Wait() +} + +func pathChange(lower, upper *currentPath) (ChangeKind, string) { + if lower == nil { + if upper == nil { + panic("cannot compare nil paths") + } + return ChangeKindAdd, upper.path + } + if upper == nil { + return ChangeKindDelete, lower.path + } + + switch i := ComparePath(lower.path, upper.path); { + case i < 0: + // File in lower that is not in upper + return ChangeKindDelete, lower.path + case i > 0: + // File in upper that is not in lower + return ChangeKindAdd, upper.path + default: + return ChangeKindModify, upper.path + } +} + +func sameFile(f1, f2 *currentPath) (same bool, retErr error) { + // If not a directory also check size, modtime, and content + if !f1.f.IsDir() { + if f1.f.Size() != f2.f.Size() { + return false, nil + } + + t1 := f1.f.ModTime() + t2 := f2.f.ModTime() + if t1.UnixNano() != t2.UnixNano() { + return false, nil + } + } + + ls1, ok := f1.f.Sys().(*Stat) + if !ok { + return false, nil + } + ls2, ok := f1.f.Sys().(*Stat) + if !ok { + return false, nil + } + + return compareStat(ls1, ls2) +} + +// compareStat returns whether the stats are equivalent, +// whether the files are considered the same file, and +// an error +func compareStat(ls1, ls2 *Stat) (bool, error) { + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil +} + +func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case p := <-pathC: + return p, nil + } +} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go 
b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go new file mode 100644 index 0000000000..4ac7ec5ed7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go @@ -0,0 +1,37 @@ +package fsutil + +import ( + "bytes" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +// compareSysStat returns whether the stats are equivalent, +// whether the files are considered the same file, and +// an error +func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != syscall.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != syscall.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go new file mode 100644 index 0000000000..e2d034c75e --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -0,0 +1,323 @@ +package fsutil + +import ( + "hash" + "io" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +type WriteToFunc func(context.Context, string, io.WriteCloser) error + +type DiskWriterOpt struct { + AsyncDataCb WriteToFunc + SyncDataCb WriteToFunc + NotifyCb func(ChangeKind, string, os.FileInfo, error) error + ContentHasher ContentHasher + Filter FilterFunc +} + +type FilterFunc func(*Stat) bool + +type DiskWriter struct { + opt DiskWriterOpt + dest string + + wg sync.WaitGroup + ctx context.Context + cancel func() + eg *errgroup.Group + filter FilterFunc +} + +func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { + if opt.SyncDataCb == nil && opt.AsyncDataCb == nil { + return nil, errors.New("no data callback specified") + } + if opt.SyncDataCb != nil && opt.AsyncDataCb != nil { + return nil, errors.New("can't specify both sync and async data callbacks") + } + + ctx, cancel := context.WithCancel(ctx) + eg, ctx := errgroup.WithContext(ctx) + + return &DiskWriter{ + opt: opt, + dest: dest, + eg: eg, + ctx: ctx, + cancel: cancel, + filter: opt.Filter, + }, nil +} + +func (dw *DiskWriter) Wait(ctx context.Context) error { + return dw.eg.Wait() +} + +func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + return err + } + + select { + case <-dw.ctx.Done(): + return dw.ctx.Err() + default: + } + + defer func() { + if retErr != nil { + dw.cancel() + } + }() + + p = filepath.FromSlash(p) + + destPath := filepath.Join(dw.dest, p) + + if kind == ChangeKindDelete { + // todo: no need to validate if diff is trusted but is it always? 
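Stepping back to doubleWalkDiff above: it is a two-channel merge join. Both walkers emit paths in sorted order, ComparePath decides whether a path exists only on the left (delete), only on the right (add), or on both (modify, filtered through sameFile), and the rmdir bookkeeping suppresses deletes already implied by a deleted parent directory. An in-package sketch of diffing two trees (my own; assumes context, fmt and os imports):

err := doubleWalkDiff(context.Background(),
	func(kind ChangeKind, path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		switch kind {
		case ChangeKindAdd:
			fmt.Println("A", path)
		case ChangeKindDelete:
			fmt.Println("D", path) // fi is nil for deletes
		case ChangeKindModify:
			fmt.Println("M", path)
		}
		return nil
	},
	GetWalkerFn("/old"), GetWalkerFn("/new"))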
+ if err := os.RemoveAll(destPath); err != nil { + return errors.Wrapf(err, "failed to remove: %s", destPath) + } + if dw.opt.NotifyCb != nil { + if err := dw.opt.NotifyCb(kind, p, nil, nil); err != nil { + return err + } + } + return nil + } + + stat, ok := fi.Sys().(*Stat) + if !ok { + return errors.Errorf("%s invalid change without stat information", p) + } + + if dw.filter != nil { + if ok := dw.filter(stat); !ok { + return nil + } + } + + rename := true + oldFi, err := os.Lstat(destPath) + if err != nil { + if os.IsNotExist(err) { + if kind != ChangeKindAdd { + return errors.Wrapf(err, "invalid addition: %s", destPath) + } + rename = false + } else { + return errors.Wrapf(err, "failed to stat %s", destPath) + } + } + + if oldFi != nil && fi.IsDir() && oldFi.IsDir() { + if err := rewriteMetadata(destPath, stat); err != nil { + return errors.Wrapf(err, "error setting dir metadata for %s", destPath) + } + return nil + } + + newPath := destPath + if rename { + newPath = filepath.Join(filepath.Dir(destPath), ".tmp."+nextSuffix()) + } + + isRegularFile := false + + switch { + case fi.IsDir(): + if err := os.Mkdir(newPath, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to create dir %s", newPath) + } + case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: + if err := handleTarTypeBlockCharFifo(newPath, stat); err != nil { + return errors.Wrapf(err, "failed to create device %s", newPath) + } + case fi.Mode()&os.ModeSymlink != 0: + if err := os.Symlink(stat.Linkname, newPath); err != nil { + return errors.Wrapf(err, "failed to symlink %s", newPath) + } + case stat.Linkname != "": + if err := os.Link(filepath.Join(dw.dest, stat.Linkname), newPath); err != nil { + return errors.Wrapf(err, "failed to link %s to %s", newPath, stat.Linkname) + } + default: + isRegularFile = true + file, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY, fi.Mode()) //todo: windows + if err != nil { + return errors.Wrapf(err, "failed to create %s", newPath) + } + if dw.opt.SyncDataCb != nil { + if err := dw.processChange(ChangeKindAdd, p, fi, file); err != nil { + file.Close() + return err + } + break + } + if err := file.Close(); err != nil { + return errors.Wrapf(err, "failed to close %s", newPath) + } + } + + if err := rewriteMetadata(newPath, stat); err != nil { + return errors.Wrapf(err, "error setting metadata for %s", newPath) + } + + if rename { + if err := os.Rename(newPath, destPath); err != nil { + return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath) + } + } + + if isRegularFile { + if dw.opt.AsyncDataCb != nil { + dw.requestAsyncFileData(p, destPath, fi) + } + } else { + return dw.processChange(kind, p, fi, nil) + } + + return nil +} + +func (dw *DiskWriter) requestAsyncFileData(p, dest string, fi os.FileInfo) { + // todo: limit worker threads + dw.eg.Go(func() error { + if err := dw.processChange(ChangeKindAdd, p, fi, &lazyFileWriter{ + dest: dest, + }); err != nil { + return err + } + return chtimes(dest, fi.ModTime().UnixNano()) // TODO: parent dirs + }) +} + +func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w io.WriteCloser) error { + origw := w + var hw *hashedWriter + if dw.opt.NotifyCb != nil { + var err error + if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil { + return err + } + w = hw + } + if origw != nil { + fn := dw.opt.SyncDataCb + if fn == nil && dw.opt.AsyncDataCb != nil { + fn = dw.opt.AsyncDataCb + } + if err := fn(dw.ctx, p, w); err != nil { + return err + } + } else { + if hw != nil { + 
hw.Close() + } + } + if hw != nil { + return dw.opt.NotifyCb(kind, p, hw, nil) + } + return nil +} + +type hashedWriter struct { + os.FileInfo + io.Writer + h hash.Hash + w io.WriteCloser + dgst digest.Digest +} + +func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { + stat, ok := fi.Sys().(*Stat) + if !ok { + return nil, errors.Errorf("invalid change without stat information") + } + + h, err := ch(stat) + if err != nil { + return nil, err + } + hw := &hashedWriter{ + FileInfo: fi, + Writer: io.MultiWriter(w, h), + h: h, + w: w, + } + return hw, nil +} + +func (hw *hashedWriter) Close() error { + hw.dgst = digest.NewDigest(digest.SHA256, hw.h) + if hw.w != nil { + return hw.w.Close() + } + return nil +} + +func (hw *hashedWriter) Digest() digest.Digest { + return hw.dgst +} + +type lazyFileWriter struct { + dest string + ctx context.Context + f *os.File +} + +func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { + if lfw.f == nil { + file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows + if err != nil { + return 0, errors.Wrapf(err, "failed to open %s", lfw.dest) + } + lfw.f = file + } + return lfw.f.Write(dt) +} + +func (lfw *lazyFileWriter) Close() error { + if lfw.f != nil { + return lfw.f.Close() + } + return nil +} + +func mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go new file mode 100644 index 0000000000..19dffabf74 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -0,0 +1,51 @@ +// +build !windows + +package fsutil + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +func rewriteMetadata(p string, stat *Stat) error { + for key, value := range stat.Xattrs { + sysx.Setxattr(p, key, value, 0) + } + + if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { + return errors.Wrapf(err, "failed to lchown %s", p) + } + + if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { + if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { + return errors.Wrapf(err, "failed to chown %s", p) + } + } + + if err := chtimes(p, stat.ModTime); err != nil { + return errors.Wrapf(err, "failed to chtimes %s", p) + } + + return nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(path string, stat *Stat) error { + mode := uint32(stat.Mode & 07777) + if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { + mode |= syscall.S_IFCHR + } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { + mode |= syscall.S_IFIFO + } else { + mode |= syscall.S_IFBLK + } + + if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { + return err + } + return nil +} diff 
--git a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go new file mode 100644 index 0000000000..c6d0d7bec7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go @@ -0,0 +1,17 @@ +// +build windows + +package fsutil + +import ( + "github.com/pkg/errors" +) + +func rewriteMetadata(p string, stat *Stat) error { + return chtimes(p, stat.ModTime) +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(path string, stat *Stat) error { + return errors.New("Not implemented on windows") +} diff --git a/vendor/github.com/tonistiigi/fsutil/generate.go b/vendor/github.com/tonistiigi/fsutil/generate.go new file mode 100644 index 0000000000..e433195669 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/generate.go @@ -0,0 +1,3 @@ +package fsutil + +//go:generate protoc --gogoslick_out=. stat.proto wire.proto diff --git a/vendor/github.com/tonistiigi/fsutil/hardlinks.go b/vendor/github.com/tonistiigi/fsutil/hardlinks.go new file mode 100644 index 0000000000..e598ead7e6 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/hardlinks.go @@ -0,0 +1,46 @@ +package fsutil + +import ( + "os" + + "github.com/pkg/errors" +) + +// Hardlinks validates that all targets for links were part of the changes + +type Hardlinks struct { + seenFiles map[string]struct{} +} + +func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if v.seenFiles == nil { + v.seenFiles = make(map[string]struct{}) + } + + if kind == ChangeKindDelete { + return nil + } + + stat, ok := fi.Sys().(*Stat) + if !ok { + return errors.Errorf("invalid change without stat info: %s", p) + } + + if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 { + return nil + } + + if len(stat.Linkname) > 0 { + if _, ok := v.seenFiles[stat.Linkname]; !ok { + return errors.Errorf("invalid link %s to unknown path: %q", p, stat.Linkname) + } + } else { + v.seenFiles[p] = struct{}{} + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go new file mode 100644 index 0000000000..cd4ec50b87 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/receive.go @@ -0,0 +1,267 @@ +package fsutil + +import ( + "io" + "os" + "sync" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +type ReceiveOpt struct { + NotifyHashed ChangeFunc + ContentHasher ContentHasher + ProgressCb func(int, bool) + Merge bool + Filter FilterFunc +} + +func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := &receiver{ + conn: &syncStream{Stream: conn}, + dest: dest, + files: make(map[string]uint32), + pipes: make(map[uint32]io.WriteCloser), + notifyHashed: opt.NotifyHashed, + contentHasher: opt.ContentHasher, + progressCb: opt.ProgressCb, + merge: opt.Merge, + filter: opt.Filter, + } + return r.run(ctx) +} + +type receiver struct { + dest string + conn Stream + files map[string]uint32 + pipes map[uint32]io.WriteCloser + mu sync.RWMutex + muPipes sync.RWMutex + progressCb func(int, bool) + merge bool + filter FilterFunc + + notifyHashed ChangeFunc + contentHasher ContentHasher + orderValidator Validator + hlValidator Hardlinks +} + +type dynamicWalker struct { + walkChan chan 
*currentPath + err error + closeCh chan struct{} +} + +func newDynamicWalker() *dynamicWalker { + return &dynamicWalker{ + walkChan: make(chan *currentPath, 128), + closeCh: make(chan struct{}), + } +} + +func (w *dynamicWalker) update(p *currentPath) error { + select { + case <-w.closeCh: + return errors.Wrap(w.err, "walker is closed") + default: + } + if p == nil { + close(w.walkChan) + return nil + } + select { + case w.walkChan <- p: + return nil + case <-w.closeCh: + return errors.Wrap(w.err, "walker is closed") + } +} + +func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error { + for { + select { + case p, ok := <-w.walkChan: + if !ok { + return nil + } + pathC <- p + case <-ctx.Done(): + w.err = ctx.Err() + close(w.closeCh) + return ctx.Err() + } + } + return nil +} + +func (r *receiver) run(ctx context.Context) error { + g, ctx := errgroup.WithContext(ctx) + + dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{ + AsyncDataCb: r.asyncDataFunc, + NotifyCb: r.notifyHashed, + ContentHasher: r.contentHasher, + Filter: r.filter, + }) + if err != nil { + return err + } + + w := newDynamicWalker() + + g.Go(func() (retErr error) { + defer func() { + if retErr != nil { + r.conn.SendMsg(&Packet{Type: PACKET_ERR, Data: []byte(retErr.Error())}) + } + }() + destWalker := emptyWalker + if !r.merge { + destWalker = GetWalkerFn(r.dest) + } + err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill) + if err != nil { + return err + } + if err := dw.Wait(ctx); err != nil { + return err + } + r.conn.SendMsg(&Packet{Type: PACKET_FIN}) + return nil + }) + + g.Go(func() error { + var i uint32 = 0 + + size := 0 + if r.progressCb != nil { + defer func() { + r.progressCb(size, true) + }() + } + var p Packet + for { + p = Packet{Data: p.Data[:0]} + if err := r.conn.RecvMsg(&p); err != nil { + return err + } + if r.progressCb != nil { + size += p.Size() + r.progressCb(size, false) + } + + switch p.Type { + case PACKET_STAT: + if p.Stat == nil { + if err := w.update(nil); err != nil { + return err + } + break + } + if fileCanRequestData(os.FileMode(p.Stat.Mode)) { + r.mu.Lock() + r.files[p.Stat.Path] = i + r.mu.Unlock() + } + i++ + cp := ¤tPath{path: p.Stat.Path, f: &StatInfo{p.Stat}} + if err := r.orderValidator.HandleChange(ChangeKindAdd, cp.path, cp.f, nil); err != nil { + return err + } + if err := r.hlValidator.HandleChange(ChangeKindAdd, cp.path, cp.f, nil); err != nil { + return err + } + if err := w.update(cp); err != nil { + return err + } + case PACKET_DATA: + r.muPipes.Lock() + pw, ok := r.pipes[p.ID] + r.muPipes.Unlock() + if !ok { + return errors.Errorf("invalid file request %s", p.ID) + } + if len(p.Data) == 0 { + if err := pw.Close(); err != nil { + return err + } + } else { + if _, err := pw.Write(p.Data); err != nil { + return err + } + } + case PACKET_FIN: + for { + var p Packet + if err := r.conn.RecvMsg(&p); err != nil { + if err == io.EOF { + return nil + } + return err + } + } + } + } + }) + return g.Wait() +} + +func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error { + r.mu.Lock() + id, ok := r.files[p] + if !ok { + r.mu.Unlock() + return errors.Errorf("invalid file request %s", p) + } + delete(r.files, p) + r.mu.Unlock() + + wwc := newWrappedWriteCloser(wc) + r.muPipes.Lock() + r.pipes[id] = wwc + r.muPipes.Unlock() + if err := r.conn.SendMsg(&Packet{Type: PACKET_REQ, ID: id}); err != nil { + return err + } + err := wwc.Wait(ctx) + if err != nil { + return err + } + r.muPipes.Lock() + delete(r.pipes, id) + 
r.muPipes.Unlock() + return nil +} + +type wrappedWriteCloser struct { + io.WriteCloser + err error + once sync.Once + done chan struct{} +} + +func newWrappedWriteCloser(wc io.WriteCloser) *wrappedWriteCloser { + return &wrappedWriteCloser{WriteCloser: wc, done: make(chan struct{})} +} + +func (w *wrappedWriteCloser) Close() error { + w.err = w.WriteCloser.Close() + w.once.Do(func() { close(w.done) }) + return w.err +} + +func (w *wrappedWriteCloser) Wait(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-w.done: + return w.err + } +} diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go new file mode 100644 index 0000000000..aa5dbe393a --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/send.go @@ -0,0 +1,204 @@ +package fsutil + +import ( + "io" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/sync/errgroup" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 32*1<<10) + }, +} + +type Stream interface { + RecvMsg(interface{}) error + SendMsg(m interface{}) error + Context() context.Context +} + +func Send(ctx context.Context, conn Stream, root string, opt *WalkOpt, progressCb func(int, bool)) error { + s := &sender{ + conn: &syncStream{Stream: conn}, + root: root, + opt: opt, + files: make(map[uint32]string), + progressCb: progressCb, + sendpipeline: make(chan *sendHandle, 128), + } + return s.run(ctx) +} + +type sendHandle struct { + id uint32 + path string +} + +type sender struct { + conn Stream + opt *WalkOpt + root string + files map[uint32]string + mu sync.RWMutex + progressCb func(int, bool) + progressCurrent int + sendpipeline chan *sendHandle +} + +func (s *sender) run(ctx context.Context) error { + g, ctx := errgroup.WithContext(ctx) + + defer s.updateProgress(0, true) + + g.Go(func() error { + return s.walk(ctx) + }) + + for i := 0; i < 4; i++ { + g.Go(func() error { + for h := range s.sendpipeline { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + if err := s.sendFile(h); err != nil { + return err + } + } + return nil + }) + } + + g.Go(func() error { + defer close(s.sendpipeline) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + var p Packet + if err := s.conn.RecvMsg(&p); err != nil { + return err + } + switch p.Type { + case PACKET_ERR: + return errors.Errorf("error from receiver: %s", p.Data) + case PACKET_REQ: + if err := s.queue(p.ID); err != nil { + return err + } + case PACKET_FIN: + return s.conn.SendMsg(&Packet{Type: PACKET_FIN}) + } + } + }) + + return g.Wait() +} + +func (s *sender) updateProgress(size int, last bool) { + if s.progressCb != nil { + s.progressCurrent += size + s.progressCb(s.progressCurrent, last) + } +} + +func (s *sender) queue(id uint32) error { + s.mu.Lock() + p, ok := s.files[id] + if !ok { + s.mu.Unlock() + return errors.Errorf("invalid file id %d", id) + } + delete(s.files, id) + s.mu.Unlock() + s.sendpipeline <- &sendHandle{id, p} + return nil +} + +func (s *sender) sendFile(h *sendHandle) error { + f, err := os.Open(filepath.Join(s.root, h.path)) + if err == nil { + defer f.Close() + buf := bufPool.Get().([]byte) + defer bufPool.Put(buf) + if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, buf); err != nil { + return err + } + } + return s.conn.SendMsg(&Packet{ID: h.id, Type: PACKET_DATA}) +} + +func (s *sender) walk(ctx context.Context) error { + var i uint32 = 0 + err := Walk(ctx, s.root, s.opt, 
func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + stat, ok := fi.Sys().(*Stat) + if !ok { + return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path) + } + + p := &Packet{ + Type: PACKET_STAT, + Stat: stat, + } + if fileCanRequestData(os.FileMode(stat.Mode)) { + s.mu.Lock() + s.files[i] = stat.Path + s.mu.Unlock() + } + i++ + s.updateProgress(p.Size(), false) + return errors.Wrapf(s.conn.SendMsg(p), "failed to send stat %s", path) + }) + if err != nil { + return err + } + return errors.Wrapf(s.conn.SendMsg(&Packet{Type: PACKET_STAT}), "failed to send last stat") +} + +func fileCanRequestData(m os.FileMode) bool { + // avoid updating this function as it needs to match between sender/receiver. + // version if needed + return m&os.ModeType == 0 +} + +type fileSender struct { + sender *sender + id uint32 +} + +func (fs *fileSender) Write(dt []byte) (int, error) { + if len(dt) == 0 { + return 0, nil + } + p := &Packet{Type: PACKET_DATA, ID: fs.id, Data: dt} + if err := fs.sender.conn.SendMsg(p); err != nil { + return 0, err + } + fs.sender.updateProgress(p.Size(), false) + return len(dt), nil +} + +type syncStream struct { + Stream + mu sync.Mutex +} + +func (ss *syncStream) SendMsg(m interface{}) error { + ss.mu.Lock() + err := ss.Stream.SendMsg(m) + ss.mu.Unlock() + return err +} diff --git a/vendor/github.com/tonistiigi/fsutil/stat.pb.go b/vendor/github.com/tonistiigi/fsutil/stat.pb.go new file mode 100644 index 0000000000..3f6925e4b0 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/stat.pb.go @@ -0,0 +1,931 @@ +// Code generated by protoc-gen-gogo. +// source: stat.proto +// DO NOT EDIT! + +/* + Package fsutil is a generated protocol buffer package. + + It is generated from these files: + stat.proto + wire.proto + + It has these top-level messages: + Stat + Packet +*/ +package fsutil + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
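Taken together, send.go and receive.go above implement a small duplex packet protocol over a single Stream: the sender streams PACKET_STAT for every walked path (an empty stat marks the end of the walk), the receiver diffs those stats against its destination and replies with PACKET_REQ for each file whose contents it actually needs, the sender answers with PACKET_DATA chunks (an empty chunk is EOF for that file id), and both sides exchange PACKET_FIN to shut down. One possible trace for a single changed file, reconstructed from the code above rather than from any spec:

// sender   -> PACKET_STAT{Path: "a.txt", Mode: 0644, Size: 5, ...}
// sender   -> PACKET_STAT{}               (Stat == nil: walk finished)
// receiver -> PACKET_REQ{ID: 0}           (the diff wants this file's contents)
// sender   -> PACKET_DATA{ID: 0, Data: "hello"}
// sender   -> PACKET_DATA{ID: 0}          (empty Data: EOF for file 0)
// receiver -> PACKET_FIN                  (all changes applied)
// sender   -> PACKET_FIN                  (sender acknowledges and exits)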
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Stat struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + Uid uint32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` + Gid uint32 `protobuf:"varint,4,opt,name=gid,proto3" json:"gid,omitempty"` + Size_ int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + ModTime int64 `protobuf:"varint,6,opt,name=modTime,proto3" json:"modTime,omitempty"` + // int32 typeflag = 7; + Linkname string `protobuf:"bytes,7,opt,name=linkname,proto3" json:"linkname,omitempty"` + Devmajor int64 `protobuf:"varint,8,opt,name=devmajor,proto3" json:"devmajor,omitempty"` + Devminor int64 `protobuf:"varint,9,opt,name=devminor,proto3" json:"devminor,omitempty"` + Xattrs map[string][]byte `protobuf:"bytes,10,rep,name=xattrs" json:"xattrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Stat) Reset() { *m = Stat{} } +func (*Stat) ProtoMessage() {} +func (*Stat) Descriptor() ([]byte, []int) { return fileDescriptorStat, []int{0} } + +func (m *Stat) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Stat) GetMode() uint32 { + if m != nil { + return m.Mode + } + return 0 +} + +func (m *Stat) GetUid() uint32 { + if m != nil { + return m.Uid + } + return 0 +} + +func (m *Stat) GetGid() uint32 { + if m != nil { + return m.Gid + } + return 0 +} + +func (m *Stat) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + +func (m *Stat) GetModTime() int64 { + if m != nil { + return m.ModTime + } + return 0 +} + +func (m *Stat) GetLinkname() string { + if m != nil { + return m.Linkname + } + return "" +} + +func (m *Stat) GetDevmajor() int64 { + if m != nil { + return m.Devmajor + } + return 0 +} + +func (m *Stat) GetDevminor() int64 { + if m != nil { + return m.Devminor + } + return 0 +} + +func (m *Stat) GetXattrs() map[string][]byte { + if m != nil { + return m.Xattrs + } + return nil +} + +func init() { + proto.RegisterType((*Stat)(nil), "fsutil.Stat") +} +func (this *Stat) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Stat) + if !ok { + that2, ok := that.(Stat) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Path != that1.Path { + return false + } + if this.Mode != that1.Mode { + return false + } + if this.Uid != that1.Uid { + return false + } + if this.Gid != that1.Gid { + return false + } + if this.Size_ != that1.Size_ { + return false + } + if this.ModTime != that1.ModTime { + return false + } + if this.Linkname != that1.Linkname { + return false + } + if this.Devmajor != that1.Devmajor { + return false + } + if this.Devminor != that1.Devminor { + return false + } + if len(this.Xattrs) != len(that1.Xattrs) { + return false + } + for i := range this.Xattrs { + if !bytes.Equal(this.Xattrs[i], that1.Xattrs[i]) { + return false + } + } + return true +} +func (this *Stat) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&fsutil.Stat{") + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") + s = append(s, "Uid: "+fmt.Sprintf("%#v", this.Uid)+",\n") + 
s = append(s, "Gid: "+fmt.Sprintf("%#v", this.Gid)+",\n") + s = append(s, "Size_: "+fmt.Sprintf("%#v", this.Size_)+",\n") + s = append(s, "ModTime: "+fmt.Sprintf("%#v", this.ModTime)+",\n") + s = append(s, "Linkname: "+fmt.Sprintf("%#v", this.Linkname)+",\n") + s = append(s, "Devmajor: "+fmt.Sprintf("%#v", this.Devmajor)+",\n") + s = append(s, "Devminor: "+fmt.Sprintf("%#v", this.Devminor)+",\n") + keysForXattrs := make([]string, 0, len(this.Xattrs)) + for k, _ := range this.Xattrs { + keysForXattrs = append(keysForXattrs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) + mapStringForXattrs := "map[string][]byte{" + for _, k := range keysForXattrs { + mapStringForXattrs += fmt.Sprintf("%#v: %#v,", k, this.Xattrs[k]) + } + mapStringForXattrs += "}" + if this.Xattrs != nil { + s = append(s, "Xattrs: "+mapStringForXattrs+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStat(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Stat) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Stat) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStat(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Mode)) + } + if m.Uid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Uid)) + } + if m.Gid != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Gid)) + } + if m.Size_ != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Size_)) + } + if m.ModTime != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.ModTime)) + } + if len(m.Linkname) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintStat(dAtA, i, uint64(len(m.Linkname))) + i += copy(dAtA[i:], m.Linkname) + } + if m.Devmajor != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Devmajor)) + } + if m.Devminor != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintStat(dAtA, i, uint64(m.Devminor)) + } + if len(m.Xattrs) > 0 { + for k, _ := range m.Xattrs { + dAtA[i] = 0x52 + i++ + v := m.Xattrs[k] + byteSize := 0 + if len(v) > 0 { + byteSize = 1 + len(v) + sovStat(uint64(len(v))) + } + mapSize := 1 + len(k) + sovStat(uint64(len(k))) + byteSize + i = encodeVarintStat(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStat(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if len(v) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintStat(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + } + return i, nil +} + +func encodeFixed64Stat(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Stat(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintStat(dAtA []byte, offset 
int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Stat) Size() (n int) { + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovStat(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovStat(uint64(m.Mode)) + } + if m.Uid != 0 { + n += 1 + sovStat(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovStat(uint64(m.Gid)) + } + if m.Size_ != 0 { + n += 1 + sovStat(uint64(m.Size_)) + } + if m.ModTime != 0 { + n += 1 + sovStat(uint64(m.ModTime)) + } + l = len(m.Linkname) + if l > 0 { + n += 1 + l + sovStat(uint64(l)) + } + if m.Devmajor != 0 { + n += 1 + sovStat(uint64(m.Devmajor)) + } + if m.Devminor != 0 { + n += 1 + sovStat(uint64(m.Devminor)) + } + if len(m.Xattrs) > 0 { + for k, v := range m.Xattrs { + _ = k + _ = v + l = 0 + if len(v) > 0 { + l = 1 + len(v) + sovStat(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovStat(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStat(uint64(mapEntrySize)) + } + } + return n +} + +func sovStat(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStat(x uint64) (n int) { + return sovStat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Stat) String() string { + if this == nil { + return "nil" + } + keysForXattrs := make([]string, 0, len(this.Xattrs)) + for k, _ := range this.Xattrs { + keysForXattrs = append(keysForXattrs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) + mapStringForXattrs := "map[string][]byte{" + for _, k := range keysForXattrs { + mapStringForXattrs += fmt.Sprintf("%v: %v,", k, this.Xattrs[k]) + } + mapStringForXattrs += "}" + s := strings.Join([]string{`&Stat{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, + `Gid:` + fmt.Sprintf("%v", this.Gid) + `,`, + `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, + `ModTime:` + fmt.Sprintf("%v", this.ModTime) + `,`, + `Linkname:` + fmt.Sprintf("%v", this.Linkname) + `,`, + `Devmajor:` + fmt.Sprintf("%v", this.Devmajor) + `,`, + `Devminor:` + fmt.Sprintf("%v", this.Devminor) + `,`, + `Xattrs:` + mapStringForXattrs + `,`, + `}`, + }, "") + return s +} +func valueToStringStat(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Stat) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Stat: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
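// int(stringLen) goes negative only when the 64-bit varint overflows int, so the declared length is corrupt +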
return ErrInvalidLengthStat + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + m.Uid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Uid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) + } + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ModTime", wireType) + } + m.ModTime = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ModTime |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStat + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Linkname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Devmajor", wireType) + } + m.Devmajor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Devmajor |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Devminor", wireType) + } + m.Devminor = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Devminor |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Xattrs", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStat + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStat + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Xattrs == nil { + m.Xattrs = make(map[string][]byte) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStat + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthStat + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.Xattrs[mapkey] = mapvalue + } else { + var mapvalue []byte + m.Xattrs[mapkey] = mapvalue + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStat(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStat + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStat(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + 
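// a negative length means the varint overflowed int; stop before indexing past the buffer +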
return 0, ErrInvalidLengthStat + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStat + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStat(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStat = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStat = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("stat.proto", fileDescriptorStat) } + +var fileDescriptorStat = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x91, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0x73, 0x9b, 0x36, 0x6d, 0xdd, 0xff, 0x97, 0x90, 0xc5, 0x70, 0xd5, 0xc1, 0x8a, 0x98, + 0x32, 0xa0, 0x08, 0xc1, 0x02, 0x8c, 0x48, 0xbc, 0x40, 0x60, 0x60, 0x35, 0xb2, 0x29, 0xa6, 0x4d, + 0x5c, 0x25, 0x4e, 0x45, 0x99, 0x78, 0x04, 0x1e, 0x83, 0xd7, 0x60, 0x63, 0xec, 0xc8, 0x48, 0xcc, + 0xc2, 0xd8, 0x47, 0x40, 0x76, 0xda, 0xc2, 0x76, 0xce, 0x77, 0x7c, 0x65, 0x9d, 0x7b, 0x09, 0xa9, + 0x0c, 0x37, 0xe9, 0xbc, 0xd4, 0x46, 0xd3, 0xe8, 0xae, 0xaa, 0x8d, 0x9a, 0x1d, 0xbc, 0x75, 0x48, + 0xf7, 0xca, 0x70, 0x43, 0x29, 0xe9, 0xce, 0xb9, 0xb9, 0x47, 0x88, 0x21, 0x19, 0x66, 0x5e, 0x3b, + 0x96, 0x6b, 0x21, 0xb1, 0x13, 0x43, 0xf2, 0x3f, 0xf3, 0x9a, 0xee, 0x91, 0xb0, 0x56, 0x02, 0x43, + 0x8f, 0x9c, 0x74, 0x64, 0xa2, 0x04, 0x76, 0x5b, 0x32, 0x51, 0xc2, 0xcd, 0x55, 0xea, 0x49, 0x62, + 0x2f, 0x86, 0x24, 0xcc, 0xbc, 0xa6, 0x48, 0xfa, 0xb9, 0x16, 0xd7, 0x2a, 0x97, 0x18, 0x79, 0xbc, + 0xb5, 0x74, 0x4c, 0x06, 0x33, 0x55, 0x4c, 0x0b, 0x9e, 0x4b, 0xec, 0xfb, 0xdf, 0x77, 0xde, 0x65, + 0x42, 0x2e, 0x72, 0xfe, 0xa0, 0x4b, 0x1c, 0xf8, 0xb1, 0x9d, 0xdf, 0x66, 0xaa, 0xd0, 0x25, 0x0e, + 0x7f, 0x33, 0xe7, 0xe9, 0x11, 0x89, 0x1e, 0xb9, 0x31, 0x65, 0x85, 0x24, 0x0e, 0x93, 0xd1, 0x31, + 0xa6, 0x6d, 0xdf, 0xd4, 0x75, 0x4d, 0x6f, 0x7c, 0x74, 0x59, 0x98, 0x72, 0x99, 0x6d, 0xde, 0x8d, + 0xcf, 0xc8, 0xe8, 0x0f, 0x76, 0xa5, 0xa6, 0x72, 0xb9, 0xd9, 0x86, 0x93, 0x74, 0x9f, 0xf4, 0x16, + 0x7c, 0x56, 0xb7, 0xdb, 0xf8, 0x97, 0xb5, 0xe6, 0xbc, 0x73, 0x0a, 0x17, 0x87, 0xab, 0x86, 0x05, + 0x1f, 0x0d, 0x0b, 0xd6, 0x0d, 0x83, 0x67, 0xcb, 0xe0, 0xd5, 0x32, 0x78, 0xb7, 0x0c, 0x56, 0x96, + 0xc1, 0xa7, 0x65, 0xf0, 0x6d, 0x59, 0xb0, 0xb6, 0x0c, 0x5e, 0xbe, 0x58, 0x70, 0x1b, 0xf9, 0x03, + 0x9c, 0xfc, 0x04, 0x00, 0x00, 0xff, 0xff, 0x19, 0x97, 0x14, 0xf4, 0x8e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go new file mode 100644 index 0000000000..2bd1287a85 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/validator.go @@ -0,0 +1,92 @@ +package fsutil + +import ( + "os" + "path" + "runtime" + "sort" + "strings" + + "github.com/pkg/errors" +) + +type parent struct { + dir string + last string +} + +type Validator struct { + parentDirs []parent +} + +func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + return err + } + // test that all 
paths are in order and all parent dirs were present + if v.parentDirs == nil { + v.parentDirs = make([]parent, 1, 10) + } + if runtime.GOOS == "windows" { + p = strings.Replace(p, "\\", "", -1) + } + if p != path.Clean(p) { + return errors.Errorf("invalid unclean path %s", p) + } + if path.IsAbs(p) { + return errors.Errorf("absolute path %s not allowed", p) + } + dir := path.Dir(p) + base := path.Base(p) + if dir == "." { + dir = "" + } + if dir == ".." || strings.HasPrefix(p, "../") { + return errors.Errorf("invalid path: %s", p) + } + + // find a parent dir from saved records + i := sort.Search(len(v.parentDirs), func(i int) bool { + return ComparePath(v.parentDirs[len(v.parentDirs)-1-i].dir, dir) <= 0 + }) + i = len(v.parentDirs) - 1 - i + if i != len(v.parentDirs)-1 { // skipping back to grandparent + v.parentDirs = v.parentDirs[:i+1] + } + + if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base { + return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) + } + v.parentDirs[i].last = base + if kind != ChangeKindDelete && fi.IsDir() { + v.parentDirs = append(v.parentDirs, parent{ + dir: path.Join(dir, base), + last: "", + }) + } + // todo: validate invalid mode combinations + return err +} + +func ComparePath(p1, p2 string) int { + // byte-by-byte comparison to be compatible with str<>str + min := min(len(p1), len(p2)) + for i := 0; i < min; i++ { + switch { + case p1[i] == p2[i]: + continue + case p2[i] != '/' && p1[i] < p2[i] || p1[i] == '/': + return -1 + default: + return 1 + } + } + return len(p1) - len(p2) +} + +func min(x, y int) int { + if x < y { + return x + } + return y +} diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go new file mode 100644 index 0000000000..d05a42dbed --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -0,0 +1,227 @@ +package fsutil + +import ( + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +type WalkOpt struct { + IncludePatterns []string + ExcludePatterns []string + Map func(*Stat) bool +} + +func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { + root, err := filepath.EvalSymlinks(p) + if err != nil { + return errors.Wrapf(err, "failed to resolve %s", root) + } + fi, err := os.Stat(root) + if err != nil { + return errors.Wrapf(err, "failed to stat: %s", root) + } + if !fi.IsDir() { + return errors.Errorf("%s is not a directory", root) + } + + var pm *fileutils.PatternMatcher + if opt != nil && opt.ExcludePatterns != nil { + pm, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) + if err != nil { + return errors.Wrapf(err, "invalid excludepaths %s", opt.ExcludePatterns) + } + } + + var lastIncludedDir string + var includePatternPrefixes []string + + seenFiles := make(map[uint64]string) + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) { + if err != nil { + if os.IsNotExist(err) { + return filepath.SkipDir + } + return err + } + defer func() { + if retErr != nil && os.IsNotExist(errors.Cause(retErr)) { + retErr = filepath.SkipDir + } + }() + origpath := path + path, err = filepath.Rel(root, path) + if err != nil { + return err + } + // Skip root + if path == "." 
{ + return nil + } + + if opt != nil { + if opt.IncludePatterns != nil { + if includePatternPrefixes == nil { + includePatternPrefixes = patternPrefixes(opt.IncludePatterns) + } + matched := false + if lastIncludedDir != "" { + if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) { + matched = true + } + } + if !matched { + for _, p := range opt.IncludePatterns { + if m, _ := filepath.Match(p, path); m { + matched = true + break + } + } + if matched && fi.IsDir() { + lastIncludedDir = path + } + } + if !matched { + if !fi.IsDir() { + return nil + } else { + if noPossiblePrefixMatch(path, includePatternPrefixes) { + return filepath.SkipDir + } + } + } + } + if pm != nil { + m, err := pm.Matches(path) + if err != nil { + return errors.Wrap(err, "failed to match excludepatterns") + } + + if m { + if fi.IsDir() { + if !pm.Exclusions() { + return filepath.SkipDir + } + dirSlash := path + string(filepath.Separator) + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + patStr := pat.String() + string(filepath.Separator) + if strings.HasPrefix(patStr, dirSlash) { + goto passedFilter + } + } + return filepath.SkipDir + } + return nil + } + } + } + + passedFilter: + path = filepath.ToSlash(path) + + stat := &Stat{ + Path: path, + Mode: uint32(fi.Mode()), + Size_: fi.Size(), + ModTime: fi.ModTime().UnixNano(), + } + + setUnixOpt(fi, stat, path, seenFiles) + + if !fi.IsDir() { + if fi.Mode()&os.ModeSymlink != 0 { + link, err := os.Readlink(origpath) + if err != nil { + return errors.Wrapf(err, "failed to readlink %s", origpath) + } + stat.Linkname = link + } + } + if err := loadXattr(origpath, stat); err != nil { + return errors.Wrapf(err, "failed to xattr %s", path) + } + + if runtime.GOOS == "windows" { + permPart := stat.Mode & uint32(os.ModePerm) + noPermPart := stat.Mode &^ uint32(os.ModePerm) + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + stat.Mode = noPermPart | permPart + } + + select { + case <-ctx.Done(): + return ctx.Err() + default: + if opt != nil && opt.Map != nil { + if allowed := opt.Map(stat); !allowed { + return nil + } + } + if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil { + return err + } + } + return nil + }) +} + +type StatInfo struct { + *Stat +} + +func (s *StatInfo) Name() string { + return filepath.Base(s.Stat.Path) +} +func (s *StatInfo) Size() int64 { + return s.Stat.Size_ +} +func (s *StatInfo) Mode() os.FileMode { + return os.FileMode(s.Stat.Mode) +} +func (s *StatInfo) ModTime() time.Time { + return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) +} +func (s *StatInfo) IsDir() bool { + return s.Mode().IsDir() +} +func (s *StatInfo) Sys() interface{} { + return s.Stat +} + +func patternPrefixes(patterns []string) []string { + pfxs := make([]string, 0, len(patterns)) + for _, ptrn := range patterns { + idx := strings.IndexFunc(ptrn, func(ch rune) bool { + return ch == '*' || ch == '?' 
|| ch == '[' || ch == '\\' + }) + if idx == -1 { + idx = len(ptrn) + } + pfxs = append(pfxs, ptrn[:idx]) + } + return pfxs +} + +func noPossiblePrefixMatch(p string, pfxs []string) bool { + for _, pfx := range pfxs { + chk := p + if len(pfx) < len(p) { + chk = p[:len(pfx)] + } + if strings.HasPrefix(pfx, chk) { + return false + } + } + return true +} diff --git a/vendor/github.com/tonistiigi/fsutil/walker_unix.go b/vendor/github.com/tonistiigi/fsutil/walker_unix.go new file mode 100644 index 0000000000..7e8ee80346 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/walker_unix.go @@ -0,0 +1,61 @@ +// +build !windows + +package fsutil + +import ( + "os" + "syscall" + + "github.com/containerd/continuity/sysx" + "github.com/pkg/errors" +) + +func loadXattr(origpath string, stat *Stat) error { + xattrs, err := sysx.LListxattr(origpath) + if err != nil { + return errors.Wrapf(err, "failed to xattr %s", origpath) + } + if len(xattrs) > 0 { + m := make(map[string][]byte) + for _, key := range xattrs { + v, err := sysx.LGetxattr(origpath, key) + if err == nil { + m[key] = v + } + } + stat.Xattrs = m + } + return nil +} + +func setUnixOpt(fi os.FileInfo, stat *Stat, path string, seenFiles map[uint64]string) { + s := fi.Sys().(*syscall.Stat_t) + + stat.Uid = s.Uid + stat.Gid = s.Gid + + if !fi.IsDir() { + if s.Mode&syscall.S_IFBLK != 0 || + s.Mode&syscall.S_IFCHR != 0 { + stat.Devmajor = int64(major(uint64(s.Rdev))) + stat.Devminor = int64(minor(uint64(s.Rdev))) + } + + ino := s.Ino + if s.Nlink > 1 { + if oldpath, ok := seenFiles[ino]; ok { + stat.Linkname = oldpath + stat.Size_ = 0 + } + } + seenFiles[ino] = path + } +} + +func major(device uint64) uint64 { + return (device >> 8) & 0xfff +} + +func minor(device uint64) uint64 { + return (device & 0xff) | ((device >> 12) & 0xfff00) +} diff --git a/vendor/github.com/tonistiigi/fsutil/walker_windows.go b/vendor/github.com/tonistiigi/fsutil/walker_windows.go new file mode 100644 index 0000000000..a1a2b45569 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/walker_windows.go @@ -0,0 +1,14 @@ +// +build windows + +package fsutil + +import ( + "os" +) + +func loadXattr(_ string, _ *Stat) error { + return nil +} + +func setUnixOpt(_ os.FileInfo, _ *Stat, _ string, _ map[uint64]string) { +} diff --git a/vendor/github.com/tonistiigi/fsutil/wire.pb.go b/vendor/github.com/tonistiigi/fsutil/wire.pb.go new file mode 100644 index 0000000000..9d334bbdba --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/wire.pb.go @@ -0,0 +1,567 @@ +// Code generated by protoc-gen-gogo. +// source: wire.proto +// DO NOT EDIT! + +package fsutil + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strconv "strconv" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Packet_PacketType int32 + +const ( + PACKET_STAT Packet_PacketType = 0 + PACKET_REQ Packet_PacketType = 1 + PACKET_DATA Packet_PacketType = 2 + PACKET_FIN Packet_PacketType = 3 + PACKET_ERR Packet_PacketType = 4 +) + +var Packet_PacketType_name = map[int32]string{ + 0: "PACKET_STAT", + 1: "PACKET_REQ", + 2: "PACKET_DATA", + 3: "PACKET_FIN", + 4: "PACKET_ERR", +} +var Packet_PacketType_value = map[string]int32{ + "PACKET_STAT": 0, + "PACKET_REQ": 1, + "PACKET_DATA": 2, + "PACKET_FIN": 3, + "PACKET_ERR": 4, +} + +func (Packet_PacketType) EnumDescriptor() ([]byte, []int) { return fileDescriptorWire, []int{0, 0} } + +type Packet struct { + Type Packet_PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=fsutil.Packet_PacketType" json:"type,omitempty"` + Stat *Stat `protobuf:"bytes,2,opt,name=stat" json:"stat,omitempty"` + ID uint32 `protobuf:"varint,3,opt,name=ID,proto3" json:"ID,omitempty"` + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` +} + +func (m *Packet) Reset() { *m = Packet{} } +func (*Packet) ProtoMessage() {} +func (*Packet) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} } + +func (m *Packet) GetType() Packet_PacketType { + if m != nil { + return m.Type + } + return PACKET_STAT +} + +func (m *Packet) GetStat() *Stat { + if m != nil { + return m.Stat + } + return nil +} + +func (m *Packet) GetID() uint32 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Packet) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func init() { + proto.RegisterType((*Packet)(nil), "fsutil.Packet") + proto.RegisterEnum("fsutil.Packet_PacketType", Packet_PacketType_name, Packet_PacketType_value) +} +func (x Packet_PacketType) String() string { + s, ok := Packet_PacketType_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Packet) Equal(that interface{}) bool { + if that == nil { + if this == nil { + return true + } + return false + } + + that1, ok := that.(*Packet) + if !ok { + that2, ok := that.(Packet) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + if this == nil { + return true + } + return false + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !this.Stat.Equal(that1.Stat) { + return false + } + if this.ID != that1.ID { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + return true +} +func (this *Packet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&fsutil.Packet{") + s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + if this.Stat != nil { + s = append(s, "Stat: "+fmt.Sprintf("%#v", this.Stat)+",\n") + } + s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") + s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWire(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Packet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Packet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + 
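// each key byte below is (field_number << 3) | wire_type: field 1 as a varint is (1<<3)|0 = 0x08 +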
dAtA[i] = 0x8 + i++ + i = encodeVarintWire(dAtA, i, uint64(m.Type)) + } + if m.Stat != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWire(dAtA, i, uint64(m.Stat.Size())) + n1, err := m.Stat.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.ID != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintWire(dAtA, i, uint64(m.ID)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintWire(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + return i, nil +} + +func encodeFixed64Wire(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Wire(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintWire(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Packet) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovWire(uint64(m.Type)) + } + if m.Stat != nil { + l = m.Stat.Size() + n += 1 + l + sovWire(uint64(l)) + } + if m.ID != 0 { + n += 1 + sovWire(uint64(m.ID)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovWire(uint64(l)) + } + return n +} + +func sovWire(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWire(x uint64) (n int) { + return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Packet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Packet{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Stat:` + strings.Replace(fmt.Sprintf("%v", this.Stat), "Stat", "Stat", 1) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `}`, + }, "") + return s +} +func valueToStringWire(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Packet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Packet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Packet_PacketType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Stat == nil { + m.Stat = &Stat{} + } + if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWire(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWire + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWire(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
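// sentinel errors shared by (*Packet).Unmarshal and skipWire when the input is malformed +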
ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("wire.proto", fileDescriptorWire) } + +var fileDescriptorWire = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2b, 0x2e, 0x2d, 0xc9, 0xcc, 0x91, 0xe2, + 0x2a, 0x2e, 0x49, 0x2c, 0x81, 0x88, 0x29, 0xdd, 0x65, 0xe4, 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e, + 0x2d, 0x11, 0xd2, 0xe5, 0x62, 0x29, 0xa9, 0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, + 0x92, 0xd4, 0x83, 0xa8, 0xd6, 0x83, 0xc8, 0x42, 0xa9, 0x90, 0xca, 0x82, 0xd4, 0x20, 0xb0, 0x32, + 0x21, 0x05, 0x2e, 0x16, 0x90, 0x39, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x3c, 0x30, 0xe5, + 0xc1, 0x25, 0x89, 0x25, 0x41, 0x60, 0x19, 0x21, 0x3e, 0x2e, 0x26, 0x4f, 0x17, 0x09, 0x66, 0x05, + 0x46, 0x0d, 0xde, 0x20, 0x26, 0x4f, 0x17, 0x21, 0x21, 0x2e, 0x96, 0x94, 0xc4, 0x92, 0x44, 0x09, + 0x16, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x30, 0x5b, 0x29, 0x8e, 0x8b, 0x0b, 0x61, 0xb2, 0x10, 0x3f, + 0x17, 0x77, 0x80, 0xa3, 0xb3, 0xb7, 0x6b, 0x48, 0x7c, 0x70, 0x88, 0x63, 0x88, 0x00, 0x83, 0x10, + 0x1f, 0x17, 0x17, 0x54, 0x20, 0xc8, 0x35, 0x50, 0x80, 0x11, 0x49, 0x81, 0x8b, 0x63, 0x88, 0xa3, + 0x00, 0x13, 0x92, 0x02, 0x37, 0x4f, 0x3f, 0x01, 0x66, 0x24, 0xbe, 0x6b, 0x50, 0x90, 0x00, 0x8b, + 0x93, 0xce, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xd8, 0xf0, + 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, + 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, + 0x48, 0x62, 0x03, 0x07, 0x8a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8b, 0xce, 0x55, 0x3b, 0x36, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/ugorji/go/LICENSE b/vendor/github.com/ugorji/go/LICENSE new file mode 100644 index 0000000000..95a0f0541c --- /dev/null +++ b/vendor/github.com/ugorji/go/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2012-2015 Ugorji Nwoke. +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ugorji/go/codec/0doc.go b/vendor/github.com/ugorji/go/codec/0doc.go new file mode 100644 index 0000000000..b61a8180e4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/0doc.go @@ -0,0 +1,264 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. 
+// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +Package codec provides a +High Performance, Feature-Rich Idiomatic Go 1.4+ codec/encoding library +for binc, msgpack, cbor, json. + +Supported Serialization formats are: + + - msgpack: https://github.com/msgpack/msgpack + - binc: http://github.com/ugorji/binc + - cbor: http://cbor.io http://tools.ietf.org/html/rfc7049 + - json: http://json.org http://tools.ietf.org/html/rfc7159 + - simple: + +To install: + + go get github.com/ugorji/go/codec + +This package will carefully use 'unsafe' for performance reasons in specific places. +You can build without unsafe use by passing the safe or appengine tag +i.e. 'go install -tags=safe ...'. Note that unsafe is only supported for the last 3 +go sdk versions e.g. current go release is go 1.9, so we support unsafe use only from +go 1.7+ . This is because supporting unsafe requires knowledge of implementation details. + +For detailed usage information, read the primer at http://ugorji.net/blog/go-codec-primer . + +The idiomatic Go support is as seen in other encoding packages in +the standard library (ie json, xml, gob, etc). + +Rich Feature Set includes: + + - Simple but extremely powerful and feature-rich API + - Support for go1.4 and above, while selectively using newer APIs for later releases + - Excellent code coverage ( > 90% ) + - Very High Performance. + Our extensive benchmarks show us outperforming Gob, Json, Bson, etc by 2-4X. + - Careful selected use of 'unsafe' for targeted performance gains. + 100% mode exists where 'unsafe' is not used at all. + - Lock-free (sans mutex) concurrency for scaling to 100's of cores + - Coerce types where appropriate + e.g. decode an int in the stream into a float, decode numbers from formatted strings, etc + - Corner Cases: + Overflows, nil maps/slices, nil values in streams are handled correctly + - Standard field renaming via tags + - Support for omitting empty fields during an encoding + - Encoding from any value and decoding into pointer to any value + (struct, slice, map, primitives, pointers, interface{}, etc) + - Extensions to support efficient encoding/decoding of any named types + - Support encoding.(Binary|Text)(M|Unm)arshaler interfaces + - Support IsZero() bool to determine if a value is a zero value. + Analogous to time.Time.IsZero() bool. + - Decoding without a schema (into a interface{}). + Includes Options to configure what specific map or slice type to use + when decoding an encoded list or map into a nil interface{} + - Mapping a non-interface type to an interface, so we can decode appropriately + into any interface type with a correctly configured non-interface value. + - Encode a struct as an array, and decode struct from an array in the data stream + - Option to encode struct keys as numbers (instead of strings) + (to support structured streams with fields encoded as numeric codes) + - Comprehensive support for anonymous fields + - Fast (no-reflection) encoding/decoding of common maps and slices + - Code-generation for faster performance. + - Support binary (e.g. messagepack, cbor) and text (e.g. json) formats + - Support indefinite-length formats to enable true streaming + (for formats which support it e.g. json, cbor) + - Support canonical encoding, where a value is ALWAYS encoded as same sequence of bytes. + This mostly applies to maps, where iteration order is non-deterministic. + - NIL in data stream decoded as zero value + - Never silently skip data when decoding. 
+ User decides whether to return an error or silently skip data when keys or indexes + in the data stream do not map to fields in the struct. + - Detect and error when encoding a cyclic reference (instead of stack overflow shutdown) + - Encode/Decode from/to chan types (for iterative streaming support) + - Drop-in replacement for encoding/json. `json:` key in struct tag supported. + - Provides a RPC Server and Client Codec for net/rpc communication protocol. + - Handle unique idiosyncrasies of codecs e.g. + - For messagepack, configure how ambiguities in handling raw bytes are resolved + - For messagepack, provide rpc server/client codec to support + msgpack-rpc protocol defined at: + https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md + +Extension Support + +Users can register a function to handle the encoding or decoding of +their custom types. + +There are no restrictions on what the custom type can be. Some examples: + + type BisSet []int + type BitSet64 uint64 + type UUID string + type MyStructWithUnexportedFields struct { a int; b bool; c []int; } + type GifImage struct { ... } + +As an illustration, MyStructWithUnexportedFields would normally be +encoded as an empty map because it has no exported fields, while UUID +would be encoded as a string. However, with extension support, you can +encode any of these however you like. + +Custom Encoding and Decoding + +This package maintains symmetry in the encoding and decoding halfs. +We determine how to encode or decode by walking this decision tree + + - is type a codec.Selfer? + - is there an extension registered for the type? + - is format binary, and is type a encoding.BinaryMarshaler and BinaryUnmarshaler? + - is format specifically json, and is type a encoding/json.Marshaler and Unmarshaler? + - is format text-based, and type an encoding.TextMarshaler? + - else we use a pair of functions based on the "kind" of the type e.g. map, slice, int64, etc + +This symmetry is important to reduce chances of issues happening because the +encoding and decoding sides are out of sync e.g. decoded via very specific +encoding.TextUnmarshaler but encoded via kind-specific generalized mode. + +Consequently, if a type only defines one-half of the symmetry +(e.g. it implements UnmarshalJSON() but not MarshalJSON() ), +then that type doesn't satisfy the check and we will continue walking down the +decision tree. + +RPC + +RPC Client and Server Codecs are implemented, so the codecs can be used +with the standard net/rpc package. + +Usage + +The Handle is SAFE for concurrent READ, but NOT SAFE for concurrent modification. + +The Encoder and Decoder are NOT safe for concurrent use. + +Consequently, the usage model is basically: + + - Create and initialize the Handle before any use. + Once created, DO NOT modify it. + - Multiple Encoders or Decoders can now use the Handle concurrently. + They only read information off the Handle (never write). + - However, each Encoder or Decoder MUST not be used concurrently + - To re-use an Encoder/Decoder, call Reset(...) on it first. + This allows you use state maintained on the Encoder/Decoder. + +Sample usage model: + + // create and configure Handle + var ( + bh codec.BincHandle + mh codec.MsgpackHandle + ch codec.CborHandle + ) + + mh.MapType = reflect.TypeOf(map[string]interface{}(nil)) + + // configure extensions + // e.g. 
for msgpack, define functions and enable Time support for tag 1 + // mh.SetExt(reflect.TypeOf(time.Time{}), 1, myExt) + + // create and use decoder/encoder + var ( + r io.Reader + w io.Writer + b []byte + h = &bh // or mh to use msgpack + ) + + dec = codec.NewDecoder(r, h) + dec = codec.NewDecoderBytes(b, h) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, h) + enc = codec.NewEncoderBytes(&b, h) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) + +Running Tests + +To run tests, use the following: + + go test + +To run the full suite of tests, use the following: + + go test -tags alltests -run Suite + +You can run the tag 'safe' to run tests or build in safe mode. e.g. + + go test -tags safe -run Json + go test -tags "alltests safe" -run Suite + +Running Benchmarks + +Please see http://github.com/ugorji/go-codec-bench . + +Caveats + +Struct fields matching the following are ignored during encoding and decoding + - struct tag value set to - + - func, complex numbers, unsafe pointers + - unexported and not embedded + - unexported and embedded and not struct kind + - unexported and embedded pointers (from go1.10) + +Every other field in a struct will be encoded/decoded. + +Embedded fields are encoded as if they exist in the top-level struct, +with some caveats. See Encode documentation. + +*/ +package codec + +// TODO: +// - For Go 1.11, when mid-stack inlining is enabled, +// we should use committed functions for writeXXX and readXXX calls. +// This involves uncommenting the methods for decReaderSwitch and encWriterSwitch +// and using those (decReaderSwitch and encWriterSwitch) in all handles +// instead of encWriter and decReader. +// The benefit is that, for the (En|De)coder over []byte, the encWriter/decReader +// will be inlined, giving a performance bump for that typical case. +// However, it will only be inlined if mid-stack inlining is enabled, +// as we call panic to raise errors, and panic currently prevents inlining. +// +// PUNTED: +// - To make Handle comparable, make extHandle in BasicHandle a non-embedded pointer, +// and use overlay methods on *BasicHandle to call through to extHandle after initializing +// the "xh *extHandle" to point to a real slice. +// +// BEFORE EACH RELEASE: +// - Look through and fix padding for each type, to eliminate false sharing +// - critical shared objects that are read many times +// TypeInfos +// - pooled objects: +// decNaked, decNakedContainers, codecFner, typeInfoLoadArray, +// - small objects allocated independently, that we read/use much across threads: +// codecFn, typeInfo +// - Objects allocated independently and used a lot +// Decoder, Encoder, +// xxxHandle, xxxEncDriver, xxxDecDriver (xxx = json, msgpack, cbor, binc, simple) +// - In all above, arrange values modified together to be close to each other. +// +// For all of these, either ensure that they occupy full cache lines, +// or ensure that the things just past the cache line boundary are hardly read/written +// e.g. 
JsonHandle.RawBytesExt - which is copied into json(En|De)cDriver at init +// +// Occupying full cache lines means they occupy 8*N words (where N is an integer). +// Check this out by running: ./run.sh -z +// - look at those tagged ****, meaning they are not occupying full cache lines +// - look at those tagged <<<<, meaning they are larger than 32 words (something to watch) +// - Run "golint -min_confidence 0.81" diff --git a/vendor/github.com/ugorji/go/codec/binc.go b/vendor/github.com/ugorji/go/codec/binc.go new file mode 100644 index 0000000000..a3c96fe741 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/binc.go @@ -0,0 +1,1168 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning. + +// vd as low 4 bits (there are 16 slots) +const ( + bincVdSpecial byte = iota + bincVdPosInt + bincVdNegInt + bincVdFloat + + bincVdString + bincVdByteArray + bincVdArray + bincVdMap + + bincVdTimestamp + bincVdSmallInt + bincVdUnicodeOther + bincVdSymbol + + bincVdDecimal + _ // open slot + _ // open slot + bincVdCustomExt = 0x0f +) + +const ( + bincSpNil byte = iota + bincSpFalse + bincSpTrue + bincSpNan + bincSpPosInf + bincSpNegInf + bincSpZeroFloat + bincSpZero + bincSpNegOne +) + +const ( + bincFlBin16 byte = iota + bincFlBin32 + _ // bincFlBin32e + bincFlBin64 + _ // bincFlBin64e + // others not currently supported +) + +func bincdesc(vd, vs byte) string { + switch vd { + case bincVdSpecial: + switch vs { + case bincSpNil: + return "nil" + case bincSpFalse: + return "false" + case bincSpTrue: + return "true" + case bincSpNan, bincSpPosInf, bincSpNegInf, bincSpZeroFloat: + return "float" + case bincSpZero: + return "uint" + case bincSpNegOne: + return "int" + default: + return "unknown" + } + case bincVdSmallInt, bincVdPosInt: + return "uint" + case bincVdNegInt: + return "int" + case bincVdFloat: + return "float" + case bincVdSymbol: + return "string" + case bincVdString: + return "string" + case bincVdByteArray: + return "bytes" + case bincVdTimestamp: + return "time" + case bincVdCustomExt: + return "ext" + case bincVdArray: + return "array" + case bincVdMap: + return "map" + default: + return "unknown" + } +} + +type bincEncDriver struct { + e *Encoder + h *BincHandle + w encWriter + m map[string]uint16 // symbols + b [16]byte // scratch, used for encoding numbers - bigendian style + s uint16 // symbols sequencer + // c containerState + encDriverTrackContainerWriter + noBuiltInTypes + // encNoSeparator +} + +func (e *bincEncDriver) EncodeNil() { + e.w.writen1(bincVdSpecial<<4 | bincSpNil) +} + +func (e *bincEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else { + bs := bincEncodeTime(t) + e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs))) + e.w.writeb(bs) + } +} + +func (e *bincEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(bincVdSpecial<<4 | bincSpTrue) + } else { + e.w.writen1(bincVdSpecial<<4 | bincSpFalse) + } +} + +func (e *bincEncDriver) EncodeFloat32(f float32) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + e.w.writen1(bincVdFloat<<4 | bincFlBin32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *bincEncDriver) EncodeFloat64(f float64) { + if f == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat) + return + } + bigen.PutUint64(e.b[:8], 
math.Float64bits(f)) + if bincDoPrune { + i := 7 + for ; i >= 0 && (e.b[i] == 0); i-- { + } + i++ + if i <= 6 { + e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64) + e.w.writen1(byte(i)) + e.w.writeb(e.b[:i]) + return + } + } + e.w.writen1(bincVdFloat<<4 | bincFlBin64) + e.w.writeb(e.b[:8]) +} + +func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) { + if lim == 4 { + bigen.PutUint32(e.b[:lim], uint32(v)) + } else { + bigen.PutUint64(e.b[:lim], v) + } + if bincDoPrune { + i := pruneSignExt(e.b[:lim], pos) + e.w.writen1(bd | lim - 1 - byte(i)) + e.w.writeb(e.b[i:lim]) + } else { + e.w.writen1(bd | lim - 1) + e.w.writeb(e.b[:lim]) + } +} + +func (e *bincEncDriver) EncodeInt(v int64) { + const nbd byte = bincVdNegInt << 4 + if v >= 0 { + e.encUint(bincVdPosInt<<4, true, uint64(v)) + } else if v == -1 { + e.w.writen1(bincVdSpecial<<4 | bincSpNegOne) + } else { + e.encUint(bincVdNegInt<<4, false, uint64(-v)) + } +} + +func (e *bincEncDriver) EncodeUint(v uint64) { + e.encUint(bincVdPosInt<<4, true, v) +} + +func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) { + if v == 0 { + e.w.writen1(bincVdSpecial<<4 | bincSpZero) + } else if pos && v >= 1 && v <= 16 { + e.w.writen1(bincVdSmallInt<<4 | byte(v-1)) + } else if v <= math.MaxUint8 { + e.w.writen2(bd|0x0, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.encIntegerPrune(bd, pos, v, 4) + } else { + e.encIntegerPrune(bd, pos, v, 8) + } +} + +func (e *bincEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *bincEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(bincVdCustomExt<<4, uint64(length)) + e.w.writen1(xtag) +} + +func (e *bincEncDriver) WriteArrayStart(length int) { + e.encLen(bincVdArray<<4, uint64(length)) + e.c = containerArrayStart +} + +func (e *bincEncDriver) WriteMapStart(length int) { + e.encLen(bincVdMap<<4, uint64(length)) + e.c = containerMapStart +} + +func (e *bincEncDriver) EncodeString(c charEncoding, v string) { + if e.c == containerMapKey && c == cUTF8 && (e.h.AsSymbols == 0 || e.h.AsSymbols == 1) { + e.EncodeSymbol(v) + return + } + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeSymbol(v string) { + // if WriteSymbolsNoRefs { + // e.encodeString(cUTF8, v) + // return + // } + + //symbols only offer benefit when string length > 1. + //This is because strings with length 1 take only 2 bytes to store + //(bd with embedded length, and single byte for string val). 
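+ //Once a symbol is interned, re-encoding the same key costs only 2-3 bytes (symbol marker plus a 1- or 2-byte id).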
+ + l := len(v) + if l == 0 { + e.encBytesLen(cUTF8, 0) + return + } else if l == 1 { + e.encBytesLen(cUTF8, 1) + e.w.writen1(v[0]) + return + } + if e.m == nil { + e.m = make(map[string]uint16, 16) + } + ui, ok := e.m[v] + if ok { + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + } else { + e.s++ + ui = e.s + //ui = uint16(atomic.AddUint32(&e.s, 1)) + e.m[v] = ui + var lenprec uint8 + if l <= math.MaxUint8 { + // lenprec = 0 + } else if l <= math.MaxUint16 { + lenprec = 1 + } else if int64(l) <= math.MaxUint32 { + lenprec = 2 + } else { + lenprec = 3 + } + if ui <= math.MaxUint8 { + e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui)) + } else { + e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec) + bigenHelper{e.b[:2], e.w}.writeUint16(ui) + } + if lenprec == 0 { + e.w.writen1(byte(l)) + } else if lenprec == 1 { + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(l)) + } else if lenprec == 2 { + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(l)) + } else { + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(l)) + } + e.w.writestr(v) + } +} + +func (e *bincEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + return + } + l := uint64(len(v)) + e.encBytesLen(c, l) + if l > 0 { + e.w.writeb(v) + } +} + +func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) { + //TODO: support bincUnicodeOther (for now, just use string or bytearray) + if c == cRAW { + e.encLen(bincVdByteArray<<4, length) + } else { + e.encLen(bincVdString<<4, length) + } +} + +func (e *bincEncDriver) encLen(bd byte, l uint64) { + if l < 12 { + e.w.writen1(bd | uint8(l+4)) + } else { + e.encLenNumber(bd, l) + } +} + +func (e *bincEncDriver) encLenNumber(bd byte, v uint64) { + if v <= math.MaxUint8 { + e.w.writen2(bd, byte(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd | 0x01) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd | 0x02) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { + e.w.writen1(bd | 0x03) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(v)) + } +} + +//------------------------------------ + +type bincDecSymbol struct { + s string + b []byte + i uint16 +} + +type bincDecDriver struct { + decDriverNoopContainerReader + noBuiltInTypes + + d *Decoder + h *BincHandle + r decReader + br bool // bytes reader + bdRead bool + bd byte + vd byte + vs byte + _ [3]byte // padding + // linear searching on this slice is ok, + // because we typically expect < 32 symbols in each stream. 
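+	// (a linear scan over a small contiguous slice is typically cheaper than
+	// a map lookup here: no hashing, and the entries sit in a few cache lines)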
+ s []bincDecSymbol + + // noStreamingCodec + // decNoSeparator + + b [8 * 8]byte // scratch +} + +func (d *bincDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.vd = d.bd >> 4 + d.vs = d.bd & 0x0f + d.bdRead = true +} + +func (d *bincDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *bincDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdSpecial && d.vs == bincSpNil { + return valueTypeNil + } else if d.vd == bincVdByteArray { + return valueTypeBytes + } else if d.vd == bincVdString { + return valueTypeString + } else if d.vd == bincVdArray { + return valueTypeArray + } else if d.vd == bincVdMap { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *bincDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return true + } + return false +} + +func (d *bincDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + if d.vd != bincVdTimestamp { + d.d.errorf("cannot decode time - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + t, err := bincDecodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + d.bdRead = false + return +} + +func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) { + if vs&0x8 == 0 { + d.r.readb(d.b[0:defaultLen]) + } else { + l := d.r.readn1() + if l > 8 { + d.d.errorf("cannot read float - at most 8 bytes used to represent float - received %v bytes", l) + return + } + for i := l; i < 8; i++ { + d.b[i] = 0 + } + d.r.readb(d.b[0:l]) + } +} + +func (d *bincDecDriver) decFloat() (f float64) { + //if true { f = math.Float64frombits(bigen.Uint64(d.r.readx(8))); break; } + if x := d.vs & 0x7; x == bincFlBin32 { + d.decFloatPre(d.vs, 4) + f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4]))) + } else if x == bincFlBin64 { + d.decFloatPre(d.vs, 8) + f = math.Float64frombits(bigen.Uint64(d.b[0:8])) + } else { + d.d.errorf("read float - only float32 and float64 are supported - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + return +} + +func (d *bincDecDriver) decUint() (v uint64) { + // need to inline the code (interface conversion and type assertion expensive) + switch d.vs { + case 0: + v = uint64(d.r.readn1()) + case 1: + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + case 2: + d.b[4] = 0 + d.r.readb(d.b[5:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 3: + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + case 4, 5, 6: + lim := int(7 - d.vs) + d.r.readb(d.b[lim:8]) + for i := 0; i < lim; i++ { + d.b[i] = 0 + } + v = uint64(bigen.Uint64(d.b[:8])) + case 7: + d.r.readb(d.b[:8]) + v = uint64(bigen.Uint64(d.b[:8])) + default: + d.d.errorf("unsigned integers with greater than 64 bits of precision not supported") + return + } + return +} + +func (d *bincDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdPosInt { + ui = d.decUint() + } else if vd == bincVdNegInt { + ui = d.decUint() + neg = true + } else if vd == bincVdSmallInt { + ui = uint64(d.vs) + 1 + } else if vd == bincVdSpecial { + if vs == bincSpZero { + //i = 0 + } else if vs == bincSpNegOne { + neg = true + ui = 1 + } else { + d.d.errorf("integer decode fails - invalid special value from 
descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + } else { + d.d.errorf("integer can only be decoded from int/uint. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd) + return + } + return +} + +func (d *bincDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("assigning negative signed value to unsigned integer type") + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + vd, vs := d.vd, d.vs + if vd == bincVdSpecial { + d.bdRead = false + if vs == bincSpNan { + return math.NaN() + } else if vs == bincSpPosInf { + return math.Inf(1) + } else if vs == bincSpZeroFloat || vs == bincSpZero { + return + } else if vs == bincSpNegInf { + return math.Inf(-1) + } else { + d.d.errorf("float - invalid special value from descriptor %x-%x/%s", + d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + } else if vd == bincVdFloat { + f = d.decFloat() + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *bincDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == (bincVdSpecial | bincSpFalse) { + // b = false + } else if bd == (bincVdSpecial | bincSpTrue) { + b = true + } else { + d.d.errorf("bool - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdMap { + d.d.errorf("map - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + if d.vd != bincVdArray { + d.d.errorf("array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + length = d.decLen() + d.bdRead = false + return +} + +func (d *bincDecDriver) decLen() int { + if d.vs > 3 { + return int(d.vs - 4) + } + return int(d.decLenNumber()) +} + +func (d *bincDecDriver) decLenNumber() (v uint64) { + if x := d.vs; x == 0 { + v = uint64(d.r.readn1()) + } else if x == 1 { + d.r.readb(d.b[6:8]) + v = uint64(bigen.Uint16(d.b[6:8])) + } else if x == 2 { + d.r.readb(d.b[4:8]) + v = uint64(bigen.Uint32(d.b[4:8])) + } else { + d.r.readb(d.b[:8]) + v = bigen.Uint64(d.b[:8]) + } + return +} + +func (d *bincDecDriver) decStringAndBytes(bs []byte, withString, zerocopy bool) ( + bs2 []byte, s string) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return + } + var slen = -1 + // var ok bool + switch d.vd { + case bincVdString, bincVdByteArray: + slen = d.decLen() + if zerocopy { + if d.br { + bs2 = d.r.readx(slen) + } else if len(bs) == 0 { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, d.b[:]) + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + } else { + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, bs) + } + if withString { + s = string(bs2) + } + case bincVdSymbol: + // zerocopy doesn't apply for symbols, + // as the values must be stored in a table for later use. 
+ // + //from vs: extract numSymbolBytes, containsStringVal, strLenPrecision, + //extract symbol + //if containsStringVal, read it and put in map + //else look in map for string value + var symbol uint16 + vs := d.vs + if vs&0x8 == 0 { + symbol = uint16(d.r.readn1()) + } else { + symbol = uint16(bigen.Uint16(d.r.readx(2))) + } + if d.s == nil { + d.s = make([]bincDecSymbol, 0, 16) + } + + if vs&0x4 == 0 { + for i := range d.s { + j := &d.s[i] + if j.i == symbol { + bs2 = j.b + if withString { + if j.s == "" && bs2 != nil { + j.s = string(bs2) + } + s = j.s + } + break + } + } + } else { + switch vs & 0x3 { + case 0: + slen = int(d.r.readn1()) + case 1: + slen = int(bigen.Uint16(d.r.readx(2))) + case 2: + slen = int(bigen.Uint32(d.r.readx(4))) + case 3: + slen = int(bigen.Uint64(d.r.readx(8))) + } + // since using symbols, do not store any part of + // the parameter bs in the map, as it might be a shared buffer. + // bs2 = decByteSlice(d.r, slen, bs) + bs2 = decByteSlice(d.r, slen, d.d.h.MaxInitLen, nil) + if withString { + s = string(bs2) + } + d.s = append(d.s, bincDecSymbol{i: symbol, s: s, b: bs2}) + } + default: + d.d.errorf("string/bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeString() (s string) { + // DecodeBytes does not accommodate symbols, whose impl stores string version in map. + // Use decStringAndBytes directly. + // return string(d.DecodeBytes(d.b[:], true, true)) + _, s = d.decStringAndBytes(d.b[:], true, true) + return +} + +func (d *bincDecDriver) DecodeStringAsBytes() (s []byte) { + s, _ = d.decStringAndBytes(d.b[:], false, true) + return +} + +func (d *bincDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == bincVdSpecial<<4|bincSpNil { + d.bdRead = false + return nil + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.vd == bincVdArray { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + var clen int + if d.vd == bincVdString || d.vd == bincVdByteArray { + clen = d.decLen() + } else { + d.d.errorf("bytes - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *bincDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *bincDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.vd == bincVdCustomExt { + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("wrong extension tag - got %b, expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + } else if d.vd == bincVdByteArray { + xbs = d.DecodeBytes(nil, true) + } else { + d.d.errorf("ext - expecting extensions or byte array - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + return + } + d.bdRead = false + return +} + +func (d *bincDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var 
decodeFurther bool + + switch d.vd { + case bincVdSpecial: + switch d.vs { + case bincSpNil: + n.v = valueTypeNil + case bincSpFalse: + n.v = valueTypeBool + n.b = false + case bincSpTrue: + n.v = valueTypeBool + n.b = true + case bincSpNan: + n.v = valueTypeFloat + n.f = math.NaN() + case bincSpPosInf: + n.v = valueTypeFloat + n.f = math.Inf(1) + case bincSpNegInf: + n.v = valueTypeFloat + n.f = math.Inf(-1) + case bincSpZeroFloat: + n.v = valueTypeFloat + n.f = float64(0) + case bincSpZero: + n.v = valueTypeUint + n.u = uint64(0) // int8(0) + case bincSpNegOne: + n.v = valueTypeInt + n.i = int64(-1) // int8(-1) + default: + d.d.errorf("cannot infer value - unrecognized special value from descriptor %x-%x/%s", d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + case bincVdSmallInt: + n.v = valueTypeUint + n.u = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1 + case bincVdPosInt: + n.v = valueTypeUint + n.u = d.decUint() + case bincVdNegInt: + n.v = valueTypeInt + n.i = -(int64(d.decUint())) + case bincVdFloat: + n.v = valueTypeFloat + n.f = d.decFloat() + case bincVdSymbol: + n.v = valueTypeSymbol + n.s = d.DecodeString() + case bincVdString: + n.v = valueTypeString + n.s = d.DecodeString() + case bincVdByteArray: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bincVdTimestamp: + n.v = valueTypeTime + tt, err := bincDecodeTime(d.r.readx(int(d.vs))) + if err != nil { + panic(err) + } + n.t = tt + case bincVdCustomExt: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case bincVdArray: + n.v = valueTypeArray + decodeFurther = true + case bincVdMap: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("cannot infer value - %s %x-%x/%s", msgBadDesc, d.vd, d.vs, bincdesc(d.vd, d.vs)) + } + + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = valueTypeInt + n.i = int64(n.u) + } + return +} + +//------------------------------------ + +//BincHandle is a Handle for the Binc Schema-Free Encoding Format +//defined at https://github.com/ugorji/binc . +// +//BincHandle currently supports all Binc features with the following EXCEPTIONS: +// - only integers up to 64 bits of precision are supported. +// big integers are unsupported. +// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types). +// extended precision and decimal IEEE 754 floats are unsupported. +// - Only UTF-8 strings supported. +// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported. +// +//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon. +type BincHandle struct { + BasicHandle + binaryEncodingType + noElemSeparators + + // AsSymbols defines what should be encoded as symbols. + // + // Encoding as symbols can reduce the encoded size significantly. + // + // However, during decoding, each string to be encoded as a symbol must + // be checked to see if it has been seen before. Consequently, encoding time + // will increase if using symbols, because string comparisons has a clear cost. + // + // Values: + // - 0: default: library uses best judgement + // - 1: use symbols + // - 2: do not use symbols + AsSymbols uint8 + + // AsSymbols: may later on introduce more options ... + // - m: map keys + // - s: struct fields + // - n: none + // - a: all: same as m, s, ... 
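+	//
+	// A minimal usage sketch (w and v are assumed caller values, not upstream code):
+	//
+	//	var h BincHandle
+	//	h.AsSymbols = 2 // never encode strings as symbols
+	//	err := NewEncoder(w, &h).Encode(v)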
+ + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: binc +func (h *BincHandle) Name() string { return "binc" } + +// SetBytesExt sets an extension +func (h *BincHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *BincHandle) newEncDriver(e *Encoder) encDriver { + return &bincEncDriver{e: e, h: h, w: e.w} +} + +func (h *BincHandle) newDecDriver(d *Decoder) decDriver { + return &bincDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *bincEncDriver) reset() { + e.w = e.e.w + e.s = 0 + e.c = 0 + e.m = nil +} + +func (d *bincDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.s = nil + d.bd, d.bdRead, d.vd, d.vs = 0, false, 0, 0 +} + +// var timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'} + +// EncodeTime encodes a time.Time as a []byte, including +// information on the instant in time and UTC offset. +// +// Format Description +// +// A timestamp is composed of 3 components: +// +// - secs: signed integer representing seconds since unix epoch +// - nsces: unsigned integer representing fractional seconds as a +// nanosecond offset within secs, in the range 0 <= nsecs < 1e9 +// - tz: signed integer representing timezone offset in minutes east of UTC, +// and a dst (daylight savings time) flag +// +// When encoding a timestamp, the first byte is the descriptor, which +// defines which components are encoded and how many bytes are used to +// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it +// is not encoded in the byte array explicitly*. +// +// Descriptor 8 bits are of the form `A B C DDD EE`: +// A: Is secs component encoded? 1 = true +// B: Is nsecs component encoded? 1 = true +// C: Is tz component encoded? 1 = true +// DDD: Number of extra bytes for secs (range 0-7). +// If A = 1, secs encoded in DDD+1 bytes. +// If A = 0, secs is not encoded, and is assumed to be 0. +// If A = 1, then we need at least 1 byte to encode secs. +// DDD says the number of extra bytes beyond that 1. +// E.g. if DDD=0, then secs is represented in 1 byte. +// if DDD=2, then secs is represented in 3 bytes. +// EE: Number of extra bytes for nsecs (range 0-3). +// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above) +// +// Following the descriptor bytes, subsequent bytes are: +// +// secs component encoded in `DDD + 1` bytes (if A == 1) +// nsecs component encoded in `EE + 1` bytes (if B == 1) +// tz component encoded in 2 bytes (if C == 1) +// +// secs and nsecs components are integers encoded in a BigEndian +// 2-complement encoding format. +// +// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to +// Least significant bit 0 are described below: +// +// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes). +// Bit 15 = have\_dst: set to 1 if we set the dst flag. +// Bit 14 = dst\_on: set to 1 if dst is in effect at the time, or 0 if not. +// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format. 
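+//
+// For example, time.Unix(1, 0).UTC() encodes to just two bytes, 0x80 0x01:
+// descriptor 0x80 says only secs is present (A=1, B=0, C=0) in DDD+1 = 1 byte,
+// and the single payload byte 0x01 is secs = 1.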
+// +func bincEncodeTime(t time.Time) []byte { + //t := rv.Interface().(time.Time) + tsecs, tnsecs := t.Unix(), t.Nanosecond() + var ( + bd byte + btmp [8]byte + bs [16]byte + i int = 1 + ) + l := t.Location() + if l == time.UTC { + l = nil + } + if tsecs != 0 { + bd = bd | 0x80 + bigen.PutUint64(btmp[:], uint64(tsecs)) + f := pruneSignExt(btmp[:], tsecs >= 0) + bd = bd | (byte(7-f) << 2) + copy(bs[i:], btmp[f:]) + i = i + (8 - f) + } + if tnsecs != 0 { + bd = bd | 0x40 + bigen.PutUint32(btmp[:4], uint32(tnsecs)) + f := pruneSignExt(btmp[:4], true) + bd = bd | byte(3-f) + copy(bs[i:], btmp[f:4]) + i = i + (4 - f) + } + if l != nil { + bd = bd | 0x20 + // Note that Go Libs do not give access to dst flag. + _, zoneOffset := t.Zone() + //zoneName, zoneOffset := t.Zone() + zoneOffset /= 60 + z := uint16(zoneOffset) + bigen.PutUint16(btmp[:2], z) + // clear dst flags + bs[i] = btmp[0] & 0x3f + bs[i+1] = btmp[1] + i = i + 2 + } + bs[0] = bd + return bs[0:i] +} + +// bincDecodeTime decodes a []byte into a time.Time. +func bincDecodeTime(bs []byte) (tt time.Time, err error) { + bd := bs[0] + var ( + tsec int64 + tnsec uint32 + tz uint16 + i byte = 1 + i2 byte + n byte + ) + if bd&(1<<7) != 0 { + var btmp [8]byte + n = ((bd >> 2) & 0x7) + 1 + i2 = i + n + copy(btmp[8-n:], bs[i:i2]) + //if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it) + if bs[i]&(1<<7) != 0 { + copy(btmp[0:8-n], bsAll0xff) + //for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff } + } + i = i2 + tsec = int64(bigen.Uint64(btmp[:])) + } + if bd&(1<<6) != 0 { + var btmp [4]byte + n = (bd & 0x3) + 1 + i2 = i + n + copy(btmp[4-n:], bs[i:i2]) + i = i2 + tnsec = bigen.Uint32(btmp[:]) + } + if bd&(1<<5) == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + return + } + // In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name. + // However, we need name here, so it can be shown when time is printed. + // Zone name is in form: UTC-08:00. + // Note that Go Libs do not give access to dst flag, so we ignore dst bits + + i2 = i + 2 + tz = bigen.Uint16(bs[i:i2]) + // i = i2 + // sign extend sign bit into top 2 MSB (which were dst bits): + if tz&(1<<13) == 0 { // positive + tz = tz & 0x3fff //clear 2 MSBs: dst bits + } else { // negative + tz = tz | 0xc000 //set 2 MSBs: dst bits + } + tzint := int16(tz) + if tzint == 0 { + tt = time.Unix(tsec, int64(tnsec)).UTC() + } else { + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. + // var zoneName = timeLocUTCName(tzint) + tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60)) + } + return +} + +var _ decDriver = (*bincDecDriver)(nil) +var _ encDriver = (*bincEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/cbor.go b/vendor/github.com/ugorji/go/codec/cbor.go new file mode 100644 index 0000000000..7633c04ac3 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/cbor.go @@ -0,0 +1,756 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +package codec + +import ( + "math" + "reflect" + "time" +) + +const ( + cborMajorUint byte = iota + cborMajorNegInt + cborMajorBytes + cborMajorText + cborMajorArray + cborMajorMap + cborMajorTag + cborMajorOther +) + +const ( + cborBdFalse byte = 0xf4 + iota + cborBdTrue + cborBdNil + cborBdUndefined + cborBdExt + cborBdFloat16 + cborBdFloat32 + cborBdFloat64 +) + +const ( + cborBdIndefiniteBytes byte = 0x5f + cborBdIndefiniteString = 0x7f + cborBdIndefiniteArray = 0x9f + cborBdIndefiniteMap = 0xbf + cborBdBreak = 0xff +) + +// These define some in-stream descriptors for +// manual encoding e.g. when doing explicit indefinite-length +const ( + CborStreamBytes byte = 0x5f + CborStreamString = 0x7f + CborStreamArray = 0x9f + CborStreamMap = 0xbf + CborStreamBreak = 0xff +) + +const ( + cborBaseUint byte = 0x00 + cborBaseNegInt = 0x20 + cborBaseBytes = 0x40 + cborBaseString = 0x60 + cborBaseArray = 0x80 + cborBaseMap = 0xa0 + cborBaseTag = 0xc0 + cborBaseSimple = 0xe0 +) + +func cbordesc(bd byte) string { + switch bd { + case cborBdNil: + return "nil" + case cborBdFalse: + return "false" + case cborBdTrue: + return "true" + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + return "float" + case cborBdIndefiniteBytes: + return "bytes*" + case cborBdIndefiniteString: + return "string*" + case cborBdIndefiniteArray: + return "array*" + case cborBdIndefiniteMap: + return "map*" + default: + switch { + case bd >= cborBaseUint && bd < cborBaseNegInt: + return "(u)int" + case bd >= cborBaseNegInt && bd < cborBaseBytes: + return "int" + case bd >= cborBaseBytes && bd < cborBaseString: + return "bytes" + case bd >= cborBaseString && bd < cborBaseArray: + return "string" + case bd >= cborBaseArray && bd < cborBaseMap: + return "array" + case bd >= cborBaseMap && bd < cborBaseTag: + return "map" + case bd >= cborBaseTag && bd < cborBaseSimple: + return "ext" + default: + return "unknown" + } + } +} + +// ------------------- + +type cborEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *CborHandle + x [8]byte + _ [3]uint64 // padding +} + +func (e *cborEncDriver) EncodeNil() { + e.w.writen1(cborBdNil) +} + +func (e *cborEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(cborBdTrue) + } else { + e.w.writen1(cborBdFalse) + } +} + +func (e *cborEncDriver) EncodeFloat32(f float32) { + e.w.writen1(cborBdFloat32) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *cborEncDriver) EncodeFloat64(f float64) { + e.w.writen1(cborBdFloat64) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *cborEncDriver) encUint(v uint64, bd byte) { + if v <= 0x17 { + e.w.writen1(byte(v) + bd) + } else if v <= math.MaxUint8 { + e.w.writen2(bd+0x18, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 0x19) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 0x1a) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 0x1b) + bigenHelper{e.x[:8], e.w}.writeUint64(v) + } +} + +func (e *cborEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-1-v), cborBaseNegInt) + } else { + e.encUint(uint64(v), cborBaseUint) + } +} + +func (e *cborEncDriver) EncodeUint(v uint64) { + e.encUint(v, cborBaseUint) +} + +func (e *cborEncDriver) encLen(bd byte, length int) { + e.encUint(uint64(length), bd) +} + +func (e *cborEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + } else if 
e.h.TimeRFC3339 { + e.encUint(0, cborBaseTag) + e.EncodeString(cUTF8, t.Format(time.RFC3339Nano)) + } else { + e.encUint(1, cborBaseTag) + t = t.UTC().Round(time.Microsecond) + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + if nsec == 0 { + e.EncodeInt(sec) + } else { + e.EncodeFloat64(float64(sec) + float64(nsec)/1e9) + } + } +} + +func (e *cborEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + e.encUint(uint64(xtag), cborBaseTag) + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *cborEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + e.encUint(uint64(re.Tag), cborBaseTag) + if false && re.Data != nil { + en.encode(re.Data) + } else if re.Value != nil { + en.encode(re.Value) + } else { + e.EncodeNil() + } +} + +func (e *cborEncDriver) WriteArrayStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteArray) + } else { + e.encLen(cborBaseArray, length) + } +} + +func (e *cborEncDriver) WriteMapStart(length int) { + if e.h.IndefiniteLength { + e.w.writen1(cborBdIndefiniteMap) + } else { + e.encLen(cborBaseMap, length) + } +} + +func (e *cborEncDriver) WriteMapEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) WriteArrayEnd() { + if e.h.IndefiniteLength { + e.w.writen1(cborBdBreak) + } +} + +func (e *cborEncDriver) EncodeString(c charEncoding, v string) { + e.encStringBytesS(cborBaseString, v) +} + +func (e *cborEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + if v == nil { + e.EncodeNil() + } else if c == cRAW { + e.encStringBytesS(cborBaseBytes, stringView(v)) + } else { + e.encStringBytesS(cborBaseString, stringView(v)) + } +} + +func (e *cborEncDriver) encStringBytesS(bb byte, v string) { + if e.h.IndefiniteLength { + if bb == cborBaseBytes { + e.w.writen1(cborBdIndefiniteBytes) + } else { + e.w.writen1(cborBdIndefiniteString) + } + blen := len(v) / 4 + if blen == 0 { + blen = 64 + } else if blen > 1024 { + blen = 1024 + } + for i := 0; i < len(v); { + var v2 string + i2 := i + blen + if i2 < len(v) { + v2 = v[i:i2] + } else { + v2 = v[i:] + } + e.encLen(bb, len(v2)) + e.w.writestr(v2) + i = i2 + } + e.w.writen1(cborBdBreak) + } else { + e.encLen(bb, len(v)) + e.w.writestr(v) + } +} + +// ---------------------- + +type cborDecDriver struct { + d *Decoder + h *CborHandle + r decReader + // b [scratchByteArrayLen]byte + br bool // bytes reader + bdRead bool + bd byte + noBuiltInTypes + // decNoSeparator + decDriverNoopContainerReader + _ [3]uint64 // padding +} + +func (d *cborDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *cborDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *cborDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil { + return valueTypeNil + } else if d.bd == cborBdIndefiniteBytes || (d.bd >= cborBaseBytes && d.bd < cborBaseString) { + return valueTypeBytes + } else if d.bd == cborBdIndefiniteString || (d.bd >= cborBaseString && d.bd < cborBaseArray) { + return valueTypeString + } else if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + return valueTypeArray + } else if d.bd == cborBdIndefiniteMap || (d.bd >= cborBaseMap && d.bd < cborBaseTag) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *cborDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + 
d.readNextBd() + } + // treat Nil and Undefined as nil values + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) CheckBreak() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdBreak { + d.bdRead = false + return true + } + return false +} + +func (d *cborDecDriver) decUint() (ui uint64) { + v := d.bd & 0x1f + if v <= 0x17 { + ui = uint64(v) + } else { + if v == 0x18 { + ui = uint64(d.r.readn1()) + } else if v == 0x19 { + ui = uint64(bigen.Uint16(d.r.readx(2))) + } else if v == 0x1a { + ui = uint64(bigen.Uint32(d.r.readx(4))) + } else if v == 0x1b { + ui = uint64(bigen.Uint64(d.r.readx(8))) + } else { + d.d.errorf("invalid descriptor decoding uint: %x/%s", d.bd, cbordesc(d.bd)) + return + } + } + return +} + +func (d *cborDecDriver) decCheckInteger() (neg bool) { + if !d.bdRead { + d.readNextBd() + } + major := d.bd >> 5 + if major == cborMajorUint { + } else if major == cborMajorNegInt { + neg = true + } else { + d.d.errorf("not an integer - invalid major %v from descriptor %x/%s", major, d.bd, cbordesc(d.bd)) + return + } + return +} + +func (d *cborDecDriver) DecodeInt64() (i int64) { + neg := d.decCheckInteger() + ui := d.decUint() + // check if this number can be converted to an int without overflow + if neg { + i = -(chkOvf.SignedIntV(ui + 1)) + } else { + i = chkOvf.SignedIntV(ui) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeUint64() (ui uint64) { + if d.decCheckInteger() { + d.d.errorf("assigning negative signed value to unsigned type") + return + } + ui = d.decUint() + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdFloat16 { + f = float64(math.Float32frombits(halfFloatToFloatBits(bigen.Uint16(d.r.readx(2))))) + } else if bd == cborBdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if bd == cborBdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else if bd >= cborBaseUint && bd < cborBaseBytes { + f = float64(d.DecodeInt64()) + } else { + d.d.errorf("float only valid from float16/32/64 - invalid descriptor %x/%s", bd, cbordesc(bd)) + return + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). 
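+// For example, a lone 0xf5 (cborBdTrue) decodes as true and 0xf4 (cborBdFalse)
+// as false; any other descriptor is reported as an error here.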
+func (d *cborDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if bd := d.bd; bd == cborBdTrue { + b = true + } else if bd == cborBdFalse { + } else { + d.d.errorf("not bool - %s %x/%s", msgBadDesc, d.bd, cbordesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *cborDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteMap { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + if d.bd == cborBdIndefiniteArray { + return -1 + } + return d.decLen() +} + +func (d *cborDecDriver) decLen() int { + return int(d.decUint()) +} + +func (d *cborDecDriver) decAppendIndefiniteBytes(bs []byte) []byte { + d.bdRead = false + for { + if d.CheckBreak() { + break + } + if major := d.bd >> 5; major != cborMajorBytes && major != cborMajorText { + d.d.errorf("expect bytes/string major type in indefinite string/bytes;"+ + " got major %v from descriptor %x/%x", major, d.bd, cbordesc(d.bd)) + return nil + } + n := d.decLen() + oldLen := len(bs) + newLen := oldLen + n + if newLen > cap(bs) { + bs2 := make([]byte, newLen, 2*cap(bs)+n) + copy(bs2, bs) + bs = bs2 + } else { + bs = bs[:newLen] + } + d.r.readb(bs[oldLen:newLen]) + // bs = append(bs, d.r.readn()...) + d.bdRead = false + } + d.bdRead = false + return bs +} + +func (d *cborDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return nil + } + if d.bd == cborBdIndefiniteBytes || d.bd == cborBdIndefiniteString { + d.bdRead = false + if bs == nil { + if zerocopy { + return d.decAppendIndefiniteBytes(d.d.b[:0]) + } + return d.decAppendIndefiniteBytes(zeroByteSlice) + } + return d.decAppendIndefiniteBytes(bs[:0]) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd == cborBdIndefiniteArray || (d.bd >= cborBaseArray && d.bd < cborBaseMap) { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *cborDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *cborDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *cborDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == cborBdNil || d.bd == cborBdUndefined { + d.bdRead = false + return + } + xtag := d.decUint() + d.bdRead = false + return d.decodeTime(xtag) +} + +func (d *cborDecDriver) decodeTime(xtag uint64) (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + switch xtag { + case 0: + var err error + if t, err = time.Parse(time.RFC3339, stringView(d.DecodeStringAsBytes())); err != nil { + d.d.errorv(err) + } + case 1: + // decode an int64 or a float, and infer time.Time from there. + // for floats, round to microseconds, as that is what is guaranteed to fit well. 
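+		// e.g. an integer payload 1500000000 yields time.Unix(1500000000, 0),
+		// while a float payload 1.5 yields time.Unix(1, 500000000); both are
+		// then rounded to microseconds below.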
+ switch { + case d.bd == cborBdFloat16, d.bd == cborBdFloat32: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd == cborBdFloat64: + f1, f2 := math.Modf(d.DecodeFloat64()) + t = time.Unix(int64(f1), int64(f2*1e9)) + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt, + d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + t = time.Unix(d.DecodeInt64(), 0) + default: + d.d.errorf("time.Time can only be decoded from a number (or RFC3339 string)") + } + default: + d.d.errorf("invalid tag for time.Time - expecting 0 or 1, got 0x%x", xtag) + } + t = t.UTC().Round(time.Microsecond) + return +} + +func (d *cborDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if !d.bdRead { + d.readNextBd() + } + u := d.decUint() + d.bdRead = false + realxtag = u + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + d.d.decode(&re.Value) + } else if xtag != realxtag { + d.d.errorf("Wrong extension tag. Got %b. Expecting: %v", realxtag, xtag) + return + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + d.bdRead = false + return +} + +func (d *cborDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case cborBdNil: + n.v = valueTypeNil + case cborBdFalse: + n.v = valueTypeBool + n.b = false + case cborBdTrue: + n.v = valueTypeBool + n.b = true + case cborBdFloat16, cborBdFloat32, cborBdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case cborBdIndefiniteBytes: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case cborBdIndefiniteString: + n.v = valueTypeString + n.s = d.DecodeString() + case cborBdIndefiniteArray: + n.v = valueTypeArray + decodeFurther = true + case cborBdIndefiniteMap: + n.v = valueTypeMap + decodeFurther = true + default: + switch { + case d.bd >= cborBaseUint && d.bd < cborBaseNegInt: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case d.bd >= cborBaseNegInt && d.bd < cborBaseBytes: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case d.bd >= cborBaseBytes && d.bd < cborBaseString: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case d.bd >= cborBaseString && d.bd < cborBaseArray: + n.v = valueTypeString + n.s = d.DecodeString() + case d.bd >= cborBaseArray && d.bd < cborBaseMap: + n.v = valueTypeArray + decodeFurther = true + case d.bd >= cborBaseMap && d.bd < cborBaseTag: + n.v = valueTypeMap + decodeFurther = true + case d.bd >= cborBaseTag && d.bd < cborBaseSimple: + n.v = valueTypeExt + n.u = d.decUint() + n.l = nil + if n.u == 0 || n.u == 1 { + d.bdRead = false + n.v = valueTypeTime + n.t = d.decodeTime(n.u) + } + // d.bdRead = false + // d.d.decode(&re.Value) // handled by decode itself. + // decodeFurther = true + default: + d.d.errorf("decodeNaked: Unrecognized d.bd: 0x%x", d.bd) + return + } + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +// ------------------------- + +// CborHandle is a Handle for the CBOR encoding format, +// defined at http://tools.ietf.org/html/rfc7049 and documented further at http://cbor.io . +// +// CBOR is comprehensively supported, including support for: +// - indefinite-length arrays/maps/bytes/strings +// - (extension) tags in range 0..0xffff (0 .. 65535) +// - half, single and double-precision floats +// - all numbers (1, 2, 4 and 8-byte signed and unsigned integers) +// - nil, true, false, ... 
+// - arrays and maps, bytes and text strings +// +// None of the optional extensions (with tags) defined in the spec are supported out-of-the-box. +// Users can implement them as needed (using SetExt), including spec-documented ones: +// - timestamp, BigNum, BigFloat, Decimals, +// - Encoded Text (e.g. URL, regexp, base64, MIME Message), etc. +type CborHandle struct { + binaryEncodingType + noElemSeparators + BasicHandle + + // IndefiniteLength=true, means that we encode using indefinitelength + IndefiniteLength bool + + // TimeRFC3339 says to encode time.Time using RFC3339 format. + // If unset, we encode time.Time using seconds past epoch. + TimeRFC3339 bool + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: cbor +func (h *CborHandle) Name() string { return "cbor" } + +// SetInterfaceExt sets an extension +func (h *CborHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +func (h *CborHandle) newEncDriver(e *Encoder) encDriver { + return &cborEncDriver{e: e, w: e.w, h: h} +} + +func (h *CborHandle) newDecDriver(d *Decoder) decDriver { + return &cborDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *cborEncDriver) reset() { + e.w = e.e.w +} + +func (d *cborDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*cborDecDriver)(nil) +var _ encDriver = (*cborEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/decode.go b/vendor/github.com/ugorji/go/codec/decode.go new file mode 100644 index 0000000000..1c0817aafa --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/decode.go @@ -0,0 +1,2552 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "encoding" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" + "time" +) + +// Some tagging information for error messages. +const ( + msgBadDesc = "unrecognized descriptor byte" + msgDecCannotExpandArr = "cannot expand go array from %v to stream length: %v" +) + +const decDefSliceCap = 8 +const decDefChanCap = 64 // should be large, as cap cannot be expanded +const decScratchByteArrayLen = cacheLineSize - 8 + +var ( + errstrOnlyMapOrArrayCanDecodeIntoStruct = "only encoded map or array can be decoded into a struct" + errstrCannotDecodeIntoNil = "cannot decode into nil" + + errmsgExpandSliceOverflow = "expand slice: slice overflow" + errmsgExpandSliceCannotChange = "expand slice: cannot change" + + errDecoderNotInitialized = errors.New("Decoder not initialized") + + errDecUnreadByteNothingToRead = errors.New("cannot unread - nothing has been read") + errDecUnreadByteLastByteNotRead = errors.New("cannot unread - last byte has not been read") + errDecUnreadByteUnknown = errors.New("cannot unread - reason unknown") +) + +// decReader abstracts the reading source, allowing implementations that can +// read from an io.Reader or directly off a byte slice with zero-copying. +type decReader interface { + unreadn1() + + // readx will use the implementation scratch buffer if possible i.e. n < len(scratchbuf), OR + // just return a view of the []byte being decoded from. + // Ensure you call detachZeroCopyBytes later if this needs to be sent outside codec control. 
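+	// (of the implementations in this file, bytesDecReader returns a sub-slice
+	// of its input - zero copy - while ioDecReader copies into its scratch
+	// array or a freshly allocated []byte)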
+ readx(n int) []byte + readb([]byte) + readn1() uint8 + numread() int // number of bytes read + track() + stopTrack() []byte + + // skip will skip any byte that matches, and return the first non-matching byte + skip(accept *bitset256) (token byte) + // readTo will read any byte that matches, stopping once no-longer matching. + readTo(in []byte, accept *bitset256) (out []byte) + // readUntil will read, only stopping once it matches the 'stop' byte. + readUntil(in []byte, stop byte) (out []byte) +} + +type decDriver interface { + // this will check if the next token is a break. + CheckBreak() bool + // Note: TryDecodeAsNil should be careful not to share any temporary []byte with + // the rest of the decDriver. This is because sometimes, we optimize by holding onto + // a transient []byte, and ensuring the only other call we make to the decDriver + // during that time is maybe a TryDecodeAsNil() call. + TryDecodeAsNil() bool + // vt is one of: Bytes, String, Nil, Slice or Map. Return unSet if not known. + ContainerType() (vt valueType) + // IsBuiltinType(rt uintptr) bool + + // DecodeNaked will decode primitives (number, bool, string, []byte) and RawExt. + // For maps and arrays, it will not do the decoding in-band, but will signal + // the decoder, so that is done later, by setting the decNaked.valueType field. + // + // Note: Numbers are decoded as int64, uint64, float64 only (no smaller sized number types). + // for extensions, DecodeNaked must read the tag and the []byte if it exists. + // if the []byte is not read, then kInterfaceNaked will treat it as a Handle + // that stores the subsequent value in-band, and complete reading the RawExt. + // + // extensions should also use readx to decode them, for efficiency. + // kInterface will extract the detached byte slice if it has to pass it outside its realm. + DecodeNaked() + + // Deprecated: use DecodeInt64 and DecodeUint64 instead + // DecodeInt(bitsize uint8) (i int64) + // DecodeUint(bitsize uint8) (ui uint64) + + DecodeInt64() (i int64) + DecodeUint64() (ui uint64) + + DecodeFloat64() (f float64) + DecodeBool() (b bool) + // DecodeString can also decode symbols. + // It looks redundant as DecodeBytes is available. + // However, some codecs (e.g. binc) support symbols and can + // return a pre-stored string value, meaning that it can bypass + // the cost of []byte->string conversion. + DecodeString() (s string) + DecodeStringAsBytes() (v []byte) + + // DecodeBytes may be called directly, without going through reflection. + // Consequently, it must be designed to handle possible nil. + DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) + // DecodeBytes(bs []byte, isstring, zerocopy bool) (bsOut []byte) + + // decodeExt will decode into a *RawExt or into an extension. 
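+	// (when ext is nil, implementations fill the passed *RawExt with the tag
+	// and the extension payload instead of dispatching to an Ext)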
+ DecodeExt(v interface{}, xtag uint64, ext Ext) (realxtag uint64) + // decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) + + DecodeTime() (t time.Time) + + ReadArrayStart() int + ReadArrayElem() + ReadArrayEnd() + ReadMapStart() int + ReadMapElemKey() + ReadMapElemValue() + ReadMapEnd() + + reset() + uncacheRead() +} + +type decDriverNoopContainerReader struct{} + +func (x decDriverNoopContainerReader) ReadArrayStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadArrayElem() {} +func (x decDriverNoopContainerReader) ReadArrayEnd() {} +func (x decDriverNoopContainerReader) ReadMapStart() (v int) { return } +func (x decDriverNoopContainerReader) ReadMapElemKey() {} +func (x decDriverNoopContainerReader) ReadMapElemValue() {} +func (x decDriverNoopContainerReader) ReadMapEnd() {} +func (x decDriverNoopContainerReader) CheckBreak() (v bool) { return } + +// func (x decNoSeparator) uncacheRead() {} + +// DecodeOptions captures configuration options during decode. +type DecodeOptions struct { + // MapType specifies type to use during schema-less decoding of a map in the stream. + // If nil (unset), we default to map[string]interface{} iff json handle and MapStringAsKey=true, + // else map[interface{}]interface{}. + MapType reflect.Type + + // SliceType specifies type to use during schema-less decoding of an array in the stream. + // If nil (unset), we default to []interface{} for all formats. + SliceType reflect.Type + + // MaxInitLen defines the maxinum initial length that we "make" a collection + // (string, slice, map, chan). If 0 or negative, we default to a sensible value + // based on the size of an element in the collection. + // + // For example, when decoding, a stream may say that it has 2^64 elements. + // We should not auto-matically provision a slice of that size, to prevent Out-Of-Memory crash. + // Instead, we provision up to MaxInitLen, fill that up, and start appending after that. + MaxInitLen int + + // ReaderBufferSize is the size of the buffer used when reading. + // + // if > 0, we use a smart buffer internally for performance purposes. + ReaderBufferSize int + + // If ErrorIfNoField, return an error when decoding a map + // from a codec stream into a struct, and no matching struct field is found. + ErrorIfNoField bool + + // If ErrorIfNoArrayExpand, return an error when decoding a slice/array that cannot be expanded. + // For example, the stream contains an array of 8 items, but you are decoding into a [4]T array, + // or you are decoding into a slice of length 4 which is non-addressable (and so cannot be set). + ErrorIfNoArrayExpand bool + + // If SignedInteger, use the int64 during schema-less decoding of unsigned values (not uint64). + SignedInteger bool + + // MapValueReset controls how we decode into a map value. + // + // By default, we MAY retrieve the mapping for a key, and then decode into that. + // However, especially with big maps, that retrieval may be expensive and unnecessary + // if the stream already contains all that is necessary to recreate the value. + // + // If true, we will never retrieve the previous mapping, + // but rather decode into a new value and set that in the map. + // + // If false, we will retrieve the previous mapping if necessary e.g. + // the previous mapping is a pointer, or is a struct or array with pre-set state, + // or is an interface. + MapValueReset bool + + // SliceElementReset: on decoding a slice, reset the element to a zero value first. 
+ // + // concern: if the slice already contained some garbage, we will decode into that garbage. + SliceElementReset bool + + // InterfaceReset controls how we decode into an interface. + // + // By default, when we see a field that is an interface{...}, + // or a map with interface{...} value, we will attempt decoding into the + // "contained" value. + // + // However, this prevents us from reading a string into an interface{} + // that formerly contained a number. + // + // If true, we will decode into a new "blank" value, and set that in the interface. + // If false, we will decode into whatever is contained in the interface. + InterfaceReset bool + + // InternString controls interning of strings during decoding. + // + // Some handles, e.g. json, typically will read map keys as strings. + // If the set of keys are finite, it may help reduce allocation to + // look them up from a map (than to allocate them afresh). + // + // Note: Handles will be smart when using the intern functionality. + // Every string should not be interned. + // An excellent use-case for interning is struct field names, + // or map keys where key type is string. + InternString bool + + // PreferArrayOverSlice controls whether to decode to an array or a slice. + // + // This only impacts decoding into a nil interface{}. + // Consequently, it has no effect on codecgen. + // + // *Note*: This only applies if using go1.5 and above, + // as it requires reflect.ArrayOf support which was absent before go1.5. + PreferArrayOverSlice bool + + // DeleteOnNilMapValue controls how to decode a nil value in the stream. + // + // If true, we will delete the mapping of the key. + // Else, just set the mapping to the zero value of the type. + DeleteOnNilMapValue bool +} + +// ------------------------------------ + +type bufioDecReader struct { + buf []byte + r io.Reader + + c int // cursor + n int // num read + err error + + tr []byte + trb bool + b [4]byte +} + +func (z *bufioDecReader) reset(r io.Reader) { + z.r, z.c, z.n, z.err, z.trb = r, 0, 0, nil, false + if z.tr != nil { + z.tr = z.tr[:0] + } +} + +func (z *bufioDecReader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + p0 := p + n = copy(p, z.buf[z.c:]) + z.c += n + if z.c == len(z.buf) { + z.c = 0 + } + z.n += n + if len(p) == n { + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + p = p[n:] + var n2 int + // if we are here, then z.buf is all read + if len(p) > len(z.buf) { + n2, err = decReadFull(z.r, p) + n += n2 + z.n += n2 + z.err = err + // don't return EOF if some bytes were read. keep for next time. + if n > 0 && err == io.EOF { + err = nil + } + // always keep last byte in z.buf + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + if z.trb { + z.tr = append(z.tr, p0[:n]...) + } + return + } + // z.c is now 0, and len(p) <= len(z.buf) + for len(p) > 0 && z.err == nil { + // println("len(p) loop starting ... ") + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, err = z.r.Read(z.buf) + if n2 > 0 { + if err == io.EOF { + err = nil + } + z.buf = z.buf[:n2] + n2 = copy(p, z.buf) + z.c = n2 + n += n2 + z.n += n2 + p = p[n2:] + } + z.err = err + // println("... len(p) loop done") + } + if z.c == 0 { + z.buf = z.buf[:1] + z.buf[0] = p[len(p)-1] + z.c = 1 + } + if z.trb { + z.tr = append(z.tr, p0[:n]...) 
+ } + return +} + +func (z *bufioDecReader) ReadByte() (b byte, err error) { + z.b[0] = 0 + _, err = z.Read(z.b[:1]) + b = z.b[0] + return +} + +func (z *bufioDecReader) UnreadByte() (err error) { + if z.err != nil { + return z.err + } + if z.c > 0 { + z.c-- + z.n-- + if z.trb { + z.tr = z.tr[:len(z.tr)-1] + } + return + } + return errDecUnreadByteNothingToRead +} + +func (z *bufioDecReader) numread() int { + return z.n +} + +func (z *bufioDecReader) readx(n int) (bs []byte) { + if n <= 0 || z.err != nil { + return + } + if z.c+n <= len(z.buf) { + bs = z.buf[z.c : z.c+n] + z.n += n + z.c += n + if z.trb { + z.tr = append(z.tr, bs...) + } + return + } + bs = make([]byte, n) + _, err := z.Read(bs) + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) readb(bs []byte) { + _, err := z.Read(bs) + if err != nil { + panic(err) + } +} + +// func (z *bufioDecReader) readn1eof() (b uint8, eof bool) { +// b, err := z.ReadByte() +// if err != nil { +// if err == io.EOF { +// eof = true +// } else { +// panic(err) +// } +// } +// return +// } + +func (z *bufioDecReader) readn1() (b uint8) { + b, err := z.ReadByte() + if err != nil { + panic(err) + } + return +} + +func (z *bufioDecReader) search(in []byte, accept *bitset256, stop, flag uint8) (token byte, out []byte) { + // flag: 1 (skip), 2 (readTo), 4 (readUntil) + if flag == 4 { + for i := z.c; i < len(z.buf); i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + i++ + out = z.buf[z.c:i] + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := z.c; i < len(z.buf); i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n = z.n + (i - z.c) - 1 + if flag == 1 { + i++ + } else { + out = z.buf[z.c:i] + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + z.n += len(z.buf) - z.c + if flag != 1 { + out = append(in, z.buf[z.c:]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:]...) + } + var n2 int + if z.err != nil { + return + } + for { + z.c = 0 + z.buf = z.buf[0:cap(z.buf)] + n2, z.err = z.r.Read(z.buf) + if n2 > 0 && z.err != nil { + z.err = nil + } + z.buf = z.buf[:n2] + if flag == 4 { + for i := 0; i < n2; i++ { + if z.buf[i] == stop { + token = z.buf[i] + z.n += i - 1 + i++ + out = append(out, z.buf[z.c:i]...) + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } else { + for i := 0; i < n2; i++ { + if !accept.isset(z.buf[i]) { + token = z.buf[i] + z.n += i - 1 + if flag == 1 { + i++ + } + if flag != 1 { + out = append(out, z.buf[z.c:i]...) + } + if z.trb { + z.tr = append(z.tr, z.buf[z.c:i]...) + } + z.c = i + return + } + } + } + if flag != 1 { + out = append(out, z.buf[:n2]...) + } + z.n += n2 + if z.err != nil { + return + } + if z.trb { + z.tr = append(z.tr, z.buf[:n2]...) 
+ } + } +} + +func (z *bufioDecReader) skip(accept *bitset256) (token byte) { + token, _ = z.search(nil, accept, 0, 1) + return +} + +func (z *bufioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + _, out = z.search(in, accept, 0, 2) + return +} + +func (z *bufioDecReader) readUntil(in []byte, stop byte) (out []byte) { + _, out = z.search(in, nil, stop, 4) + return +} + +func (z *bufioDecReader) unreadn1() { + err := z.UnreadByte() + if err != nil { + panic(err) + } +} + +func (z *bufioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *bufioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ioDecReader is a decReader that reads off an io.Reader. +// +// It also has a fallback implementation of ByteScanner if needed. +type ioDecReader struct { + r io.Reader // the reader passed in + + rr io.Reader + br io.ByteScanner + + l byte // last byte + ls byte // last byte status. 0: init-canDoNothing, 1: canRead, 2: canUnread + trb bool // tracking bytes turned on + _ bool + b [4]byte // tiny buffer for reading single bytes + + x [scratchByteArrayLen]byte // for: get struct field name, swallow valueTypeBytes, etc + n int // num read + tr []byte // tracking bytes read +} + +func (z *ioDecReader) reset(r io.Reader) { + z.r = r + z.rr = r + z.l, z.ls, z.n, z.trb = 0, 0, 0, false + if z.tr != nil { + z.tr = z.tr[:0] + } + var ok bool + if z.br, ok = r.(io.ByteScanner); !ok { + z.br = z + z.rr = z + } +} + +func (z *ioDecReader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + var firstByte bool + if z.ls == 1 { + z.ls = 2 + p[0] = z.l + if len(p) == 1 { + n = 1 + return + } + firstByte = true + p = p[1:] + } + n, err = z.r.Read(p) + if n > 0 { + if err == io.EOF && n == len(p) { + err = nil // read was successful, so postpone EOF (till next time) + } + z.l = p[n-1] + z.ls = 2 + } + if firstByte { + n++ + } + return +} + +func (z *ioDecReader) ReadByte() (c byte, err error) { + n, err := z.Read(z.b[:1]) + if n == 1 { + c = z.b[0] + if err == io.EOF { + err = nil // read was successful, so postpone EOF (till next time) + } + } + return +} + +func (z *ioDecReader) UnreadByte() (err error) { + switch z.ls { + case 2: + z.ls = 1 + case 0: + err = errDecUnreadByteNothingToRead + case 1: + err = errDecUnreadByteLastByteNotRead + default: + err = errDecUnreadByteUnknown + } + return +} + +func (z *ioDecReader) numread() int { + return z.n +} + +func (z *ioDecReader) readx(n int) (bs []byte) { + if n <= 0 { + return + } + if n < len(z.x) { + bs = z.x[:n] + } else { + bs = make([]byte, n) + } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) + } + return +} + +func (z *ioDecReader) readb(bs []byte) { + // if len(bs) == 0 { + // return + // } + if _, err := decReadFull(z.rr, bs); err != nil { + panic(err) + } + z.n += len(bs) + if z.trb { + z.tr = append(z.tr, bs...) 
+ } +} + +func (z *ioDecReader) readn1eof() (b uint8, eof bool) { + b, err := z.br.ReadByte() + if err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + } else if err == io.EOF { + eof = true + } else { + panic(err) + } + return +} + +func (z *ioDecReader) readn1() (b uint8) { + var err error + if b, err = z.br.ReadByte(); err == nil { + z.n++ + if z.trb { + z.tr = append(z.tr, b) + } + return + } + panic(err) +} + +func (z *ioDecReader) skip(accept *bitset256) (token byte) { + for { + var eof bool + token, eof = z.readn1eof() + if eof { + return + } + if accept.isset(token) { + continue + } + return + } +} + +func (z *ioDecReader) readTo(in []byte, accept *bitset256) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + return + } + if accept.isset(token) { + out = append(out, token) + } else { + z.unreadn1() + return + } + } +} + +func (z *ioDecReader) readUntil(in []byte, stop byte) (out []byte) { + out = in + for { + token, eof := z.readn1eof() + if eof { + panic(io.EOF) + } + out = append(out, token) + if token == stop { + return + } + } +} + +func (z *ioDecReader) unreadn1() { + err := z.br.UnreadByte() + if err != nil { + panic(err) + } + z.n-- + if z.trb { + if l := len(z.tr) - 1; l >= 0 { + z.tr = z.tr[:l] + } + } +} + +func (z *ioDecReader) track() { + if z.tr != nil { + z.tr = z.tr[:0] + } + z.trb = true +} + +func (z *ioDecReader) stopTrack() (bs []byte) { + z.trb = false + return z.tr +} + +// ------------------------------------ + +var errBytesDecReaderCannotUnread = errors.New("cannot unread last byte read") + +// bytesDecReader is a decReader that reads off a byte slice with zero copying +type bytesDecReader struct { + b []byte // data + c int // cursor + a int // available + t int // track start +} + +func (z *bytesDecReader) reset(in []byte) { + z.b = in + z.a = len(in) + z.c = 0 + z.t = 0 +} + +func (z *bytesDecReader) numread() int { + return z.c +} + +func (z *bytesDecReader) unreadn1() { + if z.c == 0 || len(z.b) == 0 { + panic(errBytesDecReaderCannotUnread) + } + z.c-- + z.a++ + return +} + +func (z *bytesDecReader) readx(n int) (bs []byte) { + // slicing from a non-constant start position is more expensive, + // as more computation is required to decipher the pointer start position. + // However, we do it only once, and it's better than reslicing both z.b and return value. 
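+	// The returned slice is a view into the underlying buffer z.b; no copy is made.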
+
+	if n <= 0 {
+	} else if z.a == 0 {
+		panic(io.EOF)
+	} else if n > z.a {
+		panic(io.ErrUnexpectedEOF)
+	} else {
+		c0 := z.c
+		z.c = c0 + n
+		z.a = z.a - n
+		bs = z.b[c0:z.c]
+	}
+	return
+}
+
+func (z *bytesDecReader) readb(bs []byte) {
+	copy(bs, z.readx(len(bs)))
+}
+
+func (z *bytesDecReader) readn1() (v uint8) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	v = z.b[z.c]
+	z.c++
+	z.a--
+	return
+}
+
+// func (z *bytesDecReader) readn1eof() (v uint8, eof bool) {
+// 	if z.a == 0 {
+// 		eof = true
+// 		return
+// 	}
+// 	v = z.b[z.c]
+// 	z.c++
+// 	z.a--
+// 	return
+// }
+
+func (z *bytesDecReader) skip(accept *bitset256) (token byte) {
+	if z.a == 0 {
+		return
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if !accept.isset(z.b[i]) {
+			token = z.b[i]
+			i++
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	z.a, z.c = 0, blen
+	return
+}
+
+func (z *bytesDecReader) readTo(_ []byte, accept *bitset256) (out []byte) {
+	if z.a == 0 {
+		return
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if !accept.isset(z.b[i]) {
+			out = z.b[z.c:i]
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	out = z.b[z.c:]
+	z.a, z.c = 0, blen
+	return
+}
+
+func (z *bytesDecReader) readUntil(_ []byte, stop byte) (out []byte) {
+	if z.a == 0 {
+		panic(io.EOF)
+	}
+	blen := len(z.b)
+	for i := z.c; i < blen; i++ {
+		if z.b[i] == stop {
+			i++
+			out = z.b[z.c:i]
+			z.a -= (i - z.c)
+			z.c = i
+			return
+		}
+	}
+	z.a, z.c = 0, blen
+	panic(io.EOF)
+}
+
+func (z *bytesDecReader) track() {
+	z.t = z.c
+}
+
+func (z *bytesDecReader) stopTrack() (bs []byte) {
+	return z.b[z.t:z.c]
+}
+
+// ----------------------------------------
+
+// func (d *Decoder) builtin(f *codecFnInfo, rv reflect.Value) {
+// 	d.d.DecodeBuiltin(f.ti.rtid, rv2i(rv))
+// }
+
+func (d *Decoder) rawExt(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), 0, nil)
+}
+
+func (d *Decoder) ext(f *codecFnInfo, rv reflect.Value) {
+	d.d.DecodeExt(rv2i(rv), f.xfTag, f.xfFn)
+}
+
+func (d *Decoder) selferUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	rv2i(rv).(Selfer).CodecDecodeSelf(d)
+}
+
+func (d *Decoder) binaryUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	bm := rv2i(rv).(encoding.BinaryUnmarshaler)
+	xbs := d.d.DecodeBytes(nil, true)
+	if fnerr := bm.UnmarshalBinary(xbs); fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) textUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(encoding.TextUnmarshaler)
+	fnerr := tm.UnmarshalText(d.d.DecodeStringAsBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) jsonUnmarshal(f *codecFnInfo, rv reflect.Value) {
+	tm := rv2i(rv).(jsonUnmarshaler)
+	// bs := d.d.DecodeBytes(d.b[:], true, true)
+	// grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself.
+	fnerr := tm.UnmarshalJSON(d.nextValueBytes())
+	if fnerr != nil {
+		panic(fnerr)
+	}
+}
+
+func (d *Decoder) kErr(f *codecFnInfo, rv reflect.Value) {
+	d.errorf("no decoding function defined for kind %v", rv.Kind())
+}
+
+// var kIntfCtr uint64
+
+func (d *Decoder) kInterfaceNaked(f *codecFnInfo) (rvn reflect.Value) {
+	// nil interface:
+	// use some heuristics to decode it appropriately
+	// based on the detected next value in the stream.
+	n := d.naked()
+	d.d.DecodeNaked()
+	if n.v == valueTypeNil {
+		return
+	}
+	// We cannot decode non-nil stream value into nil interface with methods (e.g. io.Reader).
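+	// For example, a stream string cannot become a nil io.Reader, as there is
+	// no way to infer a concrete type that has the required Read method.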
+ if f.ti.numMeth > 0 { + d.errorf("cannot decode non-nil codec value into nil %v (%v methods)", f.ti.rt, f.ti.numMeth) + return + } + // var useRvn bool + switch n.v { + case valueTypeMap: + // if json, default to a map type with string keys + mtid := d.mtid + if mtid == 0 { + if d.jsms { + mtid = mapStrIntfTypId + } else { + mtid = mapIntfIntfTypId + } + } + if mtid == mapIntfIntfTypId { + n.initContainers() + if n.lm < arrayCacheLen { + n.ma[n.lm] = nil + rvn = n.rma[n.lm] + n.lm++ + d.decode(&n.ma[n.lm-1]) + n.lm-- + } else { + var v2 map[interface{}]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + } else if mtid == mapStrIntfTypId { // for json performance + n.initContainers() + if n.ln < arrayCacheLen { + n.na[n.ln] = nil + rvn = n.rna[n.ln] + n.ln++ + d.decode(&n.na[n.ln-1]) + n.ln-- + } else { + var v2 map[string]interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + } else { + if d.mtr { + rvn = reflect.New(d.h.MapType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.MapType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeArray: + if d.stid == 0 || d.stid == intfSliceTypId { + n.initContainers() + if n.ls < arrayCacheLen { + n.sa[n.ls] = nil + rvn = n.rsa[n.ls] + n.ls++ + d.decode(&n.sa[n.ls-1]) + n.ls-- + } else { + var v2 []interface{} + d.decode(&v2) + rvn = reflect.ValueOf(&v2).Elem() + } + if reflectArrayOfSupported && d.stid == 0 && d.h.PreferArrayOverSlice { + rvn2 := reflect.New(reflectArrayOf(rvn.Len(), intfTyp)).Elem() + reflect.Copy(rvn2, rvn) + rvn = rvn2 + } + } else { + if d.str { + rvn = reflect.New(d.h.SliceType) + d.decode(rv2i(rvn)) + rvn = rvn.Elem() + } else { + rvn = reflect.New(d.h.SliceType).Elem() + d.decodeValue(rvn, nil, true) + } + } + case valueTypeExt: + var v interface{} + tag, bytes := n.u, n.l // calling decode below might taint the values + if bytes == nil { + n.initContainers() + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + // v = *(&n.ia[l]) + n.li-- + v = n.ia[n.li] + n.ia[n.li] = nil + } else { + d.decode(&v) + } + } + bfn := d.h.getExtForTag(tag) + if bfn == nil { + var re RawExt + re.Tag = tag + re.Data = detachZeroCopyBytes(d.bytes, nil, bytes) + re.Value = v + rvn = reflect.ValueOf(&re).Elem() + } else { + rvnA := reflect.New(bfn.rt) + if bytes != nil { + bfn.ext.ReadExt(rv2i(rvnA), bytes) + } else { + bfn.ext.UpdateExt(rv2i(rvnA), v) + } + rvn = rvnA.Elem() + } + case valueTypeNil: + // no-op + case valueTypeInt: + rvn = n.ri + case valueTypeUint: + rvn = n.ru + case valueTypeFloat: + rvn = n.rf + case valueTypeBool: + rvn = n.rb + case valueTypeString, valueTypeSymbol: + rvn = n.rs + case valueTypeBytes: + rvn = n.rl + case valueTypeTime: + rvn = n.rt + default: + panicv.errorf("kInterfaceNaked: unexpected valueType: %d", n.v) + } + return +} + +func (d *Decoder) kInterface(f *codecFnInfo, rv reflect.Value) { + // Note: + // A consequence of how kInterface works, is that + // if an interface already contains something, we try + // to decode into what was there before. + // We do not replace with a generic value (as got from decodeNaked). + + // every interface passed here MUST be settable. 
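+	// (rv.Set is called below, and reflect panics on Set of a non-settable Value.)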
+ var rvn reflect.Value + if rv.IsNil() || d.h.InterfaceReset { + // check if mapping to a type: if so, initialize it and move on + rvn = d.h.intf2impl(f.ti.rtid) + if rvn.IsValid() { + rv.Set(rvn) + } else { + rvn = d.kInterfaceNaked(f) + if rvn.IsValid() { + rv.Set(rvn) + } else if d.h.InterfaceReset { + // reset to zero value based on current type in there. + rv.Set(reflect.Zero(rv.Elem().Type())) + } + return + } + } else { + // now we have a non-nil interface value, meaning it contains a type + rvn = rv.Elem() + } + if d.d.TryDecodeAsNil() { + rv.Set(reflect.Zero(rvn.Type())) + return + } + + // Note: interface{} is settable, but underlying type may not be. + // Consequently, we MAY have to create a decodable value out of the underlying value, + // decode into it, and reset the interface itself. + // fmt.Printf(">>>> kInterface: rvn type: %v, rv type: %v\n", rvn.Type(), rv.Type()) + + rvn2, canDecode := isDecodeable(rvn) + if canDecode { + d.decodeValue(rvn2, nil, true) + return + } + + rvn2 = reflect.New(rvn.Type()).Elem() + rvn2.Set(rvn) + d.decodeValue(rvn2, nil, true) + rv.Set(rvn2) +} + +func decStructFieldKey(dd decDriver, keyType valueType, b *[decScratchByteArrayLen]byte) (rvkencname []byte) { + // use if-else-if, not switch (which compiles to binary-search) + // since keyType is typically valueTypeString, branch prediction is pretty good. + + if keyType == valueTypeString { + rvkencname = dd.DecodeStringAsBytes() + } else if keyType == valueTypeInt { + rvkencname = strconv.AppendInt(b[:0], dd.DecodeInt64(), 10) + } else if keyType == valueTypeUint { + rvkencname = strconv.AppendUint(b[:0], dd.DecodeUint64(), 10) + } else if keyType == valueTypeFloat { + rvkencname = strconv.AppendFloat(b[:0], dd.DecodeFloat64(), 'f', -1, 64) + } else { + rvkencname = dd.DecodeStringAsBytes() + } + return rvkencname +} + +func (d *Decoder) kStruct(f *codecFnInfo, rv reflect.Value) { + fti := f.ti + dd := d.d + elemsep := d.esep + sfn := structFieldNode{v: rv, update: true} + ctyp := dd.ContainerType() + if ctyp == valueTypeMap { + containerLen := dd.ReadMapStart() + if containerLen == 0 { + dd.ReadMapEnd() + return + } + tisfi := fti.sfiSort + hasLen := containerLen >= 0 + + var rvkencname []byte + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadMapElemKey() + } + rvkencname = decStructFieldKey(dd, fti.keyType, &d.b) + if elemsep { + dd.ReadMapElemValue() + } + if k := fti.indexForEncName(rvkencname); k > -1 { + si := tisfi[k] + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } else { + d.structFieldNotFound(-1, stringView(rvkencname)) + } + // keepAlive4StringView(rvkencnameB) // not needed, as reference is outside loop + } + dd.ReadMapEnd() + } else if ctyp == valueTypeArray { + containerLen := dd.ReadArrayStart() + if containerLen == 0 { + dd.ReadArrayEnd() + return + } + // Not much gain from doing it two ways for array. + // Arrays are not used as much for structs. 
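+		// Elements from the stream array are assigned to struct fields in their
+		// declaration order, i.e. element j decodes into the j'th encodeable field.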
+ hasLen := containerLen >= 0 + for j, si := range fti.sfiSrc { + if (hasLen && j == containerLen) || (!hasLen && dd.CheckBreak()) { + break + } + if elemsep { + dd.ReadArrayElem() + } + if dd.TryDecodeAsNil() { + si.setToZeroValue(rv) + } else { + d.decodeValue(sfn.field(si), nil, true) + } + } + if containerLen > len(fti.sfiSrc) { + // read remaining values and throw away + for j := len(fti.sfiSrc); j < containerLen; j++ { + if elemsep { + dd.ReadArrayElem() + } + d.structFieldNotFound(j, "") + } + } + dd.ReadArrayEnd() + } else { + d.errorstr(errstrOnlyMapOrArrayCanDecodeIntoStruct) + return + } +} + +func (d *Decoder) kSlice(f *codecFnInfo, rv reflect.Value) { + // A slice can be set from a map or array in stream. + // This way, the order can be kept (as order is lost with map). + ti := f.ti + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.SendDir) == 0 { + d.errorf("receive-only channel cannot be decoded") + } + dd := d.d + rtelem0 := ti.elem + ctyp := dd.ContainerType() + if ctyp == valueTypeBytes || ctyp == valueTypeString { + // you can only decode bytes or string in the stream into a slice or array of bytes + if !(ti.rtid == uint8SliceTypId || rtelem0.Kind() == reflect.Uint8) { + d.errorf("bytes/string in stream must decode into slice/array of bytes, not %v", ti.rt) + } + if f.seq == seqTypeChan { + bs2 := dd.DecodeBytes(nil, true) + irv := rv2i(rv) + ch, ok := irv.(chan<- byte) + if !ok { + ch = irv.(chan byte) + } + for _, b := range bs2 { + ch <- b + } + } else { + rvbs := rv.Bytes() + bs2 := dd.DecodeBytes(rvbs, false) + // if rvbs == nil && bs2 != nil || rvbs != nil && bs2 == nil || len(bs2) != len(rvbs) { + if !(len(bs2) > 0 && len(bs2) == len(rvbs) && &bs2[0] == &rvbs[0]) { + if rv.CanSet() { + rv.SetBytes(bs2) + } else if len(rvbs) > 0 && len(bs2) > 0 { + copy(rvbs, bs2) + } + } + } + return + } + + // array := f.seq == seqTypeChan + + slh, containerLenS := d.decSliceHelperStart() // only expects valueType(Array|Map) + + // an array can never return a nil slice. so no need to check f.array here. + if containerLenS == 0 { + if rv.CanSet() { + if f.seq == seqTypeSlice { + if rv.IsNil() { + rv.Set(reflect.MakeSlice(ti.rt, 0, 0)) + } else { + rv.SetLen(0) + } + } else if f.seq == seqTypeChan { + if rv.IsNil() { + rv.Set(reflect.MakeChan(ti.rt, 0)) + } + } + } + slh.End() + return + } + + rtelem0Size := int(rtelem0.Size()) + rtElem0Kind := rtelem0.Kind() + rtelem0Mut := !isImmutableKind(rtElem0Kind) + rtelem := rtelem0 + rtelemkind := rtelem.Kind() + for rtelemkind == reflect.Ptr { + rtelem = rtelem.Elem() + rtelemkind = rtelem.Kind() + } + + var fn *codecFn + + var rvCanset = rv.CanSet() + var rvChanged bool + var rv0 = rv + var rv9 reflect.Value + + rvlen := rv.Len() + rvcap := rv.Cap() + hasLen := containerLenS > 0 + if hasLen && f.seq == seqTypeSlice { + if containerLenS > rvcap { + oldRvlenGtZero := rvlen > 0 + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, int(rtelem0.Size())) + if rvlen <= rvcap { + if rvCanset { + rv.SetLen(rvlen) + } + } else if rvCanset { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvcap = rvlen + rvChanged = true + } else { + d.errorf("cannot decode into non-settable slice") + } + if rvChanged && oldRvlenGtZero && !isImmutableKind(rtelem0.Kind()) { + reflect.Copy(rv, rv0) // only copy up to length NOT cap i.e. 
rv0.Slice(0, rvcap) + } + } else if containerLenS != rvlen { + rvlen = containerLenS + if rvCanset { + rv.SetLen(rvlen) + } + // else { + // rv = rv.Slice(0, rvlen) + // rvChanged = true + // d.errorf("cannot decode into non-settable slice") + // } + } + } + + // consider creating new element once, and just decoding into it. + var rtelem0Zero reflect.Value + var rtelem0ZeroValid bool + var decodeAsNil bool + var j int + d.cfer() + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && (f.seq == seqTypeSlice || f.seq == seqTypeChan) && rv.IsNil() { + if hasLen { + rvlen = decInferLen(containerLenS, d.h.MaxInitLen, rtelem0Size) + } else if f.seq == seqTypeSlice { + rvlen = decDefSliceCap + } else { + rvlen = decDefChanCap + } + if rvCanset { + if f.seq == seqTypeSlice { + rv = reflect.MakeSlice(ti.rt, rvlen, rvlen) + rvChanged = true + } else { // chan + // xdebugf(">>>>>> haslen = %v, make chan of type '%v' with length: %v", hasLen, ti.rt, rvlen) + rv = reflect.MakeChan(ti.rt, rvlen) + rvChanged = true + } + } else { + d.errorf("cannot decode into non-settable slice") + } + } + slh.ElemContainerState(j) + decodeAsNil = dd.TryDecodeAsNil() + if f.seq == seqTypeChan { + if decodeAsNil { + rv.Send(reflect.Zero(rtelem0)) + continue + } + if rtelem0Mut || !rv9.IsValid() { // || (rtElem0Kind == reflect.Ptr && rv9.IsNil()) { + rv9 = reflect.New(rtelem0).Elem() + } + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + // xdebugf(">>>> rv9 sent on %v during decode: %v, with len=%v, cap=%v", rv.Type(), rv9, rv.Len(), rv.Cap()) + rv.Send(rv9) + } else { + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= rvlen { + if f.seq == seqTypeArray { + d.arrayCannotExpand(rvlen, j+1) + decodeIntoBlank = true + } else { // if f.seq == seqTypeSlice + // rv = reflect.Append(rv, reflect.Zero(rtelem0)) // append logic + varargs + var rvcap2 int + var rvErrmsg2 string + rv9, rvcap2, rvChanged, rvErrmsg2 = + expandSliceRV(rv, ti.rt, rvCanset, rtelem0Size, 1, rvlen, rvcap) + if rvErrmsg2 != "" { + d.errorf(rvErrmsg2) + } + rvlen++ + if rvChanged { + rv = rv9 + rvcap = rvcap2 + } + } + } + if decodeIntoBlank { + if !decodeAsNil { + d.swallow() + } + } else { + rv9 = rv.Index(j) + if d.h.SliceElementReset || decodeAsNil { + if !rtelem0ZeroValid { + rtelem0ZeroValid = true + rtelem0Zero = reflect.Zero(rtelem0) + } + rv9.Set(rtelem0Zero) + } + if decodeAsNil { + continue + } + + if fn == nil { + fn = d.cf.get(rtelem, true, true) + } + d.decodeValue(rv9, fn, true) + } + } + } + if f.seq == seqTypeSlice { + if j < rvlen { + if rv.CanSet() { + rv.SetLen(j) + } else if rvCanset { + rv = rv.Slice(0, j) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + rvlen = j + } else if j == 0 && rv.IsNil() { + if rvCanset { + rv = reflect.MakeSlice(ti.rt, 0, 0) + rvChanged = true + } // else { d.errorf("kSlice: cannot change non-settable slice") } + } + } + slh.End() + + if rvChanged { // infers rvCanset=true, so it can be reset + rv0.Set(rv) + } +} + +// func (d *Decoder) kArray(f *codecFnInfo, rv reflect.Value) { +// // d.decodeValueFn(rv.Slice(0, rv.Len())) +// f.kSlice(rv.Slice(0, rv.Len())) +// } + +func (d *Decoder) kMap(f *codecFnInfo, rv reflect.Value) { + dd := d.d + containerLen := dd.ReadMapStart() + elemsep := d.esep + ti := f.ti + if rv.IsNil() { + rv.Set(makeMapReflect(ti.rt, containerLen)) + } + + if containerLen == 0 { + dd.ReadMapEnd() + return + } + + ktype, vtype := 
ti.key, ti.elem
+	ktypeId := rt2id(ktype)
+	vtypeKind := vtype.Kind()
+
+	var keyFn, valFn *codecFn
+	var ktypeLo, vtypeLo reflect.Type
+
+	for ktypeLo = ktype; ktypeLo.Kind() == reflect.Ptr; ktypeLo = ktypeLo.Elem() {
+	}
+
+	for vtypeLo = vtype; vtypeLo.Kind() == reflect.Ptr; vtypeLo = vtypeLo.Elem() {
+	}
+
+	var mapGet, mapSet bool
+	rvvImmut := isImmutableKind(vtypeKind)
+	if !d.h.MapValueReset {
+		// if pointer, mapGet = true
+		// if interface, mapGet = true if !DecodeNakedAlways (else false)
+		// if builtin, mapGet = false
+		// else mapGet = true
+		if vtypeKind == reflect.Ptr {
+			mapGet = true
+		} else if vtypeKind == reflect.Interface {
+			if !d.h.InterfaceReset {
+				mapGet = true
+			}
+		} else if !rvvImmut {
+			mapGet = true
+		}
+	}
+
+	var rvk, rvkp, rvv, rvz reflect.Value
+	rvkMut := !isImmutableKind(ktype.Kind()) // if ktype is immutable, then re-use the same rvk.
+	ktypeIsString := ktypeId == stringTypId
+	ktypeIsIntf := ktypeId == intfTypId
+	hasLen := containerLen > 0
+	var kstrbs []byte
+	d.cfer()
+	for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ {
+		if rvkMut || !rvkp.IsValid() {
+			rvkp = reflect.New(ktype)
+			rvk = rvkp.Elem()
+		}
+		if elemsep {
+			dd.ReadMapElemKey()
+		}
+		if false && dd.TryDecodeAsNil() { // nil cannot be a map key, so disregard this block
+			// Previously, if a nil key, we just ignored the mapped value and continued.
+			// However, that makes the result of encoding and then decoding map[intf]intf{nil:nil}
+			// to be an empty map.
+			// Instead, we treat a nil key as the zero value of the type.
+			rvk.Set(reflect.Zero(ktype))
+		} else if ktypeIsString {
+			kstrbs = dd.DecodeStringAsBytes()
+			rvk.SetString(stringView(kstrbs))
+			// NOTE: if doing an insert, you MUST use a real string (not stringview)
+		} else {
+			if keyFn == nil {
+				keyFn = d.cf.get(ktypeLo, true, true)
+			}
+			d.decodeValue(rvk, keyFn, true)
+		}
+		// special case if a byte array.
+		if ktypeIsIntf {
+			if rvk2 := rvk.Elem(); rvk2.IsValid() {
+				if rvk2.Type() == uint8SliceTyp {
+					rvk = reflect.ValueOf(d.string(rvk2.Bytes()))
+				} else {
+					rvk = rvk2
+				}
+			}
+		}
+
+		if elemsep {
+			dd.ReadMapElemValue()
+		}
+
+		// Brittle, but OK per TryDecodeAsNil() contract.
+		// i.e. TryDecodeAsNil never shares slices with other decDriver procedures
+		if dd.TryDecodeAsNil() {
+			if ktypeIsString {
+				rvk.SetString(d.string(kstrbs))
+			}
+			if d.h.DeleteOnNilMapValue {
+				rv.SetMapIndex(rvk, reflect.Value{})
+			} else {
+				rv.SetMapIndex(rvk, reflect.Zero(vtype))
+			}
+			continue
+		}
+
+		mapSet = true // set to false if you do a get, and it's a non-nil pointer
+		if mapGet {
+			// mapGet true only in case where kind=Ptr|Interface or kind is otherwise mutable.
+			rvv = rv.MapIndex(rvk)
+			if !rvv.IsValid() {
+				rvv = reflect.New(vtype).Elem()
+			} else if vtypeKind == reflect.Ptr {
+				if rvv.IsNil() {
+					rvv = reflect.New(vtype).Elem()
+				} else {
+					mapSet = false
+				}
+			} else if vtypeKind == reflect.Interface {
+				// not addressable, and thus not settable.
+				// we MUST create a settable/addressable variant
+				rvv2 := reflect.New(rvv.Type()).Elem()
+				if !rvv.IsNil() {
+					rvv2.Set(rvv)
+				}
+				rvv = rvv2
+			}
+			// else it is ~mutable, and we can just decode into it directly
+		} else if rvvImmut {
+			if !rvz.IsValid() {
+				rvz = reflect.New(vtype).Elem()
+			}
+			rvv = rvz
+		} else {
+			rvv = reflect.New(vtype).Elem()
+		}
+
+		// We MUST be done with the stringview of the key, before decoding the value
+		// so that we don't bastardize the reused byte array.
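+		// (kstrbs may alias the decDriver's internal buffer, which decoding the
+		// value below is free to overwrite; d.string makes a stable copy first.)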
+ if mapSet && ktypeIsString { + rvk.SetString(d.string(kstrbs)) + } + if valFn == nil { + valFn = d.cf.get(vtypeLo, true, true) + } + d.decodeValue(rvv, valFn, true) + // d.decodeValueFn(rvv, valFn) + if mapSet { + rv.SetMapIndex(rvk, rvv) + } + // if ktypeIsString { + // // keepAlive4StringView(kstrbs) // not needed, as reference is outside loop + // } + } + + dd.ReadMapEnd() +} + +// decNaked is used to keep track of the primitives decoded. +// Without it, we would have to decode each primitive and wrap it +// in an interface{}, causing an allocation. +// In this model, the primitives are decoded in a "pseudo-atomic" fashion, +// so we can rest assured that no other decoding happens while these +// primitives are being decoded. +// +// maps and arrays are not handled by this mechanism. +// However, RawExt is, and we accommodate for extensions that decode +// RawExt from DecodeNaked, but need to decode the value subsequently. +// kInterfaceNaked and swallow, which call DecodeNaked, handle this caveat. +// +// However, decNaked also keeps some arrays of default maps and slices +// used in DecodeNaked. This way, we can get a pointer to it +// without causing a new heap allocation. +// +// kInterfaceNaked will ensure that there is no allocation for the common +// uses. + +type decNakedContainers struct { + // array/stacks for reducing allocation + // keep arrays at the bottom? Chance is that they are not used much. + ia [arrayCacheLen]interface{} + ma [arrayCacheLen]map[interface{}]interface{} + na [arrayCacheLen]map[string]interface{} + sa [arrayCacheLen][]interface{} + + // ria [arrayCacheLen]reflect.Value // not needed, as we decode directly into &ia[n] + rma, rna, rsa [arrayCacheLen]reflect.Value // reflect.Value mapping to above +} + +func (n *decNakedContainers) init() { + for i := 0; i < arrayCacheLen; i++ { + // n.ria[i] = reflect.ValueOf(&(n.ia[i])).Elem() + n.rma[i] = reflect.ValueOf(&(n.ma[i])).Elem() + n.rna[i] = reflect.ValueOf(&(n.na[i])).Elem() + n.rsa[i] = reflect.ValueOf(&(n.sa[i])).Elem() + } +} + +type decNaked struct { + // r RawExt // used for RawExt, uint, []byte. + + // primitives below + u uint64 + i int64 + f float64 + l []byte + s string + + // ---- cpu cache line boundary? + t time.Time + b bool + + // state + v valueType + li, lm, ln, ls int8 + inited bool + + *decNakedContainers + + ru, ri, rf, rl, rs, rb, rt reflect.Value // mapping to the primitives above + + // _ [6]uint64 // padding // no padding - rt goes into next cache line +} + +func (n *decNaked) init() { + if n.inited { + return + } + n.ru = reflect.ValueOf(&n.u).Elem() + n.ri = reflect.ValueOf(&n.i).Elem() + n.rf = reflect.ValueOf(&n.f).Elem() + n.rl = reflect.ValueOf(&n.l).Elem() + n.rs = reflect.ValueOf(&n.s).Elem() + n.rt = reflect.ValueOf(&n.t).Elem() + n.rb = reflect.ValueOf(&n.b).Elem() + + n.inited = true + // n.rr[] = reflect.ValueOf(&n.) +} + +func (n *decNaked) initContainers() { + if n.decNakedContainers == nil { + n.decNakedContainers = new(decNakedContainers) + n.decNakedContainers.init() + } +} + +func (n *decNaked) reset() { + if n == nil { + return + } + n.li, n.lm, n.ln, n.ls = 0, 0, 0, 0 +} + +type rtid2rv struct { + rtid uintptr + rv reflect.Value +} + +// -------------- + +type decReaderSwitch struct { + rb bytesDecReader + // ---- cpu cache line boundary? 
+	ri *ioDecReader
+	mtr, str bool // whether maptype or slicetype are known types
+
+	be    bool // is binary encoding
+	bytes bool // is bytes reader
+	js    bool // is json handle
+	jsms  bool // is json handle, and MapKeyAsString
+	esep  bool // has elem separators
+}
+
+// TODO: Uncomment after mid-stack inlining enabled in go 1.11
+//
+// func (z *decReaderSwitch) unreadn1() {
+// 	if z.bytes {
+// 		z.rb.unreadn1()
+// 	} else {
+// 		z.ri.unreadn1()
+// 	}
+// }
+// func (z *decReaderSwitch) readx(n int) []byte {
+// 	if z.bytes {
+// 		return z.rb.readx(n)
+// 	}
+// 	return z.ri.readx(n)
+// }
+// func (z *decReaderSwitch) readb(s []byte) {
+// 	if z.bytes {
+// 		z.rb.readb(s)
+// 	} else {
+// 		z.ri.readb(s)
+// 	}
+// }
+// func (z *decReaderSwitch) readn1() uint8 {
+// 	if z.bytes {
+// 		return z.rb.readn1()
+// 	}
+// 	return z.ri.readn1()
+// }
+// func (z *decReaderSwitch) numread() int {
+// 	if z.bytes {
+// 		return z.rb.numread()
+// 	}
+// 	return z.ri.numread()
+// }
+// func (z *decReaderSwitch) track() {
+// 	if z.bytes {
+// 		z.rb.track()
+// 	} else {
+// 		z.ri.track()
+// 	}
+// }
+// func (z *decReaderSwitch) stopTrack() []byte {
+// 	if z.bytes {
+// 		return z.rb.stopTrack()
+// 	}
+// 	return z.ri.stopTrack()
+// }
+// func (z *decReaderSwitch) skip(accept *bitset256) (token byte) {
+// 	if z.bytes {
+// 		return z.rb.skip(accept)
+// 	}
+// 	return z.ri.skip(accept)
+// }
+// func (z *decReaderSwitch) readTo(in []byte, accept *bitset256) (out []byte) {
+// 	if z.bytes {
+// 		return z.rb.readTo(in, accept)
+// 	}
+// 	return z.ri.readTo(in, accept)
+// }
+// func (z *decReaderSwitch) readUntil(in []byte, stop byte) (out []byte) {
+// 	if z.bytes {
+// 		return z.rb.readUntil(in, stop)
+// 	}
+// 	return z.ri.readUntil(in, stop)
+// }
+
+// A Decoder reads and decodes an object from an input stream in the codec format.
+type Decoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the decReader inside the Decoder.
+	// Try to put things that go together to fit within a cache line (8 words).
+
+	d decDriver
+	// NOTE: Decoder shouldn't call its read methods,
+	// as the handler MAY need to do some coordination.
+	r  decReader
+	h  *BasicHandle
+	bi *bufioDecReader
+	// cache the mapTypeId and sliceTypeId for faster comparisons
+	mtid uintptr
+	stid uintptr
+
+	// ---- cpu cache line boundary?
+	decReaderSwitch
+
+	// ---- cpu cache line boundary?
+	codecFnPooler
+	// cr containerStateRecv
+	n   *decNaked
+	nsp *sync.Pool
+	err error
+
+	// ---- cpu cache line boundary?
+	b  [decScratchByteArrayLen]byte // scratch buffer, used by Decoder and xxxEncDrivers
+	is map[string]string            // used for interning strings
+
+	// padding - false sharing help // modify 232 if Decoder struct changes.
+	// _ [cacheLineSize - 232%cacheLineSize]byte
+}
+
+// NewDecoder returns a Decoder for decoding a stream of bytes from an io.Reader.
+//
+// For efficiency, users are encouraged to pass in a memory buffered reader
+// (e.g. bufio.Reader, bytes.Buffer).
+func NewDecoder(r io.Reader, h Handle) *Decoder {
+	d := newDecoder(h)
+	d.Reset(r)
+	return d
+}
+
+// NewDecoderBytes returns a Decoder which efficiently decodes directly
+// from a byte slice with zero copying.
+func NewDecoderBytes(in []byte, h Handle) *Decoder {
+	d := newDecoder(h)
+	d.ResetBytes(in)
+	return d
+}
+
+var defaultDecNaked decNaked
+
+func newDecoder(h Handle) *Decoder {
+	d := &Decoder{h: h.getBasicHandle(), err: errDecoderNotInitialized}
+	d.hh = h
+	d.be = h.isBinary()
+	// NOTE: do not initialize d.n here. 
It is lazily initialized in d.naked()
+	var jh *JsonHandle
+	jh, d.js = h.(*JsonHandle)
+	if d.js {
+		d.jsms = jh.MapKeyAsString
+	}
+	d.esep = d.hh.hasElemSeparators()
+	if d.h.InternString {
+		d.is = make(map[string]string, 32)
+	}
+	d.d = h.newDecDriver(d)
+	// d.cr, _ = d.d.(containerStateRecv)
+	return d
+}
+
+func (d *Decoder) resetCommon() {
+	d.n.reset()
+	d.d.reset()
+	d.err = nil
+	// reset all things which were cached from the Handle, but could change
+	d.mtid, d.stid = 0, 0
+	d.mtr, d.str = false, false
+	if d.h.MapType != nil {
+		d.mtid = rt2id(d.h.MapType)
+		d.mtr = fastpathAV.index(d.mtid) != -1
+	}
+	if d.h.SliceType != nil {
+		d.stid = rt2id(d.h.SliceType)
+		d.str = fastpathAV.index(d.stid) != -1
+	}
+}
+
+// Reset the Decoder with a new Reader to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) Reset(r io.Reader) {
+	if r == nil {
+		return
+	}
+	if d.bi == nil {
+		d.bi = new(bufioDecReader)
+	}
+	d.bytes = false
+	if d.h.ReaderBufferSize > 0 {
+		d.bi.buf = make([]byte, 0, d.h.ReaderBufferSize)
+		d.bi.reset(r)
+		d.r = d.bi
+	} else {
+		// d.ri.x = &d.b
+		// d.s = d.sa[:0]
+		if d.ri == nil {
+			d.ri = new(ioDecReader)
+		}
+		d.ri.reset(r)
+		d.r = d.ri
+	}
+	d.resetCommon()
+}
+
+// ResetBytes resets the Decoder with a new []byte to decode from,
+// clearing all state from last run(s).
+func (d *Decoder) ResetBytes(in []byte) {
+	if in == nil {
+		return
+	}
+	d.bytes = true
+	d.rb.reset(in)
+	d.r = &d.rb
+	d.resetCommon()
+}
+
+// naked must be called before each call to .DecodeNaked,
+// as each such call will use it.
+func (d *Decoder) naked() *decNaked {
+	if d.n == nil {
+		// consider one of:
+		// - get from sync.Pool (if GC is frequent, there's no value here)
+		// - new alloc (safest. only init'ed if a naked decode will be done)
+		// - field in Decoder (makes the Decoder struct very big)
+		// To support using a decoder where a DecodeNaked is not needed,
+		// we prefer #1 or #2.
+		// d.n = new(decNaked) // &d.nv // new(decNaked) // grab from a sync.Pool
+		// d.n.init()
+		var v interface{}
+		d.nsp, v = pool.decNaked()
+		d.n = v.(*decNaked)
+	}
+	return d.n
+}
+
+// Decode decodes the stream from reader and stores the result in the
+// value pointed to by v. v cannot be a nil pointer. v can also be
+// a reflect.Value of a pointer.
+//
+// Note that a pointer to a nil interface is not a nil pointer.
+// If you do not know what type of stream it is, pass in a pointer to a nil interface.
+// We will decode and store a value in that nil interface.
+//
+// Sample usages:
+//   // Decoding into a non-nil typed value
+//   var f float32
+//   err = codec.NewDecoder(r, handle).Decode(&f)
+//
+//   // Decoding into nil interface
+//   var v interface{}
+//   dec := codec.NewDecoder(r, handle)
+//   err = dec.Decode(&v)
+//
+// When decoding into a nil interface{}, we will decode into an appropriate value based
+// on the contents of the stream:
+//   - Numbers are decoded as float64, int64 or uint64.
+//   - Other values are decoded appropriately depending on the type:
+//     bool, string, []byte, time.Time, etc
+//   - Extensions are decoded as RawExt (if no ext function registered for the tag)
+// Configurations exist on the Handle to override defaults
+// (e.g. for MapType, SliceType and how to decode raw bytes).
+//
+// When decoding into a non-nil interface{} value, the mode of encoding is based on the 
type of the value. When a value is seen:
+//   - If an extension is registered for it, call that extension function
+//   - If it implements BinaryUnmarshaler, call its UnmarshalBinary(data []byte) error
+//   - Else decode it based on its reflect.Kind
+//
+// There are some special rules when decoding into containers (slice/array/map/struct).
+// Decode will typically use the stream contents to UPDATE the container.
+//   - A map can be decoded from a stream map, by updating matching keys.
+//   - A slice can be decoded from a stream array,
+//     by updating the first n elements, where n is the length of the stream array.
+//   - A slice can be decoded from a stream map, by decoding as if
+//     it contains a sequence of key-value pairs.
+//   - A struct can be decoded from a stream map, by updating matching fields.
+//   - A struct can be decoded from a stream array,
+//     by updating fields as they occur in the struct (by index).
+//
+// When decoding a stream map or array with length of 0 into a nil map or slice,
+// we reset the destination map or slice to a zero-length value.
+//
+// However, when decoding a stream nil, we reset the destination container
+// to its "zero" value (e.g. nil for slice/map, etc).
+//
+// Note: we allow nil values in the stream anywhere except for map keys.
+// A nil value in the encoded stream where a map key is expected is treated as an error.
+func (d *Decoder) Decode(v interface{}) (err error) {
+	defer d.deferred(&err)
+	d.MustDecode(v)
+	return
+}
+
+// MustDecode is like Decode, but panics if unable to Decode.
+// This provides insight into the code location that triggered the error.
+func (d *Decoder) MustDecode(v interface{}) {
+	// TODO: Top-level: ensure that v is a pointer and not nil.
+	if d.err != nil {
+		panic(d.err)
+	}
+	if d.d.TryDecodeAsNil() {
+		setZero(v)
+	} else {
+		d.decode(v)
+	}
+	d.alwaysAtEnd()
+	// xprintf(">>>>>>>> >>>>>>>> num decFns: %v\n", d.cf.sn)
+}
+
+func (d *Decoder) deferred(err1 *error) {
+	d.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(d, x, err1)
+			panicValToErr(d, x, &d.err)
+		}
+	}
+}
+
+func (d *Decoder) alwaysAtEnd() {
+	if d.n != nil {
+		// if n != nil, then nsp != nil (they are always set together)
+		d.nsp.Put(d.n)
+		d.n, d.nsp = nil, nil
+	}
+	d.codecFnPooler.alwaysAtEnd()
+}
+
+// // this is not a smart swallow, as it allocates objects and does unnecessary work. 
+// func (d *Decoder) swallowViaHammer() { +// var blank interface{} +// d.decodeValueNoFn(reflect.ValueOf(&blank).Elem()) +// } + +func (d *Decoder) swallow() { + // smarter decode that just swallows the content + dd := d.d + if dd.TryDecodeAsNil() { + return + } + elemsep := d.esep + switch dd.ContainerType() { + case valueTypeMap: + containerLen := dd.ReadMapStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + // if clenGtEqualZero {if j >= containerLen {break} } else if dd.CheckBreak() {break} + if elemsep { + dd.ReadMapElemKey() + } + d.swallow() + if elemsep { + dd.ReadMapElemValue() + } + d.swallow() + } + dd.ReadMapEnd() + case valueTypeArray: + containerLen := dd.ReadArrayStart() + hasLen := containerLen >= 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if elemsep { + dd.ReadArrayElem() + } + d.swallow() + } + dd.ReadArrayEnd() + case valueTypeBytes: + dd.DecodeBytes(d.b[:], true) + case valueTypeString: + dd.DecodeStringAsBytes() + default: + // these are all primitives, which we can get from decodeNaked + // if RawExt using Value, complete the processing. + n := d.naked() + dd.DecodeNaked() + if n.v == valueTypeExt && n.l == nil { + n.initContainers() + if n.li < arrayCacheLen { + n.ia[n.li] = nil + n.li++ + d.decode(&n.ia[n.li-1]) + n.ia[n.li-1] = nil + n.li-- + } else { + var v2 interface{} + d.decode(&v2) + } + } + } +} + +func setZero(iv interface{}) { + if iv == nil || definitelyNil(iv) { + return + } + var canDecode bool + switch v := iv.(type) { + case *string: + *v = "" + case *bool: + *v = false + case *int: + *v = 0 + case *int8: + *v = 0 + case *int16: + *v = 0 + case *int32: + *v = 0 + case *int64: + *v = 0 + case *uint: + *v = 0 + case *uint8: + *v = 0 + case *uint16: + *v = 0 + case *uint32: + *v = 0 + case *uint64: + *v = 0 + case *float32: + *v = 0 + case *float64: + *v = 0 + case *[]uint8: + *v = nil + case *Raw: + *v = nil + case *time.Time: + *v = time.Time{} + case reflect.Value: + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + default: + if !fastpathDecodeSetZeroTypeSwitch(iv) { + v := reflect.ValueOf(iv) + if v, canDecode = isDecodeable(v); canDecode && v.CanSet() { + v.Set(reflect.Zero(v.Type())) + } // TODO: else drain if chan, clear if map, set all to nil if slice??? + } + } +} + +func (d *Decoder) decode(iv interface{}) { + // check nil and interfaces explicitly, + // so that type switches just have a run of constant non-interface types. 
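+	// (a switch over only concrete types dispatches cheaply, while interface
+	// cases would each need a method-set satisfaction check at runtime.)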
+ if iv == nil { + d.errorstr(errstrCannotDecodeIntoNil) + return + } + if v, ok := iv.(Selfer); ok { + v.CodecDecodeSelf(d) + return + } + + switch v := iv.(type) { + // case nil: + // case Selfer: + + case reflect.Value: + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, true) + + case *string: + *v = d.d.DecodeString() + case *bool: + *v = d.d.DecodeBool() + case *int: + *v = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) + case *int8: + *v = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) + case *int16: + *v = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) + case *int32: + *v = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) + case *int64: + *v = d.d.DecodeInt64() + case *uint: + *v = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) + case *uint8: + *v = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) + case *uint16: + *v = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) + case *uint32: + *v = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) + case *uint64: + *v = d.d.DecodeUint64() + case *float32: + f64 := d.d.DecodeFloat64() + if chkOvf.Float32(f64) { + d.errorf("float32 overflow: %v", f64) + } + *v = float32(f64) + case *float64: + *v = d.d.DecodeFloat64() + case *[]uint8: + *v = d.d.DecodeBytes(*v, false) + case []uint8: + b := d.d.DecodeBytes(v, false) + if !(len(b) > 0 && len(b) == len(v) && &b[0] == &v[0]) { + copy(v, b) + } + case *time.Time: + *v = d.d.DecodeTime() + case *Raw: + *v = d.rawBytes() + + case *interface{}: + d.decodeValue(reflect.ValueOf(iv).Elem(), nil, true) + // d.decodeValueNotNil(reflect.ValueOf(iv).Elem()) + + default: + if !fastpathDecodeTypeSwitch(iv, d) { + v := reflect.ValueOf(iv) + v = d.ensureDecodeable(v) + d.decodeValue(v, nil, false) + // d.decodeValueFallback(v) + } + } +} + +func (d *Decoder) decodeValue(rv reflect.Value, fn *codecFn, chkAll bool) { + // If stream is not containing a nil value, then we can deref to the base + // non-pointer value, and decode into that. + var rvp reflect.Value + var rvpValid bool + if rv.Kind() == reflect.Ptr { + rvpValid = true + for { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + rvp = rv + rv = rv.Elem() + if rv.Kind() != reflect.Ptr { + break + } + } + } + + if fn == nil { + // always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer + fn = d.cfer().get(rv.Type(), chkAll, true) // chkAll, chkAll) + } + if fn.i.addrD { + if rvpValid { + fn.fd(d, &fn.i, rvp) + } else if rv.CanAddr() { + fn.fd(d, &fn.i, rv.Addr()) + } else if !fn.i.addrF { + fn.fd(d, &fn.i, rv) + } else { + d.errorf("cannot decode into a non-pointer value") + } + } else { + fn.fd(d, &fn.i, rv) + } + // return rv +} + +func (d *Decoder) structFieldNotFound(index int, rvkencname string) { + // NOTE: rvkencname may be a stringView, so don't pass it to another function. 
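+	// (a stringView aliases a byte buffer that may be reused by the next read;
+	// d.string must be used wherever a stable copy is required.)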
+	if d.h.ErrorIfNoField {
+		if index >= 0 {
+			d.errorf("no matching struct field found when decoding stream array at index %v", index)
+			return
+		} else if rvkencname != "" {
+			d.errorf("no matching struct field found when decoding stream map with key " + rvkencname)
+			return
+		}
+	}
+	d.swallow()
+}
+
+func (d *Decoder) arrayCannotExpand(sliceLen, streamLen int) {
+	if d.h.ErrorIfNoArrayExpand {
+		d.errorf("cannot expand array len during decode from %v to %v", sliceLen, streamLen)
+	}
+}
+
+func isDecodeable(rv reflect.Value) (rv2 reflect.Value, canDecode bool) {
+	switch rv.Kind() {
+	case reflect.Array:
+		return rv, true
+	case reflect.Ptr:
+		if !rv.IsNil() {
+			return rv.Elem(), true
+		}
+	case reflect.Slice, reflect.Chan, reflect.Map:
+		if !rv.IsNil() {
+			return rv, true
+		}
+	}
+	return
+}
+
+func (d *Decoder) ensureDecodeable(rv reflect.Value) (rv2 reflect.Value) {
+	// decode can take any reflect.Value that is an inherently addressable value, i.e.
+	// - array
+	// - non-nil chan (we will SEND to it)
+	// - non-nil slice (we will set its elements)
+	// - non-nil map (we will put into it)
+	// - non-nil pointer (we can "update" it)
+	rv2, canDecode := isDecodeable(rv)
+	if canDecode {
+		return
+	}
+	if !rv.IsValid() {
+		d.errorstr(errstrCannotDecodeIntoNil)
+		return
+	}
+	if !rv.CanInterface() {
+		d.errorf("cannot decode into a value without an interface: %v", rv)
+		return
+	}
+	rvi := rv2i(rv)
+	rvk := rv.Kind()
+	d.errorf("cannot decode into value of kind: %v, type: %T, %v", rvk, rvi, rvi)
+	return
+}
+
+// Possibly get an interned version of a string
+//
+// This should mostly be used for map keys, where the key type is string.
+// This is because keys of a map/struct are typically reused across many objects.
+func (d *Decoder) string(v []byte) (s string) {
+	if d.is == nil {
+		return string(v) // don't return stringView, as we need a real string here.
+	}
+	s, ok := d.is[string(v)] // no allocation here, per go implementation
+	if !ok {
+		s = string(v) // new allocation here
+		d.is[s] = s
+	}
+	return s
+}
+
+// nextValueBytes returns the next value in the stream as a set of bytes.
+func (d *Decoder) nextValueBytes() (bs []byte) {
+	d.d.uncacheRead()
+	d.r.track()
+	d.swallow()
+	bs = d.r.stopTrack()
+	return
+}
+
+func (d *Decoder) rawBytes() []byte {
+	// ensure that this is not a view into the bytes
+	// i.e. make new copy always.
+	bs := d.nextValueBytes()
+	bs2 := make([]byte, len(bs))
+	copy(bs2, bs)
+	return bs2
+}
+
+func (d *Decoder) wrapErrstr(v interface{}, err *error) {
+	*err = fmt.Errorf("%s decode error [pos %d]: %v", d.hh.Name(), d.r.numread(), v)
+}
+
+// --------------------------------------------------
+
+// decSliceHelper assists when decoding into a slice, from a map or an array in the stream.
+// A slice can be set from a map or array in stream. This supports the MapBySlice interface. 
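+//
+// For example, when the stream holds a map and the target is a slice, the
+// helper reports a length of 2 * mapLen, and ElemContainerState alternates
+// between key and value states, so the slice is filled as [k1, v1, k2, v2, ...].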
+type decSliceHelper struct { + d *Decoder + // ct valueType + array bool +} + +func (d *Decoder) decSliceHelperStart() (x decSliceHelper, clen int) { + dd := d.d + ctyp := dd.ContainerType() + switch ctyp { + case valueTypeArray: + x.array = true + clen = dd.ReadArrayStart() + case valueTypeMap: + clen = dd.ReadMapStart() * 2 + default: + d.errorf("only encoded map or array can be decoded into a slice (%d)", ctyp) + } + // x.ct = ctyp + x.d = d + return +} + +func (x decSliceHelper) End() { + if x.array { + x.d.d.ReadArrayEnd() + } else { + x.d.d.ReadMapEnd() + } +} + +func (x decSliceHelper) ElemContainerState(index int) { + if x.array { + x.d.d.ReadArrayElem() + } else if index%2 == 0 { + x.d.d.ReadMapElemKey() + } else { + x.d.d.ReadMapElemValue() + } +} + +func decByteSlice(r decReader, clen, maxInitLen int, bs []byte) (bsOut []byte) { + if clen == 0 { + return zeroByteSlice + } + if len(bs) == clen { + bsOut = bs + r.readb(bsOut) + } else if cap(bs) >= clen { + bsOut = bs[:clen] + r.readb(bsOut) + } else { + // bsOut = make([]byte, clen) + len2 := decInferLen(clen, maxInitLen, 1) + bsOut = make([]byte, len2) + r.readb(bsOut) + for len2 < clen { + len3 := decInferLen(clen-len2, maxInitLen, 1) + bs3 := bsOut + bsOut = make([]byte, len2+len3) + copy(bsOut, bs3) + r.readb(bsOut[len2:]) + len2 += len3 + } + } + return +} + +func detachZeroCopyBytes(isBytesReader bool, dest []byte, in []byte) (out []byte) { + if xlen := len(in); xlen > 0 { + if isBytesReader || xlen <= scratchByteArrayLen { + if cap(dest) >= xlen { + out = dest[:xlen] + } else { + out = make([]byte, xlen) + } + copy(out, in) + return + } + } + return in +} + +// decInferLen will infer a sensible length, given the following: +// - clen: length wanted. +// - maxlen: max length to be returned. +// if <= 0, it is unset, and we infer it based on the unit size +// - unit: number of bytes for each element of the collection +func decInferLen(clen, maxlen, unit int) (rvlen int) { + // handle when maxlen is not set i.e. <= 0 + if clen <= 0 { + return + } + if unit == 0 { + return clen + } + if maxlen <= 0 { + // no maxlen defined. Use maximum of 256K memory, with a floor of 4K items. 
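+		// e.g. unit==1 (bytes) caps at 256K entries, while unit >= 64 caps at
+		// 4K entries, since 256*1024/unit would then be at or below the 4K floor.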
+ // maxlen = 256 * 1024 / unit + // if maxlen < (4 * 1024) { + // maxlen = 4 * 1024 + // } + if unit < (256 / 4) { + maxlen = 256 * 1024 / unit + } else { + maxlen = 4 * 1024 + } + } + if clen > maxlen { + rvlen = maxlen + } else { + rvlen = clen + } + return +} + +func expandSliceRV(s reflect.Value, st reflect.Type, canChange bool, stElemSize, num, slen, scap int) ( + s2 reflect.Value, scap2 int, changed bool, err string) { + l1 := slen + num // new slice length + if l1 < slen { + err = errmsgExpandSliceOverflow + return + } + if l1 <= scap { + if s.CanSet() { + s.SetLen(l1) + } else if canChange { + s2 = s.Slice(0, l1) + scap2 = scap + changed = true + } else { + err = errmsgExpandSliceCannotChange + return + } + return + } + if !canChange { + err = errmsgExpandSliceCannotChange + return + } + scap2 = growCap(scap, stElemSize, num) + s2 = reflect.MakeSlice(st, l1, scap2) + changed = true + reflect.Copy(s2, s) + return +} + +func decReadFull(r io.Reader, bs []byte) (n int, err error) { + var nn int + for n < len(bs) && err == nil { + nn, err = r.Read(bs[n:]) + if nn > 0 { + if err == io.EOF { + // leave EOF for next time + err = nil + } + n += nn + } + } + + // do not do this - it serves no purpose + // if n != len(bs) && err == io.EOF { err = io.ErrUnexpectedEOF } + return +} diff --git a/vendor/github.com/ugorji/go/codec/encode.go b/vendor/github.com/ugorji/go/codec/encode.go new file mode 100644 index 0000000000..ef4652945f --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/encode.go @@ -0,0 +1,1375 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "sync" + "time" +) + +const defEncByteBufSize = 1 << 6 // 4:16, 6:64, 8:256, 10:1024 + +var errEncoderNotInitialized = errors.New("Encoder not initialized") + +// encWriter abstracts writing to a byte array or to an io.Writer. 
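+//
+// The two implementations in this file are ioEncWriter (backed by an io.Writer)
+// and bytesEncAppender (backed by an in-memory byte slice).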
+type encWriter interface { + writeb([]byte) + writestr(string) + writen1(byte) + writen2(byte, byte) + atEndOfEncode() +} + +// encDriver abstracts the actual codec (binc vs msgpack, etc) +type encDriver interface { + EncodeNil() + EncodeInt(i int64) + EncodeUint(i uint64) + EncodeBool(b bool) + EncodeFloat32(f float32) + EncodeFloat64(f float64) + // encodeExtPreamble(xtag byte, length int) + EncodeRawExt(re *RawExt, e *Encoder) + EncodeExt(v interface{}, xtag uint64, ext Ext, e *Encoder) + EncodeString(c charEncoding, v string) + // EncodeSymbol(v string) + EncodeStringBytes(c charEncoding, v []byte) + EncodeTime(time.Time) + //encBignum(f *big.Int) + //encStringRunes(c charEncoding, v []rune) + WriteArrayStart(length int) + WriteArrayElem() + WriteArrayEnd() + WriteMapStart(length int) + WriteMapElemKey() + WriteMapElemValue() + WriteMapEnd() + + reset() + atEndOfEncode() +} + +type ioEncStringWriter interface { + WriteString(s string) (n int, err error) +} + +type encDriverAsis interface { + EncodeAsis(v []byte) +} + +type encDriverNoopContainerWriter struct{} + +func (encDriverNoopContainerWriter) WriteArrayStart(length int) {} +func (encDriverNoopContainerWriter) WriteArrayElem() {} +func (encDriverNoopContainerWriter) WriteArrayEnd() {} +func (encDriverNoopContainerWriter) WriteMapStart(length int) {} +func (encDriverNoopContainerWriter) WriteMapElemKey() {} +func (encDriverNoopContainerWriter) WriteMapElemValue() {} +func (encDriverNoopContainerWriter) WriteMapEnd() {} +func (encDriverNoopContainerWriter) atEndOfEncode() {} + +type encDriverTrackContainerWriter struct { + c containerState +} + +func (e *encDriverTrackContainerWriter) WriteArrayStart(length int) { e.c = containerArrayStart } +func (e *encDriverTrackContainerWriter) WriteArrayElem() { e.c = containerArrayElem } +func (e *encDriverTrackContainerWriter) WriteArrayEnd() { e.c = containerArrayEnd } +func (e *encDriverTrackContainerWriter) WriteMapStart(length int) { e.c = containerMapStart } +func (e *encDriverTrackContainerWriter) WriteMapElemKey() { e.c = containerMapKey } +func (e *encDriverTrackContainerWriter) WriteMapElemValue() { e.c = containerMapValue } +func (e *encDriverTrackContainerWriter) WriteMapEnd() { e.c = containerMapEnd } +func (e *encDriverTrackContainerWriter) atEndOfEncode() {} + +// type ioEncWriterWriter interface { +// WriteByte(c byte) error +// WriteString(s string) (n int, err error) +// Write(p []byte) (n int, err error) +// } + +// EncodeOptions captures configuration options during encode. +type EncodeOptions struct { + // WriterBufferSize is the size of the buffer used when writing. + // + // if > 0, we use a smart buffer internally for performance purposes. + WriterBufferSize int + + // ChanRecvTimeout is the timeout used when selecting from a chan. + // + // Configuring this controls how we receive from a chan during the encoding process. + // - If ==0, we only consume the elements currently available in the chan. + // - if <0, we consume until the chan is closed. + // - If >0, we consume until this timeout. + ChanRecvTimeout time.Duration + + // StructToArray specifies to encode a struct as an array, and not as a map + StructToArray bool + + // Canonical representation means that encoding a value will always result in the same + // sequence of bytes. + // + // This only affects maps, as the iteration order for maps is random. 
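+	//
+	// Canonical encoding is useful wherever the encoded bytes are compared,
+	// hashed or cached, since equal values then produce equal byte sequences.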
+	//
+	// The implementation MAY use the natural sort order for the map keys if possible:
+	//
+	// - If there is a natural sort order (i.e. for number, bool, string or []byte keys),
+	//   then the map keys are first sorted in natural order and then written
+	//   with corresponding map values to the stream.
+	// - If there is no natural sort order, then the map keys will first be
+	//   encoded into []byte, and then sorted,
+	//   before writing the sorted keys and the corresponding map values to the stream.
+	//
+	Canonical bool
+
+	// CheckCircularRef controls whether we check for circular references
+	// and error fast during an encode.
+	//
+	// If enabled, an error is received if a pointer to a struct
+	// references itself either directly or through one of its fields (iteratively).
+	//
+	// This is opt-in, as there may be a performance hit to checking circular references.
+	CheckCircularRef bool
+
+	// RecursiveEmptyCheck controls whether we descend into interfaces, structs and pointers
+	// when checking if a value is empty.
+	//
+	// Note that this may make OmitEmpty more expensive, as it incurs a lot more reflect calls.
+	RecursiveEmptyCheck bool
+
+	// Raw controls whether we encode Raw values.
+	// This is a "dangerous" option and must be explicitly set.
+	// If set, we blindly encode Raw values as-is, without checking
+	// if they are a correct representation of a value in that format.
+	// If unset, we error out.
+	Raw bool
+
+	// // AsSymbols defines what should be encoded as symbols.
+	// //
+	// // Encoding as symbols can reduce the encoded size significantly.
+	// //
+	// // However, during decoding, each string to be encoded as a symbol must
+	// // be checked to see if it has been seen before. Consequently, encoding time
+	// // will increase if using symbols, because string comparisons have a clear cost.
+	// //
+	// // Sample values:
+	// //   AsSymbolNone
+	// //   AsSymbolAll
+	// //   AsSymbolMapStringKeys
+	// //   AsSymbolMapStringKeysFlag | AsSymbolStructFieldNameFlag
+	// AsSymbols AsSymbolFlag
+}
+
+// ---------------------------------------------
+
+// ioEncWriter implements encWriter and can write to an io.Writer implementation
+type ioEncWriter struct {
+	w  io.Writer
+	ww io.Writer
+	bw io.ByteWriter
+	sw ioEncStringWriter
+	fw ioFlusher
+	b  [8]byte
+}
+
+func (z *ioEncWriter) WriteByte(b byte) (err error) {
+	z.b[0] = b
+	_, err = z.w.Write(z.b[:1])
+	return
+}
+
+func (z *ioEncWriter) WriteString(s string) (n int, err error) {
+	return z.w.Write(bytesView(s))
+}
+
+func (z *ioEncWriter) writeb(bs []byte) {
+	if _, err := z.ww.Write(bs); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writestr(s string) {
+	if _, err := z.sw.WriteString(s); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writen1(b byte) {
+	if err := z.bw.WriteByte(b); err != nil {
+		panic(err)
+	}
+}
+
+func (z *ioEncWriter) writen2(b1, b2 byte) {
+	var err error
+	if err = z.bw.WriteByte(b1); err == nil {
+		if err = z.bw.WriteByte(b2); err == nil {
+			return
+		}
+	}
+	panic(err)
+}
+
+// func (z *ioEncWriter) writen5(b1, b2, b3, b4, b5 byte) {
+// 	z.b[0], z.b[1], z.b[2], z.b[3], z.b[4] = b1, b2, b3, b4, b5
+// 	if _, err := z.ww.Write(z.b[:5]); err != nil {
+// 		panic(err)
+// 	}
+// }
+
+func (z *ioEncWriter) atEndOfEncode() {
+	if z.fw != nil {
+		if err := z.fw.Flush(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// ---------------------------------------------
+
+// bytesEncAppender implements encWriter and can write to a byte slice. 
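+//
+// It accumulates into an internal slice and publishes the result to *out only
+// at atEndOfEncode, e.g.:
+//
+//	var out []byte
+//	var z bytesEncAppender
+//	z.reset(nil, &out)
+//	z.writestr("abc")
+//	z.atEndOfEncode() // out == []byte("abc")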
+type bytesEncAppender struct { + b []byte + out *[]byte +} + +func (z *bytesEncAppender) writeb(s []byte) { + z.b = append(z.b, s...) +} +func (z *bytesEncAppender) writestr(s string) { + z.b = append(z.b, s...) +} +func (z *bytesEncAppender) writen1(b1 byte) { + z.b = append(z.b, b1) +} +func (z *bytesEncAppender) writen2(b1, b2 byte) { + z.b = append(z.b, b1, b2) +} +func (z *bytesEncAppender) atEndOfEncode() { + *(z.out) = z.b +} +func (z *bytesEncAppender) reset(in []byte, out *[]byte) { + z.b = in[:0] + z.out = out +} + +// --------------------------------------------- + +func (e *Encoder) rawExt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeRawExt(rv2i(rv).(*RawExt), e) +} + +func (e *Encoder) ext(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeExt(rv2i(rv), f.xfTag, f.xfFn, e) +} + +func (e *Encoder) selferMarshal(f *codecFnInfo, rv reflect.Value) { + rv2i(rv).(Selfer).CodecEncodeSelf(e) +} + +func (e *Encoder) binaryMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.BinaryMarshaler).MarshalBinary() + e.marshal(bs, fnerr, false, cRAW) +} + +func (e *Encoder) textMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(encoding.TextMarshaler).MarshalText() + e.marshal(bs, fnerr, false, cUTF8) +} + +func (e *Encoder) jsonMarshal(f *codecFnInfo, rv reflect.Value) { + bs, fnerr := rv2i(rv).(jsonMarshaler).MarshalJSON() + e.marshal(bs, fnerr, true, cUTF8) +} + +func (e *Encoder) raw(f *codecFnInfo, rv reflect.Value) { + e.rawBytes(rv2i(rv).(Raw)) +} + +func (e *Encoder) kInvalid(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeNil() +} + +func (e *Encoder) kErr(f *codecFnInfo, rv reflect.Value) { + e.errorf("unsupported kind %s, for %#v", rv.Kind(), rv) +} + +func (e *Encoder) kSlice(f *codecFnInfo, rv reflect.Value) { + ti := f.ti + ee := e.e + // array may be non-addressable, so we have to manage with care + // (don't call rv.Bytes, rv.Slice, etc). + // E.g. type struct S{B [2]byte}; + // Encode(S{}) will bomb on "panic: slice of unaddressable array". + if f.seq != seqTypeArray { + if rv.IsNil() { + ee.EncodeNil() + return + } + // If in this method, then there was no extension function defined. + // So it's okay to treat as []byte. + if ti.rtid == uint8SliceTypId { + ee.EncodeStringBytes(cRAW, rv.Bytes()) + return + } + } + if f.seq == seqTypeChan && ti.chandir&uint8(reflect.RecvDir) == 0 { + e.errorf("send-only channel cannot be encoded") + } + elemsep := e.esep + rtelem := ti.elem + rtelemIsByte := uint8TypId == rt2id(rtelem) // NOT rtelem.Kind() == reflect.Uint8 + var l int + // if a slice, array or chan of bytes, treat specially + if rtelemIsByte { + switch f.seq { + case seqTypeSlice: + ee.EncodeStringBytes(cRAW, rv.Bytes()) + case seqTypeArray: + l = rv.Len() + if rv.CanAddr() { + ee.EncodeStringBytes(cRAW, rv.Slice(0, l).Bytes()) + } else { + var bs []byte + if l <= cap(e.b) { + bs = e.b[:l] + } else { + bs = make([]byte, l) + } + reflect.Copy(reflect.ValueOf(bs), rv) + ee.EncodeStringBytes(cRAW, bs) + } + case seqTypeChan: + // do not use range, so that the number of elements encoded + // does not change, and encoding does not hang waiting on someone to close chan. + // for b := range rv2i(rv).(<-chan byte) { bs = append(bs, b) } + // ch := rv2i(rv).(<-chan byte) // fix error - that this is a chan byte, not a <-chan byte. 
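+			// The chan is drained below per ChanRecvTimeout: ==0 takes only what
+			// is immediately available, >0 reads until the timer fires, and <0
+			// reads until the chan is closed.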
+ + if rv.IsNil() { + ee.EncodeNil() + break + } + bs := e.b[:0] + irv := rv2i(rv) + ch, ok := irv.(<-chan byte) + if !ok { + ch = irv.(chan byte) + } + + L1: + switch timeout := e.h.ChanRecvTimeout; { + case timeout == 0: // only consume available + for { + select { + case b := <-ch: + bs = append(bs, b) + default: + break L1 + } + } + case timeout > 0: // consume until timeout + tt := time.NewTimer(timeout) + for { + select { + case b := <-ch: + bs = append(bs, b) + case <-tt.C: + // close(tt.C) + break L1 + } + } + default: // consume until close + for b := range ch { + bs = append(bs, b) + } + } + + ee.EncodeStringBytes(cRAW, bs) + } + return + } + + // if chan, consume chan into a slice, and work off that slice. + var rvcs reflect.Value + if f.seq == seqTypeChan { + rvcs = reflect.Zero(reflect.SliceOf(rtelem)) + timeout := e.h.ChanRecvTimeout + if timeout < 0 { // consume until close + for { + recv, recvOk := rv.Recv() + if !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } else { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: rv} + if timeout == 0 { + cases[1] = reflect.SelectCase{Dir: reflect.SelectDefault} + } else { + tt := time.NewTimer(timeout) + cases[1] = reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(tt.C)} + } + for { + chosen, recv, recvOk := reflect.Select(cases) + if chosen == 1 || !recvOk { + break + } + rvcs = reflect.Append(rvcs, recv) + } + } + rv = rvcs // TODO: ensure this doesn't mess up anywhere that rv of kind chan is expected + } + + l = rv.Len() + if ti.mbs { + if l%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", l) + return + } + ee.WriteMapStart(l / 2) + } else { + ee.WriteArrayStart(l) + } + + if l > 0 { + var fn *codecFn + for rtelem.Kind() == reflect.Ptr { + rtelem = rtelem.Elem() + } + // if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
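+		// (For example, a []interface{} holding mixed concrete types must
+		// resolve each element's encoder per element, inside the loop below
+		// via encodeValue, rather than once up front.)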
+		if rtelem.Kind() != reflect.Interface {
+			fn = e.cfer().get(rtelem, true, true)
+		}
+		for j := 0; j < l; j++ {
+			if elemsep {
+				if ti.mbs {
+					if j%2 == 0 {
+						ee.WriteMapElemKey()
+					} else {
+						ee.WriteMapElemValue()
+					}
+				} else {
+					ee.WriteArrayElem()
+				}
+			}
+			e.encodeValue(rv.Index(j), fn, true)
+		}
+	}
+
+	if ti.mbs {
+		ee.WriteMapEnd()
+	} else {
+		ee.WriteArrayEnd()
+	}
+}
+
+func (e *Encoder) kStructNoOmitempty(f *codecFnInfo, rv reflect.Value) {
+	fti := f.ti
+	elemsep := e.esep
+	tisfi := fti.sfiSrc
+	toMap := !(fti.toArray || e.h.StructToArray)
+	if toMap {
+		tisfi = fti.sfiSort
+	}
+	ee := e.e
+
+	sfn := structFieldNode{v: rv, update: false}
+	if toMap {
+		ee.WriteMapStart(len(tisfi))
+		if elemsep {
+			for _, si := range tisfi {
+				ee.WriteMapElemKey()
+				// ee.EncodeString(cUTF8, si.encName)
+				encStructFieldKey(ee, fti.keyType, si.encName)
+				ee.WriteMapElemValue()
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		} else {
+			for _, si := range tisfi {
+				// ee.EncodeString(cUTF8, si.encName)
+				encStructFieldKey(ee, fti.keyType, si.encName)
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		}
+		ee.WriteMapEnd()
+	} else {
+		ee.WriteArrayStart(len(tisfi))
+		if elemsep {
+			for _, si := range tisfi {
+				ee.WriteArrayElem()
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		} else {
+			for _, si := range tisfi {
+				e.encodeValue(sfn.field(si), nil, true)
+			}
+		}
+		ee.WriteArrayEnd()
+	}
+}
+
+func encStructFieldKey(ee encDriver, keyType valueType, s string) {
+	var m must
+
+	// use if-else-if, not switch (which compiles to binary-search);
+	// since keyType is typically valueTypeString, branch prediction is pretty good.
+
+	if keyType == valueTypeString {
+		ee.EncodeString(cUTF8, s)
+	} else if keyType == valueTypeInt {
+		ee.EncodeInt(m.Int(strconv.ParseInt(s, 10, 64)))
+	} else if keyType == valueTypeUint {
+		ee.EncodeUint(m.Uint(strconv.ParseUint(s, 10, 64)))
+	} else if keyType == valueTypeFloat {
+		ee.EncodeFloat64(m.Float(strconv.ParseFloat(s, 64)))
+	} else {
+		ee.EncodeString(cUTF8, s)
+	}
+}
+
+func (e *Encoder) kStruct(f *codecFnInfo, rv reflect.Value) {
+	fti := f.ti
+	elemsep := e.esep
+	tisfi := fti.sfiSrc
+	toMap := !(fti.toArray || e.h.StructToArray)
+	// if toMap, use the sorted array. If toArray, use unsorted array (to match sequence in struct)
+	if toMap {
+		tisfi = fti.sfiSort
+	}
+	newlen := len(fti.sfiSort)
+	ee := e.e
+
+	// Use sync.Pool to reduce allocating slices unnecessarily.
+	// The cost of sync.Pool is less than the cost of new allocation.
+	//
+	// Each element of the array pools one of encStructPool(8|16|32|64|128).
+	// It allows the re-use of slices up to 128 in length.
+	// A performance cost of encoding structs was collecting
+	// which values were empty and should be omitted.
+	// We needed slices of reflect.Value and string to collect them.
+	// This shared pool reduces the amount of unnecessary creation we do.
+	// The cost is that of locking sometimes, but sync.Pool is efficient
+	// enough to reduce thread contention.
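+	// The tiers below reuse fixed-size backing arrays from [8]stringRv up to
+	// [128]stringRv; only structs with more than 128 encodable fields fall
+	// back to allocating a fresh slice with make.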
+ + var spool *sync.Pool + var poolv interface{} + var fkvs []stringRv + // fmt.Printf(">>>>>>>>>>>>>> encode.kStruct: newlen: %d\n", newlen) + if newlen <= 8 { + spool, poolv = pool.stringRv8() + fkvs = poolv.(*[8]stringRv)[:newlen] + } else if newlen <= 16 { + spool, poolv = pool.stringRv16() + fkvs = poolv.(*[16]stringRv)[:newlen] + } else if newlen <= 32 { + spool, poolv = pool.stringRv32() + fkvs = poolv.(*[32]stringRv)[:newlen] + } else if newlen <= 64 { + spool, poolv = pool.stringRv64() + fkvs = poolv.(*[64]stringRv)[:newlen] + } else if newlen <= 128 { + spool, poolv = pool.stringRv128() + fkvs = poolv.(*[128]stringRv)[:newlen] + } else { + fkvs = make([]stringRv, newlen) + } + + newlen = 0 + var kv stringRv + recur := e.h.RecursiveEmptyCheck + sfn := structFieldNode{v: rv, update: false} + for _, si := range tisfi { + // kv.r = si.field(rv, false) + kv.r = sfn.field(si) + if toMap { + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + continue + } + kv.v = si.encName + } else { + // use the zero value. + // if a reference or struct, set to nil (so you do not output too much) + if si.omitEmpty() && isEmptyValue(kv.r, e.h.TypeInfos, recur, recur) { + switch kv.r.Kind() { + case reflect.Struct, reflect.Interface, reflect.Ptr, reflect.Array, reflect.Map, reflect.Slice: + kv.r = reflect.Value{} //encode as nil + } + } + } + fkvs[newlen] = kv + newlen++ + } + + if toMap { + ee.WriteMapStart(newlen) + if elemsep { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + ee.WriteMapElemKey() + // ee.EncodeString(cUTF8, kv.v) + encStructFieldKey(ee, fti.keyType, kv.v) + ee.WriteMapElemValue() + e.encodeValue(kv.r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + kv = fkvs[j] + // ee.EncodeString(cUTF8, kv.v) + encStructFieldKey(ee, fti.keyType, kv.v) + e.encodeValue(kv.r, nil, true) + } + } + ee.WriteMapEnd() + } else { + ee.WriteArrayStart(newlen) + if elemsep { + for j := 0; j < newlen; j++ { + ee.WriteArrayElem() + e.encodeValue(fkvs[j].r, nil, true) + } + } else { + for j := 0; j < newlen; j++ { + e.encodeValue(fkvs[j].r, nil, true) + } + } + ee.WriteArrayEnd() + } + + // do not use defer. Instead, use explicit pool return at end of function. + // defer has a cost we are trying to avoid. + // If there is a panic and these slices are not returned, it is ok. + if spool != nil { + spool.Put(poolv) + } +} + +func (e *Encoder) kMap(f *codecFnInfo, rv reflect.Value) { + ee := e.e + if rv.IsNil() { + ee.EncodeNil() + return + } + + l := rv.Len() + ee.WriteMapStart(l) + elemsep := e.esep + if l == 0 { + ee.WriteMapEnd() + return + } + // var asSymbols bool + // determine the underlying key and val encFn's for the map. + // This eliminates some work which is done for each loop iteration i.e. + // rv.Type(), ref.ValueOf(rt).Pointer(), then check map/list for fn. + // + // However, if kind is reflect.Interface, do not pre-determine the + // encoding type, because preEncodeValue may break it down to + // a concrete type and kInterface will bomb. 
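+	// (When e.h.Canonical is set, the collected keys are handed to
+	// kMapCanonical below, which sorts them before writing.)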
+ var keyFn, valFn *codecFn + ti := f.ti + rtkey0 := ti.key + rtkey := rtkey0 + rtval0 := ti.elem + rtval := rtval0 + // rtkeyid := rt2id(rtkey0) + for rtval.Kind() == reflect.Ptr { + rtval = rtval.Elem() + } + if rtval.Kind() != reflect.Interface { + valFn = e.cfer().get(rtval, true, true) + } + mks := rv.MapKeys() + + if e.h.Canonical { + e.kMapCanonical(rtkey, rv, mks, valFn) + ee.WriteMapEnd() + return + } + + var keyTypeIsString = stringTypId == rt2id(rtkey0) // rtkeyid + if !keyTypeIsString { + for rtkey.Kind() == reflect.Ptr { + rtkey = rtkey.Elem() + } + if rtkey.Kind() != reflect.Interface { + // rtkeyid = rt2id(rtkey) + keyFn = e.cfer().get(rtkey, true, true) + } + } + + // for j, lmks := 0, len(mks); j < lmks; j++ { + for j := range mks { + if elemsep { + ee.WriteMapElemKey() + } + if keyTypeIsString { + ee.EncodeString(cUTF8, mks[j].String()) + } else { + e.encodeValue(mks[j], keyFn, true) + } + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mks[j]), valFn, true) + + } + ee.WriteMapEnd() +} + +func (e *Encoder) kMapCanonical(rtkey reflect.Type, rv reflect.Value, mks []reflect.Value, valFn *codecFn) { + ee := e.e + elemsep := e.esep + // we previously did out-of-band if an extension was registered. + // This is not necessary, as the natural kind is sufficient for ordering. + + switch rtkey.Kind() { + case reflect.Bool: + mksv := make([]boolRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Bool() + } + sort.Sort(boolRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeBool(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.String: + mksv := make([]stringRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.String() + } + sort.Sort(stringRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeString(cUTF8, mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint, reflect.Uintptr: + mksv := make([]uintRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Uint() + } + sort.Sort(uintRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeUint(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + mksv := make([]intRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Int() + } + sort.Sort(intRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeInt(mksv[i].v) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float32: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat32(float32(mksv[i].v)) + if elemsep { + ee.WriteMapElemValue() + } + e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true) + } + case reflect.Float64: + mksv := make([]floatRv, len(mks)) + for i, k := range mks { + v := &mksv[i] + v.r = k + v.v = k.Float() + } + sort.Sort(floatRvSlice(mksv)) + for i := range mksv { + if elemsep { + ee.WriteMapElemKey() + } + ee.EncodeFloat64(mksv[i].v) 
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+		}
+	case reflect.Struct:
+		if rv.Type() == timeTyp {
+			mksv := make([]timeRv, len(mks))
+			for i, k := range mks {
+				v := &mksv[i]
+				v.r = k
+				v.v = rv2i(k).(time.Time)
+			}
+			sort.Sort(timeRvSlice(mksv))
+			for i := range mksv {
+				if elemsep {
+					ee.WriteMapElemKey()
+				}
+				ee.EncodeTime(mksv[i].v)
+				if elemsep {
+					ee.WriteMapElemValue()
+				}
+				e.encodeValue(rv.MapIndex(mksv[i].r), valFn, true)
+			}
+			break
+		}
+		fallthrough
+	default:
+		// out-of-band
+		// first encode each key to a []byte, then sort them, then record
+		var mksv []byte = make([]byte, 0, len(mks)*16) // temporary byte slice for the encoding
+		e2 := NewEncoderBytes(&mksv, e.hh)
+		mksbv := make([]bytesRv, len(mks))
+		for i, k := range mks {
+			v := &mksbv[i]
+			l := len(mksv)
+			e2.MustEncode(k)
+			v.r = k
+			v.v = mksv[l:]
+		}
+		sort.Sort(bytesRvSlice(mksbv))
+		for j := range mksbv {
+			if elemsep {
+				ee.WriteMapElemKey()
+			}
+			e.asis(mksbv[j].v)
+			if elemsep {
+				ee.WriteMapElemValue()
+			}
+			e.encodeValue(rv.MapIndex(mksbv[j].r), valFn, true)
+		}
+	}
+}
+
+// // --------------------------------------------------
+
+type encWriterSwitch struct {
+	wi *ioEncWriter
+	// wb bytesEncWriter
+	wb   bytesEncAppender
+	wx   bool // if bytes, wx=true
+	esep bool // whether it has elem separators
+	isas bool // whether e.as != nil
+}
+
+// // TODO: Uncomment after mid-stack inlining enabled in go 1.11
+
+// func (z *encWriterSwitch) writeb(s []byte) {
+// 	if z.wx {
+// 		z.wb.writeb(s)
+// 	} else {
+// 		z.wi.writeb(s)
+// 	}
+// }
+// func (z *encWriterSwitch) writestr(s string) {
+// 	if z.wx {
+// 		z.wb.writestr(s)
+// 	} else {
+// 		z.wi.writestr(s)
+// 	}
+// }
+// func (z *encWriterSwitch) writen1(b1 byte) {
+// 	if z.wx {
+// 		z.wb.writen1(b1)
+// 	} else {
+// 		z.wi.writen1(b1)
+// 	}
+// }
+// func (z *encWriterSwitch) writen2(b1, b2 byte) {
+// 	if z.wx {
+// 		z.wb.writen2(b1, b2)
+// 	} else {
+// 		z.wi.writen2(b1, b2)
+// 	}
+// }
+
+// An Encoder writes an object to an output stream in the codec format.
+type Encoder struct {
+	panicHdl
+	// hopefully, reduce dereferencing cost by laying the encWriter inside the Encoder
+	e encDriver
+	// NOTE: Encoder shouldn't call its write methods,
+	// as the handler MAY need to do some coordination.
+	w encWriter
+
+	h  *BasicHandle
+	bw *bufio.Writer
+	as encDriverAsis
+
+	// ---- cpu cache line boundary?
+
+	// ---- cpu cache line boundary?
+	encWriterSwitch
+	err error
+
+	// ---- cpu cache line boundary?
+	codecFnPooler
+	ci set
+	js bool    // here, so that no need to piggy back on *codecFner for this
+	be bool    // here, so that no need to piggy back on *codecFner for this
+	_  [6]byte // padding
+
+	// ---- writable fields during execution --- *try* to keep in sep cache line
+
+	// ---- cpu cache line boundary?
+	// b [scratchByteArrayLen]byte
+	// _ [cacheLineSize - scratchByteArrayLen]byte // padding
+	b [cacheLineSize - 0]byte // used for encoding a chan or (non-addressable) array of bytes
+}
+
+// NewEncoder returns an Encoder for encoding into an io.Writer.
+//
+// For efficiency, users are encouraged to pass in a memory buffered writer
+// (eg bufio.Writer, bytes.Buffer).
+func NewEncoder(w io.Writer, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.Reset(w)
+	return e
+}
+
+// NewEncoderBytes returns an encoder for encoding directly and efficiently
+// into a byte slice, using zero-copying to temporary slices.
+//
+// It will potentially replace the output byte slice pointed to.
+// After encoding, the out parameter contains the encoded contents.
+func NewEncoderBytes(out *[]byte, h Handle) *Encoder {
+	e := newEncoder(h)
+	e.ResetBytes(out)
+	return e
+}
+
+func newEncoder(h Handle) *Encoder {
+	e := &Encoder{h: h.getBasicHandle(), err: errEncoderNotInitialized}
+	e.hh = h
+	e.esep = h.hasElemSeparators()
+	return e
+}
+
+func (e *Encoder) resetCommon() {
+	if e.e == nil || e.hh.recreateEncDriver(e.e) {
+		e.e = e.hh.newEncDriver(e)
+		e.as, e.isas = e.e.(encDriverAsis)
+		// e.cr, _ = e.e.(containerStateRecv)
+	}
+	e.be = e.hh.isBinary()
+	_, e.js = e.hh.(*JsonHandle)
+	e.e.reset()
+	e.err = nil
+}
+
+// Reset resets the Encoder with a new output stream.
+//
+// This accommodates using the state of the Encoder,
+// where it has "cached" information about sub-engines.
+func (e *Encoder) Reset(w io.Writer) {
+	if w == nil {
+		return
+	}
+	if e.wi == nil {
+		e.wi = new(ioEncWriter)
+	}
+	var ok bool
+	e.wx = false
+	e.wi.w = w
+	if e.h.WriterBufferSize > 0 {
+		e.bw = bufio.NewWriterSize(w, e.h.WriterBufferSize)
+		e.wi.bw = e.bw
+		e.wi.sw = e.bw
+		e.wi.fw = e.bw
+		e.wi.ww = e.bw
+	} else {
+		if e.wi.bw, ok = w.(io.ByteWriter); !ok {
+			e.wi.bw = e.wi
+		}
+		if e.wi.sw, ok = w.(ioEncStringWriter); !ok {
+			e.wi.sw = e.wi
+		}
+		e.wi.fw, _ = w.(ioFlusher)
+		e.wi.ww = w
+	}
+	e.w = e.wi
+	e.resetCommon()
+}
+
+// ResetBytes resets the Encoder with a new destination output []byte.
+func (e *Encoder) ResetBytes(out *[]byte) {
+	if out == nil {
+		return
+	}
+	var in []byte
+	if out != nil {
+		in = *out
+	}
+	if in == nil {
+		in = make([]byte, defEncByteBufSize)
+	}
+	e.wx = true
+	e.wb.reset(in, out)
+	e.w = &e.wb
+	e.resetCommon()
+}
+
+// Encode writes an object into a stream.
+//
+// Encoding can be configured via the struct tag for the fields.
+// The key (in the struct tags) that we look at is configurable.
+//
+// By default, we look up the "codec" key in the struct field's tags,
+// and fall back to the "json" key if "codec" is absent.
+// That key's value in the struct field's tag is the key name,
+// followed by an optional comma and options.
+//
+// To set an option on all fields (e.g. omitempty on all fields), you
+// can create a field called _struct, and set flags on it. The options
+// which can be set on _struct are:
+// - omitempty: so all fields are omitted if empty
+// - toarray: so struct is encoded as an array
+// - int: so struct key names are encoded as signed integers (instead of strings)
+// - uint: so struct key names are encoded as unsigned integers (instead of strings)
+// - float: so struct key names are encoded as floats (instead of strings)
+// More details on these below.
+//
+// Struct values "usually" encode as maps. Each exported struct field is encoded unless:
+// - the field's tag is "-", OR
+// - the field is empty (empty or the zero value) and its tag specifies the "omitempty" option.
+//
+// When encoding as a map, the first string in the tag (before the comma)
+// is the map key string to use when encoding.
+// ...
+// This key is typically encoded as a string.
+// However, there are instances where the encoded stream has mapping keys encoded as numbers.
+// For example, some cbor streams have keys as integer codes in the stream, but they should map
+// to fields in a structured object. Consequently, a struct is the natural representation in code.
+// For these, configure the struct to encode/decode the keys as numbers (instead of string).
+// This is done with the int,uint or float option on the _struct field (see above).
+//
+// However, struct values may encode as arrays. This happens when:
+// - StructToArray Encode option is set, OR
+// - the tag on the _struct field sets the "toarray" option
+// Note that omitempty is ignored when encoding struct values as arrays,
+// as an entry must be encoded for each field, to maintain its position.
+//
+// Values with types that implement MapBySlice are encoded as stream maps.
+//
+// The empty values (for omitempty option) are false, 0, any nil pointer
+// or interface value, and any array, slice, map, or string of length zero.
+//
+// Anonymous fields are encoded inline except:
+// - the struct tag specifies a replacement name (first value)
+// - the field is of an interface type
+//
+// Examples:
+//
+//	// NOTE: 'json:' can be used as struct tag key, in place of 'codec:' below.
+//	type MyStruct struct {
+//		_struct bool    `codec:",omitempty"`   //set omitempty for every field
+//		Field1 string   `codec:"-"`            //skip this field
+//		Field2 int      `codec:"myName"`       //Use key "myName" in encode stream
+//		Field3 int32    `codec:",omitempty"`   //use key "Field3". Omit if empty.
+//		Field4 bool     `codec:"f4,omitempty"` //use key "f4". Omit if empty.
+//		io.Reader                              //use key "Reader".
+//		MyStruct        `codec:"my1"`          //use key "my1".
+//		MyStruct                               //inline it
+//		...
+//	}
+//
+//	type MyStruct struct {
+//		_struct bool `codec:",toarray"` //encode struct as an array
+//	}
+//
+//	type MyStruct struct {
+//		_struct bool   `codec:",uint"` //encode struct with "unsigned integer" keys
+//		Field1 string  `codec:"1"`     //encode Field1 key using: EncodeInt(1)
+//		Field2 string  `codec:"2"`     //encode Field2 key using: EncodeInt(2)
+//	}
+//
+// The mode of encoding is based on the type of the value. When a value is seen:
+// - If a Selfer, call its CodecEncodeSelf method
+// - If an extension is registered for it, call that extension function
+// - If it implements encoding.(Binary|Text|JSON)Marshaler, call its Marshal(Binary|Text|JSON) method
+// - Else encode it based on its reflect.Kind
+//
+// Note that struct field names and keys in map[string]XXX will be treated as symbols.
+// Some formats support symbols (e.g. binc) and will properly encode the string
+// only once in the stream, and use a tag to refer to it thereafter.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer e.deferred(&err)
+	e.MustEncode(v)
+	return
+}
+
+// MustEncode is like Encode, but panics if unable to Encode.
+// This provides insight into the code location that triggered the error.
+func (e *Encoder) MustEncode(v interface{}) {
+	if e.err != nil {
+		panic(e.err)
+	}
+	e.encode(v)
+	e.e.atEndOfEncode()
+	e.w.atEndOfEncode()
+	e.alwaysAtEnd()
+}
+
+func (e *Encoder) deferred(err1 *error) {
+	e.alwaysAtEnd()
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			panicValToErr(e, x, err1)
+			panicValToErr(e, x, &e.err)
+		}
+	}
+}
+
+// func (e *Encoder) alwaysAtEnd() {
+// 	e.codecFnPooler.alwaysAtEnd()
+// }
+
+func (e *Encoder) encode(iv interface{}) {
+	if iv == nil || definitelyNil(iv) {
+		e.e.EncodeNil()
+		return
+	}
+	if v, ok := iv.(Selfer); ok {
+		v.CodecEncodeSelf(e)
+		return
+	}
+
+	// a switch with only concrete types can be optimized.
+	// consequently, we deal with nil and interfaces outside.
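+	// For example, a plain int hits the concrete `case int` below without any
+	// reflection, while a named type such as `type MyInt int` misses every
+	// concrete case and falls through to the reflect-based encodeValue path.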
+ + switch v := iv.(type) { + case Raw: + e.rawBytes(v) + case reflect.Value: + e.encodeValue(v, nil, true) + + case string: + e.e.EncodeString(cUTF8, v) + case bool: + e.e.EncodeBool(v) + case int: + e.e.EncodeInt(int64(v)) + case int8: + e.e.EncodeInt(int64(v)) + case int16: + e.e.EncodeInt(int64(v)) + case int32: + e.e.EncodeInt(int64(v)) + case int64: + e.e.EncodeInt(v) + case uint: + e.e.EncodeUint(uint64(v)) + case uint8: + e.e.EncodeUint(uint64(v)) + case uint16: + e.e.EncodeUint(uint64(v)) + case uint32: + e.e.EncodeUint(uint64(v)) + case uint64: + e.e.EncodeUint(v) + case uintptr: + e.e.EncodeUint(uint64(v)) + case float32: + e.e.EncodeFloat32(v) + case float64: + e.e.EncodeFloat64(v) + case time.Time: + e.e.EncodeTime(v) + case []uint8: + e.e.EncodeStringBytes(cRAW, v) + + case *Raw: + e.rawBytes(*v) + + case *string: + e.e.EncodeString(cUTF8, *v) + case *bool: + e.e.EncodeBool(*v) + case *int: + e.e.EncodeInt(int64(*v)) + case *int8: + e.e.EncodeInt(int64(*v)) + case *int16: + e.e.EncodeInt(int64(*v)) + case *int32: + e.e.EncodeInt(int64(*v)) + case *int64: + e.e.EncodeInt(*v) + case *uint: + e.e.EncodeUint(uint64(*v)) + case *uint8: + e.e.EncodeUint(uint64(*v)) + case *uint16: + e.e.EncodeUint(uint64(*v)) + case *uint32: + e.e.EncodeUint(uint64(*v)) + case *uint64: + e.e.EncodeUint(*v) + case *uintptr: + e.e.EncodeUint(uint64(*v)) + case *float32: + e.e.EncodeFloat32(*v) + case *float64: + e.e.EncodeFloat64(*v) + case *time.Time: + e.e.EncodeTime(*v) + + case *[]uint8: + e.e.EncodeStringBytes(cRAW, *v) + + default: + if !fastpathEncodeTypeSwitch(iv, e) { + // checkfastpath=true (not false), as underlying slice/map type may be fast-path + e.encodeValue(reflect.ValueOf(iv), nil, true) + } + } +} + +func (e *Encoder) encodeValue(rv reflect.Value, fn *codecFn, checkFastpath bool) { + // if a valid fn is passed, it MUST BE for the dereferenced type of rv + var sptr uintptr + var rvp reflect.Value + var rvpValid bool +TOP: + switch rv.Kind() { + case reflect.Ptr: + if rv.IsNil() { + e.e.EncodeNil() + return + } + rvpValid = true + rvp = rv + rv = rv.Elem() + if e.h.CheckCircularRef && rv.Kind() == reflect.Struct { + // TODO: Movable pointers will be an issue here. Future problem. 
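+			// sptr records this struct's address in the e.ci set further down;
+			// seeing the same address again during this encode call reports a
+			// circular reference.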
+			sptr = rv.UnsafeAddr()
+			break TOP
+		}
+		goto TOP
+	case reflect.Interface:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+		rv = rv.Elem()
+		goto TOP
+	case reflect.Slice, reflect.Map:
+		if rv.IsNil() {
+			e.e.EncodeNil()
+			return
+		}
+	case reflect.Invalid, reflect.Func:
+		e.e.EncodeNil()
+		return
+	}
+
+	if sptr != 0 && (&e.ci).add(sptr) {
+		e.errorf("circular reference found: # %d", sptr)
+	}
+
+	if fn == nil {
+		rt := rv.Type()
+		// always pass checkCodecSelfer=true, in case T or ****T is passed, where *T is a Selfer
+		fn = e.cfer().get(rt, checkFastpath, true)
+	}
+	if fn.i.addrE {
+		if rvpValid {
+			fn.fe(e, &fn.i, rvp)
+		} else if rv.CanAddr() {
+			fn.fe(e, &fn.i, rv.Addr())
+		} else {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			fn.fe(e, &fn.i, rv2)
+		}
+	} else {
+		fn.fe(e, &fn.i, rv)
+	}
+	if sptr != 0 {
+		(&e.ci).remove(sptr)
+	}
+}
+
+func (e *Encoder) marshal(bs []byte, fnerr error, asis bool, c charEncoding) {
+	if fnerr != nil {
+		panic(fnerr)
+	}
+	if bs == nil {
+		e.e.EncodeNil()
+	} else if asis {
+		e.asis(bs)
+	} else {
+		e.e.EncodeStringBytes(c, bs)
+	}
+}
+
+func (e *Encoder) asis(v []byte) {
+	if e.isas {
+		e.as.EncodeAsis(v)
+	} else {
+		e.w.writeb(v)
+	}
+}
+
+func (e *Encoder) rawBytes(vv Raw) {
+	v := []byte(vv)
+	if !e.h.Raw {
+		e.errorf("Raw values cannot be encoded: %v", v)
+	}
+	e.asis(v)
+}
+
+func (e *Encoder) wrapErrstr(v interface{}, err *error) {
+	*err = fmt.Errorf("%s encode error: %v", e.hh.Name(), v)
+}
diff --git a/vendor/github.com/ugorji/go/codec/fast-path.generated.go b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
new file mode 100644
index 0000000000..87f2562f65
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/fast-path.generated.go
@@ -0,0 +1,34522 @@
+// +build !notfastpath
+
+// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+// Code generated from fast-path.go.tmpl - DO NOT EDIT.
+
+package codec
+
+// Fast path functions try to create a fast path encode or decode implementation
+// for common maps and slices.
+//
+// We define the functions and register them in this single file
+// so as not to pollute encode.go and decode.go, or create a dependency there.
+// This file can be omitted without causing a build failure.
+//
+// The advantage of fast paths is:
+// - Many calls bypass reflection altogether
+//
+// Currently supported:
+// - slice of all builtin types
+// - map of all builtin types to string or interface value
+// - symmetrical maps of all builtin types (e.g. str-str, uint8-uint8)
+// This should provide adequate "typical" implementations.
+//
+// Note that fast track decode functions must handle values for which an address cannot be obtained.
+// For example:
+//	m2 := map[string]int{}
+//	p2 := []interface{}{m2}
+//	// decoding into p2 will bomb if fast track functions do not treat it as unaddressable.
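+//
+// As an illustrative sketch (the CborHandle choice is an assumption), a value
+// whose static type is one of the generated cases encodes without reflection:
+//
+//	var h CborHandle
+//	var out []byte
+//	NewEncoderBytes(&out, &h).MustEncode(map[string]int{"a": 1})
+//	// map[string]int is in the generated list below, so this dispatches
+//	// through fastpathEncodeTypeSwitch rather than the reflect-based path.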
+// + +import ( + "reflect" + "sort" +) + +const fastpathEnabled = true + +type fastpathT struct{} + +var fastpathTV fastpathT + +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} + +type fastpathA [271]fastpathE + +func (x *fastpathA) index(rtid uintptr) int { + // use binary search to grab the index (adapted from sort/search.go) + h, i, j := 0, 0, 271 // len(x) + for i < j { + h = i + (j-i)/2 + if x[h].rtid < rtid { + i = h + 1 + } else { + j = h + } + } + if i < 271 && x[i].rtid == rtid { + return i + } + return -1 +} + +type fastpathAslice []fastpathE + +func (x fastpathAslice) Len() int { return len(x) } +func (x fastpathAslice) Less(i, j int) bool { return x[i].rtid < x[j].rtid } +func (x fastpathAslice) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +var fastpathAV fastpathA + +// due to possible initialization loop error, make fastpath in an init() +func init() { + i := 0 + fn := func(v interface{}, + fe func(*Encoder, *codecFnInfo, reflect.Value), + fd func(*Decoder, *codecFnInfo, reflect.Value)) (f fastpathE) { + xrt := reflect.TypeOf(v) + xptr := rt2id(xrt) + fastpathAV[i] = fastpathE{xptr, xrt, fe, fd} + i++ + return + } + + fn([]interface{}(nil), (*Encoder).fastpathEncSliceIntfR, (*Decoder).fastpathDecSliceIntfR) + fn([]string(nil), (*Encoder).fastpathEncSliceStringR, (*Decoder).fastpathDecSliceStringR) + fn([]float32(nil), (*Encoder).fastpathEncSliceFloat32R, (*Decoder).fastpathDecSliceFloat32R) + fn([]float64(nil), (*Encoder).fastpathEncSliceFloat64R, (*Decoder).fastpathDecSliceFloat64R) + fn([]uint(nil), (*Encoder).fastpathEncSliceUintR, (*Decoder).fastpathDecSliceUintR) + fn([]uint16(nil), (*Encoder).fastpathEncSliceUint16R, (*Decoder).fastpathDecSliceUint16R) + fn([]uint32(nil), (*Encoder).fastpathEncSliceUint32R, (*Decoder).fastpathDecSliceUint32R) + fn([]uint64(nil), (*Encoder).fastpathEncSliceUint64R, (*Decoder).fastpathDecSliceUint64R) + fn([]uintptr(nil), (*Encoder).fastpathEncSliceUintptrR, (*Decoder).fastpathDecSliceUintptrR) + fn([]int(nil), (*Encoder).fastpathEncSliceIntR, (*Decoder).fastpathDecSliceIntR) + fn([]int8(nil), (*Encoder).fastpathEncSliceInt8R, (*Decoder).fastpathDecSliceInt8R) + fn([]int16(nil), (*Encoder).fastpathEncSliceInt16R, (*Decoder).fastpathDecSliceInt16R) + fn([]int32(nil), (*Encoder).fastpathEncSliceInt32R, (*Decoder).fastpathDecSliceInt32R) + fn([]int64(nil), (*Encoder).fastpathEncSliceInt64R, (*Decoder).fastpathDecSliceInt64R) + fn([]bool(nil), (*Encoder).fastpathEncSliceBoolR, (*Decoder).fastpathDecSliceBoolR) + + fn(map[interface{}]interface{}(nil), (*Encoder).fastpathEncMapIntfIntfR, (*Decoder).fastpathDecMapIntfIntfR) + fn(map[interface{}]string(nil), (*Encoder).fastpathEncMapIntfStringR, (*Decoder).fastpathDecMapIntfStringR) + fn(map[interface{}]uint(nil), (*Encoder).fastpathEncMapIntfUintR, (*Decoder).fastpathDecMapIntfUintR) + fn(map[interface{}]uint8(nil), (*Encoder).fastpathEncMapIntfUint8R, (*Decoder).fastpathDecMapIntfUint8R) + fn(map[interface{}]uint16(nil), (*Encoder).fastpathEncMapIntfUint16R, (*Decoder).fastpathDecMapIntfUint16R) + fn(map[interface{}]uint32(nil), (*Encoder).fastpathEncMapIntfUint32R, (*Decoder).fastpathDecMapIntfUint32R) + fn(map[interface{}]uint64(nil), (*Encoder).fastpathEncMapIntfUint64R, (*Decoder).fastpathDecMapIntfUint64R) + fn(map[interface{}]uintptr(nil), (*Encoder).fastpathEncMapIntfUintptrR, (*Decoder).fastpathDecMapIntfUintptrR) + fn(map[interface{}]int(nil), 
(*Encoder).fastpathEncMapIntfIntR, (*Decoder).fastpathDecMapIntfIntR) + fn(map[interface{}]int8(nil), (*Encoder).fastpathEncMapIntfInt8R, (*Decoder).fastpathDecMapIntfInt8R) + fn(map[interface{}]int16(nil), (*Encoder).fastpathEncMapIntfInt16R, (*Decoder).fastpathDecMapIntfInt16R) + fn(map[interface{}]int32(nil), (*Encoder).fastpathEncMapIntfInt32R, (*Decoder).fastpathDecMapIntfInt32R) + fn(map[interface{}]int64(nil), (*Encoder).fastpathEncMapIntfInt64R, (*Decoder).fastpathDecMapIntfInt64R) + fn(map[interface{}]float32(nil), (*Encoder).fastpathEncMapIntfFloat32R, (*Decoder).fastpathDecMapIntfFloat32R) + fn(map[interface{}]float64(nil), (*Encoder).fastpathEncMapIntfFloat64R, (*Decoder).fastpathDecMapIntfFloat64R) + fn(map[interface{}]bool(nil), (*Encoder).fastpathEncMapIntfBoolR, (*Decoder).fastpathDecMapIntfBoolR) + fn(map[string]interface{}(nil), (*Encoder).fastpathEncMapStringIntfR, (*Decoder).fastpathDecMapStringIntfR) + fn(map[string]string(nil), (*Encoder).fastpathEncMapStringStringR, (*Decoder).fastpathDecMapStringStringR) + fn(map[string]uint(nil), (*Encoder).fastpathEncMapStringUintR, (*Decoder).fastpathDecMapStringUintR) + fn(map[string]uint8(nil), (*Encoder).fastpathEncMapStringUint8R, (*Decoder).fastpathDecMapStringUint8R) + fn(map[string]uint16(nil), (*Encoder).fastpathEncMapStringUint16R, (*Decoder).fastpathDecMapStringUint16R) + fn(map[string]uint32(nil), (*Encoder).fastpathEncMapStringUint32R, (*Decoder).fastpathDecMapStringUint32R) + fn(map[string]uint64(nil), (*Encoder).fastpathEncMapStringUint64R, (*Decoder).fastpathDecMapStringUint64R) + fn(map[string]uintptr(nil), (*Encoder).fastpathEncMapStringUintptrR, (*Decoder).fastpathDecMapStringUintptrR) + fn(map[string]int(nil), (*Encoder).fastpathEncMapStringIntR, (*Decoder).fastpathDecMapStringIntR) + fn(map[string]int8(nil), (*Encoder).fastpathEncMapStringInt8R, (*Decoder).fastpathDecMapStringInt8R) + fn(map[string]int16(nil), (*Encoder).fastpathEncMapStringInt16R, (*Decoder).fastpathDecMapStringInt16R) + fn(map[string]int32(nil), (*Encoder).fastpathEncMapStringInt32R, (*Decoder).fastpathDecMapStringInt32R) + fn(map[string]int64(nil), (*Encoder).fastpathEncMapStringInt64R, (*Decoder).fastpathDecMapStringInt64R) + fn(map[string]float32(nil), (*Encoder).fastpathEncMapStringFloat32R, (*Decoder).fastpathDecMapStringFloat32R) + fn(map[string]float64(nil), (*Encoder).fastpathEncMapStringFloat64R, (*Decoder).fastpathDecMapStringFloat64R) + fn(map[string]bool(nil), (*Encoder).fastpathEncMapStringBoolR, (*Decoder).fastpathDecMapStringBoolR) + fn(map[float32]interface{}(nil), (*Encoder).fastpathEncMapFloat32IntfR, (*Decoder).fastpathDecMapFloat32IntfR) + fn(map[float32]string(nil), (*Encoder).fastpathEncMapFloat32StringR, (*Decoder).fastpathDecMapFloat32StringR) + fn(map[float32]uint(nil), (*Encoder).fastpathEncMapFloat32UintR, (*Decoder).fastpathDecMapFloat32UintR) + fn(map[float32]uint8(nil), (*Encoder).fastpathEncMapFloat32Uint8R, (*Decoder).fastpathDecMapFloat32Uint8R) + fn(map[float32]uint16(nil), (*Encoder).fastpathEncMapFloat32Uint16R, (*Decoder).fastpathDecMapFloat32Uint16R) + fn(map[float32]uint32(nil), (*Encoder).fastpathEncMapFloat32Uint32R, (*Decoder).fastpathDecMapFloat32Uint32R) + fn(map[float32]uint64(nil), (*Encoder).fastpathEncMapFloat32Uint64R, (*Decoder).fastpathDecMapFloat32Uint64R) + fn(map[float32]uintptr(nil), (*Encoder).fastpathEncMapFloat32UintptrR, (*Decoder).fastpathDecMapFloat32UintptrR) + fn(map[float32]int(nil), (*Encoder).fastpathEncMapFloat32IntR, (*Decoder).fastpathDecMapFloat32IntR) + 
fn(map[float32]int8(nil), (*Encoder).fastpathEncMapFloat32Int8R, (*Decoder).fastpathDecMapFloat32Int8R) + fn(map[float32]int16(nil), (*Encoder).fastpathEncMapFloat32Int16R, (*Decoder).fastpathDecMapFloat32Int16R) + fn(map[float32]int32(nil), (*Encoder).fastpathEncMapFloat32Int32R, (*Decoder).fastpathDecMapFloat32Int32R) + fn(map[float32]int64(nil), (*Encoder).fastpathEncMapFloat32Int64R, (*Decoder).fastpathDecMapFloat32Int64R) + fn(map[float32]float32(nil), (*Encoder).fastpathEncMapFloat32Float32R, (*Decoder).fastpathDecMapFloat32Float32R) + fn(map[float32]float64(nil), (*Encoder).fastpathEncMapFloat32Float64R, (*Decoder).fastpathDecMapFloat32Float64R) + fn(map[float32]bool(nil), (*Encoder).fastpathEncMapFloat32BoolR, (*Decoder).fastpathDecMapFloat32BoolR) + fn(map[float64]interface{}(nil), (*Encoder).fastpathEncMapFloat64IntfR, (*Decoder).fastpathDecMapFloat64IntfR) + fn(map[float64]string(nil), (*Encoder).fastpathEncMapFloat64StringR, (*Decoder).fastpathDecMapFloat64StringR) + fn(map[float64]uint(nil), (*Encoder).fastpathEncMapFloat64UintR, (*Decoder).fastpathDecMapFloat64UintR) + fn(map[float64]uint8(nil), (*Encoder).fastpathEncMapFloat64Uint8R, (*Decoder).fastpathDecMapFloat64Uint8R) + fn(map[float64]uint16(nil), (*Encoder).fastpathEncMapFloat64Uint16R, (*Decoder).fastpathDecMapFloat64Uint16R) + fn(map[float64]uint32(nil), (*Encoder).fastpathEncMapFloat64Uint32R, (*Decoder).fastpathDecMapFloat64Uint32R) + fn(map[float64]uint64(nil), (*Encoder).fastpathEncMapFloat64Uint64R, (*Decoder).fastpathDecMapFloat64Uint64R) + fn(map[float64]uintptr(nil), (*Encoder).fastpathEncMapFloat64UintptrR, (*Decoder).fastpathDecMapFloat64UintptrR) + fn(map[float64]int(nil), (*Encoder).fastpathEncMapFloat64IntR, (*Decoder).fastpathDecMapFloat64IntR) + fn(map[float64]int8(nil), (*Encoder).fastpathEncMapFloat64Int8R, (*Decoder).fastpathDecMapFloat64Int8R) + fn(map[float64]int16(nil), (*Encoder).fastpathEncMapFloat64Int16R, (*Decoder).fastpathDecMapFloat64Int16R) + fn(map[float64]int32(nil), (*Encoder).fastpathEncMapFloat64Int32R, (*Decoder).fastpathDecMapFloat64Int32R) + fn(map[float64]int64(nil), (*Encoder).fastpathEncMapFloat64Int64R, (*Decoder).fastpathDecMapFloat64Int64R) + fn(map[float64]float32(nil), (*Encoder).fastpathEncMapFloat64Float32R, (*Decoder).fastpathDecMapFloat64Float32R) + fn(map[float64]float64(nil), (*Encoder).fastpathEncMapFloat64Float64R, (*Decoder).fastpathDecMapFloat64Float64R) + fn(map[float64]bool(nil), (*Encoder).fastpathEncMapFloat64BoolR, (*Decoder).fastpathDecMapFloat64BoolR) + fn(map[uint]interface{}(nil), (*Encoder).fastpathEncMapUintIntfR, (*Decoder).fastpathDecMapUintIntfR) + fn(map[uint]string(nil), (*Encoder).fastpathEncMapUintStringR, (*Decoder).fastpathDecMapUintStringR) + fn(map[uint]uint(nil), (*Encoder).fastpathEncMapUintUintR, (*Decoder).fastpathDecMapUintUintR) + fn(map[uint]uint8(nil), (*Encoder).fastpathEncMapUintUint8R, (*Decoder).fastpathDecMapUintUint8R) + fn(map[uint]uint16(nil), (*Encoder).fastpathEncMapUintUint16R, (*Decoder).fastpathDecMapUintUint16R) + fn(map[uint]uint32(nil), (*Encoder).fastpathEncMapUintUint32R, (*Decoder).fastpathDecMapUintUint32R) + fn(map[uint]uint64(nil), (*Encoder).fastpathEncMapUintUint64R, (*Decoder).fastpathDecMapUintUint64R) + fn(map[uint]uintptr(nil), (*Encoder).fastpathEncMapUintUintptrR, (*Decoder).fastpathDecMapUintUintptrR) + fn(map[uint]int(nil), (*Encoder).fastpathEncMapUintIntR, (*Decoder).fastpathDecMapUintIntR) + fn(map[uint]int8(nil), (*Encoder).fastpathEncMapUintInt8R, (*Decoder).fastpathDecMapUintInt8R) + 
fn(map[uint]int16(nil), (*Encoder).fastpathEncMapUintInt16R, (*Decoder).fastpathDecMapUintInt16R) + fn(map[uint]int32(nil), (*Encoder).fastpathEncMapUintInt32R, (*Decoder).fastpathDecMapUintInt32R) + fn(map[uint]int64(nil), (*Encoder).fastpathEncMapUintInt64R, (*Decoder).fastpathDecMapUintInt64R) + fn(map[uint]float32(nil), (*Encoder).fastpathEncMapUintFloat32R, (*Decoder).fastpathDecMapUintFloat32R) + fn(map[uint]float64(nil), (*Encoder).fastpathEncMapUintFloat64R, (*Decoder).fastpathDecMapUintFloat64R) + fn(map[uint]bool(nil), (*Encoder).fastpathEncMapUintBoolR, (*Decoder).fastpathDecMapUintBoolR) + fn(map[uint8]interface{}(nil), (*Encoder).fastpathEncMapUint8IntfR, (*Decoder).fastpathDecMapUint8IntfR) + fn(map[uint8]string(nil), (*Encoder).fastpathEncMapUint8StringR, (*Decoder).fastpathDecMapUint8StringR) + fn(map[uint8]uint(nil), (*Encoder).fastpathEncMapUint8UintR, (*Decoder).fastpathDecMapUint8UintR) + fn(map[uint8]uint8(nil), (*Encoder).fastpathEncMapUint8Uint8R, (*Decoder).fastpathDecMapUint8Uint8R) + fn(map[uint8]uint16(nil), (*Encoder).fastpathEncMapUint8Uint16R, (*Decoder).fastpathDecMapUint8Uint16R) + fn(map[uint8]uint32(nil), (*Encoder).fastpathEncMapUint8Uint32R, (*Decoder).fastpathDecMapUint8Uint32R) + fn(map[uint8]uint64(nil), (*Encoder).fastpathEncMapUint8Uint64R, (*Decoder).fastpathDecMapUint8Uint64R) + fn(map[uint8]uintptr(nil), (*Encoder).fastpathEncMapUint8UintptrR, (*Decoder).fastpathDecMapUint8UintptrR) + fn(map[uint8]int(nil), (*Encoder).fastpathEncMapUint8IntR, (*Decoder).fastpathDecMapUint8IntR) + fn(map[uint8]int8(nil), (*Encoder).fastpathEncMapUint8Int8R, (*Decoder).fastpathDecMapUint8Int8R) + fn(map[uint8]int16(nil), (*Encoder).fastpathEncMapUint8Int16R, (*Decoder).fastpathDecMapUint8Int16R) + fn(map[uint8]int32(nil), (*Encoder).fastpathEncMapUint8Int32R, (*Decoder).fastpathDecMapUint8Int32R) + fn(map[uint8]int64(nil), (*Encoder).fastpathEncMapUint8Int64R, (*Decoder).fastpathDecMapUint8Int64R) + fn(map[uint8]float32(nil), (*Encoder).fastpathEncMapUint8Float32R, (*Decoder).fastpathDecMapUint8Float32R) + fn(map[uint8]float64(nil), (*Encoder).fastpathEncMapUint8Float64R, (*Decoder).fastpathDecMapUint8Float64R) + fn(map[uint8]bool(nil), (*Encoder).fastpathEncMapUint8BoolR, (*Decoder).fastpathDecMapUint8BoolR) + fn(map[uint16]interface{}(nil), (*Encoder).fastpathEncMapUint16IntfR, (*Decoder).fastpathDecMapUint16IntfR) + fn(map[uint16]string(nil), (*Encoder).fastpathEncMapUint16StringR, (*Decoder).fastpathDecMapUint16StringR) + fn(map[uint16]uint(nil), (*Encoder).fastpathEncMapUint16UintR, (*Decoder).fastpathDecMapUint16UintR) + fn(map[uint16]uint8(nil), (*Encoder).fastpathEncMapUint16Uint8R, (*Decoder).fastpathDecMapUint16Uint8R) + fn(map[uint16]uint16(nil), (*Encoder).fastpathEncMapUint16Uint16R, (*Decoder).fastpathDecMapUint16Uint16R) + fn(map[uint16]uint32(nil), (*Encoder).fastpathEncMapUint16Uint32R, (*Decoder).fastpathDecMapUint16Uint32R) + fn(map[uint16]uint64(nil), (*Encoder).fastpathEncMapUint16Uint64R, (*Decoder).fastpathDecMapUint16Uint64R) + fn(map[uint16]uintptr(nil), (*Encoder).fastpathEncMapUint16UintptrR, (*Decoder).fastpathDecMapUint16UintptrR) + fn(map[uint16]int(nil), (*Encoder).fastpathEncMapUint16IntR, (*Decoder).fastpathDecMapUint16IntR) + fn(map[uint16]int8(nil), (*Encoder).fastpathEncMapUint16Int8R, (*Decoder).fastpathDecMapUint16Int8R) + fn(map[uint16]int16(nil), (*Encoder).fastpathEncMapUint16Int16R, (*Decoder).fastpathDecMapUint16Int16R) + fn(map[uint16]int32(nil), (*Encoder).fastpathEncMapUint16Int32R, 
(*Decoder).fastpathDecMapUint16Int32R) + fn(map[uint16]int64(nil), (*Encoder).fastpathEncMapUint16Int64R, (*Decoder).fastpathDecMapUint16Int64R) + fn(map[uint16]float32(nil), (*Encoder).fastpathEncMapUint16Float32R, (*Decoder).fastpathDecMapUint16Float32R) + fn(map[uint16]float64(nil), (*Encoder).fastpathEncMapUint16Float64R, (*Decoder).fastpathDecMapUint16Float64R) + fn(map[uint16]bool(nil), (*Encoder).fastpathEncMapUint16BoolR, (*Decoder).fastpathDecMapUint16BoolR) + fn(map[uint32]interface{}(nil), (*Encoder).fastpathEncMapUint32IntfR, (*Decoder).fastpathDecMapUint32IntfR) + fn(map[uint32]string(nil), (*Encoder).fastpathEncMapUint32StringR, (*Decoder).fastpathDecMapUint32StringR) + fn(map[uint32]uint(nil), (*Encoder).fastpathEncMapUint32UintR, (*Decoder).fastpathDecMapUint32UintR) + fn(map[uint32]uint8(nil), (*Encoder).fastpathEncMapUint32Uint8R, (*Decoder).fastpathDecMapUint32Uint8R) + fn(map[uint32]uint16(nil), (*Encoder).fastpathEncMapUint32Uint16R, (*Decoder).fastpathDecMapUint32Uint16R) + fn(map[uint32]uint32(nil), (*Encoder).fastpathEncMapUint32Uint32R, (*Decoder).fastpathDecMapUint32Uint32R) + fn(map[uint32]uint64(nil), (*Encoder).fastpathEncMapUint32Uint64R, (*Decoder).fastpathDecMapUint32Uint64R) + fn(map[uint32]uintptr(nil), (*Encoder).fastpathEncMapUint32UintptrR, (*Decoder).fastpathDecMapUint32UintptrR) + fn(map[uint32]int(nil), (*Encoder).fastpathEncMapUint32IntR, (*Decoder).fastpathDecMapUint32IntR) + fn(map[uint32]int8(nil), (*Encoder).fastpathEncMapUint32Int8R, (*Decoder).fastpathDecMapUint32Int8R) + fn(map[uint32]int16(nil), (*Encoder).fastpathEncMapUint32Int16R, (*Decoder).fastpathDecMapUint32Int16R) + fn(map[uint32]int32(nil), (*Encoder).fastpathEncMapUint32Int32R, (*Decoder).fastpathDecMapUint32Int32R) + fn(map[uint32]int64(nil), (*Encoder).fastpathEncMapUint32Int64R, (*Decoder).fastpathDecMapUint32Int64R) + fn(map[uint32]float32(nil), (*Encoder).fastpathEncMapUint32Float32R, (*Decoder).fastpathDecMapUint32Float32R) + fn(map[uint32]float64(nil), (*Encoder).fastpathEncMapUint32Float64R, (*Decoder).fastpathDecMapUint32Float64R) + fn(map[uint32]bool(nil), (*Encoder).fastpathEncMapUint32BoolR, (*Decoder).fastpathDecMapUint32BoolR) + fn(map[uint64]interface{}(nil), (*Encoder).fastpathEncMapUint64IntfR, (*Decoder).fastpathDecMapUint64IntfR) + fn(map[uint64]string(nil), (*Encoder).fastpathEncMapUint64StringR, (*Decoder).fastpathDecMapUint64StringR) + fn(map[uint64]uint(nil), (*Encoder).fastpathEncMapUint64UintR, (*Decoder).fastpathDecMapUint64UintR) + fn(map[uint64]uint8(nil), (*Encoder).fastpathEncMapUint64Uint8R, (*Decoder).fastpathDecMapUint64Uint8R) + fn(map[uint64]uint16(nil), (*Encoder).fastpathEncMapUint64Uint16R, (*Decoder).fastpathDecMapUint64Uint16R) + fn(map[uint64]uint32(nil), (*Encoder).fastpathEncMapUint64Uint32R, (*Decoder).fastpathDecMapUint64Uint32R) + fn(map[uint64]uint64(nil), (*Encoder).fastpathEncMapUint64Uint64R, (*Decoder).fastpathDecMapUint64Uint64R) + fn(map[uint64]uintptr(nil), (*Encoder).fastpathEncMapUint64UintptrR, (*Decoder).fastpathDecMapUint64UintptrR) + fn(map[uint64]int(nil), (*Encoder).fastpathEncMapUint64IntR, (*Decoder).fastpathDecMapUint64IntR) + fn(map[uint64]int8(nil), (*Encoder).fastpathEncMapUint64Int8R, (*Decoder).fastpathDecMapUint64Int8R) + fn(map[uint64]int16(nil), (*Encoder).fastpathEncMapUint64Int16R, (*Decoder).fastpathDecMapUint64Int16R) + fn(map[uint64]int32(nil), (*Encoder).fastpathEncMapUint64Int32R, (*Decoder).fastpathDecMapUint64Int32R) + fn(map[uint64]int64(nil), (*Encoder).fastpathEncMapUint64Int64R, 
(*Decoder).fastpathDecMapUint64Int64R) + fn(map[uint64]float32(nil), (*Encoder).fastpathEncMapUint64Float32R, (*Decoder).fastpathDecMapUint64Float32R) + fn(map[uint64]float64(nil), (*Encoder).fastpathEncMapUint64Float64R, (*Decoder).fastpathDecMapUint64Float64R) + fn(map[uint64]bool(nil), (*Encoder).fastpathEncMapUint64BoolR, (*Decoder).fastpathDecMapUint64BoolR) + fn(map[uintptr]interface{}(nil), (*Encoder).fastpathEncMapUintptrIntfR, (*Decoder).fastpathDecMapUintptrIntfR) + fn(map[uintptr]string(nil), (*Encoder).fastpathEncMapUintptrStringR, (*Decoder).fastpathDecMapUintptrStringR) + fn(map[uintptr]uint(nil), (*Encoder).fastpathEncMapUintptrUintR, (*Decoder).fastpathDecMapUintptrUintR) + fn(map[uintptr]uint8(nil), (*Encoder).fastpathEncMapUintptrUint8R, (*Decoder).fastpathDecMapUintptrUint8R) + fn(map[uintptr]uint16(nil), (*Encoder).fastpathEncMapUintptrUint16R, (*Decoder).fastpathDecMapUintptrUint16R) + fn(map[uintptr]uint32(nil), (*Encoder).fastpathEncMapUintptrUint32R, (*Decoder).fastpathDecMapUintptrUint32R) + fn(map[uintptr]uint64(nil), (*Encoder).fastpathEncMapUintptrUint64R, (*Decoder).fastpathDecMapUintptrUint64R) + fn(map[uintptr]uintptr(nil), (*Encoder).fastpathEncMapUintptrUintptrR, (*Decoder).fastpathDecMapUintptrUintptrR) + fn(map[uintptr]int(nil), (*Encoder).fastpathEncMapUintptrIntR, (*Decoder).fastpathDecMapUintptrIntR) + fn(map[uintptr]int8(nil), (*Encoder).fastpathEncMapUintptrInt8R, (*Decoder).fastpathDecMapUintptrInt8R) + fn(map[uintptr]int16(nil), (*Encoder).fastpathEncMapUintptrInt16R, (*Decoder).fastpathDecMapUintptrInt16R) + fn(map[uintptr]int32(nil), (*Encoder).fastpathEncMapUintptrInt32R, (*Decoder).fastpathDecMapUintptrInt32R) + fn(map[uintptr]int64(nil), (*Encoder).fastpathEncMapUintptrInt64R, (*Decoder).fastpathDecMapUintptrInt64R) + fn(map[uintptr]float32(nil), (*Encoder).fastpathEncMapUintptrFloat32R, (*Decoder).fastpathDecMapUintptrFloat32R) + fn(map[uintptr]float64(nil), (*Encoder).fastpathEncMapUintptrFloat64R, (*Decoder).fastpathDecMapUintptrFloat64R) + fn(map[uintptr]bool(nil), (*Encoder).fastpathEncMapUintptrBoolR, (*Decoder).fastpathDecMapUintptrBoolR) + fn(map[int]interface{}(nil), (*Encoder).fastpathEncMapIntIntfR, (*Decoder).fastpathDecMapIntIntfR) + fn(map[int]string(nil), (*Encoder).fastpathEncMapIntStringR, (*Decoder).fastpathDecMapIntStringR) + fn(map[int]uint(nil), (*Encoder).fastpathEncMapIntUintR, (*Decoder).fastpathDecMapIntUintR) + fn(map[int]uint8(nil), (*Encoder).fastpathEncMapIntUint8R, (*Decoder).fastpathDecMapIntUint8R) + fn(map[int]uint16(nil), (*Encoder).fastpathEncMapIntUint16R, (*Decoder).fastpathDecMapIntUint16R) + fn(map[int]uint32(nil), (*Encoder).fastpathEncMapIntUint32R, (*Decoder).fastpathDecMapIntUint32R) + fn(map[int]uint64(nil), (*Encoder).fastpathEncMapIntUint64R, (*Decoder).fastpathDecMapIntUint64R) + fn(map[int]uintptr(nil), (*Encoder).fastpathEncMapIntUintptrR, (*Decoder).fastpathDecMapIntUintptrR) + fn(map[int]int(nil), (*Encoder).fastpathEncMapIntIntR, (*Decoder).fastpathDecMapIntIntR) + fn(map[int]int8(nil), (*Encoder).fastpathEncMapIntInt8R, (*Decoder).fastpathDecMapIntInt8R) + fn(map[int]int16(nil), (*Encoder).fastpathEncMapIntInt16R, (*Decoder).fastpathDecMapIntInt16R) + fn(map[int]int32(nil), (*Encoder).fastpathEncMapIntInt32R, (*Decoder).fastpathDecMapIntInt32R) + fn(map[int]int64(nil), (*Encoder).fastpathEncMapIntInt64R, (*Decoder).fastpathDecMapIntInt64R) + fn(map[int]float32(nil), (*Encoder).fastpathEncMapIntFloat32R, (*Decoder).fastpathDecMapIntFloat32R) + fn(map[int]float64(nil), 
(*Encoder).fastpathEncMapIntFloat64R, (*Decoder).fastpathDecMapIntFloat64R) + fn(map[int]bool(nil), (*Encoder).fastpathEncMapIntBoolR, (*Decoder).fastpathDecMapIntBoolR) + fn(map[int8]interface{}(nil), (*Encoder).fastpathEncMapInt8IntfR, (*Decoder).fastpathDecMapInt8IntfR) + fn(map[int8]string(nil), (*Encoder).fastpathEncMapInt8StringR, (*Decoder).fastpathDecMapInt8StringR) + fn(map[int8]uint(nil), (*Encoder).fastpathEncMapInt8UintR, (*Decoder).fastpathDecMapInt8UintR) + fn(map[int8]uint8(nil), (*Encoder).fastpathEncMapInt8Uint8R, (*Decoder).fastpathDecMapInt8Uint8R) + fn(map[int8]uint16(nil), (*Encoder).fastpathEncMapInt8Uint16R, (*Decoder).fastpathDecMapInt8Uint16R) + fn(map[int8]uint32(nil), (*Encoder).fastpathEncMapInt8Uint32R, (*Decoder).fastpathDecMapInt8Uint32R) + fn(map[int8]uint64(nil), (*Encoder).fastpathEncMapInt8Uint64R, (*Decoder).fastpathDecMapInt8Uint64R) + fn(map[int8]uintptr(nil), (*Encoder).fastpathEncMapInt8UintptrR, (*Decoder).fastpathDecMapInt8UintptrR) + fn(map[int8]int(nil), (*Encoder).fastpathEncMapInt8IntR, (*Decoder).fastpathDecMapInt8IntR) + fn(map[int8]int8(nil), (*Encoder).fastpathEncMapInt8Int8R, (*Decoder).fastpathDecMapInt8Int8R) + fn(map[int8]int16(nil), (*Encoder).fastpathEncMapInt8Int16R, (*Decoder).fastpathDecMapInt8Int16R) + fn(map[int8]int32(nil), (*Encoder).fastpathEncMapInt8Int32R, (*Decoder).fastpathDecMapInt8Int32R) + fn(map[int8]int64(nil), (*Encoder).fastpathEncMapInt8Int64R, (*Decoder).fastpathDecMapInt8Int64R) + fn(map[int8]float32(nil), (*Encoder).fastpathEncMapInt8Float32R, (*Decoder).fastpathDecMapInt8Float32R) + fn(map[int8]float64(nil), (*Encoder).fastpathEncMapInt8Float64R, (*Decoder).fastpathDecMapInt8Float64R) + fn(map[int8]bool(nil), (*Encoder).fastpathEncMapInt8BoolR, (*Decoder).fastpathDecMapInt8BoolR) + fn(map[int16]interface{}(nil), (*Encoder).fastpathEncMapInt16IntfR, (*Decoder).fastpathDecMapInt16IntfR) + fn(map[int16]string(nil), (*Encoder).fastpathEncMapInt16StringR, (*Decoder).fastpathDecMapInt16StringR) + fn(map[int16]uint(nil), (*Encoder).fastpathEncMapInt16UintR, (*Decoder).fastpathDecMapInt16UintR) + fn(map[int16]uint8(nil), (*Encoder).fastpathEncMapInt16Uint8R, (*Decoder).fastpathDecMapInt16Uint8R) + fn(map[int16]uint16(nil), (*Encoder).fastpathEncMapInt16Uint16R, (*Decoder).fastpathDecMapInt16Uint16R) + fn(map[int16]uint32(nil), (*Encoder).fastpathEncMapInt16Uint32R, (*Decoder).fastpathDecMapInt16Uint32R) + fn(map[int16]uint64(nil), (*Encoder).fastpathEncMapInt16Uint64R, (*Decoder).fastpathDecMapInt16Uint64R) + fn(map[int16]uintptr(nil), (*Encoder).fastpathEncMapInt16UintptrR, (*Decoder).fastpathDecMapInt16UintptrR) + fn(map[int16]int(nil), (*Encoder).fastpathEncMapInt16IntR, (*Decoder).fastpathDecMapInt16IntR) + fn(map[int16]int8(nil), (*Encoder).fastpathEncMapInt16Int8R, (*Decoder).fastpathDecMapInt16Int8R) + fn(map[int16]int16(nil), (*Encoder).fastpathEncMapInt16Int16R, (*Decoder).fastpathDecMapInt16Int16R) + fn(map[int16]int32(nil), (*Encoder).fastpathEncMapInt16Int32R, (*Decoder).fastpathDecMapInt16Int32R) + fn(map[int16]int64(nil), (*Encoder).fastpathEncMapInt16Int64R, (*Decoder).fastpathDecMapInt16Int64R) + fn(map[int16]float32(nil), (*Encoder).fastpathEncMapInt16Float32R, (*Decoder).fastpathDecMapInt16Float32R) + fn(map[int16]float64(nil), (*Encoder).fastpathEncMapInt16Float64R, (*Decoder).fastpathDecMapInt16Float64R) + fn(map[int16]bool(nil), (*Encoder).fastpathEncMapInt16BoolR, (*Decoder).fastpathDecMapInt16BoolR) + fn(map[int32]interface{}(nil), (*Encoder).fastpathEncMapInt32IntfR, 
(*Decoder).fastpathDecMapInt32IntfR) + fn(map[int32]string(nil), (*Encoder).fastpathEncMapInt32StringR, (*Decoder).fastpathDecMapInt32StringR) + fn(map[int32]uint(nil), (*Encoder).fastpathEncMapInt32UintR, (*Decoder).fastpathDecMapInt32UintR) + fn(map[int32]uint8(nil), (*Encoder).fastpathEncMapInt32Uint8R, (*Decoder).fastpathDecMapInt32Uint8R) + fn(map[int32]uint16(nil), (*Encoder).fastpathEncMapInt32Uint16R, (*Decoder).fastpathDecMapInt32Uint16R) + fn(map[int32]uint32(nil), (*Encoder).fastpathEncMapInt32Uint32R, (*Decoder).fastpathDecMapInt32Uint32R) + fn(map[int32]uint64(nil), (*Encoder).fastpathEncMapInt32Uint64R, (*Decoder).fastpathDecMapInt32Uint64R) + fn(map[int32]uintptr(nil), (*Encoder).fastpathEncMapInt32UintptrR, (*Decoder).fastpathDecMapInt32UintptrR) + fn(map[int32]int(nil), (*Encoder).fastpathEncMapInt32IntR, (*Decoder).fastpathDecMapInt32IntR) + fn(map[int32]int8(nil), (*Encoder).fastpathEncMapInt32Int8R, (*Decoder).fastpathDecMapInt32Int8R) + fn(map[int32]int16(nil), (*Encoder).fastpathEncMapInt32Int16R, (*Decoder).fastpathDecMapInt32Int16R) + fn(map[int32]int32(nil), (*Encoder).fastpathEncMapInt32Int32R, (*Decoder).fastpathDecMapInt32Int32R) + fn(map[int32]int64(nil), (*Encoder).fastpathEncMapInt32Int64R, (*Decoder).fastpathDecMapInt32Int64R) + fn(map[int32]float32(nil), (*Encoder).fastpathEncMapInt32Float32R, (*Decoder).fastpathDecMapInt32Float32R) + fn(map[int32]float64(nil), (*Encoder).fastpathEncMapInt32Float64R, (*Decoder).fastpathDecMapInt32Float64R) + fn(map[int32]bool(nil), (*Encoder).fastpathEncMapInt32BoolR, (*Decoder).fastpathDecMapInt32BoolR) + fn(map[int64]interface{}(nil), (*Encoder).fastpathEncMapInt64IntfR, (*Decoder).fastpathDecMapInt64IntfR) + fn(map[int64]string(nil), (*Encoder).fastpathEncMapInt64StringR, (*Decoder).fastpathDecMapInt64StringR) + fn(map[int64]uint(nil), (*Encoder).fastpathEncMapInt64UintR, (*Decoder).fastpathDecMapInt64UintR) + fn(map[int64]uint8(nil), (*Encoder).fastpathEncMapInt64Uint8R, (*Decoder).fastpathDecMapInt64Uint8R) + fn(map[int64]uint16(nil), (*Encoder).fastpathEncMapInt64Uint16R, (*Decoder).fastpathDecMapInt64Uint16R) + fn(map[int64]uint32(nil), (*Encoder).fastpathEncMapInt64Uint32R, (*Decoder).fastpathDecMapInt64Uint32R) + fn(map[int64]uint64(nil), (*Encoder).fastpathEncMapInt64Uint64R, (*Decoder).fastpathDecMapInt64Uint64R) + fn(map[int64]uintptr(nil), (*Encoder).fastpathEncMapInt64UintptrR, (*Decoder).fastpathDecMapInt64UintptrR) + fn(map[int64]int(nil), (*Encoder).fastpathEncMapInt64IntR, (*Decoder).fastpathDecMapInt64IntR) + fn(map[int64]int8(nil), (*Encoder).fastpathEncMapInt64Int8R, (*Decoder).fastpathDecMapInt64Int8R) + fn(map[int64]int16(nil), (*Encoder).fastpathEncMapInt64Int16R, (*Decoder).fastpathDecMapInt64Int16R) + fn(map[int64]int32(nil), (*Encoder).fastpathEncMapInt64Int32R, (*Decoder).fastpathDecMapInt64Int32R) + fn(map[int64]int64(nil), (*Encoder).fastpathEncMapInt64Int64R, (*Decoder).fastpathDecMapInt64Int64R) + fn(map[int64]float32(nil), (*Encoder).fastpathEncMapInt64Float32R, (*Decoder).fastpathDecMapInt64Float32R) + fn(map[int64]float64(nil), (*Encoder).fastpathEncMapInt64Float64R, (*Decoder).fastpathDecMapInt64Float64R) + fn(map[int64]bool(nil), (*Encoder).fastpathEncMapInt64BoolR, (*Decoder).fastpathDecMapInt64BoolR) + fn(map[bool]interface{}(nil), (*Encoder).fastpathEncMapBoolIntfR, (*Decoder).fastpathDecMapBoolIntfR) + fn(map[bool]string(nil), (*Encoder).fastpathEncMapBoolStringR, (*Decoder).fastpathDecMapBoolStringR) + fn(map[bool]uint(nil), (*Encoder).fastpathEncMapBoolUintR, 
(*Decoder).fastpathDecMapBoolUintR) + fn(map[bool]uint8(nil), (*Encoder).fastpathEncMapBoolUint8R, (*Decoder).fastpathDecMapBoolUint8R) + fn(map[bool]uint16(nil), (*Encoder).fastpathEncMapBoolUint16R, (*Decoder).fastpathDecMapBoolUint16R) + fn(map[bool]uint32(nil), (*Encoder).fastpathEncMapBoolUint32R, (*Decoder).fastpathDecMapBoolUint32R) + fn(map[bool]uint64(nil), (*Encoder).fastpathEncMapBoolUint64R, (*Decoder).fastpathDecMapBoolUint64R) + fn(map[bool]uintptr(nil), (*Encoder).fastpathEncMapBoolUintptrR, (*Decoder).fastpathDecMapBoolUintptrR) + fn(map[bool]int(nil), (*Encoder).fastpathEncMapBoolIntR, (*Decoder).fastpathDecMapBoolIntR) + fn(map[bool]int8(nil), (*Encoder).fastpathEncMapBoolInt8R, (*Decoder).fastpathDecMapBoolInt8R) + fn(map[bool]int16(nil), (*Encoder).fastpathEncMapBoolInt16R, (*Decoder).fastpathDecMapBoolInt16R) + fn(map[bool]int32(nil), (*Encoder).fastpathEncMapBoolInt32R, (*Decoder).fastpathDecMapBoolInt32R) + fn(map[bool]int64(nil), (*Encoder).fastpathEncMapBoolInt64R, (*Decoder).fastpathDecMapBoolInt64R) + fn(map[bool]float32(nil), (*Encoder).fastpathEncMapBoolFloat32R, (*Decoder).fastpathDecMapBoolFloat32R) + fn(map[bool]float64(nil), (*Encoder).fastpathEncMapBoolFloat64R, (*Decoder).fastpathDecMapBoolFloat64R) + fn(map[bool]bool(nil), (*Encoder).fastpathEncMapBoolBoolR, (*Decoder).fastpathDecMapBoolBoolR) + + sort.Sort(fastpathAslice(fastpathAV[:])) +} + +// -- encode + +// -- -- fast path type switch +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { + switch v := iv.(type) { + + case []interface{}: + fastpathTV.EncSliceIntfV(v, e) + case *[]interface{}: + fastpathTV.EncSliceIntfV(*v, e) + case []string: + fastpathTV.EncSliceStringV(v, e) + case *[]string: + fastpathTV.EncSliceStringV(*v, e) + case []float32: + fastpathTV.EncSliceFloat32V(v, e) + case *[]float32: + fastpathTV.EncSliceFloat32V(*v, e) + case []float64: + fastpathTV.EncSliceFloat64V(v, e) + case *[]float64: + fastpathTV.EncSliceFloat64V(*v, e) + case []uint: + fastpathTV.EncSliceUintV(v, e) + case *[]uint: + fastpathTV.EncSliceUintV(*v, e) + case []uint16: + fastpathTV.EncSliceUint16V(v, e) + case *[]uint16: + fastpathTV.EncSliceUint16V(*v, e) + case []uint32: + fastpathTV.EncSliceUint32V(v, e) + case *[]uint32: + fastpathTV.EncSliceUint32V(*v, e) + case []uint64: + fastpathTV.EncSliceUint64V(v, e) + case *[]uint64: + fastpathTV.EncSliceUint64V(*v, e) + case []uintptr: + fastpathTV.EncSliceUintptrV(v, e) + case *[]uintptr: + fastpathTV.EncSliceUintptrV(*v, e) + case []int: + fastpathTV.EncSliceIntV(v, e) + case *[]int: + fastpathTV.EncSliceIntV(*v, e) + case []int8: + fastpathTV.EncSliceInt8V(v, e) + case *[]int8: + fastpathTV.EncSliceInt8V(*v, e) + case []int16: + fastpathTV.EncSliceInt16V(v, e) + case *[]int16: + fastpathTV.EncSliceInt16V(*v, e) + case []int32: + fastpathTV.EncSliceInt32V(v, e) + case *[]int32: + fastpathTV.EncSliceInt32V(*v, e) + case []int64: + fastpathTV.EncSliceInt64V(v, e) + case *[]int64: + fastpathTV.EncSliceInt64V(*v, e) + case []bool: + fastpathTV.EncSliceBoolV(v, e) + case *[]bool: + fastpathTV.EncSliceBoolV(*v, e) + + case map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(v, e) + case *map[interface{}]interface{}: + fastpathTV.EncMapIntfIntfV(*v, e) + case map[interface{}]string: + fastpathTV.EncMapIntfStringV(v, e) + case *map[interface{}]string: + fastpathTV.EncMapIntfStringV(*v, e) + case map[interface{}]uint: + fastpathTV.EncMapIntfUintV(v, e) + case *map[interface{}]uint: + fastpathTV.EncMapIntfUintV(*v, e) + case map[interface{}]uint8: + 
fastpathTV.EncMapIntfUint8V(v, e) + case *map[interface{}]uint8: + fastpathTV.EncMapIntfUint8V(*v, e) + case map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(v, e) + case *map[interface{}]uint16: + fastpathTV.EncMapIntfUint16V(*v, e) + case map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(v, e) + case *map[interface{}]uint32: + fastpathTV.EncMapIntfUint32V(*v, e) + case map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(v, e) + case *map[interface{}]uint64: + fastpathTV.EncMapIntfUint64V(*v, e) + case map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(v, e) + case *map[interface{}]uintptr: + fastpathTV.EncMapIntfUintptrV(*v, e) + case map[interface{}]int: + fastpathTV.EncMapIntfIntV(v, e) + case *map[interface{}]int: + fastpathTV.EncMapIntfIntV(*v, e) + case map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(v, e) + case *map[interface{}]int8: + fastpathTV.EncMapIntfInt8V(*v, e) + case map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(v, e) + case *map[interface{}]int16: + fastpathTV.EncMapIntfInt16V(*v, e) + case map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(v, e) + case *map[interface{}]int32: + fastpathTV.EncMapIntfInt32V(*v, e) + case map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(v, e) + case *map[interface{}]int64: + fastpathTV.EncMapIntfInt64V(*v, e) + case map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(v, e) + case *map[interface{}]float32: + fastpathTV.EncMapIntfFloat32V(*v, e) + case map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(v, e) + case *map[interface{}]float64: + fastpathTV.EncMapIntfFloat64V(*v, e) + case map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(v, e) + case *map[interface{}]bool: + fastpathTV.EncMapIntfBoolV(*v, e) + case map[string]interface{}: + fastpathTV.EncMapStringIntfV(v, e) + case *map[string]interface{}: + fastpathTV.EncMapStringIntfV(*v, e) + case map[string]string: + fastpathTV.EncMapStringStringV(v, e) + case *map[string]string: + fastpathTV.EncMapStringStringV(*v, e) + case map[string]uint: + fastpathTV.EncMapStringUintV(v, e) + case *map[string]uint: + fastpathTV.EncMapStringUintV(*v, e) + case map[string]uint8: + fastpathTV.EncMapStringUint8V(v, e) + case *map[string]uint8: + fastpathTV.EncMapStringUint8V(*v, e) + case map[string]uint16: + fastpathTV.EncMapStringUint16V(v, e) + case *map[string]uint16: + fastpathTV.EncMapStringUint16V(*v, e) + case map[string]uint32: + fastpathTV.EncMapStringUint32V(v, e) + case *map[string]uint32: + fastpathTV.EncMapStringUint32V(*v, e) + case map[string]uint64: + fastpathTV.EncMapStringUint64V(v, e) + case *map[string]uint64: + fastpathTV.EncMapStringUint64V(*v, e) + case map[string]uintptr: + fastpathTV.EncMapStringUintptrV(v, e) + case *map[string]uintptr: + fastpathTV.EncMapStringUintptrV(*v, e) + case map[string]int: + fastpathTV.EncMapStringIntV(v, e) + case *map[string]int: + fastpathTV.EncMapStringIntV(*v, e) + case map[string]int8: + fastpathTV.EncMapStringInt8V(v, e) + case *map[string]int8: + fastpathTV.EncMapStringInt8V(*v, e) + case map[string]int16: + fastpathTV.EncMapStringInt16V(v, e) + case *map[string]int16: + fastpathTV.EncMapStringInt16V(*v, e) + case map[string]int32: + fastpathTV.EncMapStringInt32V(v, e) + case *map[string]int32: + fastpathTV.EncMapStringInt32V(*v, e) + case map[string]int64: + fastpathTV.EncMapStringInt64V(v, e) + case *map[string]int64: + fastpathTV.EncMapStringInt64V(*v, e) + case map[string]float32: + fastpathTV.EncMapStringFloat32V(v, e) + case *map[string]float32: + fastpathTV.EncMapStringFloat32V(*v, e) + case 
map[string]float64: + fastpathTV.EncMapStringFloat64V(v, e) + case *map[string]float64: + fastpathTV.EncMapStringFloat64V(*v, e) + case map[string]bool: + fastpathTV.EncMapStringBoolV(v, e) + case *map[string]bool: + fastpathTV.EncMapStringBoolV(*v, e) + case map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(v, e) + case *map[float32]interface{}: + fastpathTV.EncMapFloat32IntfV(*v, e) + case map[float32]string: + fastpathTV.EncMapFloat32StringV(v, e) + case *map[float32]string: + fastpathTV.EncMapFloat32StringV(*v, e) + case map[float32]uint: + fastpathTV.EncMapFloat32UintV(v, e) + case *map[float32]uint: + fastpathTV.EncMapFloat32UintV(*v, e) + case map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(v, e) + case *map[float32]uint8: + fastpathTV.EncMapFloat32Uint8V(*v, e) + case map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(v, e) + case *map[float32]uint16: + fastpathTV.EncMapFloat32Uint16V(*v, e) + case map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(v, e) + case *map[float32]uint32: + fastpathTV.EncMapFloat32Uint32V(*v, e) + case map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(v, e) + case *map[float32]uint64: + fastpathTV.EncMapFloat32Uint64V(*v, e) + case map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(v, e) + case *map[float32]uintptr: + fastpathTV.EncMapFloat32UintptrV(*v, e) + case map[float32]int: + fastpathTV.EncMapFloat32IntV(v, e) + case *map[float32]int: + fastpathTV.EncMapFloat32IntV(*v, e) + case map[float32]int8: + fastpathTV.EncMapFloat32Int8V(v, e) + case *map[float32]int8: + fastpathTV.EncMapFloat32Int8V(*v, e) + case map[float32]int16: + fastpathTV.EncMapFloat32Int16V(v, e) + case *map[float32]int16: + fastpathTV.EncMapFloat32Int16V(*v, e) + case map[float32]int32: + fastpathTV.EncMapFloat32Int32V(v, e) + case *map[float32]int32: + fastpathTV.EncMapFloat32Int32V(*v, e) + case map[float32]int64: + fastpathTV.EncMapFloat32Int64V(v, e) + case *map[float32]int64: + fastpathTV.EncMapFloat32Int64V(*v, e) + case map[float32]float32: + fastpathTV.EncMapFloat32Float32V(v, e) + case *map[float32]float32: + fastpathTV.EncMapFloat32Float32V(*v, e) + case map[float32]float64: + fastpathTV.EncMapFloat32Float64V(v, e) + case *map[float32]float64: + fastpathTV.EncMapFloat32Float64V(*v, e) + case map[float32]bool: + fastpathTV.EncMapFloat32BoolV(v, e) + case *map[float32]bool: + fastpathTV.EncMapFloat32BoolV(*v, e) + case map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(v, e) + case *map[float64]interface{}: + fastpathTV.EncMapFloat64IntfV(*v, e) + case map[float64]string: + fastpathTV.EncMapFloat64StringV(v, e) + case *map[float64]string: + fastpathTV.EncMapFloat64StringV(*v, e) + case map[float64]uint: + fastpathTV.EncMapFloat64UintV(v, e) + case *map[float64]uint: + fastpathTV.EncMapFloat64UintV(*v, e) + case map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(v, e) + case *map[float64]uint8: + fastpathTV.EncMapFloat64Uint8V(*v, e) + case map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(v, e) + case *map[float64]uint16: + fastpathTV.EncMapFloat64Uint16V(*v, e) + case map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(v, e) + case *map[float64]uint32: + fastpathTV.EncMapFloat64Uint32V(*v, e) + case map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(v, e) + case *map[float64]uint64: + fastpathTV.EncMapFloat64Uint64V(*v, e) + case map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(v, e) + case *map[float64]uintptr: + fastpathTV.EncMapFloat64UintptrV(*v, e) + case map[float64]int: + fastpathTV.EncMapFloat64IntV(v, e) + case 
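Every type in this switch appears twice: once by value and once behind a pointer, with the pointer arm simply dereferencing (*v) into the same ...V worker, so one body serves both shapes. A compact sketch of that pairing, with invented names:

package main

import "fmt"

// encStrings stands in for a fastpathTV.Enc...V worker: one body serves
// both the value case and the dereferenced pointer case.
func encStrings(v []string) { fmt.Println("encoding", len(v), "strings") }

// encodeFast mirrors the value/pointer pairing of fastpathEncodeTypeSwitch.
func encodeFast(iv interface{}) bool {
	switch v := iv.(type) {
	case []string:
		encStrings(v)
	case *[]string:
		encStrings(*v) // pointer arm reuses the value worker
	default:
		return false
	}
	return true
}

func main() {
	s := []string{"a", "b"}
	fmt.Println(encodeFast(s), encodeFast(&s)) // both hit the fast path
}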
*map[float64]int: + fastpathTV.EncMapFloat64IntV(*v, e) + case map[float64]int8: + fastpathTV.EncMapFloat64Int8V(v, e) + case *map[float64]int8: + fastpathTV.EncMapFloat64Int8V(*v, e) + case map[float64]int16: + fastpathTV.EncMapFloat64Int16V(v, e) + case *map[float64]int16: + fastpathTV.EncMapFloat64Int16V(*v, e) + case map[float64]int32: + fastpathTV.EncMapFloat64Int32V(v, e) + case *map[float64]int32: + fastpathTV.EncMapFloat64Int32V(*v, e) + case map[float64]int64: + fastpathTV.EncMapFloat64Int64V(v, e) + case *map[float64]int64: + fastpathTV.EncMapFloat64Int64V(*v, e) + case map[float64]float32: + fastpathTV.EncMapFloat64Float32V(v, e) + case *map[float64]float32: + fastpathTV.EncMapFloat64Float32V(*v, e) + case map[float64]float64: + fastpathTV.EncMapFloat64Float64V(v, e) + case *map[float64]float64: + fastpathTV.EncMapFloat64Float64V(*v, e) + case map[float64]bool: + fastpathTV.EncMapFloat64BoolV(v, e) + case *map[float64]bool: + fastpathTV.EncMapFloat64BoolV(*v, e) + case map[uint]interface{}: + fastpathTV.EncMapUintIntfV(v, e) + case *map[uint]interface{}: + fastpathTV.EncMapUintIntfV(*v, e) + case map[uint]string: + fastpathTV.EncMapUintStringV(v, e) + case *map[uint]string: + fastpathTV.EncMapUintStringV(*v, e) + case map[uint]uint: + fastpathTV.EncMapUintUintV(v, e) + case *map[uint]uint: + fastpathTV.EncMapUintUintV(*v, e) + case map[uint]uint8: + fastpathTV.EncMapUintUint8V(v, e) + case *map[uint]uint8: + fastpathTV.EncMapUintUint8V(*v, e) + case map[uint]uint16: + fastpathTV.EncMapUintUint16V(v, e) + case *map[uint]uint16: + fastpathTV.EncMapUintUint16V(*v, e) + case map[uint]uint32: + fastpathTV.EncMapUintUint32V(v, e) + case *map[uint]uint32: + fastpathTV.EncMapUintUint32V(*v, e) + case map[uint]uint64: + fastpathTV.EncMapUintUint64V(v, e) + case *map[uint]uint64: + fastpathTV.EncMapUintUint64V(*v, e) + case map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(v, e) + case *map[uint]uintptr: + fastpathTV.EncMapUintUintptrV(*v, e) + case map[uint]int: + fastpathTV.EncMapUintIntV(v, e) + case *map[uint]int: + fastpathTV.EncMapUintIntV(*v, e) + case map[uint]int8: + fastpathTV.EncMapUintInt8V(v, e) + case *map[uint]int8: + fastpathTV.EncMapUintInt8V(*v, e) + case map[uint]int16: + fastpathTV.EncMapUintInt16V(v, e) + case *map[uint]int16: + fastpathTV.EncMapUintInt16V(*v, e) + case map[uint]int32: + fastpathTV.EncMapUintInt32V(v, e) + case *map[uint]int32: + fastpathTV.EncMapUintInt32V(*v, e) + case map[uint]int64: + fastpathTV.EncMapUintInt64V(v, e) + case *map[uint]int64: + fastpathTV.EncMapUintInt64V(*v, e) + case map[uint]float32: + fastpathTV.EncMapUintFloat32V(v, e) + case *map[uint]float32: + fastpathTV.EncMapUintFloat32V(*v, e) + case map[uint]float64: + fastpathTV.EncMapUintFloat64V(v, e) + case *map[uint]float64: + fastpathTV.EncMapUintFloat64V(*v, e) + case map[uint]bool: + fastpathTV.EncMapUintBoolV(v, e) + case *map[uint]bool: + fastpathTV.EncMapUintBoolV(*v, e) + case map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(v, e) + case *map[uint8]interface{}: + fastpathTV.EncMapUint8IntfV(*v, e) + case map[uint8]string: + fastpathTV.EncMapUint8StringV(v, e) + case *map[uint8]string: + fastpathTV.EncMapUint8StringV(*v, e) + case map[uint8]uint: + fastpathTV.EncMapUint8UintV(v, e) + case *map[uint8]uint: + fastpathTV.EncMapUint8UintV(*v, e) + case map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(v, e) + case *map[uint8]uint8: + fastpathTV.EncMapUint8Uint8V(*v, e) + case map[uint8]uint16: + fastpathTV.EncMapUint8Uint16V(v, e) + case *map[uint8]uint16: + 
fastpathTV.EncMapUint8Uint16V(*v, e) + case map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(v, e) + case *map[uint8]uint32: + fastpathTV.EncMapUint8Uint32V(*v, e) + case map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(v, e) + case *map[uint8]uint64: + fastpathTV.EncMapUint8Uint64V(*v, e) + case map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(v, e) + case *map[uint8]uintptr: + fastpathTV.EncMapUint8UintptrV(*v, e) + case map[uint8]int: + fastpathTV.EncMapUint8IntV(v, e) + case *map[uint8]int: + fastpathTV.EncMapUint8IntV(*v, e) + case map[uint8]int8: + fastpathTV.EncMapUint8Int8V(v, e) + case *map[uint8]int8: + fastpathTV.EncMapUint8Int8V(*v, e) + case map[uint8]int16: + fastpathTV.EncMapUint8Int16V(v, e) + case *map[uint8]int16: + fastpathTV.EncMapUint8Int16V(*v, e) + case map[uint8]int32: + fastpathTV.EncMapUint8Int32V(v, e) + case *map[uint8]int32: + fastpathTV.EncMapUint8Int32V(*v, e) + case map[uint8]int64: + fastpathTV.EncMapUint8Int64V(v, e) + case *map[uint8]int64: + fastpathTV.EncMapUint8Int64V(*v, e) + case map[uint8]float32: + fastpathTV.EncMapUint8Float32V(v, e) + case *map[uint8]float32: + fastpathTV.EncMapUint8Float32V(*v, e) + case map[uint8]float64: + fastpathTV.EncMapUint8Float64V(v, e) + case *map[uint8]float64: + fastpathTV.EncMapUint8Float64V(*v, e) + case map[uint8]bool: + fastpathTV.EncMapUint8BoolV(v, e) + case *map[uint8]bool: + fastpathTV.EncMapUint8BoolV(*v, e) + case map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(v, e) + case *map[uint16]interface{}: + fastpathTV.EncMapUint16IntfV(*v, e) + case map[uint16]string: + fastpathTV.EncMapUint16StringV(v, e) + case *map[uint16]string: + fastpathTV.EncMapUint16StringV(*v, e) + case map[uint16]uint: + fastpathTV.EncMapUint16UintV(v, e) + case *map[uint16]uint: + fastpathTV.EncMapUint16UintV(*v, e) + case map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(v, e) + case *map[uint16]uint8: + fastpathTV.EncMapUint16Uint8V(*v, e) + case map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(v, e) + case *map[uint16]uint16: + fastpathTV.EncMapUint16Uint16V(*v, e) + case map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(v, e) + case *map[uint16]uint32: + fastpathTV.EncMapUint16Uint32V(*v, e) + case map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(v, e) + case *map[uint16]uint64: + fastpathTV.EncMapUint16Uint64V(*v, e) + case map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(v, e) + case *map[uint16]uintptr: + fastpathTV.EncMapUint16UintptrV(*v, e) + case map[uint16]int: + fastpathTV.EncMapUint16IntV(v, e) + case *map[uint16]int: + fastpathTV.EncMapUint16IntV(*v, e) + case map[uint16]int8: + fastpathTV.EncMapUint16Int8V(v, e) + case *map[uint16]int8: + fastpathTV.EncMapUint16Int8V(*v, e) + case map[uint16]int16: + fastpathTV.EncMapUint16Int16V(v, e) + case *map[uint16]int16: + fastpathTV.EncMapUint16Int16V(*v, e) + case map[uint16]int32: + fastpathTV.EncMapUint16Int32V(v, e) + case *map[uint16]int32: + fastpathTV.EncMapUint16Int32V(*v, e) + case map[uint16]int64: + fastpathTV.EncMapUint16Int64V(v, e) + case *map[uint16]int64: + fastpathTV.EncMapUint16Int64V(*v, e) + case map[uint16]float32: + fastpathTV.EncMapUint16Float32V(v, e) + case *map[uint16]float32: + fastpathTV.EncMapUint16Float32V(*v, e) + case map[uint16]float64: + fastpathTV.EncMapUint16Float64V(v, e) + case *map[uint16]float64: + fastpathTV.EncMapUint16Float64V(*v, e) + case map[uint16]bool: + fastpathTV.EncMapUint16BoolV(v, e) + case *map[uint16]bool: + fastpathTV.EncMapUint16BoolV(*v, e) + case map[uint32]interface{}: + 
fastpathTV.EncMapUint32IntfV(v, e) + case *map[uint32]interface{}: + fastpathTV.EncMapUint32IntfV(*v, e) + case map[uint32]string: + fastpathTV.EncMapUint32StringV(v, e) + case *map[uint32]string: + fastpathTV.EncMapUint32StringV(*v, e) + case map[uint32]uint: + fastpathTV.EncMapUint32UintV(v, e) + case *map[uint32]uint: + fastpathTV.EncMapUint32UintV(*v, e) + case map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(v, e) + case *map[uint32]uint8: + fastpathTV.EncMapUint32Uint8V(*v, e) + case map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(v, e) + case *map[uint32]uint16: + fastpathTV.EncMapUint32Uint16V(*v, e) + case map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(v, e) + case *map[uint32]uint32: + fastpathTV.EncMapUint32Uint32V(*v, e) + case map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(v, e) + case *map[uint32]uint64: + fastpathTV.EncMapUint32Uint64V(*v, e) + case map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(v, e) + case *map[uint32]uintptr: + fastpathTV.EncMapUint32UintptrV(*v, e) + case map[uint32]int: + fastpathTV.EncMapUint32IntV(v, e) + case *map[uint32]int: + fastpathTV.EncMapUint32IntV(*v, e) + case map[uint32]int8: + fastpathTV.EncMapUint32Int8V(v, e) + case *map[uint32]int8: + fastpathTV.EncMapUint32Int8V(*v, e) + case map[uint32]int16: + fastpathTV.EncMapUint32Int16V(v, e) + case *map[uint32]int16: + fastpathTV.EncMapUint32Int16V(*v, e) + case map[uint32]int32: + fastpathTV.EncMapUint32Int32V(v, e) + case *map[uint32]int32: + fastpathTV.EncMapUint32Int32V(*v, e) + case map[uint32]int64: + fastpathTV.EncMapUint32Int64V(v, e) + case *map[uint32]int64: + fastpathTV.EncMapUint32Int64V(*v, e) + case map[uint32]float32: + fastpathTV.EncMapUint32Float32V(v, e) + case *map[uint32]float32: + fastpathTV.EncMapUint32Float32V(*v, e) + case map[uint32]float64: + fastpathTV.EncMapUint32Float64V(v, e) + case *map[uint32]float64: + fastpathTV.EncMapUint32Float64V(*v, e) + case map[uint32]bool: + fastpathTV.EncMapUint32BoolV(v, e) + case *map[uint32]bool: + fastpathTV.EncMapUint32BoolV(*v, e) + case map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(v, e) + case *map[uint64]interface{}: + fastpathTV.EncMapUint64IntfV(*v, e) + case map[uint64]string: + fastpathTV.EncMapUint64StringV(v, e) + case *map[uint64]string: + fastpathTV.EncMapUint64StringV(*v, e) + case map[uint64]uint: + fastpathTV.EncMapUint64UintV(v, e) + case *map[uint64]uint: + fastpathTV.EncMapUint64UintV(*v, e) + case map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(v, e) + case *map[uint64]uint8: + fastpathTV.EncMapUint64Uint8V(*v, e) + case map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(v, e) + case *map[uint64]uint16: + fastpathTV.EncMapUint64Uint16V(*v, e) + case map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(v, e) + case *map[uint64]uint32: + fastpathTV.EncMapUint64Uint32V(*v, e) + case map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(v, e) + case *map[uint64]uint64: + fastpathTV.EncMapUint64Uint64V(*v, e) + case map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(v, e) + case *map[uint64]uintptr: + fastpathTV.EncMapUint64UintptrV(*v, e) + case map[uint64]int: + fastpathTV.EncMapUint64IntV(v, e) + case *map[uint64]int: + fastpathTV.EncMapUint64IntV(*v, e) + case map[uint64]int8: + fastpathTV.EncMapUint64Int8V(v, e) + case *map[uint64]int8: + fastpathTV.EncMapUint64Int8V(*v, e) + case map[uint64]int16: + fastpathTV.EncMapUint64Int16V(v, e) + case *map[uint64]int16: + fastpathTV.EncMapUint64Int16V(*v, e) + case map[uint64]int32: + fastpathTV.EncMapUint64Int32V(v, e) + case 
*map[uint64]int32: + fastpathTV.EncMapUint64Int32V(*v, e) + case map[uint64]int64: + fastpathTV.EncMapUint64Int64V(v, e) + case *map[uint64]int64: + fastpathTV.EncMapUint64Int64V(*v, e) + case map[uint64]float32: + fastpathTV.EncMapUint64Float32V(v, e) + case *map[uint64]float32: + fastpathTV.EncMapUint64Float32V(*v, e) + case map[uint64]float64: + fastpathTV.EncMapUint64Float64V(v, e) + case *map[uint64]float64: + fastpathTV.EncMapUint64Float64V(*v, e) + case map[uint64]bool: + fastpathTV.EncMapUint64BoolV(v, e) + case *map[uint64]bool: + fastpathTV.EncMapUint64BoolV(*v, e) + case map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(v, e) + case *map[uintptr]interface{}: + fastpathTV.EncMapUintptrIntfV(*v, e) + case map[uintptr]string: + fastpathTV.EncMapUintptrStringV(v, e) + case *map[uintptr]string: + fastpathTV.EncMapUintptrStringV(*v, e) + case map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(v, e) + case *map[uintptr]uint: + fastpathTV.EncMapUintptrUintV(*v, e) + case map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(v, e) + case *map[uintptr]uint8: + fastpathTV.EncMapUintptrUint8V(*v, e) + case map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(v, e) + case *map[uintptr]uint16: + fastpathTV.EncMapUintptrUint16V(*v, e) + case map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(v, e) + case *map[uintptr]uint32: + fastpathTV.EncMapUintptrUint32V(*v, e) + case map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(v, e) + case *map[uintptr]uint64: + fastpathTV.EncMapUintptrUint64V(*v, e) + case map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(v, e) + case *map[uintptr]uintptr: + fastpathTV.EncMapUintptrUintptrV(*v, e) + case map[uintptr]int: + fastpathTV.EncMapUintptrIntV(v, e) + case *map[uintptr]int: + fastpathTV.EncMapUintptrIntV(*v, e) + case map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(v, e) + case *map[uintptr]int8: + fastpathTV.EncMapUintptrInt8V(*v, e) + case map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(v, e) + case *map[uintptr]int16: + fastpathTV.EncMapUintptrInt16V(*v, e) + case map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(v, e) + case *map[uintptr]int32: + fastpathTV.EncMapUintptrInt32V(*v, e) + case map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(v, e) + case *map[uintptr]int64: + fastpathTV.EncMapUintptrInt64V(*v, e) + case map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(v, e) + case *map[uintptr]float32: + fastpathTV.EncMapUintptrFloat32V(*v, e) + case map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(v, e) + case *map[uintptr]float64: + fastpathTV.EncMapUintptrFloat64V(*v, e) + case map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(v, e) + case *map[uintptr]bool: + fastpathTV.EncMapUintptrBoolV(*v, e) + case map[int]interface{}: + fastpathTV.EncMapIntIntfV(v, e) + case *map[int]interface{}: + fastpathTV.EncMapIntIntfV(*v, e) + case map[int]string: + fastpathTV.EncMapIntStringV(v, e) + case *map[int]string: + fastpathTV.EncMapIntStringV(*v, e) + case map[int]uint: + fastpathTV.EncMapIntUintV(v, e) + case *map[int]uint: + fastpathTV.EncMapIntUintV(*v, e) + case map[int]uint8: + fastpathTV.EncMapIntUint8V(v, e) + case *map[int]uint8: + fastpathTV.EncMapIntUint8V(*v, e) + case map[int]uint16: + fastpathTV.EncMapIntUint16V(v, e) + case *map[int]uint16: + fastpathTV.EncMapIntUint16V(*v, e) + case map[int]uint32: + fastpathTV.EncMapIntUint32V(v, e) + case *map[int]uint32: + fastpathTV.EncMapIntUint32V(*v, e) + case map[int]uint64: + fastpathTV.EncMapIntUint64V(v, e) + case *map[int]uint64: + fastpathTV.EncMapIntUint64V(*v, 
e) + case map[int]uintptr: + fastpathTV.EncMapIntUintptrV(v, e) + case *map[int]uintptr: + fastpathTV.EncMapIntUintptrV(*v, e) + case map[int]int: + fastpathTV.EncMapIntIntV(v, e) + case *map[int]int: + fastpathTV.EncMapIntIntV(*v, e) + case map[int]int8: + fastpathTV.EncMapIntInt8V(v, e) + case *map[int]int8: + fastpathTV.EncMapIntInt8V(*v, e) + case map[int]int16: + fastpathTV.EncMapIntInt16V(v, e) + case *map[int]int16: + fastpathTV.EncMapIntInt16V(*v, e) + case map[int]int32: + fastpathTV.EncMapIntInt32V(v, e) + case *map[int]int32: + fastpathTV.EncMapIntInt32V(*v, e) + case map[int]int64: + fastpathTV.EncMapIntInt64V(v, e) + case *map[int]int64: + fastpathTV.EncMapIntInt64V(*v, e) + case map[int]float32: + fastpathTV.EncMapIntFloat32V(v, e) + case *map[int]float32: + fastpathTV.EncMapIntFloat32V(*v, e) + case map[int]float64: + fastpathTV.EncMapIntFloat64V(v, e) + case *map[int]float64: + fastpathTV.EncMapIntFloat64V(*v, e) + case map[int]bool: + fastpathTV.EncMapIntBoolV(v, e) + case *map[int]bool: + fastpathTV.EncMapIntBoolV(*v, e) + case map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(v, e) + case *map[int8]interface{}: + fastpathTV.EncMapInt8IntfV(*v, e) + case map[int8]string: + fastpathTV.EncMapInt8StringV(v, e) + case *map[int8]string: + fastpathTV.EncMapInt8StringV(*v, e) + case map[int8]uint: + fastpathTV.EncMapInt8UintV(v, e) + case *map[int8]uint: + fastpathTV.EncMapInt8UintV(*v, e) + case map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(v, e) + case *map[int8]uint8: + fastpathTV.EncMapInt8Uint8V(*v, e) + case map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(v, e) + case *map[int8]uint16: + fastpathTV.EncMapInt8Uint16V(*v, e) + case map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(v, e) + case *map[int8]uint32: + fastpathTV.EncMapInt8Uint32V(*v, e) + case map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(v, e) + case *map[int8]uint64: + fastpathTV.EncMapInt8Uint64V(*v, e) + case map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(v, e) + case *map[int8]uintptr: + fastpathTV.EncMapInt8UintptrV(*v, e) + case map[int8]int: + fastpathTV.EncMapInt8IntV(v, e) + case *map[int8]int: + fastpathTV.EncMapInt8IntV(*v, e) + case map[int8]int8: + fastpathTV.EncMapInt8Int8V(v, e) + case *map[int8]int8: + fastpathTV.EncMapInt8Int8V(*v, e) + case map[int8]int16: + fastpathTV.EncMapInt8Int16V(v, e) + case *map[int8]int16: + fastpathTV.EncMapInt8Int16V(*v, e) + case map[int8]int32: + fastpathTV.EncMapInt8Int32V(v, e) + case *map[int8]int32: + fastpathTV.EncMapInt8Int32V(*v, e) + case map[int8]int64: + fastpathTV.EncMapInt8Int64V(v, e) + case *map[int8]int64: + fastpathTV.EncMapInt8Int64V(*v, e) + case map[int8]float32: + fastpathTV.EncMapInt8Float32V(v, e) + case *map[int8]float32: + fastpathTV.EncMapInt8Float32V(*v, e) + case map[int8]float64: + fastpathTV.EncMapInt8Float64V(v, e) + case *map[int8]float64: + fastpathTV.EncMapInt8Float64V(*v, e) + case map[int8]bool: + fastpathTV.EncMapInt8BoolV(v, e) + case *map[int8]bool: + fastpathTV.EncMapInt8BoolV(*v, e) + case map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(v, e) + case *map[int16]interface{}: + fastpathTV.EncMapInt16IntfV(*v, e) + case map[int16]string: + fastpathTV.EncMapInt16StringV(v, e) + case *map[int16]string: + fastpathTV.EncMapInt16StringV(*v, e) + case map[int16]uint: + fastpathTV.EncMapInt16UintV(v, e) + case *map[int16]uint: + fastpathTV.EncMapInt16UintV(*v, e) + case map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(v, e) + case *map[int16]uint8: + fastpathTV.EncMapInt16Uint8V(*v, e) + case map[int16]uint16: + 
fastpathTV.EncMapInt16Uint16V(v, e) + case *map[int16]uint16: + fastpathTV.EncMapInt16Uint16V(*v, e) + case map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(v, e) + case *map[int16]uint32: + fastpathTV.EncMapInt16Uint32V(*v, e) + case map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(v, e) + case *map[int16]uint64: + fastpathTV.EncMapInt16Uint64V(*v, e) + case map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(v, e) + case *map[int16]uintptr: + fastpathTV.EncMapInt16UintptrV(*v, e) + case map[int16]int: + fastpathTV.EncMapInt16IntV(v, e) + case *map[int16]int: + fastpathTV.EncMapInt16IntV(*v, e) + case map[int16]int8: + fastpathTV.EncMapInt16Int8V(v, e) + case *map[int16]int8: + fastpathTV.EncMapInt16Int8V(*v, e) + case map[int16]int16: + fastpathTV.EncMapInt16Int16V(v, e) + case *map[int16]int16: + fastpathTV.EncMapInt16Int16V(*v, e) + case map[int16]int32: + fastpathTV.EncMapInt16Int32V(v, e) + case *map[int16]int32: + fastpathTV.EncMapInt16Int32V(*v, e) + case map[int16]int64: + fastpathTV.EncMapInt16Int64V(v, e) + case *map[int16]int64: + fastpathTV.EncMapInt16Int64V(*v, e) + case map[int16]float32: + fastpathTV.EncMapInt16Float32V(v, e) + case *map[int16]float32: + fastpathTV.EncMapInt16Float32V(*v, e) + case map[int16]float64: + fastpathTV.EncMapInt16Float64V(v, e) + case *map[int16]float64: + fastpathTV.EncMapInt16Float64V(*v, e) + case map[int16]bool: + fastpathTV.EncMapInt16BoolV(v, e) + case *map[int16]bool: + fastpathTV.EncMapInt16BoolV(*v, e) + case map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(v, e) + case *map[int32]interface{}: + fastpathTV.EncMapInt32IntfV(*v, e) + case map[int32]string: + fastpathTV.EncMapInt32StringV(v, e) + case *map[int32]string: + fastpathTV.EncMapInt32StringV(*v, e) + case map[int32]uint: + fastpathTV.EncMapInt32UintV(v, e) + case *map[int32]uint: + fastpathTV.EncMapInt32UintV(*v, e) + case map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(v, e) + case *map[int32]uint8: + fastpathTV.EncMapInt32Uint8V(*v, e) + case map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(v, e) + case *map[int32]uint16: + fastpathTV.EncMapInt32Uint16V(*v, e) + case map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(v, e) + case *map[int32]uint32: + fastpathTV.EncMapInt32Uint32V(*v, e) + case map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(v, e) + case *map[int32]uint64: + fastpathTV.EncMapInt32Uint64V(*v, e) + case map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(v, e) + case *map[int32]uintptr: + fastpathTV.EncMapInt32UintptrV(*v, e) + case map[int32]int: + fastpathTV.EncMapInt32IntV(v, e) + case *map[int32]int: + fastpathTV.EncMapInt32IntV(*v, e) + case map[int32]int8: + fastpathTV.EncMapInt32Int8V(v, e) + case *map[int32]int8: + fastpathTV.EncMapInt32Int8V(*v, e) + case map[int32]int16: + fastpathTV.EncMapInt32Int16V(v, e) + case *map[int32]int16: + fastpathTV.EncMapInt32Int16V(*v, e) + case map[int32]int32: + fastpathTV.EncMapInt32Int32V(v, e) + case *map[int32]int32: + fastpathTV.EncMapInt32Int32V(*v, e) + case map[int32]int64: + fastpathTV.EncMapInt32Int64V(v, e) + case *map[int32]int64: + fastpathTV.EncMapInt32Int64V(*v, e) + case map[int32]float32: + fastpathTV.EncMapInt32Float32V(v, e) + case *map[int32]float32: + fastpathTV.EncMapInt32Float32V(*v, e) + case map[int32]float64: + fastpathTV.EncMapInt32Float64V(v, e) + case *map[int32]float64: + fastpathTV.EncMapInt32Float64V(*v, e) + case map[int32]bool: + fastpathTV.EncMapInt32BoolV(v, e) + case *map[int32]bool: + fastpathTV.EncMapInt32BoolV(*v, e) + case map[int64]interface{}: + 
fastpathTV.EncMapInt64IntfV(v, e) + case *map[int64]interface{}: + fastpathTV.EncMapInt64IntfV(*v, e) + case map[int64]string: + fastpathTV.EncMapInt64StringV(v, e) + case *map[int64]string: + fastpathTV.EncMapInt64StringV(*v, e) + case map[int64]uint: + fastpathTV.EncMapInt64UintV(v, e) + case *map[int64]uint: + fastpathTV.EncMapInt64UintV(*v, e) + case map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(v, e) + case *map[int64]uint8: + fastpathTV.EncMapInt64Uint8V(*v, e) + case map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(v, e) + case *map[int64]uint16: + fastpathTV.EncMapInt64Uint16V(*v, e) + case map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(v, e) + case *map[int64]uint32: + fastpathTV.EncMapInt64Uint32V(*v, e) + case map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(v, e) + case *map[int64]uint64: + fastpathTV.EncMapInt64Uint64V(*v, e) + case map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(v, e) + case *map[int64]uintptr: + fastpathTV.EncMapInt64UintptrV(*v, e) + case map[int64]int: + fastpathTV.EncMapInt64IntV(v, e) + case *map[int64]int: + fastpathTV.EncMapInt64IntV(*v, e) + case map[int64]int8: + fastpathTV.EncMapInt64Int8V(v, e) + case *map[int64]int8: + fastpathTV.EncMapInt64Int8V(*v, e) + case map[int64]int16: + fastpathTV.EncMapInt64Int16V(v, e) + case *map[int64]int16: + fastpathTV.EncMapInt64Int16V(*v, e) + case map[int64]int32: + fastpathTV.EncMapInt64Int32V(v, e) + case *map[int64]int32: + fastpathTV.EncMapInt64Int32V(*v, e) + case map[int64]int64: + fastpathTV.EncMapInt64Int64V(v, e) + case *map[int64]int64: + fastpathTV.EncMapInt64Int64V(*v, e) + case map[int64]float32: + fastpathTV.EncMapInt64Float32V(v, e) + case *map[int64]float32: + fastpathTV.EncMapInt64Float32V(*v, e) + case map[int64]float64: + fastpathTV.EncMapInt64Float64V(v, e) + case *map[int64]float64: + fastpathTV.EncMapInt64Float64V(*v, e) + case map[int64]bool: + fastpathTV.EncMapInt64BoolV(v, e) + case *map[int64]bool: + fastpathTV.EncMapInt64BoolV(*v, e) + case map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(v, e) + case *map[bool]interface{}: + fastpathTV.EncMapBoolIntfV(*v, e) + case map[bool]string: + fastpathTV.EncMapBoolStringV(v, e) + case *map[bool]string: + fastpathTV.EncMapBoolStringV(*v, e) + case map[bool]uint: + fastpathTV.EncMapBoolUintV(v, e) + case *map[bool]uint: + fastpathTV.EncMapBoolUintV(*v, e) + case map[bool]uint8: + fastpathTV.EncMapBoolUint8V(v, e) + case *map[bool]uint8: + fastpathTV.EncMapBoolUint8V(*v, e) + case map[bool]uint16: + fastpathTV.EncMapBoolUint16V(v, e) + case *map[bool]uint16: + fastpathTV.EncMapBoolUint16V(*v, e) + case map[bool]uint32: + fastpathTV.EncMapBoolUint32V(v, e) + case *map[bool]uint32: + fastpathTV.EncMapBoolUint32V(*v, e) + case map[bool]uint64: + fastpathTV.EncMapBoolUint64V(v, e) + case *map[bool]uint64: + fastpathTV.EncMapBoolUint64V(*v, e) + case map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(v, e) + case *map[bool]uintptr: + fastpathTV.EncMapBoolUintptrV(*v, e) + case map[bool]int: + fastpathTV.EncMapBoolIntV(v, e) + case *map[bool]int: + fastpathTV.EncMapBoolIntV(*v, e) + case map[bool]int8: + fastpathTV.EncMapBoolInt8V(v, e) + case *map[bool]int8: + fastpathTV.EncMapBoolInt8V(*v, e) + case map[bool]int16: + fastpathTV.EncMapBoolInt16V(v, e) + case *map[bool]int16: + fastpathTV.EncMapBoolInt16V(*v, e) + case map[bool]int32: + fastpathTV.EncMapBoolInt32V(v, e) + case *map[bool]int32: + fastpathTV.EncMapBoolInt32V(*v, e) + case map[bool]int64: + fastpathTV.EncMapBoolInt64V(v, e) + case *map[bool]int64: + 
fastpathTV.EncMapBoolInt64V(*v, e) + case map[bool]float32: + fastpathTV.EncMapBoolFloat32V(v, e) + case *map[bool]float32: + fastpathTV.EncMapBoolFloat32V(*v, e) + case map[bool]float64: + fastpathTV.EncMapBoolFloat64V(v, e) + case *map[bool]float64: + fastpathTV.EncMapBoolFloat64V(*v, e) + case map[bool]bool: + fastpathTV.EncMapBoolBoolV(v, e) + case *map[bool]bool: + fastpathTV.EncMapBoolBoolV(*v, e) + + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions + +func (e *Encoder) fastpathEncSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntfV(rv2i(rv).([]interface{}), e) + } else { + fastpathTV.EncSliceIntfV(rv2i(rv).([]interface{}), e) + } +} +func (_ fastpathT) EncSliceIntfV(v []interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + e.encode(v2) + } + } else { + for _, v2 := range v { + e.encode(v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceIntfV(v []interface{}, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } else { + for _, v2 := range v { + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceStringR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceStringV(rv2i(rv).([]string), e) + } else { + fastpathTV.EncSliceStringV(rv2i(rv).([]string), e) + } +} +func (_ fastpathT) EncSliceStringV(v []string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeString(cUTF8, v2) + } + } else { + for _, v2 := range v { + ee.EncodeString(cUTF8, v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceStringV(v []string, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeString(cUTF8, v2) + } + } else { + for _, v2 := range v { + ee.EncodeString(cUTF8, v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat32V(rv2i(rv).([]float32), e) + } else { + fastpathTV.EncSliceFloat32V(rv2i(rv).([]float32), e) + } +} +func (_ fastpathT) EncSliceFloat32V(v []float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeFloat32(v2) + } + } else { + for _, v2 := range v { + ee.EncodeFloat32(v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceFloat32V(v []float32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + 
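The default: arm that closes the switch (with its `_ = v` workaround for golang/go#12927) is what makes the whole table optional: a miss returns false, and the caller falls back to the generic reflect-driven encoder. A sketch of that contract, where genericEncode is an invented stand-in for the library's reflection path:

package main

import (
	"fmt"
	"reflect"
)

func fastEncode(iv interface{}) bool {
	switch v := iv.(type) {
	case []int:
		fmt.Println("fast []int:", v)
	default:
		_ = v // mirrors the vendored workaround for golang/go#12927
		return false
	}
	return true
}

// genericEncode is an invented stand-in for the reflect-based slow path.
func genericEncode(iv interface{}) {
	fmt.Println("slow path via reflection:", reflect.TypeOf(iv))
}

func encode(iv interface{}) {
	if !fastEncode(iv) {
		genericEncode(iv)
	}
}

func main() {
	encode([]int{1, 2})               // fast path hit
	encode(map[rune]string{'k': "v"}) // miss: falls back
}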
for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeFloat32(v2) + } + } else { + for _, v2 := range v { + ee.EncodeFloat32(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceFloat64V(rv2i(rv).([]float64), e) + } else { + fastpathTV.EncSliceFloat64V(rv2i(rv).([]float64), e) + } +} +func (_ fastpathT) EncSliceFloat64V(v []float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeFloat64(v2) + } + } else { + for _, v2 := range v { + ee.EncodeFloat64(v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceFloat64V(v []float64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeFloat64(v2) + } + } else { + for _, v2 := range v { + ee.EncodeFloat64(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintV(rv2i(rv).([]uint), e) + } else { + fastpathTV.EncSliceUintV(rv2i(rv).([]uint), e) + } +} +func (_ fastpathT) EncSliceUintV(v []uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUintV(v []uint, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint8V(rv2i(rv).([]uint8), e) + } else { + fastpathTV.EncSliceUint8V(rv2i(rv).([]uint8), e) + } +} +func (_ fastpathT) EncSliceUint8V(v []uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint8V(v []uint8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + 
fastpathTV.EncAsMapSliceUint16V(rv2i(rv).([]uint16), e) + } else { + fastpathTV.EncSliceUint16V(rv2i(rv).([]uint16), e) + } +} +func (_ fastpathT) EncSliceUint16V(v []uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint16V(v []uint16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint32V(rv2i(rv).([]uint32), e) + } else { + fastpathTV.EncSliceUint32V(rv2i(rv).([]uint32), e) + } +} +func (_ fastpathT) EncSliceUint32V(v []uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint32V(v []uint32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUint64V(rv2i(rv).([]uint64), e) + } else { + fastpathTV.EncSliceUint64V(rv2i(rv).([]uint64), e) + } +} +func (_ fastpathT) EncSliceUint64V(v []uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUint64V(v []uint64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeUint(uint64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeUint(uint64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceUintptrV(rv2i(rv).([]uintptr), e) + } else { + fastpathTV.EncSliceUintptrV(rv2i(rv).([]uintptr), e) + } +} +func (_ fastpathT) EncSliceUintptrV(v []uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + e.encode(v2) + } + } else { + for _, v2 := range v { + e.encode(v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceUintptrV(v []uintptr, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + e.encode(v2) + } + } else { + for _, v2 := range v { + e.encode(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceIntR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceIntV(rv2i(rv).([]int), e) + } else { + fastpathTV.EncSliceIntV(rv2i(rv).([]int), e) + } +} +func (_ fastpathT) EncSliceIntV(v []int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceIntV(v []int, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt8V(rv2i(rv).([]int8), e) + } else { + fastpathTV.EncSliceInt8V(rv2i(rv).([]int8), e) + } +} +func (_ fastpathT) EncSliceInt8V(v []int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt8V(v []int8, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt16V(rv2i(rv).([]int16), e) + } else { + fastpathTV.EncSliceInt16V(rv2i(rv).([]int16), e) + } +} +func (_ fastpathT) EncSliceInt16V(v []int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt16V(v []int16, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + 
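Across these slice encoders, every integer width funnels into the same two scalar writers via a widening conversion (ee.EncodeInt(int64(v2)) and ee.EncodeUint(uint64(v2))), so a format backend only has to implement one signed and one unsigned encoder. A small illustration of the same funneling, with writeInt64 as an invented writer:

package main

import "fmt"

// writeInt64 is an invented stand-in for ee.EncodeInt: the single signed
// writer that every integer width is widened into.
func writeInt64(v int64) { fmt.Println("emit int64:", v) }

func encInt8s(vs []int8) {
	for _, v := range vs {
		writeInt64(int64(v)) // widen int8 -> int64
	}
}

func encInt16s(vs []int16) {
	for _, v := range vs {
		writeInt64(int64(v)) // widen int16 -> int64
	}
}

func main() {
	encInt8s([]int8{-1})
	encInt16s([]int16{300})
}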
return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt32V(rv2i(rv).([]int32), e) + } else { + fastpathTV.EncSliceInt32V(rv2i(rv).([]int32), e) + } +} +func (_ fastpathT) EncSliceInt32V(v []int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt32V(v []int32, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceInt64V(rv2i(rv).([]int64), e) + } else { + fastpathTV.EncSliceInt64V(rv2i(rv).([]int64), e) + } +} +func (_ fastpathT) EncSliceInt64V(v []int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceInt64V(v []int64, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeInt(int64(v2)) + } + } else { + for _, v2 := range v { + ee.EncodeInt(int64(v2)) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if f.ti.mbs { + fastpathTV.EncAsMapSliceBoolV(rv2i(rv).([]bool), e) + } else { + fastpathTV.EncSliceBoolV(rv2i(rv).([]bool), e) + } +} +func (_ fastpathT) EncSliceBoolV(v []bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteArrayStart(len(v)) + if esep { + for _, v2 := range v { + ee.WriteArrayElem() + ee.EncodeBool(v2) + } + } else { + for _, v2 := range v { + ee.EncodeBool(v2) + } + } + ee.WriteArrayEnd() +} +func (_ fastpathT) EncAsMapSliceBoolV(v []bool, e *Encoder) { + ee, esep := e.e, e.hh.hasElemSeparators() + if len(v)%2 == 1 { + e.errorf("mapBySlice requires even slice length, but got %v", len(v)) + return + } + ee.WriteMapStart(len(v) / 2) + if esep { + for j, v2 := range v { + if j%2 == 0 { + ee.WriteMapElemKey() + } else { + ee.WriteMapElemValue() + } + ee.EncodeBool(v2) + } + } else { + for _, v2 := range v { + ee.EncodeBool(v2) + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + 
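The EncAsMapSlice*V variants above implement the mapBySlice convention: an even-length slice [k1, v1, k2, v2, ...] is emitted as a map of len(v)/2 entries, with even indices written as keys and odd indices as values, and odd lengths rejected up front with the error seen in the hunk. The same invariant in plain form (a sketch against an ordinary Go map, not the vendored API):

package main

import "fmt"

// sliceAsMap applies the mapBySlice convention to a plain Go map so the
// alternating key/value layout is easy to see.
func sliceAsMap(v []interface{}) (map[interface{}]interface{}, error) {
	if len(v)%2 == 1 {
		return nil, fmt.Errorf("mapBySlice requires even slice length, but got %v", len(v))
	}
	m := make(map[interface{}]interface{}, len(v)/2)
	for j := 0; j < len(v); j += 2 {
		m[v[j]] = v[j+1] // even index: key, odd index: value
	}
	return m, nil
}

func main() {
	m, err := sliceAsMap([]interface{}{"a", 1, "b", 2})
	fmt.Println(m, err)
	_, err = sliceAsMap([]interface{}{"a"})
	fmt.Println(err) // odd length is an error
}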
fastpathTV.EncMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), e) +} +func (_ fastpathT) EncMapIntfIntfV(v map[interface{}]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfStringV(rv2i(rv).(map[interface{}]string), e) +} +func (_ fastpathT) EncMapIntfStringV(v map[interface{}]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintV(rv2i(rv).(map[interface{}]uint), e) +} +func (_ fastpathT) EncMapIntfUintV(v map[interface{}]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), e) +} 
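When e.h.Canonical is set, these map encoders first encode every key into one scratch buffer (mksv), keep each key's encoded bytes alongside the original key (the bytesI pairs), sort by the encoded form, and only then emit the key/value pairs, so the same map always serializes to identical bytes. A sketch of that ordering step, with encodeKey standing in for the scratch-buffer MustEncode and sort.Slice swapped in for the vendored sort.Sort(bytesISlice(...)):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// keyBytes mirrors the role of bytesI: a key's encoded form plus the
// original key, used to look its value back up after sorting.
type keyBytes struct {
	b []byte
	k string
}

// canonicalKeys encodes and sorts map keys by their encoded bytes.
func canonicalKeys(v map[string]int, encodeKey func(string) []byte) []keyBytes {
	ks := make([]keyBytes, 0, len(v))
	for k := range v {
		ks = append(ks, keyBytes{b: encodeKey(k), k: k})
	}
	sort.Slice(ks, func(i, j int) bool { return bytes.Compare(ks[i].b, ks[j].b) < 0 })
	return ks
}

func main() {
	m := map[string]int{"b": 2, "a": 1}
	for _, kb := range canonicalKeys(m, func(s string) []byte { return []byte(s) }) {
		fmt.Printf("%s=%d ", kb.k, m[kb.k]) // deterministic: a=1 b=2
	}
	fmt.Println()
}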
+func (_ fastpathT) EncMapIntfUint8V(v map[interface{}]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), e) +} +func (_ fastpathT) EncMapIntfUint16V(v map[interface{}]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), e) +} +func (_ fastpathT) EncMapIntfUint32V(v map[interface{}]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), e) +} +func (_ fastpathT) EncMapIntfUint64V(v 
map[interface{}]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), e) +} +func (_ fastpathT) EncMapIntfUintptrV(v map[interface{}]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfIntV(rv2i(rv).(map[interface{}]int), e) +} +func (_ fastpathT) EncMapIntfIntV(v map[interface{}]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt8V(rv2i(rv).(map[interface{}]int8), e) +} +func (_ fastpathT) EncMapIntfInt8V(v map[interface{}]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, 
esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt16V(rv2i(rv).(map[interface{}]int16), e) +} +func (_ fastpathT) EncMapIntfInt16V(v map[interface{}]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt32V(rv2i(rv).(map[interface{}]int32), e) +} +func (_ fastpathT) EncMapIntfInt32V(v map[interface{}]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfInt64V(rv2i(rv).(map[interface{}]int64), e) +} +func (_ fastpathT) EncMapIntfInt64V(v map[interface{}]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + 
if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), e) +} +func (_ fastpathT) EncMapIntfFloat32V(v map[interface{}]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), e) +} +func (_ fastpathT) EncMapIntfFloat64V(v map[interface{}]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) // temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntfBoolV(rv2i(rv).(map[interface{}]bool), e) +} +func (_ fastpathT) EncMapIntfBoolV(v map[interface{}]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + var mksv []byte = make([]byte, 0, len(v)*16) 
// temporary byte slice for the encoding + e2 := NewEncoderBytes(&mksv, e.hh) + v2 := make([]bytesI, len(v)) + var i, l int + var vp *bytesI + for k2, _ := range v { + l = len(mksv) + e2.MustEncode(k2) + vp = &v2[i] + vp.v = mksv[l:] + vp.i = k2 + i++ + } + sort.Sort(bytesISlice(v2)) + if esep { + for j := range v2 { + ee.WriteMapElemKey() + e.asis(v2[j].v) + ee.WriteMapElemValue() + e.encode(v[v2[j].i]) + } + } else { + for j := range v2 { + e.asis(v2[j].v) + e.encode(v[v2[j].i]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntfV(rv2i(rv).(map[string]interface{}), e) +} +func (_ fastpathT) EncMapStringIntfV(v map[string]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + e.encode(v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + e.encode(v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringStringV(rv2i(rv).(map[string]string), e) +} +func (_ fastpathT) EncMapStringStringV(v map[string]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeString(cUTF8, v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintV(rv2i(rv).(map[string]uint), e) +} +func (_ fastpathT) EncMapStringUintV(v map[string]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v[string(k2)])) + } + } + } 
else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint8V(rv2i(rv).(map[string]uint8), e) +} +func (_ fastpathT) EncMapStringUint8V(v map[string]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint16V(rv2i(rv).(map[string]uint16), e) +} +func (_ fastpathT) EncMapStringUint16V(v map[string]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUint32V(rv2i(rv).(map[string]uint32), e) +} +func (_ fastpathT) EncMapStringUint32V(v map[string]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapStringUint64V(rv2i(rv).(map[string]uint64), e) +} +func (_ fastpathT) EncMapStringUint64V(v map[string]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringUintptrV(rv2i(rv).(map[string]uintptr), e) +} +func (_ fastpathT) EncMapStringUintptrV(v map[string]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + e.encode(v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + e.encode(v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringIntV(rv2i(rv).(map[string]int), e) +} +func (_ fastpathT) EncMapStringIntV(v map[string]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt8V(rv2i(rv).(map[string]int8), e) +} +func (_ fastpathT) EncMapStringInt8V(v map[string]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + 
ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt16V(rv2i(rv).(map[string]int16), e) +} +func (_ fastpathT) EncMapStringInt16V(v map[string]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt32V(rv2i(rv).(map[string]int32), e) +} +func (_ fastpathT) EncMapStringInt32V(v map[string]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringInt64V(rv2i(rv).(map[string]int64), e) +} +func (_ fastpathT) EncMapStringInt64V(v map[string]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[string(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeInt(int64(v[string(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + 
ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat32V(rv2i(rv).(map[string]float32), e) +} +func (_ fastpathT) EncMapStringFloat32V(v map[string]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeFloat32(v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringFloat64V(rv2i(rv).(map[string]float64), e) +} +func (_ fastpathT) EncMapStringFloat64V(v map[string]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeFloat64(v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapStringBoolV(rv2i(rv).(map[string]bool), e) +} +func (_ fastpathT) EncMapStringBoolV(v map[string]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]string, len(v)) + var i int + for k, _ := range v { + v2[i] = string(k) + i++ + } + sort.Sort(stringSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeBool(v[string(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeString(cUTF8, k2) + ee.EncodeBool(v[string(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeString(cUTF8, k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeString(cUTF8, k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), e) +} +func (_ fastpathT) EncMapFloat32IntfV(v map[float32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var 
i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + e.encode(v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + e.encode(v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32StringV(rv2i(rv).(map[float32]string), e) +} +func (_ fastpathT) EncMapFloat32StringV(v map[float32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeString(cUTF8, v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintV(rv2i(rv).(map[float32]uint), e) +} +func (_ fastpathT) EncMapFloat32UintV(v map[float32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeUint(uint64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), e) +} +func (_ fastpathT) EncMapFloat32Uint8V(v map[float32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeUint(uint64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + 
ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), e) +} +func (_ fastpathT) EncMapFloat32Uint16V(v map[float32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeUint(uint64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), e) +} +func (_ fastpathT) EncMapFloat32Uint32V(v map[float32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeUint(uint64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), e) +} +func (_ fastpathT) EncMapFloat32Uint64V(v map[float32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeUint(uint64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), e) +} +func (_ fastpathT) 
EncMapFloat32UintptrV(v map[float32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + e.encode(v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + e.encode(v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32IntV(rv2i(rv).(map[float32]int), e) +} +func (_ fastpathT) EncMapFloat32IntV(v map[float32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeInt(int64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int8V(rv2i(rv).(map[float32]int8), e) +} +func (_ fastpathT) EncMapFloat32Int8V(v map[float32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeInt(int64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int16V(rv2i(rv).(map[float32]int16), e) +} +func (_ fastpathT) EncMapFloat32Int16V(v map[float32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float32(k2)])) + } + } else 
{ + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeInt(int64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int32V(rv2i(rv).(map[float32]int32), e) +} +func (_ fastpathT) EncMapFloat32Int32V(v map[float32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeInt(int64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Int64V(rv2i(rv).(map[float32]int64), e) +} +func (_ fastpathT) EncMapFloat32Int64V(v map[float32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeInt(int64(v[float32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float32V(rv2i(rv).(map[float32]float32), e) +} +func (_ fastpathT) EncMapFloat32Float32V(v map[float32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeFloat32(v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32Float64R(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32Float64V(rv2i(rv).(map[float32]float64), e) +} +func (_ fastpathT) EncMapFloat32Float64V(v map[float32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeFloat64(v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat32BoolV(rv2i(rv).(map[float32]bool), e) +} +func (_ fastpathT) EncMapFloat32BoolV(v map[float32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat32(float32(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v[float32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat32(float32(k2)) + ee.EncodeBool(v[float32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat32(k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat32(k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), e) +} +func (_ fastpathT) EncMapFloat64IntfV(v map[float64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + e.encode(v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + e.encode(v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64StringV(rv2i(rv).(map[float64]string), e) +} +func (_ fastpathT) EncMapFloat64StringV(v map[float64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + 
ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeString(cUTF8, v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintV(rv2i(rv).(map[float64]uint), e) +} +func (_ fastpathT) EncMapFloat64UintV(v map[float64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeUint(uint64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), e) +} +func (_ fastpathT) EncMapFloat64Uint8V(v map[float64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeUint(uint64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), e) +} +func (_ fastpathT) EncMapFloat64Uint16V(v map[float64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeUint(uint64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + 
for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), e) +} +func (_ fastpathT) EncMapFloat64Uint32V(v map[float64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeUint(uint64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), e) +} +func (_ fastpathT) EncMapFloat64Uint64V(v map[float64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeUint(uint64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), e) +} +func (_ fastpathT) EncMapFloat64UintptrV(v map[float64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + e.encode(v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + e.encode(v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64IntV(rv2i(rv).(map[float64]int), e) +} +func (_ fastpathT) EncMapFloat64IntV(v map[float64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeInt(int64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int8V(rv2i(rv).(map[float64]int8), e) +} +func (_ fastpathT) EncMapFloat64Int8V(v map[float64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeInt(int64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int16V(rv2i(rv).(map[float64]int16), e) +} +func (_ fastpathT) EncMapFloat64Int16V(v map[float64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeInt(int64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int32V(rv2i(rv).(map[float64]int32), e) +} +func (_ fastpathT) EncMapFloat64Int32V(v map[float64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeInt(int64(v[float64(k2)])) 
+ } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Int64V(rv2i(rv).(map[float64]int64), e) +} +func (_ fastpathT) EncMapFloat64Int64V(v map[float64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[float64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeInt(int64(v[float64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float32V(rv2i(rv).(map[float64]float32), e) +} +func (_ fastpathT) EncMapFloat64Float32V(v map[float64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeFloat32(v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64Float64V(rv2i(rv).(map[float64]float64), e) +} +func (_ fastpathT) EncMapFloat64Float64V(v map[float64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeFloat64(v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapFloat64BoolV(rv2i(rv).(map[float64]bool), e) +} +func (_ 
fastpathT) EncMapFloat64BoolV(v map[float64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]float64, len(v)) + var i int + for k, _ := range v { + v2[i] = float64(k) + i++ + } + sort.Sort(floatSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeFloat64(float64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v[float64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeFloat64(float64(k2)) + ee.EncodeBool(v[float64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeFloat64(k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeFloat64(k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntfV(rv2i(rv).(map[uint]interface{}), e) +} +func (_ fastpathT) EncMapUintIntfV(v map[uint]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + e.encode(v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + e.encode(v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintStringV(rv2i(rv).(map[uint]string), e) +} +func (_ fastpathT) EncMapUintStringV(v map[uint]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeString(cUTF8, v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintV(rv2i(rv).(map[uint]uint), e) +} +func (_ fastpathT) EncMapUintUintV(v map[uint]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for _, k2 := 
range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeUint(uint64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint8V(rv2i(rv).(map[uint]uint8), e) +} +func (_ fastpathT) EncMapUintUint8V(v map[uint]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeUint(uint64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint16V(rv2i(rv).(map[uint]uint16), e) +} +func (_ fastpathT) EncMapUintUint16V(v map[uint]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeUint(uint64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint32V(rv2i(rv).(map[uint]uint32), e) +} +func (_ fastpathT) EncMapUintUint32V(v map[uint]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeUint(uint64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUint64R(f 
*codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUint64V(rv2i(rv).(map[uint]uint64), e) +} +func (_ fastpathT) EncMapUintUint64V(v map[uint]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeUint(uint64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintUintptrV(rv2i(rv).(map[uint]uintptr), e) +} +func (_ fastpathT) EncMapUintUintptrV(v map[uint]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + e.encode(v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + e.encode(v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintIntV(rv2i(rv).(map[uint]int), e) +} +func (_ fastpathT) EncMapUintIntV(v map[uint]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeInt(int64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt8V(rv2i(rv).(map[uint]int8), e) +} +func (_ fastpathT) EncMapUintInt8V(v map[uint]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + 
ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeInt(int64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt16V(rv2i(rv).(map[uint]int16), e) +} +func (_ fastpathT) EncMapUintInt16V(v map[uint]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeInt(int64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt32V(rv2i(rv).(map[uint]int32), e) +} +func (_ fastpathT) EncMapUintInt32V(v map[uint]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeInt(int64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintInt64V(rv2i(rv).(map[uint]int64), e) +} +func (_ fastpathT) EncMapUintInt64V(v map[uint]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeInt(int64(v[uint(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } 
+ } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat32V(rv2i(rv).(map[uint]float32), e) +} +func (_ fastpathT) EncMapUintFloat32V(v map[uint]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeFloat32(v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintFloat64V(rv2i(rv).(map[uint]float64), e) +} +func (_ fastpathT) EncMapUintFloat64V(v map[uint]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeFloat64(v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintBoolV(rv2i(rv).(map[uint]bool), e) +} +func (_ fastpathT) EncMapUintBoolV(v map[uint]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[uint(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint(k2))) + ee.EncodeBool(v[uint(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), e) +} +func (_ fastpathT) EncMapUint8IntfV(v map[uint8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + 
sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + e.encode(v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + e.encode(v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8StringV(rv2i(rv).(map[uint8]string), e) +} +func (_ fastpathT) EncMapUint8StringV(v map[uint8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeString(cUTF8, v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintV(rv2i(rv).(map[uint8]uint), e) +} +func (_ fastpathT) EncMapUint8UintV(v map[uint8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), e) +} +func (_ fastpathT) EncMapUint8Uint8V(v map[uint8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + 
} + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), e) +} +func (_ fastpathT) EncMapUint8Uint16V(v map[uint8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), e) +} +func (_ fastpathT) EncMapUint8Uint32V(v map[uint8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), e) +} +func (_ fastpathT) EncMapUint8Uint64V(v map[uint8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeUint(uint64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), e) +} +func (_ fastpathT) EncMapUint8UintptrV(v map[uint8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + 
return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + e.encode(v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + e.encode(v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8IntV(rv2i(rv).(map[uint8]int), e) +} +func (_ fastpathT) EncMapUint8IntV(v map[uint8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeInt(int64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int8V(rv2i(rv).(map[uint8]int8), e) +} +func (_ fastpathT) EncMapUint8Int8V(v map[uint8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeInt(int64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int16V(rv2i(rv).(map[uint8]int16), e) +} +func (_ fastpathT) EncMapUint8Int16V(v map[uint8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeInt(int64(v[uint8(k2)])) 
+ } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int32V(rv2i(rv).(map[uint8]int32), e) +} +func (_ fastpathT) EncMapUint8Int32V(v map[uint8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeInt(int64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Int64V(rv2i(rv).(map[uint8]int64), e) +} +func (_ fastpathT) EncMapUint8Int64V(v map[uint8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeInt(int64(v[uint8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float32V(rv2i(rv).(map[uint8]float32), e) +} +func (_ fastpathT) EncMapUint8Float32V(v map[uint8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeFloat32(v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8Float64V(rv2i(rv).(map[uint8]float64), e) 
+} +func (_ fastpathT) EncMapUint8Float64V(v map[uint8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeFloat64(v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint8BoolV(rv2i(rv).(map[uint8]bool), e) +} +func (_ fastpathT) EncMapUint8BoolV(v map[uint8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint8(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[uint8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint8(k2))) + ee.EncodeBool(v[uint8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), e) +} +func (_ fastpathT) EncMapUint16IntfV(v map[uint16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + e.encode(v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + e.encode(v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16StringV(rv2i(rv).(map[uint16]string), e) +} +func (_ fastpathT) EncMapUint16StringV(v map[uint16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, 
v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeString(cUTF8, v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintV(rv2i(rv).(map[uint16]uint), e) +} +func (_ fastpathT) EncMapUint16UintV(v map[uint16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), e) +} +func (_ fastpathT) EncMapUint16Uint8V(v map[uint16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), e) +} +func (_ fastpathT) EncMapUint16Uint16V(v map[uint16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + 
ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), e) +} +func (_ fastpathT) EncMapUint16Uint32V(v map[uint16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), e) +} +func (_ fastpathT) EncMapUint16Uint64V(v map[uint16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeUint(uint64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), e) +} +func (_ fastpathT) EncMapUint16UintptrV(v map[uint16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + e.encode(v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + e.encode(v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16IntV(rv2i(rv).(map[uint16]int), e) +} +func (_ fastpathT) EncMapUint16IntV(v map[uint16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 
:= make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeInt(int64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int8V(rv2i(rv).(map[uint16]int8), e) +} +func (_ fastpathT) EncMapUint16Int8V(v map[uint16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeInt(int64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int16V(rv2i(rv).(map[uint16]int16), e) +} +func (_ fastpathT) EncMapUint16Int16V(v map[uint16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeInt(int64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int32V(rv2i(rv).(map[uint16]int32), e) +} +func (_ fastpathT) EncMapUint16Int32V(v map[uint16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeInt(int64(v[uint16(k2)])) + } + } + } else { + if 
esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Int64V(rv2i(rv).(map[uint16]int64), e) +} +func (_ fastpathT) EncMapUint16Int64V(v map[uint16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeInt(int64(v[uint16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float32V(rv2i(rv).(map[uint16]float32), e) +} +func (_ fastpathT) EncMapUint16Float32V(v map[uint16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeFloat32(v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16Float64V(rv2i(rv).(map[uint16]float64), e) +} +func (_ fastpathT) EncMapUint16Float64V(v map[uint16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeFloat64(v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint16BoolV(rv2i(rv).(map[uint16]bool), e) +} 
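[Editor's note on the vendored block above and below: these functions are machine-generated fastpaths from github.com/ugorji/go/codec (fastpath.generated.go). Each one specializes encoding for one concrete map[K]V pair so the encoder can skip reflection. They all share a single shape: nil check, WriteMapStart, then either a canonical branch (copy the keys out, sort them, emit entries in key order for deterministic output) or a plain range over the map, with WriteMapElemKey/WriteMapElemValue emitted only when the handle uses element separators (esep). The sketch below distills that shared pattern for one key/value pair; it is illustrative only — emitKey/emitVal are hypothetical stand-ins for the per-type ee.EncodeUint/ee.EncodeString calls, and the "canonical" flag plays the role of e.h.Canonical. It is not the vendored API.]

package main

import (
	"fmt"
	"sort"
)

// encodeMapUint64String mirrors the structure of the generated
// EncMap*V fastpath functions: nil check, then either a canonical
// (sorted-key) pass for deterministic output, or a direct range over
// the map, which is faster but iterates in nondeterministic order.
func encodeMapUint64String(v map[uint64]string, canonical bool,
	emitKey func(uint64), emitVal func(string)) {
	if v == nil {
		return // the real generated code emits an explicit nil value here
	}
	if canonical {
		// Copy keys out and sort, as the generated code does with
		// uintSlice/floatSlice, so equal maps encode byte-identically.
		keys := make([]uint64, 0, len(v))
		for k := range v {
			keys = append(keys, k)
		}
		sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
		for _, k := range keys {
			emitKey(k)
			emitVal(v[k])
		}
		return
	}
	for k, val := range v {
		emitKey(k)
		emitVal(val)
	}
}

func main() {
	m := map[uint64]string{3: "c", 1: "a", 2: "b"}
	// Canonical mode: always prints 1=a, 2=b, 3=c regardless of map order.
	encodeMapUint64String(m, true,
		func(k uint64) { fmt.Printf("%d=", k) },
		func(s string) { fmt.Println(s) })
}

[The Canonical branch exists so handles configured for canonical output can produce byte-identical encodings of equal maps (useful when the encoded bytes are hashed, signed, or compared); the generated code pays for that determinism with a key copy plus a sort, which is why it is gated behind the handle flag rather than always on. The diff resumes below.]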
+func (_ fastpathT) EncMapUint16BoolV(v map[uint16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint16(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[uint16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint16(k2))) + ee.EncodeBool(v[uint16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), e) +} +func (_ fastpathT) EncMapUint32IntfV(v map[uint32]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + e.encode(v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + e.encode(v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32StringV(rv2i(rv).(map[uint32]string), e) +} +func (_ fastpathT) EncMapUint32StringV(v map[uint32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeString(cUTF8, v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintV(rv2i(rv).(map[uint32]uint), e) +} +func (_ fastpathT) EncMapUint32UintV(v map[uint32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + 
ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), e) +} +func (_ fastpathT) EncMapUint32Uint8V(v map[uint32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), e) +} +func (_ fastpathT) EncMapUint32Uint16V(v map[uint32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), e) +} +func (_ fastpathT) EncMapUint32Uint32V(v map[uint32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + 
ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), e) +} +func (_ fastpathT) EncMapUint32Uint64V(v map[uint32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeUint(uint64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), e) +} +func (_ fastpathT) EncMapUint32UintptrV(v map[uint32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + e.encode(v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + e.encode(v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32IntV(rv2i(rv).(map[uint32]int), e) +} +func (_ fastpathT) EncMapUint32IntV(v map[uint32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeInt(int64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int8V(rv2i(rv).(map[uint32]int8), e) +} +func (_ fastpathT) EncMapUint32Int8V(v map[uint32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeInt(int64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int16V(rv2i(rv).(map[uint32]int16), e) +} +func (_ fastpathT) EncMapUint32Int16V(v map[uint32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeInt(int64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int32V(rv2i(rv).(map[uint32]int32), e) +} +func (_ fastpathT) EncMapUint32Int32V(v map[uint32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeInt(int64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Int64V(rv2i(rv).(map[uint32]int64), e) +} +func (_ fastpathT) EncMapUint32Int64V(v map[uint32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + 
ee.EncodeInt(int64(v[uint32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float32V(rv2i(rv).(map[uint32]float32), e) +} +func (_ fastpathT) EncMapUint32Float32V(v map[uint32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeFloat32(v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32Float64V(rv2i(rv).(map[uint32]float64), e) +} +func (_ fastpathT) EncMapUint32Float64V(v map[uint32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeFloat64(v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint32BoolV(rv2i(rv).(map[uint32]bool), e) +} +func (_ fastpathT) EncMapUint32BoolV(v map[uint32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint32(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[uint32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint32(k2))) + ee.EncodeBool(v[uint32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), e) +} +func (_ fastpathT) EncMapUint64IntfV(v map[uint64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + e.encode(v[uint64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + e.encode(v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64StringV(rv2i(rv).(map[uint64]string), e) +} +func (_ fastpathT) EncMapUint64StringV(v map[uint64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[uint64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeString(cUTF8, v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintV(rv2i(rv).(map[uint64]uint), e) +} +func (_ fastpathT) EncMapUint64UintV(v map[uint64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), e) +} +func (_ fastpathT) EncMapUint64Uint8V(v map[uint64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 
:= range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), e) +} +func (_ fastpathT) EncMapUint64Uint16V(v map[uint64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), e) +} +func (_ fastpathT) EncMapUint64Uint32V(v map[uint64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), e) +} +func (_ fastpathT) EncMapUint64Uint64V(v map[uint64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeUint(uint64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + 
ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), e) +} +func (_ fastpathT) EncMapUint64UintptrV(v map[uint64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + e.encode(v[uint64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + e.encode(v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64IntV(rv2i(rv).(map[uint64]int), e) +} +func (_ fastpathT) EncMapUint64IntV(v map[uint64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeInt(int64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int8V(rv2i(rv).(map[uint64]int8), e) +} +func (_ fastpathT) EncMapUint64Int8V(v map[uint64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeInt(int64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int16V(rv2i(rv).(map[uint64]int16), e) +} +func (_ fastpathT) EncMapUint64Int16V(v map[uint64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, 
esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeInt(int64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int32V(rv2i(rv).(map[uint64]int32), e) +} +func (_ fastpathT) EncMapUint64Int32V(v map[uint64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeInt(int64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Int64V(rv2i(rv).(map[uint64]int64), e) +} +func (_ fastpathT) EncMapUint64Int64V(v map[uint64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uint64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeInt(int64(v[uint64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float32V(rv2i(rv).(map[uint64]float32), e) +} +func (_ fastpathT) EncMapUint64Float32V(v map[uint64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uint64(k2)]) + } + } else { + for _, k2 := 
range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeFloat32(v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64Float64V(rv2i(rv).(map[uint64]float64), e) +} +func (_ fastpathT) EncMapUint64Float64V(v map[uint64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uint64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeFloat64(v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUint64BoolV(rv2i(rv).(map[uint64]bool), e) +} +func (_ fastpathT) EncMapUint64BoolV(v map[uint64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(uint64(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[uint64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeUint(uint64(uint64(k2))) + ee.EncodeBool(v[uint64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeUint(uint64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeUint(uint64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), e) +} +func (_ fastpathT) EncMapUintptrIntfV(v map[uintptr]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + e.encode(v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + e.encode(v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrStringV(rv2i(rv).(map[uintptr]string), e) +} 
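Editor's note: every generated fastpath map encoder in this hunk instantiates the same template for one (key type, value type) pair — a thin `...R` wrapper that type-asserts the reflect.Value, and a `...V` body that does the work. The sketch below is not part of the vendored file; it reuses the file's own `uintSlice` sort helper (visible throughout this hunk) to distill the canonical branch that gives maps a deterministic encoding:

func canonicalMapKeys(v map[uint32]string) []uint64 {
	// Widen every key to uint64 so a single sort helper serves all
	// unsigned key types (uint, uint8, ..., uintptr).
	ks := make([]uint64, 0, len(v))
	for k := range v {
		ks = append(ks, uint64(k))
	}
	// Sorting fixes the iteration order, so equal maps always encode
	// to identical bytes when e.h.Canonical is set.
	sort.Sort(uintSlice(ks))
	return ks
}

The encoder then narrows each sorted key back (e.g. `uint32(k2)`) to look its value up. The non-canonical branch simply ranges over the map directly, and the `esep` variants differ only in emitting `WriteMapElemKey`/`WriteMapElemValue` markers for formats (such as JSON) whose map elements need separators; the actual `EncodeUint`/`EncodeInt`/`EncodeString`/`e.encode` calls are identical across branches.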
+func (_ fastpathT) EncMapUintptrStringV(v map[uintptr]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeString(cUTF8, v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintV(rv2i(rv).(map[uintptr]uint), e) +} +func (_ fastpathT) EncMapUintptrUintV(v map[uintptr]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), e) +} +func (_ fastpathT) EncMapUintptrUint8V(v map[uintptr]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), e) +} +func (_ fastpathT) EncMapUintptrUint16V(v map[uintptr]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for 
_, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), e) +} +func (_ fastpathT) EncMapUintptrUint32V(v map[uintptr]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), e) +} +func (_ fastpathT) EncMapUintptrUint64V(v map[uintptr]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeUint(uint64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), e) +} +func (_ fastpathT) EncMapUintptrUintptrV(v map[uintptr]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + e.encode(v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + e.encode(v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrIntV(rv2i(rv).(map[uintptr]int), e) +} +func (_ fastpathT) EncMapUintptrIntV(v 
map[uintptr]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), e) +} +func (_ fastpathT) EncMapUintptrInt8V(v map[uintptr]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), e) +} +func (_ fastpathT) EncMapUintptrInt16V(v map[uintptr]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), e) +} +func (_ fastpathT) EncMapUintptrInt32V(v map[uintptr]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeInt(int64(v[uintptr(k2)])) 
+ } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), e) +} +func (_ fastpathT) EncMapUintptrInt64V(v map[uintptr]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeInt(int64(v[uintptr(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), e) +} +func (_ fastpathT) EncMapUintptrFloat32V(v map[uintptr]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeFloat32(v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), e) +} +func (_ fastpathT) EncMapUintptrFloat64V(v map[uintptr]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeFloat64(v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), e) +} +func (_ fastpathT) EncMapUintptrBoolV(v map[uintptr]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep 
:= e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]uint64, len(v)) + var i int + for k, _ := range v { + v2[i] = uint64(k) + i++ + } + sort.Sort(uintSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + e.encode(uintptr(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v[uintptr(k2)]) + } + } else { + for _, k2 := range v2 { + e.encode(uintptr(k2)) + ee.EncodeBool(v[uintptr(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + e.encode(k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + e.encode(k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntfV(rv2i(rv).(map[int]interface{}), e) +} +func (_ fastpathT) EncMapIntIntfV(v map[int]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + e.encode(v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + e.encode(v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntStringV(rv2i(rv).(map[int]string), e) +} +func (_ fastpathT) EncMapIntStringV(v map[int]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeString(cUTF8, v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintV(rv2i(rv).(map[int]uint), e) +} +func (_ fastpathT) EncMapIntUintV(v map[int]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeUint(uint64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + 
ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint8V(rv2i(rv).(map[int]uint8), e) +} +func (_ fastpathT) EncMapIntUint8V(v map[int]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeUint(uint64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint16V(rv2i(rv).(map[int]uint16), e) +} +func (_ fastpathT) EncMapIntUint16V(v map[int]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeUint(uint64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint32V(rv2i(rv).(map[int]uint32), e) +} +func (_ fastpathT) EncMapIntUint32V(v map[int]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeUint(uint64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUint64V(rv2i(rv).(map[int]uint64), e) +} +func (_ fastpathT) EncMapIntUint64V(v map[int]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + 
if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeUint(uint64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntUintptrV(rv2i(rv).(map[int]uintptr), e) +} +func (_ fastpathT) EncMapIntUintptrV(v map[int]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + e.encode(v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + e.encode(v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntIntV(rv2i(rv).(map[int]int), e) +} +func (_ fastpathT) EncMapIntIntV(v map[int]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeInt(int64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt8V(rv2i(rv).(map[int]int8), e) +} +func (_ fastpathT) EncMapIntInt8V(v map[int]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeInt(int64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } 
else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt16V(rv2i(rv).(map[int]int16), e) +} +func (_ fastpathT) EncMapIntInt16V(v map[int]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeInt(int64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt32V(rv2i(rv).(map[int]int32), e) +} +func (_ fastpathT) EncMapIntInt32V(v map[int]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeInt(int64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntInt64V(rv2i(rv).(map[int]int64), e) +} +func (_ fastpathT) EncMapIntInt64V(v map[int]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeInt(int64(v[int(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat32V(rv2i(rv).(map[int]float32), e) +} +func (_ fastpathT) EncMapIntFloat32V(v map[int]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for 
k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeFloat32(v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntFloat64V(rv2i(rv).(map[int]float64), e) +} +func (_ fastpathT) EncMapIntFloat64V(v map[int]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeFloat64(v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapIntBoolV(rv2i(rv).(map[int]bool), e) +} +func (_ fastpathT) EncMapIntBoolV(v map[int]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[int(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int(k2))) + ee.EncodeBool(v[int(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntfV(rv2i(rv).(map[int8]interface{}), e) +} +func (_ fastpathT) EncMapInt8IntfV(v map[int8]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + e.encode(v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + e.encode(v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + 
ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8StringV(rv2i(rv).(map[int8]string), e) +} +func (_ fastpathT) EncMapInt8StringV(v map[int8]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeString(cUTF8, v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintV(rv2i(rv).(map[int8]uint), e) +} +func (_ fastpathT) EncMapInt8UintV(v map[int8]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeUint(uint64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint8V(rv2i(rv).(map[int8]uint8), e) +} +func (_ fastpathT) EncMapInt8Uint8V(v map[int8]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeUint(uint64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint16V(rv2i(rv).(map[int8]uint16), e) +} +func (_ fastpathT) EncMapInt8Uint16V(v map[int8]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ 
+ } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeUint(uint64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint32V(rv2i(rv).(map[int8]uint32), e) +} +func (_ fastpathT) EncMapInt8Uint32V(v map[int8]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeUint(uint64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Uint64V(rv2i(rv).(map[int8]uint64), e) +} +func (_ fastpathT) EncMapInt8Uint64V(v map[int8]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeUint(uint64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), e) +} +func (_ fastpathT) EncMapInt8UintptrV(v map[int8]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + e.encode(v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + e.encode(v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v 
{ + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8IntV(rv2i(rv).(map[int8]int), e) +} +func (_ fastpathT) EncMapInt8IntV(v map[int8]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeInt(int64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int8V(rv2i(rv).(map[int8]int8), e) +} +func (_ fastpathT) EncMapInt8Int8V(v map[int8]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeInt(int64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int16V(rv2i(rv).(map[int8]int16), e) +} +func (_ fastpathT) EncMapInt8Int16V(v map[int8]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeInt(int64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int32V(rv2i(rv).(map[int8]int32), e) +} +func (_ fastpathT) EncMapInt8Int32V(v map[int8]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = 
int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeInt(int64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Int64V(rv2i(rv).(map[int8]int64), e) +} +func (_ fastpathT) EncMapInt8Int64V(v map[int8]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int8(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeInt(int64(v[int8(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float32V(rv2i(rv).(map[int8]float32), e) +} +func (_ fastpathT) EncMapInt8Float32V(v map[int8]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeFloat32(v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8Float64V(rv2i(rv).(map[int8]float64), e) +} +func (_ fastpathT) EncMapInt8Float64V(v map[int8]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeFloat64(v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := 
range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt8BoolV(rv2i(rv).(map[int8]bool), e) +} +func (_ fastpathT) EncMapInt8BoolV(v map[int8]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int8(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[int8(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int8(k2))) + ee.EncodeBool(v[int8(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntfV(rv2i(rv).(map[int16]interface{}), e) +} +func (_ fastpathT) EncMapInt16IntfV(v map[int16]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + e.encode(v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + e.encode(v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16StringV(rv2i(rv).(map[int16]string), e) +} +func (_ fastpathT) EncMapInt16StringV(v map[int16]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeString(cUTF8, v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintV(rv2i(rv).(map[int16]uint), e) +} +func (_ fastpathT) EncMapInt16UintV(v map[int16]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = 
int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeUint(uint64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint8V(rv2i(rv).(map[int16]uint8), e) +} +func (_ fastpathT) EncMapInt16Uint8V(v map[int16]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeUint(uint64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint16V(rv2i(rv).(map[int16]uint16), e) +} +func (_ fastpathT) EncMapInt16Uint16V(v map[int16]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeUint(uint64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint32V(rv2i(rv).(map[int16]uint32), e) +} +func (_ fastpathT) EncMapInt16Uint32V(v map[int16]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeUint(uint64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + 
ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Uint64V(rv2i(rv).(map[int16]uint64), e) +} +func (_ fastpathT) EncMapInt16Uint64V(v map[int16]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeUint(uint64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), e) +} +func (_ fastpathT) EncMapInt16UintptrV(v map[int16]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + e.encode(v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + e.encode(v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16IntV(rv2i(rv).(map[int16]int), e) +} +func (_ fastpathT) EncMapInt16IntV(v map[int16]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeInt(int64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int8V(rv2i(rv).(map[int16]int8), e) +} +func (_ fastpathT) EncMapInt16Int8V(v map[int16]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + 
ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeInt(int64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int16V(rv2i(rv).(map[int16]int16), e) +} +func (_ fastpathT) EncMapInt16Int16V(v map[int16]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeInt(int64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int32V(rv2i(rv).(map[int16]int32), e) +} +func (_ fastpathT) EncMapInt16Int32V(v map[int16]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeInt(int64(v[int16(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Int64V(rv2i(rv).(map[int16]int64), e) +} +func (_ fastpathT) EncMapInt16Int64V(v map[int16]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int16(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeInt(int64(v[int16(k2)])) + } + } + } else { + if esep { + for 
k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float32V(rv2i(rv).(map[int16]float32), e) +} +func (_ fastpathT) EncMapInt16Float32V(v map[int16]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeFloat32(v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16Float64V(rv2i(rv).(map[int16]float64), e) +} +func (_ fastpathT) EncMapInt16Float64V(v map[int16]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeFloat64(v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt16BoolV(rv2i(rv).(map[int16]bool), e) +} +func (_ fastpathT) EncMapInt16BoolV(v map[int16]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int16(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[int16(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int16(k2))) + ee.EncodeBool(v[int16(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntfV(rv2i(rv).(map[int32]interface{}), e) +} +func (_ fastpathT) EncMapInt32IntfV(v map[int32]interface{}, e *Encoder) { + if v == nil { + 
e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + e.encode(v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + e.encode(v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32StringV(rv2i(rv).(map[int32]string), e) +} +func (_ fastpathT) EncMapInt32StringV(v map[int32]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeString(cUTF8, v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32UintV(rv2i(rv).(map[int32]uint), e) +} +func (_ fastpathT) EncMapInt32UintV(v map[int32]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeUint(uint64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint8V(rv2i(rv).(map[int32]uint8), e) +} +func (_ fastpathT) EncMapInt32Uint8V(v map[int32]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + 
ee.EncodeUint(uint64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint16V(rv2i(rv).(map[int32]uint16), e) +} +func (_ fastpathT) EncMapInt32Uint16V(v map[int32]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeUint(uint64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint32V(rv2i(rv).(map[int32]uint32), e) +} +func (_ fastpathT) EncMapInt32Uint32V(v map[int32]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeUint(uint64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Uint64V(rv2i(rv).(map[int32]uint64), e) +} +func (_ fastpathT) EncMapInt32Uint64V(v map[int32]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeUint(uint64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + 
fastpathTV.EncMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), e) +} +func (_ fastpathT) EncMapInt32UintptrV(v map[int32]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + e.encode(v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + e.encode(v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32IntV(rv2i(rv).(map[int32]int), e) +} +func (_ fastpathT) EncMapInt32IntV(v map[int32]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeInt(int64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int8V(rv2i(rv).(map[int32]int8), e) +} +func (_ fastpathT) EncMapInt32Int8V(v map[int32]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeInt(int64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int16V(rv2i(rv).(map[int32]int16), e) +} +func (_ fastpathT) EncMapInt32Int16V(v map[int32]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + 
ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeInt(int64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int32V(rv2i(rv).(map[int32]int32), e) +} +func (_ fastpathT) EncMapInt32Int32V(v map[int32]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeInt(int64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Int64V(rv2i(rv).(map[int32]int64), e) +} +func (_ fastpathT) EncMapInt32Int64V(v map[int32]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int32(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeInt(int64(v[int32(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float32V(rv2i(rv).(map[int32]float32), e) +} +func (_ fastpathT) EncMapInt32Float32V(v map[int32]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeFloat32(v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) 
fastpathEncMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32Float64V(rv2i(rv).(map[int32]float64), e) +} +func (_ fastpathT) EncMapInt32Float64V(v map[int32]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeFloat64(v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt32BoolV(rv2i(rv).(map[int32]bool), e) +} +func (_ fastpathT) EncMapInt32BoolV(v map[int32]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int32(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[int32(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int32(k2))) + ee.EncodeBool(v[int32(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntfV(rv2i(rv).(map[int64]interface{}), e) +} +func (_ fastpathT) EncMapInt64IntfV(v map[int64]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + e.encode(v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + e.encode(v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64StringV(rv2i(rv).(map[int64]string), e) +} +func (_ fastpathT) EncMapInt64StringV(v map[int64]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + 
ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeString(cUTF8, v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintV(rv2i(rv).(map[int64]uint), e) +} +func (_ fastpathT) EncMapInt64UintV(v map[int64]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeUint(uint64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint8V(rv2i(rv).(map[int64]uint8), e) +} +func (_ fastpathT) EncMapInt64Uint8V(v map[int64]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeUint(uint64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint16V(rv2i(rv).(map[int64]uint16), e) +} +func (_ fastpathT) EncMapInt64Uint16V(v map[int64]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeUint(uint64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + 
ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint32V(rv2i(rv).(map[int64]uint32), e) +} +func (_ fastpathT) EncMapInt64Uint32V(v map[int64]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeUint(uint64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Uint64V(rv2i(rv).(map[int64]uint64), e) +} +func (_ fastpathT) EncMapInt64Uint64V(v map[int64]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeUint(uint64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), e) +} +func (_ fastpathT) EncMapInt64UintptrV(v map[int64]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + e.encode(v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + e.encode(v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64IntV(rv2i(rv).(map[int64]int), e) +} +func (_ fastpathT) EncMapInt64IntV(v map[int64]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + 
v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeInt(int64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int8V(rv2i(rv).(map[int64]int8), e) +} +func (_ fastpathT) EncMapInt64Int8V(v map[int64]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeInt(int64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int16V(rv2i(rv).(map[int64]int16), e) +} +func (_ fastpathT) EncMapInt64Int16V(v map[int64]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeInt(int64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int32V(rv2i(rv).(map[int64]int32), e) +} +func (_ fastpathT) EncMapInt64Int32V(v map[int64]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeInt(int64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) 
+ } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Int64V(rv2i(rv).(map[int64]int64), e) +} +func (_ fastpathT) EncMapInt64Int64V(v map[int64]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[int64(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeInt(int64(v[int64(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float32V(rv2i(rv).(map[int64]float32), e) +} +func (_ fastpathT) EncMapInt64Float32V(v map[int64]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeFloat32(v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64Float64V(rv2i(rv).(map[int64]float64), e) +} +func (_ fastpathT) EncMapInt64Float64V(v map[int64]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeFloat64(v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapInt64BoolV(rv2i(rv).(map[int64]bool), e) +} +func (_ fastpathT) EncMapInt64BoolV(v map[int64]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if 
e.h.Canonical { + v2 := make([]int64, len(v)) + var i int + for k, _ := range v { + v2[i] = int64(k) + i++ + } + sort.Sort(intSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeInt(int64(int64(k2))) + ee.WriteMapElemValue() + ee.EncodeBool(v[int64(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeInt(int64(int64(k2))) + ee.EncodeBool(v[int64(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeInt(int64(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeInt(int64(k2)) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntfV(rv2i(rv).(map[bool]interface{}), e) +} +func (_ fastpathT) EncMapBoolIntfV(v map[bool]interface{}, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + e.encode(v[bool(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + e.encode(v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolStringV(rv2i(rv).(map[bool]string), e) +} +func (_ fastpathT) EncMapBoolStringV(v map[bool]string, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v[bool(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeString(cUTF8, v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeString(cUTF8, v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeString(cUTF8, v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintV(rv2i(rv).(map[bool]uint), e) +} +func (_ fastpathT) EncMapBoolUintV(v map[bool]uint, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeUint(uint64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + 
ee.EncodeBool(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint8V(rv2i(rv).(map[bool]uint8), e) +} +func (_ fastpathT) EncMapBoolUint8V(v map[bool]uint8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeUint(uint64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint16V(rv2i(rv).(map[bool]uint16), e) +} +func (_ fastpathT) EncMapBoolUint16V(v map[bool]uint16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeUint(uint64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint32V(rv2i(rv).(map[bool]uint32), e) +} +func (_ fastpathT) EncMapBoolUint32V(v map[bool]uint32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeUint(uint64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUint64V(rv2i(rv).(map[bool]uint64), e) +} +func (_ fastpathT) EncMapBoolUint64V(v map[bool]uint64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + 
sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeUint(uint64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeUint(uint64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeUint(uint64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), e) +} +func (_ fastpathT) EncMapBoolUintptrV(v map[bool]uintptr, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + e.encode(v[bool(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + e.encode(v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + e.encode(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + e.encode(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolIntV(rv2i(rv).(map[bool]int), e) +} +func (_ fastpathT) EncMapBoolIntV(v map[bool]int, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeInt(int64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt8V(rv2i(rv).(map[bool]int8), e) +} +func (_ fastpathT) EncMapBoolInt8V(v map[bool]int8, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeInt(int64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt16R(f *codecFnInfo, rv 
reflect.Value) { + fastpathTV.EncMapBoolInt16V(rv2i(rv).(map[bool]int16), e) +} +func (_ fastpathT) EncMapBoolInt16V(v map[bool]int16, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeInt(int64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt32V(rv2i(rv).(map[bool]int32), e) +} +func (_ fastpathT) EncMapBoolInt32V(v map[bool]int32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeInt(int64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolInt64V(rv2i(rv).(map[bool]int64), e) +} +func (_ fastpathT) EncMapBoolInt64V(v map[bool]int64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v[bool(k2)])) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeInt(int64(v[bool(k2)])) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeInt(int64(v2)) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeInt(int64(v2)) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat32V(rv2i(rv).(map[bool]float32), e) +} +func (_ fastpathT) EncMapBoolFloat32V(v map[bool]float32, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat32(v[bool(k2)]) + } + } 
else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeFloat32(v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeFloat32(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeFloat32(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolFloat64V(rv2i(rv).(map[bool]float64), e) +} +func (_ fastpathT) EncMapBoolFloat64V(v map[bool]float64, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeFloat64(v[bool(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeFloat64(v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeFloat64(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeFloat64(v2) + } + } + } + ee.WriteMapEnd() +} + +func (e *Encoder) fastpathEncMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + fastpathTV.EncMapBoolBoolV(rv2i(rv).(map[bool]bool), e) +} +func (_ fastpathT) EncMapBoolBoolV(v map[bool]bool, e *Encoder) { + if v == nil { + e.e.EncodeNil() + return + } + ee, esep := e.e, e.hh.hasElemSeparators() + ee.WriteMapStart(len(v)) + if e.h.Canonical { + v2 := make([]bool, len(v)) + var i int + for k, _ := range v { + v2[i] = bool(k) + i++ + } + sort.Sort(boolSlice(v2)) + if esep { + for _, k2 := range v2 { + ee.WriteMapElemKey() + ee.EncodeBool(bool(k2)) + ee.WriteMapElemValue() + ee.EncodeBool(v[bool(k2)]) + } + } else { + for _, k2 := range v2 { + ee.EncodeBool(bool(k2)) + ee.EncodeBool(v[bool(k2)]) + } + } + } else { + if esep { + for k2, v2 := range v { + ee.WriteMapElemKey() + ee.EncodeBool(k2) + ee.WriteMapElemValue() + ee.EncodeBool(v2) + } + } else { + for k2, v2 := range v { + ee.EncodeBool(k2) + ee.EncodeBool(v2) + } + } + } + ee.WriteMapEnd() +} + +// -- decode + +// -- -- fast path type switch +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { + var changed bool + switch v := iv.(type) { + + case []interface{}: + var v2 []interface{} + v2, changed = fastpathTV.DecSliceIntfV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]interface{}: + var v2 []interface{} + v2, changed = fastpathTV.DecSliceIntfV(*v, true, d) + if changed { + *v = v2 + } + case []string: + var v2 []string + v2, changed = fastpathTV.DecSliceStringV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]string: + var v2 []string + v2, changed = fastpathTV.DecSliceStringV(*v, true, d) + if changed { + *v = v2 + } + case []float32: + var v2 []float32 + v2, changed = fastpathTV.DecSliceFloat32V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]float32: + var v2 []float32 + v2, changed = fastpathTV.DecSliceFloat32V(*v, true, d) + if changed { + *v = v2 + } + case []float64: + var v2 []float64 + v2, changed = fastpathTV.DecSliceFloat64V(v, false, d) + if 
changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]float64: + var v2 []float64 + v2, changed = fastpathTV.DecSliceFloat64V(*v, true, d) + if changed { + *v = v2 + } + case []uint: + var v2 []uint + v2, changed = fastpathTV.DecSliceUintV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint: + var v2 []uint + v2, changed = fastpathTV.DecSliceUintV(*v, true, d) + if changed { + *v = v2 + } + case []uint16: + var v2 []uint16 + v2, changed = fastpathTV.DecSliceUint16V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint16: + var v2 []uint16 + v2, changed = fastpathTV.DecSliceUint16V(*v, true, d) + if changed { + *v = v2 + } + case []uint32: + var v2 []uint32 + v2, changed = fastpathTV.DecSliceUint32V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint32: + var v2 []uint32 + v2, changed = fastpathTV.DecSliceUint32V(*v, true, d) + if changed { + *v = v2 + } + case []uint64: + var v2 []uint64 + v2, changed = fastpathTV.DecSliceUint64V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uint64: + var v2 []uint64 + v2, changed = fastpathTV.DecSliceUint64V(*v, true, d) + if changed { + *v = v2 + } + case []uintptr: + var v2 []uintptr + v2, changed = fastpathTV.DecSliceUintptrV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]uintptr: + var v2 []uintptr + v2, changed = fastpathTV.DecSliceUintptrV(*v, true, d) + if changed { + *v = v2 + } + case []int: + var v2 []int + v2, changed = fastpathTV.DecSliceIntV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int: + var v2 []int + v2, changed = fastpathTV.DecSliceIntV(*v, true, d) + if changed { + *v = v2 + } + case []int8: + var v2 []int8 + v2, changed = fastpathTV.DecSliceInt8V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int8: + var v2 []int8 + v2, changed = fastpathTV.DecSliceInt8V(*v, true, d) + if changed { + *v = v2 + } + case []int16: + var v2 []int16 + v2, changed = fastpathTV.DecSliceInt16V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int16: + var v2 []int16 + v2, changed = fastpathTV.DecSliceInt16V(*v, true, d) + if changed { + *v = v2 + } + case []int32: + var v2 []int32 + v2, changed = fastpathTV.DecSliceInt32V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int32: + var v2 []int32 + v2, changed = fastpathTV.DecSliceInt32V(*v, true, d) + if changed { + *v = v2 + } + case []int64: + var v2 []int64 + v2, changed = fastpathTV.DecSliceInt64V(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]int64: + var v2 []int64 + v2, changed = fastpathTV.DecSliceInt64V(*v, true, d) + if changed { + *v = v2 + } + case []bool: + var v2 []bool + v2, changed = fastpathTV.DecSliceBoolV(v, false, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + case *[]bool: + var v2 
[]bool + v2, changed = fastpathTV.DecSliceBoolV(*v, true, d) + if changed { + *v = v2 + } + + case map[interface{}]interface{}: + fastpathTV.DecMapIntfIntfV(v, false, d) + case *map[interface{}]interface{}: + var v2 map[interface{}]interface{} + v2, changed = fastpathTV.DecMapIntfIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]string: + fastpathTV.DecMapIntfStringV(v, false, d) + case *map[interface{}]string: + var v2 map[interface{}]string + v2, changed = fastpathTV.DecMapIntfStringV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint: + fastpathTV.DecMapIntfUintV(v, false, d) + case *map[interface{}]uint: + var v2 map[interface{}]uint + v2, changed = fastpathTV.DecMapIntfUintV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint8: + fastpathTV.DecMapIntfUint8V(v, false, d) + case *map[interface{}]uint8: + var v2 map[interface{}]uint8 + v2, changed = fastpathTV.DecMapIntfUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint16: + fastpathTV.DecMapIntfUint16V(v, false, d) + case *map[interface{}]uint16: + var v2 map[interface{}]uint16 + v2, changed = fastpathTV.DecMapIntfUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint32: + fastpathTV.DecMapIntfUint32V(v, false, d) + case *map[interface{}]uint32: + var v2 map[interface{}]uint32 + v2, changed = fastpathTV.DecMapIntfUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uint64: + fastpathTV.DecMapIntfUint64V(v, false, d) + case *map[interface{}]uint64: + var v2 map[interface{}]uint64 + v2, changed = fastpathTV.DecMapIntfUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]uintptr: + fastpathTV.DecMapIntfUintptrV(v, false, d) + case *map[interface{}]uintptr: + var v2 map[interface{}]uintptr + v2, changed = fastpathTV.DecMapIntfUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int: + fastpathTV.DecMapIntfIntV(v, false, d) + case *map[interface{}]int: + var v2 map[interface{}]int + v2, changed = fastpathTV.DecMapIntfIntV(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int8: + fastpathTV.DecMapIntfInt8V(v, false, d) + case *map[interface{}]int8: + var v2 map[interface{}]int8 + v2, changed = fastpathTV.DecMapIntfInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int16: + fastpathTV.DecMapIntfInt16V(v, false, d) + case *map[interface{}]int16: + var v2 map[interface{}]int16 + v2, changed = fastpathTV.DecMapIntfInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int32: + fastpathTV.DecMapIntfInt32V(v, false, d) + case *map[interface{}]int32: + var v2 map[interface{}]int32 + v2, changed = fastpathTV.DecMapIntfInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]int64: + fastpathTV.DecMapIntfInt64V(v, false, d) + case *map[interface{}]int64: + var v2 map[interface{}]int64 + v2, changed = fastpathTV.DecMapIntfInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]float32: + fastpathTV.DecMapIntfFloat32V(v, false, d) + case *map[interface{}]float32: + var v2 map[interface{}]float32 + v2, changed = fastpathTV.DecMapIntfFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]float64: + fastpathTV.DecMapIntfFloat64V(v, false, d) + case *map[interface{}]float64: + var v2 map[interface{}]float64 + v2, changed = fastpathTV.DecMapIntfFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[interface{}]bool: + fastpathTV.DecMapIntfBoolV(v, false, d) + case *map[interface{}]bool: 
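+ // Note (descriptive comment, not in upstream): for each *map[K]V case below the
+ // decoder may allocate or replace the map (e.g. when the incoming map is nil), so
+ // the result is written back through the pointer only when changed is true; the
+ // non-pointer map[K]V cases mutate the existing map in place and need no write-back.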
+ var v2 map[interface{}]bool + v2, changed = fastpathTV.DecMapIntfBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[string]interface{}: + fastpathTV.DecMapStringIntfV(v, false, d) + case *map[string]interface{}: + var v2 map[string]interface{} + v2, changed = fastpathTV.DecMapStringIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[string]string: + fastpathTV.DecMapStringStringV(v, false, d) + case *map[string]string: + var v2 map[string]string + v2, changed = fastpathTV.DecMapStringStringV(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint: + fastpathTV.DecMapStringUintV(v, false, d) + case *map[string]uint: + var v2 map[string]uint + v2, changed = fastpathTV.DecMapStringUintV(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint8: + fastpathTV.DecMapStringUint8V(v, false, d) + case *map[string]uint8: + var v2 map[string]uint8 + v2, changed = fastpathTV.DecMapStringUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint16: + fastpathTV.DecMapStringUint16V(v, false, d) + case *map[string]uint16: + var v2 map[string]uint16 + v2, changed = fastpathTV.DecMapStringUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint32: + fastpathTV.DecMapStringUint32V(v, false, d) + case *map[string]uint32: + var v2 map[string]uint32 + v2, changed = fastpathTV.DecMapStringUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uint64: + fastpathTV.DecMapStringUint64V(v, false, d) + case *map[string]uint64: + var v2 map[string]uint64 + v2, changed = fastpathTV.DecMapStringUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]uintptr: + fastpathTV.DecMapStringUintptrV(v, false, d) + case *map[string]uintptr: + var v2 map[string]uintptr + v2, changed = fastpathTV.DecMapStringUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[string]int: + fastpathTV.DecMapStringIntV(v, false, d) + case *map[string]int: + var v2 map[string]int + v2, changed = fastpathTV.DecMapStringIntV(*v, true, d) + if changed { + *v = v2 + } + case map[string]int8: + fastpathTV.DecMapStringInt8V(v, false, d) + case *map[string]int8: + var v2 map[string]int8 + v2, changed = fastpathTV.DecMapStringInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int16: + fastpathTV.DecMapStringInt16V(v, false, d) + case *map[string]int16: + var v2 map[string]int16 + v2, changed = fastpathTV.DecMapStringInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int32: + fastpathTV.DecMapStringInt32V(v, false, d) + case *map[string]int32: + var v2 map[string]int32 + v2, changed = fastpathTV.DecMapStringInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]int64: + fastpathTV.DecMapStringInt64V(v, false, d) + case *map[string]int64: + var v2 map[string]int64 + v2, changed = fastpathTV.DecMapStringInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]float32: + fastpathTV.DecMapStringFloat32V(v, false, d) + case *map[string]float32: + var v2 map[string]float32 + v2, changed = fastpathTV.DecMapStringFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[string]float64: + fastpathTV.DecMapStringFloat64V(v, false, d) + case *map[string]float64: + var v2 map[string]float64 + v2, changed = fastpathTV.DecMapStringFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[string]bool: + fastpathTV.DecMapStringBoolV(v, false, d) + case *map[string]bool: + var v2 map[string]bool + v2, changed = fastpathTV.DecMapStringBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]interface{}: + 
fastpathTV.DecMapFloat32IntfV(v, false, d) + case *map[float32]interface{}: + var v2 map[float32]interface{} + v2, changed = fastpathTV.DecMapFloat32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]string: + fastpathTV.DecMapFloat32StringV(v, false, d) + case *map[float32]string: + var v2 map[float32]string + v2, changed = fastpathTV.DecMapFloat32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint: + fastpathTV.DecMapFloat32UintV(v, false, d) + case *map[float32]uint: + var v2 map[float32]uint + v2, changed = fastpathTV.DecMapFloat32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint8: + fastpathTV.DecMapFloat32Uint8V(v, false, d) + case *map[float32]uint8: + var v2 map[float32]uint8 + v2, changed = fastpathTV.DecMapFloat32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint16: + fastpathTV.DecMapFloat32Uint16V(v, false, d) + case *map[float32]uint16: + var v2 map[float32]uint16 + v2, changed = fastpathTV.DecMapFloat32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint32: + fastpathTV.DecMapFloat32Uint32V(v, false, d) + case *map[float32]uint32: + var v2 map[float32]uint32 + v2, changed = fastpathTV.DecMapFloat32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uint64: + fastpathTV.DecMapFloat32Uint64V(v, false, d) + case *map[float32]uint64: + var v2 map[float32]uint64 + v2, changed = fastpathTV.DecMapFloat32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]uintptr: + fastpathTV.DecMapFloat32UintptrV(v, false, d) + case *map[float32]uintptr: + var v2 map[float32]uintptr + v2, changed = fastpathTV.DecMapFloat32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int: + fastpathTV.DecMapFloat32IntV(v, false, d) + case *map[float32]int: + var v2 map[float32]int + v2, changed = fastpathTV.DecMapFloat32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int8: + fastpathTV.DecMapFloat32Int8V(v, false, d) + case *map[float32]int8: + var v2 map[float32]int8 + v2, changed = fastpathTV.DecMapFloat32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int16: + fastpathTV.DecMapFloat32Int16V(v, false, d) + case *map[float32]int16: + var v2 map[float32]int16 + v2, changed = fastpathTV.DecMapFloat32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int32: + fastpathTV.DecMapFloat32Int32V(v, false, d) + case *map[float32]int32: + var v2 map[float32]int32 + v2, changed = fastpathTV.DecMapFloat32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]int64: + fastpathTV.DecMapFloat32Int64V(v, false, d) + case *map[float32]int64: + var v2 map[float32]int64 + v2, changed = fastpathTV.DecMapFloat32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]float32: + fastpathTV.DecMapFloat32Float32V(v, false, d) + case *map[float32]float32: + var v2 map[float32]float32 + v2, changed = fastpathTV.DecMapFloat32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]float64: + fastpathTV.DecMapFloat32Float64V(v, false, d) + case *map[float32]float64: + var v2 map[float32]float64 + v2, changed = fastpathTV.DecMapFloat32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[float32]bool: + fastpathTV.DecMapFloat32BoolV(v, false, d) + case *map[float32]bool: + var v2 map[float32]bool + v2, changed = fastpathTV.DecMapFloat32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]interface{}: + fastpathTV.DecMapFloat64IntfV(v, false, d) + case 
*map[float64]interface{}: + var v2 map[float64]interface{} + v2, changed = fastpathTV.DecMapFloat64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]string: + fastpathTV.DecMapFloat64StringV(v, false, d) + case *map[float64]string: + var v2 map[float64]string + v2, changed = fastpathTV.DecMapFloat64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint: + fastpathTV.DecMapFloat64UintV(v, false, d) + case *map[float64]uint: + var v2 map[float64]uint + v2, changed = fastpathTV.DecMapFloat64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint8: + fastpathTV.DecMapFloat64Uint8V(v, false, d) + case *map[float64]uint8: + var v2 map[float64]uint8 + v2, changed = fastpathTV.DecMapFloat64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint16: + fastpathTV.DecMapFloat64Uint16V(v, false, d) + case *map[float64]uint16: + var v2 map[float64]uint16 + v2, changed = fastpathTV.DecMapFloat64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint32: + fastpathTV.DecMapFloat64Uint32V(v, false, d) + case *map[float64]uint32: + var v2 map[float64]uint32 + v2, changed = fastpathTV.DecMapFloat64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uint64: + fastpathTV.DecMapFloat64Uint64V(v, false, d) + case *map[float64]uint64: + var v2 map[float64]uint64 + v2, changed = fastpathTV.DecMapFloat64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]uintptr: + fastpathTV.DecMapFloat64UintptrV(v, false, d) + case *map[float64]uintptr: + var v2 map[float64]uintptr + v2, changed = fastpathTV.DecMapFloat64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int: + fastpathTV.DecMapFloat64IntV(v, false, d) + case *map[float64]int: + var v2 map[float64]int + v2, changed = fastpathTV.DecMapFloat64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int8: + fastpathTV.DecMapFloat64Int8V(v, false, d) + case *map[float64]int8: + var v2 map[float64]int8 + v2, changed = fastpathTV.DecMapFloat64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int16: + fastpathTV.DecMapFloat64Int16V(v, false, d) + case *map[float64]int16: + var v2 map[float64]int16 + v2, changed = fastpathTV.DecMapFloat64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int32: + fastpathTV.DecMapFloat64Int32V(v, false, d) + case *map[float64]int32: + var v2 map[float64]int32 + v2, changed = fastpathTV.DecMapFloat64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]int64: + fastpathTV.DecMapFloat64Int64V(v, false, d) + case *map[float64]int64: + var v2 map[float64]int64 + v2, changed = fastpathTV.DecMapFloat64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]float32: + fastpathTV.DecMapFloat64Float32V(v, false, d) + case *map[float64]float32: + var v2 map[float64]float32 + v2, changed = fastpathTV.DecMapFloat64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]float64: + fastpathTV.DecMapFloat64Float64V(v, false, d) + case *map[float64]float64: + var v2 map[float64]float64 + v2, changed = fastpathTV.DecMapFloat64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[float64]bool: + fastpathTV.DecMapFloat64BoolV(v, false, d) + case *map[float64]bool: + var v2 map[float64]bool + v2, changed = fastpathTV.DecMapFloat64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]interface{}: + fastpathTV.DecMapUintIntfV(v, false, d) + case *map[uint]interface{}: + var v2 map[uint]interface{} + v2, changed = 
fastpathTV.DecMapUintIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]string: + fastpathTV.DecMapUintStringV(v, false, d) + case *map[uint]string: + var v2 map[uint]string + v2, changed = fastpathTV.DecMapUintStringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint: + fastpathTV.DecMapUintUintV(v, false, d) + case *map[uint]uint: + var v2 map[uint]uint + v2, changed = fastpathTV.DecMapUintUintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint8: + fastpathTV.DecMapUintUint8V(v, false, d) + case *map[uint]uint8: + var v2 map[uint]uint8 + v2, changed = fastpathTV.DecMapUintUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint16: + fastpathTV.DecMapUintUint16V(v, false, d) + case *map[uint]uint16: + var v2 map[uint]uint16 + v2, changed = fastpathTV.DecMapUintUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint32: + fastpathTV.DecMapUintUint32V(v, false, d) + case *map[uint]uint32: + var v2 map[uint]uint32 + v2, changed = fastpathTV.DecMapUintUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uint64: + fastpathTV.DecMapUintUint64V(v, false, d) + case *map[uint]uint64: + var v2 map[uint]uint64 + v2, changed = fastpathTV.DecMapUintUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]uintptr: + fastpathTV.DecMapUintUintptrV(v, false, d) + case *map[uint]uintptr: + var v2 map[uint]uintptr + v2, changed = fastpathTV.DecMapUintUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int: + fastpathTV.DecMapUintIntV(v, false, d) + case *map[uint]int: + var v2 map[uint]int + v2, changed = fastpathTV.DecMapUintIntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int8: + fastpathTV.DecMapUintInt8V(v, false, d) + case *map[uint]int8: + var v2 map[uint]int8 + v2, changed = fastpathTV.DecMapUintInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int16: + fastpathTV.DecMapUintInt16V(v, false, d) + case *map[uint]int16: + var v2 map[uint]int16 + v2, changed = fastpathTV.DecMapUintInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int32: + fastpathTV.DecMapUintInt32V(v, false, d) + case *map[uint]int32: + var v2 map[uint]int32 + v2, changed = fastpathTV.DecMapUintInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]int64: + fastpathTV.DecMapUintInt64V(v, false, d) + case *map[uint]int64: + var v2 map[uint]int64 + v2, changed = fastpathTV.DecMapUintInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]float32: + fastpathTV.DecMapUintFloat32V(v, false, d) + case *map[uint]float32: + var v2 map[uint]float32 + v2, changed = fastpathTV.DecMapUintFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]float64: + fastpathTV.DecMapUintFloat64V(v, false, d) + case *map[uint]float64: + var v2 map[uint]float64 + v2, changed = fastpathTV.DecMapUintFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint]bool: + fastpathTV.DecMapUintBoolV(v, false, d) + case *map[uint]bool: + var v2 map[uint]bool + v2, changed = fastpathTV.DecMapUintBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]interface{}: + fastpathTV.DecMapUint8IntfV(v, false, d) + case *map[uint8]interface{}: + var v2 map[uint8]interface{} + v2, changed = fastpathTV.DecMapUint8IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]string: + fastpathTV.DecMapUint8StringV(v, false, d) + case *map[uint8]string: + var v2 map[uint8]string + v2, changed = fastpathTV.DecMapUint8StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint: + 
fastpathTV.DecMapUint8UintV(v, false, d) + case *map[uint8]uint: + var v2 map[uint8]uint + v2, changed = fastpathTV.DecMapUint8UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint8: + fastpathTV.DecMapUint8Uint8V(v, false, d) + case *map[uint8]uint8: + var v2 map[uint8]uint8 + v2, changed = fastpathTV.DecMapUint8Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint16: + fastpathTV.DecMapUint8Uint16V(v, false, d) + case *map[uint8]uint16: + var v2 map[uint8]uint16 + v2, changed = fastpathTV.DecMapUint8Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint32: + fastpathTV.DecMapUint8Uint32V(v, false, d) + case *map[uint8]uint32: + var v2 map[uint8]uint32 + v2, changed = fastpathTV.DecMapUint8Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uint64: + fastpathTV.DecMapUint8Uint64V(v, false, d) + case *map[uint8]uint64: + var v2 map[uint8]uint64 + v2, changed = fastpathTV.DecMapUint8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]uintptr: + fastpathTV.DecMapUint8UintptrV(v, false, d) + case *map[uint8]uintptr: + var v2 map[uint8]uintptr + v2, changed = fastpathTV.DecMapUint8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int: + fastpathTV.DecMapUint8IntV(v, false, d) + case *map[uint8]int: + var v2 map[uint8]int + v2, changed = fastpathTV.DecMapUint8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int8: + fastpathTV.DecMapUint8Int8V(v, false, d) + case *map[uint8]int8: + var v2 map[uint8]int8 + v2, changed = fastpathTV.DecMapUint8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int16: + fastpathTV.DecMapUint8Int16V(v, false, d) + case *map[uint8]int16: + var v2 map[uint8]int16 + v2, changed = fastpathTV.DecMapUint8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int32: + fastpathTV.DecMapUint8Int32V(v, false, d) + case *map[uint8]int32: + var v2 map[uint8]int32 + v2, changed = fastpathTV.DecMapUint8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]int64: + fastpathTV.DecMapUint8Int64V(v, false, d) + case *map[uint8]int64: + var v2 map[uint8]int64 + v2, changed = fastpathTV.DecMapUint8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float32: + fastpathTV.DecMapUint8Float32V(v, false, d) + case *map[uint8]float32: + var v2 map[uint8]float32 + v2, changed = fastpathTV.DecMapUint8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]float64: + fastpathTV.DecMapUint8Float64V(v, false, d) + case *map[uint8]float64: + var v2 map[uint8]float64 + v2, changed = fastpathTV.DecMapUint8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint8]bool: + fastpathTV.DecMapUint8BoolV(v, false, d) + case *map[uint8]bool: + var v2 map[uint8]bool + v2, changed = fastpathTV.DecMapUint8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]interface{}: + fastpathTV.DecMapUint16IntfV(v, false, d) + case *map[uint16]interface{}: + var v2 map[uint16]interface{} + v2, changed = fastpathTV.DecMapUint16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]string: + fastpathTV.DecMapUint16StringV(v, false, d) + case *map[uint16]string: + var v2 map[uint16]string + v2, changed = fastpathTV.DecMapUint16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint: + fastpathTV.DecMapUint16UintV(v, false, d) + case *map[uint16]uint: + var v2 map[uint16]uint + v2, changed = fastpathTV.DecMapUint16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint8: + 
fastpathTV.DecMapUint16Uint8V(v, false, d) + case *map[uint16]uint8: + var v2 map[uint16]uint8 + v2, changed = fastpathTV.DecMapUint16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint16: + fastpathTV.DecMapUint16Uint16V(v, false, d) + case *map[uint16]uint16: + var v2 map[uint16]uint16 + v2, changed = fastpathTV.DecMapUint16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint32: + fastpathTV.DecMapUint16Uint32V(v, false, d) + case *map[uint16]uint32: + var v2 map[uint16]uint32 + v2, changed = fastpathTV.DecMapUint16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uint64: + fastpathTV.DecMapUint16Uint64V(v, false, d) + case *map[uint16]uint64: + var v2 map[uint16]uint64 + v2, changed = fastpathTV.DecMapUint16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]uintptr: + fastpathTV.DecMapUint16UintptrV(v, false, d) + case *map[uint16]uintptr: + var v2 map[uint16]uintptr + v2, changed = fastpathTV.DecMapUint16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int: + fastpathTV.DecMapUint16IntV(v, false, d) + case *map[uint16]int: + var v2 map[uint16]int + v2, changed = fastpathTV.DecMapUint16IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int8: + fastpathTV.DecMapUint16Int8V(v, false, d) + case *map[uint16]int8: + var v2 map[uint16]int8 + v2, changed = fastpathTV.DecMapUint16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int16: + fastpathTV.DecMapUint16Int16V(v, false, d) + case *map[uint16]int16: + var v2 map[uint16]int16 + v2, changed = fastpathTV.DecMapUint16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int32: + fastpathTV.DecMapUint16Int32V(v, false, d) + case *map[uint16]int32: + var v2 map[uint16]int32 + v2, changed = fastpathTV.DecMapUint16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]int64: + fastpathTV.DecMapUint16Int64V(v, false, d) + case *map[uint16]int64: + var v2 map[uint16]int64 + v2, changed = fastpathTV.DecMapUint16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float32: + fastpathTV.DecMapUint16Float32V(v, false, d) + case *map[uint16]float32: + var v2 map[uint16]float32 + v2, changed = fastpathTV.DecMapUint16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]float64: + fastpathTV.DecMapUint16Float64V(v, false, d) + case *map[uint16]float64: + var v2 map[uint16]float64 + v2, changed = fastpathTV.DecMapUint16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint16]bool: + fastpathTV.DecMapUint16BoolV(v, false, d) + case *map[uint16]bool: + var v2 map[uint16]bool + v2, changed = fastpathTV.DecMapUint16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]interface{}: + fastpathTV.DecMapUint32IntfV(v, false, d) + case *map[uint32]interface{}: + var v2 map[uint32]interface{} + v2, changed = fastpathTV.DecMapUint32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]string: + fastpathTV.DecMapUint32StringV(v, false, d) + case *map[uint32]string: + var v2 map[uint32]string + v2, changed = fastpathTV.DecMapUint32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint: + fastpathTV.DecMapUint32UintV(v, false, d) + case *map[uint32]uint: + var v2 map[uint32]uint + v2, changed = fastpathTV.DecMapUint32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint8: + fastpathTV.DecMapUint32Uint8V(v, false, d) + case *map[uint32]uint8: + var v2 map[uint32]uint8 + v2, changed = fastpathTV.DecMapUint32Uint8V(*v, true, d) 
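+ // changed reports that the decoder replaced the map rather than filling it in
+ // place; only then is the new map assigned back through the pointer.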
+ if changed { + *v = v2 + } + case map[uint32]uint16: + fastpathTV.DecMapUint32Uint16V(v, false, d) + case *map[uint32]uint16: + var v2 map[uint32]uint16 + v2, changed = fastpathTV.DecMapUint32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint32: + fastpathTV.DecMapUint32Uint32V(v, false, d) + case *map[uint32]uint32: + var v2 map[uint32]uint32 + v2, changed = fastpathTV.DecMapUint32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uint64: + fastpathTV.DecMapUint32Uint64V(v, false, d) + case *map[uint32]uint64: + var v2 map[uint32]uint64 + v2, changed = fastpathTV.DecMapUint32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]uintptr: + fastpathTV.DecMapUint32UintptrV(v, false, d) + case *map[uint32]uintptr: + var v2 map[uint32]uintptr + v2, changed = fastpathTV.DecMapUint32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int: + fastpathTV.DecMapUint32IntV(v, false, d) + case *map[uint32]int: + var v2 map[uint32]int + v2, changed = fastpathTV.DecMapUint32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int8: + fastpathTV.DecMapUint32Int8V(v, false, d) + case *map[uint32]int8: + var v2 map[uint32]int8 + v2, changed = fastpathTV.DecMapUint32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int16: + fastpathTV.DecMapUint32Int16V(v, false, d) + case *map[uint32]int16: + var v2 map[uint32]int16 + v2, changed = fastpathTV.DecMapUint32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int32: + fastpathTV.DecMapUint32Int32V(v, false, d) + case *map[uint32]int32: + var v2 map[uint32]int32 + v2, changed = fastpathTV.DecMapUint32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]int64: + fastpathTV.DecMapUint32Int64V(v, false, d) + case *map[uint32]int64: + var v2 map[uint32]int64 + v2, changed = fastpathTV.DecMapUint32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float32: + fastpathTV.DecMapUint32Float32V(v, false, d) + case *map[uint32]float32: + var v2 map[uint32]float32 + v2, changed = fastpathTV.DecMapUint32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]float64: + fastpathTV.DecMapUint32Float64V(v, false, d) + case *map[uint32]float64: + var v2 map[uint32]float64 + v2, changed = fastpathTV.DecMapUint32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint32]bool: + fastpathTV.DecMapUint32BoolV(v, false, d) + case *map[uint32]bool: + var v2 map[uint32]bool + v2, changed = fastpathTV.DecMapUint32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]interface{}: + fastpathTV.DecMapUint64IntfV(v, false, d) + case *map[uint64]interface{}: + var v2 map[uint64]interface{} + v2, changed = fastpathTV.DecMapUint64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]string: + fastpathTV.DecMapUint64StringV(v, false, d) + case *map[uint64]string: + var v2 map[uint64]string + v2, changed = fastpathTV.DecMapUint64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint: + fastpathTV.DecMapUint64UintV(v, false, d) + case *map[uint64]uint: + var v2 map[uint64]uint + v2, changed = fastpathTV.DecMapUint64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint8: + fastpathTV.DecMapUint64Uint8V(v, false, d) + case *map[uint64]uint8: + var v2 map[uint64]uint8 + v2, changed = fastpathTV.DecMapUint64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint16: + fastpathTV.DecMapUint64Uint16V(v, false, d) + case *map[uint64]uint16: + var v2 
map[uint64]uint16 + v2, changed = fastpathTV.DecMapUint64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint32: + fastpathTV.DecMapUint64Uint32V(v, false, d) + case *map[uint64]uint32: + var v2 map[uint64]uint32 + v2, changed = fastpathTV.DecMapUint64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uint64: + fastpathTV.DecMapUint64Uint64V(v, false, d) + case *map[uint64]uint64: + var v2 map[uint64]uint64 + v2, changed = fastpathTV.DecMapUint64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]uintptr: + fastpathTV.DecMapUint64UintptrV(v, false, d) + case *map[uint64]uintptr: + var v2 map[uint64]uintptr + v2, changed = fastpathTV.DecMapUint64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int: + fastpathTV.DecMapUint64IntV(v, false, d) + case *map[uint64]int: + var v2 map[uint64]int + v2, changed = fastpathTV.DecMapUint64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int8: + fastpathTV.DecMapUint64Int8V(v, false, d) + case *map[uint64]int8: + var v2 map[uint64]int8 + v2, changed = fastpathTV.DecMapUint64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int16: + fastpathTV.DecMapUint64Int16V(v, false, d) + case *map[uint64]int16: + var v2 map[uint64]int16 + v2, changed = fastpathTV.DecMapUint64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int32: + fastpathTV.DecMapUint64Int32V(v, false, d) + case *map[uint64]int32: + var v2 map[uint64]int32 + v2, changed = fastpathTV.DecMapUint64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]int64: + fastpathTV.DecMapUint64Int64V(v, false, d) + case *map[uint64]int64: + var v2 map[uint64]int64 + v2, changed = fastpathTV.DecMapUint64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float32: + fastpathTV.DecMapUint64Float32V(v, false, d) + case *map[uint64]float32: + var v2 map[uint64]float32 + v2, changed = fastpathTV.DecMapUint64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]float64: + fastpathTV.DecMapUint64Float64V(v, false, d) + case *map[uint64]float64: + var v2 map[uint64]float64 + v2, changed = fastpathTV.DecMapUint64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[uint64]bool: + fastpathTV.DecMapUint64BoolV(v, false, d) + case *map[uint64]bool: + var v2 map[uint64]bool + v2, changed = fastpathTV.DecMapUint64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]interface{}: + fastpathTV.DecMapUintptrIntfV(v, false, d) + case *map[uintptr]interface{}: + var v2 map[uintptr]interface{} + v2, changed = fastpathTV.DecMapUintptrIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]string: + fastpathTV.DecMapUintptrStringV(v, false, d) + case *map[uintptr]string: + var v2 map[uintptr]string + v2, changed = fastpathTV.DecMapUintptrStringV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint: + fastpathTV.DecMapUintptrUintV(v, false, d) + case *map[uintptr]uint: + var v2 map[uintptr]uint + v2, changed = fastpathTV.DecMapUintptrUintV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint8: + fastpathTV.DecMapUintptrUint8V(v, false, d) + case *map[uintptr]uint8: + var v2 map[uintptr]uint8 + v2, changed = fastpathTV.DecMapUintptrUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint16: + fastpathTV.DecMapUintptrUint16V(v, false, d) + case *map[uintptr]uint16: + var v2 map[uintptr]uint16 + v2, changed = fastpathTV.DecMapUintptrUint16V(*v, true, d) + if changed { + *v = v2 + } + case 
map[uintptr]uint32: + fastpathTV.DecMapUintptrUint32V(v, false, d) + case *map[uintptr]uint32: + var v2 map[uintptr]uint32 + v2, changed = fastpathTV.DecMapUintptrUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uint64: + fastpathTV.DecMapUintptrUint64V(v, false, d) + case *map[uintptr]uint64: + var v2 map[uintptr]uint64 + v2, changed = fastpathTV.DecMapUintptrUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]uintptr: + fastpathTV.DecMapUintptrUintptrV(v, false, d) + case *map[uintptr]uintptr: + var v2 map[uintptr]uintptr + v2, changed = fastpathTV.DecMapUintptrUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int: + fastpathTV.DecMapUintptrIntV(v, false, d) + case *map[uintptr]int: + var v2 map[uintptr]int + v2, changed = fastpathTV.DecMapUintptrIntV(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int8: + fastpathTV.DecMapUintptrInt8V(v, false, d) + case *map[uintptr]int8: + var v2 map[uintptr]int8 + v2, changed = fastpathTV.DecMapUintptrInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int16: + fastpathTV.DecMapUintptrInt16V(v, false, d) + case *map[uintptr]int16: + var v2 map[uintptr]int16 + v2, changed = fastpathTV.DecMapUintptrInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int32: + fastpathTV.DecMapUintptrInt32V(v, false, d) + case *map[uintptr]int32: + var v2 map[uintptr]int32 + v2, changed = fastpathTV.DecMapUintptrInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]int64: + fastpathTV.DecMapUintptrInt64V(v, false, d) + case *map[uintptr]int64: + var v2 map[uintptr]int64 + v2, changed = fastpathTV.DecMapUintptrInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float32: + fastpathTV.DecMapUintptrFloat32V(v, false, d) + case *map[uintptr]float32: + var v2 map[uintptr]float32 + v2, changed = fastpathTV.DecMapUintptrFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]float64: + fastpathTV.DecMapUintptrFloat64V(v, false, d) + case *map[uintptr]float64: + var v2 map[uintptr]float64 + v2, changed = fastpathTV.DecMapUintptrFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[uintptr]bool: + fastpathTV.DecMapUintptrBoolV(v, false, d) + case *map[uintptr]bool: + var v2 map[uintptr]bool + v2, changed = fastpathTV.DecMapUintptrBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int]interface{}: + fastpathTV.DecMapIntIntfV(v, false, d) + case *map[int]interface{}: + var v2 map[int]interface{} + v2, changed = fastpathTV.DecMapIntIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int]string: + fastpathTV.DecMapIntStringV(v, false, d) + case *map[int]string: + var v2 map[int]string + v2, changed = fastpathTV.DecMapIntStringV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint: + fastpathTV.DecMapIntUintV(v, false, d) + case *map[int]uint: + var v2 map[int]uint + v2, changed = fastpathTV.DecMapIntUintV(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint8: + fastpathTV.DecMapIntUint8V(v, false, d) + case *map[int]uint8: + var v2 map[int]uint8 + v2, changed = fastpathTV.DecMapIntUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint16: + fastpathTV.DecMapIntUint16V(v, false, d) + case *map[int]uint16: + var v2 map[int]uint16 + v2, changed = fastpathTV.DecMapIntUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uint32: + fastpathTV.DecMapIntUint32V(v, false, d) + case *map[int]uint32: + var v2 map[int]uint32 + v2, changed = fastpathTV.DecMapIntUint32V(*v, true, d) + if 
changed { + *v = v2 + } + case map[int]uint64: + fastpathTV.DecMapIntUint64V(v, false, d) + case *map[int]uint64: + var v2 map[int]uint64 + v2, changed = fastpathTV.DecMapIntUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]uintptr: + fastpathTV.DecMapIntUintptrV(v, false, d) + case *map[int]uintptr: + var v2 map[int]uintptr + v2, changed = fastpathTV.DecMapIntUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int: + fastpathTV.DecMapIntIntV(v, false, d) + case *map[int]int: + var v2 map[int]int + v2, changed = fastpathTV.DecMapIntIntV(*v, true, d) + if changed { + *v = v2 + } + case map[int]int8: + fastpathTV.DecMapIntInt8V(v, false, d) + case *map[int]int8: + var v2 map[int]int8 + v2, changed = fastpathTV.DecMapIntInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int16: + fastpathTV.DecMapIntInt16V(v, false, d) + case *map[int]int16: + var v2 map[int]int16 + v2, changed = fastpathTV.DecMapIntInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int32: + fastpathTV.DecMapIntInt32V(v, false, d) + case *map[int]int32: + var v2 map[int]int32 + v2, changed = fastpathTV.DecMapIntInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]int64: + fastpathTV.DecMapIntInt64V(v, false, d) + case *map[int]int64: + var v2 map[int]int64 + v2, changed = fastpathTV.DecMapIntInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float32: + fastpathTV.DecMapIntFloat32V(v, false, d) + case *map[int]float32: + var v2 map[int]float32 + v2, changed = fastpathTV.DecMapIntFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[int]float64: + fastpathTV.DecMapIntFloat64V(v, false, d) + case *map[int]float64: + var v2 map[int]float64 + v2, changed = fastpathTV.DecMapIntFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[int]bool: + fastpathTV.DecMapIntBoolV(v, false, d) + case *map[int]bool: + var v2 map[int]bool + v2, changed = fastpathTV.DecMapIntBoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]interface{}: + fastpathTV.DecMapInt8IntfV(v, false, d) + case *map[int8]interface{}: + var v2 map[int8]interface{} + v2, changed = fastpathTV.DecMapInt8IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]string: + fastpathTV.DecMapInt8StringV(v, false, d) + case *map[int8]string: + var v2 map[int8]string + v2, changed = fastpathTV.DecMapInt8StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint: + fastpathTV.DecMapInt8UintV(v, false, d) + case *map[int8]uint: + var v2 map[int8]uint + v2, changed = fastpathTV.DecMapInt8UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint8: + fastpathTV.DecMapInt8Uint8V(v, false, d) + case *map[int8]uint8: + var v2 map[int8]uint8 + v2, changed = fastpathTV.DecMapInt8Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint16: + fastpathTV.DecMapInt8Uint16V(v, false, d) + case *map[int8]uint16: + var v2 map[int8]uint16 + v2, changed = fastpathTV.DecMapInt8Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint32: + fastpathTV.DecMapInt8Uint32V(v, false, d) + case *map[int8]uint32: + var v2 map[int8]uint32 + v2, changed = fastpathTV.DecMapInt8Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uint64: + fastpathTV.DecMapInt8Uint64V(v, false, d) + case *map[int8]uint64: + var v2 map[int8]uint64 + v2, changed = fastpathTV.DecMapInt8Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]uintptr: + fastpathTV.DecMapInt8UintptrV(v, false, d) + case *map[int8]uintptr: + var v2 map[int8]uintptr + v2, 
changed = fastpathTV.DecMapInt8UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int: + fastpathTV.DecMapInt8IntV(v, false, d) + case *map[int8]int: + var v2 map[int8]int + v2, changed = fastpathTV.DecMapInt8IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int8: + fastpathTV.DecMapInt8Int8V(v, false, d) + case *map[int8]int8: + var v2 map[int8]int8 + v2, changed = fastpathTV.DecMapInt8Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int16: + fastpathTV.DecMapInt8Int16V(v, false, d) + case *map[int8]int16: + var v2 map[int8]int16 + v2, changed = fastpathTV.DecMapInt8Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int32: + fastpathTV.DecMapInt8Int32V(v, false, d) + case *map[int8]int32: + var v2 map[int8]int32 + v2, changed = fastpathTV.DecMapInt8Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]int64: + fastpathTV.DecMapInt8Int64V(v, false, d) + case *map[int8]int64: + var v2 map[int8]int64 + v2, changed = fastpathTV.DecMapInt8Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float32: + fastpathTV.DecMapInt8Float32V(v, false, d) + case *map[int8]float32: + var v2 map[int8]float32 + v2, changed = fastpathTV.DecMapInt8Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]float64: + fastpathTV.DecMapInt8Float64V(v, false, d) + case *map[int8]float64: + var v2 map[int8]float64 + v2, changed = fastpathTV.DecMapInt8Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int8]bool: + fastpathTV.DecMapInt8BoolV(v, false, d) + case *map[int8]bool: + var v2 map[int8]bool + v2, changed = fastpathTV.DecMapInt8BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]interface{}: + fastpathTV.DecMapInt16IntfV(v, false, d) + case *map[int16]interface{}: + var v2 map[int16]interface{} + v2, changed = fastpathTV.DecMapInt16IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]string: + fastpathTV.DecMapInt16StringV(v, false, d) + case *map[int16]string: + var v2 map[int16]string + v2, changed = fastpathTV.DecMapInt16StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint: + fastpathTV.DecMapInt16UintV(v, false, d) + case *map[int16]uint: + var v2 map[int16]uint + v2, changed = fastpathTV.DecMapInt16UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint8: + fastpathTV.DecMapInt16Uint8V(v, false, d) + case *map[int16]uint8: + var v2 map[int16]uint8 + v2, changed = fastpathTV.DecMapInt16Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint16: + fastpathTV.DecMapInt16Uint16V(v, false, d) + case *map[int16]uint16: + var v2 map[int16]uint16 + v2, changed = fastpathTV.DecMapInt16Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint32: + fastpathTV.DecMapInt16Uint32V(v, false, d) + case *map[int16]uint32: + var v2 map[int16]uint32 + v2, changed = fastpathTV.DecMapInt16Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uint64: + fastpathTV.DecMapInt16Uint64V(v, false, d) + case *map[int16]uint64: + var v2 map[int16]uint64 + v2, changed = fastpathTV.DecMapInt16Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]uintptr: + fastpathTV.DecMapInt16UintptrV(v, false, d) + case *map[int16]uintptr: + var v2 map[int16]uintptr + v2, changed = fastpathTV.DecMapInt16UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int: + fastpathTV.DecMapInt16IntV(v, false, d) + case *map[int16]int: + var v2 map[int16]int + v2, changed = fastpathTV.DecMapInt16IntV(*v, true, d) + if changed { + *v = v2 
+ } + case map[int16]int8: + fastpathTV.DecMapInt16Int8V(v, false, d) + case *map[int16]int8: + var v2 map[int16]int8 + v2, changed = fastpathTV.DecMapInt16Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int16: + fastpathTV.DecMapInt16Int16V(v, false, d) + case *map[int16]int16: + var v2 map[int16]int16 + v2, changed = fastpathTV.DecMapInt16Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int32: + fastpathTV.DecMapInt16Int32V(v, false, d) + case *map[int16]int32: + var v2 map[int16]int32 + v2, changed = fastpathTV.DecMapInt16Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]int64: + fastpathTV.DecMapInt16Int64V(v, false, d) + case *map[int16]int64: + var v2 map[int16]int64 + v2, changed = fastpathTV.DecMapInt16Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float32: + fastpathTV.DecMapInt16Float32V(v, false, d) + case *map[int16]float32: + var v2 map[int16]float32 + v2, changed = fastpathTV.DecMapInt16Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]float64: + fastpathTV.DecMapInt16Float64V(v, false, d) + case *map[int16]float64: + var v2 map[int16]float64 + v2, changed = fastpathTV.DecMapInt16Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int16]bool: + fastpathTV.DecMapInt16BoolV(v, false, d) + case *map[int16]bool: + var v2 map[int16]bool + v2, changed = fastpathTV.DecMapInt16BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]interface{}: + fastpathTV.DecMapInt32IntfV(v, false, d) + case *map[int32]interface{}: + var v2 map[int32]interface{} + v2, changed = fastpathTV.DecMapInt32IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]string: + fastpathTV.DecMapInt32StringV(v, false, d) + case *map[int32]string: + var v2 map[int32]string + v2, changed = fastpathTV.DecMapInt32StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint: + fastpathTV.DecMapInt32UintV(v, false, d) + case *map[int32]uint: + var v2 map[int32]uint + v2, changed = fastpathTV.DecMapInt32UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint8: + fastpathTV.DecMapInt32Uint8V(v, false, d) + case *map[int32]uint8: + var v2 map[int32]uint8 + v2, changed = fastpathTV.DecMapInt32Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint16: + fastpathTV.DecMapInt32Uint16V(v, false, d) + case *map[int32]uint16: + var v2 map[int32]uint16 + v2, changed = fastpathTV.DecMapInt32Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint32: + fastpathTV.DecMapInt32Uint32V(v, false, d) + case *map[int32]uint32: + var v2 map[int32]uint32 + v2, changed = fastpathTV.DecMapInt32Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uint64: + fastpathTV.DecMapInt32Uint64V(v, false, d) + case *map[int32]uint64: + var v2 map[int32]uint64 + v2, changed = fastpathTV.DecMapInt32Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]uintptr: + fastpathTV.DecMapInt32UintptrV(v, false, d) + case *map[int32]uintptr: + var v2 map[int32]uintptr + v2, changed = fastpathTV.DecMapInt32UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int: + fastpathTV.DecMapInt32IntV(v, false, d) + case *map[int32]int: + var v2 map[int32]int + v2, changed = fastpathTV.DecMapInt32IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int8: + fastpathTV.DecMapInt32Int8V(v, false, d) + case *map[int32]int8: + var v2 map[int32]int8 + v2, changed = fastpathTV.DecMapInt32Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int16: + 
fastpathTV.DecMapInt32Int16V(v, false, d) + case *map[int32]int16: + var v2 map[int32]int16 + v2, changed = fastpathTV.DecMapInt32Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int32: + fastpathTV.DecMapInt32Int32V(v, false, d) + case *map[int32]int32: + var v2 map[int32]int32 + v2, changed = fastpathTV.DecMapInt32Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]int64: + fastpathTV.DecMapInt32Int64V(v, false, d) + case *map[int32]int64: + var v2 map[int32]int64 + v2, changed = fastpathTV.DecMapInt32Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float32: + fastpathTV.DecMapInt32Float32V(v, false, d) + case *map[int32]float32: + var v2 map[int32]float32 + v2, changed = fastpathTV.DecMapInt32Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]float64: + fastpathTV.DecMapInt32Float64V(v, false, d) + case *map[int32]float64: + var v2 map[int32]float64 + v2, changed = fastpathTV.DecMapInt32Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int32]bool: + fastpathTV.DecMapInt32BoolV(v, false, d) + case *map[int32]bool: + var v2 map[int32]bool + v2, changed = fastpathTV.DecMapInt32BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]interface{}: + fastpathTV.DecMapInt64IntfV(v, false, d) + case *map[int64]interface{}: + var v2 map[int64]interface{} + v2, changed = fastpathTV.DecMapInt64IntfV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]string: + fastpathTV.DecMapInt64StringV(v, false, d) + case *map[int64]string: + var v2 map[int64]string + v2, changed = fastpathTV.DecMapInt64StringV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint: + fastpathTV.DecMapInt64UintV(v, false, d) + case *map[int64]uint: + var v2 map[int64]uint + v2, changed = fastpathTV.DecMapInt64UintV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint8: + fastpathTV.DecMapInt64Uint8V(v, false, d) + case *map[int64]uint8: + var v2 map[int64]uint8 + v2, changed = fastpathTV.DecMapInt64Uint8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint16: + fastpathTV.DecMapInt64Uint16V(v, false, d) + case *map[int64]uint16: + var v2 map[int64]uint16 + v2, changed = fastpathTV.DecMapInt64Uint16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint32: + fastpathTV.DecMapInt64Uint32V(v, false, d) + case *map[int64]uint32: + var v2 map[int64]uint32 + v2, changed = fastpathTV.DecMapInt64Uint32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uint64: + fastpathTV.DecMapInt64Uint64V(v, false, d) + case *map[int64]uint64: + var v2 map[int64]uint64 + v2, changed = fastpathTV.DecMapInt64Uint64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]uintptr: + fastpathTV.DecMapInt64UintptrV(v, false, d) + case *map[int64]uintptr: + var v2 map[int64]uintptr + v2, changed = fastpathTV.DecMapInt64UintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int: + fastpathTV.DecMapInt64IntV(v, false, d) + case *map[int64]int: + var v2 map[int64]int + v2, changed = fastpathTV.DecMapInt64IntV(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int8: + fastpathTV.DecMapInt64Int8V(v, false, d) + case *map[int64]int8: + var v2 map[int64]int8 + v2, changed = fastpathTV.DecMapInt64Int8V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int16: + fastpathTV.DecMapInt64Int16V(v, false, d) + case *map[int64]int16: + var v2 map[int64]int16 + v2, changed = fastpathTV.DecMapInt64Int16V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int32: + 
fastpathTV.DecMapInt64Int32V(v, false, d) + case *map[int64]int32: + var v2 map[int64]int32 + v2, changed = fastpathTV.DecMapInt64Int32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]int64: + fastpathTV.DecMapInt64Int64V(v, false, d) + case *map[int64]int64: + var v2 map[int64]int64 + v2, changed = fastpathTV.DecMapInt64Int64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float32: + fastpathTV.DecMapInt64Float32V(v, false, d) + case *map[int64]float32: + var v2 map[int64]float32 + v2, changed = fastpathTV.DecMapInt64Float32V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]float64: + fastpathTV.DecMapInt64Float64V(v, false, d) + case *map[int64]float64: + var v2 map[int64]float64 + v2, changed = fastpathTV.DecMapInt64Float64V(*v, true, d) + if changed { + *v = v2 + } + case map[int64]bool: + fastpathTV.DecMapInt64BoolV(v, false, d) + case *map[int64]bool: + var v2 map[int64]bool + v2, changed = fastpathTV.DecMapInt64BoolV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]interface{}: + fastpathTV.DecMapBoolIntfV(v, false, d) + case *map[bool]interface{}: + var v2 map[bool]interface{} + v2, changed = fastpathTV.DecMapBoolIntfV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]string: + fastpathTV.DecMapBoolStringV(v, false, d) + case *map[bool]string: + var v2 map[bool]string + v2, changed = fastpathTV.DecMapBoolStringV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint: + fastpathTV.DecMapBoolUintV(v, false, d) + case *map[bool]uint: + var v2 map[bool]uint + v2, changed = fastpathTV.DecMapBoolUintV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint8: + fastpathTV.DecMapBoolUint8V(v, false, d) + case *map[bool]uint8: + var v2 map[bool]uint8 + v2, changed = fastpathTV.DecMapBoolUint8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint16: + fastpathTV.DecMapBoolUint16V(v, false, d) + case *map[bool]uint16: + var v2 map[bool]uint16 + v2, changed = fastpathTV.DecMapBoolUint16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint32: + fastpathTV.DecMapBoolUint32V(v, false, d) + case *map[bool]uint32: + var v2 map[bool]uint32 + v2, changed = fastpathTV.DecMapBoolUint32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uint64: + fastpathTV.DecMapBoolUint64V(v, false, d) + case *map[bool]uint64: + var v2 map[bool]uint64 + v2, changed = fastpathTV.DecMapBoolUint64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]uintptr: + fastpathTV.DecMapBoolUintptrV(v, false, d) + case *map[bool]uintptr: + var v2 map[bool]uintptr + v2, changed = fastpathTV.DecMapBoolUintptrV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int: + fastpathTV.DecMapBoolIntV(v, false, d) + case *map[bool]int: + var v2 map[bool]int + v2, changed = fastpathTV.DecMapBoolIntV(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int8: + fastpathTV.DecMapBoolInt8V(v, false, d) + case *map[bool]int8: + var v2 map[bool]int8 + v2, changed = fastpathTV.DecMapBoolInt8V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int16: + fastpathTV.DecMapBoolInt16V(v, false, d) + case *map[bool]int16: + var v2 map[bool]int16 + v2, changed = fastpathTV.DecMapBoolInt16V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int32: + fastpathTV.DecMapBoolInt32V(v, false, d) + case *map[bool]int32: + var v2 map[bool]int32 + v2, changed = fastpathTV.DecMapBoolInt32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]int64: + fastpathTV.DecMapBoolInt64V(v, false, d) + case *map[bool]int64: + var v2 map[bool]int64 
+ v2, changed = fastpathTV.DecMapBoolInt64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float32: + fastpathTV.DecMapBoolFloat32V(v, false, d) + case *map[bool]float32: + var v2 map[bool]float32 + v2, changed = fastpathTV.DecMapBoolFloat32V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]float64: + fastpathTV.DecMapBoolFloat64V(v, false, d) + case *map[bool]float64: + var v2 map[bool]float64 + v2, changed = fastpathTV.DecMapBoolFloat64V(*v, true, d) + if changed { + *v = v2 + } + case map[bool]bool: + fastpathTV.DecMapBoolBoolV(v, false, d) + case *map[bool]bool: + var v2 map[bool]bool + v2, changed = fastpathTV.DecMapBoolBoolV(*v, true, d) + if changed { + *v = v2 + } + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { + switch v := iv.(type) { + + case *[]interface{}: + *v = nil + case *[]string: + *v = nil + case *[]float32: + *v = nil + case *[]float64: + *v = nil + case *[]uint: + *v = nil + case *[]uint8: + *v = nil + case *[]uint16: + *v = nil + case *[]uint32: + *v = nil + case *[]uint64: + *v = nil + case *[]uintptr: + *v = nil + case *[]int: + *v = nil + case *[]int8: + *v = nil + case *[]int16: + *v = nil + case *[]int32: + *v = nil + case *[]int64: + *v = nil + case *[]bool: + *v = nil + + case *map[interface{}]interface{}: + *v = nil + case *map[interface{}]string: + *v = nil + case *map[interface{}]uint: + *v = nil + case *map[interface{}]uint8: + *v = nil + case *map[interface{}]uint16: + *v = nil + case *map[interface{}]uint32: + *v = nil + case *map[interface{}]uint64: + *v = nil + case *map[interface{}]uintptr: + *v = nil + case *map[interface{}]int: + *v = nil + case *map[interface{}]int8: + *v = nil + case *map[interface{}]int16: + *v = nil + case *map[interface{}]int32: + *v = nil + case *map[interface{}]int64: + *v = nil + case *map[interface{}]float32: + *v = nil + case *map[interface{}]float64: + *v = nil + case *map[interface{}]bool: + *v = nil + case *map[string]interface{}: + *v = nil + case *map[string]string: + *v = nil + case *map[string]uint: + *v = nil + case *map[string]uint8: + *v = nil + case *map[string]uint16: + *v = nil + case *map[string]uint32: + *v = nil + case *map[string]uint64: + *v = nil + case *map[string]uintptr: + *v = nil + case *map[string]int: + *v = nil + case *map[string]int8: + *v = nil + case *map[string]int16: + *v = nil + case *map[string]int32: + *v = nil + case *map[string]int64: + *v = nil + case *map[string]float32: + *v = nil + case *map[string]float64: + *v = nil + case *map[string]bool: + *v = nil + case *map[float32]interface{}: + *v = nil + case *map[float32]string: + *v = nil + case *map[float32]uint: + *v = nil + case *map[float32]uint8: + *v = nil + case *map[float32]uint16: + *v = nil + case *map[float32]uint32: + *v = nil + case *map[float32]uint64: + *v = nil + case *map[float32]uintptr: + *v = nil + case *map[float32]int: + *v = nil + case *map[float32]int8: + *v = nil + case *map[float32]int16: + *v = nil + case *map[float32]int32: + *v = nil + case *map[float32]int64: + *v = nil + case *map[float32]float32: + *v = nil + case *map[float32]float64: + *v = nil + case *map[float32]bool: + *v = nil + case *map[float64]interface{}: + *v = nil + case *map[float64]string: + *v = nil + case *map[float64]uint: + *v = nil + case *map[float64]uint8: + *v = nil + case *map[float64]uint16: + *v = nil + case *map[float64]uint32: + *v = nil + case *map[float64]uint64: + *v = 
nil + case *map[float64]uintptr: + *v = nil + case *map[float64]int: + *v = nil + case *map[float64]int8: + *v = nil + case *map[float64]int16: + *v = nil + case *map[float64]int32: + *v = nil + case *map[float64]int64: + *v = nil + case *map[float64]float32: + *v = nil + case *map[float64]float64: + *v = nil + case *map[float64]bool: + *v = nil + case *map[uint]interface{}: + *v = nil + case *map[uint]string: + *v = nil + case *map[uint]uint: + *v = nil + case *map[uint]uint8: + *v = nil + case *map[uint]uint16: + *v = nil + case *map[uint]uint32: + *v = nil + case *map[uint]uint64: + *v = nil + case *map[uint]uintptr: + *v = nil + case *map[uint]int: + *v = nil + case *map[uint]int8: + *v = nil + case *map[uint]int16: + *v = nil + case *map[uint]int32: + *v = nil + case *map[uint]int64: + *v = nil + case *map[uint]float32: + *v = nil + case *map[uint]float64: + *v = nil + case *map[uint]bool: + *v = nil + case *map[uint8]interface{}: + *v = nil + case *map[uint8]string: + *v = nil + case *map[uint8]uint: + *v = nil + case *map[uint8]uint8: + *v = nil + case *map[uint8]uint16: + *v = nil + case *map[uint8]uint32: + *v = nil + case *map[uint8]uint64: + *v = nil + case *map[uint8]uintptr: + *v = nil + case *map[uint8]int: + *v = nil + case *map[uint8]int8: + *v = nil + case *map[uint8]int16: + *v = nil + case *map[uint8]int32: + *v = nil + case *map[uint8]int64: + *v = nil + case *map[uint8]float32: + *v = nil + case *map[uint8]float64: + *v = nil + case *map[uint8]bool: + *v = nil + case *map[uint16]interface{}: + *v = nil + case *map[uint16]string: + *v = nil + case *map[uint16]uint: + *v = nil + case *map[uint16]uint8: + *v = nil + case *map[uint16]uint16: + *v = nil + case *map[uint16]uint32: + *v = nil + case *map[uint16]uint64: + *v = nil + case *map[uint16]uintptr: + *v = nil + case *map[uint16]int: + *v = nil + case *map[uint16]int8: + *v = nil + case *map[uint16]int16: + *v = nil + case *map[uint16]int32: + *v = nil + case *map[uint16]int64: + *v = nil + case *map[uint16]float32: + *v = nil + case *map[uint16]float64: + *v = nil + case *map[uint16]bool: + *v = nil + case *map[uint32]interface{}: + *v = nil + case *map[uint32]string: + *v = nil + case *map[uint32]uint: + *v = nil + case *map[uint32]uint8: + *v = nil + case *map[uint32]uint16: + *v = nil + case *map[uint32]uint32: + *v = nil + case *map[uint32]uint64: + *v = nil + case *map[uint32]uintptr: + *v = nil + case *map[uint32]int: + *v = nil + case *map[uint32]int8: + *v = nil + case *map[uint32]int16: + *v = nil + case *map[uint32]int32: + *v = nil + case *map[uint32]int64: + *v = nil + case *map[uint32]float32: + *v = nil + case *map[uint32]float64: + *v = nil + case *map[uint32]bool: + *v = nil + case *map[uint64]interface{}: + *v = nil + case *map[uint64]string: + *v = nil + case *map[uint64]uint: + *v = nil + case *map[uint64]uint8: + *v = nil + case *map[uint64]uint16: + *v = nil + case *map[uint64]uint32: + *v = nil + case *map[uint64]uint64: + *v = nil + case *map[uint64]uintptr: + *v = nil + case *map[uint64]int: + *v = nil + case *map[uint64]int8: + *v = nil + case *map[uint64]int16: + *v = nil + case *map[uint64]int32: + *v = nil + case *map[uint64]int64: + *v = nil + case *map[uint64]float32: + *v = nil + case *map[uint64]float64: + *v = nil + case *map[uint64]bool: + *v = nil + case *map[uintptr]interface{}: + *v = nil + case *map[uintptr]string: + *v = nil + case *map[uintptr]uint: + *v = nil + case *map[uintptr]uint8: + *v = nil + case *map[uintptr]uint16: + *v = nil + case *map[uintptr]uint32: + *v = nil + 
case *map[uintptr]uint64: + *v = nil + case *map[uintptr]uintptr: + *v = nil + case *map[uintptr]int: + *v = nil + case *map[uintptr]int8: + *v = nil + case *map[uintptr]int16: + *v = nil + case *map[uintptr]int32: + *v = nil + case *map[uintptr]int64: + *v = nil + case *map[uintptr]float32: + *v = nil + case *map[uintptr]float64: + *v = nil + case *map[uintptr]bool: + *v = nil + case *map[int]interface{}: + *v = nil + case *map[int]string: + *v = nil + case *map[int]uint: + *v = nil + case *map[int]uint8: + *v = nil + case *map[int]uint16: + *v = nil + case *map[int]uint32: + *v = nil + case *map[int]uint64: + *v = nil + case *map[int]uintptr: + *v = nil + case *map[int]int: + *v = nil + case *map[int]int8: + *v = nil + case *map[int]int16: + *v = nil + case *map[int]int32: + *v = nil + case *map[int]int64: + *v = nil + case *map[int]float32: + *v = nil + case *map[int]float64: + *v = nil + case *map[int]bool: + *v = nil + case *map[int8]interface{}: + *v = nil + case *map[int8]string: + *v = nil + case *map[int8]uint: + *v = nil + case *map[int8]uint8: + *v = nil + case *map[int8]uint16: + *v = nil + case *map[int8]uint32: + *v = nil + case *map[int8]uint64: + *v = nil + case *map[int8]uintptr: + *v = nil + case *map[int8]int: + *v = nil + case *map[int8]int8: + *v = nil + case *map[int8]int16: + *v = nil + case *map[int8]int32: + *v = nil + case *map[int8]int64: + *v = nil + case *map[int8]float32: + *v = nil + case *map[int8]float64: + *v = nil + case *map[int8]bool: + *v = nil + case *map[int16]interface{}: + *v = nil + case *map[int16]string: + *v = nil + case *map[int16]uint: + *v = nil + case *map[int16]uint8: + *v = nil + case *map[int16]uint16: + *v = nil + case *map[int16]uint32: + *v = nil + case *map[int16]uint64: + *v = nil + case *map[int16]uintptr: + *v = nil + case *map[int16]int: + *v = nil + case *map[int16]int8: + *v = nil + case *map[int16]int16: + *v = nil + case *map[int16]int32: + *v = nil + case *map[int16]int64: + *v = nil + case *map[int16]float32: + *v = nil + case *map[int16]float64: + *v = nil + case *map[int16]bool: + *v = nil + case *map[int32]interface{}: + *v = nil + case *map[int32]string: + *v = nil + case *map[int32]uint: + *v = nil + case *map[int32]uint8: + *v = nil + case *map[int32]uint16: + *v = nil + case *map[int32]uint32: + *v = nil + case *map[int32]uint64: + *v = nil + case *map[int32]uintptr: + *v = nil + case *map[int32]int: + *v = nil + case *map[int32]int8: + *v = nil + case *map[int32]int16: + *v = nil + case *map[int32]int32: + *v = nil + case *map[int32]int64: + *v = nil + case *map[int32]float32: + *v = nil + case *map[int32]float64: + *v = nil + case *map[int32]bool: + *v = nil + case *map[int64]interface{}: + *v = nil + case *map[int64]string: + *v = nil + case *map[int64]uint: + *v = nil + case *map[int64]uint8: + *v = nil + case *map[int64]uint16: + *v = nil + case *map[int64]uint32: + *v = nil + case *map[int64]uint64: + *v = nil + case *map[int64]uintptr: + *v = nil + case *map[int64]int: + *v = nil + case *map[int64]int8: + *v = nil + case *map[int64]int16: + *v = nil + case *map[int64]int32: + *v = nil + case *map[int64]int64: + *v = nil + case *map[int64]float32: + *v = nil + case *map[int64]float64: + *v = nil + case *map[int64]bool: + *v = nil + case *map[bool]interface{}: + *v = nil + case *map[bool]string: + *v = nil + case *map[bool]uint: + *v = nil + case *map[bool]uint8: + *v = nil + case *map[bool]uint16: + *v = nil + case *map[bool]uint32: + *v = nil + case *map[bool]uint64: + *v = nil + case *map[bool]uintptr: + *v = 
nil + case *map[bool]int: + *v = nil + case *map[bool]int8: + *v = nil + case *map[bool]int16: + *v = nil + case *map[bool]int32: + *v = nil + case *map[bool]int64: + *v = nil + case *map[bool]float32: + *v = nil + case *map[bool]float64: + *v = nil + case *map[bool]bool: + *v = nil + default: + _ = v // workaround https://github.com/golang/go/issues/12927 seen in go1.4 + return false + } + return true +} + +// -- -- fast path functions + +func (d *Decoder) fastpathDecSliceIntfR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]interface{}) + v, changed := fastpathTV.DecSliceIntfV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]interface{}) + v2, changed := fastpathTV.DecSliceIntfV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceIntfX(vp *[]interface{}, d *Decoder) { + v, changed := f.DecSliceIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntfV(v []interface{}, canChange bool, d *Decoder) (_ []interface{}, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []interface{}{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]interface{}, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]interface{}, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, nil) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = nil + } else { + d.decode(&v[j]) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]interface{}, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceStringR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]string) + v, changed := fastpathTV.DecSliceStringV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]string) + v2, changed := fastpathTV.DecSliceStringV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceStringX(vp *[]string, d *Decoder) { + v, changed := f.DecSliceStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceStringV(v []string, canChange bool, d *Decoder) (_ []string, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []string{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + 
slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]string, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 16) + } else { + xlen = 8 + } + v = make([]string, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, "") + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = "" + } else { + v[j] = dd.DecodeString() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]string, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]float32) + v, changed := fastpathTV.DecSliceFloat32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float32) + v2, changed := fastpathTV.DecSliceFloat32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat32X(vp *[]float32, d *Decoder) { + v, changed := f.DecSliceFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat32V(v []float32, canChange bool, d *Decoder) (_ []float32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]float32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = float32(chkOvf.Float32V(dd.DecodeFloat64())) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceFloat64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() 
== reflect.Ptr { + vp := rv2i(rv).(*[]float64) + v, changed := fastpathTV.DecSliceFloat64V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]float64) + v2, changed := fastpathTV.DecSliceFloat64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceFloat64X(vp *[]float64, d *Decoder) { + v, changed := f.DecSliceFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceFloat64V(v []float64, canChange bool, d *Decoder) (_ []float64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []float64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]float64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]float64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = dd.DecodeFloat64() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]float64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint) + v, changed := fastpathTV.DecSliceUintV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint) + v2, changed := fastpathTV.DecSliceUintV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUintX(vp *[]uint, d *Decoder) { + v, changed := f.DecSliceUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintV(v []uint, canChange bool, d *Decoder) (_ []uint, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint, xlen) + 
changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint8) + v, changed := fastpathTV.DecSliceUint8V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint8) + v2, changed := fastpathTV.DecSliceUint8V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint8X(vp *[]uint8, d *Decoder) { + v, changed := f.DecSliceUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint8, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]uint8, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint8, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint16) + v, changed := fastpathTV.DecSliceUint16V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint16) + v2, changed := fastpathTV.DecSliceUint16V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint16X(vp *[]uint16, d *Decoder) { + v, changed := f.DecSliceUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint16V(v []uint16, canChange bool, d *Decoder) (_ []uint16, changed bool) { + dd := d.d + slh, containerLenS := 
d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]uint16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint16, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint32) + v, changed := fastpathTV.DecSliceUint32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint32) + v2, changed := fastpathTV.DecSliceUint32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint32X(vp *[]uint32, d *Decoder) { + v, changed := f.DecSliceUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint32V(v []uint32, canChange bool, d *Decoder) (_ []uint32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]uint32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint32, 0) + changed = true + } + } + 
slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUint64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uint64) + v, changed := fastpathTV.DecSliceUint64V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uint64) + v2, changed := fastpathTV.DecSliceUint64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUint64X(vp *[]uint64, d *Decoder) { + v, changed := f.DecSliceUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUint64V(v []uint64, canChange bool, d *Decoder) (_ []uint64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uint64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uint64, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uint64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = dd.DecodeUint64() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uint64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceUintptrR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]uintptr) + v, changed := fastpathTV.DecSliceUintptrV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]uintptr) + v2, changed := fastpathTV.DecSliceUintptrV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceUintptrX(vp *[]uintptr, d *Decoder) { + v, changed := f.DecSliceUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceUintptrV(v []uintptr, canChange bool, d *Decoder) (_ []uintptr, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []uintptr{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]uintptr, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || 
!(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]uintptr, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]uintptr, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceIntR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]int) + v, changed := fastpathTV.DecSliceIntV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]int) + v2, changed := fastpathTV.DecSliceIntV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceIntX(vp *[]int, d *Decoder) { + v, changed := f.DecSliceIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceIntV(v []int, canChange bool, d *Decoder) (_ []int, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt8R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]int8) + v, changed := fastpathTV.DecSliceInt8V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]int8) + v2, changed := fastpathTV.DecSliceInt8V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceInt8X(vp *[]int8, d *Decoder) { + v, changed := f.DecSliceInt8V(*vp, true, d) + if 
changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt8V(v []int8, canChange bool, d *Decoder) (_ []int8, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int8{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int8, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]int8, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int8, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt16R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]int16) + v, changed := fastpathTV.DecSliceInt16V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]int16) + v2, changed := fastpathTV.DecSliceInt16V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceInt16X(vp *[]int16, d *Decoder) { + v, changed := f.DecSliceInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt16V(v []int16, canChange bool, d *Decoder) (_ []int16, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int16{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int16, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 2) + } else { + xlen = 8 + } + v = make([]int16, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + } + } + if canChange { 
+ if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int16, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt32R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]int32) + v, changed := fastpathTV.DecSliceInt32V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]int32) + v2, changed := fastpathTV.DecSliceInt32V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceInt32X(vp *[]int32, d *Decoder) { + v, changed := f.DecSliceInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt32V(v []int32, canChange bool, d *Decoder) (_ []int32, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int32{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int32, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 4) + } else { + xlen = 8 + } + v = make([]int32, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int32, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceInt64R(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]int64) + v, changed := fastpathTV.DecSliceInt64V(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]int64) + v2, changed := fastpathTV.DecSliceInt64V(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceInt64X(vp *[]int64, d *Decoder) { + v, changed := f.DecSliceInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceInt64V(v []int64, canChange bool, d *Decoder) (_ []int64, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []int64{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]int64, xlen) + } + changed = true + } else if containerLenS != 
len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 8) + } else { + xlen = 8 + } + v = make([]int64, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, 0) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = 0 + } else { + v[j] = dd.DecodeInt64() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]int64, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecSliceBoolR(f *codecFnInfo, rv reflect.Value) { + if array := f.seq == seqTypeArray; !array && rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*[]bool) + v, changed := fastpathTV.DecSliceBoolV(*vp, !array, d) + if changed { + *vp = v + } + } else { + v := rv2i(rv).([]bool) + v2, changed := fastpathTV.DecSliceBoolV(v, !array, d) + if changed && len(v) > 0 && len(v2) > 0 && !(len(v2) == len(v) && &v2[0] == &v[0]) { + copy(v, v2) + } + } +} +func (f fastpathT) DecSliceBoolX(vp *[]bool, d *Decoder) { + v, changed := f.DecSliceBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecSliceBoolV(v []bool, canChange bool, d *Decoder) (_ []bool, changed bool) { + dd := d.d + slh, containerLenS := d.decSliceHelperStart() + if containerLenS == 0 { + if canChange { + if v == nil { + v = []bool{} + } else if len(v) != 0 { + v = v[:0] + } + changed = true + } + slh.End() + return v, changed + } + hasLen := containerLenS > 0 + var xlen int + if hasLen && canChange { + if containerLenS > cap(v) { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + if xlen <= cap(v) { + v = v[:xlen] + } else { + v = make([]bool, xlen) + } + changed = true + } else if containerLenS != len(v) { + v = v[:containerLenS] + changed = true + } + } + j := 0 + for ; (hasLen && j < containerLenS) || !(hasLen || dd.CheckBreak()); j++ { + if j == 0 && len(v) == 0 && canChange { + if hasLen { + xlen = decInferLen(containerLenS, d.h.MaxInitLen, 1) + } else { + xlen = 8 + } + v = make([]bool, xlen) + changed = true + } + // if indefinite, etc, then expand the slice if necessary + var decodeIntoBlank bool + if j >= len(v) { + if canChange { + v = append(v, false) + changed = true + } else { + d.arrayCannotExpand(len(v), j+1) + decodeIntoBlank = true + } + } + slh.ElemContainerState(j) + if decodeIntoBlank { + d.swallow() + } else if dd.TryDecodeAsNil() { + v[j] = false + } else { + v[j] = dd.DecodeBool() + } + } + if canChange { + if j < len(v) { + v = v[:j] + changed = true + } else if j == 0 && v == nil { + v = make([]bool, 0) + changed = true + } + } + slh.End() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]interface{}) + v, changed := fastpathTV.DecMapIntfIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfIntfV(rv2i(rv).(map[interface{}]interface{}), false, d) + } +} +func (f fastpathT) DecMapIntfIntfX(vp *map[interface{}]interface{}, d *Decoder) { + v, changed := f.DecMapIntfIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) 
DecMapIntfIntfV(v map[interface{}]interface{}, canChange bool, + d *Decoder) (_ map[interface{}]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk interface{} + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]string) + v, changed := fastpathTV.DecMapIntfStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfStringV(rv2i(rv).(map[interface{}]string), false, d) + } +} +func (f fastpathT) DecMapIntfStringX(vp *map[interface{}]string, d *Decoder) { + v, changed := f.DecMapIntfStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfStringV(v map[interface{}]string, canChange bool, + d *Decoder) (_ map[interface{}]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[interface{}]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint) + v, changed := fastpathTV.DecMapIntfUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUintV(rv2i(rv).(map[interface{}]uint), false, d) + } +} +func (f fastpathT) DecMapIntfUintX(vp *map[interface{}]uint, d *Decoder) { + v, changed := f.DecMapIntfUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintV(v map[interface{}]uint, canChange bool, + d *Decoder) (_ map[interface{}]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uint + hasLen := containerLen > 
0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint8) + v, changed := fastpathTV.DecMapIntfUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUint8V(rv2i(rv).(map[interface{}]uint8), false, d) + } +} +func (f fastpathT) DecMapIntfUint8X(vp *map[interface{}]uint8, d *Decoder) { + v, changed := f.DecMapIntfUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint8V(v map[interface{}]uint8, canChange bool, + d *Decoder) (_ map[interface{}]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint16) + v, changed := fastpathTV.DecMapIntfUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUint16V(rv2i(rv).(map[interface{}]uint16), false, d) + } +} +func (f fastpathT) DecMapIntfUint16X(vp *map[interface{}]uint16, d *Decoder) { + v, changed := f.DecMapIntfUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint16V(v map[interface{}]uint16, canChange bool, + d *Decoder) (_ map[interface{}]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapIntfUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint32) + v, changed := fastpathTV.DecMapIntfUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUint32V(rv2i(rv).(map[interface{}]uint32), false, d) + } +} +func (f fastpathT) DecMapIntfUint32X(vp *map[interface{}]uint32, d *Decoder) { + v, changed := f.DecMapIntfUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint32V(v map[interface{}]uint32, canChange bool, + d *Decoder) (_ map[interface{}]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uint64) + v, changed := fastpathTV.DecMapIntfUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUint64V(rv2i(rv).(map[interface{}]uint64), false, d) + } +} +func (f fastpathT) DecMapIntfUint64X(vp *map[interface{}]uint64, d *Decoder) { + v, changed := f.DecMapIntfUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUint64V(v map[interface{}]uint64, canChange bool, + d *Decoder) (_ map[interface{}]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]uintptr) + v, changed := fastpathTV.DecMapIntfUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfUintptrV(rv2i(rv).(map[interface{}]uintptr), false, d) + } +} +func (f fastpathT) DecMapIntfUintptrX(vp *map[interface{}]uintptr, d *Decoder) { + v, changed := f.DecMapIntfUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfUintptrV(v map[interface{}]uintptr, 
canChange bool, + d *Decoder) (_ map[interface{}]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int) + v, changed := fastpathTV.DecMapIntfIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfIntV(rv2i(rv).(map[interface{}]int), false, d) + } +} +func (f fastpathT) DecMapIntfIntX(vp *map[interface{}]int, d *Decoder) { + v, changed := f.DecMapIntfIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfIntV(v map[interface{}]int, canChange bool, + d *Decoder) (_ map[interface{}]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int8) + v, changed := fastpathTV.DecMapIntfInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfInt8V(rv2i(rv).(map[interface{}]int8), false, d) + } +} +func (f fastpathT) DecMapIntfInt8X(vp *map[interface{}]int8, d *Decoder) { + v, changed := f.DecMapIntfInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt8V(v map[interface{}]int8, canChange bool, + d *Decoder) (_ map[interface{}]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + 
d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int16) + v, changed := fastpathTV.DecMapIntfInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfInt16V(rv2i(rv).(map[interface{}]int16), false, d) + } +} +func (f fastpathT) DecMapIntfInt16X(vp *map[interface{}]int16, d *Decoder) { + v, changed := f.DecMapIntfInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt16V(v map[interface{}]int16, canChange bool, + d *Decoder) (_ map[interface{}]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[interface{}]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int32) + v, changed := fastpathTV.DecMapIntfInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfInt32V(rv2i(rv).(map[interface{}]int32), false, d) + } +} +func (f fastpathT) DecMapIntfInt32X(vp *map[interface{}]int32, d *Decoder) { + v, changed := f.DecMapIntfInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt32V(v map[interface{}]int32, canChange bool, + d *Decoder) (_ map[interface{}]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]int64) + v, changed := 
fastpathTV.DecMapIntfInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfInt64V(rv2i(rv).(map[interface{}]int64), false, d) + } +} +func (f fastpathT) DecMapIntfInt64X(vp *map[interface{}]int64, d *Decoder) { + v, changed := f.DecMapIntfInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfInt64V(v map[interface{}]int64, canChange bool, + d *Decoder) (_ map[interface{}]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float32) + v, changed := fastpathTV.DecMapIntfFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfFloat32V(rv2i(rv).(map[interface{}]float32), false, d) + } +} +func (f fastpathT) DecMapIntfFloat32X(vp *map[interface{}]float32, d *Decoder) { + v, changed := f.DecMapIntfFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat32V(v map[interface{}]float32, canChange bool, + d *Decoder) (_ map[interface{}]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[interface{}]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]float64) + v, changed := fastpathTV.DecMapIntfFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfFloat64V(rv2i(rv).(map[interface{}]float64), false, d) + } +} +func (f fastpathT) DecMapIntfFloat64X(vp *map[interface{}]float64, d *Decoder) { + v, changed := f.DecMapIntfFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfFloat64V(v map[interface{}]float64, canChange bool, + d *Decoder) (_ map[interface{}]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[interface{}]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntfBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[interface{}]bool) + v, changed := fastpathTV.DecMapIntfBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntfBoolV(rv2i(rv).(map[interface{}]bool), false, d) + } +} +func (f fastpathT) DecMapIntfBoolX(vp *map[interface{}]bool, d *Decoder) { + v, changed := f.DecMapIntfBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntfBoolV(v map[interface{}]bool, canChange bool, + d *Decoder) (_ map[interface{}]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[interface{}]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk interface{} + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = nil + d.decode(&mk) + if bv, bok := mk.([]byte); bok { + mk = d.string(bv) + } + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]interface{}) + v, changed := fastpathTV.DecMapStringIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringIntfV(rv2i(rv).(map[string]interface{}), false, d) + } +} +func (f fastpathT) DecMapStringIntfX(vp *map[string]interface{}, d *Decoder) { + v, changed := f.DecMapStringIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringIntfV(v map[string]interface{}, canChange bool, + d *Decoder) (_ map[string]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk string + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else 
if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]string) + v, changed := fastpathTV.DecMapStringStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringStringV(rv2i(rv).(map[string]string), false, d) + } +} +func (f fastpathT) DecMapStringStringX(vp *map[string]string, d *Decoder) { + v, changed := f.DecMapStringStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringStringV(v map[string]string, canChange bool, + d *Decoder) (_ map[string]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 32) + v = make(map[string]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint) + v, changed := fastpathTV.DecMapStringUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUintV(rv2i(rv).(map[string]uint), false, d) + } +} +func (f fastpathT) DecMapStringUintX(vp *map[string]uint, d *Decoder) { + v, changed := f.DecMapStringUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintV(v map[string]uint, canChange bool, + d *Decoder) (_ map[string]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint8) + v, changed := fastpathTV.DecMapStringUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUint8V(rv2i(rv).(map[string]uint8), false, d) + } +} +func (f fastpathT) DecMapStringUint8X(vp *map[string]uint8, d *Decoder) { + v, changed := f.DecMapStringUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ 
fastpathT) DecMapStringUint8V(v map[string]uint8, canChange bool, + d *Decoder) (_ map[string]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint16) + v, changed := fastpathTV.DecMapStringUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUint16V(rv2i(rv).(map[string]uint16), false, d) + } +} +func (f fastpathT) DecMapStringUint16X(vp *map[string]uint16, d *Decoder) { + v, changed := f.DecMapStringUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint16V(v map[string]uint16, canChange bool, + d *Decoder) (_ map[string]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint32) + v, changed := fastpathTV.DecMapStringUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUint32V(rv2i(rv).(map[string]uint32), false, d) + } +} +func (f fastpathT) DecMapStringUint32X(vp *map[string]uint32, d *Decoder) { + v, changed := f.DecMapStringUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint32V(v map[string]uint32, canChange bool, + d *Decoder) (_ map[string]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } 
else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uint64) + v, changed := fastpathTV.DecMapStringUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUint64V(rv2i(rv).(map[string]uint64), false, d) + } +} +func (f fastpathT) DecMapStringUint64X(vp *map[string]uint64, d *Decoder) { + v, changed := f.DecMapStringUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUint64V(v map[string]uint64, canChange bool, + d *Decoder) (_ map[string]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]uintptr) + v, changed := fastpathTV.DecMapStringUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringUintptrV(rv2i(rv).(map[string]uintptr), false, d) + } +} +func (f fastpathT) DecMapStringUintptrX(vp *map[string]uintptr, d *Decoder) { + v, changed := f.DecMapStringUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringUintptrV(v map[string]uintptr, canChange bool, + d *Decoder) (_ map[string]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int) + v, changed := fastpathTV.DecMapStringIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringIntV(rv2i(rv).(map[string]int), false, d) + } +} +func (f fastpathT) DecMapStringIntX(vp *map[string]int, d *Decoder) { + v, changed := f.DecMapStringIntV(*vp, true, d) + if changed { + *vp = v + } +} +func 
(_ fastpathT) DecMapStringIntV(v map[string]int, canChange bool, + d *Decoder) (_ map[string]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int8) + v, changed := fastpathTV.DecMapStringInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt8V(rv2i(rv).(map[string]int8), false, d) + } +} +func (f fastpathT) DecMapStringInt8X(vp *map[string]int8, d *Decoder) { + v, changed := f.DecMapStringInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt8V(v map[string]int8, canChange bool, + d *Decoder) (_ map[string]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int16) + v, changed := fastpathTV.DecMapStringInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt16V(rv2i(rv).(map[string]int16), false, d) + } +} +func (f fastpathT) DecMapStringInt16X(vp *map[string]int16, d *Decoder) { + v, changed := f.DecMapStringInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt16V(v map[string]int16, canChange bool, + d *Decoder) (_ map[string]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[string]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, 
mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int32) + v, changed := fastpathTV.DecMapStringInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt32V(rv2i(rv).(map[string]int32), false, d) + } +} +func (f fastpathT) DecMapStringInt32X(vp *map[string]int32, d *Decoder) { + v, changed := f.DecMapStringInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt32V(v map[string]int32, canChange bool, + d *Decoder) (_ map[string]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]int64) + v, changed := fastpathTV.DecMapStringInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringInt64V(rv2i(rv).(map[string]int64), false, d) + } +} +func (f fastpathT) DecMapStringInt64X(vp *map[string]int64, d *Decoder) { + v, changed := f.DecMapStringInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringInt64V(v map[string]int64, canChange bool, + d *Decoder) (_ map[string]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float32) + v, changed := fastpathTV.DecMapStringFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringFloat32V(rv2i(rv).(map[string]float32), false, d) + } +} +func (f fastpathT) DecMapStringFloat32X(vp *map[string]float32, d *Decoder) { + v, changed := f.DecMapStringFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat32V(v map[string]float32, canChange 
bool, + d *Decoder) (_ map[string]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[string]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]float64) + v, changed := fastpathTV.DecMapStringFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringFloat64V(rv2i(rv).(map[string]float64), false, d) + } +} +func (f fastpathT) DecMapStringFloat64X(vp *map[string]float64, d *Decoder) { + v, changed := f.DecMapStringFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringFloat64V(v map[string]float64, canChange bool, + d *Decoder) (_ map[string]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[string]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapStringBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[string]bool) + v, changed := fastpathTV.DecMapStringBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapStringBoolV(rv2i(rv).(map[string]bool), false, d) + } +} +func (f fastpathT) DecMapStringBoolX(vp *map[string]bool, d *Decoder) { + v, changed := f.DecMapStringBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapStringBoolV(v map[string]bool, canChange bool, + d *Decoder) (_ map[string]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[string]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk string + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeString() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + 
continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]interface{}) + v, changed := fastpathTV.DecMapFloat32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32IntfV(rv2i(rv).(map[float32]interface{}), false, d) + } +} +func (f fastpathT) DecMapFloat32IntfX(vp *map[float32]interface{}, d *Decoder) { + v, changed := f.DecMapFloat32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntfV(v map[float32]interface{}, canChange bool, + d *Decoder) (_ map[float32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk float32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]string) + v, changed := fastpathTV.DecMapFloat32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32StringV(rv2i(rv).(map[float32]string), false, d) + } +} +func (f fastpathT) DecMapFloat32StringX(vp *map[float32]string, d *Decoder) { + v, changed := f.DecMapFloat32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32StringV(v map[float32]string, canChange bool, + d *Decoder) (_ map[float32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[float32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint) + v, changed := fastpathTV.DecMapFloat32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32UintV(rv2i(rv).(map[float32]uint), false, d) + } +} +func (f fastpathT) DecMapFloat32UintX(vp *map[float32]uint, d *Decoder) { + v, changed 
:= f.DecMapFloat32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintV(v map[float32]uint, canChange bool, + d *Decoder) (_ map[float32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint8) + v, changed := fastpathTV.DecMapFloat32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint8V(rv2i(rv).(map[float32]uint8), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint8X(vp *map[float32]uint8, d *Decoder) { + v, changed := f.DecMapFloat32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint8V(v map[float32]uint8, canChange bool, + d *Decoder) (_ map[float32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint16) + v, changed := fastpathTV.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint16V(rv2i(rv).(map[float32]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint16X(vp *map[float32]uint16, d *Decoder) { + v, changed := f.DecMapFloat32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint16V(v map[float32]uint16, canChange bool, + d *Decoder) (_ map[float32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { 
+ if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint32) + v, changed := fastpathTV.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint32V(rv2i(rv).(map[float32]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint32X(vp *map[float32]uint32, d *Decoder) { + v, changed := f.DecMapFloat32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint32V(v map[float32]uint32, canChange bool, + d *Decoder) (_ map[float32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uint64) + v, changed := fastpathTV.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Uint64V(rv2i(rv).(map[float32]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat32Uint64X(vp *map[float32]uint64, d *Decoder) { + v, changed := f.DecMapFloat32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Uint64V(v map[float32]uint64, canChange bool, + d *Decoder) (_ map[float32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]uintptr) + v, changed := fastpathTV.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = 
v + } + } else { + fastpathTV.DecMapFloat32UintptrV(rv2i(rv).(map[float32]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat32UintptrX(vp *map[float32]uintptr, d *Decoder) { + v, changed := f.DecMapFloat32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32UintptrV(v map[float32]uintptr, canChange bool, + d *Decoder) (_ map[float32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int) + v, changed := fastpathTV.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32IntV(rv2i(rv).(map[float32]int), false, d) + } +} +func (f fastpathT) DecMapFloat32IntX(vp *map[float32]int, d *Decoder) { + v, changed := f.DecMapFloat32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32IntV(v map[float32]int, canChange bool, + d *Decoder) (_ map[float32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int8) + v, changed := fastpathTV.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int8V(rv2i(rv).(map[float32]int8), false, d) + } +} +func (f fastpathT) DecMapFloat32Int8X(vp *map[float32]int8, d *Decoder) { + v, changed := f.DecMapFloat32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int8V(v map[float32]int8, canChange bool, + d *Decoder) (_ map[float32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]int8, xlen) + changed = true + } + if containerLen == 0 { + 
dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int16) + v, changed := fastpathTV.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int16V(rv2i(rv).(map[float32]int16), false, d) + } +} +func (f fastpathT) DecMapFloat32Int16X(vp *map[float32]int16, d *Decoder) { + v, changed := f.DecMapFloat32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int16V(v map[float32]int16, canChange bool, + d *Decoder) (_ map[float32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[float32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int32) + v, changed := fastpathTV.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int32V(rv2i(rv).(map[float32]int32), false, d) + } +} +func (f fastpathT) DecMapFloat32Int32X(vp *map[float32]int32, d *Decoder) { + v, changed := f.DecMapFloat32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int32V(v map[float32]int32, canChange bool, + d *Decoder) (_ map[float32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Int64R(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]int64) + v, changed := fastpathTV.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Int64V(rv2i(rv).(map[float32]int64), false, d) + } +} +func (f fastpathT) DecMapFloat32Int64X(vp *map[float32]int64, d *Decoder) { + v, changed := f.DecMapFloat32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Int64V(v map[float32]int64, canChange bool, + d *Decoder) (_ map[float32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float32) + v, changed := fastpathTV.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float32V(rv2i(rv).(map[float32]float32), false, d) + } +} +func (f fastpathT) DecMapFloat32Float32X(vp *map[float32]float32, d *Decoder) { + v, changed := f.DecMapFloat32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float32V(v map[float32]float32, canChange bool, + d *Decoder) (_ map[float32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[float32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]float64) + v, changed := fastpathTV.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32Float64V(rv2i(rv).(map[float32]float64), false, d) + } +} +func (f fastpathT) DecMapFloat32Float64X(vp *map[float32]float64, d *Decoder) { + v, changed := f.DecMapFloat32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32Float64V(v map[float32]float64, canChange bool, + d *Decoder) (_ map[float32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := 
dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float32]bool) + v, changed := fastpathTV.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat32BoolV(rv2i(rv).(map[float32]bool), false, d) + } +} +func (f fastpathT) DecMapFloat32BoolX(vp *map[float32]bool, d *Decoder) { + v, changed := f.DecMapFloat32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat32BoolV(v map[float32]bool, canChange bool, + d *Decoder) (_ map[float32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[float32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]interface{}) + v, changed := fastpathTV.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntfV(rv2i(rv).(map[float64]interface{}), false, d) + } +} +func (f fastpathT) DecMapFloat64IntfX(vp *map[float64]interface{}, d *Decoder) { + v, changed := f.DecMapFloat64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntfV(v map[float64]interface{}, canChange bool, + d *Decoder) (_ map[float64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk float64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } 
+ continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]string) + v, changed := fastpathTV.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64StringV(rv2i(rv).(map[float64]string), false, d) + } +} +func (f fastpathT) DecMapFloat64StringX(vp *map[float64]string, d *Decoder) { + v, changed := f.DecMapFloat64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64StringV(v map[float64]string, canChange bool, + d *Decoder) (_ map[float64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[float64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint) + v, changed := fastpathTV.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintV(rv2i(rv).(map[float64]uint), false, d) + } +} +func (f fastpathT) DecMapFloat64UintX(vp *map[float64]uint, d *Decoder) { + v, changed := f.DecMapFloat64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintV(v map[float64]uint, canChange bool, + d *Decoder) (_ map[float64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint8) + v, changed := fastpathTV.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint8V(rv2i(rv).(map[float64]uint8), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint8X(vp *map[float64]uint8, d *Decoder) { + v, changed := f.DecMapFloat64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint8V(v 
map[float64]uint8, canChange bool, + d *Decoder) (_ map[float64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint16) + v, changed := fastpathTV.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint16V(rv2i(rv).(map[float64]uint16), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint16X(vp *map[float64]uint16, d *Decoder) { + v, changed := f.DecMapFloat64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint16V(v map[float64]uint16, canChange bool, + d *Decoder) (_ map[float64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint32) + v, changed := fastpathTV.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint32V(rv2i(rv).(map[float64]uint32), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint32X(vp *map[float64]uint32, d *Decoder) { + v, changed := f.DecMapFloat64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint32V(v map[float64]uint32, canChange bool, + d *Decoder) (_ map[float64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } 
else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uint64) + v, changed := fastpathTV.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Uint64V(rv2i(rv).(map[float64]uint64), false, d) + } +} +func (f fastpathT) DecMapFloat64Uint64X(vp *map[float64]uint64, d *Decoder) { + v, changed := f.DecMapFloat64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Uint64V(v map[float64]uint64, canChange bool, + d *Decoder) (_ map[float64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]uintptr) + v, changed := fastpathTV.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64UintptrV(rv2i(rv).(map[float64]uintptr), false, d) + } +} +func (f fastpathT) DecMapFloat64UintptrX(vp *map[float64]uintptr, d *Decoder) { + v, changed := f.DecMapFloat64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64UintptrV(v map[float64]uintptr, canChange bool, + d *Decoder) (_ map[float64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int) + v, changed := fastpathTV.DecMapFloat64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64IntV(rv2i(rv).(map[float64]int), false, d) + } +} +func (f fastpathT) DecMapFloat64IntX(vp *map[float64]int, d *Decoder) { + v, changed := f.DecMapFloat64IntV(*vp, true, d) + 
if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64IntV(v map[float64]int, canChange bool, + d *Decoder) (_ map[float64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int8) + v, changed := fastpathTV.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int8V(rv2i(rv).(map[float64]int8), false, d) + } +} +func (f fastpathT) DecMapFloat64Int8X(vp *map[float64]int8, d *Decoder) { + v, changed := f.DecMapFloat64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int8V(v map[float64]int8, canChange bool, + d *Decoder) (_ map[float64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int16) + v, changed := fastpathTV.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int16V(rv2i(rv).(map[float64]int16), false, d) + } +} +func (f fastpathT) DecMapFloat64Int16X(vp *map[float64]int16, d *Decoder) { + v, changed := f.DecMapFloat64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int16V(v map[float64]int16, canChange bool, + d *Decoder) (_ map[float64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[float64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() 
{ + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int32) + v, changed := fastpathTV.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int32V(rv2i(rv).(map[float64]int32), false, d) + } +} +func (f fastpathT) DecMapFloat64Int32X(vp *map[float64]int32, d *Decoder) { + v, changed := f.DecMapFloat64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int32V(v map[float64]int32, canChange bool, + d *Decoder) (_ map[float64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]int64) + v, changed := fastpathTV.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Int64V(rv2i(rv).(map[float64]int64), false, d) + } +} +func (f fastpathT) DecMapFloat64Int64X(vp *map[float64]int64, d *Decoder) { + v, changed := f.DecMapFloat64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Int64V(v map[float64]int64, canChange bool, + d *Decoder) (_ map[float64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float32) + v, changed := fastpathTV.DecMapFloat64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float32V(rv2i(rv).(map[float64]float32), false, d) + } +} +func (f fastpathT) DecMapFloat64Float32X(vp *map[float64]float32, d *Decoder) { + v, changed := f.DecMapFloat64Float32V(*vp, true, d) + 
if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float32V(v map[float64]float32, canChange bool, + d *Decoder) (_ map[float64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[float64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]float64) + v, changed := fastpathTV.DecMapFloat64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64Float64V(rv2i(rv).(map[float64]float64), false, d) + } +} +func (f fastpathT) DecMapFloat64Float64X(vp *map[float64]float64, d *Decoder) { + v, changed := f.DecMapFloat64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64Float64V(v map[float64]float64, canChange bool, + d *Decoder) (_ map[float64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[float64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapFloat64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[float64]bool) + v, changed := fastpathTV.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapFloat64BoolV(rv2i(rv).(map[float64]bool), false, d) + } +} +func (f fastpathT) DecMapFloat64BoolX(vp *map[float64]bool, d *Decoder) { + v, changed := f.DecMapFloat64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapFloat64BoolV(v map[float64]bool, canChange bool, + d *Decoder) (_ map[float64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[float64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk float64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeFloat64() + if esep { + dd.ReadMapElemValue() + } + 
if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]interface{}) + v, changed := fastpathTV.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintIntfV(rv2i(rv).(map[uint]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintIntfX(vp *map[uint]interface{}, d *Decoder) { + v, changed := f.DecMapUintIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntfV(v map[uint]interface{}, canChange bool, + d *Decoder) (_ map[uint]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]string) + v, changed := fastpathTV.DecMapUintStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintStringV(rv2i(rv).(map[uint]string), false, d) + } +} +func (f fastpathT) DecMapUintStringX(vp *map[uint]string, d *Decoder) { + v, changed := f.DecMapUintStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintStringV(v map[uint]string, canChange bool, + d *Decoder) (_ map[uint]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint) + v, changed := fastpathTV.DecMapUintUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUintV(rv2i(rv).(map[uint]uint), false, d) + } +} +func (f fastpathT) DecMapUintUintX(vp 
*map[uint]uint, d *Decoder) { + v, changed := f.DecMapUintUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintV(v map[uint]uint, canChange bool, + d *Decoder) (_ map[uint]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint8) + v, changed := fastpathTV.DecMapUintUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint8V(rv2i(rv).(map[uint]uint8), false, d) + } +} +func (f fastpathT) DecMapUintUint8X(vp *map[uint]uint8, d *Decoder) { + v, changed := f.DecMapUintUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint8V(v map[uint]uint8, canChange bool, + d *Decoder) (_ map[uint]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint16) + v, changed := fastpathTV.DecMapUintUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint16V(rv2i(rv).(map[uint]uint16), false, d) + } +} +func (f fastpathT) DecMapUintUint16X(vp *map[uint]uint16, d *Decoder) { + v, changed := f.DecMapUintUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint16V(v map[uint]uint16, canChange bool, + d *Decoder) (_ map[uint]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + 
mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint32) + v, changed := fastpathTV.DecMapUintUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint32V(rv2i(rv).(map[uint]uint32), false, d) + } +} +func (f fastpathT) DecMapUintUint32X(vp *map[uint]uint32, d *Decoder) { + v, changed := f.DecMapUintUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint32V(v map[uint]uint32, canChange bool, + d *Decoder) (_ map[uint]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uint64) + v, changed := fastpathTV.DecMapUintUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUint64V(rv2i(rv).(map[uint]uint64), false, d) + } +} +func (f fastpathT) DecMapUintUint64X(vp *map[uint]uint64, d *Decoder) { + v, changed := f.DecMapUintUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUint64V(v map[uint]uint64, canChange bool, + d *Decoder) (_ map[uint]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]uintptr) + v, changed := fastpathTV.DecMapUintUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintUintptrV(rv2i(rv).(map[uint]uintptr), false, d) + } +} +func (f 
fastpathT) DecMapUintUintptrX(vp *map[uint]uintptr, d *Decoder) { + v, changed := f.DecMapUintUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintUintptrV(v map[uint]uintptr, canChange bool, + d *Decoder) (_ map[uint]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int) + v, changed := fastpathTV.DecMapUintIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintIntV(rv2i(rv).(map[uint]int), false, d) + } +} +func (f fastpathT) DecMapUintIntX(vp *map[uint]int, d *Decoder) { + v, changed := f.DecMapUintIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintIntV(v map[uint]int, canChange bool, + d *Decoder) (_ map[uint]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int8) + v, changed := fastpathTV.DecMapUintInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt8V(rv2i(rv).(map[uint]int8), false, d) + } +} +func (f fastpathT) DecMapUintInt8X(vp *map[uint]int8, d *Decoder) { + v, changed := f.DecMapUintInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt8V(v map[uint]int8, canChange bool, + d *Decoder) (_ map[uint]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int16) + v, changed := fastpathTV.DecMapUintInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt16V(rv2i(rv).(map[uint]int16), false, d) + } +} +func (f fastpathT) DecMapUintInt16X(vp *map[uint]int16, d *Decoder) { + v, changed := f.DecMapUintInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt16V(v map[uint]int16, canChange bool, + d *Decoder) (_ map[uint]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int32) + v, changed := fastpathTV.DecMapUintInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt32V(rv2i(rv).(map[uint]int32), false, d) + } +} +func (f fastpathT) DecMapUintInt32X(vp *map[uint]int32, d *Decoder) { + v, changed := f.DecMapUintInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt32V(v map[uint]int32, canChange bool, + d *Decoder) (_ map[uint]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]int64) + v, changed := fastpathTV.DecMapUintInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintInt64V(rv2i(rv).(map[uint]int64), false, d) + } +} +func (f 
fastpathT) DecMapUintInt64X(vp *map[uint]int64, d *Decoder) { + v, changed := f.DecMapUintInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintInt64V(v map[uint]int64, canChange bool, + d *Decoder) (_ map[uint]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float32) + v, changed := fastpathTV.DecMapUintFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintFloat32V(rv2i(rv).(map[uint]float32), false, d) + } +} +func (f fastpathT) DecMapUintFloat32X(vp *map[uint]float32, d *Decoder) { + v, changed := f.DecMapUintFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat32V(v map[uint]float32, canChange bool, + d *Decoder) (_ map[uint]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]float64) + v, changed := fastpathTV.DecMapUintFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintFloat64V(rv2i(rv).(map[uint]float64), false, d) + } +} +func (f fastpathT) DecMapUintFloat64X(vp *map[uint]float64, d *Decoder) { + v, changed := f.DecMapUintFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintFloat64V(v map[uint]float64, canChange bool, + d *Decoder) (_ map[uint]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); 
j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint]bool) + v, changed := fastpathTV.DecMapUintBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintBoolV(rv2i(rv).(map[uint]bool), false, d) + } +} +func (f fastpathT) DecMapUintBoolX(vp *map[uint]bool, d *Decoder) { + v, changed := f.DecMapUintBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintBoolV(v map[uint]bool, canChange bool, + d *Decoder) (_ map[uint]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]interface{}) + v, changed := fastpathTV.DecMapUint8IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8IntfV(rv2i(rv).(map[uint8]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint8IntfX(vp *map[uint8]interface{}, d *Decoder) { + v, changed := f.DecMapUint8IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntfV(v map[uint8]interface{}, canChange bool, + d *Decoder) (_ map[uint8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]string) + v, changed := fastpathTV.DecMapUint8StringV(*vp, true, d) + if changed { + *vp = 
v + } + } else { + fastpathTV.DecMapUint8StringV(rv2i(rv).(map[uint8]string), false, d) + } +} +func (f fastpathT) DecMapUint8StringX(vp *map[uint8]string, d *Decoder) { + v, changed := f.DecMapUint8StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8StringV(v map[uint8]string, canChange bool, + d *Decoder) (_ map[uint8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[uint8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint) + v, changed := fastpathTV.DecMapUint8UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8UintV(rv2i(rv).(map[uint8]uint), false, d) + } +} +func (f fastpathT) DecMapUint8UintX(vp *map[uint8]uint, d *Decoder) { + v, changed := f.DecMapUint8UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintV(v map[uint8]uint, canChange bool, + d *Decoder) (_ map[uint8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint8) + v, changed := fastpathTV.DecMapUint8Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint8V(rv2i(rv).(map[uint8]uint8), false, d) + } +} +func (f fastpathT) DecMapUint8Uint8X(vp *map[uint8]uint8, d *Decoder) { + v, changed := f.DecMapUint8Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint8V(v map[uint8]uint8, canChange bool, + d *Decoder) (_ map[uint8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uint8 + hasLen := containerLen > 0 + for 
j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint16) + v, changed := fastpathTV.DecMapUint8Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint16V(rv2i(rv).(map[uint8]uint16), false, d) + } +} +func (f fastpathT) DecMapUint8Uint16X(vp *map[uint8]uint16, d *Decoder) { + v, changed := f.DecMapUint8Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint16V(v map[uint8]uint16, canChange bool, + d *Decoder) (_ map[uint8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint32) + v, changed := fastpathTV.DecMapUint8Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint32V(rv2i(rv).(map[uint8]uint32), false, d) + } +} +func (f fastpathT) DecMapUint8Uint32X(vp *map[uint8]uint32, d *Decoder) { + v, changed := f.DecMapUint8Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint32V(v map[uint8]uint32, canChange bool, + d *Decoder) (_ map[uint8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uint64) + v, changed := fastpathTV.DecMapUint8Uint64V(*vp, 
true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Uint64V(rv2i(rv).(map[uint8]uint64), false, d) + } +} +func (f fastpathT) DecMapUint8Uint64X(vp *map[uint8]uint64, d *Decoder) { + v, changed := f.DecMapUint8Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Uint64V(v map[uint8]uint64, canChange bool, + d *Decoder) (_ map[uint8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]uintptr) + v, changed := fastpathTV.DecMapUint8UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8UintptrV(rv2i(rv).(map[uint8]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint8UintptrX(vp *map[uint8]uintptr, d *Decoder) { + v, changed := f.DecMapUint8UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8UintptrV(v map[uint8]uintptr, canChange bool, + d *Decoder) (_ map[uint8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int) + v, changed := fastpathTV.DecMapUint8IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8IntV(rv2i(rv).(map[uint8]int), false, d) + } +} +func (f fastpathT) DecMapUint8IntX(vp *map[uint8]int, d *Decoder) { + v, changed := f.DecMapUint8IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8IntV(v map[uint8]int, canChange bool, + d *Decoder) (_ map[uint8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + 
var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int8) + v, changed := fastpathTV.DecMapUint8Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int8V(rv2i(rv).(map[uint8]int8), false, d) + } +} +func (f fastpathT) DecMapUint8Int8X(vp *map[uint8]int8, d *Decoder) { + v, changed := f.DecMapUint8Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int8V(v map[uint8]int8, canChange bool, + d *Decoder) (_ map[uint8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int16) + v, changed := fastpathTV.DecMapUint8Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int16V(rv2i(rv).(map[uint8]int16), false, d) + } +} +func (f fastpathT) DecMapUint8Int16X(vp *map[uint8]int16, d *Decoder) { + v, changed := f.DecMapUint8Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int16V(v map[uint8]int16, canChange bool, + d *Decoder) (_ map[uint8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int32) + v, changed := fastpathTV.DecMapUint8Int32V(*vp, 
true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int32V(rv2i(rv).(map[uint8]int32), false, d) + } +} +func (f fastpathT) DecMapUint8Int32X(vp *map[uint8]int32, d *Decoder) { + v, changed := f.DecMapUint8Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int32V(v map[uint8]int32, canChange bool, + d *Decoder) (_ map[uint8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]int64) + v, changed := fastpathTV.DecMapUint8Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Int64V(rv2i(rv).(map[uint8]int64), false, d) + } +} +func (f fastpathT) DecMapUint8Int64X(vp *map[uint8]int64, d *Decoder) { + v, changed := f.DecMapUint8Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Int64V(v map[uint8]int64, canChange bool, + d *Decoder) (_ map[uint8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float32) + v, changed := fastpathTV.DecMapUint8Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Float32V(rv2i(rv).(map[uint8]float32), false, d) + } +} +func (f fastpathT) DecMapUint8Float32X(vp *map[uint8]float32, d *Decoder) { + v, changed := f.DecMapUint8Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float32V(v map[uint8]float32, canChange bool, + d *Decoder) (_ map[uint8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + 
var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]float64) + v, changed := fastpathTV.DecMapUint8Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8Float64V(rv2i(rv).(map[uint8]float64), false, d) + } +} +func (f fastpathT) DecMapUint8Float64X(vp *map[uint8]float64, d *Decoder) { + v, changed := f.DecMapUint8Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8Float64V(v map[uint8]float64, canChange bool, + d *Decoder) (_ map[uint8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint8]bool) + v, changed := fastpathTV.DecMapUint8BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint8BoolV(rv2i(rv).(map[uint8]bool), false, d) + } +} +func (f fastpathT) DecMapUint8BoolX(vp *map[uint8]bool, d *Decoder) { + v, changed := f.DecMapUint8BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint8BoolV(v map[uint8]bool, canChange bool, + d *Decoder) (_ map[uint8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[uint8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]interface{}) + v, changed := fastpathTV.DecMapUint16IntfV(*vp, true, d) 
+ if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16IntfV(rv2i(rv).(map[uint16]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint16IntfX(vp *map[uint16]interface{}, d *Decoder) { + v, changed := f.DecMapUint16IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntfV(v map[uint16]interface{}, canChange bool, + d *Decoder) (_ map[uint16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]string) + v, changed := fastpathTV.DecMapUint16StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16StringV(rv2i(rv).(map[uint16]string), false, d) + } +} +func (f fastpathT) DecMapUint16StringX(vp *map[uint16]string, d *Decoder) { + v, changed := f.DecMapUint16StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16StringV(v map[uint16]string, canChange bool, + d *Decoder) (_ map[uint16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[uint16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint) + v, changed := fastpathTV.DecMapUint16UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16UintV(rv2i(rv).(map[uint16]uint), false, d) + } +} +func (f fastpathT) DecMapUint16UintX(vp *map[uint16]uint, d *Decoder) { + v, changed := f.DecMapUint16UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintV(v map[uint16]uint, canChange bool, + d *Decoder) (_ map[uint16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = 
make(map[uint16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint8) + v, changed := fastpathTV.DecMapUint16Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint8V(rv2i(rv).(map[uint16]uint8), false, d) + } +} +func (f fastpathT) DecMapUint16Uint8X(vp *map[uint16]uint8, d *Decoder) { + v, changed := f.DecMapUint16Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint8V(v map[uint16]uint8, canChange bool, + d *Decoder) (_ map[uint16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint16) + v, changed := fastpathTV.DecMapUint16Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint16V(rv2i(rv).(map[uint16]uint16), false, d) + } +} +func (f fastpathT) DecMapUint16Uint16X(vp *map[uint16]uint16, d *Decoder) { + v, changed := f.DecMapUint16Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint16V(v map[uint16]uint16, canChange bool, + d *Decoder) (_ map[uint16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} 
+ +func (d *Decoder) fastpathDecMapUint16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint32) + v, changed := fastpathTV.DecMapUint16Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint32V(rv2i(rv).(map[uint16]uint32), false, d) + } +} +func (f fastpathT) DecMapUint16Uint32X(vp *map[uint16]uint32, d *Decoder) { + v, changed := f.DecMapUint16Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint32V(v map[uint16]uint32, canChange bool, + d *Decoder) (_ map[uint16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uint64) + v, changed := fastpathTV.DecMapUint16Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Uint64V(rv2i(rv).(map[uint16]uint64), false, d) + } +} +func (f fastpathT) DecMapUint16Uint64X(vp *map[uint16]uint64, d *Decoder) { + v, changed := f.DecMapUint16Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Uint64V(v map[uint16]uint64, canChange bool, + d *Decoder) (_ map[uint16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]uintptr) + v, changed := fastpathTV.DecMapUint16UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16UintptrV(rv2i(rv).(map[uint16]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint16UintptrX(vp *map[uint16]uintptr, d *Decoder) { + v, changed := f.DecMapUint16UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16UintptrV(v map[uint16]uintptr, canChange bool, + d *Decoder) (_ map[uint16]uintptr, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int) + v, changed := fastpathTV.DecMapUint16IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16IntV(rv2i(rv).(map[uint16]int), false, d) + } +} +func (f fastpathT) DecMapUint16IntX(vp *map[uint16]int, d *Decoder) { + v, changed := f.DecMapUint16IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16IntV(v map[uint16]int, canChange bool, + d *Decoder) (_ map[uint16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int8) + v, changed := fastpathTV.DecMapUint16Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int8V(rv2i(rv).(map[uint16]int8), false, d) + } +} +func (f fastpathT) DecMapUint16Int8X(vp *map[uint16]int8, d *Decoder) { + v, changed := f.DecMapUint16Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int8V(v map[uint16]int8, canChange bool, + d *Decoder) (_ map[uint16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = 
int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int16) + v, changed := fastpathTV.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int16V(rv2i(rv).(map[uint16]int16), false, d) + } +} +func (f fastpathT) DecMapUint16Int16X(vp *map[uint16]int16, d *Decoder) { + v, changed := f.DecMapUint16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int16V(v map[uint16]int16, canChange bool, + d *Decoder) (_ map[uint16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[uint16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int32) + v, changed := fastpathTV.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int32V(rv2i(rv).(map[uint16]int32), false, d) + } +} +func (f fastpathT) DecMapUint16Int32X(vp *map[uint16]int32, d *Decoder) { + v, changed := f.DecMapUint16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int32V(v map[uint16]int32, canChange bool, + d *Decoder) (_ map[uint16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]int64) + v, changed := fastpathTV.DecMapUint16Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Int64V(rv2i(rv).(map[uint16]int64), false, d) + } +} +func (f fastpathT) DecMapUint16Int64X(vp *map[uint16]int64, d *Decoder) { + v, changed := f.DecMapUint16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Int64V(v map[uint16]int64, canChange 
bool, + d *Decoder) (_ map[uint16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float32) + v, changed := fastpathTV.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float32V(rv2i(rv).(map[uint16]float32), false, d) + } +} +func (f fastpathT) DecMapUint16Float32X(vp *map[uint16]float32, d *Decoder) { + v, changed := f.DecMapUint16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float32V(v map[uint16]float32, canChange bool, + d *Decoder) (_ map[uint16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]float64) + v, changed := fastpathTV.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16Float64V(rv2i(rv).(map[uint16]float64), false, d) + } +} +func (f fastpathT) DecMapUint16Float64X(vp *map[uint16]float64, d *Decoder) { + v, changed := f.DecMapUint16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16Float64V(v map[uint16]float64, canChange bool, + d *Decoder) (_ map[uint16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + 
if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint16]bool) + v, changed := fastpathTV.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint16BoolV(rv2i(rv).(map[uint16]bool), false, d) + } +} +func (f fastpathT) DecMapUint16BoolX(vp *map[uint16]bool, d *Decoder) { + v, changed := f.DecMapUint16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint16BoolV(v map[uint16]bool, canChange bool, + d *Decoder) (_ map[uint16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[uint16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]interface{}) + v, changed := fastpathTV.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntfV(rv2i(rv).(map[uint32]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint32IntfX(vp *map[uint32]interface{}, d *Decoder) { + v, changed := f.DecMapUint32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntfV(v map[uint32]interface{}, canChange bool, + d *Decoder) (_ map[uint32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]string) + v, changed := fastpathTV.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32StringV(rv2i(rv).(map[uint32]string), false, d) + } +} +func (f fastpathT) 
DecMapUint32StringX(vp *map[uint32]string, d *Decoder) { + v, changed := f.DecMapUint32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32StringV(v map[uint32]string, canChange bool, + d *Decoder) (_ map[uint32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[uint32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint) + v, changed := fastpathTV.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintV(rv2i(rv).(map[uint32]uint), false, d) + } +} +func (f fastpathT) DecMapUint32UintX(vp *map[uint32]uint, d *Decoder) { + v, changed := f.DecMapUint32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintV(v map[uint32]uint, canChange bool, + d *Decoder) (_ map[uint32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint8) + v, changed := fastpathTV.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint8V(rv2i(rv).(map[uint32]uint8), false, d) + } +} +func (f fastpathT) DecMapUint32Uint8X(vp *map[uint32]uint8, d *Decoder) { + v, changed := f.DecMapUint32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint8V(v map[uint32]uint8, canChange bool, + d *Decoder) (_ map[uint32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { 
+ if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint16) + v, changed := fastpathTV.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint16V(rv2i(rv).(map[uint32]uint16), false, d) + } +} +func (f fastpathT) DecMapUint32Uint16X(vp *map[uint32]uint16, d *Decoder) { + v, changed := f.DecMapUint32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint16V(v map[uint32]uint16, canChange bool, + d *Decoder) (_ map[uint32]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint32) + v, changed := fastpathTV.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Uint32V(rv2i(rv).(map[uint32]uint32), false, d) + } +} +func (f fastpathT) DecMapUint32Uint32X(vp *map[uint32]uint32, d *Decoder) { + v, changed := f.DecMapUint32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint32V(v map[uint32]uint32, canChange bool, + d *Decoder) (_ map[uint32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uint64) + v, changed := fastpathTV.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } 
else { + fastpathTV.DecMapUint32Uint64V(rv2i(rv).(map[uint32]uint64), false, d) + } +} +func (f fastpathT) DecMapUint32Uint64X(vp *map[uint32]uint64, d *Decoder) { + v, changed := f.DecMapUint32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Uint64V(v map[uint32]uint64, canChange bool, + d *Decoder) (_ map[uint32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]uintptr) + v, changed := fastpathTV.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32UintptrV(rv2i(rv).(map[uint32]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint32UintptrX(vp *map[uint32]uintptr, d *Decoder) { + v, changed := f.DecMapUint32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32UintptrV(v map[uint32]uintptr, canChange bool, + d *Decoder) (_ map[uint32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int) + v, changed := fastpathTV.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32IntV(rv2i(rv).(map[uint32]int), false, d) + } +} +func (f fastpathT) DecMapUint32IntX(vp *map[uint32]int, d *Decoder) { + v, changed := f.DecMapUint32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32IntV(v map[uint32]int, canChange bool, + d *Decoder) (_ map[uint32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + 
var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int8) + v, changed := fastpathTV.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int8V(rv2i(rv).(map[uint32]int8), false, d) + } +} +func (f fastpathT) DecMapUint32Int8X(vp *map[uint32]int8, d *Decoder) { + v, changed := f.DecMapUint32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int8V(v map[uint32]int8, canChange bool, + d *Decoder) (_ map[uint32]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int16) + v, changed := fastpathTV.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int16V(rv2i(rv).(map[uint32]int16), false, d) + } +} +func (f fastpathT) DecMapUint32Int16X(vp *map[uint32]int16, d *Decoder) { + v, changed := f.DecMapUint32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int16V(v map[uint32]int16, canChange bool, + d *Decoder) (_ map[uint32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[uint32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int32) + v, changed := 
fastpathTV.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int32V(rv2i(rv).(map[uint32]int32), false, d) + } +} +func (f fastpathT) DecMapUint32Int32X(vp *map[uint32]int32, d *Decoder) { + v, changed := f.DecMapUint32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int32V(v map[uint32]int32, canChange bool, + d *Decoder) (_ map[uint32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]int64) + v, changed := fastpathTV.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Int64V(rv2i(rv).(map[uint32]int64), false, d) + } +} +func (f fastpathT) DecMapUint32Int64X(vp *map[uint32]int64, d *Decoder) { + v, changed := f.DecMapUint32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Int64V(v map[uint32]int64, canChange bool, + d *Decoder) (_ map[uint32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float32) + v, changed := fastpathTV.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float32V(rv2i(rv).(map[uint32]float32), false, d) + } +} +func (f fastpathT) DecMapUint32Float32X(vp *map[uint32]float32, d *Decoder) { + v, changed := f.DecMapUint32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float32V(v map[uint32]float32, canChange bool, + d *Decoder) (_ map[uint32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[uint32]float32, xlen) + changed = true + } + if 
containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]float64) + v, changed := fastpathTV.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32Float64V(rv2i(rv).(map[uint32]float64), false, d) + } +} +func (f fastpathT) DecMapUint32Float64X(vp *map[uint32]float64, d *Decoder) { + v, changed := f.DecMapUint32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32Float64V(v map[uint32]float64, canChange bool, + d *Decoder) (_ map[uint32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint32]bool) + v, changed := fastpathTV.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint32BoolV(rv2i(rv).(map[uint32]bool), false, d) + } +} +func (f fastpathT) DecMapUint32BoolX(vp *map[uint32]bool, d *Decoder) { + v, changed := f.DecMapUint32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint32BoolV(v map[uint32]bool, canChange bool, + d *Decoder) (_ map[uint32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[uint32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[uint64]interface{}) + v, changed := fastpathTV.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntfV(rv2i(rv).(map[uint64]interface{}), false, d) + } +} +func (f fastpathT) DecMapUint64IntfX(vp *map[uint64]interface{}, d *Decoder) { + v, changed := f.DecMapUint64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntfV(v map[uint64]interface{}, canChange bool, + d *Decoder) (_ map[uint64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uint64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]string) + v, changed := fastpathTV.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64StringV(rv2i(rv).(map[uint64]string), false, d) + } +} +func (f fastpathT) DecMapUint64StringX(vp *map[uint64]string, d *Decoder) { + v, changed := f.DecMapUint64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64StringV(v map[uint64]string, canChange bool, + d *Decoder) (_ map[uint64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uint64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint) + v, changed := fastpathTV.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintV(rv2i(rv).(map[uint64]uint), false, d) + } +} +func (f fastpathT) DecMapUint64UintX(vp *map[uint64]uint, d *Decoder) { + v, changed := f.DecMapUint64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintV(v map[uint64]uint, canChange bool, + d *Decoder) (_ map[uint64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { 
+ xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint8) + v, changed := fastpathTV.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint8V(rv2i(rv).(map[uint64]uint8), false, d) + } +} +func (f fastpathT) DecMapUint64Uint8X(vp *map[uint64]uint8, d *Decoder) { + v, changed := f.DecMapUint64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint8V(v map[uint64]uint8, canChange bool, + d *Decoder) (_ map[uint64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint16) + v, changed := fastpathTV.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint16V(rv2i(rv).(map[uint64]uint16), false, d) + } +} +func (f fastpathT) DecMapUint64Uint16X(vp *map[uint64]uint16, d *Decoder) { + v, changed := f.DecMapUint64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint16V(v map[uint64]uint16, canChange bool, + d *Decoder) (_ map[uint64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d 
*Decoder) fastpathDecMapUint64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint32) + v, changed := fastpathTV.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint32V(rv2i(rv).(map[uint64]uint32), false, d) + } +} +func (f fastpathT) DecMapUint64Uint32X(vp *map[uint64]uint32, d *Decoder) { + v, changed := f.DecMapUint64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint32V(v map[uint64]uint32, canChange bool, + d *Decoder) (_ map[uint64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uint64) + v, changed := fastpathTV.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Uint64V(rv2i(rv).(map[uint64]uint64), false, d) + } +} +func (f fastpathT) DecMapUint64Uint64X(vp *map[uint64]uint64, d *Decoder) { + v, changed := f.DecMapUint64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Uint64V(v map[uint64]uint64, canChange bool, + d *Decoder) (_ map[uint64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]uintptr) + v, changed := fastpathTV.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64UintptrV(rv2i(rv).(map[uint64]uintptr), false, d) + } +} +func (f fastpathT) DecMapUint64UintptrX(vp *map[uint64]uintptr, d *Decoder) { + v, changed := f.DecMapUint64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64UintptrV(v map[uint64]uintptr, canChange bool, + d *Decoder) (_ map[uint64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange 
&& v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int) + v, changed := fastpathTV.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64IntV(rv2i(rv).(map[uint64]int), false, d) + } +} +func (f fastpathT) DecMapUint64IntX(vp *map[uint64]int, d *Decoder) { + v, changed := f.DecMapUint64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64IntV(v map[uint64]int, canChange bool, + d *Decoder) (_ map[uint64]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int8) + v, changed := fastpathTV.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int8V(rv2i(rv).(map[uint64]int8), false, d) + } +} +func (f fastpathT) DecMapUint64Int8X(vp *map[uint64]int8, d *Decoder) { + v, changed := f.DecMapUint64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int8V(v map[uint64]int8, canChange bool, + d *Decoder) (_ map[uint64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUint64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int16) + v, changed := fastpathTV.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int16V(rv2i(rv).(map[uint64]int16), false, d) + } +} +func (f fastpathT) DecMapUint64Int16X(vp *map[uint64]int16, d *Decoder) { + v, changed := f.DecMapUint64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int16V(v map[uint64]int16, canChange bool, + d *Decoder) (_ map[uint64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uint64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int32) + v, changed := fastpathTV.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int32V(rv2i(rv).(map[uint64]int32), false, d) + } +} +func (f fastpathT) DecMapUint64Int32X(vp *map[uint64]int32, d *Decoder) { + v, changed := f.DecMapUint64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int32V(v map[uint64]int32, canChange bool, + d *Decoder) (_ map[uint64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]int64) + v, changed := fastpathTV.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Int64V(rv2i(rv).(map[uint64]int64), false, d) + } +} +func (f fastpathT) DecMapUint64Int64X(vp *map[uint64]int64, d *Decoder) { + v, changed := f.DecMapUint64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Int64V(v map[uint64]int64, canChange bool, + d *Decoder) (_ map[uint64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float32) + v, changed := fastpathTV.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float32V(rv2i(rv).(map[uint64]float32), false, d) + } +} +func (f fastpathT) DecMapUint64Float32X(vp *map[uint64]float32, d *Decoder) { + v, changed := f.DecMapUint64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float32V(v map[uint64]float32, canChange bool, + d *Decoder) (_ map[uint64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uint64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUint64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]float64) + v, changed := fastpathTV.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64Float64V(rv2i(rv).(map[uint64]float64), false, d) + } +} +func (f fastpathT) DecMapUint64Float64X(vp *map[uint64]float64, d *Decoder) { + v, changed := f.DecMapUint64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64Float64V(v map[uint64]float64, canChange bool, + d *Decoder) (_ map[uint64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uint64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapUint64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uint64]bool) + v, changed := fastpathTV.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUint64BoolV(rv2i(rv).(map[uint64]bool), false, d) + } +} +func (f fastpathT) DecMapUint64BoolX(vp *map[uint64]bool, d *Decoder) { + v, changed := f.DecMapUint64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUint64BoolV(v map[uint64]bool, canChange bool, + d *Decoder) (_ map[uint64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uint64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uint64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeUint64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]interface{}) + v, changed := fastpathTV.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrIntfV(rv2i(rv).(map[uintptr]interface{}), false, d) + } +} +func (f fastpathT) DecMapUintptrIntfX(vp *map[uintptr]interface{}, d *Decoder) { + v, changed := f.DecMapUintptrIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntfV(v map[uintptr]interface{}, canChange bool, + d *Decoder) (_ map[uintptr]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk uintptr + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]string) + v, changed := fastpathTV.DecMapUintptrStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrStringV(rv2i(rv).(map[uintptr]string), false, d) + } +} +func (f fastpathT) DecMapUintptrStringX(vp *map[uintptr]string, d *Decoder) { + v, changed := f.DecMapUintptrStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrStringV(v map[uintptr]string, canChange bool, + d *Decoder) (_ 
map[uintptr]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[uintptr]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint) + v, changed := fastpathTV.DecMapUintptrUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUintV(rv2i(rv).(map[uintptr]uint), false, d) + } +} +func (f fastpathT) DecMapUintptrUintX(vp *map[uintptr]uint, d *Decoder) { + v, changed := f.DecMapUintptrUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintV(v map[uintptr]uint, canChange bool, + d *Decoder) (_ map[uintptr]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint8) + v, changed := fastpathTV.DecMapUintptrUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint8V(rv2i(rv).(map[uintptr]uint8), false, d) + } +} +func (f fastpathT) DecMapUintptrUint8X(vp *map[uintptr]uint8, d *Decoder) { + v, changed := f.DecMapUintptrUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint8V(v map[uintptr]uint8, canChange bool, + d *Decoder) (_ map[uintptr]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } 
else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint16) + v, changed := fastpathTV.DecMapUintptrUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint16V(rv2i(rv).(map[uintptr]uint16), false, d) + } +} +func (f fastpathT) DecMapUintptrUint16X(vp *map[uintptr]uint16, d *Decoder) { + v, changed := f.DecMapUintptrUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint16V(v map[uintptr]uint16, canChange bool, + d *Decoder) (_ map[uintptr]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint32) + v, changed := fastpathTV.DecMapUintptrUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint32V(rv2i(rv).(map[uintptr]uint32), false, d) + } +} +func (f fastpathT) DecMapUintptrUint32X(vp *map[uintptr]uint32, d *Decoder) { + v, changed := f.DecMapUintptrUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint32V(v map[uintptr]uint32, canChange bool, + d *Decoder) (_ map[uintptr]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uint64) + v, changed := fastpathTV.DecMapUintptrUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUint64V(rv2i(rv).(map[uintptr]uint64), false, d) + } +} +func (f fastpathT) 
DecMapUintptrUint64X(vp *map[uintptr]uint64, d *Decoder) { + v, changed := f.DecMapUintptrUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUint64V(v map[uintptr]uint64, canChange bool, + d *Decoder) (_ map[uintptr]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]uintptr) + v, changed := fastpathTV.DecMapUintptrUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrUintptrV(rv2i(rv).(map[uintptr]uintptr), false, d) + } +} +func (f fastpathT) DecMapUintptrUintptrX(vp *map[uintptr]uintptr, d *Decoder) { + v, changed := f.DecMapUintptrUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrUintptrV(v map[uintptr]uintptr, canChange bool, + d *Decoder) (_ map[uintptr]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int) + v, changed := fastpathTV.DecMapUintptrIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrIntV(rv2i(rv).(map[uintptr]int), false, d) + } +} +func (f fastpathT) DecMapUintptrIntX(vp *map[uintptr]int, d *Decoder) { + v, changed := f.DecMapUintptrIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrIntV(v map[uintptr]int, canChange bool, + d *Decoder) (_ map[uintptr]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv int + hasLen := containerLen > 0 + for j := 
0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int8) + v, changed := fastpathTV.DecMapUintptrInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt8V(rv2i(rv).(map[uintptr]int8), false, d) + } +} +func (f fastpathT) DecMapUintptrInt8X(vp *map[uintptr]int8, d *Decoder) { + v, changed := f.DecMapUintptrInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt8V(v map[uintptr]int8, canChange bool, + d *Decoder) (_ map[uintptr]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int16) + v, changed := fastpathTV.DecMapUintptrInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt16V(rv2i(rv).(map[uintptr]int16), false, d) + } +} +func (f fastpathT) DecMapUintptrInt16X(vp *map[uintptr]int16, d *Decoder) { + v, changed := f.DecMapUintptrInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt16V(v map[uintptr]int16, canChange bool, + d *Decoder) (_ map[uintptr]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[uintptr]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int32) + v, 
changed := fastpathTV.DecMapUintptrInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt32V(rv2i(rv).(map[uintptr]int32), false, d) + } +} +func (f fastpathT) DecMapUintptrInt32X(vp *map[uintptr]int32, d *Decoder) { + v, changed := f.DecMapUintptrInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt32V(v map[uintptr]int32, canChange bool, + d *Decoder) (_ map[uintptr]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[uintptr]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]int64) + v, changed := fastpathTV.DecMapUintptrInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrInt64V(rv2i(rv).(map[uintptr]int64), false, d) + } +} +func (f fastpathT) DecMapUintptrInt64X(vp *map[uintptr]int64, d *Decoder) { + v, changed := f.DecMapUintptrInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrInt64V(v map[uintptr]int64, canChange bool, + d *Decoder) (_ map[uintptr]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float32) + v, changed := fastpathTV.DecMapUintptrFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrFloat32V(rv2i(rv).(map[uintptr]float32), false, d) + } +} +func (f fastpathT) DecMapUintptrFloat32X(vp *map[uintptr]float32, d *Decoder) { + v, changed := f.DecMapUintptrFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat32V(v map[uintptr]float32, canChange bool, + d *Decoder) (_ map[uintptr]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + 
v = make(map[uintptr]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]float64) + v, changed := fastpathTV.DecMapUintptrFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrFloat64V(rv2i(rv).(map[uintptr]float64), false, d) + } +} +func (f fastpathT) DecMapUintptrFloat64X(vp *map[uintptr]float64, d *Decoder) { + v, changed := f.DecMapUintptrFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrFloat64V(v map[uintptr]float64, canChange bool, + d *Decoder) (_ map[uintptr]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[uintptr]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapUintptrBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[uintptr]bool) + v, changed := fastpathTV.DecMapUintptrBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapUintptrBoolV(rv2i(rv).(map[uintptr]bool), false, d) + } +} +func (f fastpathT) DecMapUintptrBoolX(vp *map[uintptr]bool, d *Decoder) { + v, changed := f.DecMapUintptrBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapUintptrBoolV(v map[uintptr]bool, canChange bool, + d *Decoder) (_ map[uintptr]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[uintptr]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk uintptr + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + 
return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]interface{}) + v, changed := fastpathTV.DecMapIntIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntIntfV(rv2i(rv).(map[int]interface{}), false, d) + } +} +func (f fastpathT) DecMapIntIntfX(vp *map[int]interface{}, d *Decoder) { + v, changed := f.DecMapIntIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntfV(v map[int]interface{}, canChange bool, + d *Decoder) (_ map[int]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]string) + v, changed := fastpathTV.DecMapIntStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntStringV(rv2i(rv).(map[int]string), false, d) + } +} +func (f fastpathT) DecMapIntStringX(vp *map[int]string, d *Decoder) { + v, changed := f.DecMapIntStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntStringV(v map[int]string, canChange bool, + d *Decoder) (_ map[int]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint) + v, changed := fastpathTV.DecMapIntUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUintV(rv2i(rv).(map[int]uint), false, d) + } +} +func (f fastpathT) DecMapIntUintX(vp *map[int]uint, d *Decoder) { + v, changed := f.DecMapIntUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintV(v map[int]uint, canChange bool, + d *Decoder) (_ map[int]uint, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint8) + v, changed := fastpathTV.DecMapIntUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint8V(rv2i(rv).(map[int]uint8), false, d) + } +} +func (f fastpathT) DecMapIntUint8X(vp *map[int]uint8, d *Decoder) { + v, changed := f.DecMapIntUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint8V(v map[int]uint8, canChange bool, + d *Decoder) (_ map[int]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint16) + v, changed := fastpathTV.DecMapIntUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint16V(rv2i(rv).(map[int]uint16), false, d) + } +} +func (f fastpathT) DecMapIntUint16X(vp *map[int]uint16, d *Decoder) { + v, changed := f.DecMapIntUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint16V(v map[int]uint16, canChange bool, + d *Decoder) (_ map[int]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) 
+ if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint32) + v, changed := fastpathTV.DecMapIntUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint32V(rv2i(rv).(map[int]uint32), false, d) + } +} +func (f fastpathT) DecMapIntUint32X(vp *map[int]uint32, d *Decoder) { + v, changed := f.DecMapIntUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint32V(v map[int]uint32, canChange bool, + d *Decoder) (_ map[int]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uint64) + v, changed := fastpathTV.DecMapIntUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUint64V(rv2i(rv).(map[int]uint64), false, d) + } +} +func (f fastpathT) DecMapIntUint64X(vp *map[int]uint64, d *Decoder) { + v, changed := f.DecMapIntUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUint64V(v map[int]uint64, canChange bool, + d *Decoder) (_ map[int]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]uintptr) + v, changed := fastpathTV.DecMapIntUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntUintptrV(rv2i(rv).(map[int]uintptr), false, d) + } +} +func (f fastpathT) DecMapIntUintptrX(vp *map[int]uintptr, d *Decoder) { + v, changed := f.DecMapIntUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntUintptrV(v map[int]uintptr, canChange bool, + d *Decoder) (_ map[int]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen 
:= dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int) + v, changed := fastpathTV.DecMapIntIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntIntV(rv2i(rv).(map[int]int), false, d) + } +} +func (f fastpathT) DecMapIntIntX(vp *map[int]int, d *Decoder) { + v, changed := f.DecMapIntIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntIntV(v map[int]int, canChange bool, + d *Decoder) (_ map[int]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int8) + v, changed := fastpathTV.DecMapIntInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt8V(rv2i(rv).(map[int]int8), false, d) + } +} +func (f fastpathT) DecMapIntInt8X(vp *map[int]int8, d *Decoder) { + v, changed := f.DecMapIntInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt8V(v map[int]int8, canChange bool, + d *Decoder) (_ map[int]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + 
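The hundreds of DecMap*V functions in this vendored fastpath.generated.go all instantiate one template: read the map header, allocate a nil destination only when canChange is true, then loop over key/value pairs, honoring DeleteOnNilMapValue when a value decodes as nil. A minimal standalone sketch of that control flow follows; it is not part of the vendored file, the names decMapIntIntV and kv are hypothetical, and the codec's stream reads are replaced by a plain slice of pairs.

package main

import "fmt"

type kv struct{ k, v int }

// decMapIntIntV mirrors the generated template for map[int]int: allocate on
// nil only when canChange is true, treat v == -1 as a stand-in for
// TryDecodeAsNil, apply the delete-on-nil policy, and report whether the
// returned map differs from the one passed in.
func decMapIntIntV(v map[int]int, canChange bool, in []kv, deleteOnNil bool) (map[int]int, bool) {
	changed := false
	if canChange && v == nil {
		v = make(map[int]int, len(in)) // analogue of the decInferLen sizing hint
		changed = true
	}
	for _, e := range in {
		if e.v == -1 { // nil value in the stream
			if v == nil {
				// nothing to do, mirroring the generated empty branch
			} else if deleteOnNil {
				delete(v, e.k)
			} else {
				v[e.k] = 0 // zero value, as the generated code assigns
			}
			continue
		}
		if v != nil {
			v[e.k] = e.v
		}
	}
	return v, changed
}

func main() {
	m, changed := decMapIntIntV(nil, true, []kv{{1, 10}, {2, -1}}, false)
	fmt.Println(m, changed) // map[1:10 2:0] true
}

The canChange flag is why each type gets both a *R reflect entry point (pointer receivers pass true so the freshly made map can be written back through *vp) and a V worker that callers with a non-nil map invoke with false.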
+func (d *Decoder) fastpathDecMapIntInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int16) + v, changed := fastpathTV.DecMapIntInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt16V(rv2i(rv).(map[int]int16), false, d) + } +} +func (f fastpathT) DecMapIntInt16X(vp *map[int]int16, d *Decoder) { + v, changed := f.DecMapIntInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt16V(v map[int]int16, canChange bool, + d *Decoder) (_ map[int]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int32) + v, changed := fastpathTV.DecMapIntInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt32V(rv2i(rv).(map[int]int32), false, d) + } +} +func (f fastpathT) DecMapIntInt32X(vp *map[int]int32, d *Decoder) { + v, changed := f.DecMapIntInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt32V(v map[int]int32, canChange bool, + d *Decoder) (_ map[int]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]int64) + v, changed := fastpathTV.DecMapIntInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntInt64V(rv2i(rv).(map[int]int64), false, d) + } +} +func (f fastpathT) DecMapIntInt64X(vp *map[int]int64, d *Decoder) { + v, changed := f.DecMapIntInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntInt64V(v map[int]int64, canChange bool, + d *Decoder) (_ map[int]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v 
= make(map[int]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float32) + v, changed := fastpathTV.DecMapIntFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntFloat32V(rv2i(rv).(map[int]float32), false, d) + } +} +func (f fastpathT) DecMapIntFloat32X(vp *map[int]float32, d *Decoder) { + v, changed := f.DecMapIntFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat32V(v map[int]float32, canChange bool, + d *Decoder) (_ map[int]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]float64) + v, changed := fastpathTV.DecMapIntFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntFloat64V(rv2i(rv).(map[int]float64), false, d) + } +} +func (f fastpathT) DecMapIntFloat64X(vp *map[int]float64, d *Decoder) { + v, changed := f.DecMapIntFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntFloat64V(v map[int]float64, canChange bool, + d *Decoder) (_ map[int]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapIntBoolR(f *codecFnInfo, rv reflect.Value) { + if 
rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int]bool) + v, changed := fastpathTV.DecMapIntBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapIntBoolV(rv2i(rv).(map[int]bool), false, d) + } +} +func (f fastpathT) DecMapIntBoolX(vp *map[int]bool, d *Decoder) { + v, changed := f.DecMapIntBoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapIntBoolV(v map[int]bool, canChange bool, + d *Decoder) (_ map[int]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]interface{}) + v, changed := fastpathTV.DecMapInt8IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8IntfV(rv2i(rv).(map[int8]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt8IntfX(vp *map[int8]interface{}, d *Decoder) { + v, changed := f.DecMapInt8IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntfV(v map[int8]interface{}, canChange bool, + d *Decoder) (_ map[int8]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int8 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]string) + v, changed := fastpathTV.DecMapInt8StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8StringV(rv2i(rv).(map[int8]string), false, d) + } +} +func (f fastpathT) DecMapInt8StringX(vp *map[int8]string, d *Decoder) { + v, changed := f.DecMapInt8StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8StringV(v map[int8]string, canChange bool, + d *Decoder) (_ map[int8]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[int8]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint) + v, changed := fastpathTV.DecMapInt8UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8UintV(rv2i(rv).(map[int8]uint), false, d) + } +} +func (f fastpathT) DecMapInt8UintX(vp *map[int8]uint, d *Decoder) { + v, changed := f.DecMapInt8UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintV(v map[int8]uint, canChange bool, + d *Decoder) (_ map[int8]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint8) + v, changed := fastpathTV.DecMapInt8Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint8V(rv2i(rv).(map[int8]uint8), false, d) + } +} +func (f fastpathT) DecMapInt8Uint8X(vp *map[int8]uint8, d *Decoder) { + v, changed := f.DecMapInt8Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint8V(v map[int8]uint8, canChange bool, + d *Decoder) (_ map[int8]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint16R(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint16) + v, changed := fastpathTV.DecMapInt8Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint16V(rv2i(rv).(map[int8]uint16), false, d) + } +} +func (f fastpathT) DecMapInt8Uint16X(vp *map[int8]uint16, d *Decoder) { + v, changed := f.DecMapInt8Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint16V(v map[int8]uint16, canChange bool, + d *Decoder) (_ map[int8]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint32) + v, changed := fastpathTV.DecMapInt8Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint32V(rv2i(rv).(map[int8]uint32), false, d) + } +} +func (f fastpathT) DecMapInt8Uint32X(vp *map[int8]uint32, d *Decoder) { + v, changed := f.DecMapInt8Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint32V(v map[int8]uint32, canChange bool, + d *Decoder) (_ map[int8]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uint64) + v, changed := fastpathTV.DecMapInt8Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Uint64V(rv2i(rv).(map[int8]uint64), false, d) + } +} +func (f fastpathT) DecMapInt8Uint64X(vp *map[int8]uint64, d *Decoder) { + v, changed := f.DecMapInt8Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Uint64V(v map[int8]uint64, canChange bool, + d *Decoder) (_ map[int8]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = 
make(map[int8]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]uintptr) + v, changed := fastpathTV.DecMapInt8UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8UintptrV(rv2i(rv).(map[int8]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt8UintptrX(vp *map[int8]uintptr, d *Decoder) { + v, changed := f.DecMapInt8UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8UintptrV(v map[int8]uintptr, canChange bool, + d *Decoder) (_ map[int8]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int) + v, changed := fastpathTV.DecMapInt8IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8IntV(rv2i(rv).(map[int8]int), false, d) + } +} +func (f fastpathT) DecMapInt8IntX(vp *map[int8]int, d *Decoder) { + v, changed := f.DecMapInt8IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8IntV(v map[int8]int, canChange bool, + d *Decoder) (_ map[int8]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[int8]int8) + v, changed := fastpathTV.DecMapInt8Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int8V(rv2i(rv).(map[int8]int8), false, d) + } +} +func (f fastpathT) DecMapInt8Int8X(vp *map[int8]int8, d *Decoder) { + v, changed := f.DecMapInt8Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int8V(v map[int8]int8, canChange bool, + d *Decoder) (_ map[int8]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int16) + v, changed := fastpathTV.DecMapInt8Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int16V(rv2i(rv).(map[int8]int16), false, d) + } +} +func (f fastpathT) DecMapInt8Int16X(vp *map[int8]int16, d *Decoder) { + v, changed := f.DecMapInt8Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int16V(v map[int8]int16, canChange bool, + d *Decoder) (_ map[int8]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int8]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int32) + v, changed := fastpathTV.DecMapInt8Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int32V(rv2i(rv).(map[int8]int32), false, d) + } +} +func (f fastpathT) DecMapInt8Int32X(vp *map[int8]int32, d *Decoder) { + v, changed := f.DecMapInt8Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int32V(v map[int8]int32, canChange bool, + d *Decoder) (_ map[int8]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() 
+ return v, changed + } + var mk int8 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]int64) + v, changed := fastpathTV.DecMapInt8Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Int64V(rv2i(rv).(map[int8]int64), false, d) + } +} +func (f fastpathT) DecMapInt8Int64X(vp *map[int8]int64, d *Decoder) { + v, changed := f.DecMapInt8Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Int64V(v map[int8]int64, canChange bool, + d *Decoder) (_ map[int8]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float32) + v, changed := fastpathTV.DecMapInt8Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Float32V(rv2i(rv).(map[int8]float32), false, d) + } +} +func (f fastpathT) DecMapInt8Float32X(vp *map[int8]float32, d *Decoder) { + v, changed := f.DecMapInt8Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float32V(v map[int8]float32, canChange bool, + d *Decoder) (_ map[int8]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int8]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]float64) + v, changed := 
fastpathTV.DecMapInt8Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8Float64V(rv2i(rv).(map[int8]float64), false, d) + } +} +func (f fastpathT) DecMapInt8Float64X(vp *map[int8]float64, d *Decoder) { + v, changed := f.DecMapInt8Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8Float64V(v map[int8]float64, canChange bool, + d *Decoder) (_ map[int8]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int8]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt8BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int8]bool) + v, changed := fastpathTV.DecMapInt8BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt8BoolV(rv2i(rv).(map[int8]bool), false, d) + } +} +func (f fastpathT) DecMapInt8BoolX(vp *map[int8]bool, d *Decoder) { + v, changed := f.DecMapInt8BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt8BoolV(v map[int8]bool, canChange bool, + d *Decoder) (_ map[int8]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[int8]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int8 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]interface{}) + v, changed := fastpathTV.DecMapInt16IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16IntfV(rv2i(rv).(map[int16]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt16IntfX(vp *map[int16]interface{}, d *Decoder) { + v, changed := f.DecMapInt16IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntfV(v map[int16]interface{}, canChange bool, + d *Decoder) (_ map[int16]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != 
nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int16 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]string) + v, changed := fastpathTV.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16StringV(rv2i(rv).(map[int16]string), false, d) + } +} +func (f fastpathT) DecMapInt16StringX(vp *map[int16]string, d *Decoder) { + v, changed := f.DecMapInt16StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16StringV(v map[int16]string, canChange bool, + d *Decoder) (_ map[int16]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 18) + v = make(map[int16]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint) + v, changed := fastpathTV.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintV(rv2i(rv).(map[int16]uint), false, d) + } +} +func (f fastpathT) DecMapInt16UintX(vp *map[int16]uint, d *Decoder) { + v, changed := f.DecMapInt16UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintV(v map[int16]uint, canChange bool, + d *Decoder) (_ map[int16]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint8) + v, changed := fastpathTV.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint8V(rv2i(rv).(map[int16]uint8), false, d) + } +} +func (f fastpathT) DecMapInt16Uint8X(vp *map[int16]uint8, d *Decoder) { + v, changed := f.DecMapInt16Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint8V(v map[int16]uint8, canChange bool, + d *Decoder) (_ map[int16]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint16) + v, changed := fastpathTV.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint16V(rv2i(rv).(map[int16]uint16), false, d) + } +} +func (f fastpathT) DecMapInt16Uint16X(vp *map[int16]uint16, d *Decoder) { + v, changed := f.DecMapInt16Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint16V(v map[int16]uint16, canChange bool, + d *Decoder) (_ map[int16]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 4) + v = make(map[int16]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint32) + v, changed := fastpathTV.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint32V(rv2i(rv).(map[int16]uint32), false, d) + } +} +func (f fastpathT) DecMapInt16Uint32X(vp *map[int16]uint32, d *Decoder) { + v, changed := f.DecMapInt16Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint32V(v map[int16]uint32, canChange bool, + d *Decoder) (_ map[int16]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = 
make(map[int16]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uint64) + v, changed := fastpathTV.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Uint64V(rv2i(rv).(map[int16]uint64), false, d) + } +} +func (f fastpathT) DecMapInt16Uint64X(vp *map[int16]uint64, d *Decoder) { + v, changed := f.DecMapInt16Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Uint64V(v map[int16]uint64, canChange bool, + d *Decoder) (_ map[int16]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]uintptr) + v, changed := fastpathTV.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16UintptrV(rv2i(rv).(map[int16]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt16UintptrX(vp *map[int16]uintptr, d *Decoder) { + v, changed := f.DecMapInt16UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16UintptrV(v map[int16]uintptr, canChange bool, + d *Decoder) (_ map[int16]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) 
fastpathDecMapInt16IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int) + v, changed := fastpathTV.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16IntV(rv2i(rv).(map[int16]int), false, d) + } +} +func (f fastpathT) DecMapInt16IntX(vp *map[int16]int, d *Decoder) { + v, changed := f.DecMapInt16IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16IntV(v map[int16]int, canChange bool, + d *Decoder) (_ map[int16]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int8) + v, changed := fastpathTV.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int8V(rv2i(rv).(map[int16]int8), false, d) + } +} +func (f fastpathT) DecMapInt16Int8X(vp *map[int16]int8, d *Decoder) { + v, changed := f.DecMapInt16Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int8V(v map[int16]int8, canChange bool, + d *Decoder) (_ map[int16]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[int16]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int16) + v, changed := fastpathTV.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int16V(rv2i(rv).(map[int16]int16), false, d) + } +} +func (f fastpathT) DecMapInt16Int16X(vp *map[int16]int16, d *Decoder) { + v, changed := f.DecMapInt16Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int16V(v map[int16]int16, canChange bool, + d *Decoder) (_ map[int16]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 
4) + v = make(map[int16]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int32) + v, changed := fastpathTV.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int32V(rv2i(rv).(map[int16]int32), false, d) + } +} +func (f fastpathT) DecMapInt16Int32X(vp *map[int16]int32, d *Decoder) { + v, changed := f.DecMapInt16Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int32V(v map[int16]int32, canChange bool, + d *Decoder) (_ map[int16]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]int64) + v, changed := fastpathTV.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Int64V(rv2i(rv).(map[int16]int64), false, d) + } +} +func (f fastpathT) DecMapInt16Int64X(vp *map[int16]int64, d *Decoder) { + v, changed := f.DecMapInt16Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Int64V(v map[int16]int64, canChange bool, + d *Decoder) (_ map[int16]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float32R(f *codecFnInfo, rv 
reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float32) + v, changed := fastpathTV.DecMapInt16Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float32V(rv2i(rv).(map[int16]float32), false, d) + } +} +func (f fastpathT) DecMapInt16Float32X(vp *map[int16]float32, d *Decoder) { + v, changed := f.DecMapInt16Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float32V(v map[int16]float32, canChange bool, + d *Decoder) (_ map[int16]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int16]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]float64) + v, changed := fastpathTV.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16Float64V(rv2i(rv).(map[int16]float64), false, d) + } +} +func (f fastpathT) DecMapInt16Float64X(vp *map[int16]float64, d *Decoder) { + v, changed := f.DecMapInt16Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16Float64V(v map[int16]float64, canChange bool, + d *Decoder) (_ map[int16]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int16]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt16BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int16]bool) + v, changed := fastpathTV.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt16BoolV(rv2i(rv).(map[int16]bool), false, d) + } +} +func (f fastpathT) DecMapInt16BoolX(vp *map[int16]bool, d *Decoder) { + v, changed := f.DecMapInt16BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt16BoolV(v map[int16]bool, canChange bool, + d *Decoder) (_ map[int16]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, 
d.h.MaxInitLen, 3) + v = make(map[int16]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int16 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]interface{}) + v, changed := fastpathTV.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntfV(rv2i(rv).(map[int32]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt32IntfX(vp *map[int32]interface{}, d *Decoder) { + v, changed := f.DecMapInt32IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntfV(v map[int32]interface{}, canChange bool, + d *Decoder) (_ map[int32]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int32 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]string) + v, changed := fastpathTV.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32StringV(rv2i(rv).(map[int32]string), false, d) + } +} +func (f fastpathT) DecMapInt32StringX(vp *map[int32]string, d *Decoder) { + v, changed := f.DecMapInt32StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32StringV(v map[int32]string, canChange bool, + d *Decoder) (_ map[int32]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 20) + v = make(map[int32]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + 
v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint) + v, changed := fastpathTV.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintV(rv2i(rv).(map[int32]uint), false, d) + } +} +func (f fastpathT) DecMapInt32UintX(vp *map[int32]uint, d *Decoder) { + v, changed := f.DecMapInt32UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintV(v map[int32]uint, canChange bool, + d *Decoder) (_ map[int32]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint8) + v, changed := fastpathTV.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint8V(rv2i(rv).(map[int32]uint8), false, d) + } +} +func (f fastpathT) DecMapInt32Uint8X(vp *map[int32]uint8, d *Decoder) { + v, changed := f.DecMapInt32Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint8V(v map[int32]uint8, canChange bool, + d *Decoder) (_ map[int32]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint16) + v, changed := fastpathTV.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint16V(rv2i(rv).(map[int32]uint16), false, d) + } +} +func (f fastpathT) DecMapInt32Uint16X(vp *map[int32]uint16, d *Decoder) { + v, changed := f.DecMapInt32Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint16V(v map[int32]uint16, canChange bool, + d *Decoder) (_ map[int32]uint16, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint32) + v, changed := fastpathTV.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint32V(rv2i(rv).(map[int32]uint32), false, d) + } +} +func (f fastpathT) DecMapInt32Uint32X(vp *map[int32]uint32, d *Decoder) { + v, changed := f.DecMapInt32Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint32V(v map[int32]uint32, canChange bool, + d *Decoder) (_ map[int32]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uint64) + v, changed := fastpathTV.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Uint64V(rv2i(rv).(map[int32]uint64), false, d) + } +} +func (f fastpathT) DecMapInt32Uint64X(vp *map[int32]uint64, d *Decoder) { + v, changed := f.DecMapInt32Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Uint64V(v map[int32]uint64, canChange bool, + d *Decoder) (_ map[int32]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + 
mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]uintptr) + v, changed := fastpathTV.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32UintptrV(rv2i(rv).(map[int32]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt32UintptrX(vp *map[int32]uintptr, d *Decoder) { + v, changed := f.DecMapInt32UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32UintptrV(v map[int32]uintptr, canChange bool, + d *Decoder) (_ map[int32]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int) + v, changed := fastpathTV.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32IntV(rv2i(rv).(map[int32]int), false, d) + } +} +func (f fastpathT) DecMapInt32IntX(vp *map[int32]int, d *Decoder) { + v, changed := f.DecMapInt32IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32IntV(v map[int32]int, canChange bool, + d *Decoder) (_ map[int32]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int8) + v, changed := fastpathTV.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int8V(rv2i(rv).(map[int32]int8), false, d) + } +} +func (f fastpathT) DecMapInt32Int8X(vp *map[int32]int8, d *Decoder) { + v, changed := f.DecMapInt32Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int8V(v map[int32]int8, canChange bool, + d *Decoder) (_ map[int32]int8, changed 
bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int16) + v, changed := fastpathTV.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int16V(rv2i(rv).(map[int32]int16), false, d) + } +} +func (f fastpathT) DecMapInt32Int16X(vp *map[int32]int16, d *Decoder) { + v, changed := f.DecMapInt32Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int16V(v map[int32]int16, canChange bool, + d *Decoder) (_ map[int32]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 6) + v = make(map[int32]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int32) + v, changed := fastpathTV.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int32V(rv2i(rv).(map[int32]int32), false, d) + } +} +func (f fastpathT) DecMapInt32Int32X(vp *map[int32]int32, d *Decoder) { + v, changed := f.DecMapInt32Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int32V(v map[int32]int32, canChange bool, + d *Decoder) (_ map[int32]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = 
int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]int64) + v, changed := fastpathTV.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Int64V(rv2i(rv).(map[int32]int64), false, d) + } +} +func (f fastpathT) DecMapInt32Int64X(vp *map[int32]int64, d *Decoder) { + v, changed := f.DecMapInt32Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Int64V(v map[int32]int64, canChange bool, + d *Decoder) (_ map[int32]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float32) + v, changed := fastpathTV.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float32V(rv2i(rv).(map[int32]float32), false, d) + } +} +func (f fastpathT) DecMapInt32Float32X(vp *map[int32]float32, d *Decoder) { + v, changed := f.DecMapInt32Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float32V(v map[int32]float32, canChange bool, + d *Decoder) (_ map[int32]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 8) + v = make(map[int32]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]float64) + v, changed := fastpathTV.DecMapInt32Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32Float64V(rv2i(rv).(map[int32]float64), false, d) + } +} +func (f fastpathT) DecMapInt32Float64X(vp *map[int32]float64, d *Decoder) { + v, changed := f.DecMapInt32Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32Float64V(v map[int32]float64, canChange bool, + d 
*Decoder) (_ map[int32]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int32]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt32BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int32]bool) + v, changed := fastpathTV.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt32BoolV(rv2i(rv).(map[int32]bool), false, d) + } +} +func (f fastpathT) DecMapInt32BoolX(vp *map[int32]bool, d *Decoder) { + v, changed := f.DecMapInt32BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt32BoolV(v map[int32]bool, canChange bool, + d *Decoder) (_ map[int32]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[int32]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int32 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]interface{}) + v, changed := fastpathTV.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntfV(rv2i(rv).(map[int64]interface{}), false, d) + } +} +func (f fastpathT) DecMapInt64IntfX(vp *map[int64]interface{}, d *Decoder) { + v, changed := f.DecMapInt64IntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntfV(v map[int64]interface{}, canChange bool, + d *Decoder) (_ map[int64]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk int64 + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { 
+ delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64StringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]string) + v, changed := fastpathTV.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64StringV(rv2i(rv).(map[int64]string), false, d) + } +} +func (f fastpathT) DecMapInt64StringX(vp *map[int64]string, d *Decoder) { + v, changed := f.DecMapInt64StringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64StringV(v map[int64]string, canChange bool, + d *Decoder) (_ map[int64]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 24) + v = make(map[int64]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint) + v, changed := fastpathTV.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintV(rv2i(rv).(map[int64]uint), false, d) + } +} +func (f fastpathT) DecMapInt64UintX(vp *map[int64]uint, d *Decoder) { + v, changed := f.DecMapInt64UintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintV(v map[int64]uint, canChange bool, + d *Decoder) (_ map[int64]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint8) + v, changed := fastpathTV.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint8V(rv2i(rv).(map[int64]uint8), false, d) + } +} +func (f fastpathT) DecMapInt64Uint8X(vp *map[int64]uint8, d *Decoder) { + v, changed := f.DecMapInt64Uint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint8V(v map[int64]uint8, canChange bool, + d 
*Decoder) (_ map[int64]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint16) + v, changed := fastpathTV.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint16V(rv2i(rv).(map[int64]uint16), false, d) + } +} +func (f fastpathT) DecMapInt64Uint16X(vp *map[int64]uint16, d *Decoder) { + v, changed := f.DecMapInt64Uint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint16V(v map[int64]uint16, canChange bool, + d *Decoder) (_ map[int64]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint32) + v, changed := fastpathTV.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint32V(rv2i(rv).(map[int64]uint32), false, d) + } +} +func (f fastpathT) DecMapInt64Uint32X(vp *map[int64]uint32, d *Decoder) { + v, changed := f.DecMapInt64Uint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint32V(v map[int64]uint32, canChange bool, + d *Decoder) (_ map[int64]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = 
uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Uint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uint64) + v, changed := fastpathTV.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Uint64V(rv2i(rv).(map[int64]uint64), false, d) + } +} +func (f fastpathT) DecMapInt64Uint64X(vp *map[int64]uint64, d *Decoder) { + v, changed := f.DecMapInt64Uint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Uint64V(v map[int64]uint64, canChange bool, + d *Decoder) (_ map[int64]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64UintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]uintptr) + v, changed := fastpathTV.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64UintptrV(rv2i(rv).(map[int64]uintptr), false, d) + } +} +func (f fastpathT) DecMapInt64UintptrX(vp *map[int64]uintptr, d *Decoder) { + v, changed := f.DecMapInt64UintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64UintptrV(v map[int64]uintptr, canChange bool, + d *Decoder) (_ map[int64]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64IntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int) + v, changed := fastpathTV.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64IntV(rv2i(rv).(map[int64]int), false, d) + } +} +func (f fastpathT) DecMapInt64IntX(vp *map[int64]int, d *Decoder) { + v, changed := f.DecMapInt64IntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64IntV(v map[int64]int, canChange bool, + d *Decoder) (_ map[int64]int, changed bool) { + dd, esep := d.d, 
d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int8) + v, changed := fastpathTV.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int8V(rv2i(rv).(map[int64]int8), false, d) + } +} +func (f fastpathT) DecMapInt64Int8X(vp *map[int64]int8, d *Decoder) { + v, changed := f.DecMapInt64Int8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int8V(v map[int64]int8, canChange bool, + d *Decoder) (_ map[int64]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int16) + v, changed := fastpathTV.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int16V(rv2i(rv).(map[int64]int16), false, d) + } +} +func (f fastpathT) DecMapInt64Int16X(vp *map[int64]int16, d *Decoder) { + v, changed := f.DecMapInt64Int16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int16V(v map[int64]int16, canChange bool, + d *Decoder) (_ map[int64]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 10) + v = make(map[int64]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, 
changed +} + +func (d *Decoder) fastpathDecMapInt64Int32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int32) + v, changed := fastpathTV.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int32V(rv2i(rv).(map[int64]int32), false, d) + } +} +func (f fastpathT) DecMapInt64Int32X(vp *map[int64]int32, d *Decoder) { + v, changed := f.DecMapInt64Int32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int32V(v map[int64]int32, canChange bool, + d *Decoder) (_ map[int64]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Int64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]int64) + v, changed := fastpathTV.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Int64V(rv2i(rv).(map[int64]int64), false, d) + } +} +func (f fastpathT) DecMapInt64Int64X(vp *map[int64]int64, d *Decoder) { + v, changed := f.DecMapInt64Int64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Int64V(v map[int64]int64, canChange bool, + d *Decoder) (_ map[int64]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float32) + v, changed := fastpathTV.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float32V(rv2i(rv).(map[int64]float32), false, d) + } +} +func (f fastpathT) DecMapInt64Float32X(vp *map[int64]float32, d *Decoder) { + v, changed := f.DecMapInt64Float32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float32V(v map[int64]float32, canChange bool, + d *Decoder) (_ map[int64]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := 
decInferLen(containerLen, d.h.MaxInitLen, 12) + v = make(map[int64]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64Float64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]float64) + v, changed := fastpathTV.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64Float64V(rv2i(rv).(map[int64]float64), false, d) + } +} +func (f fastpathT) DecMapInt64Float64X(vp *map[int64]float64, d *Decoder) { + v, changed := f.DecMapInt64Float64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64Float64V(v map[int64]float64, canChange bool, + d *Decoder) (_ map[int64]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 16) + v = make(map[int64]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapInt64BoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[int64]bool) + v, changed := fastpathTV.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapInt64BoolV(rv2i(rv).(map[int64]bool), false, d) + } +} +func (f fastpathT) DecMapInt64BoolX(vp *map[int64]bool, d *Decoder) { + v, changed := f.DecMapInt64BoolV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapInt64BoolV(v map[int64]bool, canChange bool, + d *Decoder) (_ map[int64]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[int64]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk int64 + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeInt64() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntfR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == 
reflect.Ptr { + vp := rv2i(rv).(*map[bool]interface{}) + v, changed := fastpathTV.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntfV(rv2i(rv).(map[bool]interface{}), false, d) + } +} +func (f fastpathT) DecMapBoolIntfX(vp *map[bool]interface{}, d *Decoder) { + v, changed := f.DecMapBoolIntfV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntfV(v map[bool]interface{}, canChange bool, + d *Decoder) (_ map[bool]interface{}, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]interface{}, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + mapGet := v != nil && !d.h.MapValueReset && !d.h.InterfaceReset + var mk bool + var mv interface{} + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = nil + } + continue + } + if mapGet { + mv = v[mk] + } else { + mv = nil + } + d.decode(&mv) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolStringR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]string) + v, changed := fastpathTV.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolStringV(rv2i(rv).(map[bool]string), false, d) + } +} +func (f fastpathT) DecMapBoolStringX(vp *map[bool]string, d *Decoder) { + v, changed := f.DecMapBoolStringV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolStringV(v map[bool]string, canChange bool, + d *Decoder) (_ map[bool]string, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 17) + v = make(map[bool]string, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv string + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = "" + } + continue + } + mv = dd.DecodeString() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint) + v, changed := fastpathTV.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintV(rv2i(rv).(map[bool]uint), false, d) + } +} +func (f fastpathT) DecMapBoolUintX(vp *map[bool]uint, d *Decoder) { + v, changed := f.DecMapBoolUintV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintV(v map[bool]uint, canChange bool, + d *Decoder) (_ map[bool]uint, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = 
make(map[bool]uint, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv uint + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint8) + v, changed := fastpathTV.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint8V(rv2i(rv).(map[bool]uint8), false, d) + } +} +func (f fastpathT) DecMapBoolUint8X(vp *map[bool]uint8, d *Decoder) { + v, changed := f.DecMapBoolUint8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint8V(v map[bool]uint8, canChange bool, + d *Decoder) (_ map[bool]uint8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]uint8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv uint8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint8(chkOvf.UintV(dd.DecodeUint64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint16) + v, changed := fastpathTV.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint16V(rv2i(rv).(map[bool]uint16), false, d) + } +} +func (f fastpathT) DecMapBoolUint16X(vp *map[bool]uint16, d *Decoder) { + v, changed := f.DecMapBoolUint16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint16V(v map[bool]uint16, canChange bool, + d *Decoder) (_ map[bool]uint16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]uint16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv uint16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint16(chkOvf.UintV(dd.DecodeUint64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := 
rv2i(rv).(*map[bool]uint32) + v, changed := fastpathTV.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint32V(rv2i(rv).(map[bool]uint32), false, d) + } +} +func (f fastpathT) DecMapBoolUint32X(vp *map[bool]uint32, d *Decoder) { + v, changed := f.DecMapBoolUint32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint32V(v map[bool]uint32, canChange bool, + d *Decoder) (_ map[bool]uint32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]uint32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv uint32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uint32(chkOvf.UintV(dd.DecodeUint64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUint64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uint64) + v, changed := fastpathTV.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUint64V(rv2i(rv).(map[bool]uint64), false, d) + } +} +func (f fastpathT) DecMapBoolUint64X(vp *map[bool]uint64, d *Decoder) { + v, changed := f.DecMapBoolUint64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUint64V(v map[bool]uint64, canChange bool, + d *Decoder) (_ map[bool]uint64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uint64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv uint64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeUint64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolUintptrR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]uintptr) + v, changed := fastpathTV.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolUintptrV(rv2i(rv).(map[bool]uintptr), false, d) + } +} +func (f fastpathT) DecMapBoolUintptrX(vp *map[bool]uintptr, d *Decoder) { + v, changed := f.DecMapBoolUintptrV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolUintptrV(v map[bool]uintptr, canChange bool, + d *Decoder) (_ map[bool]uintptr, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]uintptr, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + 
var mk bool + var mv uintptr + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolIntR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int) + v, changed := fastpathTV.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolIntV(rv2i(rv).(map[bool]int), false, d) + } +} +func (f fastpathT) DecMapBoolIntX(vp *map[bool]int, d *Decoder) { + v, changed := f.DecMapBoolIntV(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolIntV(v map[bool]int, canChange bool, + d *Decoder) (_ map[bool]int, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv int + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int(chkOvf.IntV(dd.DecodeInt64(), intBitsize)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt8R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int8) + v, changed := fastpathTV.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt8V(rv2i(rv).(map[bool]int8), false, d) + } +} +func (f fastpathT) DecMapBoolInt8X(vp *map[bool]int8, d *Decoder) { + v, changed := f.DecMapBoolInt8V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt8V(v map[bool]int8, canChange bool, + d *Decoder) (_ map[bool]int8, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]int8, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv int8 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int8(chkOvf.IntV(dd.DecodeInt64(), 8)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt16R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int16) + v, changed := fastpathTV.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } + } else { + 
fastpathTV.DecMapBoolInt16V(rv2i(rv).(map[bool]int16), false, d) + } +} +func (f fastpathT) DecMapBoolInt16X(vp *map[bool]int16, d *Decoder) { + v, changed := f.DecMapBoolInt16V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt16V(v map[bool]int16, canChange bool, + d *Decoder) (_ map[bool]int16, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 3) + v = make(map[bool]int16, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv int16 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int16(chkOvf.IntV(dd.DecodeInt64(), 16)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int32) + v, changed := fastpathTV.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt32V(rv2i(rv).(map[bool]int32), false, d) + } +} +func (f fastpathT) DecMapBoolInt32X(vp *map[bool]int32, d *Decoder) { + v, changed := f.DecMapBoolInt32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt32V(v map[bool]int32, canChange bool, + d *Decoder) (_ map[bool]int32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]int32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv int32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = int32(chkOvf.IntV(dd.DecodeInt64(), 32)) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolInt64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]int64) + v, changed := fastpathTV.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolInt64V(rv2i(rv).(map[bool]int64), false, d) + } +} +func (f fastpathT) DecMapBoolInt64X(vp *map[bool]int64, d *Decoder) { + v, changed := f.DecMapBoolInt64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolInt64V(v map[bool]int64, canChange bool, + d *Decoder) (_ map[bool]int64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]int64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv int64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + 
dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeInt64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat32R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float32) + v, changed := fastpathTV.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat32V(rv2i(rv).(map[bool]float32), false, d) + } +} +func (f fastpathT) DecMapBoolFloat32X(vp *map[bool]float32, d *Decoder) { + v, changed := f.DecMapBoolFloat32V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat32V(v map[bool]float32, canChange bool, + d *Decoder) (_ map[bool]float32, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 5) + v = make(map[bool]float32, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv float32 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = float32(chkOvf.Float32V(dd.DecodeFloat64())) + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolFloat64R(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]float64) + v, changed := fastpathTV.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolFloat64V(rv2i(rv).(map[bool]float64), false, d) + } +} +func (f fastpathT) DecMapBoolFloat64X(vp *map[bool]float64, d *Decoder) { + v, changed := f.DecMapBoolFloat64V(*vp, true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolFloat64V(v map[bool]float64, canChange bool, + d *Decoder) (_ map[bool]float64, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 9) + v = make(map[bool]float64, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv float64 + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = 0 + } + continue + } + mv = dd.DecodeFloat64() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} + +func (d *Decoder) fastpathDecMapBoolBoolR(f *codecFnInfo, rv reflect.Value) { + if rv.Kind() == reflect.Ptr { + vp := rv2i(rv).(*map[bool]bool) + v, changed := fastpathTV.DecMapBoolBoolV(*vp, true, d) + if changed { + *vp = v + } + } else { + fastpathTV.DecMapBoolBoolV(rv2i(rv).(map[bool]bool), false, d) + } +} +func (f fastpathT) DecMapBoolBoolX(vp *map[bool]bool, d *Decoder) { + v, changed := f.DecMapBoolBoolV(*vp, 
true, d) + if changed { + *vp = v + } +} +func (_ fastpathT) DecMapBoolBoolV(v map[bool]bool, canChange bool, + d *Decoder) (_ map[bool]bool, changed bool) { + dd, esep := d.d, d.hh.hasElemSeparators() + containerLen := dd.ReadMapStart() + if canChange && v == nil { + xlen := decInferLen(containerLen, d.h.MaxInitLen, 2) + v = make(map[bool]bool, xlen) + changed = true + } + if containerLen == 0 { + dd.ReadMapEnd() + return v, changed + } + var mk bool + var mv bool + hasLen := containerLen > 0 + for j := 0; (hasLen && j < containerLen) || !(hasLen || dd.CheckBreak()); j++ { + if esep { + dd.ReadMapElemKey() + } + mk = dd.DecodeBool() + if esep { + dd.ReadMapElemValue() + } + if dd.TryDecodeAsNil() { + if v == nil { + } else if d.h.DeleteOnNilMapValue { + delete(v, mk) + } else { + v[mk] = false + } + continue + } + mv = dd.DecodeBool() + if v != nil { + v[mk] = mv + } + } + dd.ReadMapEnd() + return v, changed +} diff --git a/vendor/github.com/ugorji/go/codec/fast-path.not.go b/vendor/github.com/ugorji/go/codec/fast-path.not.go new file mode 100644 index 0000000000..f11b4674f8 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/fast-path.not.go @@ -0,0 +1,47 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build notfastpath + +package codec + +import "reflect" + +const fastpathEnabled = false + +// The generated fast-path code is very large, and adds a few seconds to the build time. +// This causes test execution, execution of small tools which use codec, etc +// to take a long time. +// +// To mitigate, we now support the notfastpath tag. +// This tag disables fastpath during build, allowing for faster build, test execution, +// short-program runs, etc. + +func fastpathDecodeTypeSwitch(iv interface{}, d *Decoder) bool { return false } +func fastpathEncodeTypeSwitch(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchSlice(iv interface{}, e *Encoder) bool { return false } +func fastpathEncodeTypeSwitchMap(iv interface{}, e *Encoder) bool { return false } +func fastpathDecodeSetZeroTypeSwitch(iv interface{}) bool { return false } + +type fastpathT struct{} +type fastpathE struct { + rtid uintptr + rt reflect.Type + encfn func(*Encoder, *codecFnInfo, reflect.Value) + decfn func(*Decoder, *codecFnInfo, reflect.Value) +} +type fastpathA [0]fastpathE + +func (x fastpathA) index(rtid uintptr) int { return -1 } + +func (_ fastpathT) DecSliceUint8V(v []uint8, canChange bool, d *Decoder) (_ []uint8, changed bool) { + fn := d.cfer().get(uint8SliceTyp, true, true) + d.kSlice(&fn.i, reflect.ValueOf(&v).Elem()) + return v, true +} + +var fastpathAV fastpathA +var fastpathTV fastpathT + +// ---- +type TestMammoth2Wrapper struct{} // to allow testMammoth work in notfastpath mode diff --git a/vendor/github.com/ugorji/go/codec/gen-helper.generated.go b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go new file mode 100644 index 0000000000..917d282837 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen-helper.generated.go @@ -0,0 +1,335 @@ +/* // +build ignore */ + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// Code generated from gen-helper.go.tmpl - DO NOT EDIT. + +package codec + +import ( + "encoding" + "reflect" +) + +// GenVersion is the current version of codecgen. +const GenVersion = 8 + +// This file is used to generate helper code for codecgen. 
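+//
+// For illustration only (this sketch is not part of the generated output;
+// MyType and its Name field are hypothetical), a codecgen-emitted encode
+// method built on these helpers has roughly this shape:
+//
+//	func (x *MyType) CodecEncodeSelf(e *Encoder) {
+//		z, r := GenHelperEncoder(e) // z: genHelperEncoder, r: genHelperEncDriver
+//		_, _ = z, r
+//		r.WriteMapStart(1)
+//		r.WriteMapElemKey()
+//		r.EncodeString(cUTF8, `name`)
+//		r.WriteMapElemValue()
+//		r.EncodeString(cUTF8, x.Name)
+//		r.WriteMapEnd()
+//	}
+//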
+// The values here i.e. genHelper(En|De)coder are not to be used directly by
+// library users. They WILL change continuously and without notice.
+//
+// To help enforce this, we create an unexported type with exported members.
+// The only way to get the type is via the one exported type that we control (somewhat).
+//
+// When static codecs are created for types, they will use this value
+// to perform encoding or decoding of primitives or known slice or map types.
+
+// GenHelperEncoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperEncoder(e *Encoder) (ge genHelperEncoder, ee genHelperEncDriver) {
+	ge = genHelperEncoder{e: e}
+	ee = genHelperEncDriver{encDriver: e.e}
+	return
+}
+
+// GenHelperDecoder is exported so that it can be used externally by codecgen.
+//
+// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.
+func GenHelperDecoder(d *Decoder) (gd genHelperDecoder, dd genHelperDecDriver) {
+	gd = genHelperDecoder{d: d}
+	dd = genHelperDecDriver{decDriver: d.d}
+	return
+}
+
+type genHelperEncDriver struct {
+	encDriver
+}
+
+func (x genHelperEncDriver) EncodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperEncDriver) EncStructFieldKey(keyType valueType, s string) {
+	encStructFieldKey(x.encDriver, keyType, s)
+}
+func (x genHelperEncDriver) EncodeSymbol(s string) {
+	x.encDriver.EncodeString(cUTF8, s)
+}
+
+type genHelperDecDriver struct {
+	decDriver
+	C checkOverflow
+}
+
+func (x genHelperDecDriver) DecodeBuiltin(rt uintptr, v interface{}) {}
+func (x genHelperDecDriver) DecStructFieldKey(keyType valueType, buf *[decScratchByteArrayLen]byte) []byte {
+	return decStructFieldKey(x.decDriver, keyType, buf)
+}
+func (x genHelperDecDriver) DecodeInt(bitsize uint8) (i int64) {
+	return x.C.IntV(x.decDriver.DecodeInt64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeUint(bitsize uint8) (ui uint64) {
+	return x.C.UintV(x.decDriver.DecodeUint64(), bitsize)
+}
+func (x genHelperDecDriver) DecodeFloat(chkOverflow32 bool) (f float64) {
+	f = x.DecodeFloat64()
+	if chkOverflow32 && chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+func (x genHelperDecDriver) DecodeFloat32As64() (f float64) {
+	f = x.DecodeFloat64()
+	if chkOvf.Float32(f) {
+		panicv.errorf("float32 overflow: %v", f)
+	}
+	return
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperEncoder struct {
+	M must
+	e *Encoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+type genHelperDecoder struct {
+	C checkOverflow
+	d *Decoder
+	F fastpathT
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBasicHandle() *BasicHandle {
+	return f.e.h
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncBinary() bool {
+	return f.e.be // f.e.hh.isBinaryEncoding()
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) IsJSONHandle() bool {
+	return f.e.js
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE*
+func (f genHelperEncoder) EncFallback(iv interface{}) {
+	// println(">>>>>>>>> EncFallback")
+	// f.e.encodeI(iv, false, false)
+	f.e.encodeValue(reflect.ValueOf(iv), nil, false)
+}
+
+// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE.
*DO NOT USE* +func (f genHelperEncoder) EncTextMarshal(iv encoding.TextMarshaler) { + bs, fnerr := iv.MarshalText() + f.e.marshal(bs, fnerr, false, cUTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncJSONMarshal(iv jsonMarshaler) { + bs, fnerr := iv.MarshalJSON() + f.e.marshal(bs, fnerr, true, cUTF8) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncBinaryMarshal(iv encoding.BinaryMarshaler) { + bs, fnerr := iv.MarshalBinary() + f.e.marshal(bs, fnerr, false, cRAW) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncRaw(iv Raw) { f.e.rawBytes(iv) } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperEncoder) TimeRtidIfBinc() uintptr { +// if _, ok := f.e.hh.(*BincHandle); ok { +// return timeTypId +// } +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.e.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperEncoder) EncExtension(v interface{}, xfFn *extTypeTagFn) { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) HasExtensions() bool { + return len(f.e.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperEncoder) EncExt(v interface{}) (r bool) { + if xfFn := f.e.h.getExt(i2rtid(v)); xfFn != nil { + f.e.e.EncodeExt(v, xfFn.tag, xfFn.ext, f.e) + return true + } + return false +} + +// ---------------- DECODER FOLLOWS ----------------- + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBasicHandle() *BasicHandle { + return f.d.h +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinary() bool { + return f.d.be // f.d.hh.isBinaryEncoding() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSwallow() { f.d.swallow() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchBuffer() []byte { + return f.d.b[:] +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecScratchArrayBuffer() *[decScratchByteArrayLen]byte { + return &f.d.b +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +func (f genHelperDecoder) DecFallback(iv interface{}, chkPtr bool) { + // println(">>>>>>>>> DecFallback") + rv := reflect.ValueOf(iv) + if chkPtr { + rv = f.d.ensureDecodeable(rv) + } + f.d.decodeValue(rv, nil, false) + // f.d.decodeValueFallback(rv) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecSliceHelperStart() (decSliceHelper, int) { + return f.d.decSliceHelperStart() +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecStructFieldNotFound(index int, name string) { + f.d.structFieldNotFound(index, name) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecArrayCannotExpand(sliceLen, streamLen int) { + f.d.arrayCannotExpand(sliceLen, streamLen) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecTextUnmarshal(tm encoding.TextUnmarshaler) { + fnerr := tm.UnmarshalText(f.d.d.DecodeStringAsBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecJSONUnmarshal(tm jsonUnmarshaler) { + // bs := f.dd.DecodeStringAsBytes() + // grab the bytes to be read, as UnmarshalJSON needs the full JSON so as to unmarshal it itself. + fnerr := tm.UnmarshalJSON(f.d.nextValueBytes()) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecBinaryUnmarshal(bm encoding.BinaryUnmarshaler) { + fnerr := bm.UnmarshalBinary(f.d.d.DecodeBytes(nil, true)) + if fnerr != nil { + panic(fnerr) + } +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecRaw() []byte { return f.d.rawBytes() } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: builtin no longer supported - so we make this method a no-op, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) TimeRtidIfBinc() (v uintptr) { return } + +// func (f genHelperDecoder) TimeRtidIfBinc() uintptr { +// // Note: builtin is no longer supported - so make this a no-op +// if _, ok := f.d.hh.(*BincHandle); ok { +// return timeTypId +// } +// return 0 +// } + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) IsJSONHandle() bool { + return f.d.js +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) I2Rtid(v interface{}) uintptr { + return i2rtid(v) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) Extension(rtid uintptr) (xfn *extTypeTagFn) { + return f.d.h.getExt(rtid) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecExtension(v interface{}, xfFn *extTypeTagFn) { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) HasExtensions() bool { + return len(f.d.h.extHandle) != 0 +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. 
*DO NOT USE* +// +// Deprecated: No longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) DecExt(v interface{}) (r bool) { + if xfFn := f.d.h.getExt(i2rtid(v)); xfFn != nil { + f.d.d.DecodeExt(v, xfFn.tag, xfFn.ext) + return true + } + return false +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +func (f genHelperDecoder) DecInferLen(clen, maxlen, unit int) (rvlen int) { + return decInferLen(clen, maxlen, unit) +} + +// FOR USE BY CODECGEN ONLY. IT *WILL* CHANGE WITHOUT NOTICE. *DO NOT USE* +// +// Deprecated: no longer used, +// but leave in-place so that old generated files continue to work without regeneration. +func (f genHelperDecoder) StringView(v []byte) string { return stringView(v) } diff --git a/vendor/github.com/ugorji/go/codec/gen.generated.go b/vendor/github.com/ugorji/go/codec/gen.generated.go new file mode 100644 index 0000000000..240ba9f8c7 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.generated.go @@ -0,0 +1,164 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// DO NOT EDIT. THIS FILE IS AUTO-GENERATED FROM gen-dec-(map|array).go.tmpl + +const genDecMapTmpl = ` +{{var "v"}} := *{{ .Varname }} +{{var "l"}} := r.ReadMapStart() +{{var "bh"}} := z.DecBasicHandle() +if {{var "v"}} == nil { + {{var "rl"}} := z.DecInferLen({{var "l"}}, {{var "bh"}}.MaxInitLen, {{ .Size }}) + {{var "v"}} = make(map[{{ .KTyp }}]{{ .Typ }}, {{var "rl"}}) + *{{ .Varname }} = {{var "v"}} +} +var {{var "mk"}} {{ .KTyp }} +var {{var "mv"}} {{ .Typ }} +var {{var "mg"}}, {{var "mdn"}} {{if decElemKindPtr}}, {{var "ms"}}, {{var "mok"}}{{end}} bool +if {{var "bh"}}.MapValueReset { + {{if decElemKindPtr}}{{var "mg"}} = true + {{else if decElemKindIntf}}if !{{var "bh"}}.InterfaceReset { {{var "mg"}} = true } + {{else if not decElemKindImmutable}}{{var "mg"}} = true + {{end}} } +if {{var "l"}} != 0 { +{{var "hl"}} := {{var "l"}} > 0 + for {{var "j"}} := 0; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + r.ReadMapElemKey() {{/* z.DecSendContainerState(codecSelfer_containerMapKey{{ .Sfx }}) */}} + {{ $x := printf "%vmk%v" .TempVar .Rand }}{{ decLineVarK $x }} +{{ if eq .KTyp "interface{}" }}{{/* // special case if a byte array. */}}if {{var "bv"}}, {{var "bok"}} := {{var "mk"}}.([]byte); {{var "bok"}} { + {{var "mk"}} = string({{var "bv"}}) + }{{ end }}{{if decElemKindPtr}} + {{var "ms"}} = true{{end}} + if {{var "mg"}} { + {{if decElemKindPtr}}{{var "mv"}}, {{var "mok"}} = {{var "v"}}[{{var "mk"}}] + if {{var "mok"}} { + {{var "ms"}} = false + } {{else}}{{var "mv"}} = {{var "v"}}[{{var "mk"}}] {{end}} + } {{if not decElemKindImmutable}}else { {{var "mv"}} = {{decElemZero}} }{{end}} + r.ReadMapElemValue() {{/* z.DecSendContainerState(codecSelfer_containerMapValue{{ .Sfx }}) */}} + {{var "mdn"}} = false + {{ $x := printf "%vmv%v" .TempVar .Rand }}{{ $y := printf "%vmdn%v" .TempVar .Rand }}{{ decLineVar $x $y }} + if {{var "mdn"}} { + if {{ var "bh" }}.DeleteOnNilMapValue { delete({{var "v"}}, {{var "mk"}}) } else { {{var "v"}}[{{var "mk"}}] = {{decElemZero}} } + } else if {{if decElemKindPtr}} {{var "ms"}} && {{end}} {{var "v"}} != nil { + {{var "v"}}[{{var "mk"}}] = {{var "mv"}} + } +} +} // else len==0: TODO: Should we clear map entries? 
+r.ReadMapEnd() {{/* z.DecSendContainerState(codecSelfer_containerMapEnd{{ .Sfx }}) */}} +` + +const genDecListTmpl = ` +{{var "v"}} := {{if not isArray}}*{{end}}{{ .Varname }} +{{var "h"}}, {{var "l"}} := z.DecSliceHelperStart() {{/* // helper, containerLenS */}}{{if not isArray}} +var {{var "c"}} bool {{/* // changed */}} +_ = {{var "c"}}{{end}} +if {{var "l"}} == 0 { + {{if isSlice }}if {{var "v"}} == nil { + {{var "v"}} = []{{ .Typ }}{} + {{var "c"}} = true + } else if len({{var "v"}}) != 0 { + {{var "v"}} = {{var "v"}}[:0] + {{var "c"}} = true + } {{else if isChan }}if {{var "v"}} == nil { + {{var "v"}} = make({{ .CTyp }}, 0) + {{var "c"}} = true + } {{end}} +} else { + {{var "hl"}} := {{var "l"}} > 0 + var {{var "rl"}} int + _ = {{var "rl"}} + {{if isSlice }} if {{var "hl"}} { + if {{var "l"}} > cap({{var "v"}}) { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + if {{var "rl"}} <= cap({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "rl"}}] + } else { + {{var "v"}} = make([]{{ .Typ }}, {{var "rl"}}) + } + {{var "c"}} = true + } else if {{var "l"}} != len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "l"}}] + {{var "c"}} = true + } + } {{end}} + var {{var "j"}} int + // var {{var "dn"}} bool + for ; ({{var "hl"}} && {{var "j"}} < {{var "l"}}) || !({{var "hl"}} || r.CheckBreak()); {{var "j"}}++ { + {{if not isArray}} if {{var "j"}} == 0 && {{var "v"}} == nil { + if {{var "hl"}} { + {{var "rl"}} = z.DecInferLen({{var "l"}}, z.DecBasicHandle().MaxInitLen, {{ .Size }}) + } else { + {{var "rl"}} = {{if isSlice}}8{{else if isChan}}64{{end}} + } + {{var "v"}} = make({{if isSlice}}[]{{ .Typ }}{{else if isChan}}{{.CTyp}}{{end}}, {{var "rl"}}) + {{var "c"}} = true + }{{end}} + {{var "h"}}.ElemContainerState({{var "j"}}) + {{/* {{var "dn"}} = r.TryDecodeAsNil() */}}{{/* commented out, as decLineVar handles this already each time */}} + {{if isChan}}{{ $x := printf "%[1]vvcx%[2]v" .TempVar .Rand }}var {{$x}} {{ .Typ }} + {{ decLineVar $x }} + {{var "v"}} <- {{ $x }} + // println(">>>> sending ", {{ $x }}, " into ", {{var "v"}}) // TODO: remove this + {{else}}{{/* // if indefinite, etc, then expand the slice if necessary */}} + var {{var "db"}} bool + if {{var "j"}} >= len({{var "v"}}) { + {{if isSlice }} {{var "v"}} = append({{var "v"}}, {{ zero }}) + {{var "c"}} = true + {{else}} z.DecArrayCannotExpand(len(v), {{var "j"}}+1); {{var "db"}} = true + {{end}} + } + if {{var "db"}} { + z.DecSwallow() + } else { + {{ $x := printf "%[1]vv%[2]v[%[1]vj%[2]v]" .TempVar .Rand }}{{ decLineVar $x }} + } + {{end}} + } + {{if isSlice}} if {{var "j"}} < len({{var "v"}}) { + {{var "v"}} = {{var "v"}}[:{{var "j"}}] + {{var "c"}} = true + } else if {{var "j"}} == 0 && {{var "v"}} == nil { + {{var "v"}} = make([]{{ .Typ }}, 0) + {{var "c"}} = true + } {{end}} +} +{{var "h"}}.End() +{{if not isArray }}if {{var "c"}} { + *{{ .Varname }} = {{var "v"}} +}{{end}} +` + +const genEncChanTmpl = ` +{{.Label}}: +switch timeout{{.Sfx}} := z.EncBasicHandle().ChanRecvTimeout; { +case timeout{{.Sfx}} == 0: // only consume available + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{ .Slice }} = append({{.Slice}}, b{{.Sfx}}) + default: + break {{.Label}} + } + } +case timeout{{.Sfx}} > 0: // consume until timeout + tt{{.Sfx}} := time.NewTimer(timeout{{.Sfx}}) + for { + select { + case b{{.Sfx}} := <-{{.Chan}}: + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + case <-tt{{.Sfx}}.C: + // close(tt.C) + break {{.Label}} + } + } +default: // consume until close + for b{{.Sfx}} := range 
{{.Chan}} { + {{.Slice}} = append({{.Slice}}, b{{.Sfx}}) + } +} +` diff --git a/vendor/github.com/ugorji/go/codec/gen.go b/vendor/github.com/ugorji/go/codec/gen.go new file mode 100644 index 0000000000..b4c4031ff4 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/gen.go @@ -0,0 +1,2139 @@ +// +build codecgen.exec + +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/base64" + "errors" + "fmt" + "go/format" + "io" + "io/ioutil" + "math/rand" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "text/template" + "time" + "unicode" + "unicode/utf8" +) + +// --------------------------------------------------- +// codecgen supports the full cycle of reflection-based codec: +// - RawExt +// - Raw +// - Extensions +// - (Binary|Text|JSON)(Unm|M)arshal +// - generic by-kind +// +// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type. +// In those areas, we try to only do reflection or interface-conversion when NECESSARY: +// - Extensions, only if Extensions are configured. +// +// However, codecgen doesn't support the following: +// - Canonical option. (codecgen IGNORES it currently) +// This is just because it has not been implemented. +// +// During encode/decode, Selfer takes precedence. +// A type implementing Selfer will know how to encode/decode itself statically. +// +// The following field types are supported: +// array: [n]T +// slice: []T +// map: map[K]V +// primitive: [u]int[n], float(32|64), bool, string +// struct +// +// --------------------------------------------------- +// Note that a Selfer cannot call (e|d).(En|De)code on itself, +// as this will cause a circular reference, as (En|De)code will call Selfer methods. +// Any type that implements Selfer must implement completely and not fallback to (En|De)code. +// +// In addition, code in this file manages the generation of fast-path implementations of +// encode/decode of slices/maps of primitive keys/values. +// +// Users MUST re-generate their implementations whenever the code shape changes. +// The generated code will panic if it was generated with a version older than the supporting library. +// --------------------------------------------------- +// +// codec framework is very feature rich. +// When encoding or decoding into an interface, it depends on the runtime type of the interface. +// The type of the interface may be a named type, an extension, etc. +// Consequently, we fallback to runtime codec for encoding/decoding interfaces. +// In addition, we fallback for any value which cannot be guaranteed at runtime. +// This allows us support ANY value, including any named types, specifically those which +// do not implement our interfaces (e.g. Selfer). +// +// This explains some slowness compared to other code generation codecs (e.g. msgp). +// This reduction in speed is only seen when your refers to interfaces, +// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} } +// +// codecgen will panic if the file was generated with an old version of the library in use. +// +// Note: +// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil. +// This way, there isn't a function call overhead just to see that we should not enter a block of code. +// +// Note: +// codecgen-generated code depends on the variables defined by fast-path.generated.go. 
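+// (specifically, emitted code dispatches through z.F to the per-type fastpath
+// helpers, e.g. z.F.EncSliceStringV(...), and these only exist when fastpath
+// is compiled in; fast-path.not.go provides bare stubs, not those helpers).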
+// consequently, you cannot run with tags "codecgen notfastpath". + +// GenVersion is the current version of codecgen. +// +// NOTE: Increment this value each time codecgen changes fundamentally. +// Fundamental changes are: +// - helper methods change (signature change, new ones added, some removed, etc) +// - codecgen command line changes +// +// v1: Initial Version +// v2: +// v3: Changes for Kubernetes: +// changes in signature of some unpublished helper methods and codecgen cmdline arguments. +// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen) +// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections. +// v6: removed unsafe from gen, and now uses codecgen.exec tag +// v7: +// v8: current - we now maintain compatibility with old generated code. +const genVersion = 8 + +const ( + genCodecPkg = "codec1978" + genTempVarPfx = "yy" + genTopLevelVarName = "x" + + // ignore canBeNil parameter, and always set to true. + // This is because nil can appear anywhere, so we should always check. + genAnythingCanBeNil = true + + // if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function; + // else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals + // are not executed a lot. + // + // From testing, it didn't make much difference in runtime, so keep as true (one function only) + genUseOneFunctionForDecStructMap = true +) + +type genStructMapStyle uint8 + +const ( + genStructMapStyleConsolidated genStructMapStyle = iota + genStructMapStyleLenPrefix + genStructMapStyleCheckBreak +) + +var ( + errGenAllTypesSamePkg = errors.New("All types must be in the same package") + errGenExpectArrayOrMap = errors.New("unexpected type. Expecting array/map/slice") + + genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__") + genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`) +) + +type genBuf struct { + buf []byte +} + +func (x *genBuf) s(s string) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) b(s []byte) *genBuf { x.buf = append(x.buf, s...); return x } +func (x *genBuf) v() string { return string(x.buf) } +func (x *genBuf) f(s string, args ...interface{}) { x.s(fmt.Sprintf(s, args...)) } +func (x *genBuf) reset() { + if x.buf != nil { + x.buf = x.buf[:0] + } +} + +// genRunner holds some state used during a Gen run. 
+type genRunner struct { + w io.Writer // output + c uint64 // counter used for generating varsfx + t []reflect.Type // list of types to run selfer on + + tc reflect.Type // currently running selfer on this type + te map[uintptr]bool // types for which the encoder has been created + td map[uintptr]bool // types for which the decoder has been created + cp string // codec import path + + im map[string]reflect.Type // imports to add + imn map[string]string // package names of imports to add + imc uint64 // counter for import numbers + + is map[reflect.Type]struct{} // types seen during import search + bp string // base PkgPath, for which we are generating for + + cpfx string // codec package prefix + + tm map[reflect.Type]struct{} // types for which enc/dec must be generated + ts []reflect.Type // types for which enc/dec must be generated + + xs string // top level variable/constant suffix + hn string // fn helper type name + + ti *TypeInfos + // rr *rand.Rand // random generator for file-specific types + + nx bool // no extensions +} + +// Gen will write a complete go file containing Selfer implementations for each +// type passed. All the types must be in the same package. +// +// Library users: DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE. +func Gen(w io.Writer, buildTags, pkgName, uid string, noExtensions bool, + ti *TypeInfos, typ ...reflect.Type) { + // All types passed to this method do not have a codec.Selfer method implemented directly. + // codecgen already checks the AST and skips any types that define the codec.Selfer methods. + // Consequently, there's no need to check and trim them if they implement codec.Selfer + + if len(typ) == 0 { + return + } + x := genRunner{ + w: w, + t: typ, + te: make(map[uintptr]bool), + td: make(map[uintptr]bool), + im: make(map[string]reflect.Type), + imn: make(map[string]string), + is: make(map[reflect.Type]struct{}), + tm: make(map[reflect.Type]struct{}), + ts: []reflect.Type{}, + bp: genImportPath(typ[0]), + xs: uid, + ti: ti, + nx: noExtensions, + } + if x.ti == nil { + x.ti = defTypeInfos + } + if x.xs == "" { + rr := rand.New(rand.NewSource(time.Now().UnixNano())) + x.xs = strconv.FormatInt(rr.Int63n(9999), 10) + } + + // gather imports first: + x.cp = genImportPath(reflect.TypeOf(x)) + x.imn[x.cp] = genCodecPkg + for _, t := range typ { + // fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name()) + if genImportPath(t) != x.bp { + panic(errGenAllTypesSamePkg) + } + x.genRefPkgs(t) + } + if buildTags != "" { + x.line("// +build " + buildTags) + x.line("") + } + x.line(` + +// Code generated by codecgen - DO NOT EDIT. + +`) + x.line("package " + pkgName) + x.line("") + x.line("import (") + if x.cp != x.bp { + x.cpfx = genCodecPkg + "." 
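+		// we are generating for a package other than the codec package itself,
+		// so the emitted file must import it under the codec1978 alias and
+		// qualify every helper identifier, e.g. z, r := codec1978.GenHelperEncoder(e)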
+ x.linef("%s \"%s\"", genCodecPkg, x.cp) + } + // use a sorted set of im keys, so that we can get consistent output + imKeys := make([]string, 0, len(x.im)) + for k := range x.im { + imKeys = append(imKeys, k) + } + sort.Strings(imKeys) + for _, k := range imKeys { // for k, _ := range x.im { + if k == x.imn[k] { + x.linef("\"%s\"", k) + } else { + x.linef("%s \"%s\"", x.imn[k], k) + } + } + // add required packages + for _, k := range [...]string{"runtime", "errors", "strconv"} { // "reflect", "fmt" + if _, ok := x.im[k]; !ok { + x.line("\"" + k + "\"") + } + } + x.line(")") + x.line("") + + x.line("const (") + x.linef("// ----- content types ----") + x.linef("codecSelferCcUTF8%s = %v", x.xs, int64(cUTF8)) + x.linef("codecSelferCcRAW%s = %v", x.xs, int64(cRAW)) + x.linef("// ----- value types used ----") + for _, vt := range [...]valueType{ + valueTypeArray, valueTypeMap, valueTypeString, + valueTypeInt, valueTypeUint, valueTypeFloat} { + x.linef("codecSelferValueType%s%s = %v", vt.String(), x.xs, int64(vt)) + } + + x.linef("codecSelferBitsize%s = uint8(32 << (^uint(0) >> 63))", x.xs) + x.line(")") + x.line("var (") + x.line("errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)") + x.line(")") + x.line("") + + x.hn = "codecSelfer" + x.xs + x.line("type " + x.hn + " struct{}") + x.line("") + + x.varsfxreset() + x.line("func init() {") + x.linef("if %sGenVersion != %v {", x.cpfx, genVersion) + x.line("_, file, _, _ := runtime.Caller(0)") + x.outf(`panic("codecgen version mismatch: current: %v, need " + strconv.FormatInt(int64(%sGenVersion), 10) + ". Re-generate file: " + file)`, genVersion, x.cpfx) + // x.out(`panic(fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `) + // x.linef(`%v, %sGenVersion, file))`, genVersion, x.cpfx) + x.linef("}") + x.line("if false { // reference the types, but skip this branch at build/run time") + // x.line("_ = strconv.ParseInt") + var n int + // for k, t := range x.im { + for _, k := range imKeys { + t := x.im[k] + x.linef("var v%v %s.%s", n, x.imn[k], t.Name()) + n++ + } + if n > 0 { + x.out("_") + for i := 1; i < n; i++ { + x.out(", _") + } + x.out(" = v0") + for i := 1; i < n; i++ { + x.outf(", v%v", i) + } + } + x.line("} ") // close if false + x.line("}") // close init + x.line("") + + // generate rest of type info + for _, t := range typ { + x.tc = t + x.selfer(true) + x.selfer(false) + } + + for _, t := range x.ts { + rtid := rt2id(t) + // generate enc functions for all these slice/map types. + x.varsfxreset() + x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(true) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.encListFallback("v", t) + case reflect.Map: + x.encMapFallback("v", t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + + // generate dec functions for all these slice/map types. 
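+		// e.g. for a hypothetical named slice type MyInts ([]int64), this emits,
+		// alongside the enc helper above, something of the shape:
+		//	func (x codecSelfer1234) decMyInts(v *MyInts, d *codec1978.Decoder) { ... }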
+ x.varsfxreset() + x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx) + x.genRequiredMethodVars(false) + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + x.decListFallback("v", rtid, t) + case reflect.Map: + x.decMapFallback("v", rtid, t) + default: + panic(errGenExpectArrayOrMap) + } + x.line("}") + x.line("") + } + + x.line("") +} + +func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool { + // return varname != genTopLevelVarName && t != x.tc + // the only time we checkForSelfer is if we are not at the TOP of the generated code. + return varname != genTopLevelVarName +} + +func (x *genRunner) arr2str(t reflect.Type, s string) string { + if t.Kind() == reflect.Array { + return s + } + return "" +} + +func (x *genRunner) genRequiredMethodVars(encode bool) { + x.line("var h " + x.hn) + if encode { + x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)") + } else { + x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)") + } + x.line("_, _, _ = h, z, r") +} + +func (x *genRunner) genRefPkgs(t reflect.Type) { + if _, ok := x.is[t]; ok { + return + } + x.is[t] = struct{}{} + tpkg, tname := genImportPath(t), t.Name() + if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' { + if _, ok := x.im[tpkg]; !ok { + x.im[tpkg] = t + if idx := strings.LastIndex(tpkg, "/"); idx < 0 { + x.imn[tpkg] = tpkg + } else { + x.imc++ + x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false) + } + } + } + switch t.Kind() { + case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan: + x.genRefPkgs(t.Elem()) + case reflect.Map: + x.genRefPkgs(t.Elem()) + x.genRefPkgs(t.Key()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' { + x.genRefPkgs(t.Field(i).Type) + } + } + } +} + +func (x *genRunner) varsfx() string { + x.c++ + return strconv.FormatUint(x.c, 10) +} + +func (x *genRunner) varsfxreset() { + x.c = 0 +} + +func (x *genRunner) out(s string) { + _, err := io.WriteString(x.w, s) + if err != nil { + panic(err) + } +} + +func (x *genRunner) outf(s string, params ...interface{}) { + _, err := fmt.Fprintf(x.w, s, params...) + if err != nil { + panic(err) + } +} + +func (x *genRunner) line(s string) { + x.out(s) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) linef(s string, params ...interface{}) { + x.outf(s, params...) + if len(s) == 0 || s[len(s)-1] != '\n' { + x.out("\n") + } +} + +func (x *genRunner) genTypeName(t reflect.Type) (n string) { + // defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }() + + // if the type has a PkgPath, which doesn't match the current package, + // then include it. 
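+	// e.g. (illustrative) for a map[string]*other.T declared in a different
+	// package, this returns "map[string]*pkg1_other.T", using the import
+	// alias recorded in x.imn by genRefPkgs.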
+ // We cannot depend on t.String() because it includes current package, + // or t.PkgPath because it includes full import path, + // + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "*" + t = t.Elem() + } + if tn := t.Name(); tn != "" { + return ptrPfx + x.genTypeNamePrim(t) + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem()) + case reflect.Slice: + return ptrPfx + "[]" + x.genTypeName(t.Elem()) + case reflect.Array: + return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem()) + case reflect.Chan: + return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem()) + default: + if t == intfTyp { + return ptrPfx + "interface{}" + } else { + return ptrPfx + x.genTypeNamePrim(t) + } + } +} + +func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) { + if t.Name() == "" { + return t.String() + } else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) { + return t.Name() + } else { + return x.imn[genImportPath(t)] + "." + t.Name() + // return t.String() // best way to get the package name inclusive + } +} + +func (x *genRunner) genZeroValueR(t reflect.Type) string { + // if t is a named type, w + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func, + reflect.Slice, reflect.Map, reflect.Invalid: + return "nil" + case reflect.Bool: + return "false" + case reflect.String: + return `""` + case reflect.Struct, reflect.Array: + return x.genTypeName(t) + "{}" + default: // all numbers + return "0" + } +} + +func (x *genRunner) genMethodNameT(t reflect.Type) (s string) { + return genMethodNameT(t, x.tc) +} + +func (x *genRunner) selfer(encode bool) { + t := x.tc + t0 := t + // always make decode use a pointer receiver, + // and structs/arrays always use a ptr receiver (encode|decode) + isptr := !encode || t.Kind() == reflect.Array || (t.Kind() == reflect.Struct && t != timeTyp) + x.varsfxreset() + + fnSigPfx := "func (" + genTopLevelVarName + " " + if isptr { + fnSigPfx += "*" + } + fnSigPfx += x.genTypeName(t) + x.out(fnSigPfx) + + if isptr { + t = reflect.PtrTo(t) + } + if encode { + x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {") + x.genRequiredMethodVars(true) + x.encVar(genTopLevelVarName, t) + } else { + x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + // do not use decVar, as there is no need to check TryDecodeAsNil + // or way to elegantly handle that, and also setting it to a + // non-nil value doesn't affect the pointer passed. 
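+		// (illustratively, decVar would wrap the decode below in
+		// "if r.TryDecodeAsNil() { ... } else { ... }", a check that buys
+		// nothing when decoding into the receiver x itself)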
+ // x.decVar(genTopLevelVarName, t, false) + x.dec(genTopLevelVarName, t0, true) + } + x.line("}") + x.line("") + + if encode || t0.Kind() != reflect.Struct { + return + } + + // write is containerMap + if genUseOneFunctionForDecStructMap { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleConsolidated) + x.line("}") + x.line("") + } else { + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleLenPrefix) + x.line("}") + x.line("") + + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructMap(genTopLevelVarName, "l", rt2id(t0), t0, genStructMapStyleCheckBreak) + x.line("}") + x.line("") + } + + // write containerArray + x.out(fnSigPfx) + x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {") + x.genRequiredMethodVars(false) + x.decStructArray(genTopLevelVarName, "l", "return", rt2id(t0), t0) + x.line("}") + x.line("") + +} + +// used for chan, array, slice, map +func (x *genRunner) xtraSM(varname string, t reflect.Type, encode, isptr bool) { + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if encode { + x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), ptrPfx, x.genTypeName(t), varname) + } else { + x.linef("h.dec%s((*%s)(%s%s), d)", x.genMethodNameT(t), x.genTypeName(t), addrPfx, varname) + } + x.registerXtraT(t) +} + +func (x *genRunner) registerXtraT(t reflect.Type) { + // recursively register the types + if _, ok := x.tm[t]; ok { + return + } + var tkey reflect.Type + switch t.Kind() { + case reflect.Chan, reflect.Slice, reflect.Array: + case reflect.Map: + tkey = t.Key() + default: + return + } + x.tm[t] = struct{}{} + x.ts = append(x.ts, t) + // check if this refers to any xtra types eg. a slice of array: add the array + x.registerXtraT(t.Elem()) + if tkey != nil { + x.registerXtraT(tkey) + } +} + +// encVar will encode a variable. +// The parameter, t, is the reflect.Type of the variable itself +func (x *genRunner) encVar(varname string, t reflect.Type) { + // fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t) + var checkNil bool + switch t.Kind() { + case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan: + checkNil = true + } + if checkNil { + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + } + + switch t.Kind() { + case reflect.Ptr: + telem := t.Elem() + tek := telem.Kind() + if tek == reflect.Array || (tek == reflect.Struct && telem != timeTyp) { + x.enc(varname, genNonPtr(t)) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := *" + varname) + x.enc(genTempVarPfx+i, genNonPtr(t)) + case reflect.Struct, reflect.Array: + if t == timeTyp { + x.enc(varname, t) + break + } + i := x.varsfx() + x.line(genTempVarPfx + i + " := &" + varname) + x.enc(genTempVarPfx+i, t) + default: + x.enc(varname, t) + } + + if checkNil { + x.line("}") + } + +} + +// enc will encode a variable (varname) of type t, where t represents T. 
+// if t is !time.Time and t is of kind reflect.Struct or reflect.Array, varname is of type *T +// (to prevent copying), +// else t is of type T +func (x *genRunner) enc(varname string, t reflect.Type) { + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // We call CodecEncodeSelf if one of the following are honored: + // - the type already implements Selfer, call that + // - the type has a Selfer implementation just created, use that + // - the type is in the list of the ones we will generate for, but it is not currently being generated + + mi := x.varsfx() + // tptr := reflect.PtrTo(t) + tk := t.Kind() + if x.checkForSelfer(t, varname) { + if tk == reflect.Array || (tk == reflect.Struct && rtid != timeTypId) { // varname is of type *T + // if tptr.Implements(selferTyp) || t.Implements(selferTyp) { + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } else { // varname is of type T + if ti2.cs { // t.Implements(selferTyp) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } else if ti2.csp { // tptr.Implements(selferTyp) { + x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname) + x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi) + return + } + } + + if _, ok := x.te[rtid]; ok { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecEncodeSelf(e)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.te[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, RawExt, Raw + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + if t == timeTyp { + x.linef("} else { r.EncodeTime(%s)", varname) + return + } + if t == rawTyp { + x.linef("} else { z.EncRaw(%s)", varname) + return + } + if t == rawExtTyp { + x.linef("} else { r.EncodeRawExt(%s, e)", varname) + return + } + // only check for extensions if the type is named, and has a packagePath. 
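+	// the emitted guard has this shape (illustrative; yyxt1 is a generated temp var):
+	//	} else if yyxt1 := z.Extension(z.I2Rtid(x.F)); yyxt1 != nil { z.EncExtension(x.F, yyxt1)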
+ var arrayOrStruct = tk == reflect.Array || tk == reflect.Struct // meaning varname if of type *T + if !x.nx && genImportPath(t) != "" && t.Name() != "" { + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.EncExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + if arrayOrStruct { // varname is of type *T + if ti2.bm || ti2.bmp { // t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } + if ti2.jm || ti2.jmp { // t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.tm || ti2.tmp { // t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } + } else { // varname is of type T + if ti2.bm { // t.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(%v) ", varname) + } else if ti2.bmp { // tptr.Implements(binaryMarshalerTyp) { + x.linef("} else if z.EncBinary() { z.EncBinaryMarshal(&%v) ", varname) + } + if ti2.jm { // t.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", varname) + } else if ti2.jmp { // tptr.Implements(jsonMarshalerTyp) { + x.linef("} else if !z.EncBinary() && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", varname) + } else if ti2.tm { // t.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(%v) ", varname) + } else if ti2.tmp { // tptr.Implements(textMarshalerTyp) { + x.linef("} else if !z.EncBinary() { z.EncTextMarshal(&%v) ", varname) + } + } + x.line("} else {") + + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(int64(" + varname + "))") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(uint64(" + varname + "))") + case reflect.Float32: + x.line("r.EncodeFloat32(float32(" + varname + "))") + case reflect.Float64: + x.line("r.EncodeFloat64(float64(" + varname + "))") + case reflect.Bool: + x.line("r.EncodeBool(bool(" + varname + "))") + case reflect.String: + x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", string(" + varname + "))") + case reflect.Chan: + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + case reflect.Array: + x.xtraSM(varname, t, true, true) + case reflect.Slice: + // if nil, call dedicated function + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. + if rtid == uint8SliceTypId { + x.line("r.EncodeStringBytes(codecSelferCcRAW" + x.xs + ", []byte(" + varname + "))") + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encListFallback(varname, rtid, t) + } + case reflect.Map: + // if nil, call dedicated function + // if a known fastpath map, call dedicated function + // else write encode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Encoder.encode(XXX) on it. 
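+		// e.g. for map[string]string the fastpath dispatch emitted here is
+		// (illustrative): z.F.EncMapStringStringV(x.F, e)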
+ // x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ") + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", e)") + } else { + x.xtraSM(varname, t, true, false) + // x.encMapFallback(varname, rtid, t) + } + case reflect.Struct: + if !inlist { + delete(x.te, rtid) + x.line("z.EncFallback(" + varname + ")") + break + } + x.encStruct(varname, rtid, t) + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.EncFallback(" + varname + ")") + } +} + +func (x *genRunner) encZero(t reflect.Type) { + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + x.line("r.EncodeInt(0)") + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + x.line("r.EncodeUint(0)") + case reflect.Float32: + x.line("r.EncodeFloat32(0)") + case reflect.Float64: + x.line("r.EncodeFloat64(0)") + case reflect.Bool: + x.line("r.EncodeBool(false)") + case reflect.String: + x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + `, "")`) + default: + x.line("r.EncodeNil()") + } +} + +func (x *genRunner) encOmitEmptyLine(t2 reflect.StructField, varname string, buf *genBuf) { + // smartly check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc. + // also, for maps/slices/arrays, check if len ! 0 (not if == zero value) + varname2 := varname + "." + t2.Name + switch t2.Type.Kind() { + case reflect.Struct: + rtid2 := rt2id(t2.Type) + ti2 := x.ti.get(rtid2, t2.Type) + // fmt.Printf(">>>> structfield: omitempty: type: %s, field: %s\n", t2.Type.Name(), t2.Name) + if ti2.rtid == timeTypId { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagIsZeroerPtr) || ti2.isFlag(typeInfoFlagIsZeroer) { + buf.s("!(").s(varname2).s(".IsZero())") + break + } + if ti2.isFlag(typeInfoFlagComparable) { + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + break + } + // buf.s("(") + buf.s("false") + for i, n := 0, t2.Type.NumField(); i < n; i++ { + f := t2.Type.Field(i) + if f.PkgPath != "" { // unexported + continue + } + buf.s(" || ") + x.encOmitEmptyLine(f, varname2, buf) + } + //buf.s(")") + case reflect.Bool: + buf.s(varname2) + case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan: + buf.s("len(").s(varname2).s(") != 0") + default: + buf.s(varname2).s(" != ").s(x.genZeroValueR(t2.Type)) + } +} + +func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) { + // Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. ) + // replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it + + // if t === type currently running selfer on, do for all + ti := x.ti.get(rtid, t) + i := x.varsfx() + sepVarname := genTempVarPfx + "sep" + i + numfieldsvar := genTempVarPfx + "q" + i + ti2arrayvar := genTempVarPfx + "r" + i + struct2arrvar := genTempVarPfx + "2arr" + i + + x.line(sepVarname + " := !z.EncBinary()") + x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar) + x.linef("_, _ = %s, %s", sepVarname, struct2arrvar) + x.linef("const %s bool = %v // struct tag has 'toArray'", ti2arrayvar, ti.toArray) + + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + + // var nn int + // due to omitEmpty, we need to calculate the + // number of non-empty things we write out first. + // This is required as we need to pre-determine the size of the container, + // to support length-prefixing. 
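+	// e.g. (illustrative) for struct{ A int `codec:"a,omitempty"`; B string `codec:"b,omitempty"` }
+	// the bitmap emitted below has the shape: var yyq2 = [2]bool{x.A != 0, x.B != ""}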
+ if ti.anyOmitEmpty { + x.linef("var %s = [%v]bool{ // should field at this index be written?", numfieldsvar, len(tisfi)) + + for j, si := range tisfi { + _ = j + if !si.omitEmpty() { + // x.linef("%s[%v] = true // %s", numfieldsvar, j, si.fieldName) + x.linef("true, // %s", si.fieldName) + // nn++ + continue + } + var t2 reflect.StructField + var omitline genBuf + { + t2typ := t + varname3 := varname + // go through the loop, record the t2 field explicitly, + // and gather the omit line if embedded in pointers. + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + // do not include actual field in the omit line. + // that is done subsequently (right after - below). + if uint8(ij+1) < si.nis && t2typ.Kind() == reflect.Ptr { + omitline.s(varname3).s(" != nil && ") + } + } + } + x.encOmitEmptyLine(t2, varname, &omitline) + x.linef("%s, // %s", omitline.v(), si.fieldName) + } + x.line("}") + x.linef("_ = %s", numfieldsvar) + } + // x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.linef("r.WriteArrayStart(%d)", len(tisfi)) + x.linef("} else {") // if not ti.toArray + if ti.anyOmitEmpty { + // nn = 0 + // x.linef("var %snn%s = %v", genTempVarPfx, i, nn) + x.linef("var %snn%s int", genTempVarPfx, i) + x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i) + x.linef("r.WriteMapStart(%snn%s)", genTempVarPfx, i) + x.linef("%snn%s = %v", genTempVarPfx, i, 0) + } else { + x.linef("r.WriteMapStart(%d)", len(tisfi)) + } + x.line("}") // close if not StructToArray + + for j, si := range tisfi { + i := x.varsfx() + isNilVarName := genTempVarPfx + "n" + i + var labelUsed bool + var t2 reflect.StructField + { + t2typ := t + varname3 := varname + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." 
+ t2.Name + if t2typ.Kind() == reflect.Ptr { + if !labelUsed { + x.line("var " + isNilVarName + " bool") + } + x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ") + x.line("goto LABEL" + i) + x.line("}") + labelUsed = true + // "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }") + } + } + // t2 = t.FieldByIndex(si.is) + } + if labelUsed { + x.line("LABEL" + i + ":") + } + // if the type of the field is a Selfer, or one of the ones + + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray + if labelUsed { + x.linef("if %s { r.WriteArrayElem(); r.EncodeNil() } else { ", isNilVarName) + } + x.line("r.WriteArrayElem()") + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.encVar(varname+"."+t2.Name, t2.Type) + if si.omitEmpty() { + x.linef("} else {") + x.encZero(t2.Type) + x.linef("}") + } + if labelUsed { + x.line("}") + } + + x.linef("} else {") // if not ti.toArray + + if si.omitEmpty() { + x.linef("if %s[%v] {", numfieldsvar, j) + } + x.line("r.WriteMapElemKey()") + + // x.line("r.EncodeString(codecSelferCcUTF8" + x.xs + ", `" + si.encName + "`)") + // emulate EncStructFieldKey + switch ti.keyType { + case valueTypeInt: + x.linef("r.EncodeInt(z.M.Int(strconv.ParseInt(`%s`, 10, 64)))", si.encName) + case valueTypeUint: + x.linef("r.EncodeUint(z.M.Uint(strconv.ParseUint(`%s`, 10, 64)))", si.encName) + case valueTypeFloat: + x.linef("r.EncodeFloat64(z.M.Float(strconv.ParseFloat(`%s`, 64)))", si.encName) + default: // string + x.linef("r.EncodeString(codecSelferCcUTF8%s, `%s`)", x.xs, si.encName) + } + // x.linef("r.EncStructFieldKey(codecSelferValueType%s%s, `%s`)", ti.keyType.String(), x.xs, si.encName) + x.line("r.WriteMapElemValue()") + if labelUsed { + x.line("if " + isNilVarName + " { r.EncodeNil() } else { ") + x.encVar(varname+"."+t2.Name, t2.Type) + x.line("}") + } else { + x.encVar(varname+"."+t2.Name, t2.Type) + } + if si.omitEmpty() { + x.line("}") + } + x.linef("} ") // end if/else ti.toArray + } + x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray { + x.line("r.WriteArrayEnd()") + x.line("} else {") + x.line("r.WriteMapEnd()") + x.line("}") + +} + +func (x *genRunner) encListFallback(varname string, t reflect.Type) { + elemBytes := t.Elem().Kind() == reflect.Uint8 + if t.AssignableTo(uint8SliceTyp) { + x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, varname) + return + } + if t.Kind() == reflect.Array && elemBytes { + x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, ((*[%d]byte)(%s))[:])", x.xs, t.Len(), varname) + return + } + i := x.varsfx() + if t.Kind() == reflect.Chan { + type ts struct { + Label, Chan, Slice, Sfx string + } + tm, err := template.New("").Parse(genEncChanTmpl) + if err != nil { + panic(err) + } + x.linef("if %s == nil { r.EncodeNil() } else { ", varname) + x.linef("var sch%s []%s", i, x.genTypeName(t.Elem())) + err = tm.Execute(x.w, &ts{"Lsch" + i, varname, "sch" + i, i}) + if err != nil { + panic(err) + } + // x.linef("%s = sch%s", varname, i) + if elemBytes { + x.linef("r.EncodeStringBytes(codecSelferCcRAW%s, []byte(%s))", x.xs, "sch"+i) + x.line("}") + return + } + varname = "sch" + i + } + + x.line("r.WriteArrayStart(len(" + varname + "))") + x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname) + x.line("r.WriteArrayElem()") + + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteArrayEnd()") + if t.Kind() == reflect.Chan { + x.line("}") + } +} + +func (x *genRunner) encMapFallback(varname string, t reflect.Type) { + // TODO: expand 
this to handle canonical. + i := x.varsfx() + x.line("r.WriteMapStart(len(" + varname + "))") + x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname) + x.line("r.WriteMapElemKey()") + x.encVar(genTempVarPfx+"k"+i, t.Key()) + x.line("r.WriteMapElemValue()") + x.encVar(genTempVarPfx+"v"+i, t.Elem()) + x.line("}") + x.line("r.WriteMapEnd()") +} + +func (x *genRunner) decVarInitPtr(varname, nilvar string, t reflect.Type, si *structFieldInfo, + newbuf, nilbuf *genBuf) (t2 reflect.StructField) { + //we must accommodate anonymous fields, where the embedded field is a nil pointer in the value. + // t2 = t.FieldByIndex(si.is) + t2typ := t + varname3 := varname + t2kind := t2typ.Kind() + var nilbufed bool + if si != nil { + for ij, ix := range si.is { + if uint8(ij) == si.nis { + break + } + for t2typ.Kind() == reflect.Ptr { + t2typ = t2typ.Elem() + } + t2 = t2typ.Field(int(ix)) + t2typ = t2.Type + varname3 = varname3 + "." + t2.Name + t2kind = t2typ.Kind() + if t2kind != reflect.Ptr { + continue + } + if newbuf != nil { + newbuf.f("if %s == nil { %s = new(%s) }\n", varname3, varname3, x.genTypeName(t2typ.Elem())) + } + if nilbuf != nil { + if !nilbufed { + nilbuf.s("if true") + nilbufed = true + } + nilbuf.s(" && ").s(varname3).s(" != nil") + } + } + } + // if t2typ.Kind() == reflect.Ptr { + // varname3 = varname3 + t2.Name + // } + if nilbuf != nil { + if nilbufed { + nilbuf.s(" { ") + } + if nilvar != "" { + nilbuf.s(nilvar).s(" = true") + } else if tk := t2typ.Kind(); tk == reflect.Ptr { + if strings.IndexByte(varname3, '.') != -1 || strings.IndexByte(varname3, '[') != -1 { + nilbuf.s(varname3).s(" = nil") + } else { + nilbuf.s("*").s(varname3).s(" = ").s(x.genZeroValueR(t2typ.Elem())) + } + } else { + nilbuf.s(varname3).s(" = ").s(x.genZeroValueR(t2typ)) + } + if nilbufed { + nilbuf.s("}") + } + } + return t2 +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVarMain(varname, rand string, t reflect.Type, checkNotNil bool) { + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + var varname2 string + if t.Kind() != reflect.Ptr { + if t.PkgPath() != "" || !x.decTryAssignPrimitive(varname, t, false) { + x.dec(varname, t, false) + } + } else { + if checkNotNil { + x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem())) + } + // Ensure we set underlying ptr to a non-nil value (so we can deref to it later). + // There's a chance of a **T in here which is nil. + var ptrPfx string + for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() { + ptrPfx += "*" + if checkNotNil { + x.linef("if %s%s == nil { %s%s = new(%s)}", + ptrPfx, varname, ptrPfx, varname, x.genTypeName(t)) + } + } + // Should we create temp var if a slice/map indexing? No. dec(...) can now handle it. 
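+	// e.g. (illustrative) for a field x.F of type **T this emits:
+	//	yyz1 := *x.F // deref to the inner *T, then decode through yyz1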
+ + if ptrPfx == "" { + x.dec(varname, t, true) + } else { + varname2 = genTempVarPfx + "z" + rand + x.line(varname2 + " := " + ptrPfx + varname) + x.dec(varname2, t, true) + } + } +} + +// decVar takes a variable called varname, of type t +func (x *genRunner) decVar(varname, nilvar string, t reflect.Type, canBeNil, checkNotNil bool) { + i := x.varsfx() + + // We only encode as nil if a nillable value. + // This removes some of the wasted checks for TryDecodeAsNil. + // We need to think about this more, to see what happens if omitempty, etc + // cause a nil value to be stored when something is expected. + // This could happen when decoding from a struct encoded as an array. + // For that, decVar should be called with canNil=true, to force true as its value. + + if !canBeNil { + canBeNil = genAnythingCanBeNil || !genIsImmutable(t) + } + + if canBeNil { + var buf genBuf + x.decVarInitPtr(varname, nilvar, t, nil, nil, &buf) + x.linef("if r.TryDecodeAsNil() { %s } else {", buf.buf) + } else { + x.line("// cannot be nil") + } + + x.decVarMain(varname, i, t, checkNotNil) + + if canBeNil { + x.line("} ") + } +} + +// dec will decode a variable (varname) of type t or ptrTo(t) if isptr==true. +// t is always a basetype (i.e. not of kind reflect.Ptr). +func (x *genRunner) dec(varname string, t reflect.Type, isptr bool) { + // assumptions: + // - the varname is to a pointer already. No need to take address of it + // - t is always a baseType T (not a *T, etc). + rtid := rt2id(t) + ti2 := x.ti.get(rtid, t) + // tptr := reflect.PtrTo(t) + if x.checkForSelfer(t, varname) { + if ti2.cs || ti2.csp { // t.Implements(selferTyp) || tptr.Implements(selferTyp) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + if _, ok := x.td[rtid]; ok { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + } + + inlist := false + for _, t0 := range x.t { + if t == t0 { + inlist = true + if x.checkForSelfer(t, varname) { + x.line(varname + ".CodecDecodeSelf(d)") + return + } + break + } + } + + var rtidAdded bool + if t == x.tc { + x.td[rtid] = true + rtidAdded = true + } + + // check if + // - type is time.Time, Raw, RawExt + // - the type implements (Text|JSON|Binary)(Unm|M)arshal + + mi := x.varsfx() + // x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi) + // x.linef("_ = %sm%s", genTempVarPfx, mi) + x.line("if false {") //start if block + defer func() { x.line("}") }() //end if block + + var ptrPfx, addrPfx string + if isptr { + ptrPfx = "*" + } else { + addrPfx = "&" + } + if t == timeTyp { + x.linef("} else { %s%v = r.DecodeTime()", ptrPfx, varname) + return + } + if t == rawTyp { + x.linef("} else { %s%v = z.DecRaw()", ptrPfx, varname) + return + } + + if t == rawExtTyp { + x.linef("} else { r.DecodeExt(%s%v, 0, nil)", addrPfx, varname) + return + } + + // only check for extensions if the type is named, and has a packagePath. 
+ if !x.nx && genImportPath(t) != "" && t.Name() != "" { + // first check if extensions are configured, before doing the interface conversion + // x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname) + yy := fmt.Sprintf("%sxt%s", genTempVarPfx, mi) + x.linef("} else if %s := z.Extension(z.I2Rtid(%s)); %s != nil { z.DecExtension(%s, %s) ", yy, varname, yy, varname, yy) + } + + if ti2.bu || ti2.bup { // t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) { + x.linef("} else if z.DecBinary() { z.DecBinaryUnmarshal(%s%v) ", addrPfx, varname) + } + if ti2.ju || ti2.jup { // t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() && z.IsJSONHandle() { z.DecJSONUnmarshal(%s%v)", addrPfx, varname) + } else if ti2.tu || ti2.tup { // t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) { + x.linef("} else if !z.DecBinary() { z.DecTextUnmarshal(%s%v)", addrPfx, varname) + } + + x.line("} else {") + + if x.decTryAssignPrimitive(varname, t, isptr) { + return + } + + switch t.Kind() { + case reflect.Array, reflect.Chan: + x.xtraSM(varname, t, false, isptr) + case reflect.Slice: + // if a []uint8, call dedicated function + // if a known fastpath slice, call dedicated function + // else write decode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Decoder.decode(XXX) on it. + if rtid == uint8SliceTypId { + x.linef("%s%s = r.DecodeBytes(%s(%s[]byte)(%s), false)", + ptrPfx, varname, ptrPfx, ptrPfx, varname) + } else if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decListFallback(varname, rtid, false, t) + } + case reflect.Map: + // if a known fastpath map, call dedicated function + // else write decode function in-line. + // - if elements are primitives or Selfers, call dedicated function on each member. + // - else call Decoder.decode(XXX) on it. + if fastpathAV.index(rtid) != -1 { + g := x.newGenV(t) + x.linef("z.F.%sX(%s%s, d)", g.MethodNamePfx("Dec", false), addrPfx, varname) + } else { + x.xtraSM(varname, t, false, isptr) + // x.decMapFallback(varname, rtid, t) + } + case reflect.Struct: + if inlist { + // no need to create temp variable if isptr, or x.F or x[F] + if isptr || strings.IndexByte(varname, '.') != -1 || strings.IndexByte(varname, '[') != -1 { + x.decStruct(varname, rtid, t) + } else { + varname2 := genTempVarPfx + "j" + mi + x.line(varname2 + " := &" + varname) + x.decStruct(varname2, rtid, t) + } + } else { + // delete(x.td, rtid) + x.line("z.DecFallback(" + addrPfx + varname + ", false)") + } + default: + if rtidAdded { + delete(x.te, rtid) + } + x.line("z.DecFallback(" + addrPfx + varname + ", true)") + } +} + +func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type, isptr bool) (done bool) { + // This should only be used for exact primitives (i.e. un-named types). + // Named types may be implementations of Selfer, Unmarshaler, etc. + // They should be handled by dec(...)
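+ // For example (illustrative): for a plain int32 variable v, the matching case below emits: + // v = (int32)(z.C.IntV(r.DecodeInt64(), 32))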
+ + var ptr string + if isptr { + ptr = "*" + } + switch t.Kind() { + case reflect.Int: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Int8: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Int16: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Int32: + x.linef("%s%s = (%s)(z.C.IntV(r.DecodeInt64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Int64: + x.linef("%s%s = (%s)(r.DecodeInt64())", ptr, varname, x.genTypeName(t)) + + case reflect.Uint: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + case reflect.Uint8: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 8))", ptr, varname, x.genTypeName(t)) + case reflect.Uint16: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 16))", ptr, varname, x.genTypeName(t)) + case reflect.Uint32: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), 32))", ptr, varname, x.genTypeName(t)) + case reflect.Uint64: + x.linef("%s%s = (%s)(r.DecodeUint64())", ptr, varname, x.genTypeName(t)) + case reflect.Uintptr: + x.linef("%s%s = (%s)(z.C.UintV(r.DecodeUint64(), codecSelferBitsize%s))", ptr, varname, x.genTypeName(t), x.xs) + + case reflect.Float32: + x.linef("%s%s = (%s)(r.DecodeFloat32As64())", ptr, varname, x.genTypeName(t)) + case reflect.Float64: + x.linef("%s%s = (%s)(r.DecodeFloat64())", ptr, varname, x.genTypeName(t)) + + case reflect.Bool: + x.linef("%s%s = (%s)(r.DecodeBool())", ptr, varname, x.genTypeName(t)) + case reflect.String: + x.linef("%s%s = (%s)(r.DecodeString())", ptr, varname, x.genTypeName(t)) + default: + return false + } + return true +} + +func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) { + if t.AssignableTo(uint8SliceTyp) { + x.line("*" + varname + " = r.DecodeBytes(*((*[]byte)(" + varname + ")), false)") + return + } + if t.Kind() == reflect.Array && t.Elem().Kind() == reflect.Uint8 { + x.linef("r.DecodeBytes( ((*[%d]byte)(%s))[:], true)", t.Len(), varname) + return + } + type tstruc struct { + TempVar string + Rand string + Varname string + CTyp string + Typ string + Immutable bool + Size int + } + telem := t.Elem() + ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())} + + funcs := make(template.FuncMap) + + funcs["decLineVar"] = func(varname string) string { + x.decVar(varname, "", telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + funcs["zero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["isArray"] = func() bool { + return t.Kind() == reflect.Array + } + funcs["isSlice"] = func() bool { + return t.Kind() == reflect.Slice + } + funcs["isChan"] = func() bool { + return t.Kind() == reflect.Chan + } + tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) { + type tstruc struct { + TempVar string + Sfx string + Rand string + Varname string + KTyp string + Typ string + Size int + } + telem := t.Elem() + tkey := t.Key() + ts := tstruc{ + genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey), + x.genTypeName(telem), int(telem.Size() + tkey.Size()), + } + + funcs := make(template.FuncMap) 
+ funcs["decElemZero"] = func() string { + return x.genZeroValueR(telem) + } + funcs["decElemKindImmutable"] = func() bool { + return genIsImmutable(telem) + } + funcs["decElemKindPtr"] = func() bool { + return telem.Kind() == reflect.Ptr + } + funcs["decElemKindIntf"] = func() bool { + return telem.Kind() == reflect.Interface + } + funcs["decLineVarK"] = func(varname string) string { + x.decVar(varname, "", tkey, false, true) + return "" + } + funcs["decLineVar"] = func(varname, decodedNilVarname string) string { + x.decVar(varname, decodedNilVarname, telem, false, true) + return "" + } + funcs["var"] = func(s string) string { + return ts.TempVar + s + ts.Rand + } + + tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl) + if err != nil { + panic(err) + } + if err = tm.Execute(x.w, &ts); err != nil { + panic(err) + } +} + +func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) { + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. decStruct expects same thing. + x.line("switch (" + kName + ") {") + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.line("case \"" + si.encName + "\":") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + x.line("default:") + // pass the slice here, so that the string will not escape, and maybe save allocation + x.line("z.DecStructFieldNotFound(-1, " + kName + ")") + x.line("} // end switch " + kName) +} + +func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) { + tpfx := genTempVarPfx + ti := x.ti.get(rtid, t) + i := x.varsfx() + kName := tpfx + "s" + i + + switch style { + case genStructMapStyleLenPrefix: + x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i) + case genStructMapStyleCheckBreak: + x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i) + default: // 0, otherwise. + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i) + x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname) + x.line("} else { if r.CheckBreak() { break }; }") + } + x.line("r.ReadMapElemKey()") + + // emulate decstructfieldkey + switch ti.keyType { + case valueTypeInt: + x.linef("%s := z.StringView(strconv.AppendInt(z.DecScratchArrayBuffer()[:0], r.DecodeInt64(), 10))", kName) + case valueTypeUint: + x.linef("%s := z.StringView(strconv.AppendUint(z.DecScratchArrayBuffer()[:0], r.DecodeUint64(), 10))", kName) + case valueTypeFloat: + x.linef("%s := z.StringView(strconv.AppendFloat(z.DecScratchArrayBuffer()[:0], r.DecodeFloat64(), 'f', -1, 64))", kName) + default: // string + x.linef("%s := z.StringView(r.DecodeStringAsBytes())", kName) + } + // x.linef("%s := z.StringView(r.DecStructFieldKey(codecSelferValueType%s%s, z.DecScratchArrayBuffer()))", kName, ti.keyType.String(), x.xs) + + x.line("r.ReadMapElemValue()") + x.decStructMapSwitch(kName, varname, rtid, t) + + x.line("} // end for " + tpfx + "j" + i) + x.line("r.ReadMapEnd()") +} + +func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) { + tpfx := genTempVarPfx + i := x.varsfx() + ti := x.ti.get(rtid, t) + tisfi := ti.sfiSrc // always use sequence from file. 
decStruct expects same thing. + x.linef("var %sj%s int", tpfx, i) + x.linef("var %sb%s bool", tpfx, i) // break + x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length + var newbuf, nilbuf genBuf + for _, si := range tisfi { + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { r.ReadArrayEnd(); %s }", tpfx, i, breakString) + x.line("r.ReadArrayElem()") + newbuf.reset() + nilbuf.reset() + t2 := x.decVarInitPtr(varname, "", t, si, &newbuf, &nilbuf) + x.linef("if r.TryDecodeAsNil() { %s } else { %s", nilbuf.buf, newbuf.buf) + x.decVarMain(varname+"."+t2.Name, x.varsfx(), t2.Type, false) + x.line("}") + } + // read remaining values and throw away. + x.line("for {") + x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }", + tpfx, i, tpfx, i, tpfx, i, + tpfx, i, lenvarname, tpfx, i) + x.linef("if %sb%s { break }", tpfx, i) + x.line("r.ReadArrayElem()") + x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i) + x.line("}") + x.line("r.ReadArrayEnd()") +} + +func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) { + // varname MUST be a ptr, or a struct field or a slice element. + i := x.varsfx() + x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i) + x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadMapEnd()") + if genUseOneFunctionForDecStructMap { + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromMap(%sl%s, d)", varname, genTempVarPfx, i) + } else { + x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ") + x.line(varname + ".codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)") + x.line("} else {") + x.line(varname + ".codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)") + } + x.line("}") + + // else if container is array + x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs) + x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()") + x.linef("if %sl%s == 0 {", genTempVarPfx, i) + x.line("r.ReadArrayEnd()") + x.line("} else { ") + x.linef("%s.codecDecodeSelfFromArray(%sl%s, d)", varname, genTempVarPfx, i) + x.line("}") + // else panic + x.line("} else { ") + x.line("panic(errCodecSelferOnlyMapOrArrayEncodeToStruct" + x.xs + ")") + x.line("} ") +} + +// -------- + +type genV struct { + // genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice + MapKey string + Elem string + Primitive string + Size int +} + +func (x *genRunner) newGenV(t reflect.Type) (v genV) { + switch t.Kind() { + case reflect.Slice, reflect.Array: + te := t.Elem() + v.Elem = x.genTypeName(te) + v.Size = int(te.Size()) + case reflect.Map: + te, tk := t.Elem(), t.Key() + v.Elem = x.genTypeName(te) + v.MapKey = x.genTypeName(tk) + v.Size = int(te.Size() + tk.Size()) + default: + panic("unexpected type for newGenV. Requires map or slice type") + } + return +} + +func (x *genV) MethodNamePfx(prefix string, prim bool) string { + var name []byte + if prefix != "" { + name = append(name, prefix...) + } + if prim { + name = append(name, genTitleCaseName(x.Primitive)...) + } else { + if x.MapKey == "" { + name = append(name, "Slice"...) + } else { + name = append(name, "Map"...) + name = append(name, genTitleCaseName(x.MapKey)...) + } + name = append(name, genTitleCaseName(x.Elem)...) 
+ } + return string(name) + +} + +// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise. +// +// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled, +// where PkgPath returns the full path, including the vendoring prefix that should have been stripped. +// We strip it here. +func genImportPath(t reflect.Type) (s string) { + s = t.PkgPath() + if genCheckVendor { + // HACK: always handle vendoring. It should typically be on in go 1.6, 1.7 + s = genStripVendor(s) + } + return +} + +// A Go identifier is (letter|_)[letter|number|_]* +func genGoIdentifier(s string, checkFirstChar bool) string { + b := make([]byte, 0, len(s)) + t := make([]byte, 4) + var n int + for i, r := range s { + if checkFirstChar && i == 0 && !unicode.IsLetter(r) { + b = append(b, '_') + } + // r must be unicode_letter, unicode_digit or _ + if unicode.IsLetter(r) || unicode.IsDigit(r) { + n = utf8.EncodeRune(t, r) + b = append(b, t[:n]...) + } else { + b = append(b, '_') + } + } + return string(b) +} + +func genNonPtr(t reflect.Type) reflect.Type { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + return t +} + +func genTitleCaseName(s string) string { + switch s { + case "interface{}", "interface {}": + return "Intf" + default: + return strings.ToUpper(s[0:1]) + s[1:] + } +} + +func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) { + var ptrPfx string + for t.Kind() == reflect.Ptr { + ptrPfx += "Ptrto" + t = t.Elem() + } + tstr := t.String() + if tn := t.Name(); tn != "" { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + return ptrPfx + tn + } else { + if genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + switch t.Kind() { + case reflect.Map: + return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef) + case reflect.Slice: + return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef) + case reflect.Array: + return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef) + case reflect.Chan: + var cx string + switch t.ChanDir() { + case reflect.SendDir: + cx = "ChanSend" + case reflect.RecvDir: + cx = "ChanRecv" + default: + cx = "Chan" + } + return ptrPfx + cx + genMethodNameT(t.Elem(), tRef) + default: + if t == intfTyp { + return ptrPfx + "Interface" + } else { + if tRef != nil && genImportPath(t) == genImportPath(tRef) { + if t.Name() != "" { + return ptrPfx + t.Name() + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } else { + // best way to get the package name inclusive + // return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + // return ptrPfx + genBase64enc.EncodeToString([]byte(tstr)) + if t.Name() != "" && genQNameRegex.MatchString(tstr) { + return ptrPfx + strings.Replace(tstr, ".", "_", 1000) + } else { + return ptrPfx + genCustomTypeName(tstr) + } + } + } + } +} + +// genCustomTypeName base64-encodes the t.String() value in such a way +// that it can be used within a function name.
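+// The trailing '=' padding is stripped in the loop below, because '=' is not valid inside +// a Go identifier (the base64 alphabet used by genBase64enc is assumed to be identifier-safe).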
+func genCustomTypeName(tstr string) string { + len2 := genBase64enc.EncodedLen(len(tstr)) + bufx := make([]byte, len2) + genBase64enc.Encode(bufx, []byte(tstr)) + for i := len2 - 1; i >= 0; i-- { + if bufx[i] == '=' { + len2-- + } else { + break + } + } + return string(bufx[:len2]) +} + +func genIsImmutable(t reflect.Type) (v bool) { + return isImmutableKind(t.Kind()) +} + +type genInternal struct { + Version int + Values []genV +} + +func (x genInternal) FastpathLen() (l int) { + for _, v := range x.Values { + if v.Primitive == "" && !(v.MapKey == "" && v.Elem == "uint8") { + l++ + } + } + return +} + +func genInternalZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + return "nil" + case "bool": + return "false" + case "string": + return `""` + default: + return "0" + } +} + +var genInternalNonZeroValueIdx [5]uint64 +var genInternalNonZeroValueStrs = [2][5]string{ + {`"string-is-an-interface"`, "true", `"some-string"`, "11.1", "33"}, + {`"string-is-an-interface-2"`, "true", `"some-string-2"`, "22.2", "44"}, +} + +func genInternalNonZeroValue(s string) string { + switch s { + case "interface{}", "interface {}": + genInternalNonZeroValueIdx[0]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[0]%2][0] // return string, to remove ambiguity + case "bool": + genInternalNonZeroValueIdx[1]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[1]%2][1] + case "string": + genInternalNonZeroValueIdx[2]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[2]%2][2] + case "float32", "float64", "float", "double": + genInternalNonZeroValueIdx[3]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[3]%2][3] + default: + genInternalNonZeroValueIdx[4]++ + return genInternalNonZeroValueStrs[genInternalNonZeroValueIdx[4]%2][4] + } +} + +func genInternalEncCommandAsString(s string, vname string) string { + switch s { + case "uint", "uint8", "uint16", "uint32", "uint64": + return "ee.EncodeUint(uint64(" + vname + "))" + case "int", "int8", "int16", "int32", "int64": + return "ee.EncodeInt(int64(" + vname + "))" + case "string": + return "ee.EncodeString(cUTF8, " + vname + ")" + case "float32": + return "ee.EncodeFloat32(" + vname + ")" + case "float64": + return "ee.EncodeFloat64(" + vname + ")" + case "bool": + return "ee.EncodeBool(" + vname + ")" + // case "symbol": + // return "ee.EncodeSymbol(" + vname + ")" + default: + return "e.encode(" + vname + ")" + } +} + +func genInternalDecCommandAsString(s string) string { + switch s { + case "uint": + return "uint(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "uint8": + return "uint8(chkOvf.UintV(dd.DecodeUint64(), 8))" + case "uint16": + return "uint16(chkOvf.UintV(dd.DecodeUint64(), 16))" + case "uint32": + return "uint32(chkOvf.UintV(dd.DecodeUint64(), 32))" + case "uint64": + return "dd.DecodeUint64()" + case "uintptr": + return "uintptr(chkOvf.UintV(dd.DecodeUint64(), uintBitsize))" + case "int": + return "int(chkOvf.IntV(dd.DecodeInt64(), intBitsize))" + case "int8": + return "int8(chkOvf.IntV(dd.DecodeInt64(), 8))" + case "int16": + return "int16(chkOvf.IntV(dd.DecodeInt64(), 16))" + case "int32": + return "int32(chkOvf.IntV(dd.DecodeInt64(), 32))" + case "int64": + return "dd.DecodeInt64()" + + case "string": + return "dd.DecodeString()" + case "float32": + return "float32(chkOvf.Float32V(dd.DecodeFloat64()))" + case "float64": + return "dd.DecodeFloat64()" + case "bool": + return "dd.DecodeBool()" + default: + panic(errors.New("gen internal: unknown type for 
decode: " + s)) + } +} + +func genInternalSortType(s string, elem bool) string { + for _, v := range [...]string{"int", "uint", "float", "bool", "string"} { + if strings.HasPrefix(s, v) { + if elem { + if v == "int" || v == "uint" || v == "float" { + return v + "64" + } else { + return v + } + } + return v + "Slice" + } + } + panic("sorttype: unexpected type: " + s) +} + +func genStripVendor(s string) string { + // HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later. + // if s contains /vendor/ OR startsWith vendor/, then return everything after it. + const vendorStart = "vendor/" + const vendorInline = "/vendor/" + if i := strings.LastIndex(s, vendorInline); i >= 0 { + s = s[i+len(vendorInline):] + } else if strings.HasPrefix(s, vendorStart) { + s = s[len(vendorStart):] + } + return s +} + +// var genInternalMu sync.Mutex +var genInternalV = genInternal{Version: genVersion} +var genInternalTmplFuncs template.FuncMap +var genInternalOnce sync.Once + +func genInternalInit() { + types := [...]string{ + "interface{}", + "string", + "float32", + "float64", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "bool", + } + // keep as slice, so it is in specific iteration order. + // Initial order was uint64, string, interface{}, int, int64 + mapvaltypes := [...]string{ + "interface{}", + "string", + "uint", + "uint8", + "uint16", + "uint32", + "uint64", + "uintptr", + "int", + "int8", + "int16", + "int32", + "int64", + "float32", + "float64", + "bool", + } + wordSizeBytes := int(intBitsize) / 8 + + mapvaltypes2 := map[string]int{ + "interface{}": 2 * wordSizeBytes, + "string": 2 * wordSizeBytes, + "uint": 1 * wordSizeBytes, + "uint8": 1, + "uint16": 2, + "uint32": 4, + "uint64": 8, + "uintptr": 1 * wordSizeBytes, + "int": 1 * wordSizeBytes, + "int8": 1, + "int16": 2, + "int32": 4, + "int64": 8, + "float32": 4, + "float64": 8, + "bool": 1, + } + var gt = genInternal{Version: genVersion} + + // For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function + for _, s := range types { + gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]}) + // if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already. + // gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + // } + gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]}) + if _, ok := mapvaltypes2[s]; !ok { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]}) + } + for _, ms := range mapvaltypes { + gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]}) + } + } + + funcs := make(template.FuncMap) + // funcs["haspfx"] = strings.HasPrefix + funcs["encmd"] = genInternalEncCommandAsString + funcs["decmd"] = genInternalDecCommandAsString + funcs["zerocmd"] = genInternalZeroValue + funcs["nonzerocmd"] = genInternalNonZeroValue + funcs["hasprefix"] = strings.HasPrefix + funcs["sorttype"] = genInternalSortType + + genInternalV = gt + genInternalTmplFuncs = funcs +} + +// genInternalGoFile is used to generate source files from templates. +// It is run by the program author alone. +// Unfortunately, it has to be exported so that it can be called from a command line tool. 
+// *** DO NOT USE *** +func genInternalGoFile(r io.Reader, w io.Writer) (err error) { + genInternalOnce.Do(genInternalInit) + + gt := genInternalV + + t := template.New("").Funcs(genInternalTmplFuncs) + + tmplstr, err := ioutil.ReadAll(r) + if err != nil { + return + } + + if t, err = t.Parse(string(tmplstr)); err != nil { + return + } + + var out bytes.Buffer + err = t.Execute(&out, gt) + if err != nil { + return + } + + bout, err := format.Source(out.Bytes()) + if err != nil { + w.Write(out.Bytes()) // write out if error, so we can still see. + // w.Write(bout) // write out if error, as much as possible, so we can still see. + return + } + w.Write(bout) + return +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go new file mode 100644 index 0000000000..9ddbe20593 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_gte_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = true + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + return reflect.ArrayOf(count, elem) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go new file mode 100644 index 0000000000..c5fcd6697e --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_arrayof_lt_go15.go @@ -0,0 +1,14 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +import "reflect" + +const reflectArrayOfSupported = false + +func reflectArrayOf(count int, elem reflect.Type) reflect.Type { + panic("codec: reflect.ArrayOf unsupported in this go version") +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go new file mode 100644 index 0000000000..bc39d6b719 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_gte_go19.go @@ -0,0 +1,15 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + if size < 0 { + return reflect.MakeMapWithSize(t, 4) + } + return reflect.MakeMapWithSize(t, size) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go new file mode 100644 index 0000000000..cde4cd3725 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_makemap_lt_go19.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. 
+ +// +build !go1.9 + +package codec + +import "reflect" + +func makeMapReflect(t reflect.Type, size int) reflect.Value { + return reflect.MakeMap(t) +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go new file mode 100644 index 0000000000..794133a3cb --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_gte_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = false diff --git a/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go new file mode 100644 index 0000000000..fd92ede355 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_unexportedembeddedptr_lt_go110.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.10 + +package codec + +const allowSetUnexportedEmbeddedPtr = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go new file mode 100644 index 0000000000..8debfa6137 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_unsupported_lt_go14.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.4 + +package codec + +// This codec package will only work for go1.4 and above. +// This is for the following reasons: +// - go 1.4 was released in 2014 +// - go runtime is written fully in go +// - interface only holds pointers +// - reflect.Value is stabilized as 3 words + +func init() { + panic("codec: go 1.3 and below are not supported") +} diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go new file mode 100644 index 0000000000..0f1bb01e5a --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go15.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.5,!go1.6 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go new file mode 100644 index 0000000000..2fb4b057dd --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_eq_go16.go @@ -0,0 +1,10 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.6,!go1.7 + +package codec + +import "os" + +var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") != "0" diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go new file mode 100644 index 0000000000..c5b8155053 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_gte_go17.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. 
All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build go1.7 + +package codec + +const genCheckVendor = true diff --git a/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go new file mode 100644 index 0000000000..837cf240ba --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/goversion_vendor_lt_go15.go @@ -0,0 +1,8 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build !go1.5 + +package codec + +var genCheckVendor = false diff --git a/vendor/github.com/ugorji/go/codec/helper.go b/vendor/github.com/ugorji/go/codec/helper.go new file mode 100644 index 0000000000..bd29895b6d --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper.go @@ -0,0 +1,2414 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// Contains code shared by both encode and decode. + +// Some shared ideas around encoding/decoding +// ------------------------------------------ +// +// If an interface{} is passed, we first do a type assertion to see if it is +// a primitive type or a map/slice of primitive types, and use a fastpath to handle it. +// +// If we start with a reflect.Value, we are already in reflect.Value land and +// will try to grab the function for the underlying Type and directly call that function. +// This is more performant than calling reflect.Value.Interface(). +// +// This still helps us bypass many layers of reflection, and give best performance. +// +// Containers +// ------------ +// Containers in the stream are either associative arrays (key-value pairs) or +// regular arrays (indexed by incrementing integers). +// +// Some streams support indefinite-length containers, and use a breaking +// byte-sequence to denote that the container has come to an end. +// +// Some streams also are text-based, and use explicit separators to denote the +// end/beginning of different values. +// +// During encode, we use a high-level condition to determine how to iterate through +// the container. That decision is based on whether the container is text-based (with +// separators) or binary (without separators). If binary, we do not even call the +// encoding of separators. +// +// During decode, we use a different high-level condition to determine how to iterate +// through the containers. That decision is based on whether the stream contained +// a length prefix, or if it used explicit breaks. If length-prefixed, we assume that +// it has to be binary, and we do not even try to read separators. +// +// Philosophy +// ------------ +// On decode, this codec will update containers appropriately: +// - If struct, update fields from stream into fields of struct. +// If field in stream not found in struct, handle appropriately (based on option). +// If a struct field has no corresponding value in the stream, leave it AS IS. +// If nil in stream, set value to nil/zero value. +// - If map, update map from stream. +// If the stream value is NIL, set the map to nil. +// - if slice, try to update up to length of array in stream. +// if container len is less than stream array length, +// and container cannot be expanded, handled (based on option). +// This means you can decode 4-element stream array into 1-element array. 
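+// For example (a sketch using the public API of this package): +//	// given stream []byte (holding an encoded 4-element array) and h Handle: +//	var dst [1]int +//	// only the first element lands in dst; the remaining three are read and discarded +//	// (or cause an error, based on option). +//	NewDecoderBytes(stream, h).MustDecode(&dst) +//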
+// +// ------------------------------------ +// On encode, the user can specify omitEmpty. This means that the value will be omitted +// if it is the zero value. The problem may occur during decode, where omitted values do not affect +// the value being decoded into. This means that if decoding into a struct with an +// int field with current value=5, and the field is omitted in the stream, then after +// decoding, the value will still be 5 (not 0). +// omitEmpty only works if you guarantee that you always decode into zero-values. +// +// ------------------------------------ +// We could have truncated a map to remove keys not available in the stream, +// or set values in the struct which are not in the stream to their zero values. +// We decided against it because there is no efficient way to do it. +// We may introduce it as an option later. +// However, that will require enabling it for both runtime and code generation modes. +// +// To support truncate, we need to do 2 passes over the container: +// map +// - first collect all keys (e.g. in k1) +// - for each key in stream, mark k1 that the key should not be removed +// - after updating map, do second pass and call delete for all keys in k1 which are not marked +// struct: +// - for each field, track the *typeInfo s1 +// - iterate through all s1, and for each one not marked, set value to zero +// - this involves checking the possible anonymous fields which are nil ptrs. +// too much work. +// +// ------------------------------------------ +// Error Handling is done within the library using panic. +// +// This way, the code doesn't have to keep checking if an error has happened, +// and we don't have to keep sending the error value along with each call +// or storing it in the En|Decoder and checking it constantly along the way. +// +// The disadvantage is that small functions which use panics cannot be inlined. +// The code accounts for that by only using panics behind an interface; +// since interface calls cannot be inlined, this is irrelevant. +// +// We considered storing the error in the En|Decoder. +// - once it has its err field set, it cannot be used again. +// - panicking will be optional, controlled by const flag. +// - code should always check error first and return early. +// We eventually decided against it as it makes the code clumsier to always +// check for these error conditions. + +import ( + "bytes" + "encoding" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + scratchByteArrayLen = 32 + // initCollectionCap = 16 // 32 is defensive. 16 is preferred. + + // Support encoding.(Binary|Text)(Unm|M)arshaler. + // This constant flag will enable or disable it. + supportMarshalInterfaces = true + + // for debugging, set this to false, to catch panic traces. + // Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic. + recoverPanicToErr = true + + // arrayCacheLen is the length of the cache used in encoder or decoder for + // allowing zero-alloc initialization. + arrayCacheLen = 8 + + // size of the cacheline: defaulting to value for archs: amd64, arm64, 386 + // should use "runtime/internal/sys".CacheLineSize, but that is not exposed.
+ cacheLineSize = 64 + + wordSizeBits = 32 << (^uint(0) >> 63) // strconv.IntSize + wordSize = wordSizeBits / 8 + + maxLevelsEmbedding = 15 // use this, so structFieldInfo fits into 8 bytes +) + +var ( + oneByteArr = [1]byte{0} + zeroByteSlice = oneByteArr[:0:0] +) + +var refBitset bitset32 +var pool pooler +var panicv panicHdl + +func init() { + pool.init() + + refBitset.set(byte(reflect.Map)) + refBitset.set(byte(reflect.Ptr)) + refBitset.set(byte(reflect.Func)) + refBitset.set(byte(reflect.Chan)) +} + +type charEncoding uint8 + +const ( + cRAW charEncoding = iota + cUTF8 + cUTF16LE + cUTF16BE + cUTF32LE + cUTF32BE +) + +// valueType is the stream type +type valueType uint8 + +const ( + valueTypeUnset valueType = iota + valueTypeNil + valueTypeInt + valueTypeUint + valueTypeFloat + valueTypeBool + valueTypeString + valueTypeSymbol + valueTypeBytes + valueTypeMap + valueTypeArray + valueTypeTime + valueTypeExt + + // valueTypeInvalid = 0xff +) + +var valueTypeStrings = [...]string{ + "Unset", + "Nil", + "Int", + "Uint", + "Float", + "Bool", + "String", + "Symbol", + "Bytes", + "Map", + "Array", + "Timestamp", + "Ext", +} + +func (x valueType) String() string { + if int(x) < len(valueTypeStrings) { + return valueTypeStrings[x] + } + return strconv.FormatInt(int64(x), 10) +} + +type seqType uint8 + +const ( + _ seqType = iota + seqTypeArray + seqTypeSlice + seqTypeChan +) + +// note that containerMapStart and containerArrayStart are not sent. +// This is because the ReadXXXStart and EncodeXXXStart already do these. +type containerState uint8 + +const ( + _ containerState = iota + + containerMapStart // slot left open, since Driver method already covers it + containerMapKey + containerMapValue + containerMapEnd + containerArrayStart // slot left open, since Driver methods already cover it + containerArrayElem + containerArrayEnd +) + +// // sfiIdx used for tracking where a (field/enc)Name is seen in a []*structFieldInfo +// type sfiIdx struct { +// name string +// index int +// } + +// do not recurse if a containing type refers to an embedded type +// which refers back to its containing type (via a pointer). +// The second time this back-reference happens, break out, +// so as not to cause an infinite loop. +const rgetMaxRecursion = 2 + +// Anecdotally, we believe most types have <= 12 fields. +// - even Java's PMD rules set TooManyFields threshold to 15. +// However, go has embedded fields, which should be regarded as +// top level, allowing structs to possibly double or triple. +// In addition, we don't want to keep creating transient arrays, +// especially for the sfi index tracking, and the evtypes tracking.
+// +// So - try to keep typeInfoLoadArray within 2K bytes +const ( + typeInfoLoadArraySfisLen = 16 + typeInfoLoadArraySfiidxLen = 8 * 112 + typeInfoLoadArrayEtypesLen = 12 + typeInfoLoadArrayBLen = 8 * 4 +) + +type typeInfoLoad struct { + // fNames []string + // encNames []string + etypes []uintptr + sfis []structFieldInfo +} + +type typeInfoLoadArray struct { + // fNames [typeInfoLoadArrayLen]string + // encNames [typeInfoLoadArrayLen]string + sfis [typeInfoLoadArraySfisLen]structFieldInfo + sfiidx [typeInfoLoadArraySfiidxLen]byte + etypes [typeInfoLoadArrayEtypesLen]uintptr + b [typeInfoLoadArrayBLen]byte // scratch - used for struct field names +} + +// mirror json.Marshaler and json.Unmarshaler here, +// so we don't import the encoding/json package + +type jsonMarshaler interface { + MarshalJSON() ([]byte, error) +} +type jsonUnmarshaler interface { + UnmarshalJSON([]byte) error +} + +type isZeroer interface { + IsZero() bool +} + +// type byteAccepter func(byte) bool + +var ( + bigen = binary.BigEndian + structInfoFieldName = "_struct" + + mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil)) + mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil)) + intfSliceTyp = reflect.TypeOf([]interface{}(nil)) + intfTyp = intfSliceTyp.Elem() + + reflectValTyp = reflect.TypeOf((*reflect.Value)(nil)).Elem() + + stringTyp = reflect.TypeOf("") + timeTyp = reflect.TypeOf(time.Time{}) + rawExtTyp = reflect.TypeOf(RawExt{}) + rawTyp = reflect.TypeOf(Raw{}) + uintptrTyp = reflect.TypeOf(uintptr(0)) + uint8Typ = reflect.TypeOf(uint8(0)) + uint8SliceTyp = reflect.TypeOf([]uint8(nil)) + uintTyp = reflect.TypeOf(uint(0)) + intTyp = reflect.TypeOf(int(0)) + + mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem() + + binaryMarshalerTyp = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() + binaryUnmarshalerTyp = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() + + textMarshalerTyp = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + textUnmarshalerTyp = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + + jsonMarshalerTyp = reflect.TypeOf((*jsonMarshaler)(nil)).Elem() + jsonUnmarshalerTyp = reflect.TypeOf((*jsonUnmarshaler)(nil)).Elem() + + selferTyp = reflect.TypeOf((*Selfer)(nil)).Elem() + iszeroTyp = reflect.TypeOf((*isZeroer)(nil)).Elem() + + uint8TypId = rt2id(uint8Typ) + uint8SliceTypId = rt2id(uint8SliceTyp) + rawExtTypId = rt2id(rawExtTyp) + rawTypId = rt2id(rawTyp) + intfTypId = rt2id(intfTyp) + timeTypId = rt2id(timeTyp) + stringTypId = rt2id(stringTyp) + + mapStrIntfTypId = rt2id(mapStrIntfTyp) + mapIntfIntfTypId = rt2id(mapIntfIntfTyp) + intfSliceTypId = rt2id(intfSliceTyp) + // mapBySliceTypId = rt2id(mapBySliceTyp) + + intBitsize = uint8(intTyp.Bits()) + uintBitsize = uint8(uintTyp.Bits()) + + bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0} + bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + + chkOvf checkOverflow + + errNoFieldNameToStructFieldInfo = errors.New("no field name passed to parseStructFieldInfo") +) + +var defTypeInfos = NewTypeInfos([]string{"codec", "json"}) + +var immutableKindsSet = [32]bool{ + // reflect.Invalid: , + reflect.Bool: true, + reflect.Int: true, + reflect.Int8: true, + reflect.Int16: true, + reflect.Int32: true, + reflect.Int64: true, + reflect.Uint: true, + reflect.Uint8: true, + reflect.Uint16: true, + reflect.Uint32: true, + reflect.Uint64: true, + reflect.Uintptr: true, + reflect.Float32: true, + reflect.Float64: true, + reflect.Complex64: true, + reflect.Complex128: true, + // reflect.Array + // 
reflect.Chan + // reflect.Func: true, + // reflect.Interface + // reflect.Map + // reflect.Ptr + // reflect.Slice + reflect.String: true, + // reflect.Struct + // reflect.UnsafePointer +} + +// Selfer defines methods by which a value can encode or decode itself. +// +// Any type which implements Selfer will be able to encode or decode itself. +// Consequently, during (en|de)code, this takes precedence over +// (text|binary)(M|Unm)arshal or extension support. +// +// Note: *the first set of bytes of any value MUST NOT represent nil in the format*. +// This is because, during each decode, we first check whether the next set of bytes +// represent nil, and if so, we just set the value to nil. +type Selfer interface { + CodecEncodeSelf(*Encoder) + CodecDecodeSelf(*Decoder) +} + +// MapBySlice is a tag interface that denotes that a wrapped slice should encode as a map in the stream. +// The slice contains a sequence of key-value pairs. +// This affords storing a map in a specific sequence in the stream. +// +// Example usage: +// type T1 []string // or []int or []Point or any other "slice" type +// func (_ T1) MapBySlice() {} // T1 now implements MapBySlice, and will be encoded as a map +// type T2 struct { KeyValues T1 } +// +// var kvs = []string{"one", "1", "two", "2", "three", "3"} +// var v2 = T2{ KeyValues: T1(kvs) } +// // v2 will be encoded like the map: {"KeyValues": {"one": "1", "two": "2", "three": "3"} } +// +// The support of MapBySlice affords the following: +// - A slice type which implements MapBySlice will be encoded as a map +// - A slice can be decoded from a map in the stream +// - It MUST be a slice type (not a pointer receiver) that implements MapBySlice +type MapBySlice interface { + MapBySlice() +} + +// BasicHandle encapsulates the common options and extension functions. +// +// Deprecated: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED. +type BasicHandle struct { + // BasicHandle is always a part of a different type. + // It doesn't have to fit into its own cache lines. + + // TypeInfos is used to get the type info for any type. + // + // If not configured, the default TypeInfos is used, which uses struct tag keys: codec, json + TypeInfos *TypeInfos + + // Note: BasicHandle is not comparable, due to these slices here (extHandle, intf2impls). + // If *[]T is used instead, this becomes comparable, at the cost of extra indirection. + // These slices are used all the time, so keep as slices (not pointers). + + extHandle + + intf2impls + + RPCOptions + + // ---- cache line + + DecodeOptions + + // ---- cache line + + EncodeOptions + + // noBuiltInTypeChecker +} + +func (x *BasicHandle) getBasicHandle() *BasicHandle { + return x +} + +func (x *BasicHandle) getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) { + if x.TypeInfos == nil { + return defTypeInfos.get(rtid, rt) + } + return x.TypeInfos.get(rtid, rt) +} + +// Handle is the interface for a specific encoding format. +// +// Typically, a Handle is pre-configured before first time use, +// and not modified while in use. Such a pre-configured Handle +// is safe for concurrent access. +type Handle interface { + Name() string + getBasicHandle() *BasicHandle + recreateEncDriver(encDriver) bool + newEncDriver(w *Encoder) encDriver + newDecDriver(r *Decoder) decDriver + isBinary() bool + hasElemSeparators() bool + // IsBuiltinType(rtid uintptr) bool +} + +// Raw represents raw formatted bytes. +// We "blindly" store it during encode and retrieve the raw bytes during decode.
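+// For example (a sketch): given pre-encoded bytes bs, enc.MustEncode(Raw(bs)) copies bs +// verbatim into the output stream, with no validation of its contents.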
+// Note: it is dangerous during encode, so we may gate the behaviour +// behind an Encode flag which must be explicitly set. +type Raw []byte + +// RawExt represents raw unprocessed extension data. +// Some codecs will decode extension data as a *RawExt +// if there is no registered extension for the tag. +// +// Only one of Data or Value is nil. +// If Data is nil, then the content of the RawExt is in the Value. +type RawExt struct { + Tag uint64 + // Data is the []byte which represents the raw ext. If nil, ext is exposed in Value. + // Data is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of types + Data []byte + // Value represents the extension, if Data is nil. + // Value is used by codecs (e.g. cbor, json) which leverage the format to do + // custom serialization of the types. + Value interface{} +} + +// BytesExt handles custom (de)serialization of types to/from []byte. +// It is used by codecs (e.g. binc, msgpack, simple) which do custom serialization of the types. +type BytesExt interface { + // WriteExt converts a value to a []byte. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + WriteExt(v interface{}) []byte + + // ReadExt updates a value from a []byte. + // + // Note: dst is always a pointer kind to the registered extension type. + ReadExt(dst interface{}, src []byte) +} + +// InterfaceExt handles custom (de)serialization of types to/from another interface{} value. +// The Encoder or Decoder will then handle the further (de)serialization of that known type. +// +// It is used by codecs (e.g. cbor, json) which use the format to do custom serialization of types. +type InterfaceExt interface { + // ConvertExt converts a value into a simpler interface for easy encoding + // e.g. convert time.Time to int64. + // + // Note: v is a pointer iff the registered extension type is a struct or array kind. + ConvertExt(v interface{}) interface{} + + // UpdateExt updates a value from a simpler interface for easy decoding + // e.g. convert int64 to time.Time. + // + // Note: dst is always a pointer kind to the registered extension type. + UpdateExt(dst interface{}, src interface{}) +} + +// Ext handles custom (de)serialization of custom types / extensions. +type Ext interface { + BytesExt + InterfaceExt +} + +// addExtWrapper is a wrapper implementation to support former AddExt exported method. 
+type addExtWrapper struct { + encFn func(reflect.Value) ([]byte, error) + decFn func(reflect.Value, []byte) error +} + +func (x addExtWrapper) WriteExt(v interface{}) []byte { + bs, err := x.encFn(reflect.ValueOf(v)) + if err != nil { + panic(err) + } + return bs +} + +func (x addExtWrapper) ReadExt(v interface{}, bs []byte) { + if err := x.decFn(reflect.ValueOf(v), bs); err != nil { + panic(err) + } +} + +func (x addExtWrapper) ConvertExt(v interface{}) interface{} { + return x.WriteExt(v) +} + +func (x addExtWrapper) UpdateExt(dest interface{}, v interface{}) { + x.ReadExt(dest, v.([]byte)) +} + +type extWrapper struct { + BytesExt + InterfaceExt +} + +type bytesExtFailer struct{} + +func (bytesExtFailer) WriteExt(v interface{}) []byte { + panicv.errorstr("BytesExt.WriteExt is not supported") + return nil +} +func (bytesExtFailer) ReadExt(v interface{}, bs []byte) { + panicv.errorstr("BytesExt.ReadExt is not supported") +} + +type interfaceExtFailer struct{} + +func (interfaceExtFailer) ConvertExt(v interface{}) interface{} { + panicv.errorstr("InterfaceExt.ConvertExt is not supported") + return nil +} +func (interfaceExtFailer) UpdateExt(dest interface{}, v interface{}) { + panicv.errorstr("InterfaceExt.UpdateExt is not supported") +} + +type binaryEncodingType struct{} + +func (binaryEncodingType) isBinary() bool { return true } + +type textEncodingType struct{} + +func (textEncodingType) isBinary() bool { return false } + +// noBuiltInTypes is embedded into many types which do not support builtins +// e.g. msgpack, simple, cbor. + +// type noBuiltInTypeChecker struct{} +// func (noBuiltInTypeChecker) IsBuiltinType(rt uintptr) bool { return false } +// type noBuiltInTypes struct{ noBuiltInTypeChecker } + +type noBuiltInTypes struct{} + +func (noBuiltInTypes) EncodeBuiltin(rt uintptr, v interface{}) {} +func (noBuiltInTypes) DecodeBuiltin(rt uintptr, v interface{}) {} + +// type noStreamingCodec struct{} +// func (noStreamingCodec) CheckBreak() bool { return false } +// func (noStreamingCodec) hasElemSeparators() bool { return false } + +type noElemSeparators struct{} + +func (noElemSeparators) hasElemSeparators() (v bool) { return } +func (noElemSeparators) recreateEncDriver(e encDriver) (v bool) { return } + +// bigenHelper. +// Users must already slice the x completely, because we will not reslice. +type bigenHelper struct { + x []byte // must be correctly sliced to appropriate len. slicing is a cost. + w encWriter +} + +func (z bigenHelper) writeUint16(v uint16) { + bigen.PutUint16(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint32(v uint32) { + bigen.PutUint32(z.x, v) + z.w.writeb(z.x) +} + +func (z bigenHelper) writeUint64(v uint64) { + bigen.PutUint64(z.x, v) + z.w.writeb(z.x) +} + +type extTypeTagFn struct { + rtid uintptr + rtidptr uintptr + rt reflect.Type + tag uint64 + ext Ext + _ [1]uint64 // padding +} + +type extHandle []extTypeTagFn + +// AddExt registers an encode and decode function for a reflect.Type. +// To deregister an Ext, call AddExt with nil encfn and/or nil decfn. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) AddExt(rt reflect.Type, tag byte, + encfn func(reflect.Value) ([]byte, error), + decfn func(reflect.Value, []byte) error) (err error) { + if encfn == nil || decfn == nil { + return o.SetExt(rt, uint64(tag), nil) + } + return o.SetExt(rt, uint64(tag), addExtWrapper{encfn, decfn}) +} + +// SetExt will set the extension for a tag and reflect.Type.
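+// For example (a sketch; MyType and myExt are user-supplied, with myExt implementing Ext): +//	var h MsgpackHandle +//	err := h.SetExt(reflect.TypeOf(MyType{}), 78, myExt) +//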
+// Note that the type must be a named type, and specifically not a pointer or Interface. +// An error is returned if that is not honored. +// To deregister an ext, call SetExt with nil Ext. +// +// Deprecated: Use SetBytesExt or SetInterfaceExt on the Handle instead. +func (o *extHandle) SetExt(rt reflect.Type, tag uint64, ext Ext) (err error) { + // o is a pointer, because we may need to initialize it + rk := rt.Kind() + for rk == reflect.Ptr { + rt = rt.Elem() + rk = rt.Kind() + } + + if rt.PkgPath() == "" || rk == reflect.Interface { // || rk == reflect.Ptr { + return fmt.Errorf("codec.Handle.SetExt: Takes named type, not a pointer or interface: %v", rt) + } + + rtid := rt2id(rt) + switch rtid { + case timeTypId, rawTypId, rawExtTypId: + // all natively supported types, so cannot have an extension + return // TODO: should we silently ignore, or return an error??? + } + // if o == nil { + // return errors.New("codec.Handle.SetExt: extHandle not initialized") + // } + o2 := *o + // if o2 == nil { + // return errors.New("codec.Handle.SetExt: extHandle not initialized") + // } + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.tag, v.ext = tag, ext + return + } + } + rtidptr := rt2id(reflect.PtrTo(rt)) + *o = append(o2, extTypeTagFn{rtid, rtidptr, rt, tag, ext, [1]uint64{}}) + return +} + +func (o extHandle) getExt(rtid uintptr) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.rtid == rtid || v.rtidptr == rtid { + return + } + } + return nil +} + +func (o extHandle) getExtForTag(tag uint64) (v *extTypeTagFn) { + for i := range o { + v = &o[i] + if v.tag == tag { + return + } + } + return nil +} + +type intf2impl struct { + rtid uintptr // for intf + impl reflect.Type + // _ [1]uint64 // padding // not-needed, as *intf2impl is never returned. +} + +type intf2impls []intf2impl + +// Intf2Impl maps an interface to an implementing type. +// This allows us to support inferring the concrete type +// and populating it when passed an interface. +// e.g. var v io.Reader can be decoded as a bytes.Buffer, etc. +// +// Passing a nil impl will clear the mapping.
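+// For example (a sketch): +//	var h JsonHandle +//	// decode any io.Reader interface value into a *bytes.Buffer: +//	err := h.Intf2Impl(reflect.TypeOf((*io.Reader)(nil)).Elem(), reflect.TypeOf(bytes.Buffer{})) +//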
+func (o *intf2impls) Intf2Impl(intf, impl reflect.Type) (err error) { + if impl != nil && !impl.Implements(intf) { + return fmt.Errorf("Intf2Impl: %v does not implement %v", impl, intf) + } + rtid := rt2id(intf) + o2 := *o + for i := range o2 { + v := &o2[i] + if v.rtid == rtid { + v.impl = impl + return + } + } + *o = append(o2, intf2impl{rtid, impl}) + return +} + +func (o intf2impls) intf2impl(rtid uintptr) (rv reflect.Value) { + for i := range o { + v := &o[i] + if v.rtid == rtid { + if v.impl == nil { + return + } + if v.impl.Kind() == reflect.Ptr { + return reflect.New(v.impl.Elem()) + } + return reflect.New(v.impl).Elem() + } + } + return +} + +type structFieldInfoFlag uint8 + +const ( + _ structFieldInfoFlag = 1 << iota + structFieldInfoFlagReady + structFieldInfoFlagOmitEmpty +) + +func (x *structFieldInfoFlag) flagSet(f structFieldInfoFlag) { + *x = *x | f +} + +func (x *structFieldInfoFlag) flagClr(f structFieldInfoFlag) { + *x = *x &^ f +} + +func (x structFieldInfoFlag) flagGet(f structFieldInfoFlag) bool { + return x&f != 0 +} + +func (x structFieldInfoFlag) omitEmpty() bool { + return x.flagGet(structFieldInfoFlagOmitEmpty) +} + +func (x structFieldInfoFlag) ready() bool { + return x.flagGet(structFieldInfoFlagReady) +} + +type structFieldInfo struct { + encName string // encode name + fieldName string // field name + + is [maxLevelsEmbedding]uint16 // (recursive/embedded) field index in struct + nis uint8 // num levels of embedding. if 1, then it's not embedded. + structFieldInfoFlag +} + +func (si *structFieldInfo) setToZeroValue(v reflect.Value) { + if v, valid := si.field(v, false); valid { + v.Set(reflect.Zero(v.Type())) + } +} + +// rv returns the field of the struct. +// If anonymous, it returns an Invalid +func (si *structFieldInfo) field(v reflect.Value, update bool) (rv2 reflect.Value, valid bool) { + // replicate FieldByIndex + for i, x := range si.is { + if uint8(i) == si.nis { + break + } + if v, valid = baseStructRv(v, update); !valid { + return + } + v = v.Field(int(x)) + } + + return v, true +} + +// func (si *structFieldInfo) fieldval(v reflect.Value, update bool) reflect.Value { +// v, _ = si.field(v, update) +// return v +// } + +func parseStructInfo(stag string) (toArray, omitEmpty bool, keytype valueType) { + keytype = valueTypeString // default + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + } else { + switch s { + case "omitempty": + omitEmpty = true + case "toarray": + toArray = true + case "int": + keytype = valueTypeInt + case "uint": + keytype = valueTypeUint + case "float": + keytype = valueTypeFloat + // case "bool": + // keytype = valueTypeBool + case "string": + keytype = valueTypeString + } + } + } + return +} + +func (si *structFieldInfo) parseTag(stag string) { + // if fname == "" { + // panic(errNoFieldNameToStructFieldInfo) + // } + + if stag == "" { + return + } + for i, s := range strings.Split(stag, ",") { + if i == 0 { + if s != "" { + si.encName = s + } + } else { + switch s { + case "omitempty": + si.flagSet(structFieldInfoFlagOmitEmpty) + // si.omitEmpty = true + // case "toarray": + // si.toArray = true + } + } + } +} + +type sfiSortedByEncName []*structFieldInfo + +func (p sfiSortedByEncName) Len() int { + return len(p) +} + +func (p sfiSortedByEncName) Less(i, j int) bool { + return p[i].encName < p[j].encName +} + +func (p sfiSortedByEncName) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +const structFieldNodeNumToCache = 4 + +type structFieldNodeCache struct { + rv 
[structFieldNodeNumToCache]reflect.Value + idx [structFieldNodeNumToCache]uint32 + num uint8 +} + +func (x *structFieldNodeCache) get(key uint32) (fv reflect.Value, valid bool) { + for i, k := range &x.idx { + if uint8(i) == x.num { + return // break + } + if key == k { + return x.rv[i], true + } + } + return +} + +func (x *structFieldNodeCache) tryAdd(fv reflect.Value, key uint32) { + if x.num < structFieldNodeNumToCache { + x.rv[x.num] = fv + x.idx[x.num] = key + x.num++ + return + } +} + +type structFieldNode struct { + v reflect.Value + cache2 structFieldNodeCache + cache3 structFieldNodeCache + update bool +} + +func (x *structFieldNode) field(si *structFieldInfo) (fv reflect.Value) { + // return si.fieldval(x.v, x.update) + // Note: we only cache if nis=2 or nis=3 i.e. up to 2 levels of embedding + // This mostly saves us time on the repeated calls to v.Elem, v.Field, etc. + var valid bool + switch si.nis { + case 1: + fv = x.v.Field(int(si.is[0])) + case 2: + if fv, valid = x.cache2.get(uint32(si.is[0])); valid { + fv = fv.Field(int(si.is[1])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache2.tryAdd(fv, uint32(si.is[0])) + fv = fv.Field(int(si.is[1])) + case 3: + var key uint32 = uint32(si.is[0])<<16 | uint32(si.is[1]) + if fv, valid = x.cache3.get(key); valid { + fv = fv.Field(int(si.is[2])) + return + } + fv = x.v.Field(int(si.is[0])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + fv = fv.Field(int(si.is[1])) + if fv, valid = baseStructRv(fv, x.update); !valid { + return + } + x.cache3.tryAdd(fv, key) + fv = fv.Field(int(si.is[2])) + default: + fv, _ = si.field(x.v, x.update) + } + return +} + +func baseStructRv(v reflect.Value, update bool) (v2 reflect.Value, valid bool) { + for v.Kind() == reflect.Ptr { + if v.IsNil() { + if !update { + return + } + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v, true +} + +type typeInfoFlag uint8 + +const ( + typeInfoFlagComparable = 1 << iota + typeInfoFlagIsZeroer + typeInfoFlagIsZeroerPtr +) + +// typeInfo keeps information about each (non-ptr) type referenced in the encode/decode sequence. +// +// During an encode/decode sequence, we work as below: +// - If base is a built in type, en/decode base value +// - If base is registered as an extension, en/decode base value +// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method +// - If type is text(M/Unm)arshaler, call Text(M/Unm)arshal method +// - Else decode appropriately based on the reflect.Kind +type typeInfo struct { + rt reflect.Type + elem reflect.Type + pkgpath string + + rtid uintptr + // rv0 reflect.Value // saved zero value, used if immutableKind + + numMeth uint16 // number of methods + kind uint8 + chandir uint8 + + anyOmitEmpty bool // true if a struct, and any of the fields are tagged "omitempty" + toArray bool // whether this (struct) type should be encoded as an array + keyType valueType // if struct, how is the field name stored in a stream? default is string + mbs bool // base type (T or *T) is a MapBySlice + + // ---- cpu cache line boundary? + sfiSort []*structFieldInfo // sorted. Used when enc/dec struct to map. + sfiSrc []*structFieldInfo // unsorted. Used when enc/dec struct to array. + + key reflect.Type + + // ---- cpu cache line boundary? + // sfis []structFieldInfo // all sfi, in src order, as created. + sfiNamesSort []byte // all names, with indexes into the sfiSort + + // format of marshal type fields below: [btj][mu]p? OR csp? 
+
+	bm  bool // T is a binaryMarshaler
+	bmp bool // *T is a binaryMarshaler
+	bu  bool // T is a binaryUnmarshaler
+	bup bool // *T is a binaryUnmarshaler
+	tm  bool // T is a textMarshaler
+	tmp bool // *T is a textMarshaler
+	tu  bool // T is a textUnmarshaler
+	tup bool // *T is a textUnmarshaler
+
+	jm  bool // T is a jsonMarshaler
+	jmp bool // *T is a jsonMarshaler
+	ju  bool // T is a jsonUnmarshaler
+	jup bool // *T is a jsonUnmarshaler
+	cs  bool // T is a Selfer
+	csp bool // *T is a Selfer
+
+	// other flags, with individual bits representing if set.
+	flags typeInfoFlag
+
+	// _ [2]byte // padding
+	_ [3]uint64 // padding
+}
+
+func (ti *typeInfo) isFlag(f typeInfoFlag) bool {
+	return ti.flags&f != 0
+}
+
+func (ti *typeInfo) indexForEncName(name []byte) (index int16) {
+	var sn []byte
+	if len(name)+2 <= 32 {
+		var buf [32]byte // should not escape
+		sn = buf[:len(name)+2]
+	} else {
+		sn = make([]byte, len(name)+2)
+	}
+	copy(sn[1:], name)
+	sn[0], sn[len(sn)-1] = tiSep2(name), 0xff
+	j := bytes.Index(ti.sfiNamesSort, sn)
+	if j < 0 {
+		return -1
+	}
+	index = int16(uint16(ti.sfiNamesSort[j+len(sn)+1]) | uint16(ti.sfiNamesSort[j+len(sn)])<<8)
+	return
+}
+
+type rtid2ti struct {
+	rtid uintptr
+	ti   *typeInfo
+}
+
+// TypeInfos caches typeInfo for each type on first inspection.
+//
+// It is configured with a set of tag keys, which are used to get
+// configuration for the type.
+type TypeInfos struct {
+	// infos: formerly map[uintptr]*typeInfo, now *[]rtid2ti, 2 words expected
+	infos atomicTypeInfoSlice
+	mu    sync.Mutex
+	tags  []string
+	_     [2]uint64 // padding
+}
+
+// NewTypeInfos creates a TypeInfos given a set of struct tag keys.
+//
+// This allows users to customize the struct tag keys which contain configuration
+// of their types.
+func NewTypeInfos(tags []string) *TypeInfos {
+	return &TypeInfos{tags: tags}
+}
+
+func (x *TypeInfos) structTag(t reflect.StructTag) (s string) {
+	// check for tags: codec, json, in that order.
+	// this allows seamless support for many configured structs.
+	for _, x := range x.tags {
+		s = t.Get(x)
+		if s != "" {
+			return s
+		}
+	}
+	return
+}
+
+func (x *TypeInfos) find(s []rtid2ti, rtid uintptr) (idx int, ti *typeInfo) {
+	// binary search. adapted from sort/search.go.
+	// if sp == nil {
+	//	return -1, nil
+	// }
+	// s := *sp
+	h, i, j := 0, 0, len(s)
+	for i < j {
+		h = i + (j-i)/2
+		if s[h].rtid < rtid {
+			i = h + 1
+		} else {
+			j = h
+		}
+	}
+	if i < len(s) && s[i].rtid == rtid {
+		return i, s[i].ti
+	}
+	return i, nil
+}
+
+func (x *TypeInfos) get(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
+	sp := x.infos.load()
+	var idx int
+	if sp != nil {
+		idx, pti = x.find(sp, rtid)
+		if pti != nil {
+			return
+		}
+	}
+
+	rk := rt.Kind()
+
+	if rk == reflect.Ptr { // || (rk == reflect.Interface && rtid != intfTypId) {
+		panicv.errorf("invalid kind passed to TypeInfos.get: %v - %v", rk, rt)
+	}
+
+	// do not hold lock while computing this.
+	// it may lead to duplication, but that's ok.
+	ti := typeInfo{rt: rt, rtid: rtid, kind: uint8(rk), pkgpath: rt.PkgPath()}
+	// ti.rv0 = reflect.Zero(rt)
+
+	// ti.comparable = rt.Comparable()
+	ti.numMeth = uint16(rt.NumMethod())
+
+	ti.bm, ti.bmp = implIntf(rt, binaryMarshalerTyp)
+	ti.bu, ti.bup = implIntf(rt, binaryUnmarshalerTyp)
+	ti.tm, ti.tmp = implIntf(rt, textMarshalerTyp)
+	ti.tu, ti.tup = implIntf(rt, textUnmarshalerTyp)
+	ti.jm, ti.jmp = implIntf(rt, jsonMarshalerTyp)
+	ti.ju, ti.jup = implIntf(rt, jsonUnmarshalerTyp)
+	ti.cs, ti.csp = implIntf(rt, selferTyp)
+
+	b1, b2 := implIntf(rt, iszeroTyp)
+	if b1 {
+		ti.flags |= typeInfoFlagIsZeroer
+	}
+	if b2 {
+		ti.flags |= typeInfoFlagIsZeroerPtr
+	}
+	if rt.Comparable() {
+		ti.flags |= typeInfoFlagComparable
+	}
+
+	switch rk {
+	case reflect.Struct:
+		var omitEmpty bool
+		if f, ok := rt.FieldByName(structInfoFieldName); ok {
+			ti.toArray, omitEmpty, ti.keyType = parseStructInfo(x.structTag(f.Tag))
+		} else {
+			ti.keyType = valueTypeString
+		}
+		pp, pi := pool.tiLoad()
+		pv := pi.(*typeInfoLoadArray)
+		pv.etypes[0] = ti.rtid
+		// vv := typeInfoLoad{pv.fNames[:0], pv.encNames[:0], pv.etypes[:1], pv.sfis[:0]}
+		vv := typeInfoLoad{pv.etypes[:1], pv.sfis[:0]}
+		x.rget(rt, rtid, omitEmpty, nil, &vv)
+		// ti.sfis = vv.sfis
+		ti.sfiSrc, ti.sfiSort, ti.sfiNamesSort, ti.anyOmitEmpty = rgetResolveSFI(rt, vv.sfis, pv)
+		pp.Put(pi)
+	case reflect.Map:
+		ti.elem = rt.Elem()
+		ti.key = rt.Key()
+	case reflect.Slice:
+		ti.mbs, _ = implIntf(rt, mapBySliceTyp)
+		ti.elem = rt.Elem()
+	case reflect.Chan:
+		ti.elem = rt.Elem()
+		ti.chandir = uint8(rt.ChanDir())
+	case reflect.Array, reflect.Ptr:
+		ti.elem = rt.Elem()
+	}
+	// sfi = sfiSrc
+
+	x.mu.Lock()
+	sp = x.infos.load()
+	if sp == nil {
+		pti = &ti
+		vs := []rtid2ti{{rtid, pti}}
+		x.infos.store(vs)
+	} else {
+		idx, pti = x.find(sp, rtid)
+		if pti == nil {
+			pti = &ti
+			vs := make([]rtid2ti, len(sp)+1)
+			copy(vs, sp[:idx])
+			copy(vs[idx+1:], sp[idx:])
+			vs[idx] = rtid2ti{rtid, pti}
+			x.infos.store(vs)
+		}
+	}
+	x.mu.Unlock()
+	return
+}
+
+func (x *TypeInfos) rget(rt reflect.Type, rtid uintptr, omitEmpty bool,
+	indexstack []uint16, pv *typeInfoLoad) {
+	// Read up fields and store how to access the value.
+	//
+	// It uses go's rules for message selectors,
+	// which say that the field with the shallowest depth is selected.
+	//
+	// Note: we consciously use slices, not a map, to simulate a set.
+	// Typically, types have < 16 fields,
+	// and iteration using equals is faster than maps there
+	flen := rt.NumField()
+	if flen > (1<<maxLevelsEmbedding - 1) {
+		panicv.errorf("codec: types with > %v fields are not supported - has %v fields",
+			(1<<maxLevelsEmbedding - 1), flen)
+	}
+LOOP:
+	for j, jlen := uint16(0), uint16(flen); j < jlen; j++ {
+		f := rt.Field(int(j))
+		fkind := f.Type.Kind()
+		// skip if a func type, or is unexported, or structTag value == "-"
+		switch fkind {
+		case reflect.Func, reflect.Complex64, reflect.Complex128, reflect.UnsafePointer:
+			continue LOOP
+		}
+
+		isUnexported := f.PkgPath != ""
+		if isUnexported && !f.Anonymous {
+			continue
+		}
+		stag := x.structTag(f.Tag)
+		if stag == "-" {
+			continue
+		}
+		var si structFieldInfo
+		var parsed bool
+		// if anonymous and no struct tag (or it's blank),
+		// and a struct (or pointer to struct), inline it.
+		if f.Anonymous && fkind != reflect.Interface {
+			ft := f.Type
+			for ft.Kind() == reflect.Ptr {
+				ft = ft.Elem()
+			}
+			isStruct := ft.Kind() == reflect.Struct
+
+			// Ignore embedded fields of unexported non-struct types.
+			// Also, from go1.10, ignore embedded fields of unexported struct types
+			// (as they are not visible to reflect)
+			if isUnexported && !isStruct {
+				continue
+			}
+			doInline := stag == ""
+			if !doInline {
+				si.parseTag(stag)
+				parsed = true
+				doInline = si.encName == ""
+			}
+			if doInline && isStruct {
+				// if etypes contains this, don't call rget again (as fields are already seen here)
+				ftid := rt2id(ft)
+				processIt := true
+				for _, etid := range pv.etypes {
+					if etid == ftid {
+						processIt = false
+						break
+					}
+				}
+				if processIt {
+					pv.etypes = append(pv.etypes, ftid)
+					indexstack2 := make([]uint16, len(indexstack)+1)
+					copy(indexstack2, indexstack)
+					indexstack2[len(indexstack)] = j
+					x.rget(ft, ftid, omitEmpty, indexstack2, pv)
+				}
+				continue
+			}
+		}
+
+		// after the anonymous dance: if an unexported field, skip
+		if isUnexported {
+			continue
+		}
+
+		if !parsed {
+			si.encName = f.Name
+			si.parseTag(stag)
+			parsed = true
+		} else if si.encName == "" {
+			si.encName = f.Name
+		}
+		si.fieldName = f.Name
+		si.flagSet(structFieldInfoFlagReady)
+
+		if len(indexstack) > maxLevelsEmbedding-1 {
+			panicv.errorf("codec: only supports up to %v depth of embedding - type has %v depth",
+				maxLevelsEmbedding-1, len(indexstack))
+		}
+		si.nis = uint8(len(indexstack)) + 1
+		copy(si.is[:], indexstack)
+		si.is[len(indexstack)] = j
+
+		if omitEmpty {
+			si.flagSet(structFieldInfoFlagOmitEmpty)
+		}
+		pv.sfis = append(pv.sfis, si)
+	}
+}
+
+func tiSep(name string) uint8 {
+	// (xn[0]%64) // (between 192-255 - outside ascii BMP)
+	// return 0xfe - (name[0] & 63)
+	// return 0xfe - (name[0] & 63) - uint8(len(name))
+	// return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+	// return ((0xfe - (name[0] & 63)) & 0xf8) | (uint8(len(name) & 0x07))
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+func tiSep2(name []byte) uint8 {
+	return 0xfe - (name[0] & 63) - uint8(len(name)&63)
+}
+
+// rgetResolveSFI resolves the struct field info obtained from a call to rget.
+// Returns a trimmed, unsorted and sorted []*structFieldInfo.
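parseStructInfo and parseTag above implement the tag grammar that rget consumes: the first comma-separated token renames a field, later tokens set options, and the reserved _struct field (structInfoFieldName) carries type-level options. A small sketch of that grammar on a hypothetical type, assuming the default tag keys ("codec", then "json"):

    type Point struct {
    	_struct struct{} `codec:",omitempty,toarray"` // type-level options; the field itself is never encoded
    	X       int      `codec:"x"`                  // rename
    	Y       int      `codec:"y,omitempty"`        // rename + omit when empty
    	Secret  int      `codec:"-"`                  // skipped entirely
    }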
+func rgetResolveSFI(rt reflect.Type, x []structFieldInfo, pv *typeInfoLoadArray) ( + y, z []*structFieldInfo, ss []byte, anyOmitEmpty bool) { + sa := pv.sfiidx[:0] + sn := pv.b[:] + n := len(x) + + var xn string + var ui uint16 + var sep byte + + for i := range x { + ui = uint16(i) + xn = x[i].encName // fieldName or encName? use encName for now. + if len(xn)+2 > cap(pv.b) { + sn = make([]byte, len(xn)+2) + } else { + sn = sn[:len(xn)+2] + } + // use a custom sep, so that misses are less frequent, + // since the sep (first char in search) is as unique as first char in field name. + sep = tiSep(xn) + sn[0], sn[len(sn)-1] = sep, 0xff + copy(sn[1:], xn) + j := bytes.Index(sa, sn) + if j == -1 { + sa = append(sa, sep) + sa = append(sa, xn...) + sa = append(sa, 0xff, byte(ui>>8), byte(ui)) + } else { + index := uint16(sa[j+len(sn)+1]) | uint16(sa[j+len(sn)])<<8 + // one of them must be reset to nil, + // and the index updated appropriately to the other one + if x[i].nis == x[index].nis { + } else if x[i].nis < x[index].nis { + sa[j+len(sn)], sa[j+len(sn)+1] = byte(ui>>8), byte(ui) + if x[index].ready() { + x[index].flagClr(structFieldInfoFlagReady) + n-- + } + } else { + if x[i].ready() { + x[i].flagClr(structFieldInfoFlagReady) + n-- + } + } + } + + } + var w []structFieldInfo + sharingArray := len(x) <= typeInfoLoadArraySfisLen // sharing array with typeInfoLoadArray + if sharingArray { + w = make([]structFieldInfo, n) + } + + // remove all the nils (non-ready) + y = make([]*structFieldInfo, n) + n = 0 + var sslen int + for i := range x { + if !x[i].ready() { + continue + } + if !anyOmitEmpty && x[i].omitEmpty() { + anyOmitEmpty = true + } + if sharingArray { + w[n] = x[i] + y[n] = &w[n] + } else { + y[n] = &x[i] + } + sslen = sslen + len(x[i].encName) + 4 + n++ + } + if n != len(y) { + panicv.errorf("failure reading struct %v - expecting %d of %d valid fields, got %d", + rt, len(y), len(x), n) + } + + z = make([]*structFieldInfo, len(y)) + copy(z, y) + sort.Sort(sfiSortedByEncName(z)) + + sharingArray = len(sa) <= typeInfoLoadArraySfiidxLen + if sharingArray { + ss = make([]byte, 0, sslen) + } else { + ss = sa[:0] // reuse the newly made sa array if necessary + } + for i := range z { + xn = z[i].encName + sep = tiSep(xn) + ui = uint16(i) + ss = append(ss, sep) + ss = append(ss, xn...) + ss = append(ss, 0xff, byte(ui>>8), byte(ui)) + } + return +} + +func implIntf(rt, iTyp reflect.Type) (base bool, indir bool) { + return rt.Implements(iTyp), reflect.PtrTo(rt).Implements(iTyp) +} + +// isEmptyStruct is only called from isEmptyValue, and checks if a struct is empty: +// - does it implement IsZero() bool +// - is it comparable, and can i compare directly using == +// - if checkStruct, then walk through the encodable fields +// and check if they are empty or not. +func isEmptyStruct(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + // v is a struct kind - no need to check again. + // We only check isZero on a struct kind, to reduce the amount of times + // that we lookup the rtid and typeInfo for each type as we walk the tree. 
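The IsZero branches just below let a struct type define its own emptiness for omitempty, via the typeInfoFlagIsZeroer/typeInfoFlagIsZeroerPtr flags computed in TypeInfos.get. An illustrative, hypothetical type:

    type Window struct{ Lo, Hi int }

    // IsZero makes omitempty treat the whole window as empty only when
    // both bounds are zero, instead of walking the fields one by one.
    func (w Window) IsZero() bool { return w.Lo == 0 && w.Hi == 0 }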
+
+	vt := v.Type()
+	rtid := rt2id(vt)
+	if tinfos == nil {
+		tinfos = defTypeInfos
+	}
+	ti := tinfos.get(rtid, vt)
+	if ti.rtid == timeTypId {
+		return rv2i(v).(time.Time).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagIsZeroerPtr) && v.CanAddr() {
+		return rv2i(v.Addr()).(isZeroer).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagIsZeroer) {
+		return rv2i(v).(isZeroer).IsZero()
+	}
+	if ti.isFlag(typeInfoFlagComparable) {
+		return rv2i(v) == rv2i(reflect.Zero(vt))
+	}
+	if !checkStruct {
+		return false
+	}
+	// We only care about what we can encode/decode,
+	// so that is what we use to check omitEmpty.
+	for _, si := range ti.sfiSrc {
+		sfv, valid := si.field(v, false)
+		if valid && !isEmptyValue(sfv, tinfos, deref, checkStruct) {
+			return false
+		}
+	}
+	return true
+}
+
+// func roundFloat(x float64) float64 {
+//	t := math.Trunc(x)
+//	if math.Abs(x-t) >= 0.5 {
+//		return t + math.Copysign(1, x)
+//	}
+//	return t
+// }
+
+func panicToErr(h errstrDecorator, err *error) {
+	// Note: This method MUST be called directly from defer i.e. defer panicToErr ...
+	// else it seems the recover is not fully handled
+	if recoverPanicToErr {
+		if x := recover(); x != nil {
+			// fmt.Printf("panic'ing with: %v\n", x)
+			// debug.PrintStack()
+			panicValToErr(h, x, err)
+		}
+	}
+}
+
+func panicValToErr(h errstrDecorator, v interface{}, err *error) {
+	switch xerr := v.(type) {
+	case nil:
+	case error:
+		switch xerr {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF, errEncoderNotInitialized, errDecoderNotInitialized:
+			// treat as special (bubble up)
+			*err = xerr
+		default:
+			h.wrapErrstr(xerr.Error(), err)
+		}
+	case string:
+		if xerr != "" {
+			h.wrapErrstr(xerr, err)
+		}
+	case fmt.Stringer:
+		if xerr != nil {
+			h.wrapErrstr(xerr.String(), err)
+		}
+	default:
+		h.wrapErrstr(v, err)
+	}
+}
+
+func isImmutableKind(k reflect.Kind) (v bool) {
+	return immutableKindsSet[k]
+}
+
+// ----
+
+type codecFnInfo struct {
+	ti    *typeInfo
+	xfFn  Ext
+	xfTag uint64
+	seq   seqType
+	addrD bool
+	addrF bool // if addrD, this says whether decode function can take a value or a ptr
+	addrE bool
+	ready bool // ready to use
+}
+
+// codecFn encapsulates the captured variables and the encode function.
+// This way, we only do some calculations one time, and pass to the
+// code block that should be called (encapsulated in a function)
+// instead of executing the checks every time.
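Stripped of the codec specifics, the idea behind codecFn/codecFner is memoized dispatch: resolve the per-type decision tree once, then reuse the resulting function. A rough sketch of just that pattern (not this package's code; single-goroutine, since each codecFner serves one Encoder or Decoder; assumes fmt and reflect are imported):

    var fnCache = map[reflect.Type]func(interface{}){}

    func buildFn(rt reflect.Type) func(interface{}) {
    	// imagine the expensive interface/kind checks from get() happening here, once
    	return func(v interface{}) { fmt.Printf("%v: %v\n", rt, v) }
    }

    func fnFor(rt reflect.Type) func(interface{}) {
    	if f, ok := fnCache[rt]; ok {
    		return f
    	}
    	f := buildFn(rt)
    	fnCache[rt] = f
    	return f
    }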
+type codecFn struct {
+	i  codecFnInfo
+	fe func(*Encoder, *codecFnInfo, reflect.Value)
+	fd func(*Decoder, *codecFnInfo, reflect.Value)
+	_  [1]uint64 // padding
+}
+
+type codecRtidFn struct {
+	rtid uintptr
+	fn   *codecFn
+}
+
+type codecFner struct {
+	// hh Handle
+	h  *BasicHandle
+	s  []codecRtidFn
+	be bool
+	js bool
+	_  [6]byte // padding
+	_  [3]uint64 // padding
+}
+
+func (c *codecFner) reset(hh Handle) {
+	bh := hh.getBasicHandle()
+	// only reset iff extensions changed or *TypeInfos changed
+	var hhSame = true &&
+		c.h == bh && c.h.TypeInfos == bh.TypeInfos &&
+		len(c.h.extHandle) == len(bh.extHandle) &&
+		(len(c.h.extHandle) == 0 || &c.h.extHandle[0] == &bh.extHandle[0])
+	if !hhSame {
+		// c.hh = hh
+		c.h, bh = bh, c.h // swap both
+		_, c.js = hh.(*JsonHandle)
+		c.be = hh.isBinary()
+		for i := range c.s {
+			c.s[i].fn.i.ready = false
+		}
+	}
+}
+
+func (c *codecFner) get(rt reflect.Type, checkFastpath, checkCodecSelfer bool) (fn *codecFn) {
+	rtid := rt2id(rt)
+
+	for _, x := range c.s {
+		if x.rtid == rtid {
+			// if rtid exists, then there's a *codecFn attached (non-nil)
+			fn = x.fn
+			if fn.i.ready {
+				return
+			}
+			break
+		}
+	}
+	var ti *typeInfo
+	if fn == nil {
+		fn = new(codecFn)
+		if c.s == nil {
+			c.s = make([]codecRtidFn, 0, 8)
+		}
+		c.s = append(c.s, codecRtidFn{rtid, fn})
+	} else {
+		ti = fn.i.ti
+		*fn = codecFn{}
+		fn.i.ti = ti
+		// fn.fe, fn.fd = nil, nil
+	}
+	fi := &(fn.i)
+	fi.ready = true
+	if ti == nil {
+		ti = c.h.getTypeInfo(rtid, rt)
+		fi.ti = ti
+	}
+
+	rk := reflect.Kind(ti.kind)
+
+	if checkCodecSelfer && (ti.cs || ti.csp) {
+		fn.fe = (*Encoder).selferMarshal
+		fn.fd = (*Decoder).selferUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.csp
+		fi.addrE = ti.csp
+	} else if rtid == timeTypId {
+		fn.fe = (*Encoder).kTime
+		fn.fd = (*Decoder).kTime
+	} else if rtid == rawTypId {
+		fn.fe = (*Encoder).raw
+		fn.fd = (*Decoder).raw
+	} else if rtid == rawExtTypId {
+		fn.fe = (*Encoder).rawExt
+		fn.fd = (*Decoder).rawExt
+		fi.addrF = true
+		fi.addrD = true
+		fi.addrE = true
+	} else if xfFn := c.h.getExt(rtid); xfFn != nil {
+		fi.xfTag, fi.xfFn = xfFn.tag, xfFn.ext
+		fn.fe = (*Encoder).ext
+		fn.fd = (*Decoder).ext
+		fi.addrF = true
+		fi.addrD = true
+		if rk == reflect.Struct || rk == reflect.Array {
+			fi.addrE = true
+		}
+	} else if supportMarshalInterfaces && c.be && (ti.bm || ti.bmp) && (ti.bu || ti.bup) {
+		fn.fe = (*Encoder).binaryMarshal
+		fn.fd = (*Decoder).binaryUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.bup
+		fi.addrE = ti.bmp
+	} else if supportMarshalInterfaces && !c.be && c.js && (ti.jm || ti.jmp) && (ti.ju || ti.jup) {
+		// If JSON, we should check JSONMarshal before textMarshal
+		fn.fe = (*Encoder).jsonMarshal
+		fn.fd = (*Decoder).jsonUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.jup
+		fi.addrE = ti.jmp
+	} else if supportMarshalInterfaces && !c.be && (ti.tm || ti.tmp) && (ti.tu || ti.tup) {
+		fn.fe = (*Encoder).textMarshal
+		fn.fd = (*Decoder).textUnmarshal
+		fi.addrF = true
+		fi.addrD = ti.tup
+		fi.addrE = ti.tmp
+	} else {
+		if fastpathEnabled && checkFastpath && (rk == reflect.Map || rk == reflect.Slice) {
+			if ti.pkgpath == "" { // un-named slice or map
+				if idx := fastpathAV.index(rtid); idx != -1 {
+					fn.fe = fastpathAV[idx].encfn
+					fn.fd = fastpathAV[idx].decfn
+					fi.addrD = true
+					fi.addrF = false
+				}
+			} else {
+				// use mapping for underlying type if there
+				var rtu reflect.Type
+				if rk == reflect.Map {
+					rtu = reflect.MapOf(ti.key, ti.elem)
+				} else {
+					rtu = reflect.SliceOf(ti.elem)
+				}
+				rtuid := rt2id(rtu)
+				if idx := fastpathAV.index(rtuid); idx != 
-1 { + xfnf := fastpathAV[idx].encfn + xrt := fastpathAV[idx].rt + fn.fe = func(e *Encoder, xf *codecFnInfo, xrv reflect.Value) { + xfnf(e, xf, xrv.Convert(xrt)) + } + fi.addrD = true + fi.addrF = false // meaning it can be an address(ptr) or a value + xfnf2 := fastpathAV[idx].decfn + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + if xrv.Kind() == reflect.Ptr { + xfnf2(d, xf, xrv.Convert(reflect.PtrTo(xrt))) + } else { + xfnf2(d, xf, xrv.Convert(xrt)) + } + } + } + } + } + if fn.fe == nil && fn.fd == nil { + switch rk { + case reflect.Bool: + fn.fe = (*Encoder).kBool + fn.fd = (*Decoder).kBool + case reflect.String: + fn.fe = (*Encoder).kString + fn.fd = (*Decoder).kString + case reflect.Int: + fn.fd = (*Decoder).kInt + fn.fe = (*Encoder).kInt + case reflect.Int8: + fn.fe = (*Encoder).kInt8 + fn.fd = (*Decoder).kInt8 + case reflect.Int16: + fn.fe = (*Encoder).kInt16 + fn.fd = (*Decoder).kInt16 + case reflect.Int32: + fn.fe = (*Encoder).kInt32 + fn.fd = (*Decoder).kInt32 + case reflect.Int64: + fn.fe = (*Encoder).kInt64 + fn.fd = (*Decoder).kInt64 + case reflect.Uint: + fn.fd = (*Decoder).kUint + fn.fe = (*Encoder).kUint + case reflect.Uint8: + fn.fe = (*Encoder).kUint8 + fn.fd = (*Decoder).kUint8 + case reflect.Uint16: + fn.fe = (*Encoder).kUint16 + fn.fd = (*Decoder).kUint16 + case reflect.Uint32: + fn.fe = (*Encoder).kUint32 + fn.fd = (*Decoder).kUint32 + case reflect.Uint64: + fn.fe = (*Encoder).kUint64 + fn.fd = (*Decoder).kUint64 + case reflect.Uintptr: + fn.fe = (*Encoder).kUintptr + fn.fd = (*Decoder).kUintptr + case reflect.Float32: + fn.fe = (*Encoder).kFloat32 + fn.fd = (*Decoder).kFloat32 + case reflect.Float64: + fn.fe = (*Encoder).kFloat64 + fn.fd = (*Decoder).kFloat64 + case reflect.Invalid: + fn.fe = (*Encoder).kInvalid + fn.fd = (*Decoder).kErr + case reflect.Chan: + fi.seq = seqTypeChan + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Slice: + fi.seq = seqTypeSlice + fn.fe = (*Encoder).kSlice + fn.fd = (*Decoder).kSlice + case reflect.Array: + fi.seq = seqTypeArray + fn.fe = (*Encoder).kSlice + fi.addrF = false + fi.addrD = false + rt2 := reflect.SliceOf(ti.elem) + fn.fd = func(d *Decoder, xf *codecFnInfo, xrv reflect.Value) { + d.cfer().get(rt2, true, false).fd(d, xf, xrv.Slice(0, xrv.Len())) + } + // fn.fd = (*Decoder).kArray + case reflect.Struct: + if ti.anyOmitEmpty { + fn.fe = (*Encoder).kStruct + } else { + fn.fe = (*Encoder).kStructNoOmitempty + } + fn.fd = (*Decoder).kStruct + case reflect.Map: + fn.fe = (*Encoder).kMap + fn.fd = (*Decoder).kMap + case reflect.Interface: + // encode: reflect.Interface are handled already by preEncodeValue + fn.fd = (*Decoder).kInterface + fn.fe = (*Encoder).kErr + default: + // reflect.Ptr and reflect.Interface are handled already by preEncodeValue + fn.fe = (*Encoder).kErr + fn.fd = (*Decoder).kErr + } + } + } + return +} + +type codecFnPooler struct { + cf *codecFner + cfp *sync.Pool + hh Handle +} + +func (d *codecFnPooler) cfer() *codecFner { + if d.cf == nil { + var v interface{} + d.cfp, v = pool.codecFner() + d.cf = v.(*codecFner) + d.cf.reset(d.hh) + } + return d.cf +} + +func (d *codecFnPooler) alwaysAtEnd() { + if d.cf != nil { + d.cfp.Put(d.cf) + d.cf, d.cfp = nil, nil + } +} + +// ---- + +// these "checkOverflow" functions must be inlinable, and not call anybody. +// Overflow means that the value cannot be represented without wrapping/overflow. +// Overflow=false does not mean that the value can be represented without losing precision +// (especially for floating point). 
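The Uint/Int checks below rely on a shift round-trip: pushing the value to the top of the 64-bit word and back drops exactly the bits that do not fit, so any change means overflow. Worked through for bitsize 8 (a sketch, using the same expressions as the methods):

    v := int64(-129)                     // does not fit in int8
    trunc := (v << (64 - 8)) >> (64 - 8) // arithmetic shift sign-extends: trunc == 127
    overflow := v != trunc               // true

    v = int64(127)          // fits in int8
    trunc = (v << 56) >> 56 // trunc == 127
    overflow = v != trunc   // false
    _ = overflow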
+
+type checkOverflow struct{}
+
+// func (checkOverflow) Float16(f float64) (overflow bool) {
+//	panicv.errorf("unimplemented")
+//	if f < 0 {
+//		f = -f
+//	}
+//	return math.MaxFloat32 < f && f <= math.MaxFloat64
+// }
+
+func (checkOverflow) Float32(v float64) (overflow bool) {
+	if v < 0 {
+		v = -v
+	}
+	return math.MaxFloat32 < v && v <= math.MaxFloat64
+}
+func (checkOverflow) Uint(v uint64, bitsize uint8) (overflow bool) {
+	if bitsize == 0 || bitsize >= 64 || v == 0 {
+		return
+	}
+	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+		overflow = true
+	}
+	return
+}
+func (checkOverflow) Int(v int64, bitsize uint8) (overflow bool) {
+	if bitsize == 0 || bitsize >= 64 || v == 0 {
+		return
+	}
+	if trunc := (v << (64 - bitsize)) >> (64 - bitsize); v != trunc {
+		overflow = true
+	}
+	return
+}
+func (checkOverflow) SignedInt(v uint64) (overflow bool) {
+	// e.g. -128 to 127 for int8
+	pos := (v >> 63) == 0
+	ui2 := v & 0x7fffffffffffffff
+	if pos {
+		if ui2 > math.MaxInt64 {
+			overflow = true
+		}
+	} else {
+		if ui2 > math.MaxInt64-1 {
+			overflow = true
+		}
+	}
+	return
+}
+
+func (x checkOverflow) Float32V(v float64) float64 {
+	if x.Float32(v) {
+		panicv.errorf("float32 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) UintV(v uint64, bitsize uint8) uint64 {
+	if x.Uint(v, bitsize) {
+		panicv.errorf("uint64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) IntV(v int64, bitsize uint8) int64 {
+	if x.Int(v, bitsize) {
+		panicv.errorf("int64 overflow: %v", v)
+	}
+	return v
+}
+func (x checkOverflow) SignedIntV(v uint64) int64 {
+	if x.SignedInt(v) {
+		panicv.errorf("uint64 to int64 overflow: %v", v)
+	}
+	return int64(v)
+}
+
+// ------------------ SORT -----------------
+
+func isNaN(f float64) bool { return f != f }
+
+// -----------------------
+
+type ioFlusher interface {
+	Flush() error
+}
+
+type ioPeeker interface {
+	Peek(int) ([]byte, error)
+}
+
+type ioBuffered interface {
+	Buffered() int
+}
+
+// -----------------------
+
+type intSlice []int64
+type uintSlice []uint64
+
+// type uintptrSlice []uintptr
+type floatSlice []float64
+type boolSlice []bool
+type stringSlice []string
+
+// type bytesSlice [][]byte
+
+func (p intSlice) Len() int           { return len(p) }
+func (p intSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p intSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p uintSlice) Len() int           { return len(p) }
+func (p uintSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uintSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// func (p uintptrSlice) Len() int           { return len(p) }
+// func (p uintptrSlice) Less(i, j int) bool { return p[i] < p[j] }
+// func (p uintptrSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p floatSlice) Len() int { return len(p) }
+func (p floatSlice) Less(i, j int) bool {
+	return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j])
+}
+func (p floatSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p stringSlice) Len() int           { return len(p) }
+func (p stringSlice) Less(i, j int) bool { return p[i] < p[j] }
+func (p stringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// func (p bytesSlice) Len() int           { return len(p) }
+// func (p bytesSlice) Less(i, j int) bool { return bytes.Compare(p[i], p[j]) == -1 }
+// func (p bytesSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p boolSlice) Len() int           { return len(p) }
+func (p boolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p boolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// ---------------------
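The extra isNaN clause in floatSlice.Less above (and in floatRvSlice.Less below) orders NaNs before every other value, which keeps the comparator consistent where `<` alone would not be, since NaN compares false against everything. A sketch, assuming math and sort are imported:

    vals := floatSlice{3, math.NaN(), 1}
    sort.Sort(vals) // yields NaN, 1, 3 — a well-defined order despite the NaN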
+ +type intRv struct { + v int64 + r reflect.Value +} +type intRvSlice []intRv +type uintRv struct { + v uint64 + r reflect.Value +} +type uintRvSlice []uintRv +type floatRv struct { + v float64 + r reflect.Value +} +type floatRvSlice []floatRv +type boolRv struct { + v bool + r reflect.Value +} +type boolRvSlice []boolRv +type stringRv struct { + v string + r reflect.Value +} +type stringRvSlice []stringRv +type bytesRv struct { + v []byte + r reflect.Value +} +type bytesRvSlice []bytesRv +type timeRv struct { + v time.Time + r reflect.Value +} +type timeRvSlice []timeRv + +func (p intRvSlice) Len() int { return len(p) } +func (p intRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p intRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p uintRvSlice) Len() int { return len(p) } +func (p uintRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p uintRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p floatRvSlice) Len() int { return len(p) } +func (p floatRvSlice) Less(i, j int) bool { + return p[i].v < p[j].v || isNaN(p[i].v) && !isNaN(p[j].v) +} +func (p floatRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p stringRvSlice) Len() int { return len(p) } +func (p stringRvSlice) Less(i, j int) bool { return p[i].v < p[j].v } +func (p stringRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p bytesRvSlice) Len() int { return len(p) } +func (p bytesRvSlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p boolRvSlice) Len() int { return len(p) } +func (p boolRvSlice) Less(i, j int) bool { return !p[i].v && p[j].v } +func (p boolRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p timeRvSlice) Len() int { return len(p) } +func (p timeRvSlice) Less(i, j int) bool { return p[i].v.Before(p[j].v) } +func (p timeRvSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type bytesI struct { + v []byte + i interface{} +} + +type bytesISlice []bytesI + +func (p bytesISlice) Len() int { return len(p) } +func (p bytesISlice) Less(i, j int) bool { return bytes.Compare(p[i].v, p[j].v) == -1 } +func (p bytesISlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ----------------- + +type set []uintptr + +func (s *set) add(v uintptr) (exists bool) { + // e.ci is always nil, or len >= 1 + x := *s + if x == nil { + x = make([]uintptr, 1, 8) + x[0] = v + *s = x + return + } + // typically, length will be 1. make this perform. + if len(x) == 1 { + if j := x[0]; j == 0 { + x[0] = v + } else if j == v { + exists = true + } else { + x = append(x, v) + *s = x + } + return + } + // check if it exists + for _, j := range x { + if j == v { + exists = true + return + } + } + // try to replace a "deleted" slot + for i, j := range x { + if j == 0 { + x[i] = v + return + } + } + // if unable to replace deleted slot, just append it. + x = append(x, v) + *s = x + return +} + +func (s *set) remove(v uintptr) (exists bool) { + x := *s + if len(x) == 0 { + return + } + if len(x) == 1 { + if x[0] == v { + x[0] = 0 + } + return + } + for i, j := range x { + if j == v { + exists = true + x[i] = 0 // set it to 0, as way to delete it. + // copy(x[i:], x[i+1:]) + // x = x[:len(x)-1] + return + } + } + return +} + +// ------ + +// bitset types are better than [256]bool, because they permit the whole +// bitset array being on a single cache line and use less memory. 
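Worked through the byte/bit indexing used by the bitset types defined just below: for pos = 77, the byte index is 77>>3 == 9 and the bit index is 77&7 == 5, so set stores x[9] |= 1<<5 and isset tests that same bit. As a sketch:

    var bs bitset256
    bs.set(77)         // x[9] |= 0x20
    ok := bs.isset(77) // true
    _ = ok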
+ +// given x > 0 and n > 0 and x is exactly 2^n, then pos/x === pos>>n AND pos%x === pos&(x-1). +// consequently, pos/32 === pos>>5, pos/16 === pos>>4, pos/8 === pos>>3, pos%8 == pos&7 + +type bitset256 [32]byte + +func (x *bitset256) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} +func (x *bitset256) issetv(pos byte) byte { + return x[pos>>3] & (1 << (pos & 7)) +} +func (x *bitset256) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset256) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +type bitset128 [16]byte + +func (x *bitset128) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} +func (x *bitset128) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset128) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +type bitset32 [4]byte + +func (x *bitset32) isset(pos byte) bool { + return x[pos>>3]&(1<<(pos&7)) != 0 +} +func (x *bitset32) set(pos byte) { + x[pos>>3] |= (1 << (pos & 7)) +} + +// func (x *bitset32) unset(pos byte) { +// x[pos>>3] &^= (1 << (pos & 7)) +// } + +// type bit2set256 [64]byte + +// func (x *bit2set256) set(pos byte, v1, v2 bool) { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// if v1 { +// x[pos>>2] |= 1 << (pos2 + 1) +// } +// if v2 { +// x[pos>>2] |= 1 << pos2 +// } +// } +// func (x *bit2set256) get(pos byte) uint8 { +// var pos2 uint8 = (pos & 3) << 1 // returning 0, 2, 4 or 6 +// return x[pos>>2] << (6 - pos2) >> 6 // 11000000 -> 00000011 +// } + +// ------------ + +type pooler struct { + dn sync.Pool // for decNaked + cfn sync.Pool // for codecFner + tiload sync.Pool + strRv8, strRv16, strRv32, strRv64, strRv128 sync.Pool // for stringRV +} + +func (p *pooler) init() { + p.strRv8.New = func() interface{} { return new([8]stringRv) } + p.strRv16.New = func() interface{} { return new([16]stringRv) } + p.strRv32.New = func() interface{} { return new([32]stringRv) } + p.strRv64.New = func() interface{} { return new([64]stringRv) } + p.strRv128.New = func() interface{} { return new([128]stringRv) } + p.dn.New = func() interface{} { x := new(decNaked); x.init(); return x } + p.tiload.New = func() interface{} { return new(typeInfoLoadArray) } + p.cfn.New = func() interface{} { return new(codecFner) } +} + +func (p *pooler) stringRv8() (sp *sync.Pool, v interface{}) { + return &p.strRv8, p.strRv8.Get() +} +func (p *pooler) stringRv16() (sp *sync.Pool, v interface{}) { + return &p.strRv16, p.strRv16.Get() +} +func (p *pooler) stringRv32() (sp *sync.Pool, v interface{}) { + return &p.strRv32, p.strRv32.Get() +} +func (p *pooler) stringRv64() (sp *sync.Pool, v interface{}) { + return &p.strRv64, p.strRv64.Get() +} +func (p *pooler) stringRv128() (sp *sync.Pool, v interface{}) { + return &p.strRv128, p.strRv128.Get() +} +func (p *pooler) decNaked() (sp *sync.Pool, v interface{}) { + return &p.dn, p.dn.Get() +} +func (p *pooler) codecFner() (sp *sync.Pool, v interface{}) { + return &p.cfn, p.cfn.Get() +} +func (p *pooler) tiLoad() (sp *sync.Pool, v interface{}) { + return &p.tiload, p.tiload.Get() +} + +// func (p *pooler) decNaked() (v *decNaked, f func(*decNaked) ) { +// sp := &(p.dn) +// vv := sp.Get() +// return vv.(*decNaked), func(x *decNaked) { sp.Put(vv) } +// } +// func (p *pooler) decNakedGet() (v interface{}) { +// return p.dn.Get() +// } +// func (p *pooler) codecFnerGet() (v interface{}) { +// return p.cfn.Get() +// } +// func (p *pooler) tiLoadGet() (v interface{}) { +// return p.tiload.Get() +// } +// func (p *pooler) decNakedPut(v interface{}) { +// 
p.dn.Put(v) +// } +// func (p *pooler) codecFnerPut(v interface{}) { +// p.cfn.Put(v) +// } +// func (p *pooler) tiLoadPut(v interface{}) { +// p.tiload.Put(v) +// } + +type panicHdl struct{} + +func (panicHdl) errorv(err error) { + if err != nil { + panic(err) + } +} + +func (panicHdl) errorstr(message string) { + if message != "" { + panic(message) + } +} + +func (panicHdl) errorf(format string, params ...interface{}) { + if format != "" { + if len(params) == 0 { + panic(format) + } else { + panic(fmt.Sprintf(format, params...)) + } + } +} + +type errstrDecorator interface { + wrapErrstr(interface{}, *error) +} + +type errstrDecoratorDef struct{} + +func (errstrDecoratorDef) wrapErrstr(v interface{}, e *error) { *e = fmt.Errorf("%v", v) } + +type must struct{} + +func (must) String(s string, err error) string { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Int(s int64, err error) int64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Uint(s uint64, err error) uint64 { + if err != nil { + panicv.errorv(err) + } + return s +} +func (must) Float(s float64, err error) float64 { + if err != nil { + panicv.errorv(err) + } + return s +} + +// xdebugf prints the message in red on the terminal. +// Use it in place of fmt.Printf (which it calls internally) +func xdebugf(pattern string, args ...interface{}) { + var delim string + if len(pattern) > 0 && pattern[len(pattern)-1] != '\n' { + delim = "\n" + } + fmt.Printf("\033[1;31m"+pattern+delim+"\033[0m", args...) +} + +// func isImmutableKind(k reflect.Kind) (v bool) { +// return false || +// k == reflect.Int || +// k == reflect.Int8 || +// k == reflect.Int16 || +// k == reflect.Int32 || +// k == reflect.Int64 || +// k == reflect.Uint || +// k == reflect.Uint8 || +// k == reflect.Uint16 || +// k == reflect.Uint32 || +// k == reflect.Uint64 || +// k == reflect.Uintptr || +// k == reflect.Float32 || +// k == reflect.Float64 || +// k == reflect.Bool || +// k == reflect.String +// } + +// func timeLocUTCName(tzint int16) string { +// if tzint == 0 { +// return "UTC" +// } +// var tzname = []byte("UTC+00:00") +// //tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below. +// //tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first +// var tzhr, tzmin int16 +// if tzint < 0 { +// tzname[3] = '-' // (TODO: verify. this works here) +// tzhr, tzmin = -tzint/60, (-tzint)%60 +// } else { +// tzhr, tzmin = tzint/60, tzint%60 +// } +// tzname[4] = timeDigits[tzhr/10] +// tzname[5] = timeDigits[tzhr%10] +// tzname[7] = timeDigits[tzmin/10] +// tzname[8] = timeDigits[tzmin%10] +// return string(tzname) +// //return time.FixedZone(string(tzname), int(tzint)*60) +// } diff --git a/vendor/github.com/ugorji/go/codec/helper_internal.go b/vendor/github.com/ugorji/go/codec/helper_internal.go new file mode 100644 index 0000000000..0cbd665e25 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/helper_internal.go @@ -0,0 +1,121 @@ +// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +// All non-std package dependencies live in this file, +// so porting to different environment is easy (just update functions). 
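The growth policy of growCap, defined later in this file, can be sanity-checked by hand (assuming unit == 1, i.e. a byte slice, and num == 0):

    // oldCap 4096:  t1,t2,t3 = 4096,8192,16384; oldCap <= t1, so x = 8
    //               newCap = 8*4096/4 = 8192 (already a multiple of 64)
    // oldCap 20000: oldCap > t3, so x = 5
    //               newCap = 5*20000/4 = 25000, rounded up to 25024 (multiple of 64)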
+
+func pruneSignExt(v []byte, pos bool) (n int) {
+	if len(v) < 2 {
+	} else if pos && v[0] == 0 {
+		for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
+		}
+	} else if !pos && v[0] == 0xff {
+		for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
+		}
+	}
+	return
+}
+
+// validate that this function is correct ...
+// culled from OGRE (Object-Oriented Graphics Rendering Engine)
+// function: halfToFloatI (http://stderr.org/doc/ogre-doc/api/OgreBitwise_8h-source.html)
+func halfFloatToFloatBits(yy uint16) (d uint32) {
+	y := uint32(yy)
+	s := (y >> 15) & 0x01
+	e := (y >> 10) & 0x1f
+	m := y & 0x03ff
+
+	if e == 0 {
+		if m == 0 { // plus or minus 0
+			return s << 31
+		}
+		// Denormalized number -- renormalize it
+		for (m & 0x00000400) == 0 {
+			m <<= 1
+			e -= 1
+		}
+		e += 1
+		const zz uint32 = 0x0400
+		m &= ^zz
+	} else if e == 31 {
+		if m == 0 { // Inf
+			return (s << 31) | 0x7f800000
+		}
+		return (s << 31) | 0x7f800000 | (m << 13) // NaN
+	}
+	e = e + (127 - 15)
+	m = m << 13
+	return (s << 31) | (e << 23) | m
+}
+
+// growCap will return a new capacity for a slice, given the following:
+//   - oldCap: current capacity
+//   - unit: in-memory size of an element
+//   - num: number of elements to add
+func growCap(oldCap, unit, num int) (newCap int) {
+	// appendslice logic (if cap < 1024, *2, else *1.25):
+	//   leads to many copy calls, especially when copying bytes.
+	// bytes.Buffer model (2*cap + n): much better for bytes.
+	// smarter way is to take the byte-size of the appended element(type) into account
+
+	// maintain 3 thresholds:
+	// t1: if cap <= t1, newcap = 2x
+	// t2: if cap <= t2, newcap = 1.75x
+	// t3: if cap <= t3, newcap = 1.5x
+	//     else          newcap = 1.25x
+	//
+	// t1, t2, t3 >= 1024 always.
+	// i.e. if unit size >= 16, then always do 2x or 1.25x (ie t1, t2, t3 are all same)
+	//
+	// With this, appending for bytes increase by:
+	//    100% up to 4K
+	//     75% up to 8K
+	//     50% up to 16K
+	//     25% beyond that
+
+	// unit can be 0 e.g. for struct{}{}; handle that appropriately
+	var t1, t2, t3 int // thresholds
+	if unit <= 1 {
+		t1, t2, t3 = 4*1024, 8*1024, 16*1024
+	} else if unit < 16 {
+		t3 = 16 / unit * 1024
+		t1 = t3 * 1 / 4
+		t2 = t3 * 2 / 4
+	} else {
+		t1, t2, t3 = 1024, 1024, 1024
+	}
+
+	var x int // temporary variable
+
+	// x is multiplier here: one of 5, 6, 7 or 8; incr of 25%, 50%, 75% or 100% respectively
+	if oldCap <= t1 { // [0,t1]
+		x = 8
+	} else if oldCap > t3 { // (t3,infinity]
+		x = 5
+	} else if oldCap <= t2 { // (t1,t2]
+		x = 7
+	} else { // (t2,t3]
+		x = 6
+	}
+	newCap = x * oldCap / 4
+
+	if num > 0 {
+		newCap += num
+	}
+
+	// ensure newCap is a multiple of 64 (if it is > 64) or 16.
+	if newCap > 64 {
+		if x = newCap % 64; x != 0 {
+			x = newCap / 64
+			newCap = 64 * (x + 1)
+		}
+	} else {
+		if x = newCap % 16; x != 0 {
+			x = newCap / 16
+			newCap = 16 * (x + 1)
+		}
+	}
+	return
+}
diff --git a/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
new file mode 100644
index 0000000000..fd52690c94
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_not_unsafe.go
@@ -0,0 +1,272 @@
+// +build !go1.7 safe appengine
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"reflect"
+	"sync/atomic"
+	"time"
+)
+
+const safeMode = true
+
+// stringView returns a view of the []byte as a string.
+// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func stringView(v []byte) string { + return string(v) +} + +// bytesView returns a view of the string as a []byte. +// In unsafe mode, it doesn't incur allocation and copying caused by conversion. +// In regular safe mode, it is an allocation and copy. +// +// Usage: Always maintain a reference to v while result of this call is in use, +// and call keepAlive4BytesView(v) at point where done with view. +func bytesView(v string) []byte { + return []byte(v) +} + +func definitelyNil(v interface{}) bool { + // this is a best-effort option. + // We just return false, so we don't unnecessarily incur the cost of reflection this early. + return false +} + +func rv2i(rv reflect.Value) interface{} { + return rv.Interface() +} + +func rt2id(rt reflect.Type) uintptr { + return reflect.ValueOf(rt).Pointer() +} + +func rv2rtid(rv reflect.Value) uintptr { + return reflect.ValueOf(rv.Type()).Pointer() +} + +func i2rtid(i interface{}) uintptr { + return reflect.ValueOf(reflect.TypeOf(i)).Pointer() +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if deref { + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return v.IsNil() + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + } + return false +} + +// -------------------------- +// type ptrToRvMap struct{} + +// func (*ptrToRvMap) init() {} +// func (*ptrToRvMap) get(i interface{}) reflect.Value { +// return reflect.ValueOf(i).Elem() +// } + +// -------------------------- +type atomicTypeInfoSlice struct { // expected to be 2 words + v atomic.Value +} + +func (x *atomicTypeInfoSlice) load() []rtid2ti { + i := x.v.Load() + if i == nil { + return nil + } + return i.([]rtid2ti) +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + x.v.Store(p) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + rv.SetBytes(d.rawBytes()) +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + rv.SetString(d.d.DecodeString()) +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + rv.SetBool(d.d.DecodeBool()) +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + rv.Set(reflect.ValueOf(d.d.DecodeTime())) +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + rv.SetFloat(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + rv.SetFloat(d.d.DecodeFloat64()) +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, 
rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + rv.SetInt(d.d.DecodeInt64()) +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + rv.SetUint(d.d.DecodeUint64()) +} + +// ---------------- + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeBool(rv.Bool()) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeTime(rv2i(rv).(time.Time)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeString(cUTF8, rv.String()) +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat64(rv.Float()) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeFloat32(float32(rv.Float())) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeInt(rv.Int()) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + e.e.EncodeUint(rv.Uint()) +} + +// // keepAlive4BytesView maintains a reference to the input parameter for bytesView. +// // +// // Usage: call this at point where done with the bytes view. +// func keepAlive4BytesView(v string) {} + +// // keepAlive4BytesView maintains a reference to the input parameter for stringView. +// // +// // Usage: call this at point where done with the string view. 
+// func keepAlive4StringView(v []byte) {}
+
+// func definitelyNil(v interface{}) bool {
+//	rv := reflect.ValueOf(v)
+//	switch rv.Kind() {
+//	case reflect.Invalid:
+//		return true
+//	case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Slice, reflect.Map, reflect.Func:
+//		return rv.IsNil()
+//	default:
+//		return false
+//	}
+// }
diff --git a/vendor/github.com/ugorji/go/codec/helper_unsafe.go b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
new file mode 100644
index 0000000000..e3df60abea
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/helper_unsafe.go
@@ -0,0 +1,639 @@
+// +build !safe
+// +build !appengine
+// +build go1.7
+
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+import (
+	"reflect"
+	"sync/atomic"
+	"time"
+	"unsafe"
+)
+
+// This file has unsafe variants of some helper methods.
+// NOTE: See helper_not_unsafe.go for the usage information.
+
+// var zeroRTv [4]uintptr
+
+const safeMode = false
+const unsafeFlagIndir = 1 << 7 // keep in sync with GO_ROOT/src/reflect/value.go
+
+type unsafeString struct {
+	Data unsafe.Pointer
+	Len  int
+}
+
+type unsafeSlice struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+type unsafeIntf struct {
+	typ  unsafe.Pointer
+	word unsafe.Pointer
+}
+
+type unsafeReflectValue struct {
+	typ  unsafe.Pointer
+	ptr  unsafe.Pointer
+	flag uintptr
+}
+
+func stringView(v []byte) string {
+	if len(v) == 0 {
+		return ""
+	}
+	bx := (*unsafeSlice)(unsafe.Pointer(&v))
+	return *(*string)(unsafe.Pointer(&unsafeString{bx.Data, bx.Len}))
+}
+
+func bytesView(v string) []byte {
+	if len(v) == 0 {
+		return zeroByteSlice
+	}
+	sx := (*unsafeString)(unsafe.Pointer(&v))
+	return *(*[]byte)(unsafe.Pointer(&unsafeSlice{sx.Data, sx.Len, sx.Len}))
+}
+
+func definitelyNil(v interface{}) bool {
+	// There is no global way of checking if an interface is nil.
+	// For true references (map, ptr, func, chan), you can just look
+	// at the word of the interface. However, for slices, you have to dereference
+	// the word, and get a pointer to the 3-word interface value.
+	//
+	// However, the following are cheap calls
+	// - TypeOf(interface): cheap 2-line call.
+	// - ValueOf(interface{}): expensive
+	// - type.Kind: cheap call through an interface
+	// - Value.Type(): cheap call
+	//   except it's a method value (e.g. r.Read, which implies that it is a Func)
+
+	return ((*unsafeIntf)(unsafe.Pointer(&v))).word == nil
+}
+
+func rv2i(rv reflect.Value) interface{} {
+	// TODO: consider a more generally-known optimization for reflect.Value ==> Interface
+	//
+	// Currently, we use this fragile method that taps into implementation details from
+	// the source go stdlib reflect/value.go, and trims the implementation.
+ + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + // true references (map, func, chan, ptr - NOT slice) may be double-referenced as flagIndir + var ptr unsafe.Pointer + if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { + ptr = *(*unsafe.Pointer)(urv.ptr) + } else { + ptr = urv.ptr + } + return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +} + +func rt2id(rt reflect.Type) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +} + +func rv2rtid(rv reflect.Value) uintptr { + return uintptr((*unsafeReflectValue)(unsafe.Pointer(&rv)).typ) +} + +func i2rtid(i interface{}) uintptr { + return uintptr(((*unsafeIntf)(unsafe.Pointer(&i))).typ) +} + +// -------------------------- + +func isEmptyValue(v reflect.Value, tinfos *TypeInfos, deref, checkStruct bool) bool { + urv := (*unsafeReflectValue)(unsafe.Pointer(&v)) + if urv.flag == 0 { + return true + } + switch v.Kind() { + case reflect.Invalid: + return true + case reflect.String: + return (*unsafeString)(urv.ptr).Len == 0 + case reflect.Slice: + return (*unsafeSlice)(urv.ptr).Len == 0 + case reflect.Bool: + return !*(*bool)(urv.ptr) + case reflect.Int: + return *(*int)(urv.ptr) == 0 + case reflect.Int8: + return *(*int8)(urv.ptr) == 0 + case reflect.Int16: + return *(*int16)(urv.ptr) == 0 + case reflect.Int32: + return *(*int32)(urv.ptr) == 0 + case reflect.Int64: + return *(*int64)(urv.ptr) == 0 + case reflect.Uint: + return *(*uint)(urv.ptr) == 0 + case reflect.Uint8: + return *(*uint8)(urv.ptr) == 0 + case reflect.Uint16: + return *(*uint16)(urv.ptr) == 0 + case reflect.Uint32: + return *(*uint32)(urv.ptr) == 0 + case reflect.Uint64: + return *(*uint64)(urv.ptr) == 0 + case reflect.Uintptr: + return *(*uintptr)(urv.ptr) == 0 + case reflect.Float32: + return *(*float32)(urv.ptr) == 0 + case reflect.Float64: + return *(*float64)(urv.ptr) == 0 + case reflect.Interface: + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Ptr: + // isnil := urv.ptr == nil (not sufficient, as a pointer value encodes the type) + isnil := urv.ptr == nil || *(*unsafe.Pointer)(urv.ptr) == nil + if deref { + if isnil { + return true + } + return isEmptyValue(v.Elem(), tinfos, deref, checkStruct) + } + return isnil + case reflect.Struct: + return isEmptyStruct(v, tinfos, deref, checkStruct) + case reflect.Map, reflect.Array, reflect.Chan: + return v.Len() == 0 + } + return false +} + +// -------------------------- + +// atomicTypeInfoSlice contains length and pointer to the array for a slice. +// It is expected to be 2 words. +// +// Previously, we atomically loaded and stored the length and array pointer separately, +// which could lead to some races. +// We now just atomically store and load the pointer to the value directly. 
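The unsafe variant below and the atomic.Value-based safe variant earlier implement the same read-mostly discipline: readers take one lock-free load of an immutable slice, and the single writer (under TypeInfos.mu) publishes a fresh slice rather than mutating the old one in place. The shape of a reader, as a hypothetical convenience wrapper over the existing load and find helpers:

    func (x *TypeInfos) lookup(rtid uintptr) (ti *typeInfo) {
    	if s := x.infos.load(); s != nil { // lock-free snapshot
    		_, ti = x.find(s, rtid) // binary search over the immutable slice
    	}
    	return
    }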
+ +type atomicTypeInfoSlice struct { // expected to be 2 words + l int // length of the data array (must be first in struct, for 64-bit alignment necessary for 386) + v unsafe.Pointer // data array - Pointer (not uintptr) to maintain GC reference +} + +func (x *atomicTypeInfoSlice) load() []rtid2ti { + xp := unsafe.Pointer(x) + x2 := *(*atomicTypeInfoSlice)(atomic.LoadPointer(&xp)) + if x2.l == 0 { + return nil + } + return *(*[]rtid2ti)(unsafe.Pointer(&unsafeSlice{Data: x2.v, Len: x2.l, Cap: x2.l})) +} + +func (x *atomicTypeInfoSlice) store(p []rtid2ti) { + s := (*unsafeSlice)(unsafe.Pointer(&p)) + xp := unsafe.Pointer(x) + atomic.StorePointer(&xp, unsafe.Pointer(&atomicTypeInfoSlice{l: s.Len, v: s.Data})) +} + +// -------------------------- +func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*[]byte)(urv.ptr) = d.rawBytes() +} + +func (d *Decoder) kString(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*string)(urv.ptr) = d.d.DecodeString() +} + +func (d *Decoder) kBool(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*bool)(urv.ptr) = d.d.DecodeBool() +} + +func (d *Decoder) kTime(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*time.Time)(urv.ptr) = d.d.DecodeTime() +} + +func (d *Decoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + fv := d.d.DecodeFloat64() + if chkOvf.Float32(fv) { + d.errorf("float32 overflow: %v", fv) + } + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float32)(urv.ptr) = float32(fv) +} + +func (d *Decoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*float64)(urv.ptr) = d.d.DecodeFloat64() +} + +func (d *Decoder) kInt(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int)(urv.ptr) = int(chkOvf.IntV(d.d.DecodeInt64(), intBitsize)) +} + +func (d *Decoder) kInt8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int8)(urv.ptr) = int8(chkOvf.IntV(d.d.DecodeInt64(), 8)) +} + +func (d *Decoder) kInt16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int16)(urv.ptr) = int16(chkOvf.IntV(d.d.DecodeInt64(), 16)) +} + +func (d *Decoder) kInt32(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int32)(urv.ptr) = int32(chkOvf.IntV(d.d.DecodeInt64(), 32)) +} + +func (d *Decoder) kInt64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*int64)(urv.ptr) = d.d.DecodeInt64() +} + +func (d *Decoder) kUint(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint)(urv.ptr) = uint(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uintptr)(urv.ptr) = uintptr(chkOvf.UintV(d.d.DecodeUint64(), uintBitsize)) +} + +func (d *Decoder) kUint8(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint8)(urv.ptr) = uint8(chkOvf.UintV(d.d.DecodeUint64(), 8)) +} + +func (d *Decoder) kUint16(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint16)(urv.ptr) = uint16(chkOvf.UintV(d.d.DecodeUint64(), 16)) +} + +func (d *Decoder) kUint32(f *codecFnInfo, rv reflect.Value) { + urv := 
(*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint32)(urv.ptr) = uint32(chkOvf.UintV(d.d.DecodeUint64(), 32)) +} + +func (d *Decoder) kUint64(f *codecFnInfo, rv reflect.Value) { + urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + *(*uint64)(urv.ptr) = d.d.DecodeUint64() +} + +// ------------ + +func (e *Encoder) kBool(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeBool(*(*bool)(v.ptr)) +} + +func (e *Encoder) kTime(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeTime(*(*time.Time)(v.ptr)) +} + +func (e *Encoder) kString(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeString(cUTF8, *(*string)(v.ptr)) +} + +func (e *Encoder) kFloat64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat64(*(*float64)(v.ptr)) +} + +func (e *Encoder) kFloat32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeFloat32(*(*float32)(v.ptr)) +} + +func (e *Encoder) kInt(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int)(v.ptr))) +} + +func (e *Encoder) kInt8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int8)(v.ptr))) +} + +func (e *Encoder) kInt16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int16)(v.ptr))) +} + +func (e *Encoder) kInt32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int32)(v.ptr))) +} + +func (e *Encoder) kInt64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeInt(int64(*(*int64)(v.ptr))) +} + +func (e *Encoder) kUint(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint)(v.ptr))) +} + +func (e *Encoder) kUint8(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint8)(v.ptr))) +} + +func (e *Encoder) kUint16(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint16)(v.ptr))) +} + +func (e *Encoder) kUint32(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint32)(v.ptr))) +} + +func (e *Encoder) kUint64(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uint64)(v.ptr))) +} + +func (e *Encoder) kUintptr(f *codecFnInfo, rv reflect.Value) { + v := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + e.e.EncodeUint(uint64(*(*uintptr)(v.ptr))) +} + +// ------------ + +// func (d *Decoder) raw(f *codecFnInfo, rv reflect.Value) { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // if urv.flag&unsafeFlagIndir != 0 { +// // urv.ptr = *(*unsafe.Pointer)(urv.ptr) +// // } +// *(*[]byte)(urv.ptr) = d.rawBytes() +// } + +// func rv0t(rt reflect.Type) reflect.Value { +// ut := (*unsafeIntf)(unsafe.Pointer(&rt)) +// // we need to determine whether ifaceIndir, and then whether to just pass 0 as the ptr +// uv := unsafeReflectValue{ut.word, &zeroRTv, flag(rt.Kind())} +// return *(*reflect.Value)(unsafe.Pointer(&uv}) +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // true references (map, 
func, chan, ptr - NOT slice) may be double-referenced as flagIndir +// var ptr unsafe.Pointer +// // kk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (kk == reflect.Map || kk == reflect.Ptr || kk == reflect.Chan || kk == reflect.Func) && urv.flag&unsafeFlagIndir != 0 { +// if refBitset.isset(byte(urv.flag&(1<<5-1))) && urv.flag&unsafeFlagIndir != 0 { +// ptr = *(*unsafe.Pointer)(urv.ptr) +// } else { +// ptr = urv.ptr +// } +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{typ: urv.typ, word: ptr})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// func definitelyNil(v interface{}) bool { +// var ui *unsafeIntf = (*unsafeIntf)(unsafe.Pointer(&v)) +// if ui.word == nil { +// return true +// } +// var tk = reflect.TypeOf(v).Kind() +// return (tk == reflect.Interface || tk == reflect.Slice) && *(*unsafe.Pointer)(ui.word) == nil +// fmt.Printf(">>>> definitely nil: isnil: %v, TYPE: \t%T, word: %v, *word: %v, type: %v, nil: %v\n", +// v == nil, v, word, *((*unsafe.Pointer)(word)), ui.typ, nil) +// } + +// func keepAlive4BytesView(v string) { +// runtime.KeepAlive(v) +// } + +// func keepAlive4StringView(v []byte) { +// runtime.KeepAlive(v) +// } + +// func rt2id(rt reflect.Type) uintptr { +// return uintptr(((*unsafeIntf)(unsafe.Pointer(&rt))).word) +// // var i interface{} = rt +// // // ui := (*unsafeIntf)(unsafe.Pointer(&i)) +// // return ((*unsafeIntf)(unsafe.Pointer(&i))).word +// } + +// func rv2i(rv reflect.Value) interface{} { +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) +// // non-reference type: already indir +// // reference type: depend on flagIndir property ('cos maybe was double-referenced) +// // const (unsafeRvFlagKindMask = 1<<5 - 1 , unsafeRvFlagIndir = 1 << 7 ) +// // rvk := reflect.Kind(urv.flag & (1<<5 - 1)) +// // if (rvk == reflect.Chan || +// // rvk == reflect.Func || +// // rvk == reflect.Interface || +// // rvk == reflect.Map || +// // rvk == reflect.Ptr || +// // rvk == reflect.UnsafePointer) && urv.flag&(1<<8) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } +// if urv.flag&(1<<5-1) == uintptr(reflect.Map) && urv.flag&(1<<7) != 0 { +// // fmt.Printf(">>>>> ---- double indirect reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } +// // fmt.Printf(">>>>> ++++ direct reference: %v, %v\n", rvk, rv.Type()) +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } + +// const ( +// unsafeRvFlagKindMask = 1<<5 - 1 +// unsafeRvKindDirectIface = 1 << 5 +// unsafeRvFlagIndir = 1 << 7 +// unsafeRvFlagAddr = 1 << 8 +// unsafeRvFlagMethod = 1 << 9 + +// _USE_RV_INTERFACE bool = false +// _UNSAFE_RV_DEBUG = true +// ) + +// type unsafeRtype struct { +// _ [2]uintptr +// _ uint32 +// _ uint8 +// _ uint8 +// _ uint8 +// kind uint8 +// _ [2]uintptr +// _ int32 +// } + +// func _rv2i(rv reflect.Value) interface{} { +// // Note: From use, +// // - it's never an interface +// // - the only calls here are for ifaceIndir types. +// // (though that conditional is wrong) +// // To know for sure, we need the value of t.kind (which is not exposed). 
+// // +// // Need to validate the path: type is indirect ==> only value is indirect ==> default (value is direct) +// // - Type indirect, Value indirect: ==> numbers, boolean, slice, struct, array, string +// // - Type Direct, Value indirect: ==> map??? +// // - Type Direct, Value direct: ==> pointers, unsafe.Pointer, func, chan, map +// // +// // TRANSLATES TO: +// // if typeIndirect { } else if valueIndirect { } else { } +// // +// // Since we don't deal with funcs, then "flagNethod" is unset, and can be ignored. + +// if _USE_RV_INTERFACE { +// return rv.Interface() +// } +// urv := (*unsafeReflectValue)(unsafe.Pointer(&rv)) + +// // if urv.flag&unsafeRvFlagMethod != 0 || urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS flag method or interface: delegating to rv.Interface()") +// // return rv.Interface() +// // } + +// // if urv.flag&unsafeRvFlagKindMask == uintptr(reflect.Interface) { +// // println("***** IS Interface: delegate to rv.Interface") +// // return rv.Interface() +// // } +// // if urv.flag&unsafeRvFlagKindMask&unsafeRvKindDirectIface == 0 { +// // if urv.flag&unsafeRvFlagAddr == 0 { +// // println("***** IS ifaceIndir typ") +// // // ui := unsafeIntf{word: urv.ptr, typ: urv.typ} +// // // return *(*interface{})(unsafe.Pointer(&ui)) +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // } else if urv.flag&unsafeRvFlagIndir != 0 { +// // println("***** IS flagindir") +// // // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// // } else { +// // println("***** NOT flagindir") +// // return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// // println("***** default: delegate to rv.Interface") + +// urt := (*unsafeRtype)(unsafe.Pointer(urv.typ)) +// if _UNSAFE_RV_DEBUG { +// fmt.Printf(">>>> start: %v: ", rv.Type()) +// fmt.Printf("%v - %v\n", *urv, *urt) +// } +// if urt.kind&unsafeRvKindDirectIface == 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +ifaceIndir type: %v\n", rv.Type()) +// } +// // println("***** IS ifaceIndir typ") +// // if true || urv.flag&unsafeRvFlagAddr == 0 { +// // // println(" ***** IS NOT addr") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// // } +// } else if urv.flag&unsafeRvFlagIndir != 0 { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** +flagIndir type: %v\n", rv.Type()) +// } +// // println("***** IS flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: *(*unsafe.Pointer)(urv.ptr), typ: urv.typ})) +// } else { +// if _UNSAFE_RV_DEBUG { +// fmt.Printf("**** -flagIndir type: %v\n", rv.Type()) +// } +// // println("***** NOT flagindir") +// return *(*interface{})(unsafe.Pointer(&unsafeIntf{word: urv.ptr, typ: urv.typ})) +// } +// // println("***** default: delegating to rv.Interface()") +// // return rv.Interface() +// } + +// var staticM0 = make(map[string]uint64) +// var staticI0 = (int32)(-5) + +// func staticRv2iTest() { +// i0 := (int32)(-5) +// m0 := make(map[string]uint16) +// m0["1"] = 1 +// for _, i := range []interface{}{ +// (int)(7), +// (uint)(8), +// (int16)(-9), +// (uint16)(19), +// (uintptr)(77), +// (bool)(true), +// float32(-32.7), +// float64(64.9), +// complex(float32(19), 5), +// complex(float64(-32), 7), +// [4]uint64{1, 2, 3, 4}, +// (chan<- int)(nil), // chan, +// rv2i, // func +// io.Writer(ioutil.Discard), +// make(map[string]uint), +// (map[string]uint)(nil), +// staticM0, +// m0, 
+// 		&m0,
+// 		i0,
+// 		&i0,
+// 		&staticI0,
+// 		&staticM0,
+// 		[]uint32{6, 7, 8},
+// 		"abc",
+// 		Raw{},
+// 		RawExt{},
+// 		&Raw{},
+// 		&RawExt{},
+// 		unsafe.Pointer(&i0),
+// 	} {
+// 		i2 := rv2i(reflect.ValueOf(i))
+// 		eq := reflect.DeepEqual(i, i2)
+// 		fmt.Printf(">>>> %v == %v? %v\n", i, i2, eq)
+// 	}
+// 	// os.Exit(0)
+// }
+
+// func init() {
+// 	staticRv2iTest()
+// }
+
+// func rv2i(rv reflect.Value) interface{} {
+// 	if _USE_RV_INTERFACE || rv.Kind() == reflect.Interface || rv.CanAddr() {
+// 		return rv.Interface()
+// 	}
+// 	// var i interface{}
+// 	// ui := (*unsafeIntf)(unsafe.Pointer(&i))
+// 	var ui unsafeIntf
+// 	urv := (*unsafeReflectValue)(unsafe.Pointer(&rv))
+// 	// fmt.Printf("urv: flag: %b, typ: %b, ptr: %b\n", urv.flag, uintptr(urv.typ), uintptr(urv.ptr))
+// 	if (urv.flag&unsafeRvFlagKindMask)&unsafeRvKindDirectIface == 0 {
+// 		if urv.flag&unsafeRvFlagAddr != 0 {
+// 			println("***** indirect and addressable! Needs typed move - delegate to rv.Interface()")
+// 			return rv.Interface()
+// 		}
+// 		println("****** indirect type/kind")
+// 		ui.word = urv.ptr
+// 	} else if urv.flag&unsafeRvFlagIndir != 0 {
+// 		println("****** unsafe rv flag indir")
+// 		ui.word = *(*unsafe.Pointer)(urv.ptr)
+// 	} else {
+// 		println("****** default: assign ptr to word directly")
+// 		ui.word = urv.ptr
+// 	}
+// 	// ui.word = urv.ptr
+// 	ui.typ = urv.typ
+// 	// fmt.Printf("(pointers) ui.typ: %p, word: %p\n", ui.typ, ui.word)
+// 	// fmt.Printf("(binary) ui.typ: %b, word: %b\n", uintptr(ui.typ), uintptr(ui.word))
+// 	return *(*interface{})(unsafe.Pointer(&ui))
+// 	// return i
+// }
diff --git a/vendor/github.com/ugorji/go/codec/json.go b/vendor/github.com/ugorji/go/codec/json.go
new file mode 100644
index 0000000000..bdd1996639
--- /dev/null
+++ b/vendor/github.com/ugorji/go/codec/json.go
@@ -0,0 +1,1423 @@
+// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved.
+// Use of this source code is governed by a MIT license found in the LICENSE file.
+
+package codec
+
+// By default, this json support uses base64 encoding for bytes, because you cannot
+// store and read any arbitrary string in json (only unicode).
+// However, the user can configure how to encode/decode bytes.
+//
+// This library specifically supports UTF-8 for encoding and decoding only.
+//
+// Note that the library will happily encode/decode things which are not valid
+// json e.g. a map[int64]string. We do it for consistency. With valid json,
+// we will encode and decode appropriately.
+// Users can specify their map type if necessary to force it.
+//
+// Note:
+// - we cannot use strconv.Quote and strconv.Unquote because json quotes/unquotes differently.
+//   We implement it here.
+
+// Top-level methods of json(Enc|Dec)Driver (which are implementations of (en|de)cDriver)
+// MUST NOT call one another.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+//--------------------------------
+
+var jsonLiterals = [...]byte{
+	'"', 't', 'r', 'u', 'e', '"',
+	'"', 'f', 'a', 'l', 's', 'e', '"',
+	'"', 'n', 'u', 'l', 'l', '"',
+}
+
+const (
+	jsonLitTrueQ  = 0
+	jsonLitTrue   = 1
+	jsonLitFalseQ = 6
+	jsonLitFalse  = 7
+	jsonLitNullQ  = 13
+	jsonLitNull   = 14
+)
+
+const (
+	jsonU4Chk2 = '0'
+	jsonU4Chk1 = 'a' - 10
+	jsonU4Chk0 = 'A' - 10
+
+	jsonScratchArrayLen = 64
+)
+
+const (
+	// If !jsonValidateSymbols, decoding will be faster, by skipping some checks:
+	// - If we see first character of null, false or true,
+	//   do not validate subsequent characters.
+	// - e.g.
if we see a n, assume null and skip next 3 characters, + // and do not validate they are ull. + // P.S. Do not expect a significant decoding boost from this. + jsonValidateSymbols = true + + jsonSpacesOrTabsLen = 128 + + jsonAlwaysReturnInternString = false +) + +var ( + // jsonTabs and jsonSpaces are used as caches for indents + jsonTabs, jsonSpaces [jsonSpacesOrTabsLen]byte + + jsonCharHtmlSafeSet bitset128 + jsonCharSafeSet bitset128 + jsonCharWhitespaceSet bitset256 + jsonNumSet bitset256 +) + +func init() { + for i := 0; i < jsonSpacesOrTabsLen; i++ { + jsonSpaces[i] = ' ' + jsonTabs[i] = '\t' + } + + // populate the safe values as true: note: ASCII control characters are (0-31) + // jsonCharSafeSet: all true except (0-31) " \ + // jsonCharHtmlSafeSet: all true except (0-31) " \ < > & + var i byte + for i = 32; i < utf8.RuneSelf; i++ { + switch i { + case '"', '\\': + case '<', '>', '&': + jsonCharSafeSet.set(i) // = true + default: + jsonCharSafeSet.set(i) + jsonCharHtmlSafeSet.set(i) + } + } + for i = 0; i <= utf8.RuneSelf; i++ { + switch i { + case ' ', '\t', '\r', '\n': + jsonCharWhitespaceSet.set(i) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-': + jsonNumSet.set(i) + } + } +} + +// ---------------- + +type jsonEncDriverTypical struct { + w encWriter + // w *encWriterSwitch + b *[jsonScratchArrayLen]byte + tw bool // term white space + c containerState +} + +func (e *jsonEncDriverTypical) typical() {} + +func (e *jsonEncDriverTypical) reset(ee *jsonEncDriver) { + e.w = ee.ew + // e.w = &ee.e.encWriterSwitch + e.b = &ee.b + e.tw = ee.h.TermWhitespace + e.c = 0 +} + +func (e *jsonEncDriverTypical) WriteArrayStart(length int) { + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverTypical) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverTypical) WriteArrayEnd() { + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverTypical) WriteMapStart(length int) { + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverTypical) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + e.c = containerMapKey +} + +func (e *jsonEncDriverTypical) WriteMapElemValue() { + e.w.writen1(':') + e.c = containerMapValue +} + +func (e *jsonEncDriverTypical) WriteMapEnd() { + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverTypical) EncodeBool(b bool) { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } +} + +func (e *jsonEncDriverTypical) EncodeFloat64(f float64) { + fmt, prec := jsonFloatStrconvFmtPrec(f) + e.w.writeb(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) +} + +func (e *jsonEncDriverTypical) EncodeInt(v int64) { + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeUint(v uint64) { + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverTypical) EncodeFloat32(f float32) { + e.EncodeFloat64(float64(f)) +} + +func (e *jsonEncDriverTypical) atEndOfEncode() { + if e.tw { + e.w.writen1(' ') + } +} + +// ---------------- + +type jsonEncDriverGeneric struct { + w encWriter // encWriter // *encWriterSwitch + b *[jsonScratchArrayLen]byte + c containerState + // ds string // indent string + di int8 // indent per + d bool // indenting? 
+ dt bool // indent using tabs + dl uint16 // indent level + ks bool // map key as string + is byte // integer as string + tw bool // term white space + _ [7]byte // padding +} + +// indent is done as below: +// - newline and indent are added before each mapKey or arrayElem +// - newline and indent are added before each ending, +// except there was no entry (so we can have {} or []) + +func (e *jsonEncDriverGeneric) reset(ee *jsonEncDriver) { + e.w = ee.ew + e.b = &ee.b + e.tw = ee.h.TermWhitespace + e.c = 0 + e.d, e.dt, e.dl, e.di = false, false, 0, 0 + h := ee.h + if h.Indent > 0 { + e.d = true + e.di = int8(h.Indent) + } else if h.Indent < 0 { + e.d = true + e.dt = true + e.di = int8(-h.Indent) + } + e.ks = h.MapKeyAsString + e.is = h.IntegerAsString +} + +func (e *jsonEncDriverGeneric) WriteArrayStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('[') + e.c = containerArrayStart +} + +func (e *jsonEncDriverGeneric) WriteArrayElem() { + if e.c != containerArrayStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerArrayElem +} + +func (e *jsonEncDriverGeneric) WriteArrayEnd() { + if e.d { + e.dl-- + if e.c != containerArrayStart { + e.writeIndent() + } + } + e.w.writen1(']') + e.c = containerArrayEnd +} + +func (e *jsonEncDriverGeneric) WriteMapStart(length int) { + if e.d { + e.dl++ + } + e.w.writen1('{') + e.c = containerMapStart +} + +func (e *jsonEncDriverGeneric) WriteMapElemKey() { + if e.c != containerMapStart { + e.w.writen1(',') + } + if e.d { + e.writeIndent() + } + e.c = containerMapKey +} + +func (e *jsonEncDriverGeneric) WriteMapElemValue() { + if e.d { + e.w.writen2(':', ' ') + } else { + e.w.writen1(':') + } + e.c = containerMapValue +} + +func (e *jsonEncDriverGeneric) WriteMapEnd() { + if e.d { + e.dl-- + if e.c != containerMapStart { + e.writeIndent() + } + } + e.w.writen1('}') + e.c = containerMapEnd +} + +func (e *jsonEncDriverGeneric) writeIndent() { + e.w.writen1('\n') + x := int(e.di) * int(e.dl) + if e.dt { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonTabs[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonTabs[:x]) + } else { + for x > jsonSpacesOrTabsLen { + e.w.writeb(jsonSpaces[:]) + x -= jsonSpacesOrTabsLen + } + e.w.writeb(jsonSpaces[:x]) + } +} + +func (e *jsonEncDriverGeneric) EncodeBool(b bool) { + if e.ks && e.c == containerMapKey { + if b { + e.w.writeb(jsonLiterals[jsonLitTrueQ : jsonLitTrueQ+6]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalseQ : jsonLitFalseQ+7]) + } + } else { + if b { + e.w.writeb(jsonLiterals[jsonLitTrue : jsonLitTrue+4]) + } else { + e.w.writeb(jsonLiterals[jsonLitFalse : jsonLitFalse+5]) + } + } +} + +func (e *jsonEncDriverGeneric) EncodeFloat64(f float64) { + // instead of using 'g', specify whether to use 'e' or 'f' + fmt, prec := jsonFloatStrconvFmtPrec(f) + + var blen int + if e.ks && e.c == containerMapKey { + blen = 2 + len(strconv.AppendFloat(e.b[1:1], f, fmt, prec, 64)) + e.b[0] = '"' + e.b[blen-1] = '"' + } else { + blen = len(strconv.AppendFloat(e.b[:0], f, fmt, prec, 64)) + } + e.w.writeb(e.b[:blen]) +} + +func (e *jsonEncDriverGeneric) EncodeInt(v int64) { + x := e.is + if x == 'A' || x == 'L' && (v > 1<<53 || v < -(1<<53)) || (e.ks && e.c == containerMapKey) { + blen := 2 + len(strconv.AppendInt(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendInt(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeUint(v uint64) { + x := e.is + if x == 'A' || x == 'L' && v > 1<<53 || (e.ks && e.c == containerMapKey) { 
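+		// 2^53 is the largest integer that a float64 (i.e. a JSON number) can
+		// represent exactly, so larger values are quoted to avoid precision loss.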
+ blen := 2 + len(strconv.AppendUint(e.b[1:1], v, 10)) + e.b[0] = '"' + e.b[blen-1] = '"' + e.w.writeb(e.b[:blen]) + return + } + e.w.writeb(strconv.AppendUint(e.b[:0], v, 10)) +} + +func (e *jsonEncDriverGeneric) EncodeFloat32(f float32) { + // e.encodeFloat(float64(f), 32) + // always encode all floats as IEEE 64-bit floating point. + // It also ensures that we can decode in full precision even if into a float32, + // as what is written is always to float64 precision. + e.EncodeFloat64(float64(f)) +} + +func (e *jsonEncDriverGeneric) atEndOfEncode() { + if e.tw { + if e.d { + e.w.writen1('\n') + } else { + e.w.writen1(' ') + } + } +} + +// -------------------- + +type jsonEncDriver struct { + noBuiltInTypes + e *Encoder + h *JsonHandle + ew encWriter // encWriter // *encWriterSwitch + se extWrapper + // ---- cpu cache line boundary? + bs []byte // scratch + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch (encode time, +} + +func (e *jsonEncDriver) EncodeNil() { + // We always encode nil as just null (never in quotes) + // This allows us to easily decode if a nil in the json stream + // ie if initial token is n. + e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + + // if e.h.MapKeyAsString && e.c == containerMapKey { + // e.ew.writeb(jsonLiterals[jsonLitNullQ : jsonLitNullQ+6]) + // } else { + // e.ew.writeb(jsonLiterals[jsonLitNull : jsonLitNull+4]) + // } +} + +func (e *jsonEncDriver) EncodeTime(t time.Time) { + // Do NOT use MarshalJSON, as it allocates internally. + // instead, we call AppendFormat directly, using our scratch buffer (e.b) + if t.IsZero() { + e.EncodeNil() + } else { + e.b[0] = '"' + b := t.AppendFormat(e.b[1:1], time.RFC3339Nano) + e.b[len(b)+1] = '"' + e.ew.writeb(e.b[:len(b)+2]) + } + // v, err := t.MarshalJSON(); if err != nil { e.e.error(err) } e.ew.writeb(v) +} + +func (e *jsonEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, en *Encoder) { + if v := ext.ConvertExt(rv); v == nil { + e.EncodeNil() + } else { + en.encode(v) + } +} + +func (e *jsonEncDriver) EncodeRawExt(re *RawExt, en *Encoder) { + // only encodes re.Value (never re.Data) + if re.Value == nil { + e.EncodeNil() + } else { + en.encode(re.Value) + } +} + +func (e *jsonEncDriver) EncodeString(c charEncoding, v string) { + e.quoteStr(v) +} + +func (e *jsonEncDriver) EncodeStringBytes(c charEncoding, v []byte) { + // if encoding raw bytes and RawBytesExt is configured, use it to encode + if v == nil { + e.EncodeNil() + return + } + if c == cRAW { + if e.se.InterfaceExt != nil { + e.EncodeExt(v, 0, &e.se, e.e) + return + } + + slen := base64.StdEncoding.EncodedLen(len(v)) + if cap(e.bs) >= slen+2 { + e.bs = e.bs[:slen+2] + } else { + e.bs = make([]byte, slen+2) + } + e.bs[0] = '"' + base64.StdEncoding.Encode(e.bs[1:], v) + e.bs[slen+1] = '"' + e.ew.writeb(e.bs) + } else { + e.quoteStr(stringView(v)) + } +} + +func (e *jsonEncDriver) EncodeAsis(v []byte) { + e.ew.writeb(v) +} + +func (e *jsonEncDriver) quoteStr(s string) { + // adapted from std pkg encoding/json + const hex = "0123456789abcdef" + w := e.ew + htmlasis := e.h.HTMLCharsAsIs + w.writen1('"') + var start int + for i, slen := 0, len(s); i < slen; { + // encode all bytes < 0x20 (except \r, \n). + // also encode < > & to prevent security holes when served to some browsers. 
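+		// e.g. with the default HTMLCharsAsIs=false, `<b>` is written as `\u003cb\u003e`;
+		// with HTMLCharsAsIs=true, it is written through unchanged (illustrative).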
+ if b := s[i]; b < utf8.RuneSelf { + // if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + // if (htmlasis && jsonCharSafeSet.isset(b)) || jsonCharHtmlSafeSet.isset(b) { + if jsonCharHtmlSafeSet.isset(b) || (htmlasis && jsonCharSafeSet.isset(b)) { + i++ + continue + } + if start < i { + w.writestr(s[start:i]) + } + switch b { + case '\\', '"': + w.writen2('\\', b) + case '\n': + w.writen2('\\', 'n') + case '\r': + w.writen2('\\', 'r') + case '\b': + w.writen2('\\', 'b') + case '\f': + w.writen2('\\', 'f') + case '\t': + w.writen2('\\', 't') + default: + w.writestr(`\u00`) + w.writen2(hex[b>>4], hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. U+2029 is PARAGRAPH SEPARATOR. + // Both technically valid JSON, but bomb on JSONP, so fix here unconditionally. + if c == '\u2028' || c == '\u2029' { + if start < i { + w.writestr(s[start:i]) + } + w.writestr(`\u202`) + w.writen1(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + w.writestr(s[start:]) + } + w.writen1('"') +} + +type jsonDecDriver struct { + noBuiltInTypes + d *Decoder + h *JsonHandle + r decReader // *decReaderSwitch // decReader + se extWrapper + + // ---- writable fields during execution --- *try* to keep in sep cache line + + c containerState + // tok is used to store the token read right after skipWhiteSpace. + tok uint8 + fnull bool // found null from appendStringAsBytes + bs []byte // scratch. Initialized from b. Used for parsing strings or numbers. + bstr [8]byte // scratch used for string \UXXX parsing + // ---- cpu cache line boundary? + b [jsonScratchArrayLen]byte // scratch 1, used for parsing strings or numbers or time.Time + b2 [jsonScratchArrayLen]byte // scratch 2, used only for readUntil, decNumBytes + + _ [3]uint64 // padding + // n jsonNum +} + +// func jsonIsWS(b byte) bool { +// // return b == ' ' || b == '\t' || b == '\r' || b == '\n' +// return jsonCharWhitespaceSet.isset(b) +// } + +func (d *jsonDecDriver) uncacheRead() { + if d.tok != 0 { + d.r.unreadn1() + d.tok = 0 + } +} + +func (d *jsonDecDriver) ReadMapStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '{' + if d.tok != xc { + d.d.errorf("read map - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapStart + return -1 +} + +func (d *jsonDecDriver) ReadArrayStart() int { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + const xc uint8 = '[' + if d.tok != xc { + d.d.errorf("read array - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayStart + return -1 +} + +func (d *jsonDecDriver) CheckBreak() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + return d.tok == '}' || d.tok == ']' +} + +// For the ReadXXX methods below, we could just delegate to helper functions +// readContainerState(c containerState, xc uint8, check bool) +// - ReadArrayElem would become: +// readContainerState(containerArrayElem, ',', d.c != containerArrayStart) +// +// However, until mid-stack inlining comes in go1.11 which supports inlining of +// one-liners, we explicitly write them all 5 out to elide the extra func call. +// +// TODO: For Go 1.11, if inlined, consider consolidating these. 
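+//
+// For illustration, that helper could look like this (a sketch only - the name
+// and the error text are assumptions; it is not defined in this file):
+//
+// func (d *jsonDecDriver) readContainerState(c containerState, xc uint8, check bool) {
+// 	if d.tok == 0 {
+// 		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+// 	}
+// 	if check {
+// 		if d.tok != xc {
+// 			d.d.errorf("expect char '%c' but got char '%c'", xc, d.tok)
+// 		}
+// 		d.tok = 0
+// 	}
+// 	d.c = c
+// }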
+ +func (d *jsonDecDriver) ReadArrayElem() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerArrayStart { + if d.tok != xc { + d.d.errorf("read array element - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerArrayElem +} + +func (d *jsonDecDriver) ReadArrayEnd() { + const xc uint8 = ']' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read array end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerArrayEnd +} + +func (d *jsonDecDriver) ReadMapElemKey() { + const xc uint8 = ',' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.c != containerMapStart { + if d.tok != xc { + d.d.errorf("read map key - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + } + d.c = containerMapKey +} + +func (d *jsonDecDriver) ReadMapElemValue() { + const xc uint8 = ':' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map value - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapValue +} + +func (d *jsonDecDriver) ReadMapEnd() { + const xc uint8 = '}' + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok != xc { + d.d.errorf("read map end - expect char '%c' but got char '%c'", xc, d.tok) + } + d.tok = 0 + d.c = containerMapEnd +} + +func (d *jsonDecDriver) readLit(length, fromIdx uint8) { + bs := d.r.readx(int(length)) + d.tok = 0 + if jsonValidateSymbols && !bytes.Equal(bs, jsonLiterals[fromIdx:fromIdx+length]) { + d.d.errorf("expecting %s: got %s", jsonLiterals[fromIdx:fromIdx+length], bs) + return + } +} + +func (d *jsonDecDriver) TryDecodeAsNil() bool { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // we shouldn't try to see if "null" was here, right? + // only the plain string: `null` denotes a nil (ie not quotes) + if d.tok == 'n' { + d.readLit(3, jsonLitNull+1) // (n)ull + return true + } + return false +} + +func (d *jsonDecDriver) DecodeBool() (v bool) { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + fquot := d.c == containerMapKey && d.tok == '"' + if fquot { + d.tok = d.r.readn1() + } + switch d.tok { + case 'f': + d.readLit(4, jsonLitFalse+1) // (f)alse + // v = false + case 't': + d.readLit(3, jsonLitTrue+1) // (t)rue + v = true + default: + d.d.errorf("decode bool: got first char %c", d.tok) + // v = false // "unreachable" + } + if fquot { + d.r.readn1() + } + return +} + +func (d *jsonDecDriver) DecodeTime() (t time.Time) { + // read string, and pass the string into json.unmarshal + d.appendStringAsBytes() + if d.fnull { + return + } + t, err := time.Parse(time.RFC3339, stringView(d.bs)) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) ContainerType() (vt valueType) { + // check container type by checking the first char + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + // optimize this, so we don't do 4 checks but do one computation. 
+ // return jsonContainerSet[d.tok] + + // ContainerType is mostly called for Map and Array, + // so this conditional is good enough (max 2 checks typically) + if b := d.tok; b == '{' { + return valueTypeMap + } else if b == '[' { + return valueTypeArray + } else if b == 'n' { + return valueTypeNil + } else if b == '"' { + return valueTypeString + } + return valueTypeUnset +} + +func (d *jsonDecDriver) decNumBytes() (bs []byte) { + // stores num bytes in d.bs + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + if d.tok == '"' { + bs = d.r.readUntil(d.b2[:0], '"') + bs = bs[:len(bs)-1] + } else { + d.r.unreadn1() + bs = d.r.readTo(d.bs[:0], &jsonNumSet) + } + d.tok = 0 + return bs +} + +func (d *jsonDecDriver) DecodeUint64() (u uint64) { + bs := d.decNumBytes() + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing unsigned integer: %s", bs) + } else if neg { + d.d.errorf("minus found parsing unsigned integer: %s", bs) + } else if badsyntax { + // fallback: try to decode as float, and cast + n = d.decUint64ViaFloat(stringView(bs)) + } + return n +} + +func (d *jsonDecDriver) DecodeInt64() (i int64) { + const cutoff = uint64(1 << uint(64-1)) + bs := d.decNumBytes() + n, neg, badsyntax, overflow := jsonParseInteger(bs) + if overflow { + d.d.errorf("overflow parsing integer: %s", bs) + } else if badsyntax { + // d.d.errorf("invalid syntax for integer: %s", bs) + // fallback: try to decode as float, and cast + if neg { + n = d.decUint64ViaFloat(stringView(bs[1:])) + } else { + n = d.decUint64ViaFloat(stringView(bs)) + } + } + if neg { + if n > cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = -(int64(n)) + } else { + if n >= cutoff { + d.d.errorf("overflow parsing integer: %s", bs) + } + i = int64(n) + } + return +} + +func (d *jsonDecDriver) decUint64ViaFloat(s string) (u uint64) { + f, err := strconv.ParseFloat(s, 64) + if err != nil { + d.d.errorf("invalid syntax for integer: %s", s) + // d.d.errorv(err) + } + fi, ff := math.Modf(f) + if ff > 0 { + d.d.errorf("fractional part found parsing integer: %s", s) + } else if fi > float64(math.MaxUint64) { + d.d.errorf("overflow parsing integer: %s", s) + } + return uint64(fi) +} + +func (d *jsonDecDriver) DecodeFloat64() (f float64) { + bs := d.decNumBytes() + f, err := strconv.ParseFloat(stringView(bs), 64) + if err != nil { + d.d.errorv(err) + } + return +} + +func (d *jsonDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if ext == nil { + re := rv.(*RawExt) + re.Tag = xtag + d.d.decode(&re.Value) + } else { + var v interface{} + d.d.decode(&v) + ext.UpdateExt(rv, v) + } + return +} + +func (d *jsonDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + // if decoding into raw bytes, and the RawBytesExt is configured, use it to decode. + if d.se.InterfaceExt != nil { + bsOut = bs + d.DecodeExt(&bsOut, 0, &d.se) + return + } + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.tok == '[' { + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + d.appendStringAsBytes() + // base64 encodes []byte{} as "", and we encode nil []byte as null. + // Consequently, base64 should decode null as a nil []byte, and "" as an empty []byte{}. + // appendStringAsBytes returns a zero-len slice for both, so as not to reset d.bs. + // However, it sets a fnull field to true, so we can check if a null was found. 
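+	// For illustration: json `null` => nil, json `""` => []byte{},
+	// and json `"YWJj"` (base64) => []byte("abc").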
+ if len(d.bs) == 0 { + if d.fnull { + return nil + } + return []byte{} + } + bs0 := d.bs + slen := base64.StdEncoding.DecodedLen(len(bs0)) + if slen <= cap(bs) { + bsOut = bs[:slen] + } else if zerocopy && slen <= cap(d.b2) { + bsOut = d.b2[:slen] + } else { + bsOut = make([]byte, slen) + } + slen2, err := base64.StdEncoding.Decode(bsOut, bs0) + if err != nil { + d.d.errorf("error decoding base64 binary '%s': %v", bs0, err) + return nil + } + if slen != slen2 { + bsOut = bsOut[:slen2] + } + return +} + +func (d *jsonDecDriver) DecodeString() (s string) { + d.appendStringAsBytes() + return d.bsToString() +} + +func (d *jsonDecDriver) DecodeStringAsBytes() (s []byte) { + d.appendStringAsBytes() + return d.bs +} + +func (d *jsonDecDriver) appendStringAsBytes() { + if d.tok == 0 { + d.tok = d.r.skip(&jsonCharWhitespaceSet) + } + + d.fnull = false + if d.tok != '"' { + // d.d.errorf("expect char '%c' but got char '%c'", '"', d.tok) + // handle non-string scalar: null, true, false or a number + switch d.tok { + case 'n': + d.readLit(3, jsonLitNull+1) // (n)ull + d.bs = d.bs[:0] + d.fnull = true + case 'f': + d.readLit(4, jsonLitFalse+1) // (f)alse + d.bs = d.bs[:5] + copy(d.bs, "false") + case 't': + d.readLit(3, jsonLitTrue+1) // (t)rue + d.bs = d.bs[:4] + copy(d.bs, "true") + default: + // try to parse a valid number + bs := d.decNumBytes() + if len(bs) <= cap(d.bs) { + d.bs = d.bs[:len(bs)] + } else { + d.bs = make([]byte, len(bs)) + } + copy(d.bs, bs) + } + return + } + + d.tok = 0 + r := d.r + var cs = r.readUntil(d.b2[:0], '"') + var cslen = len(cs) + var c uint8 + v := d.bs[:0] + // append on each byte seen can be expensive, so we just + // keep track of where we last read a contiguous set of + // non-special bytes (using cursor variable), + // and when we see a special byte + // e.g. end-of-slice, " or \, + // we will append the full range into the v slice before proceeding + for i, cursor := 0, 0; ; { + if i == cslen { + v = append(v, cs[cursor:]...) + cs = r.readUntil(d.b2[:0], '"') + cslen = len(cs) + i, cursor = 0, 0 + } + c = cs[i] + if c == '"' { + v = append(v, cs[cursor:i]...) + break + } + if c != '\\' { + i++ + continue + } + v = append(v, cs[cursor:i]...) 
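+		// step past the backslash, then decode the escape sequence that follows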
+		i++
+		c = cs[i]
+		switch c {
+		case '"', '\\', '/', '\'':
+			v = append(v, c)
+		case 'b':
+			v = append(v, '\b')
+		case 'f':
+			v = append(v, '\f')
+		case 'n':
+			v = append(v, '\n')
+		case 'r':
+			v = append(v, '\r')
+		case 't':
+			v = append(v, '\t')
+		case 'u':
+			var r rune
+			var rr uint32
+			if len(cs) < i+5 { // indices i+1..i+4 must be in range (also helps reduce bounds-checking)
+				d.d.errorf("need at least 4 more bytes for unicode sequence")
+			}
+			// c = cs[i+4] // may help reduce bounds-checking
+			for j := 1; j < 5; j++ {
+				// best to use explicit if-else
+				// - not a table, etc which involve memory loads, array lookup with bounds checks, etc
+				c = cs[i+j]
+				if c >= '0' && c <= '9' {
+					rr = rr*16 + uint32(c-jsonU4Chk2)
+				} else if c >= 'a' && c <= 'f' {
+					rr = rr*16 + uint32(c-jsonU4Chk1)
+				} else if c >= 'A' && c <= 'F' {
+					rr = rr*16 + uint32(c-jsonU4Chk0)
+				} else {
+					r = unicode.ReplacementChar
+					i += 4
+					goto encode_rune
+				}
+			}
+			r = rune(rr)
+			i += 4
+			if utf16.IsSurrogate(r) {
+				if len(cs) >= i+7 && cs[i+2] == 'u' && cs[i+1] == '\\' {
+					i += 2
+					// c = cs[i+4] // may help reduce bounds-checking
+					var rr1 uint32
+					for j := 1; j < 5; j++ {
+						c = cs[i+j]
+						if c >= '0' && c <= '9' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk2)
+						} else if c >= 'a' && c <= 'f' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk1)
+						} else if c >= 'A' && c <= 'F' {
+							rr1 = rr1*16 + uint32(c-jsonU4Chk0)
+						} else {
+							r = unicode.ReplacementChar
+							i += 4
+							goto encode_rune
+						}
+					}
+					r = utf16.DecodeRune(r, rune(rr1))
+					i += 4
+				} else {
+					r = unicode.ReplacementChar
+					goto encode_rune
+				}
+			}
+		encode_rune:
+			w2 := utf8.EncodeRune(d.bstr[:], r)
+			v = append(v, d.bstr[:w2]...)
+		default:
+			d.d.errorf("unsupported escaped value: %c", c)
+		}
+		i++
+		cursor = i
+	}
+	d.bs = v
+}
+
+func (d *jsonDecDriver) nakedNum(z *decNaked, bs []byte) (err error) {
+	const cutoff = uint64(1 << uint(64-1))
+	var n uint64
+	var neg, badsyntax, overflow bool
+
+	if d.h.PreferFloat {
+		goto F
+	}
+	n, neg, badsyntax, overflow = jsonParseInteger(bs)
+	if badsyntax || overflow {
+		goto F
+	}
+	if neg {
+		if n > cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = -(int64(n))
+	} else if d.h.SignedInteger {
+		if n >= cutoff {
+			goto F
+		}
+		z.v = valueTypeInt
+		z.i = int64(n)
+	} else {
+		z.v = valueTypeUint
+		z.u = n
+	}
+	return
+F:
+	z.v = valueTypeFloat
+	z.f, err = strconv.ParseFloat(stringView(bs), 64)
+	return
+}
+
+func (d *jsonDecDriver) bsToString() string {
+	// if x := d.s.sc; x != nil && x.so && x.st == '}' { // map key
+	if jsonAlwaysReturnInternString || d.c == containerMapKey {
+		return d.d.string(d.bs)
+	}
+	return string(d.bs)
+}
+
+func (d *jsonDecDriver) DecodeNaked() {
+	z := d.d.n
+	// var decodeFurther bool
+
+	if d.tok == 0 {
+		d.tok = d.r.skip(&jsonCharWhitespaceSet)
+	}
+	switch d.tok {
+	case 'n':
+		d.readLit(3, jsonLitNull+1) // (n)ull
+		z.v = valueTypeNil
+	case 'f':
+		d.readLit(4, jsonLitFalse+1) // (f)alse
+		z.v = valueTypeBool
+		z.b = false
+	case 't':
+		d.readLit(3, jsonLitTrue+1) // (t)rue
+		z.v = valueTypeBool
+		z.b = true
+	case '{':
+		z.v = valueTypeMap // don't consume. kInterfaceNaked will call ReadMapStart
+	case '[':
+		z.v = valueTypeArray // don't consume. kInterfaceNaked will call ReadArrayStart
+	case '"':
+		// if a string, and MapKeyAsString, then try to decode it as a nil, bool or number first
+		d.appendStringAsBytes()
+		if len(d.bs) > 0 && d.c == containerMapKey && d.h.MapKeyAsString {
+			switch stringView(d.bs) {
+			case "null":
+				z.v = valueTypeNil
+			case "true":
+				z.v = valueTypeBool
+				z.b = true
+			case "false":
+				z.v = valueTypeBool
+				z.b = false
+			default:
+				// check if a number: float, int or uint
+				if err := d.nakedNum(z, d.bs); err != nil {
+					z.v = valueTypeString
+					z.s = d.bsToString()
+				}
+			}
+		} else {
+			z.v = valueTypeString
+			z.s = d.bsToString()
+		}
+	default: // number
+		bs := d.decNumBytes()
+		if len(bs) == 0 {
+			d.d.errorf("decode number from empty string")
+			return
+		}
+		if err := d.nakedNum(z, bs); err != nil {
+			d.d.errorf("decode number from %s: %v", bs, err)
+			return
+		}
+	}
+	// if decodeFurther {
+	// 	d.s.sc.retryRead()
+	// }
+	return
+}
+
+//----------------------
+
+// JsonHandle is a handle for JSON encoding format.
+//
+// Json is comprehensively supported:
+// - decodes numbers into interface{} as int, uint or float64
+//   based on how the number looks and some config parameters e.g. PreferFloat, SignedInteger, etc.
+// - decode integers from float formatted numbers e.g. 1.27e+8
+// - decode any json value (numbers, bool, etc) from quoted strings
+// - configurable way to encode/decode []byte.
+//   by default, encodes and decodes []byte using base64 Std Encoding
+// - UTF-8 support for encoding and decoding
+//
+// It has better performance than the json library in the standard library,
+// by leveraging the performance improvements of the codec library.
+//
+// In addition, it doesn't read more bytes than necessary during a decode, which allows
+// reading multiple values from a stream containing json and non-json content.
+// For example, a user can read a json value, then a cbor value, then a msgpack value,
+// all from the same stream in sequence.
+//
+// Note that, when decoding quoted strings, invalid UTF-8 or invalid UTF-16 surrogate pairs are
+// not treated as an error. Instead, they are replaced by the Unicode replacement character U+FFFD.
+type JsonHandle struct {
+	textEncodingType
+	BasicHandle
+
+	// Indent indicates how a value is encoded.
+	// - If positive, indent by that number of spaces.
+	// - If negative, indent by that number of tabs.
+	Indent int8
+
+	// IntegerAsString controls how integers (signed and unsigned) are encoded.
+	//
+	// Per the JSON Spec, JSON numbers are 64-bit floating point numbers.
+	// Consequently, integers > 2^53 cannot be represented as a JSON number without losing precision.
+	// This can be mitigated by configuring how to encode integers.
+	//
+	// IntegerAsString interprets the following values:
+	// - if 'L', then encode integers > 2^53 as a json string.
+	// - if 'A', then encode all integers as a json string
+	//   containing the exact integer representation as a decimal.
+	// - else encode all integers as a json number (default)
+	IntegerAsString byte
+
+	// HTMLCharsAsIs controls how to encode some special characters to html: < > &
+	//
+	// By default, we encode them as \u00XX
+	// to prevent security holes when served from some browsers.
+	HTMLCharsAsIs bool
+
+	// PreferFloat says that we will default to decoding a number as a float.
+	// If not set, we will examine the characters of the number and decode as an
+	// integer type if it doesn't have any of the characters [.eE].
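+	// For example (illustrative): `123` decodes as an integer type, while
+	// `123.0`, `1.2e3` and `1e2` all decode as float64.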
+ PreferFloat bool + + // TermWhitespace says that we add a whitespace character + // at the end of an encoding. + // + // The whitespace is important, especially if using numbers in a context + // where multiple items are written to a stream. + TermWhitespace bool + + // MapKeyAsString says to encode all map keys as strings. + // + // Use this to enforce strict json output. + // The only caveat is that nil value is ALWAYS written as null (never as "null") + MapKeyAsString bool + + // _ [2]byte // padding + + // Note: below, we store hardly-used items e.g. RawBytesExt is cached in the (en|de)cDriver. + + // RawBytesExt, if configured, is used to encode and decode raw bytes in a custom way. + // If not configured, raw bytes are encoded to/from base64 text. + RawBytesExt InterfaceExt + + _ [2]uint64 // padding +} + +// Name returns the name of the handle: json +func (h *JsonHandle) Name() string { return "json" } +func (h *JsonHandle) hasElemSeparators() bool { return true } +func (h *JsonHandle) typical() bool { + return h.Indent == 0 && !h.MapKeyAsString && h.IntegerAsString != 'A' && h.IntegerAsString != 'L' +} + +type jsonTypical interface { + typical() +} + +func (h *JsonHandle) recreateEncDriver(ed encDriver) (v bool) { + _, v = ed.(jsonTypical) + return v != h.typical() +} + +// SetInterfaceExt sets an extension +func (h *JsonHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext}) +} + +type jsonEncDriverTypicalImpl struct { + jsonEncDriver + jsonEncDriverTypical + _ [1]uint64 // padding +} + +func (x *jsonEncDriverTypicalImpl) reset() { + x.jsonEncDriver.reset() + x.jsonEncDriverTypical.reset(&x.jsonEncDriver) +} + +type jsonEncDriverGenericImpl struct { + jsonEncDriver + jsonEncDriverGeneric +} + +func (x *jsonEncDriverGenericImpl) reset() { + x.jsonEncDriver.reset() + x.jsonEncDriverGeneric.reset(&x.jsonEncDriver) +} + +func (h *JsonHandle) newEncDriver(e *Encoder) (ee encDriver) { + var hd *jsonEncDriver + if h.typical() { + var v jsonEncDriverTypicalImpl + ee = &v + hd = &v.jsonEncDriver + } else { + var v jsonEncDriverGenericImpl + ee = &v + hd = &v.jsonEncDriver + } + hd.e, hd.h, hd.bs = e, h, hd.b[:0] + hd.se.BytesExt = bytesExtFailer{} + ee.reset() + return +} + +func (h *JsonHandle) newDecDriver(d *Decoder) decDriver { + // d := jsonDecDriver{r: r.(*bytesDecReader), h: h} + hd := jsonDecDriver{d: d, h: h} + hd.se.BytesExt = bytesExtFailer{} + hd.bs = hd.b[:0] + hd.reset() + return &hd +} + +func (e *jsonEncDriver) reset() { + e.ew = e.e.w // e.e.w // &e.e.encWriterSwitch + e.se.InterfaceExt = e.h.RawBytesExt + if e.bs != nil { + e.bs = e.bs[:0] + } +} + +func (d *jsonDecDriver) reset() { + d.r = d.d.r // &d.d.decReaderSwitch // d.d.r + d.se.InterfaceExt = d.h.RawBytesExt + if d.bs != nil { + d.bs = d.bs[:0] + } + d.c, d.tok = 0, 0 + // d.n.reset() +} + +func jsonFloatStrconvFmtPrec(f float64) (fmt byte, prec int) { + prec = -1 + var abs = math.Abs(f) + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } else { + fmt = 'f' + // set prec to 1 iff mod is 0. + // better than using jsonIsFloatBytesB2 to check if a . or E in the float bytes. + // this ensures that every float has an e or .0 in it. + if abs <= 1 { + if abs == 0 || abs == 1 { + prec = 1 + } + } else if _, mod := math.Modf(abs); mod == 0 { + prec = 1 + } + } + return +} + +// custom-fitted version of strconv.Parse(Ui|I)nt. +// Also ensures we don't have to search for .eE to determine if a float or not. 
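+// For example (illustrative):
+//	jsonParseInteger([]byte("123")) => n=123, neg=false
+//	jsonParseInteger([]byte("-45")) => n=45, neg=true
+//	jsonParseInteger([]byte("1e3")) => badSyntax=true (callers fall back to float parsing)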
+func jsonParseInteger(s []byte) (n uint64, neg, badSyntax, overflow bool) { + const maxUint64 = (1<<64 - 1) + const cutoff = maxUint64/10 + 1 + + if len(s) == 0 { + badSyntax = true + return + } + switch s[0] { + case '+': + s = s[1:] + case '-': + s = s[1:] + neg = true + } + for _, c := range s { + if c < '0' || c > '9' { + badSyntax = true + return + } + // unsigned integers don't overflow well on multiplication, so check cutoff here + // e.g. (maxUint64-5)*10 doesn't overflow well ... + if n >= cutoff { + overflow = true + return + } + n *= 10 + n1 := n + uint64(c-'0') + if n1 < n || n1 > maxUint64 { + overflow = true + return + } + n = n1 + } + return +} + +var _ decDriver = (*jsonDecDriver)(nil) +var _ encDriver = (*jsonEncDriverGenericImpl)(nil) +var _ encDriver = (*jsonEncDriverTypicalImpl)(nil) +var _ jsonTypical = (*jsonEncDriverTypical)(nil) diff --git a/vendor/github.com/ugorji/go/codec/msgpack.go b/vendor/github.com/ugorji/go/codec/msgpack.go new file mode 100644 index 0000000000..3271579a1a --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/msgpack.go @@ -0,0 +1,1092 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +/* +MSGPACK + +Msgpack-c implementation powers the c, c++, python, ruby, etc libraries. +We need to maintain compatibility with it and how it encodes integer values +without caring about the type. + +For compatibility with behaviour of msgpack-c reference implementation: + - Go intX (>0) and uintX + IS ENCODED AS + msgpack +ve fixnum, unsigned + - Go intX (<0) + IS ENCODED AS + msgpack -ve fixnum, signed +*/ + +package codec + +import ( + "fmt" + "io" + "math" + "net/rpc" + "reflect" + "time" +) + +const ( + mpPosFixNumMin byte = 0x00 + mpPosFixNumMax = 0x7f + mpFixMapMin = 0x80 + mpFixMapMax = 0x8f + mpFixArrayMin = 0x90 + mpFixArrayMax = 0x9f + mpFixStrMin = 0xa0 + mpFixStrMax = 0xbf + mpNil = 0xc0 + _ = 0xc1 + mpFalse = 0xc2 + mpTrue = 0xc3 + mpFloat = 0xca + mpDouble = 0xcb + mpUint8 = 0xcc + mpUint16 = 0xcd + mpUint32 = 0xce + mpUint64 = 0xcf + mpInt8 = 0xd0 + mpInt16 = 0xd1 + mpInt32 = 0xd2 + mpInt64 = 0xd3 + + // extensions below + mpBin8 = 0xc4 + mpBin16 = 0xc5 + mpBin32 = 0xc6 + mpExt8 = 0xc7 + mpExt16 = 0xc8 + mpExt32 = 0xc9 + mpFixExt1 = 0xd4 + mpFixExt2 = 0xd5 + mpFixExt4 = 0xd6 + mpFixExt8 = 0xd7 + mpFixExt16 = 0xd8 + + mpStr8 = 0xd9 // new + mpStr16 = 0xda + mpStr32 = 0xdb + + mpArray16 = 0xdc + mpArray32 = 0xdd + + mpMap16 = 0xde + mpMap32 = 0xdf + + mpNegFixNumMin = 0xe0 + mpNegFixNumMax = 0xff +) + +var mpTimeExtTag int8 = -1 +var mpTimeExtTagU = uint8(mpTimeExtTag) + +// var mpdesc = map[byte]string{ +// mpPosFixNumMin: "PosFixNumMin", +// mpPosFixNumMax: "PosFixNumMax", +// mpFixMapMin: "FixMapMin", +// mpFixMapMax: "FixMapMax", +// mpFixArrayMin: "FixArrayMin", +// mpFixArrayMax: "FixArrayMax", +// mpFixStrMin: "FixStrMin", +// mpFixStrMax: "FixStrMax", +// mpNil: "Nil", +// mpFalse: "False", +// mpTrue: "True", +// mpFloat: "Float", +// mpDouble: "Double", +// mpUint8: "Uint8", +// mpUint16: "Uint16", +// mpUint32: "Uint32", +// mpUint64: "Uint64", +// mpInt8: "Int8", +// mpInt16: "Int16", +// mpInt32: "Int32", +// mpInt64: "Int64", +// mpBin8: "Bin8", +// mpBin16: "Bin16", +// mpBin32: "Bin32", +// mpExt8: "Ext8", +// mpExt16: "Ext16", +// mpExt32: "Ext32", +// mpFixExt1: "FixExt1", +// mpFixExt2: "FixExt2", +// mpFixExt4: "FixExt4", +// mpFixExt8: "FixExt8", +// mpFixExt16: "FixExt16", +// mpStr8: "Str8", +// mpStr16: "Str16", +// 
mpStr32: "Str32", +// mpArray16: "Array16", +// mpArray32: "Array32", +// mpMap16: "Map16", +// mpMap32: "Map32", +// mpNegFixNumMin: "NegFixNumMin", +// mpNegFixNumMax: "NegFixNumMax", +// } + +func mpdesc(bd byte) string { + switch bd { + case mpNil: + return "nil" + case mpFalse: + return "false" + case mpTrue: + return "true" + case mpFloat, mpDouble: + return "float" + case mpUint8, mpUint16, mpUint32, mpUint64: + return "uint" + case mpInt8, mpInt16, mpInt32, mpInt64: + return "int" + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + return "int" + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + return "int" + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + return "string|bytes" + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + return "bytes" + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + return "array" + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + return "map" + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + return "ext" + default: + return "unknown" + } + } +} + +// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec +// that the backend RPC service takes multiple arguments, which have been arranged +// in sequence in the slice. +// +// The Codec then passes it AS-IS to the rpc service (without wrapping it in an +// array of 1 element). +type MsgpackSpecRpcMultiArgs []interface{} + +// A MsgpackContainer type specifies the different types of msgpackContainers. +type msgpackContainerType struct { + fixCutoff int + bFixMin, b8, b16, b32 byte + hasFixMin, has8, has8Always bool +} + +var ( + msgpackContainerStr = msgpackContainerType{ + 32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false, + } + msgpackContainerBin = msgpackContainerType{ + 0, 0, mpBin8, mpBin16, mpBin32, false, true, true, + } + msgpackContainerList = msgpackContainerType{ + 16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false, + } + msgpackContainerMap = msgpackContainerType{ + 16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false, + } +) + +//--------------------------------------------- + +type msgpackEncDriver struct { + noBuiltInTypes + encDriverNoopContainerWriter + // encNoSeparator + e *Encoder + w encWriter + h *MsgpackHandle + x [8]byte + _ [3]uint64 // padding +} + +func (e *msgpackEncDriver) EncodeNil() { + e.w.writen1(mpNil) +} + +func (e *msgpackEncDriver) EncodeInt(i int64) { + // if i >= 0 { + // e.EncodeUint(uint64(i)) + // } else if false && + if i > math.MaxInt8 { + if i <= math.MaxInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } + } else if i >= -32 { + if e.h.NoFixedNum { + e.w.writen2(mpInt8, byte(i)) + } else { + e.w.writen1(byte(i)) + } + } else if i >= math.MinInt8 { + e.w.writen2(mpInt8, byte(i)) + } else if i >= math.MinInt16 { + e.w.writen1(mpInt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i >= math.MinInt32 { + e.w.writen1(mpInt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpInt64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeUint(i uint64) { + if i <= math.MaxInt8 { + if e.h.NoFixedNum { + e.w.writen2(mpUint8, byte(i)) + } else { + 
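// positive fixnum: for values 0..127, the value itself is the encoded byte +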
e.w.writen1(byte(i)) + } + } else if i <= math.MaxUint8 { + e.w.writen2(mpUint8, byte(i)) + } else if i <= math.MaxUint16 { + e.w.writen1(mpUint16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(i)) + } else if i <= math.MaxUint32 { + e.w.writen1(mpUint32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(i)) + } else { + e.w.writen1(mpUint64) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(i)) + } +} + +func (e *msgpackEncDriver) EncodeBool(b bool) { + if b { + e.w.writen1(mpTrue) + } else { + e.w.writen1(mpFalse) + } +} + +func (e *msgpackEncDriver) EncodeFloat32(f float32) { + e.w.writen1(mpFloat) + bigenHelper{e.x[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *msgpackEncDriver) EncodeFloat64(f float64) { + e.w.writen1(mpDouble) + bigenHelper{e.x[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *msgpackEncDriver) EncodeTime(t time.Time) { + if t.IsZero() { + e.EncodeNil() + return + } + t = t.UTC() + sec, nsec := t.Unix(), uint64(t.Nanosecond()) + var data64 uint64 + var l = 4 + if sec >= 0 && sec>>34 == 0 { + data64 = (nsec << 34) | uint64(sec) + if data64&0xffffffff00000000 != 0 { + l = 8 + } + } else { + l = 12 + } + if e.h.WriteExt { + e.encodeExtPreamble(mpTimeExtTagU, l) + } else { + e.writeContainerLen(msgpackContainerStr, l) + } + switch l { + case 4: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(data64)) + case 8: + bigenHelper{e.x[:8], e.w}.writeUint64(data64) + case 12: + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(nsec)) + bigenHelper{e.x[:8], e.w}.writeUint64(uint64(sec)) + } +} + +func (e *msgpackEncDriver) EncodeExt(v interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(v) + if bs == nil { + e.EncodeNil() + return + } + if e.h.WriteExt { + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) + } else { + e.EncodeStringBytes(cRAW, bs) + } +} + +func (e *msgpackEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) { + if l == 1 { + e.w.writen2(mpFixExt1, xtag) + } else if l == 2 { + e.w.writen2(mpFixExt2, xtag) + } else if l == 4 { + e.w.writen2(mpFixExt4, xtag) + } else if l == 8 { + e.w.writen2(mpFixExt8, xtag) + } else if l == 16 { + e.w.writen2(mpFixExt16, xtag) + } else if l < 256 { + e.w.writen2(mpExt8, byte(l)) + e.w.writen1(xtag) + } else if l < 65536 { + e.w.writen1(mpExt16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + e.w.writen1(xtag) + } else { + e.w.writen1(mpExt32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + e.w.writen1(xtag) + } +} + +func (e *msgpackEncDriver) WriteArrayStart(length int) { + e.writeContainerLen(msgpackContainerList, length) +} + +func (e *msgpackEncDriver) WriteMapStart(length int) { + e.writeContainerLen(msgpackContainerMap, length) +} + +func (e *msgpackEncDriver) EncodeString(c charEncoding, s string) { + slen := len(s) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writestr(s) + } +} + +func (e *msgpackEncDriver) EncodeStringBytes(c charEncoding, bs []byte) { + if bs == nil { + e.EncodeNil() + return + } + slen := len(bs) + if c == cRAW && e.h.WriteExt { + e.writeContainerLen(msgpackContainerBin, slen) + } else { + e.writeContainerLen(msgpackContainerStr, slen) + } + if slen > 0 { + e.w.writeb(bs) + } +} + +func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) { + if ct.hasFixMin && l < ct.fixCutoff { + 
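// small containers pack the length into the initial byte itself, e.g. fixstr 0xa0|len, fixmap 0x80|len +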
e.w.writen1(ct.bFixMin | byte(l)) + } else if ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt) { + e.w.writen2(ct.b8, uint8(l)) + } else if l < 65536 { + e.w.writen1(ct.b16) + bigenHelper{e.x[:2], e.w}.writeUint16(uint16(l)) + } else { + e.w.writen1(ct.b32) + bigenHelper{e.x[:4], e.w}.writeUint32(uint32(l)) + } +} + +//--------------------------------------------- + +type msgpackDecDriver struct { + d *Decoder + r decReader // *Decoder decReader decReaderT + h *MsgpackHandle + // b [scratchByteArrayLen]byte + bd byte + bdRead bool + br bool // bytes reader + noBuiltInTypes + // noStreamingCodec + // decNoSeparator + decDriverNoopContainerReader + _ [3]uint64 // padding +} + +// Note: This returns either a primitive (int, bool, etc) for non-containers, +// or a containerType, or a specific type denoting nil or extension. +// It is called when a nil interface{} is passed, leaving it up to the DecDriver +// to introspect the stream and decide how best to decode. +// It deciphers the value by looking at the stream first. +func (d *msgpackDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + n := d.d.n + var decodeFurther bool + + switch bd { + case mpNil: + n.v = valueTypeNil + d.bdRead = false + case mpFalse: + n.v = valueTypeBool + n.b = false + case mpTrue: + n.v = valueTypeBool + n.b = true + + case mpFloat: + n.v = valueTypeFloat + n.f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + case mpDouble: + n.v = valueTypeFloat + n.f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + + case mpUint8: + n.v = valueTypeUint + n.u = uint64(d.r.readn1()) + case mpUint16: + n.v = valueTypeUint + n.u = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + n.v = valueTypeUint + n.u = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + n.v = valueTypeUint + n.u = uint64(bigen.Uint64(d.r.readx(8))) + + case mpInt8: + n.v = valueTypeInt + n.i = int64(int8(d.r.readn1())) + case mpInt16: + n.v = valueTypeInt + n.i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + n.v = valueTypeInt + n.i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + n.v = valueTypeInt + n.i = int64(bigen.Uint64(d.r.readx(8))) + + default: + switch { + case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax: + // positive fixnum (always signed) + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax: + // negative fixnum + n.v = valueTypeInt + n.i = int64(int8(bd)) + case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax: + if d.h.RawToString { + n.v = valueTypeString + n.s = d.DecodeString() + } else { + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + } + case bd == mpBin8, bd == mpBin16, bd == mpBin32: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax: + n.v = valueTypeArray + decodeFurther = true + case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax: + n.v = valueTypeMap + decodeFurther = true + case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32: + n.v = valueTypeExt + clen := d.readExtLen() + n.u = uint64(d.r.readn1()) + if n.u == uint64(mpTimeExtTagU) { + n.v = valueTypeTime + n.t = d.decodeTime(clen) + } else { + n.l = d.r.readx(clen) + } + default: + d.d.errorf("cannot infer value: %s: 0x%x/%d/%s", msgBadDesc, bd, bd, mpdesc(bd)) + } + } + if !decodeFurther { + d.bdRead = false + } + if n.v == valueTypeUint && d.h.SignedInteger { + n.v = 
valueTypeInt + n.i = int64(n.u) + } + return +} + +// int can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeInt64() (i int64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + i = int64(uint64(d.r.readn1())) + case mpUint16: + i = int64(uint64(bigen.Uint16(d.r.readx(2)))) + case mpUint32: + i = int64(uint64(bigen.Uint32(d.r.readx(4)))) + case mpUint64: + i = int64(bigen.Uint64(d.r.readx(8))) + case mpInt8: + i = int64(int8(d.r.readn1())) + case mpInt16: + i = int64(int16(bigen.Uint16(d.r.readx(2)))) + case mpInt32: + i = int64(int32(bigen.Uint32(d.r.readx(4)))) + case mpInt64: + i = int64(bigen.Uint64(d.r.readx(8))) + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + i = int64(int8(d.bd)) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + i = int64(int8(d.bd)) + default: + d.d.errorf("cannot decode signed integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// uint can be decoded from msgpack type: intXXX or uintXXX +func (d *msgpackDecDriver) DecodeUint64() (ui uint64) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case mpUint8: + ui = uint64(d.r.readn1()) + case mpUint16: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case mpUint32: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case mpUint64: + ui = bigen.Uint64(d.r.readx(8)) + case mpInt8: + if i := int64(int8(d.r.readn1())); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt16: + if i := int64(int16(bigen.Uint16(d.r.readx(2)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt32: + if i := int64(int32(bigen.Uint32(d.r.readx(4)))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + case mpInt64: + if i := int64(bigen.Uint64(d.r.readx(8))); i >= 0 { + ui = uint64(i) + } else { + d.d.errorf("assigning negative signed value: %v, to unsigned type", i) + return + } + default: + switch { + case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax: + ui = uint64(d.bd) + case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax: + d.d.errorf("assigning negative signed value: %v, to unsigned type", int(d.bd)) + return + default: + d.d.errorf("cannot decode unsigned integer: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + } + d.bdRead = false + return +} + +// float can either be decoded from msgpack type: float, double or intX +func (d *msgpackDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFloat { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == mpDouble { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + f = float64(d.DecodeInt64()) + } + d.bdRead = false + return +} + +// bool can be decoded from bool, fixnum 0 or 1. 
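+// For example, the descriptor byte 0xc3 (mpTrue) and the positive fixnum 0x01
+// both decode to true, while 0xc2 (mpFalse) and fixnum 0x00 decode to false.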
+func (d *msgpackDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpFalse || d.bd == 0 { + // b = false + } else if d.bd == mpTrue || d.bd == 1 { + b = true + } else { + d.d.errorf("cannot decode bool: %s: %x/%s", msgBadDesc, d.bd, mpdesc(d.bd)) + return + } + d.bdRead = false + return +} + +func (d *msgpackDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + bd := d.bd + // DecodeBytes could be from: bin str fixstr fixarray array ... + var clen int + vt := d.ContainerType() + switch vt { + case valueTypeBytes: + // valueTypeBytes may be a mpBin or an mpStr container + if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 { + clen = d.readContainerLen(msgpackContainerBin) + } else { + clen = d.readContainerLen(msgpackContainerStr) + } + case valueTypeString: + clen = d.readContainerLen(msgpackContainerStr) + case valueTypeArray: + if zerocopy && len(bs) == 0 { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + default: + d.d.errorf("invalid container type: expecting bin|str|array, got: 0x%x", uint8(vt)) + return + } + + // these are (bin|str)(8|16|32) + d.bdRead = false + // bytes may be nil, so handle it. if nil, clen=-1. + if clen < 0 { + return nil + } + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.h.MaxInitLen, bs) +} + +func (d *msgpackDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *msgpackDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *msgpackDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *msgpackDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *msgpackDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + bd := d.bd + if bd == mpNil { + return valueTypeNil + } else if bd == mpBin8 || bd == mpBin16 || bd == mpBin32 || + (!d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax))) { + return valueTypeBytes + } else if d.h.RawToString && + (bd == mpStr8 || bd == mpStr16 || bd == mpStr32 || (bd >= mpFixStrMin && bd <= mpFixStrMax)) { + return valueTypeString + } else if bd == mpArray16 || bd == mpArray32 || (bd >= mpFixArrayMin && bd <= mpFixArrayMax) { + return valueTypeArray + } else if bd == mpMap16 || bd == mpMap32 || (bd >= mpFixMapMin && bd <= mpFixMapMax) { + return valueTypeMap + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *msgpackDecDriver) TryDecodeAsNil() (v bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return true + } + return +} + +func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) { + bd := d.bd + if bd == mpNil { + clen = -1 // to represent nil + } else if bd == ct.b8 { + clen = int(d.r.readn1()) + } else if bd == ct.b16 { + clen = int(bigen.Uint16(d.r.readx(2))) + } else if bd == ct.b32 { + clen = int(bigen.Uint32(d.r.readx(4))) + } else if (ct.bFixMin & bd) == ct.bFixMin { + clen = int(ct.bFixMin ^ bd) + } else { + d.d.errorf("cannot read container length: %s: hex: %x, decimal: %d", msgBadDesc, bd, bd) + return + } + d.bdRead = false + return +} + +func (d 
*msgpackDecDriver) ReadMapStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerMap) +} + +func (d *msgpackDecDriver) ReadArrayStart() int { + if !d.bdRead { + d.readNextBd() + } + return d.readContainerLen(msgpackContainerList) +} + +func (d *msgpackDecDriver) readExtLen() (clen int) { + switch d.bd { + case mpNil: + clen = -1 // to represent nil + case mpFixExt1: + clen = 1 + case mpFixExt2: + clen = 2 + case mpFixExt4: + clen = 4 + case mpFixExt8: + clen = 8 + case mpFixExt16: + clen = 16 + case mpExt8: + clen = int(d.r.readn1()) + case mpExt16: + clen = int(bigen.Uint16(d.r.readx(2))) + case mpExt32: + clen = int(bigen.Uint32(d.r.readx(4))) + default: + d.d.errorf("decoding ext bytes: found unexpected byte: %x", d.bd) + return + } + return +} + +func (d *msgpackDecDriver) DecodeTime() (t time.Time) { + // decode time from string bytes or ext + if !d.bdRead { + d.readNextBd() + } + if d.bd == mpNil { + d.bdRead = false + return + } + var clen int + switch d.ContainerType() { + case valueTypeBytes, valueTypeString: + clen = d.readContainerLen(msgpackContainerStr) + default: + // expect to see mpFixExt4,-1 OR mpFixExt8,-1 OR mpExt8,12,-1 + d.bdRead = false + b2 := d.r.readn1() + if d.bd == mpFixExt4 && b2 == mpTimeExtTagU { + clen = 4 + } else if d.bd == mpFixExt8 && b2 == mpTimeExtTagU { + clen = 8 + } else if d.bd == mpExt8 && b2 == 12 && d.r.readn1() == mpTimeExtTagU { + clen = 12 + } else { + d.d.errorf("invalid bytes for decoding time as extension: got 0x%x, 0x%x", d.bd, b2) + return + } + } + return d.decodeTime(clen) +} + +func (d *msgpackDecDriver) decodeTime(clen int) (t time.Time) { + // bs = d.r.readx(clen) + d.bdRead = false + switch clen { + case 4: + t = time.Unix(int64(bigen.Uint32(d.r.readx(4))), 0).UTC() + case 8: + tv := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(tv&0x00000003ffffffff), int64(tv>>34)).UTC() + case 12: + nsec := bigen.Uint32(d.r.readx(4)) + sec := bigen.Uint64(d.r.readx(8)) + t = time.Unix(int64(sec), int64(nsec)).UTC() + default: + d.d.errorf("invalid length of bytes for decoding time - expecting 4 or 8 or 12, got %d", clen) + return + } + return +} + +func (d *msgpackDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *msgpackDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + xbd := d.bd + if xbd == mpBin8 || xbd == mpBin16 || xbd == mpBin32 { + xbs = d.DecodeBytes(nil, true) + } else if xbd == mpStr8 || xbd == mpStr16 || xbd == mpStr32 || + (xbd >= mpFixStrMin && xbd <= mpFixStrMax) { + xbs = d.DecodeStringAsBytes() + } else { + clen := d.readExtLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("wrong extension tag - got %b, expecting %v", xtag, tag) + return + } + xbs = d.r.readx(clen) + } + d.bdRead = false + return +} + +//-------------------------------------------------- + +//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format. +type MsgpackHandle struct { + BasicHandle + + // RawToString controls how raw bytes are decoded into a nil interface{}. 
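+ // If true, they are decoded as string; if false, they are decoded as []byte.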
+ RawToString bool + + // NoFixedNum says to output all signed integers as 2-bytes, never as 1-byte fixednum. + NoFixedNum bool + + // WriteExt flag supports encoding configured extensions with extension tags. + // It also controls whether other elements of the new spec are encoded (ie Str8). + // + // With WriteExt=false, configured extensions are serialized as raw bytes + // and Str8 is not encoded. + // + // A stream can still be decoded into a typed value, provided an appropriate value + // is provided, but the type cannot be inferred from the stream. If no appropriate + // type is provided (e.g. decoding into a nil interface{}), you get back + // a []byte or string based on the setting of RawToString. + WriteExt bool + + binaryEncodingType + noElemSeparators + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: msgpack +func (h *MsgpackHandle) Name() string { return "msgpack" } + +// SetBytesExt sets an extension +func (h *MsgpackHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *MsgpackHandle) newEncDriver(e *Encoder) encDriver { + return &msgpackEncDriver{e: e, w: e.w, h: h} +} + +func (h *MsgpackHandle) newDecDriver(d *Decoder) decDriver { + return &msgpackDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *msgpackEncDriver) reset() { + e.w = e.e.w +} + +func (d *msgpackDecDriver) reset() { + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +//-------------------------------------------------- + +type msgpackSpecRpcCodec struct { + rpcCodec +} + +// /////////////// Spec RPC Codec /////////////////// +func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // WriteRequest can write to both a Go service, and other services that do + // not abide by the 1 argument rule of a Go service. + // We discriminate based on if the body is a MsgpackSpecRpcMultiArgs + var bodyArr []interface{} + if m, ok := body.(MsgpackSpecRpcMultiArgs); ok { + bodyArr = ([]interface{})(m) + } else { + bodyArr = []interface{}{body} + } + r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + var moe interface{} + if r.Error != "" { + moe = r.Error + } + if moe != nil && body != nil { + body = nil + } + r2 := []interface{}{1, uint32(r.Seq), moe, body} + return c.write(r2, nil, false) +} + +func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.parseCustomHeader(1, &r.Seq, &r.Error) +} + +func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod) +} + +func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error { + if body == nil { // read and discard + return c.read(nil) + } + bodyArr := []interface{}{body} + return c.read(&bodyArr) +} + +func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) { + if c.isClosed() { + return io.EOF + } + + // We read the response header by hand + // so that the body can be decoded on its own from the stream at a later time. + + const fia byte = 0x94 //four item array descriptor value + // Not sure why the panic of EOF is swallowed above. + // if bs1 := c.dec.r.readn1(); bs1 != fia { + // err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. 
Received %v", fia, bs1) + // return + // } + var ba [1]byte + var n int + for { + n, err = c.r.Read(ba[:]) + if err != nil { + return + } + if n == 1 { + break + } + } + + var b = ba[0] + if b != fia { + err = fmt.Errorf("not array - %s %x/%s", msgBadDesc, b, mpdesc(b)) + } else { + err = c.read(&b) + if err == nil { + if b != expectTypeByte { + err = fmt.Errorf("%s - expecting %v but got %x/%s", + msgBadDesc, expectTypeByte, b, mpdesc(b)) + } else { + err = c.read(msgid) + if err == nil { + err = c.read(methodOrError) + } + } + } + } + return +} + +//-------------------------------------------------- + +// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol +// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md +type msgpackSpecRpc struct{} + +// MsgpackSpecRpc implements Rpc using the communication protocol defined in +// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md . +// +// See GoRpc documentation, for information on buffering for better performance. +var MsgpackSpecRpc msgpackSpecRpc + +func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &msgpackSpecRpcCodec{newRPCCodec(conn, h)} +} + +var _ decDriver = (*msgpackDecDriver)(nil) +var _ encDriver = (*msgpackEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/rpc.go b/vendor/github.com/ugorji/go/codec/rpc.go new file mode 100644 index 0000000000..9fb3c01498 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/rpc.go @@ -0,0 +1,232 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "bufio" + "errors" + "io" + "net/rpc" + "sync" +) + +// Rpc provides a rpc Server or Client Codec for rpc communication. +type Rpc interface { + ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec + ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec +} + +// RPCOptions holds options specific to rpc functionality +type RPCOptions struct { + // RPCNoBuffer configures whether we attempt to buffer reads and writes during RPC calls. + // + // Set RPCNoBuffer=true to turn buffering off. + // Buffering can still be done if buffered connections are passed in, or + // buffering is configured on the handle. + RPCNoBuffer bool +} + +// rpcCodec defines the struct members and common methods. +type rpcCodec struct { + c io.Closer + r io.Reader + w io.Writer + f ioFlusher + + dec *Decoder + enc *Encoder + // bw *bufio.Writer + // br *bufio.Reader + mu sync.Mutex + h Handle + + cls bool + clsmu sync.RWMutex + clsErr error +} + +func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec { + // return newRPCCodec2(bufio.NewReader(conn), bufio.NewWriter(conn), conn, h) + return newRPCCodec2(conn, conn, conn, h) +} + +func newRPCCodec2(r io.Reader, w io.Writer, c io.Closer, h Handle) rpcCodec { + // defensive: ensure that jsonH has TermWhitespace turned on. + if jsonH, ok := h.(*JsonHandle); ok && !jsonH.TermWhitespace { + panic(errors.New("rpc requires a JsonHandle with TermWhitespace set to true")) + } + // always ensure that we use a flusher, and always flush what was written to the connection. + // we lose nothing by using a buffered writer internally. 
+ f, ok := w.(ioFlusher) + bh := h.getBasicHandle() + if !bh.RPCNoBuffer { + if bh.WriterBufferSize <= 0 { + if !ok { + bw := bufio.NewWriter(w) + f, w = bw, bw + } + } + if bh.ReaderBufferSize <= 0 { + if _, ok = w.(ioPeeker); !ok { + if _, ok = w.(ioBuffered); !ok { + br := bufio.NewReader(r) + r = br + } + } + } + } + return rpcCodec{ + c: c, + w: w, + r: r, + f: f, + h: h, + enc: NewEncoder(w, h), + dec: NewDecoder(r, h), + } +} + +func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2 bool) (err error) { + if c.isClosed() { + return c.clsErr + } + err = c.enc.Encode(obj1) + if err == nil { + if writeObj2 { + err = c.enc.Encode(obj2) + } + // if err == nil && c.f != nil { + // err = c.f.Flush() + // } + } + if c.f != nil { + if err == nil { + err = c.f.Flush() + } else { + _ = c.f.Flush() // swallow flush error, so we maintain prior error on write + } + } + return +} + +func (c *rpcCodec) swallow(err *error) { + defer panicToErr(c.dec, err) + c.dec.swallow() +} + +func (c *rpcCodec) read(obj interface{}) (err error) { + if c.isClosed() { + return c.clsErr + } + //If nil is passed in, we should read and discard + if obj == nil { + // var obj2 interface{} + // return c.dec.Decode(&obj2) + c.swallow(&err) + return + } + return c.dec.Decode(obj) +} + +func (c *rpcCodec) isClosed() (b bool) { + if c.c != nil { + c.clsmu.RLock() + b = c.cls + c.clsmu.RUnlock() + } + return +} + +func (c *rpcCodec) Close() error { + if c.c == nil || c.isClosed() { + return c.clsErr + } + c.clsmu.Lock() + c.cls = true + c.clsErr = c.c.Close() + c.clsmu.Unlock() + return c.clsErr +} + +func (c *rpcCodec) ReadResponseBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +type goRpcCodec struct { + rpcCodec +} + +func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error { + // Must protect for concurrent access as per API + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true) +} + +func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error { + c.mu.Lock() + defer c.mu.Unlock() + return c.write(r, body, true) +} + +func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error { + return c.read(r) +} + +func (c *goRpcCodec) ReadRequestBody(body interface{}) error { + return c.read(body) +} + +// ------------------------------------- + +// goRpc is the implementation of Rpc that uses the communication protocol +// as defined in net/rpc package. +type goRpc struct{} + +// GoRpc implements Rpc using the communication protocol defined in net/rpc package. +// +// Note: network connection (from net.Dial, of type io.ReadWriteCloser) is not buffered. +// +// For performance, you should configure WriterBufferSize and ReaderBufferSize on the handle. +// This ensures we use an adequate buffer during reading and writing. +// If not configured, we will internally initialize and use a buffer during reads and writes. +// This can be turned off via the RPCNoBuffer option on the Handle. 
+// var handle codec.JsonHandle +// handle.RPCNoBuffer = true // turns off attempt by rpc module to initialize a buffer +// +// Example 1: one way of configuring buffering explicitly: +// var handle codec.JsonHandle // codec handle +// handle.ReaderBufferSize = 1024 +// handle.WriterBufferSize = 1024 +// var conn io.ReadWriteCloser // connection got from a socket +// var serverCodec = GoRpc.ServerCodec(conn, handle) +// var clientCodec = GoRpc.ClientCodec(conn, handle) +// +// Example 2: you can also explicitly create a buffered connection yourself, +// and not worry about configuring the buffer sizes in the Handle. +// var handle codec.Handle // codec handle +// var conn io.ReadWriteCloser // connection got from a socket +// var bufconn = struct { // bufconn here is a buffered io.ReadWriteCloser +// io.Closer +// *bufio.Reader +// *bufio.Writer +// }{conn, bufio.NewReader(conn), bufio.NewWriter(conn)} +// var serverCodec = GoRpc.ServerCodec(bufconn, handle) +// var clientCodec = GoRpc.ClientCodec(bufconn, handle) +// +var GoRpc goRpc + +func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} + +func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec { + return &goRpcCodec{newRPCCodec(conn, h)} +} diff --git a/vendor/github.com/ugorji/go/codec/simple.go b/vendor/github.com/ugorji/go/codec/simple.go new file mode 100644 index 0000000000..f1e181ef31 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/simple.go @@ -0,0 +1,652 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +package codec + +import ( + "math" + "reflect" + "time" +) + +const ( + _ uint8 = iota + simpleVdNil = 1 + simpleVdFalse = 2 + simpleVdTrue = 3 + simpleVdFloat32 = 4 + simpleVdFloat64 = 5 + + // each lasts for 4 (ie n, n+1, n+2, n+3) + simpleVdPosInt = 8 + simpleVdNegInt = 12 + + simpleVdTime = 24 + + // containers: each lasts for 4 (ie n, n+1, n+2, ... 
n+7) + simpleVdString = 216 + simpleVdByteArray = 224 + simpleVdArray = 232 + simpleVdMap = 240 + simpleVdExt = 248 +) + +type simpleEncDriver struct { + noBuiltInTypes + // encNoSeparator + e *Encoder + h *SimpleHandle + w encWriter + b [8]byte + // c containerState + encDriverTrackContainerWriter + // encDriverNoopContainerWriter + _ [2]uint64 // padding +} + +func (e *simpleEncDriver) EncodeNil() { + e.w.writen1(simpleVdNil) +} + +func (e *simpleEncDriver) EncodeBool(b bool) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && !b { + e.EncodeNil() + return + } + if b { + e.w.writen1(simpleVdTrue) + } else { + e.w.writen1(simpleVdFalse) + } +} + +func (e *simpleEncDriver) EncodeFloat32(f float32) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat32) + bigenHelper{e.b[:4], e.w}.writeUint32(math.Float32bits(f)) +} + +func (e *simpleEncDriver) EncodeFloat64(f float64) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && f == 0.0 { + e.EncodeNil() + return + } + e.w.writen1(simpleVdFloat64) + bigenHelper{e.b[:8], e.w}.writeUint64(math.Float64bits(f)) +} + +func (e *simpleEncDriver) EncodeInt(v int64) { + if v < 0 { + e.encUint(uint64(-v), simpleVdNegInt) + } else { + e.encUint(uint64(v), simpleVdPosInt) + } +} + +func (e *simpleEncDriver) EncodeUint(v uint64) { + e.encUint(v, simpleVdPosInt) +} + +func (e *simpleEncDriver) encUint(v uint64, bd uint8) { + if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == 0 { + e.EncodeNil() + return + } + if v <= math.MaxUint8 { + e.w.writen2(bd, uint8(v)) + } else if v <= math.MaxUint16 { + e.w.writen1(bd + 1) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(v)) + } else if v <= math.MaxUint32 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(v)) + } else { // if v <= math.MaxUint64 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:8], e.w}.writeUint64(v) + } +} + +func (e *simpleEncDriver) encLen(bd byte, length int) { + if length == 0 { + e.w.writen1(bd) + } else if length <= math.MaxUint8 { + e.w.writen1(bd + 1) + e.w.writen1(uint8(length)) + } else if length <= math.MaxUint16 { + e.w.writen1(bd + 2) + bigenHelper{e.b[:2], e.w}.writeUint16(uint16(length)) + } else if int64(length) <= math.MaxUint32 { + e.w.writen1(bd + 3) + bigenHelper{e.b[:4], e.w}.writeUint32(uint32(length)) + } else { + e.w.writen1(bd + 4) + bigenHelper{e.b[:8], e.w}.writeUint64(uint64(length)) + } +} + +func (e *simpleEncDriver) EncodeExt(rv interface{}, xtag uint64, ext Ext, _ *Encoder) { + bs := ext.WriteExt(rv) + if bs == nil { + e.EncodeNil() + return + } + e.encodeExtPreamble(uint8(xtag), len(bs)) + e.w.writeb(bs) +} + +func (e *simpleEncDriver) EncodeRawExt(re *RawExt, _ *Encoder) { + e.encodeExtPreamble(uint8(re.Tag), len(re.Data)) + e.w.writeb(re.Data) +} + +func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) { + e.encLen(simpleVdExt, length) + e.w.writen1(xtag) +} + +func (e *simpleEncDriver) WriteArrayStart(length int) { + e.c = containerArrayStart + e.encLen(simpleVdArray, length) +} + +func (e *simpleEncDriver) WriteMapStart(length int) { + e.c = containerMapStart + e.encLen(simpleVdMap, length) +} + +func (e *simpleEncDriver) EncodeString(c charEncoding, v string) { + if false && e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == "" { + e.EncodeNil() + return + } + e.encLen(simpleVdString, len(v)) + e.w.writestr(v) +} + +// func (e *simpleEncDriver) EncodeSymbol(v string) { +// e.EncodeString(cUTF8, v) +// } + +func (e *simpleEncDriver) 
EncodeStringBytes(c charEncoding, v []byte) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && v == nil { + if v == nil { + e.EncodeNil() + return + } + e.encLen(simpleVdByteArray, len(v)) + e.w.writeb(v) +} + +func (e *simpleEncDriver) EncodeTime(t time.Time) { + // if e.h.EncZeroValuesAsNil && e.c != containerMapKey && t.IsZero() { + if t.IsZero() { + e.EncodeNil() + return + } + v, err := t.MarshalBinary() + if err != nil { + e.e.errorv(err) + return + } + // time.Time marshalbinary takes about 14 bytes. + e.w.writen2(simpleVdTime, uint8(len(v))) + e.w.writeb(v) +} + +//------------------------------------ + +type simpleDecDriver struct { + d *Decoder + h *SimpleHandle + r decReader + bdRead bool + bd byte + br bool // a bytes reader? + c containerState + // b [scratchByteArrayLen]byte + noBuiltInTypes + // noStreamingCodec + decDriverNoopContainerReader + _ [3]uint64 // padding +} + +func (d *simpleDecDriver) readNextBd() { + d.bd = d.r.readn1() + d.bdRead = true +} + +func (d *simpleDecDriver) uncacheRead() { + if d.bdRead { + d.r.unreadn1() + d.bdRead = false + } +} + +func (d *simpleDecDriver) ContainerType() (vt valueType) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdNil: + return valueTypeNil + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + return valueTypeBytes + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + return valueTypeString + case simpleVdArray, simpleVdArray + 1, + simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4: + return valueTypeArray + case simpleVdMap, simpleVdMap + 1, + simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + return valueTypeMap + // case simpleVdTime: + // return valueTypeTime + } + // else { + // d.d.errorf("isContainerType: unsupported parameter: %v", vt) + // } + return valueTypeUnset +} + +func (d *simpleDecDriver) TryDecodeAsNil() bool { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return true + } + return false +} + +func (d *simpleDecDriver) decCheckInteger() (ui uint64, neg bool) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdPosInt: + ui = uint64(d.r.readn1()) + case simpleVdPosInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + case simpleVdPosInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + case simpleVdPosInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + case simpleVdNegInt: + ui = uint64(d.r.readn1()) + neg = true + case simpleVdNegInt + 1: + ui = uint64(bigen.Uint16(d.r.readx(2))) + neg = true + case simpleVdNegInt + 2: + ui = uint64(bigen.Uint32(d.r.readx(4))) + neg = true + case simpleVdNegInt + 3: + ui = uint64(bigen.Uint64(d.r.readx(8))) + neg = true + default: + d.d.errorf("integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd) + return + } + // don't do this check, because callers may only want the unsigned value. 
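+ // (For instance, DecodeUint64 must accept magnitudes above math.MaxInt64;
+ // it rejects negative values itself.)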
+ // if ui > math.MaxInt64 { + // d.d.errorf("decIntAny: Integer out of range for signed int64: %v", ui) + // return + // } + return +} + +func (d *simpleDecDriver) DecodeInt64() (i int64) { + ui, neg := d.decCheckInteger() + i = chkOvf.SignedIntV(ui) + if neg { + i = -i + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeUint64() (ui uint64) { + ui, neg := d.decCheckInteger() + if neg { + d.d.errorf("assigning negative signed value to unsigned type") + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeFloat64() (f float64) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdFloat32 { + f = float64(math.Float32frombits(bigen.Uint32(d.r.readx(4)))) + } else if d.bd == simpleVdFloat64 { + f = math.Float64frombits(bigen.Uint64(d.r.readx(8))) + } else { + if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 { + f = float64(d.DecodeInt64()) + } else { + d.d.errorf("float only valid from float32/64: Invalid descriptor: %v", d.bd) + return + } + } + d.bdRead = false + return +} + +// bool can be decoded from bool only (single byte). +func (d *simpleDecDriver) DecodeBool() (b bool) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdTrue { + b = true + } else if d.bd == simpleVdFalse { + } else { + d.d.errorf("cannot decode bool - %s: %x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) ReadMapStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + d.c = containerMapStart + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayStart() (length int) { + if !d.bdRead { + d.readNextBd() + } + d.bdRead = false + d.c = containerArrayStart + return d.decLen() +} + +func (d *simpleDecDriver) ReadArrayElem() { + d.c = containerArrayElem +} + +func (d *simpleDecDriver) ReadArrayEnd() { + d.c = containerArrayEnd +} + +func (d *simpleDecDriver) ReadMapElemKey() { + d.c = containerMapKey +} + +func (d *simpleDecDriver) ReadMapElemValue() { + d.c = containerMapValue +} + +func (d *simpleDecDriver) ReadMapEnd() { + d.c = containerMapEnd +} + +func (d *simpleDecDriver) decLen() int { + switch d.bd % 8 { + case 0: + return 0 + case 1: + return int(d.r.readn1()) + case 2: + return int(bigen.Uint16(d.r.readx(2))) + case 3: + ui := uint64(bigen.Uint32(d.r.readx(4))) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } + return int(ui) + case 4: + ui := bigen.Uint64(d.r.readx(8)) + if chkOvf.Uint(ui, intBitsize) { + d.d.errorf("overflow integer: %v", ui) + return 0 + } + return int(ui) + } + d.d.errorf("cannot read length: bd%%8 must be in range 0..4. 
Got: %d", d.bd%8) + return -1 +} + +func (d *simpleDecDriver) DecodeString() (s string) { + return string(d.DecodeBytes(d.d.b[:], true)) +} + +func (d *simpleDecDriver) DecodeStringAsBytes() (s []byte) { + return d.DecodeBytes(d.d.b[:], true) +} + +func (d *simpleDecDriver) DecodeBytes(bs []byte, zerocopy bool) (bsOut []byte) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + // check if an "array" of uint8's (see ContainerType for how to infer if an array) + if d.bd >= simpleVdArray && d.bd <= simpleVdMap+4 { + if len(bs) == 0 && zerocopy { + bs = d.d.b[:] + } + bsOut, _ = fastpathTV.DecSliceUint8V(bs, true, d.d) + return + } + + clen := d.decLen() + d.bdRead = false + if zerocopy { + if d.br { + return d.r.readx(clen) + } else if len(bs) == 0 { + bs = d.d.b[:] + } + } + return decByteSlice(d.r, clen, d.d.h.MaxInitLen, bs) +} + +func (d *simpleDecDriver) DecodeTime() (t time.Time) { + if !d.bdRead { + d.readNextBd() + } + if d.bd == simpleVdNil { + d.bdRead = false + return + } + if d.bd != simpleVdTime { + d.d.errorf("invalid descriptor for time.Time - expect 0x%x, received 0x%x", simpleVdTime, d.bd) + return + } + d.bdRead = false + clen := int(d.r.readn1()) + b := d.r.readx(clen) + if err := (&t).UnmarshalBinary(b); err != nil { + d.d.errorv(err) + } + return +} + +func (d *simpleDecDriver) DecodeExt(rv interface{}, xtag uint64, ext Ext) (realxtag uint64) { + if xtag > 0xff { + d.d.errorf("ext: tag must be <= 0xff; got: %v", xtag) + return + } + realxtag1, xbs := d.decodeExtV(ext != nil, uint8(xtag)) + realxtag = uint64(realxtag1) + if ext == nil { + re := rv.(*RawExt) + re.Tag = realxtag + re.Data = detachZeroCopyBytes(d.br, re.Data, xbs) + } else { + ext.ReadExt(rv, xbs) + } + return +} + +func (d *simpleDecDriver) decodeExtV(verifyTag bool, tag byte) (xtag byte, xbs []byte) { + if !d.bdRead { + d.readNextBd() + } + switch d.bd { + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + l := d.decLen() + xtag = d.r.readn1() + if verifyTag && xtag != tag { + d.d.errorf("wrong extension tag. Got %b. 
Expecting: %v", xtag, tag) + return + } + xbs = d.r.readx(l) + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + xbs = d.DecodeBytes(nil, true) + default: + d.d.errorf("ext - %s - expecting extensions/bytearray, got: 0x%x", msgBadDesc, d.bd) + return + } + d.bdRead = false + return +} + +func (d *simpleDecDriver) DecodeNaked() { + if !d.bdRead { + d.readNextBd() + } + + n := d.d.n + var decodeFurther bool + + switch d.bd { + case simpleVdNil: + n.v = valueTypeNil + case simpleVdFalse: + n.v = valueTypeBool + n.b = false + case simpleVdTrue: + n.v = valueTypeBool + n.b = true + case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3: + if d.h.SignedInteger { + n.v = valueTypeInt + n.i = d.DecodeInt64() + } else { + n.v = valueTypeUint + n.u = d.DecodeUint64() + } + case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3: + n.v = valueTypeInt + n.i = d.DecodeInt64() + case simpleVdFloat32: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdFloat64: + n.v = valueTypeFloat + n.f = d.DecodeFloat64() + case simpleVdTime: + n.v = valueTypeTime + n.t = d.DecodeTime() + case simpleVdString, simpleVdString + 1, + simpleVdString + 2, simpleVdString + 3, simpleVdString + 4: + n.v = valueTypeString + n.s = d.DecodeString() + case simpleVdByteArray, simpleVdByteArray + 1, + simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4: + n.v = valueTypeBytes + n.l = d.DecodeBytes(nil, false) + case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4: + n.v = valueTypeExt + l := d.decLen() + n.u = uint64(d.r.readn1()) + n.l = d.r.readx(l) + case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, + simpleVdArray + 3, simpleVdArray + 4: + n.v = valueTypeArray + decodeFurther = true + case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4: + n.v = valueTypeMap + decodeFurther = true + default: + d.d.errorf("cannot infer value - %s 0x%x", msgBadDesc, d.bd) + } + + if !decodeFurther { + d.bdRead = false + } + return +} + +//------------------------------------ + +// SimpleHandle is a Handle for a very simple encoding format. +// +// simple is a simplistic codec similar to binc, but not as compact. +// - Encoding of a value is always preceded by the descriptor byte (bd) +// - True, false, nil are encoded fully in 1 byte (the descriptor) +// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte). +// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers. +// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte) +// - Length of containers (strings, bytes, array, map, extensions) +// are encoded in 0, 1, 2, 4 or 8 bytes. +// Zero-length containers have no length encoded. +// For others, the number of bytes is given by pow(2, bd%3) +// - maps are encoded as [bd] [length] [[key][value]]... +// - arrays are encoded as [bd] [length] [value]... +// - extensions are encoded as [bd] [length] [tag] [byte]... +// - strings/bytearrays are encoded as [bd] [length] [byte]... +// - time.Time are encoded as [bd] [length] [byte]... +// +// The full spec will be published soon. 
+type SimpleHandle struct { + BasicHandle + binaryEncodingType + noElemSeparators + // EncZeroValuesAsNil says to encode zero values for numbers, bool, string, etc as nil + EncZeroValuesAsNil bool + + // _ [1]uint64 // padding +} + +// Name returns the name of the handle: simple +func (h *SimpleHandle) Name() string { return "simple" } + +// SetBytesExt sets an extension +func (h *SimpleHandle) SetBytesExt(rt reflect.Type, tag uint64, ext BytesExt) (err error) { + return h.SetExt(rt, tag, &extWrapper{ext, interfaceExtFailer{}}) +} + +func (h *SimpleHandle) hasElemSeparators() bool { return true } // as it implements Write(Map|Array)XXX + +func (h *SimpleHandle) newEncDriver(e *Encoder) encDriver { + return &simpleEncDriver{e: e, w: e.w, h: h} +} + +func (h *SimpleHandle) newDecDriver(d *Decoder) decDriver { + return &simpleDecDriver{d: d, h: h, r: d.r, br: d.bytes} +} + +func (e *simpleEncDriver) reset() { + e.c = 0 + e.w = e.e.w +} + +func (d *simpleDecDriver) reset() { + d.c = 0 + d.r, d.br = d.d.r, d.d.bytes + d.bd, d.bdRead = 0, false +} + +var _ decDriver = (*simpleDecDriver)(nil) +var _ encDriver = (*simpleEncDriver)(nil) diff --git a/vendor/github.com/ugorji/go/codec/xml.go b/vendor/github.com/ugorji/go/codec/xml.go new file mode 100644 index 0000000000..19fc36caf3 --- /dev/null +++ b/vendor/github.com/ugorji/go/codec/xml.go @@ -0,0 +1,508 @@ +// Copyright (c) 2012-2018 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a MIT license found in the LICENSE file. + +// +build ignore + +package codec + +import "reflect" + +/* + +A strict Non-validating namespace-aware XML 1.0 parser and (en|de)coder. + +We are attempting this due to perceived issues with encoding/xml: + - Complicated. It tried to do too much, and is not as simple to use as json. + - Due to over-engineering, reflection is over-used AND performance suffers: + java is 6X faster:http://fabsk.eu/blog/category/informatique/dev/golang/ + even PYTHON performs better: http://outgoing.typepad.com/outgoing/2014/07/exploring-golang.html + +codec framework will offer the following benefits + - VASTLY improved performance (when using reflection-mode or codecgen) + - simplicity and consistency: with the rest of the supported formats + - all other benefits of codec framework (streaming, codegeneration, etc) + +codec is not a drop-in replacement for encoding/xml. +It is a replacement, based on the simplicity and performance of codec. +Look at it like JAXB for Go. + +Challenges: + - Need to output XML preamble, with all namespaces at the right location in the output. + - Each "end" block is dynamic, so we need to maintain a context-aware stack + - How to decide when to use an attribute VS an element + - How to handle chardata, attr, comment EXPLICITLY. + - Should it output fragments? + e.g. encoding a bool should just output true OR false, which is not well-formed XML. + +Extend the struct tag. See representative example: + type X struct { + ID uint8 `codec:"http://ugorji.net/x-namespace xid id,omitempty,toarray,attr,cdata"` + // format: [namespace-uri ][namespace-prefix ]local-name, ... + } + +Based on this, we encode + - fields as elements, BUT + encode as attributes if struct tag contains ",attr" and is a scalar (bool, number or string) + - text as entity-escaped text, BUT encode as CDATA if struct tag contains ",cdata". + +To handle namespaces: + - XMLHandle is denoted as being namespace-aware. + Consequently, we WILL use the ns:name pair to encode and decode if defined, else use the plain name. 
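+ (e.g. with the struct tag above binding prefix xid to http://ugorji.net/x-namespace,
+ field ID would encode using the pair xid:id rather than the plain name id.)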
+ - *Encoder and *Decoder know whether the Handle "prefers" namespaces. + - add *Encoder.getEncName(*structFieldInfo). + No one calls *structFieldInfo.indexForEncName directly anymore + - OR better yet: indexForEncName is namespace-aware, and helper.go is all namespace-aware + indexForEncName takes a parameter of the form namespace:local-name OR local-name + - add *Decoder.getStructFieldInfo(encName string) // encName here is either like abc, or h1:nsabc + by being a method on *Decoder, or maybe a method on the Handle itself. + No one accesses .encName anymore + - let encode.go and decode.go use these (for consistency) + - only problem exists for gen.go, where we create a big switch on encName. + Now, we also have to add a switch on strings.endsWith(kName, encNsName) + - gen.go will need to have many more methods, and then double-on the 2 switch loops like: + switch k { + case "abc" : x.abc() + case "def" : x.def() + default { + switch { + case !nsAware: panic(...) + case strings.endsWith(":abc"): x.abc() + case strings.endsWith(":def"): x.def() + default: panic(...) + } + } + } + +The structure below accommodates this: + + type typeInfo struct { + sfi []*structFieldInfo // sorted by encName + sfins // sorted by namespace + sfia // sorted, to have those with attributes at the top. Needed to write XML appropriately. + sfip // unsorted + } + type structFieldInfo struct { + encName + nsEncName + ns string + attr bool + cdata bool + } + +indexForEncName is now an internal helper function that takes a sorted array +(one of ti.sfins or ti.sfi). It is only used by *Encoder.getStructFieldInfo(...) + +There will be a separate parser from the builder. +The parser will have a method: next() xmlToken method. It has lookahead support, +so you can pop multiple tokens, make a determination, and push them back in the order popped. +This will be needed to determine whether we are "nakedly" decoding a container or not. +The stack will be implemented using a slice and push/pop happens at the [0] element. + +xmlToken has fields: + - type uint8: 0 | ElementStart | ElementEnd | AttrKey | AttrVal | Text + - value string + - ns string + +SEE: http://www.xml.com/pub/a/98/10/guide0.html?page=3#ENTDECL + +The following are skipped when parsing: + - External Entities (from external file) + - Notation Declaration e.g. + - Entity Declarations & References + - XML Declaration (assume UTF-8) + - XML Directive i.e. + - Other Declarations: Notation, etc. + - Comment + - Processing Instruction + - schema / DTD for validation: + We are not a VALIDATING parser. Validation is done elsewhere. + However, some parts of the DTD internal subset are used (SEE BELOW). + For Attribute List Declarations e.g. + + We considered using the ATTLIST to get "default" value, but not to validate the contents. (VETOED) + +The following XML features are supported + - Namespace + - Element + - Attribute + - cdata + - Unicode escape + +The following DTD (when as an internal sub-set) features are supported: + - Internal Entities e.g. + AND entities for the set: [<>&"'] + - Parameter entities e.g. + + +At decode time, a structure containing the following is kept + - namespace mapping + - default attribute values + - all internal entities (<>&"' and others written in the document) + +When decode starts, it parses XML namespace declarations and creates a map in the +xmlDecDriver. While parsing, that map continuously gets updated. +The only problem happens when a namespace declaration happens on the node that it defines. +e.g. 
+To handle this, each Element must be fully parsed at a time, +even if it amounts to multiple tokens which are returned one at a time on request. + +xmlns is a special attribute name. + - It is used to define namespaces, including the default + - It is never returned as an AttrKey or AttrVal. + *We may decide later to allow user to use it e.g. you want to parse the xmlns mappings into a field.* + +Number, bool, null, mapKey, etc can all be decoded from any xmlToken. +This accommodates map[int]string for example. + +It should be possible to create a schema from the types, +or vice versa (generate types from schema with appropriate tags). +This is however out-of-scope from this parsing project. + +We should write all namespace information at the first point that it is referenced in the tree, +and use the mapping for all child nodes and attributes. This means that state is maintained +at a point in the tree. This also means that calls to Decode or MustDecode will reset some state. + +When decoding, it is important to keep track of entity references and default attribute values. +It seems these can only be stored in the DTD components. We should honor them when decoding. + +Configuration for XMLHandle will look like this: + + XMLHandle + DefaultNS string + // Encoding: + NS map[string]string // ns URI to key, used for encoding + // Decoding: in case ENTITY declared in external schema or dtd, store info needed here + Entities map[string]string // map of entity rep to character + + +During encode, if a namespace mapping is not defined for a namespace found on a struct, +then we create a mapping for it using nsN (where N is 1..1000000, and doesn't conflict +with any other namespace mapping). + +Note that different fields in a struct can have different namespaces. +However, all fields will default to the namespace on the _struct field (if defined). + +An XML document is a name, a map of attributes and a list of children. +Consequently, we cannot "DecodeNaked" into a map[string]interface{} (for example). +We have to "DecodeNaked" into something that resembles XML data. + +To support DecodeNaked (decode into nil interface{}), we have to define some "supporting" types: + type Name struct { // Preferred. Less allocations due to conversions. + Local string + Space string + } + type Element struct { + Name Name + Attrs map[Name]string + Children []interface{} // each child is either *Element or string + } +Only two "supporting" types are exposed for XML: Name and Element. + +// ------------------ + +We considered 'type Name string' where Name is like "Space Local" (space-separated). +We decided against it, because each creation of a name would lead to +double allocation (first convert []byte to string, then concatenate them into a string). +The benefit is that it is faster to read Attrs from a map. But given that Element is a value +object, we want to eschew methods and have public exposed variables. + +We also considered the following, where xml types were not value objects, and we used +intelligent accessor methods to extract information and for performance. +*** WE DECIDED AGAINST THIS. *** + type Attr struct { + Name Name + Value string + } + // Element is a ValueObject: There are no accessor methods. + // Make element self-contained. + type Element struct { + Name Name + attrsMap map[string]string // where key is "Space Local" + attrs []Attr + childrenT []string + childrenE []Element + childrenI []int // each child is a index into T or E. 
+ } + func (x *Element) child(i) interface{} // returns string or *Element + +// ------------------ + +Per XML spec and our default handling, white space is always treated as +insignificant between elements, except in a text node. The xml:space='preserve' +attribute is ignored. + +**Note: there is no xml: namespace. The xml: attributes were defined before namespaces.** +**So treat them as just "directives" that should be interpreted to mean something**. + +On encoding, we support indenting aka prettifying markup in the same way we support it for json. + +A document or element can only be encoded/decoded from/to a struct. In this mode: + - struct name maps to element name (or tag-info from _struct field) + - fields are mapped to child elements or attributes + +A map is either encoded as attributes on current element, or as a set of child elements. +Maps are encoded as attributes iff their keys and values are primitives (number, bool, string). + +A list is encoded as a set of child elements. + +Primitives (number, bool, string) are encoded as an element, attribute or text +depending on the context. + +Extensions must encode themselves as a text string. + +Encoding is tough, specifically when encoding mappings, because we need to encode +as either attribute or element. To do this, we need to default to encoding as attributes, +and then let Encoder inform the Handle when to start encoding as nodes. +i.e. Encoder does something like: + + h.EncodeMapStart() + h.Encode(), h.Encode(), ... + h.EncodeMapNotAttrSignal() // this is not a bool, because it's a signal + h.Encode(), h.Encode(), ... + h.EncodeEnd() + +Only XMLHandle understands this, and will set itself to start encoding as elements. + +This support extends to maps. For example, if a struct field is a map, and it has +the struct tag signifying it should be attr, then all its fields are encoded as attributes. +e.g. + + type X struct { + M map[string]int `codec:"m,attr"` // encode keys as attributes named + } + +Question: + - if encoding a map, what if map keys have spaces in them??? + Then they cannot be attributes or child elements. Error. + +Options to consider adding later: + - For attribute values, normalize by trimming beginning and ending white space, + and converting every white space sequence to a single space. + - ATTLIST restrictions are enforced. + e.g. default value of xml:space, skipping xml:XYZ style attributes, etc. + - Consider supporting NON-STRICT mode (e.g. to handle HTML parsing). + Some elements e.g. br, hr, etc need not close and should be auto-closed + ... (see http://www.w3.org/TR/html4/loose.dtd) + An expansive set of entities are pre-defined. + - Have easy way to create a HTML parser: + add a HTML() method to XMLHandle, that will set Strict=false, specify AutoClose, + and add HTML Entities to the list. + - Support validating element/attribute XMLName before writing it. + Keep this behind a flag, which is set to false by default (for performance). + type XMLHandle struct { + CheckName bool + } + +Misc: + +ROADMAP (1 weeks): + - build encoder (1 day) + - build decoder (based off xmlParser) (1 day) + - implement xmlParser (2 days). + Look at encoding/xml for inspiration. 
+ - integrate and TEST (1 day) + - write article and post it (1 day) + +// ---------- MORE NOTES FROM 2017-11-30 ------------ + +when parsing +- parse the attributes first +- then parse the nodes + +basically: +- if encoding a field: we use the field name for the wrapper +- if encoding a non-field, then just use the element type name + + map[string]string ==> abcval... or + val... OR + val1val2... <- PREFERRED + []string ==> v1v2... + string v1 ==> v1 + bool true ==> true + float 1.0 ==> 1.0 + ... + + F1 map[string]string ==> abcval... OR + val... OR + val... <- PREFERRED + F2 []string ==> v1v2... + F3 bool ==> true + ... + +- a scalar is encoded as: + (value) of type T ==> + (value) of field F ==> +- A kv-pair is encoded as: + (key,value) ==> OR + (key,value) of field F ==> OR +- A map or struct is just a list of kv-pairs +- A list is encoded as sequences of same node e.g. + + + value21 + value22 +- we may have to singularize the field name, when entering into xml, + and pluralize it when encoding. +- bi-directional encode->decode->encode is not a MUST. + even encoding/xml cannot decode correctly what was encoded: + + see https://play.golang.org/p/224V_nyhMS + func main() { + fmt.Println("Hello, playground") + v := []interface{}{"hello", 1, true, nil, time.Now()} + s, err := xml.Marshal(v) + fmt.Printf("err: %v, \ns: %s\n", err, s) + var v2 []interface{} + err = xml.Unmarshal(s, &v2) + fmt.Printf("err: %v, \nv2: %v\n", err, v2) + type T struct { + V []interface{} + } + v3 := T{V: v} + s, err = xml.Marshal(v3) + fmt.Printf("err: %v, \ns: %s\n", err, s) + var v4 T + err = xml.Unmarshal(s, &v4) + fmt.Printf("err: %v, \nv4: %v\n", err, v4) + } + Output: + err: <nil>, + s: hello1true + err: <nil>, + v2: [] + err: <nil>, + s: hello1true2009-11-10T23:00:00Z + err: <nil>, + v4: {[ ]} +- +*/ + +// ----------- PARSER ------------------- + +type xmlTokenType uint8 + +const ( + _ xmlTokenType = iota << 1 + xmlTokenElemStart + xmlTokenElemEnd + xmlTokenAttrKey + xmlTokenAttrVal + xmlTokenText +) + +type xmlToken struct { + Type xmlTokenType + Value string + Namespace string // blank for AttrVal and Text +} + +type xmlParser struct { + r decReader + toks []xmlToken // list of tokens. + ptr int // ptr into the toks slice + done bool // nothing else to parse. r now returns EOF. +} + +func (x *xmlParser) next() (t *xmlToken) { + // once x.done, or x.ptr == len(x.toks) == 0, then return nil (to signify finish) + if !x.done && len(x.toks) == 0 { + x.nextTag() + } + // parses one element at a time (into possibly many tokens) + if x.ptr < len(x.toks) { + t = &(x.toks[x.ptr]) + x.ptr++ + if x.ptr == len(x.toks) { + x.ptr = 0 + x.toks = x.toks[:0] + } + } + return +} + +// nextTag parses the next element and fills up toks. +// It sets the done flag once EOF is reached. +func (x *xmlParser) nextTag() { + // TODO: implement. +} + +// ----------- ENCODER ------------------- + +type xmlEncDriver struct { + e *Encoder + w encWriter + h *XMLHandle + b [64]byte // scratch + bs []byte // scratch + // s jsonStack + noBuiltInTypes +} + +// ----------- DECODER ------------------- + +type xmlDecDriver struct { + d *Decoder + h *XMLHandle + r decReader // *bytesDecReader decReader + ct valueType // container type. one of unset, array or map. 
+ bstr [8]byte // scratch used for string \UXXX parsing
+ b [64]byte // scratch
+
+ // wsSkipped bool // whitespace skipped
+
+ // s jsonStack
+
+ noBuiltInTypes
+}
+
+// DecodeNaked will decode into an XMLNode
+
+// XMLName is a value object representing a namespace-aware NAME
+type XMLName struct {
+ Local string
+ Space string
+}
+
+// XMLNode represents a "union" of the different types of XML Nodes.
+// Only one of the fields (Element or Text) is set.
+type XMLNode struct {
+ Element *XMLElement
+ Text string
+}
+
+// XMLElement is a value object representing a fully-parsed XML element.
+type XMLElement struct {
+ Name XMLName
+ Attrs map[XMLName]string
+ // Children is a list of child nodes, each holding a *XMLElement or a text string
+ Children []XMLNode
+}
+
+// ----------- HANDLE -------------------
+
+type XMLHandle struct {
+ BasicHandle
+ textEncodingType
+
+ DefaultNS string
+ NS map[string]string // ns URI to key, for encoding
+ Entities map[string]string // entity representation to string, for encoding.
+}
+
+func (h *XMLHandle) newEncDriver(e *Encoder) encDriver {
+ return &xmlEncDriver{e: e, w: e.w, h: h}
+}
+
+func (h *XMLHandle) newDecDriver(d *Decoder) decDriver {
+ // d := xmlDecDriver{r: r.(*bytesDecReader), h: h}
+ hd := xmlDecDriver{d: d, r: d.r, h: h}
+ hd.n.bytes = d.b[:]
+ return &hd
+}
+
+func (h *XMLHandle) SetInterfaceExt(rt reflect.Type, tag uint64, ext InterfaceExt) (err error) {
+ return h.SetExt(rt, tag, &extWrapper{bytesExtFailer{}, ext})
+}
+
+var _ decDriver = (*xmlDecDriver)(nil)
+var _ encDriver = (*xmlEncDriver)(nil)
diff --git a/vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE b/vendor/github.com/vishvananda/netlink/LICENSE
similarity index 100%
rename from vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE
rename to vendor/github.com/vishvananda/netlink/LICENSE
diff --git a/vendor/github.com/vishvananda/netlink/addr.go b/vendor/github.com/vishvananda/netlink/addr.go
new file mode 100644
index 0000000000..f08c956969
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/addr.go
@@ -0,0 +1,56 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// Addr represents an IP address from netlink. Netlink ip addresses
+// include a mask, so it stores the address as a net.IPNet.
+type Addr struct {
+ *net.IPNet
+ Label string
+ Flags int
+ Scope int
+ Peer *net.IPNet
+ Broadcast net.IP
+ PreferedLft int
+ ValidLft int
+}
+
+// String returns $ip/$netmask $label
+func (a Addr) String() string {
+ return strings.TrimSpace(fmt.Sprintf("%s %s", a.IPNet, a.Label))
+}
+
+// ParseAddr parses the string representation of an address in the
+// form $ip/$netmask $label. The label portion is optional.
+func ParseAddr(s string) (*Addr, error) {
+ label := ""
+ parts := strings.Split(s, " ")
+ if len(parts) > 1 {
+ s = parts[0]
+ label = parts[1]
+ }
+ m, err := ParseIPNet(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Addr{IPNet: m, Label: label}, nil
+}
+
+// Equal returns true if both Addrs have the same net.IPNet value. 
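+//
+// A minimal usage sketch, assuming both strings parse cleanly:
+//
+//    a1, _ := ParseAddr("10.0.0.1/24 lo:alias")
+//    a2, _ := ParseAddr("10.0.0.1/24")
+//    same := a1.Equal(*a2) // true: the label is ignored in the comparison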
+func (a Addr) Equal(x Addr) bool { + sizea, _ := a.Mask.Size() + sizeb, _ := x.Mask.Size() + // ignore label for comparison + return a.IP.Equal(x.IP) && sizea == sizeb +} + +func (a Addr) PeerEqual(x Addr) bool { + sizea, _ := a.Peer.Mask.Size() + sizeb, _ := x.Peer.Mask.Size() + // ignore label for comparison + return a.Peer.IP.Equal(x.Peer.IP) && sizea == sizeb +} diff --git a/vendor/github.com/vishvananda/netlink/addr_linux.go b/vendor/github.com/vishvananda/netlink/addr_linux.go new file mode 100644 index 0000000000..8808b42d9b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/addr_linux.go @@ -0,0 +1,316 @@ +package netlink + +import ( + "fmt" + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +// IFA_FLAGS is a u32 attribute. +const IFA_FLAGS = 0x8 + +// AddrAdd will add an IP address to a link device. +// Equivalent to: `ip addr add $addr dev $link` +func AddrAdd(link Link, addr *Addr) error { + return pkgHandle.AddrAdd(link, addr) +} + +// AddrAdd will add an IP address to a link device. +// Equivalent to: `ip addr add $addr dev $link` +func (h *Handle) AddrAdd(link Link, addr *Addr) error { + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +// AddrReplace will replace (or, if not present, add) an IP address on a link device. +// Equivalent to: `ip addr replace $addr dev $link` +func AddrReplace(link Link, addr *Addr) error { + return pkgHandle.AddrReplace(link, addr) +} + +// AddrReplace will replace (or, if not present, add) an IP address on a link device. +// Equivalent to: `ip addr replace $addr dev $link` +func (h *Handle) AddrReplace(link Link, addr *Addr) error { + req := h.newNetlinkRequest(syscall.RTM_NEWADDR, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE|syscall.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +// AddrDel will delete an IP address from a link device. +// Equivalent to: `ip addr del $addr dev $link` +func AddrDel(link Link, addr *Addr) error { + return pkgHandle.AddrDel(link, addr) +} + +// AddrDel will delete an IP address from a link device. 
+// Equivalent to: `ip addr del $addr dev $link` +func (h *Handle) AddrDel(link Link, addr *Addr) error { + req := h.newNetlinkRequest(syscall.RTM_DELADDR, syscall.NLM_F_ACK) + return h.addrHandle(link, addr, req) +} + +func (h *Handle) addrHandle(link Link, addr *Addr, req *nl.NetlinkRequest) error { + base := link.Attrs() + if addr.Label != "" && !strings.HasPrefix(addr.Label, base.Name) { + return fmt.Errorf("label must begin with interface name") + } + h.ensureIndex(base) + + family := nl.GetIPFamily(addr.IP) + + msg := nl.NewIfAddrmsg(family) + msg.Index = uint32(base.Index) + msg.Scope = uint8(addr.Scope) + prefixlen, masklen := addr.Mask.Size() + msg.Prefixlen = uint8(prefixlen) + req.AddData(msg) + + var localAddrData []byte + if family == FAMILY_V4 { + localAddrData = addr.IP.To4() + } else { + localAddrData = addr.IP.To16() + } + + localData := nl.NewRtAttr(syscall.IFA_LOCAL, localAddrData) + req.AddData(localData) + var peerAddrData []byte + if addr.Peer != nil { + if family == FAMILY_V4 { + peerAddrData = addr.Peer.IP.To4() + } else { + peerAddrData = addr.Peer.IP.To16() + } + } else { + peerAddrData = localAddrData + } + + addressData := nl.NewRtAttr(syscall.IFA_ADDRESS, peerAddrData) + req.AddData(addressData) + + if addr.Flags != 0 { + if addr.Flags <= 0xff { + msg.IfAddrmsg.Flags = uint8(addr.Flags) + } else { + b := make([]byte, 4) + native.PutUint32(b, uint32(addr.Flags)) + flagsData := nl.NewRtAttr(IFA_FLAGS, b) + req.AddData(flagsData) + } + } + + if addr.Broadcast == nil { + calcBroadcast := make(net.IP, masklen/8) + for i := range localAddrData { + calcBroadcast[i] = localAddrData[i] | ^addr.Mask[i] + } + addr.Broadcast = calcBroadcast + } + req.AddData(nl.NewRtAttr(syscall.IFA_BROADCAST, addr.Broadcast)) + + if addr.Label != "" { + labelData := nl.NewRtAttr(syscall.IFA_LABEL, nl.ZeroTerminated(addr.Label)) + req.AddData(labelData) + } + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// AddrList gets a list of IP addresses in the system. +// Equivalent to: `ip addr show`. +// The list can be filtered by link and ip family. +func AddrList(link Link, family int) ([]Addr, error) { + return pkgHandle.AddrList(link, family) +} + +// AddrList gets a list of IP addresses in the system. +// Equivalent to: `ip addr show`. +// The list can be filtered by link and ip family. 
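+//
+// A minimal usage sketch, assuming an interface named "eth0" exists:
+//
+//    link, _ := LinkByName("eth0")
+//    addrs, _ := AddrList(link, FAMILY_V4)
+//    for _, a := range addrs {
+//        fmt.Println(a.IPNet, a.Label)
+//    }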
+func (h *Handle) AddrList(link Link, family int) ([]Addr, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETADDR, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(family)
+ req.AddData(msg)
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWADDR)
+ if err != nil {
+ return nil, err
+ }
+
+ indexFilter := 0
+ if link != nil {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ indexFilter = base.Index
+ }
+
+ var res []Addr
+ for _, m := range msgs {
+ addr, msgFamily, ifindex, err := parseAddr(m)
+ if err != nil {
+ return res, err
+ }
+
+ if link != nil && ifindex != indexFilter {
+ // Ignore messages from other interfaces
+ continue
+ }
+
+ if family != FAMILY_ALL && msgFamily != family {
+ continue
+ }
+
+ res = append(res, addr)
+ }
+
+ return res, nil
+}
+
+func parseAddr(m []byte) (addr Addr, family, index int, err error) {
+ msg := nl.DeserializeIfAddrmsg(m)
+
+ family = -1
+ index = -1
+
+ attrs, err1 := nl.ParseRouteAttr(m[msg.Len():])
+ if err1 != nil {
+ err = err1
+ return
+ }
+
+ family = int(msg.Family)
+ index = int(msg.Index)
+
+ var local, dst *net.IPNet
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case syscall.IFA_ADDRESS:
+ dst = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ addr.Peer = dst
+ case syscall.IFA_LOCAL:
+ local = &net.IPNet{
+ IP: attr.Value,
+ Mask: net.CIDRMask(int(msg.Prefixlen), 8*len(attr.Value)),
+ }
+ addr.IPNet = local
+ case syscall.IFA_BROADCAST:
+ addr.Broadcast = attr.Value
+ case syscall.IFA_LABEL:
+ addr.Label = string(attr.Value[:len(attr.Value)-1])
+ case IFA_FLAGS:
+ addr.Flags = int(native.Uint32(attr.Value[0:4]))
+ case nl.IFA_CACHEINFO:
+ ci := nl.DeserializeIfaCacheInfo(attr.Value)
+ addr.PreferedLft = int(ci.IfaPrefered)
+ addr.ValidLft = int(ci.IfaValid)
+ }
+ }
+
+ // IFA_LOCAL should be there but if not, fall back to IFA_ADDRESS
+ if local != nil {
+ addr.IPNet = local
+ } else {
+ addr.IPNet = dst
+ }
+ addr.Scope = int(msg.Scope)
+
+ return
+}
+
+type AddrUpdate struct {
+ LinkAddress net.IPNet
+ LinkIndex int
+ Flags int
+ Scope int
+ PreferedLft int
+ ValidLft int
+ NewAddr bool // true=added false=deleted
+}
+
+// AddrSubscribe takes a chan down which notifications will be sent
+// when addresses change. Close the 'done' chan to stop subscription.
+func AddrSubscribe(ch chan<- AddrUpdate, done <-chan struct{}) error {
+ return addrSubscribeAt(netns.None(), netns.None(), ch, done, nil)
+}
+
+// AddrSubscribeAt works like AddrSubscribe plus it allows the caller
+// to choose the network namespace in which to subscribe (ns).
+func AddrSubscribeAt(ns netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}) error {
+ return addrSubscribeAt(ns, netns.None(), ch, done, nil)
+}
+
+// AddrSubscribeOptions contains a set of options to use with
+// AddrSubscribeWithOptions.
+type AddrSubscribeOptions struct {
+ Namespace *netns.NsHandle
+ ErrorCallback func(error)
+}
+
+// AddrSubscribeWithOptions works like AddrSubscribe but allows the caller to
+// provide additional options to modify the behavior. Currently, the
+// namespace can be provided as well as an error callback. 
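+//
+// A minimal sketch, assuming the subscription runs in the current
+// namespace and errors are only logged:
+//
+//    ch := make(chan AddrUpdate)
+//    done := make(chan struct{})
+//    opts := AddrSubscribeOptions{ErrorCallback: func(err error) { log.Println(err) }}
+//    _ = AddrSubscribeWithOptions(ch, done, opts)
+//    for update := range ch {
+//        fmt.Println(update.LinkAddress.String(), update.NewAddr)
+//    }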
+func AddrSubscribeWithOptions(ch chan<- AddrUpdate, done <-chan struct{}, options AddrSubscribeOptions) error {
+ if options.Namespace == nil {
+ none := netns.None()
+ options.Namespace = &none
+ }
+ return addrSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func addrSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- AddrUpdate, done <-chan struct{}, cberr func(error)) error {
+ s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_IFADDR, syscall.RTNLGRP_IPV6_IFADDR)
+ if err != nil {
+ return err
+ }
+ if done != nil {
+ go func() {
+ <-done
+ s.Close()
+ }()
+ }
+ go func() {
+ defer close(ch)
+ for {
+ msgs, err := s.Receive()
+ if err != nil {
+ if cberr != nil {
+ cberr(err)
+ }
+ return
+ }
+ for _, m := range msgs {
+ msgType := m.Header.Type
+ if msgType != syscall.RTM_NEWADDR && msgType != syscall.RTM_DELADDR {
+ if cberr != nil {
+ cberr(fmt.Errorf("bad message type: %d", msgType))
+ }
+ return
+ }
+
+ addr, _, ifindex, err := parseAddr(m.Data)
+ if err != nil {
+ if cberr != nil {
+ cberr(fmt.Errorf("could not parse address: %v", err))
+ }
+ return
+ }
+
+ ch <- AddrUpdate{LinkAddress: *addr.IPNet,
+ LinkIndex: ifindex,
+ NewAddr: msgType == syscall.RTM_NEWADDR,
+ Flags: addr.Flags,
+ Scope: addr.Scope,
+ PreferedLft: addr.PreferedLft,
+ ValidLft: addr.ValidLft}
+ }
+ }
+ }()
+
+ return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/bpf_linux.go b/vendor/github.com/vishvananda/netlink/bpf_linux.go
new file mode 100644
index 0000000000..533743987a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bpf_linux.go
@@ -0,0 +1,62 @@
+package netlink
+
+/*
+#include <asm/types.h>
+#include <asm/unistd.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+static int load_simple_bpf(int prog_type, int ret) {
+#ifdef __NR_bpf
+ // { return ret; }
+ __u64 __attribute__((aligned(8))) insns[] = {
+ 0x00000000000000b7ull | ((__u64)ret<<32),
+ 0x0000000000000095ull,
+ };
+ __u8 __attribute__((aligned(8))) license[] = "ASL2";
+ // Copied from a header file since libc is notoriously slow to update.
+ // The call will succeed or fail and that will be our indication on
+ // whether or not it is supported.
+ struct {
+ __u32 prog_type;
+ __u32 insn_cnt;
+ __u64 insns;
+ __u64 license;
+ __u32 log_level;
+ __u32 log_size;
+ __u64 log_buf;
+ __u32 kern_version;
+ } __attribute__((aligned(8))) attr = {
+ .prog_type = prog_type,
+ .insn_cnt = 2,
+ .insns = (uintptr_t)&insns,
+ .license = (uintptr_t)&license,
+ };
+ return syscall(__NR_bpf, 5, &attr, sizeof(attr));
+#else
+ errno = EINVAL;
+ return -1;
+#endif
+}
+*/
+import "C"
+
+type BpfProgType C.int
+
+const (
+ BPF_PROG_TYPE_UNSPEC BpfProgType = iota
+ BPF_PROG_TYPE_SOCKET_FILTER
+ BPF_PROG_TYPE_KPROBE
+ BPF_PROG_TYPE_SCHED_CLS
+ BPF_PROG_TYPE_SCHED_ACT
+ BPF_PROG_TYPE_TRACEPOINT
+ BPF_PROG_TYPE_XDP
+)
+
+// loadSimpleBpf loads a trivial bpf program for testing purposes
+func loadSimpleBpf(progType BpfProgType, ret int) (int, error) {
+ fd, err := C.load_simple_bpf(C.int(progType), C.int(ret))
+ return int(fd), err
+}
diff --git a/vendor/github.com/vishvananda/netlink/bridge_linux.go b/vendor/github.com/vishvananda/netlink/bridge_linux.go
new file mode 100644
index 0000000000..a65d6a1319
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/bridge_linux.go
@@ -0,0 +1,115 @@
+package netlink
+
+import (
+ "fmt"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// BridgeVlanList gets a map of device id to bridge vlan infos. 
+// Equivalent to: `bridge vlan show`
+func BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ return pkgHandle.BridgeVlanList()
+}
+
+// BridgeVlanList gets a map of device id to bridge vlan infos.
+// Equivalent to: `bridge vlan show`
+func (h *Handle) BridgeVlanList() (map[int32][]*nl.BridgeVlanInfo, error) {
+ req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ req.AddData(msg)
+ req.AddData(nl.NewRtAttr(nl.IFLA_EXT_MASK, nl.Uint32Attr(uint32(nl.RTEXT_FILTER_BRVLAN))))
+
+ msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)
+ if err != nil {
+ return nil, err
+ }
+ ret := make(map[int32][]*nl.BridgeVlanInfo)
+ for _, m := range msgs {
+ msg := nl.DeserializeIfInfomsg(m)
+
+ attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+ if err != nil {
+ return nil, err
+ }
+ for _, attr := range attrs {
+ switch attr.Attr.Type {
+ case nl.IFLA_AF_SPEC:
+ // nested attr
+ nestAttrs, err := nl.ParseRouteAttr(attr.Value)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse nested attr %v", err)
+ }
+ for _, nestAttr := range nestAttrs {
+ switch nestAttr.Attr.Type {
+ case nl.IFLA_BRIDGE_VLAN_INFO:
+ vlanInfo := nl.DeserializeBridgeVlanInfo(nestAttr.Value)
+ ret[msg.Index] = append(ret[msg.Index], vlanInfo)
+ }
+ }
+ }
+ }
+ }
+ return ret, nil
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanAdd(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanAdd adds a new vlan filter entry
+// Equivalent to: `bridge vlan add dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanAdd(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_SETLINK, link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel deletes an existing vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return pkgHandle.BridgeVlanDel(link, vid, pvid, untagged, self, master)
+}
+
+// BridgeVlanDel deletes an existing vlan filter entry
+// Equivalent to: `bridge vlan del dev DEV vid VID [ pvid ] [ untagged ] [ self ] [ master ]`
+func (h *Handle) BridgeVlanDel(link Link, vid uint16, pvid, untagged, self, master bool) error {
+ return h.bridgeVlanModify(syscall.RTM_DELLINK, link, vid, pvid, untagged, self, master)
+}
+
+func (h *Handle) bridgeVlanModify(cmd int, link Link, vid uint16, pvid, untagged, self, master bool) error {
+ base := link.Attrs()
+ h.ensureIndex(base)
+ req := h.newNetlinkRequest(cmd, syscall.NLM_F_ACK)
+
+ msg := nl.NewIfInfomsg(syscall.AF_BRIDGE)
+ msg.Index = int32(base.Index)
+ req.AddData(msg)
+
+ br := nl.NewRtAttr(nl.IFLA_AF_SPEC, nil)
+ var flags uint16
+ if self {
+ flags |= nl.BRIDGE_FLAGS_SELF
+ }
+ if master {
+ flags |= nl.BRIDGE_FLAGS_MASTER
+ }
+ if flags > 0 {
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_FLAGS, nl.Uint16Attr(flags))
+ }
+ vlanInfo := &nl.BridgeVlanInfo{Vid: vid}
+ if pvid {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_PVID
+ }
+ if untagged {
+ vlanInfo.Flags |= nl.BRIDGE_VLAN_INFO_UNTAGGED
+ }
+ nl.NewRtAttrChild(br, nl.IFLA_BRIDGE_VLAN_INFO, vlanInfo.Serialize())
+ req.AddData(br)
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ if err != nil {
+ return err
+ }
+ return nil 
+}
diff --git a/vendor/github.com/vishvananda/netlink/class.go b/vendor/github.com/vishvananda/netlink/class.go
new file mode 100644
index 0000000000..8ee13af48e
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/class.go
@@ -0,0 +1,78 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+type Class interface {
+ Attrs() *ClassAttrs
+ Type() string
+}
+
+// ClassAttrs represents a netlink class. A class is associated with a link,
+// has a handle and a parent. The root class of a device should have a
+// parent == HANDLE_ROOT.
+type ClassAttrs struct {
+ LinkIndex int
+ Handle uint32
+ Parent uint32
+ Leaf uint32
+}
+
+func (q ClassAttrs) String() string {
+ return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Leaf: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Leaf)
+}
+
+type HtbClassAttrs struct {
+ // TODO handle all attributes
+ Rate uint64
+ Ceil uint64
+ Buffer uint32
+ Cbuffer uint32
+ Quantum uint32
+ Level uint32
+ Prio uint32
+}
+
+func (q HtbClassAttrs) String() string {
+ return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
+}
+
+// HtbClass represents an Htb class
+type HtbClass struct {
+ ClassAttrs
+ Rate uint64
+ Ceil uint64
+ Buffer uint32
+ Cbuffer uint32
+ Quantum uint32
+ Level uint32
+ Prio uint32
+}
+
+func (q HtbClass) String() string {
+ return fmt.Sprintf("{Rate: %d, Ceil: %d, Buffer: %d, Cbuffer: %d}", q.Rate, q.Ceil, q.Buffer, q.Cbuffer)
+}
+
+func (q *HtbClass) Attrs() *ClassAttrs {
+ return &q.ClassAttrs
+}
+
+func (q *HtbClass) Type() string {
+ return "htb"
+}
+
+// GenericClass classes represent types that are not currently understood
+// by this netlink library.
+type GenericClass struct {
+ ClassAttrs
+ ClassType string
+}
+
+func (class *GenericClass) Attrs() *ClassAttrs {
+ return &class.ClassAttrs
+}
+
+func (class *GenericClass) Type() string {
+ return class.ClassType
+}
diff --git a/vendor/github.com/vishvananda/netlink/class_linux.go b/vendor/github.com/vishvananda/netlink/class_linux.go
new file mode 100644
index 0000000000..91cd3883de
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/class_linux.go
@@ -0,0 +1,254 @@
+package netlink
+
+import (
+ "errors"
+ "syscall"
+
+ "github.com/vishvananda/netlink/nl"
+)
+
+// NewHtbClass builds an HtbClass, deriving buffer sizes from the given rates.
+// NOTE: function is in here because it uses other linux functions
+func NewHtbClass(attrs ClassAttrs, cattrs HtbClassAttrs) *HtbClass {
+ mtu := 1600
+ rate := cattrs.Rate / 8
+ ceil := cattrs.Ceil / 8
+ buffer := cattrs.Buffer
+ cbuffer := cattrs.Cbuffer
+
+ if ceil == 0 {
+ ceil = rate
+ }
+
+ if buffer == 0 {
+ buffer = uint32(float64(rate)/Hz() + float64(mtu))
+ }
+ buffer = uint32(Xmittime(rate, buffer))
+
+ if cbuffer == 0 {
+ cbuffer = uint32(float64(ceil)/Hz() + float64(mtu))
+ }
+ cbuffer = uint32(Xmittime(ceil, cbuffer))
+
+ return &HtbClass{
+ ClassAttrs: attrs,
+ Rate: rate,
+ Ceil: ceil,
+ Buffer: buffer,
+ Cbuffer: cbuffer,
+ Quantum: 10,
+ Level: 0,
+ Prio: 0,
+ }
+}
+
+// ClassDel will delete a class from the system.
+// Equivalent to: `tc class del $class`
+func ClassDel(class Class) error {
+ return pkgHandle.ClassDel(class)
+}
+
+// ClassDel will delete a class from the system.
+// Equivalent to: `tc class del $class`
+func (h *Handle) ClassDel(class Class) error {
+ return h.classModify(syscall.RTM_DELTCLASS, 0, class)
+}
+
+// ClassChange will change a class in place
+// Equivalent to: `tc class change $class`
+// The parent and handle MUST NOT be changed. 
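+//
+// A minimal sketch, assuming link index 1 with an HTB qdisc 1: at the root and
+// an existing class 1:1; HtbClassAttrs rates are given in bits per second
+// (NewHtbClass divides by 8):
+//
+//    attrs := ClassAttrs{LinkIndex: 1, Parent: MakeHandle(1, 0), Handle: MakeHandle(1, 1)}
+//    htb := NewHtbClass(attrs, HtbClassAttrs{Rate: 128000, Ceil: 256000})
+//    _ = ClassChange(htb)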
+func ClassChange(class Class) error {
+ return pkgHandle.ClassChange(class)
+}
+
+// ClassChange will change a class in place
+// Equivalent to: `tc class change $class`
+// The parent and handle MUST NOT be changed.
+func (h *Handle) ClassChange(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, 0, class)
+}
+
+// ClassReplace will replace a class in the system.
+// Equivalent to: `tc class replace $class`
+// The handle MAY be changed.
+// If a class already exists with this parent/handle pair, the class is changed.
+// If a class does not already exist with this parent/handle, a new class is created.
+func ClassReplace(class Class) error {
+ return pkgHandle.ClassReplace(class)
+}
+
+// ClassReplace will replace a class in the system.
+// Equivalent to: `tc class replace $class`
+// The handle MAY be changed.
+// If a class already exists with this parent/handle pair, the class is changed.
+// If a class does not already exist with this parent/handle, a new class is created.
+func (h *Handle) ClassReplace(class Class) error {
+ return h.classModify(syscall.RTM_NEWTCLASS, syscall.NLM_F_CREATE, class)
+}
+
+// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class`
+func ClassAdd(class Class) error {
+ return pkgHandle.ClassAdd(class)
+}
+
+// ClassAdd will add a class to the system.
+// Equivalent to: `tc class add $class`
+func (h *Handle) ClassAdd(class Class) error {
+ return h.classModify(
+ syscall.RTM_NEWTCLASS,
+ syscall.NLM_F_CREATE|syscall.NLM_F_EXCL,
+ class,
+ )
+}
+
+func (h *Handle) classModify(cmd, flags int, class Class) error {
+ req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK)
+ base := class.Attrs()
+ msg := &nl.TcMsg{
+ Family: nl.FAMILY_ALL,
+ Ifindex: int32(base.LinkIndex),
+ Handle: base.Handle,
+ Parent: base.Parent,
+ }
+ req.AddData(msg)
+
+ if cmd != syscall.RTM_DELTCLASS {
+ if err := classPayload(req, class); err != nil {
+ return err
+ }
+ }
+ _, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+ return err
+}
+
+func classPayload(req *nl.NetlinkRequest, class Class) error {
+ req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(class.Type())))
+
+ options := nl.NewRtAttr(nl.TCA_OPTIONS, nil)
+ if htb, ok := class.(*HtbClass); ok {
+ opt := nl.TcHtbCopt{}
+ opt.Buffer = htb.Buffer
+ opt.Cbuffer = htb.Cbuffer
+ opt.Quantum = htb.Quantum
+ opt.Level = htb.Level
+ opt.Prio = htb.Prio
+ // TODO: Handle Debug properly. For now default to 0
+ /* Calculate {R,C}Tab and set Rate and Ceil */
+ cellLog := -1
+ ccellLog := -1
+ linklayer := nl.LINKLAYER_ETHERNET
+ mtu := 1600
+ var rtab [256]uint32
+ var ctab [256]uint32
+ tcrate := nl.TcRateSpec{Rate: uint32(htb.Rate)}
+ if CalcRtable(&tcrate, rtab, cellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate rate table")
+ }
+ opt.Rate = tcrate
+ tcceil := nl.TcRateSpec{Rate: uint32(htb.Ceil)}
+ if CalcRtable(&tcceil, ctab, ccellLog, uint32(mtu), linklayer) < 0 {
+ return errors.New("HTB: failed to calculate ceil rate table")
+ }
+ opt.Ceil = tcceil
+ nl.NewRtAttrChild(options, nl.TCA_HTB_PARMS, opt.Serialize())
+ nl.NewRtAttrChild(options, nl.TCA_HTB_RTAB, SerializeRtab(rtab))
+ nl.NewRtAttrChild(options, nl.TCA_HTB_CTAB, SerializeRtab(ctab))
+ }
+ req.AddData(options)
+ return nil
+}
+
+// ClassList gets a list of classes in the system.
+// Equivalent to: `tc class show`.
+// Generally returns nothing if link and parent are not specified. 
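+//
+// A minimal sketch, assuming an interface named "eth0" with a qdisc 1: installed:
+//
+//    link, _ := LinkByName("eth0")
+//    classes, _ := ClassList(link, MakeHandle(1, 0))
+//    for _, c := range classes {
+//        fmt.Println(c.Type(), c.Attrs().Handle)
+//    }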
+func ClassList(link Link, parent uint32) ([]Class, error) { + return pkgHandle.ClassList(link, parent) +} + +// ClassList gets a list of classes in the system. +// Equivalent to: `tc class show`. +// Generally returns nothing if link and parent are not specified. +func (h *Handle) ClassList(link Link, parent uint32) ([]Class, error) { + req := h.newNetlinkRequest(syscall.RTM_GETTCLASS, syscall.NLM_F_DUMP) + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Parent: parent, + } + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + msg.Ifindex = int32(base.Index) + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTCLASS) + if err != nil { + return nil, err + } + + var res []Class + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := ClassAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + } + + var class Class + classType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + classType = string(attr.Value[:len(attr.Value)-1]) + switch classType { + case "htb": + class = &HtbClass{} + default: + class = &GenericClass{ClassType: classType} + } + case nl.TCA_OPTIONS: + switch classType { + case "htb": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + _, err = parseHtbClassData(class, data) + if err != nil { + return nil, err + } + } + } + } + *class.Attrs() = base + res = append(res, class) + } + + return res, nil +} + +func parseHtbClassData(class Class, data []syscall.NetlinkRouteAttr) (bool, error) { + htb := class.(*HtbClass) + detailed := false + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_PARMS: + opt := nl.DeserializeTcHtbCopt(datum.Value) + htb.Rate = uint64(opt.Rate.Rate) + htb.Ceil = uint64(opt.Ceil.Rate) + htb.Buffer = opt.Buffer + htb.Cbuffer = opt.Cbuffer + htb.Quantum = opt.Quantum + htb.Level = opt.Level + htb.Prio = opt.Prio + } + } + return detailed, nil +} diff --git a/vendor/github.com/vishvananda/netlink/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/conntrack_linux.go new file mode 100644 index 0000000000..ecf0445659 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/conntrack_linux.go @@ -0,0 +1,371 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// ConntrackTableType Conntrack table for the netlink operation +type ConntrackTableType uint8 + +const ( + // ConntrackTable Conntrack table + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK 1 + ConntrackTable = 1 + // ConntrackExpectTable Conntrack expect table + // https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink.h -> #define NFNL_SUBSYS_CTNETLINK_EXP 2 + ConntrackExpectTable = 2 +) +const ( + // For Parsing Mark + TCP_PROTO = 6 + UDP_PROTO = 17 +) +const ( + // backward compatibility with golang 1.6 which does not have io.SeekCurrent + seekCurrent = 1 +) + +// InetFamily Family type +type InetFamily uint8 + +// -L [table] [options] List conntrack or expectation table +// -G [table] parameters Get conntrack or expectation + +// -I [table] parameters Create a conntrack or expectation +// -U [table] parameters Update a conntrack +// -E [table] [options] Show events + +// -C [table] Show counter +// -S Show statistics + +// 
ConntrackTableList returns the flow list of a table of a specific family +// conntrack -L [table] [options] List conntrack or expectation table +func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { + return pkgHandle.ConntrackTableList(table, family) +} + +// ConntrackTableFlush flushes all the flows of a specified table +// conntrack -F [table] Flush table +// The flush operation applies to all the family types +func ConntrackTableFlush(table ConntrackTableType) error { + return pkgHandle.ConntrackTableFlush(table) +} + +// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter +// conntrack -D [table] parameters Delete conntrack or expectation +func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { + return pkgHandle.ConntrackDeleteFilter(table, family, filter) +} + +// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed +// conntrack -L [table] [options] List conntrack or expectation table +func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) { + res, err := h.dumpConntrackTable(table, family) + if err != nil { + return nil, err + } + + // Deserialize all the flows + var result []*ConntrackFlow + for _, dataRaw := range res { + result = append(result, parseRawData(dataRaw)) + } + + return result, nil +} + +// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed +// conntrack -F [table] Flush table +// The flush operation applies to all the family types +func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error { + req := h.newConntrackRequest(table, syscall.AF_INET, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_NETFILTER, 0) + return err +} + +// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed +// conntrack -D [table] parameters Delete conntrack or expectation +func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter CustomConntrackFilter) (uint, error) { + res, err := h.dumpConntrackTable(table, family) + if err != nil { + return 0, err + } + + var matched uint + for _, dataRaw := range res { + flow := parseRawData(dataRaw) + if match := filter.MatchConntrackFlow(flow); match { + req2 := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_DELETE, syscall.NLM_F_ACK) + // skip the first 4 byte that are the netfilter header, the newConntrackRequest is adding it already + req2.AddRawData(dataRaw[4:]) + req2.Execute(syscall.NETLINK_NETFILTER, 0) + matched++ + } + } + + return matched, nil +} + +func (h *Handle) newConntrackRequest(table ConntrackTableType, family InetFamily, operation, flags int) *nl.NetlinkRequest { + // Create the Netlink request object + req := h.newNetlinkRequest((int(table)<<8)|operation, flags) + // Add the netfilter header + msg := &nl.Nfgenmsg{ + NfgenFamily: uint8(family), + Version: nl.NFNETLINK_V0, + ResId: 0, + } + req.AddData(msg) + return req +} + +func (h *Handle) dumpConntrackTable(table ConntrackTableType, family InetFamily) ([][]byte, error) { + req := h.newConntrackRequest(table, family, nl.IPCTNL_MSG_CT_GET, syscall.NLM_F_DUMP) + return req.Execute(syscall.NETLINK_NETFILTER, 0) +} + +// The full conntrack flow structure is very complicated and can be found in the file: +// 
http://git.netfilter.org/libnetfilter_conntrack/tree/include/internal/object.h
+// For the time being, the structure below allows parsing and extracting the base information of a flow
+type ipTuple struct {
+ SrcIP net.IP
+ DstIP net.IP
+ Protocol uint8
+ SrcPort uint16
+ DstPort uint16
+}
+
+type ConntrackFlow struct {
+ FamilyType uint8
+ Forward ipTuple
+ Reverse ipTuple
+ Mark uint32
+}
+
+func (s *ConntrackFlow) String() string {
+ // conntrack cmd output:
+ // udp 17 src=127.0.0.1 dst=127.0.0.1 sport=4001 dport=1234 [UNREPLIED] src=127.0.0.1 dst=127.0.0.1 sport=1234 dport=4001 mark=0
+ return fmt.Sprintf("%s\t%d src=%s dst=%s sport=%d dport=%d\tsrc=%s dst=%s sport=%d dport=%d mark=%d",
+ nl.L4ProtoMap[s.Forward.Protocol], s.Forward.Protocol,
+ s.Forward.SrcIP.String(), s.Forward.DstIP.String(), s.Forward.SrcPort, s.Forward.DstPort,
+ s.Reverse.SrcIP.String(), s.Reverse.DstIP.String(), s.Reverse.SrcPort, s.Reverse.DstPort, s.Mark)
+}
+
+// This method parses the ip tuple structure.
+// The message structure is the following:
+// <len, [CTA_IP_V4_SRC|CTA_IP_V6_SRC], value>
+// <len, [CTA_IP_V4_DST|CTA_IP_V6_DST], value>
+// <len, NLA_F_NESTED|CTA_TUPLE_PROTO>
+// <len, CTA_PROTO_NUM, value, padding>
+func parseIpTuple(reader *bytes.Reader, tpl *ipTuple) uint8 {
+ for i := 0; i < 2; i++ {
+ _, t, _, v := parseNfAttrTLV(reader)
+ switch t {
+ case nl.CTA_IP_V4_SRC, nl.CTA_IP_V6_SRC:
+ tpl.SrcIP = v
+ case nl.CTA_IP_V4_DST, nl.CTA_IP_V6_DST:
+ tpl.DstIP = v
+ }
+ }
+ // Skip the next 4 bytes nl.NLA_F_NESTED|nl.CTA_TUPLE_PROTO
+ reader.Seek(4, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_PROTO_NUM {
+ tpl.Protocol = uint8(v[0])
+ }
+ // Skip some padding 3 bytes
+ reader.Seek(3, seekCurrent)
+ for i := 0; i < 2; i++ {
+ _, t, _ := parseNfAttrTL(reader)
+ switch t {
+ case nl.CTA_PROTO_SRC_PORT:
+ parseBERaw16(reader, &tpl.SrcPort)
+ case nl.CTA_PROTO_DST_PORT:
+ parseBERaw16(reader, &tpl.DstPort)
+ }
+ // Skip some padding 2 byte
+ reader.Seek(2, seekCurrent)
+ }
+ return tpl.Protocol
+}
+
+func parseNfAttrTLV(r *bytes.Reader) (isNested bool, attrType, len uint16, value []byte) {
+ isNested, attrType, len = parseNfAttrTL(r)
+
+ value = make([]byte, len)
+ binary.Read(r, binary.BigEndian, &value)
+ return isNested, attrType, len, value
+}
+
+func parseNfAttrTL(r *bytes.Reader) (isNested bool, attrType, len uint16) {
+ binary.Read(r, nl.NativeEndian(), &len)
+ len -= nl.SizeofNfattr
+
+ binary.Read(r, nl.NativeEndian(), &attrType)
+ isNested = (attrType & nl.NLA_F_NESTED) == nl.NLA_F_NESTED
+ attrType = attrType & (nl.NLA_F_NESTED - 1)
+
+ return isNested, attrType, len
+}
+
+func parseBERaw16(r *bytes.Reader, v *uint16) {
+ binary.Read(r, binary.BigEndian, v)
+}
+
+func parseRawData(data []byte) *ConntrackFlow {
+ s := &ConntrackFlow{}
+ var proto uint8
+ // First there is the Nfgenmsg header
+ // consume only the family field
+ reader := bytes.NewReader(data)
+ binary.Read(reader, nl.NativeEndian(), &s.FamilyType)
+
+ // skip rest of the Netfilter header
+ reader.Seek(3, seekCurrent)
+ // The message structure is the following:
+ // <len, NLA_F_NESTED|CTA_TUPLE_ORIG> 4 bytes
+ // <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+ // flow information of the forward flow
+ // <len, NLA_F_NESTED|CTA_TUPLE_REPLY> 4 bytes
+ // <len, NLA_F_NESTED|CTA_TUPLE_IP> 4 bytes
+ // flow information of the reverse flow
+ for reader.Len() > 0 {
+ nested, t, l := parseNfAttrTL(reader)
+ if nested && t == nl.CTA_TUPLE_ORIG {
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ proto = parseIpTuple(reader, &s.Forward)
+ }
+ } else if nested && t == nl.CTA_TUPLE_REPLY {
+ if nested, t, _ = parseNfAttrTL(reader); nested && t == nl.CTA_TUPLE_IP {
+ parseIpTuple(reader, &s.Reverse)
+
+ // Got all the useful information, stop parsing
+ break
+ } else {
+ // Header not recognized, skip it
+ reader.Seek(int64(l), seekCurrent)
+ }
+ }
+ }
+ if proto == TCP_PROTO {
+ reader.Seek(64, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ } else if proto == UDP_PROTO {
+ reader.Seek(16, seekCurrent)
+ _, t, _, v := parseNfAttrTLV(reader)
+ if t == nl.CTA_MARK {
+ s.Mark = uint32(v[3])
+ }
+ }
+ return s
+}
+
+// Conntrack parameters and options:
+// -n, --src-nat ip source NAT ip
+// -g, --dst-nat ip destination NAT ip
+// -j, --any-nat ip source or destination NAT ip
+// -m, --mark mark Set mark
+// -c, --secmark secmark Set selinux secmark
+// -e, --event-mask eventmask Event mask, eg. NEW,DESTROY
+// -z, --zero Zero counters while listing
+// -o, --output type[,...] Output format, eg. xml
+// -l, --label label[,...] conntrack labels
+
+// Common parameters and options:
+// -s, --src, --orig-src ip Source address from original direction
+// -d, --dst, --orig-dst ip Destination address from original direction
+// -r, --reply-src ip Source address from reply direction
+// -q, --reply-dst ip Destination address from reply direction
+// -p, --protonum proto Layer 4 Protocol, eg. 'tcp'
+// -f, --family proto Layer 3 Protocol, eg. 'ipv6'
+// -t, --timeout timeout Set timeout
+// -u, --status status Set status, eg. ASSURED
+// -w, --zone value Set conntrack zone
+// --orig-zone value Set zone for original direction
+// --reply-zone value Set zone for reply direction
+// -b, --buffer-size Netlink socket buffer size
+// --mask-src ip Source mask address
+// --mask-dst ip Destination mask address
+
+// Filter types
+type ConntrackFilterType uint8
+
+const (
+ ConntrackOrigSrcIP = iota // -orig-src ip Source address from original direction
+ ConntrackOrigDstIP // -orig-dst ip Destination address from original direction
+ ConntrackNatSrcIP // -src-nat ip Source NAT ip
+ ConntrackNatDstIP // -dst-nat ip Destination NAT ip
+ ConntrackNatAnyIP // -any-nat ip Source or destination NAT ip
+)
+
+type CustomConntrackFilter interface {
+ // MatchConntrackFlow applies the filter to the flow and returns true if the flow matches
+ // the filter or false otherwise
+ MatchConntrackFlow(flow *ConntrackFlow) bool
+}
+
+type ConntrackFilter struct {
+ ipFilter map[ConntrackFilterType]net.IP
+}
+
+// AddIP adds an IP to the conntrack filter
+func (f *ConntrackFilter) AddIP(tp ConntrackFilterType, ip net.IP) error {
+ if f.ipFilter == nil {
+ f.ipFilter = make(map[ConntrackFilterType]net.IP)
+ }
+ if _, ok := f.ipFilter[tp]; ok {
+ return errors.New("Filter attribute already present")
+ }
+ f.ipFilter[tp] = ip
+ return nil
+}
+
+// MatchConntrackFlow applies the filter to the flow and returns true if the flow matches the filter,
+// false otherwise
+func (f *ConntrackFilter) MatchConntrackFlow(flow *ConntrackFlow) bool {
+ if len(f.ipFilter) == 0 {
+ // an empty filter never matches
+ return false
+ }
+
+ match := true
+ // -orig-src ip Source address from original direction
+ if elem, found := f.ipFilter[ConntrackOrigSrcIP]; found {
+ match = match && elem.Equal(flow.Forward.SrcIP)
+ }
+
+ // -orig-dst ip Destination address from original direction
+ if elem, found := f.ipFilter[ConntrackOrigDstIP]; match && found {
+ match = match && elem.Equal(flow.Forward.DstIP)
+ }
+
+ // -src-nat ip Source NAT ip
+ if elem, found := f.ipFilter[ConntrackNatSrcIP]; match && found {
+ match = match && elem.Equal(flow.Reverse.SrcIP)
+ }
+
+ // -dst-nat ip Destination NAT ip
+ if elem, found := f.ipFilter[ConntrackNatDstIP]; match && found {
+ match = match && elem.Equal(flow.Reverse.DstIP)
+ }
+
+ // -any-nat ip Source or destination NAT ip
+ if elem, found := f.ipFilter[ConntrackNatAnyIP]; match && found {
+ match = match && (elem.Equal(flow.Reverse.SrcIP) || elem.Equal(flow.Reverse.DstIP))
+ }
+
+ return match
+}
+
+var _ CustomConntrackFilter = (*ConntrackFilter)(nil)
diff --git a/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
new file mode 100644
index 0000000000..af7af799e7
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/conntrack_unspecified.go
@@ -0,0 +1,53 @@
+// +build !linux
+
+package netlink
+
+// ConntrackTableType Conntrack table for the netlink operation
+type ConntrackTableType uint8
+
+// InetFamily Family type
+type InetFamily uint8
+
+// ConntrackFlow placeholder
+type ConntrackFlow struct{}
+
+// ConntrackFilter placeholder
+type ConntrackFilter struct{}
+
+// ConntrackTableList returns the flow list of a table of a specific family
+// conntrack -L [table] [options] List conntrack or expectation table
+func ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func ConntrackTableFlush(table ConntrackTableType) error {
+ return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter
+// conntrack -D [table] parameters Delete conntrack or expectation
+func ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
+
+// ConntrackTableList returns the flow list of a table of a specific family using the netlink handle passed
+// conntrack -L [table] [options] List conntrack or expectation table
+func (h *Handle) ConntrackTableList(table ConntrackTableType, family InetFamily) ([]*ConntrackFlow, error) {
+ return nil, ErrNotImplemented
+}
+
+// ConntrackTableFlush flushes all the flows of a specified table using the netlink handle passed
+// conntrack -F [table] Flush table
+// The flush operation applies to all the family types
+func (h *Handle) ConntrackTableFlush(table ConntrackTableType) error {
+ return ErrNotImplemented
+}
+
+// ConntrackDeleteFilter deletes entries on the specified table on the base of the filter using the netlink handle passed
+// conntrack -D [table] parameters Delete conntrack or expectation
+func (h *Handle) ConntrackDeleteFilter(table ConntrackTableType, family InetFamily, filter *ConntrackFilter) (uint, error) {
+ return 0, ErrNotImplemented
+}
diff --git a/vendor/github.com/vishvananda/netlink/filter.go b/vendor/github.com/vishvananda/netlink/filter.go
new file mode 100644
index 0000000000..1120c79d6a
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/filter.go
@@ -0,0 +1,273 @@
+package netlink
+
+import (
+ "fmt"
+)
+
+type Filter interface {
+ Attrs() *FilterAttrs
+ Type() string
+}
+
+// FilterAttrs represents a netlink filter. A filter is associated with a link,
+// has a handle and a parent. The root filter of a device should have a
+// parent == HANDLE_ROOT. 
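+//
+// A minimal sketch of filling these attributes for a u32 filter on the ingress
+// qdisc; the interface index and handles here are assumptions for illustration:
+//
+//    attrs := FilterAttrs{
+//        LinkIndex: 1,
+//        Parent:    MakeHandle(0xffff, 0), // ingress
+//        Priority:  1,
+//        Protocol:  syscall.ETH_P_IP,
+//    }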
+type FilterAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Priority uint16 // lower is higher priority + Protocol uint16 // syscall.ETH_P_* +} + +func (q FilterAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Priority: %d, Protocol: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Priority, q.Protocol) +} + +type TcAct int32 + +const ( + TC_ACT_UNSPEC TcAct = -1 + TC_ACT_OK TcAct = 0 + TC_ACT_RECLASSIFY TcAct = 1 + TC_ACT_SHOT TcAct = 2 + TC_ACT_PIPE TcAct = 3 + TC_ACT_STOLEN TcAct = 4 + TC_ACT_QUEUED TcAct = 5 + TC_ACT_REPEAT TcAct = 6 + TC_ACT_REDIRECT TcAct = 7 + TC_ACT_JUMP TcAct = 0x10000000 +) + +func (a TcAct) String() string { + switch a { + case TC_ACT_UNSPEC: + return "unspec" + case TC_ACT_OK: + return "ok" + case TC_ACT_RECLASSIFY: + return "reclassify" + case TC_ACT_SHOT: + return "shot" + case TC_ACT_PIPE: + return "pipe" + case TC_ACT_STOLEN: + return "stolen" + case TC_ACT_QUEUED: + return "queued" + case TC_ACT_REPEAT: + return "repeat" + case TC_ACT_REDIRECT: + return "redirect" + case TC_ACT_JUMP: + return "jump" + } + return fmt.Sprintf("0x%x", int32(a)) +} + +type TcPolAct int32 + +const ( + TC_POLICE_UNSPEC TcPolAct = TcPolAct(TC_ACT_UNSPEC) + TC_POLICE_OK TcPolAct = TcPolAct(TC_ACT_OK) + TC_POLICE_RECLASSIFY TcPolAct = TcPolAct(TC_ACT_RECLASSIFY) + TC_POLICE_SHOT TcPolAct = TcPolAct(TC_ACT_SHOT) + TC_POLICE_PIPE TcPolAct = TcPolAct(TC_ACT_PIPE) +) + +func (a TcPolAct) String() string { + switch a { + case TC_POLICE_UNSPEC: + return "unspec" + case TC_POLICE_OK: + return "ok" + case TC_POLICE_RECLASSIFY: + return "reclassify" + case TC_POLICE_SHOT: + return "shot" + case TC_POLICE_PIPE: + return "pipe" + } + return fmt.Sprintf("0x%x", int32(a)) +} + +type ActionAttrs struct { + Index int + Capab int + Action TcAct + Refcnt int + Bindcnt int +} + +func (q ActionAttrs) String() string { + return fmt.Sprintf("{Index: %d, Capab: %x, Action: %s, Refcnt: %d, Bindcnt: %d}", q.Index, q.Capab, q.Action.String(), q.Refcnt, q.Bindcnt) +} + +// Action represents an action in any supported filter. 
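+// GenericAction, BpfAction, and MirredAction below all satisfy it through
+// their embedded ActionAttrs.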
+type Action interface {
+ Attrs() *ActionAttrs
+ Type() string
+}
+
+type GenericAction struct {
+ ActionAttrs
+}
+
+func (action *GenericAction) Type() string {
+ return "generic"
+}
+
+func (action *GenericAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type BpfAction struct {
+ ActionAttrs
+ Fd int
+ Name string
+}
+
+func (action *BpfAction) Type() string {
+ return "bpf"
+}
+
+func (action *BpfAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+type MirredAct uint8
+
+func (a MirredAct) String() string {
+ switch a {
+ case TCA_EGRESS_REDIR:
+ return "egress redir"
+ case TCA_EGRESS_MIRROR:
+ return "egress mirror"
+ case TCA_INGRESS_REDIR:
+ return "ingress redir"
+ case TCA_INGRESS_MIRROR:
+ return "ingress mirror"
+ }
+ return "unknown"
+}
+
+const (
+ TCA_EGRESS_REDIR MirredAct = 1 /* packet redirect to EGRESS*/
+ TCA_EGRESS_MIRROR MirredAct = 2 /* mirror packet to EGRESS */
+ TCA_INGRESS_REDIR MirredAct = 3 /* packet redirect to INGRESS*/
+ TCA_INGRESS_MIRROR MirredAct = 4 /* mirror packet to INGRESS */
+)
+
+type MirredAction struct {
+ ActionAttrs
+ MirredAction MirredAct
+ Ifindex int
+}
+
+func (action *MirredAction) Type() string {
+ return "mirred"
+}
+
+func (action *MirredAction) Attrs() *ActionAttrs {
+ return &action.ActionAttrs
+}
+
+func NewMirredAction(redirIndex int) *MirredAction {
+ return &MirredAction{
+ ActionAttrs: ActionAttrs{
+ Action: TC_ACT_STOLEN,
+ },
+ MirredAction: TCA_EGRESS_REDIR,
+ Ifindex: redirIndex,
+ }
+}
+
+// TcU32Sel is the Sel of a U32 filter, holding multiple TcU32Key entries. It is
+// the frontend copy of nl.TcU32Sel, and is serialized into the canonical
+// nl.TcU32Sel with the appropriate endianness.
+type TcU32Sel struct {
+ Flags uint8
+ Offshift uint8
+ Nkeys uint8
+ Pad uint8
+ Offmask uint16
+ Off uint16
+ Offoff int16
+ Hoff int16
+ Hmask uint32
+ Keys []TcU32Key
+}
+
+// TcU32Key is a key contained in the Sel of a U32 filter. It is the frontend
+// copy of nl.TcU32Key, and is serialized into the canonical nl.TcU32Sel
+// with the appropriate endianness.
+type TcU32Key struct {
+ Mask uint32
+ Val uint32
+ Off int32
+ OffMask int32
+}
+
+// U32 filters on many packet related properties
+type U32 struct {
+ FilterAttrs
+ ClassId uint32
+ RedirIndex int
+ Sel *TcU32Sel
+ Actions []Action
+}
+
+func (filter *U32) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+func (filter *U32) Type() string {
+ return "u32"
+}
+
+type FilterFwAttrs struct {
+ ClassId uint32
+ InDev string
+ Mask uint32
+ Index uint32
+ Buffer uint32
+ Mtu uint32
+ Mpu uint16
+ Rate uint32
+ AvRate uint32
+ PeakRate uint32
+ Action TcPolAct
+ Overhead uint16
+ LinkLayer int
+}
+
+type BpfFilter struct {
+ FilterAttrs
+ ClassId uint32
+ Fd int
+ Name string
+ DirectAction bool
+}
+
+func (filter *BpfFilter) Type() string {
+ return "bpf"
+}
+
+func (filter *BpfFilter) Attrs() *FilterAttrs {
+ return &filter.FilterAttrs
+}
+
+// GenericFilter filters represent types that are not currently understood
+// by this netlink library. 
+type GenericFilter struct { + FilterAttrs + FilterType string +} + +func (filter *GenericFilter) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *GenericFilter) Type() string { + return filter.FilterType +} diff --git a/vendor/github.com/vishvananda/netlink/filter_linux.go b/vendor/github.com/vishvananda/netlink/filter_linux.go new file mode 100644 index 0000000000..5025bd56c1 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/filter_linux.go @@ -0,0 +1,601 @@ +package netlink + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "syscall" + "unsafe" + + "github.com/vishvananda/netlink/nl" +) + +// Constants used in TcU32Sel.Flags. +const ( + TC_U32_TERMINAL = nl.TC_U32_TERMINAL + TC_U32_OFFSET = nl.TC_U32_OFFSET + TC_U32_VAROFFSET = nl.TC_U32_VAROFFSET + TC_U32_EAT = nl.TC_U32_EAT +) + +// Fw filter filters on firewall marks +// NOTE: this is in filter_linux because it refers to nl.TcPolice which +// is defined in nl/tc_linux.go +type Fw struct { + FilterAttrs + ClassId uint32 + // TODO remove nl type from interface + Police nl.TcPolice + InDev string + // TODO Action + Mask uint32 + AvRate uint32 + Rtab [256]uint32 + Ptab [256]uint32 +} + +func NewFw(attrs FilterAttrs, fattrs FilterFwAttrs) (*Fw, error) { + var rtab [256]uint32 + var ptab [256]uint32 + rcellLog := -1 + pcellLog := -1 + avrate := fattrs.AvRate / 8 + police := nl.TcPolice{} + police.Rate.Rate = fattrs.Rate / 8 + police.PeakRate.Rate = fattrs.PeakRate / 8 + buffer := fattrs.Buffer + linklayer := nl.LINKLAYER_ETHERNET + + if fattrs.LinkLayer != nl.LINKLAYER_UNSPEC { + linklayer = fattrs.LinkLayer + } + + police.Action = int32(fattrs.Action) + if police.Rate.Rate != 0 { + police.Rate.Mpu = fattrs.Mpu + police.Rate.Overhead = fattrs.Overhead + if CalcRtable(&police.Rate, rtab, rcellLog, fattrs.Mtu, linklayer) < 0 { + return nil, errors.New("TBF: failed to calculate rate table") + } + police.Burst = uint32(Xmittime(uint64(police.Rate.Rate), uint32(buffer))) + } + police.Mtu = fattrs.Mtu + if police.PeakRate.Rate != 0 { + police.PeakRate.Mpu = fattrs.Mpu + police.PeakRate.Overhead = fattrs.Overhead + if CalcRtable(&police.PeakRate, ptab, pcellLog, fattrs.Mtu, linklayer) < 0 { + return nil, errors.New("POLICE: failed to calculate peak rate table") + } + } + + return &Fw{ + FilterAttrs: attrs, + ClassId: fattrs.ClassId, + InDev: fattrs.InDev, + Mask: fattrs.Mask, + Police: police, + AvRate: avrate, + Rtab: rtab, + Ptab: ptab, + }, nil +} + +func (filter *Fw) Attrs() *FilterAttrs { + return &filter.FilterAttrs +} + +func (filter *Fw) Type() string { + return "fw" +} + +// FilterDel will delete a filter from the system. +// Equivalent to: `tc filter del $filter` +func FilterDel(filter Filter) error { + return pkgHandle.FilterDel(filter) +} + +// FilterDel will delete a filter from the system. +// Equivalent to: `tc filter del $filter` +func (h *Handle) FilterDel(filter Filter) error { + req := h.newNetlinkRequest(syscall.RTM_DELTFILTER, syscall.NLM_F_ACK) + base := filter.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), + } + req.AddData(msg) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// FilterAdd will add a filter to the system. +// Equivalent to: `tc filter add $filter` +func FilterAdd(filter Filter) error { + return pkgHandle.FilterAdd(filter) +} + +// FilterAdd will add a filter to the system. 
+// Equivalent to: `tc filter add $filter` +func (h *Handle) FilterAdd(filter Filter) error { + native = nl.NativeEndian() + req := h.newNetlinkRequest(syscall.RTM_NEWTFILTER, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + base := filter.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + Info: MakeHandle(base.Priority, nl.Swap16(base.Protocol)), + } + req.AddData(msg) + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(filter.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + + switch filter := filter.(type) { + case *U32: + // Convert TcU32Sel into nl.TcU32Sel as it is without copy. + sel := (*nl.TcU32Sel)(unsafe.Pointer(filter.Sel)) + if sel == nil { + // match all + sel = &nl.TcU32Sel{ + Nkeys: 1, + Flags: nl.TC_U32_TERMINAL, + } + sel.Keys = append(sel.Keys, nl.TcU32Key{}) + } + + if native != networkOrder { + // Copy TcU32Sel. + cSel := *sel + keys := make([]nl.TcU32Key, cap(sel.Keys)) + copy(keys, sel.Keys) + cSel.Keys = keys + sel = &cSel + + // Handle the endianness of attributes + sel.Offmask = native.Uint16(htons(sel.Offmask)) + sel.Hmask = native.Uint32(htonl(sel.Hmask)) + for i, key := range sel.Keys { + sel.Keys[i].Mask = native.Uint32(htonl(key.Mask)) + sel.Keys[i].Val = native.Uint32(htonl(key.Val)) + } + } + sel.Nkeys = uint8(len(sel.Keys)) + nl.NewRtAttrChild(options, nl.TCA_U32_SEL, sel.Serialize()) + if filter.ClassId != 0 { + nl.NewRtAttrChild(options, nl.TCA_U32_CLASSID, nl.Uint32Attr(filter.ClassId)) + } + actionsAttr := nl.NewRtAttrChild(options, nl.TCA_U32_ACT, nil) + // backwards compatibility + if filter.RedirIndex != 0 { + filter.Actions = append([]Action{NewMirredAction(filter.RedirIndex)}, filter.Actions...) + } + if err := EncodeActions(actionsAttr, filter.Actions); err != nil { + return err + } + case *Fw: + if filter.Mask != 0 { + b := make([]byte, 4) + native.PutUint32(b, filter.Mask) + nl.NewRtAttrChild(options, nl.TCA_FW_MASK, b) + } + if filter.InDev != "" { + nl.NewRtAttrChild(options, nl.TCA_FW_INDEV, nl.ZeroTerminated(filter.InDev)) + } + if (filter.Police != nl.TcPolice{}) { + + police := nl.NewRtAttrChild(options, nl.TCA_FW_POLICE, nil) + nl.NewRtAttrChild(police, nl.TCA_POLICE_TBF, filter.Police.Serialize()) + if (filter.Police.Rate != nl.TcRateSpec{}) { + payload := SerializeRtab(filter.Rtab) + nl.NewRtAttrChild(police, nl.TCA_POLICE_RATE, payload) + } + if (filter.Police.PeakRate != nl.TcRateSpec{}) { + payload := SerializeRtab(filter.Ptab) + nl.NewRtAttrChild(police, nl.TCA_POLICE_PEAKRATE, payload) + } + } + if filter.ClassId != 0 { + b := make([]byte, 4) + native.PutUint32(b, filter.ClassId) + nl.NewRtAttrChild(options, nl.TCA_FW_CLASSID, b) + } + case *BpfFilter: + var bpfFlags uint32 + if filter.ClassId != 0 { + nl.NewRtAttrChild(options, nl.TCA_BPF_CLASSID, nl.Uint32Attr(filter.ClassId)) + } + if filter.Fd >= 0 { + nl.NewRtAttrChild(options, nl.TCA_BPF_FD, nl.Uint32Attr((uint32(filter.Fd)))) + } + if filter.Name != "" { + nl.NewRtAttrChild(options, nl.TCA_BPF_NAME, nl.ZeroTerminated(filter.Name)) + } + if filter.DirectAction { + bpfFlags |= nl.TCA_BPF_FLAG_ACT_DIRECT + } + nl.NewRtAttrChild(options, nl.TCA_BPF_FLAGS, nl.Uint32Attr(bpfFlags)) + } + + req.AddData(options) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// FilterList gets a list of filters in the system. +// Equivalent to: `tc filter show`. +// Generally returns nothing if link and parent are not specified. 
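+//
+// A minimal sketch, assuming an interface named "eth0" with filters attached
+// to the ingress qdisc:
+//
+//    link, _ := LinkByName("eth0")
+//    filters, _ := FilterList(link, MakeHandle(0xffff, 0))
+//    for _, f := range filters {
+//        fmt.Println(f.Type(), f.Attrs().Priority)
+//    }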
+func FilterList(link Link, parent uint32) ([]Filter, error) { + return pkgHandle.FilterList(link, parent) +} + +// FilterList gets a list of filters in the system. +// Equivalent to: `tc filter show`. +// Generally returns nothing if link and parent are not specified. +func (h *Handle) FilterList(link Link, parent uint32) ([]Filter, error) { + req := h.newNetlinkRequest(syscall.RTM_GETTFILTER, syscall.NLM_F_DUMP) + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Parent: parent, + } + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + msg.Ifindex = int32(base.Index) + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWTFILTER) + if err != nil { + return nil, err + } + + var res []Filter + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + base := FilterAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + } + base.Priority, base.Protocol = MajorMinor(msg.Info) + base.Protocol = nl.Swap16(base.Protocol) + + var filter Filter + filterType := "" + detailed := false + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + filterType = string(attr.Value[:len(attr.Value)-1]) + switch filterType { + case "u32": + filter = &U32{} + case "fw": + filter = &Fw{} + case "bpf": + filter = &BpfFilter{} + default: + filter = &GenericFilter{FilterType: filterType} + } + case nl.TCA_OPTIONS: + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + switch filterType { + case "u32": + detailed, err = parseU32Data(filter, data) + if err != nil { + return nil, err + } + case "fw": + detailed, err = parseFwData(filter, data) + if err != nil { + return nil, err + } + case "bpf": + detailed, err = parseBpfData(filter, data) + if err != nil { + return nil, err + } + default: + detailed = true + } + } + } + // only return the detailed version of the filter + if detailed { + *filter.Attrs() = base + res = append(res, filter) + } + } + + return res, nil +} + +func toTcGen(attrs *ActionAttrs, tcgen *nl.TcGen) { + tcgen.Index = uint32(attrs.Index) + tcgen.Capab = uint32(attrs.Capab) + tcgen.Action = int32(attrs.Action) + tcgen.Refcnt = int32(attrs.Refcnt) + tcgen.Bindcnt = int32(attrs.Bindcnt) +} + +func toAttrs(tcgen *nl.TcGen, attrs *ActionAttrs) { + attrs.Index = int(tcgen.Index) + attrs.Capab = int(tcgen.Capab) + attrs.Action = TcAct(tcgen.Action) + attrs.Refcnt = int(tcgen.Refcnt) + attrs.Bindcnt = int(tcgen.Bindcnt) +} + +func EncodeActions(attr *nl.RtAttr, actions []Action) error { + tabIndex := int(nl.TCA_ACT_TAB) + + for _, action := range actions { + switch action := action.(type) { + default: + return fmt.Errorf("unknown action type %s", action.Type()) + case *MirredAction: + table := nl.NewRtAttrChild(attr, tabIndex, nil) + tabIndex++ + nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("mirred")) + aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + mirred := nl.TcMirred{ + Eaction: int32(action.MirredAction), + Ifindex: uint32(action.Ifindex), + } + toTcGen(action.Attrs(), &mirred.TcGen) + nl.NewRtAttrChild(aopts, nl.TCA_MIRRED_PARMS, mirred.Serialize()) + case *BpfAction: + table := nl.NewRtAttrChild(attr, tabIndex, nil) + tabIndex++ + nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("bpf")) + aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + gen := nl.TcGen{} + toTcGen(action.Attrs(), &gen) + nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_PARMS, 
gen.Serialize()) + nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_FD, nl.Uint32Attr(uint32(action.Fd))) + nl.NewRtAttrChild(aopts, nl.TCA_ACT_BPF_NAME, nl.ZeroTerminated(action.Name)) + case *GenericAction: + table := nl.NewRtAttrChild(attr, tabIndex, nil) + tabIndex++ + nl.NewRtAttrChild(table, nl.TCA_ACT_KIND, nl.ZeroTerminated("gact")) + aopts := nl.NewRtAttrChild(table, nl.TCA_ACT_OPTIONS, nil) + gen := nl.TcGen{} + toTcGen(action.Attrs(), &gen) + nl.NewRtAttrChild(aopts, nl.TCA_GACT_PARMS, gen.Serialize()) + } + } + return nil +} + +func parseActions(tables []syscall.NetlinkRouteAttr) ([]Action, error) { + var actions []Action + for _, table := range tables { + var action Action + var actionType string + aattrs, err := nl.ParseRouteAttr(table.Value) + if err != nil { + return nil, err + } + nextattr: + for _, aattr := range aattrs { + switch aattr.Attr.Type { + case nl.TCA_KIND: + actionType = string(aattr.Value[:len(aattr.Value)-1]) + // only parse if the action is mirred or bpf + switch actionType { + case "mirred": + action = &MirredAction{} + case "bpf": + action = &BpfAction{} + case "gact": + action = &GenericAction{} + default: + break nextattr + } + case nl.TCA_OPTIONS: + adata, err := nl.ParseRouteAttr(aattr.Value) + if err != nil { + return nil, err + } + for _, adatum := range adata { + switch actionType { + case "mirred": + switch adatum.Attr.Type { + case nl.TCA_MIRRED_PARMS: + mirred := *nl.DeserializeTcMirred(adatum.Value) + toAttrs(&mirred.TcGen, action.Attrs()) + action.(*MirredAction).ActionAttrs = ActionAttrs{} + action.(*MirredAction).Ifindex = int(mirred.Ifindex) + action.(*MirredAction).MirredAction = MirredAct(mirred.Eaction) + } + case "bpf": + switch adatum.Attr.Type { + case nl.TCA_ACT_BPF_PARMS: + gen := *nl.DeserializeTcGen(adatum.Value) + toAttrs(&gen, action.Attrs()) + case nl.TCA_ACT_BPF_FD: + action.(*BpfAction).Fd = int(native.Uint32(adatum.Value[0:4])) + case nl.TCA_ACT_BPF_NAME: + action.(*BpfAction).Name = string(adatum.Value[:len(adatum.Value)-1]) + } + case "gact": + switch adatum.Attr.Type { + case nl.TCA_GACT_PARMS: + gen := *nl.DeserializeTcGen(adatum.Value) + toAttrs(&gen, action.Attrs()) + } + } + } + } + } + actions = append(actions, action) + } + return actions, nil +} + +func parseU32Data(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + u32 := filter.(*U32) + detailed := false + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_U32_SEL: + detailed = true + sel := nl.DeserializeTcU32Sel(datum.Value) + u32.Sel = (*TcU32Sel)(unsafe.Pointer(sel)) + if native != networkOrder { + // Handle the endianness of attributes + u32.Sel.Offmask = native.Uint16(htons(sel.Offmask)) + u32.Sel.Hmask = native.Uint32(htonl(sel.Hmask)) + for i, key := range u32.Sel.Keys { + u32.Sel.Keys[i].Mask = native.Uint32(htonl(key.Mask)) + u32.Sel.Keys[i].Val = native.Uint32(htonl(key.Val)) + } + } + case nl.TCA_U32_ACT: + tables, err := nl.ParseRouteAttr(datum.Value) + if err != nil { + return detailed, err + } + u32.Actions, err = parseActions(tables) + if err != nil { + return detailed, err + } + for _, action := range u32.Actions { + if action, ok := action.(*MirredAction); ok { + u32.RedirIndex = int(action.Ifindex) + } + } + case nl.TCA_U32_CLASSID: + u32.ClassId = native.Uint32(datum.Value) + } + } + return detailed, nil +} + +func parseFwData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + fw := filter.(*Fw) + detailed := true + for _, datum := range data { + 
switch datum.Attr.Type { + case nl.TCA_FW_MASK: + fw.Mask = native.Uint32(datum.Value[0:4]) + case nl.TCA_FW_CLASSID: + fw.ClassId = native.Uint32(datum.Value[0:4]) + case nl.TCA_FW_INDEV: + fw.InDev = string(datum.Value[:len(datum.Value)-1]) + case nl.TCA_FW_POLICE: + adata, _ := nl.ParseRouteAttr(datum.Value) + for _, aattr := range adata { + switch aattr.Attr.Type { + case nl.TCA_POLICE_TBF: + fw.Police = *nl.DeserializeTcPolice(aattr.Value) + case nl.TCA_POLICE_RATE: + fw.Rtab = DeserializeRtab(aattr.Value) + case nl.TCA_POLICE_PEAKRATE: + fw.Ptab = DeserializeRtab(aattr.Value) + } + } + } + } + return detailed, nil +} + +func parseBpfData(filter Filter, data []syscall.NetlinkRouteAttr) (bool, error) { + native = nl.NativeEndian() + bpf := filter.(*BpfFilter) + detailed := true + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_BPF_FD: + bpf.Fd = int(native.Uint32(datum.Value[0:4])) + case nl.TCA_BPF_NAME: + bpf.Name = string(datum.Value[:len(datum.Value)-1]) + case nl.TCA_BPF_CLASSID: + bpf.ClassId = native.Uint32(datum.Value[0:4]) + case nl.TCA_BPF_FLAGS: + flags := native.Uint32(datum.Value[0:4]) + if (flags & nl.TCA_BPF_FLAG_ACT_DIRECT) != 0 { + bpf.DirectAction = true + } + } + } + return detailed, nil +} + +func AlignToAtm(size uint) uint { + var linksize, cells int + cells = int(size / nl.ATM_CELL_PAYLOAD) + if (size % nl.ATM_CELL_PAYLOAD) > 0 { + cells++ + } + linksize = cells * nl.ATM_CELL_SIZE + return uint(linksize) +} + +func AdjustSize(sz uint, mpu uint, linklayer int) uint { + if sz < mpu { + sz = mpu + } + switch linklayer { + case nl.LINKLAYER_ATM: + return AlignToAtm(sz) + default: + return sz + } +} + +func CalcRtable(rate *nl.TcRateSpec, rtab [256]uint32, cellLog int, mtu uint32, linklayer int) int { + bps := rate.Rate + mpu := rate.Mpu + var sz uint + if mtu == 0 { + mtu = 2047 + } + if cellLog < 0 { + cellLog = 0 + for (mtu >> uint(cellLog)) > 255 { + cellLog++ + } + } + for i := 0; i < 256; i++ { + sz = AdjustSize(uint((i+1)< 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_AGEING, nl.Uint32Attr(uint32(vxlan.Age))) + } + if vxlan.Limit > 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_LIMIT, nl.Uint32Attr(uint32(vxlan.Limit))) + } + if vxlan.Port > 0 { + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT, htons(uint16(vxlan.Port))) + } + if vxlan.PortLow > 0 || vxlan.PortHigh > 0 { + pr := vxlanPortRange{uint16(vxlan.PortLow), uint16(vxlan.PortHigh)} + + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, &pr) + + nl.NewRtAttrChild(data, nl.IFLA_VXLAN_PORT_RANGE, buf.Bytes()) + } +} + +func addBondAttrs(bond *Bond, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + if bond.Mode >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_MODE, nl.Uint8Attr(uint8(bond.Mode))) + } + if bond.ActiveSlave >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ACTIVE_SLAVE, nl.Uint32Attr(uint32(bond.ActiveSlave))) + } + if bond.Miimon >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_MIIMON, nl.Uint32Attr(uint32(bond.Miimon))) + } + if bond.UpDelay >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_UPDELAY, nl.Uint32Attr(uint32(bond.UpDelay))) + } + if bond.DownDelay >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_DOWNDELAY, nl.Uint32Attr(uint32(bond.DownDelay))) + } + if bond.UseCarrier >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_USE_CARRIER, nl.Uint8Attr(uint8(bond.UseCarrier))) + } + if bond.ArpInterval >= 0 { + nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_INTERVAL, nl.Uint32Attr(uint32(bond.ArpInterval))) + } + if bond.ArpIpTargets != nil { + msg := 
nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_IP_TARGET, nil)
+		for i := range bond.ArpIpTargets {
+			ip := bond.ArpIpTargets[i].To4()
+			if ip != nil {
+				nl.NewRtAttrChild(msg, i, []byte(ip))
+				continue
+			}
+			ip = bond.ArpIpTargets[i].To16()
+			if ip != nil {
+				nl.NewRtAttrChild(msg, i, []byte(ip))
+			}
+		}
+	}
+	if bond.ArpValidate >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_VALIDATE, nl.Uint32Attr(uint32(bond.ArpValidate)))
+	}
+	if bond.ArpAllTargets >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_ARP_ALL_TARGETS, nl.Uint32Attr(uint32(bond.ArpAllTargets)))
+	}
+	if bond.Primary >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY, nl.Uint32Attr(uint32(bond.Primary)))
+	}
+	if bond.PrimaryReselect >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_PRIMARY_RESELECT, nl.Uint8Attr(uint8(bond.PrimaryReselect)))
+	}
+	if bond.FailOverMac >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_FAIL_OVER_MAC, nl.Uint8Attr(uint8(bond.FailOverMac)))
+	}
+	if bond.XmitHashPolicy >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_XMIT_HASH_POLICY, nl.Uint8Attr(uint8(bond.XmitHashPolicy)))
+	}
+	if bond.ResendIgmp >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_RESEND_IGMP, nl.Uint32Attr(uint32(bond.ResendIgmp)))
+	}
+	if bond.NumPeerNotif >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_NUM_PEER_NOTIF, nl.Uint8Attr(uint8(bond.NumPeerNotif)))
+	}
+	if bond.AllSlavesActive >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_ALL_SLAVES_ACTIVE, nl.Uint8Attr(uint8(bond.AllSlavesActive)))
+	}
+	if bond.MinLinks >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_MIN_LINKS, nl.Uint32Attr(uint32(bond.MinLinks)))
+	}
+	if bond.LpInterval >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_LP_INTERVAL, nl.Uint32Attr(uint32(bond.LpInterval)))
+	}
+	if bond.PackersPerSlave >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_PACKETS_PER_SLAVE, nl.Uint32Attr(uint32(bond.PackersPerSlave)))
+	}
+	if bond.LacpRate >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_LACP_RATE, nl.Uint8Attr(uint8(bond.LacpRate)))
+	}
+	if bond.AdSelect >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_SELECT, nl.Uint8Attr(uint8(bond.AdSelect)))
+	}
+	if bond.AdActorSysPrio >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYS_PRIO, nl.Uint16Attr(uint16(bond.AdActorSysPrio)))
+	}
+	if bond.AdUserPortKey >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_USER_PORT_KEY, nl.Uint16Attr(uint16(bond.AdUserPortKey)))
+	}
+	if bond.AdActorSystem != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_AD_ACTOR_SYSTEM, []byte(bond.AdActorSystem))
+	}
+	if bond.TlbDynamicLb >= 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_BOND_TLB_DYNAMIC_LB, nl.Uint8Attr(uint8(bond.TlbDynamicLb)))
+	}
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
+// Equivalent to: `ip link add $link`
+func LinkAdd(link Link) error {
+	return pkgHandle.LinkAdd(link)
+}
+
+// LinkAdd adds a new link device. The type and features of the device
+// are taken from the parameters in the link object.
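+//
+// A minimal sketch of typical usage (illustrative only; "br0" is an
+// example name, and error handling is elided):
+//
+//	br := &Bridge{LinkAttrs: LinkAttrs{Name: "br0"}}
+//	if err := LinkAdd(br); err != nil {
+//		// handle the error (for example, EEXIST if br0 already exists)
+//	}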
+// Equivalent to: `ip link add $link` +func (h *Handle) LinkAdd(link Link) error { + return h.linkModify(link, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) +} + +func (h *Handle) linkModify(link Link, flags int) error { + // TODO: support extra data for macvlan + base := link.Attrs() + + if base.Name == "" { + return fmt.Errorf("LinkAttrs.Name cannot be empty!") + } + + if tuntap, ok := link.(*Tuntap); ok { + // TODO: support user + // TODO: support group + // TODO: multi_queue + // TODO: support non- persistent + if tuntap.Mode < syscall.IFF_TUN || tuntap.Mode > syscall.IFF_TAP { + return fmt.Errorf("Tuntap.Mode %v unknown!", tuntap.Mode) + } + file, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0) + if err != nil { + return err + } + defer file.Close() + var req ifReq + if tuntap.Flags == 0 { + req.Flags = uint16(TUNTAP_DEFAULTS) + } else { + req.Flags = uint16(tuntap.Flags) + } + req.Flags |= uint16(tuntap.Mode) + copy(req.Name[:15], base.Name) + _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETIFF), uintptr(unsafe.Pointer(&req))) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETIFF failed, errno %v", errno) + } + _, _, errno = syscall.Syscall(syscall.SYS_IOCTL, file.Fd(), uintptr(syscall.TUNSETPERSIST), 1) + if errno != 0 { + return fmt.Errorf("Tuntap IOCTL TUNSETPERSIST failed, errno %v", errno) + } + h.ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? + return h.LinkSetMasterByIndex(link, base.MasterIndex) + } + return nil + } + + req := h.newNetlinkRequest(syscall.RTM_NEWLINK, flags) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + // TODO: make it shorter + if base.Flags&net.FlagUp != 0 { + msg.Change = syscall.IFF_UP + msg.Flags = syscall.IFF_UP + } + if base.Flags&net.FlagBroadcast != 0 { + msg.Change |= syscall.IFF_BROADCAST + msg.Flags |= syscall.IFF_BROADCAST + } + if base.Flags&net.FlagLoopback != 0 { + msg.Change |= syscall.IFF_LOOPBACK + msg.Flags |= syscall.IFF_LOOPBACK + } + if base.Flags&net.FlagPointToPoint != 0 { + msg.Change |= syscall.IFF_POINTOPOINT + msg.Flags |= syscall.IFF_POINTOPOINT + } + if base.Flags&net.FlagMulticast != 0 { + msg.Change |= syscall.IFF_MULTICAST + msg.Flags |= syscall.IFF_MULTICAST + } + req.AddData(msg) + + if base.ParentIndex != 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(base.ParentIndex)) + data := nl.NewRtAttr(syscall.IFLA_LINK, b) + req.AddData(data) + } else if link.Type() == "ipvlan" { + return fmt.Errorf("Can't create ipvlan link without ParentIndex") + } + + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(base.Name)) + req.AddData(nameData) + + if base.MTU > 0 { + mtu := nl.NewRtAttr(syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + req.AddData(mtu) + } + + if base.TxQLen >= 0 { + qlen := nl.NewRtAttr(syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + req.AddData(qlen) + } + + if base.HardwareAddr != nil { + hwaddr := nl.NewRtAttr(syscall.IFLA_ADDRESS, []byte(base.HardwareAddr)) + req.AddData(hwaddr) + } + + if base.Namespace != nil { + var attr *nl.RtAttr + switch base.Namespace.(type) { + case NsPid: + val := nl.Uint32Attr(uint32(base.Namespace.(NsPid))) + attr = nl.NewRtAttr(syscall.IFLA_NET_NS_PID, val) + case NsFd: + val := nl.Uint32Attr(uint32(base.Namespace.(NsFd))) + attr = nl.NewRtAttr(nl.IFLA_NET_NS_FD, val) + } + + req.AddData(attr) + } + + if base.Xdp != nil { + addXdpAttrs(base.Xdp, req) + } + + linkInfo := 
nl.NewRtAttr(syscall.IFLA_LINKINFO, nil) + nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_KIND, nl.NonZeroTerminated(link.Type())) + + switch link := link.(type) { + case *Vlan: + b := make([]byte, 2) + native.PutUint16(b, uint16(link.VlanId)) + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_VLAN_ID, b) + case *Veth: + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + peer := nl.NewRtAttrChild(data, nl.VETH_INFO_PEER, nil) + nl.NewIfInfomsgChild(peer, syscall.AF_UNSPEC) + nl.NewRtAttrChild(peer, syscall.IFLA_IFNAME, nl.ZeroTerminated(link.PeerName)) + if base.TxQLen >= 0 { + nl.NewRtAttrChild(peer, syscall.IFLA_TXQLEN, nl.Uint32Attr(uint32(base.TxQLen))) + } + if base.MTU > 0 { + nl.NewRtAttrChild(peer, syscall.IFLA_MTU, nl.Uint32Attr(uint32(base.MTU))) + } + + case *Vxlan: + addVxlanAttrs(link, linkInfo) + case *Bond: + addBondAttrs(link, linkInfo) + case *IPVlan: + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_IPVLAN_MODE, nl.Uint16Attr(uint16(link.Mode))) + case *Macvlan: + if link.Mode != MACVLAN_MODE_DEFAULT { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) + } + case *Macvtap: + if link.Mode != MACVLAN_MODE_DEFAULT { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_MACVLAN_MODE, nl.Uint32Attr(macvlanModes[link.Mode])) + } + case *Gretap: + addGretapAttrs(link, linkInfo) + case *Iptun: + addIptunAttrs(link, linkInfo) + case *Gretun: + addGretunAttrs(link, linkInfo) + case *Vti: + addVtiAttrs(link, linkInfo) + case *Vrf: + addVrfAttrs(link, linkInfo) + case *Bridge: + addBridgeAttrs(link, linkInfo) + case *GTP: + addGTPAttrs(link, linkInfo) + } + + req.AddData(linkInfo) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return err + } + + h.ensureIndex(base) + + // can't set master during create, so set it afterwards + if base.MasterIndex != 0 { + // TODO: verify MasterIndex is actually a bridge? + return h.LinkSetMasterByIndex(link, base.MasterIndex) + } + return nil +} + +// LinkDel deletes link device. Either Index or Name must be set in +// the link object for it to be deleted. The other values are ignored. +// Equivalent to: `ip link del $link` +func LinkDel(link Link) error { + return pkgHandle.LinkDel(link) +} + +// LinkDel deletes link device. Either Index or Name must be set in +// the link object for it to be deleted. The other values are ignored. 
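+//
+// A sketch of typical usage (illustrative only; error handling elided):
+//
+//	if link, err := LinkByName("br0"); err == nil {
+//		LinkDel(link)
+//	}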
+// Equivalent to: `ip link del $link` +func (h *Handle) LinkDel(link Link) error { + base := link.Attrs() + + h.ensureIndex(base) + + req := h.newNetlinkRequest(syscall.RTM_DELLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + msg.Index = int32(base.Index) + req.AddData(msg) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +func (h *Handle) linkByNameDump(name string) (Link, error) { + links, err := h.LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Name == name { + return link, nil + } + } + return nil, LinkNotFoundError{fmt.Errorf("Link %s not found", name)} +} + +func (h *Handle) linkByAliasDump(alias string) (Link, error) { + links, err := h.LinkList() + if err != nil { + return nil, err + } + + for _, link := range links { + if link.Attrs().Alias == alias { + return link, nil + } + } + return nil, LinkNotFoundError{fmt.Errorf("Link alias %s not found", alias)} +} + +// LinkByName finds a link by name and returns a pointer to the object. +func LinkByName(name string) (Link, error) { + return pkgHandle.LinkByName(name) +} + +// LinkByName finds a link by name and returns a pointer to the object. +func (h *Handle) LinkByName(name string) (Link, error) { + if h.lookupByDump { + return h.linkByNameDump(name) + } + + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + nameData := nl.NewRtAttr(syscall.IFLA_IFNAME, nl.ZeroTerminated(name)) + req.AddData(nameData) + + link, err := execGetLink(req) + if err == syscall.EINVAL { + // older kernels don't support looking up via IFLA_IFNAME + // so fall back to dumping all links + h.lookupByDump = true + return h.linkByNameDump(name) + } + + return link, err +} + +// LinkByAlias finds a link by its alias and returns a pointer to the object. +// If there are multiple links with the alias it returns the first one +func LinkByAlias(alias string) (Link, error) { + return pkgHandle.LinkByAlias(alias) +} + +// LinkByAlias finds a link by its alias and returns a pointer to the object. +// If there are multiple links with the alias it returns the first one +func (h *Handle) LinkByAlias(alias string) (Link, error) { + if h.lookupByDump { + return h.linkByAliasDump(alias) + } + + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + nameData := nl.NewRtAttr(syscall.IFLA_IFALIAS, nl.ZeroTerminated(alias)) + req.AddData(nameData) + + link, err := execGetLink(req) + if err == syscall.EINVAL { + // older kernels don't support looking up via IFLA_IFALIAS + // so fall back to dumping all links + h.lookupByDump = true + return h.linkByAliasDump(alias) + } + + return link, err +} + +// LinkByIndex finds a link by index and returns a pointer to the object. +func LinkByIndex(index int) (Link, error) { + return pkgHandle.LinkByIndex(index) +} + +// LinkByIndex finds a link by index and returns a pointer to the object. 
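+//
+// For illustration, looking a device up by name and again by its index
+// (assumes a Linux host with a loopback device named "lo"):
+//
+//	lo, err := LinkByName("lo")
+//	if err == nil {
+//		lo2, _ := LinkByIndex(lo.Attrs().Index)
+//		_ = lo2 // same device
+//	}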
+func (h *Handle) LinkByIndex(index int) (Link, error) {
+	req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(index)
+	req.AddData(msg)
+
+	return execGetLink(req)
+}
+
+func execGetLink(req *nl.NetlinkRequest) (Link, error) {
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	if err != nil {
+		if errno, ok := err.(syscall.Errno); ok {
+			if errno == syscall.ENODEV {
+				return nil, LinkNotFoundError{fmt.Errorf("Link not found")}
+			}
+		}
+		return nil, err
+	}
+
+	switch {
+	case len(msgs) == 0:
+		return nil, LinkNotFoundError{fmt.Errorf("Link not found")}
+
+	case len(msgs) == 1:
+		return LinkDeserialize(nil, msgs[0])
+
+	default:
+		return nil, fmt.Errorf("More than one link found")
+	}
+}
+
+// LinkDeserialize deserializes a raw message received from netlink into
+// a link object.
+func LinkDeserialize(hdr *syscall.NlMsghdr, m []byte) (Link, error) {
+	msg := nl.DeserializeIfInfomsg(m)
+
+	attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+	if err != nil {
+		return nil, err
+	}
+
+	base := LinkAttrs{Index: int(msg.Index), RawFlags: msg.Flags, Flags: linkFlags(msg.Flags), EncapType: msg.EncapType()}
+	if msg.Flags&syscall.IFF_PROMISC != 0 {
+		base.Promisc = 1
+	}
+	var (
+		link     Link
+		stats32  []byte
+		stats64  []byte
+		linkType string
+	)
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case syscall.IFLA_LINKINFO:
+			infos, err := nl.ParseRouteAttr(attr.Value)
+			if err != nil {
+				return nil, err
+			}
+			for _, info := range infos {
+				switch info.Attr.Type {
+				case nl.IFLA_INFO_KIND:
+					linkType = string(info.Value[:len(info.Value)-1])
+					switch linkType {
+					case "dummy":
+						link = &Dummy{}
+					case "ifb":
+						link = &Ifb{}
+					case "bridge":
+						link = &Bridge{}
+					case "vlan":
+						link = &Vlan{}
+					case "veth":
+						link = &Veth{}
+					case "vxlan":
+						link = &Vxlan{}
+					case "bond":
+						link = &Bond{}
+					case "ipvlan":
+						link = &IPVlan{}
+					case "macvlan":
+						link = &Macvlan{}
+					case "macvtap":
+						link = &Macvtap{}
+					case "gretap":
+						link = &Gretap{}
+					case "ipip":
+						link = &Iptun{}
+					case "gre":
+						link = &Gretun{}
+					case "vti":
+						link = &Vti{}
+					case "vrf":
+						link = &Vrf{}
+					case "gtp":
+						link = &GTP{}
+					default:
+						link = &GenericLink{LinkType: linkType}
+					}
+				case nl.IFLA_INFO_DATA:
+					data, err := nl.ParseRouteAttr(info.Value)
+					if err != nil {
+						return nil, err
+					}
+					switch linkType {
+					case "vlan":
+						parseVlanData(link, data)
+					case "vxlan":
+						parseVxlanData(link, data)
+					case "bond":
+						parseBondData(link, data)
+					case "ipvlan":
+						parseIPVlanData(link, data)
+					case "macvlan":
+						parseMacvlanData(link, data)
+					case "macvtap":
+						parseMacvtapData(link, data)
+					case "gretap":
+						parseGretapData(link, data)
+					case "ipip":
+						parseIptunData(link, data)
+					case "gre":
+						parseGretunData(link, data)
+					case "vti":
+						parseVtiData(link, data)
+					case "vrf":
+						parseVrfData(link, data)
+					case "bridge":
+						parseBridgeData(link, data)
+					case "gtp":
+						parseGTPData(link, data)
+					}
+				}
+			}
+		case syscall.IFLA_ADDRESS:
+			var nonzero bool
+			for _, b := range attr.Value {
+				if b != 0 {
+					nonzero = true
+				}
+			}
+			if nonzero {
+				base.HardwareAddr = attr.Value[:]
+			}
+		case syscall.IFLA_IFNAME:
+			base.Name = string(attr.Value[:len(attr.Value)-1])
+		case syscall.IFLA_MTU:
+			base.MTU = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_LINK:
+			base.ParentIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_MASTER:
+			base.MasterIndex = int(native.Uint32(attr.Value[0:4]))
+		case syscall.IFLA_TXQLEN:
+			base.TxQLen = 
int(native.Uint32(attr.Value[0:4])) + case syscall.IFLA_IFALIAS: + base.Alias = string(attr.Value[:len(attr.Value)-1]) + case syscall.IFLA_STATS: + stats32 = attr.Value[:] + case IFLA_STATS64: + stats64 = attr.Value[:] + case nl.IFLA_XDP: + xdp, err := parseLinkXdp(attr.Value[:]) + if err != nil { + return nil, err + } + base.Xdp = xdp + case syscall.IFLA_PROTINFO | syscall.NLA_F_NESTED: + if hdr != nil && hdr.Type == syscall.RTM_NEWLINK && + msg.Family == syscall.AF_BRIDGE { + attrs, err := nl.ParseRouteAttr(attr.Value[:]) + if err != nil { + return nil, err + } + base.Protinfo = parseProtinfo(attrs) + } + case syscall.IFLA_OPERSTATE: + base.OperState = LinkOperState(uint8(attr.Value[0])) + case nl.IFLA_LINK_NETNSID: + base.NetNsID = int(native.Uint32(attr.Value[0:4])) + } + } + + if stats64 != nil { + base.Statistics = parseLinkStats64(stats64) + } else if stats32 != nil { + base.Statistics = parseLinkStats32(stats32) + } + + // Links that don't have IFLA_INFO_KIND are hardware devices + if link == nil { + link = &Device{} + } + *link.Attrs() = base + + return link, nil +} + +// LinkList gets a list of link devices. +// Equivalent to: `ip link show` +func LinkList() ([]Link, error) { + return pkgHandle.LinkList() +} + +// LinkList gets a list of link devices. +// Equivalent to: `ip link show` +func (h *Handle) LinkList() ([]Link, error) { + // NOTE(vish): This duplicates functionality in net/iface_linux.go, but we need + // to get the message ourselves to parse link type. + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + + msg := nl.NewIfInfomsg(syscall.AF_UNSPEC) + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK) + if err != nil { + return nil, err + } + + var res []Link + for _, m := range msgs { + link, err := LinkDeserialize(nil, m) + if err != nil { + return nil, err + } + res = append(res, link) + } + + return res, nil +} + +// LinkUpdate is used to pass information back from LinkSubscribe() +type LinkUpdate struct { + nl.IfInfomsg + Header syscall.NlMsghdr + Link +} + +// LinkSubscribe takes a chan down which notifications will be sent +// when links change. Close the 'done' chan to stop subscription. +func LinkSubscribe(ch chan<- LinkUpdate, done <-chan struct{}) error { + return linkSubscribeAt(netns.None(), netns.None(), ch, done, nil) +} + +// LinkSubscribeAt works like LinkSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). +func LinkSubscribeAt(ns netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}) error { + return linkSubscribeAt(ns, netns.None(), ch, done, nil) +} + +// LinkSubscribeOptions contains a set of options to use with +// LinkSubscribeWithOptions. +type LinkSubscribeOptions struct { + Namespace *netns.NsHandle + ErrorCallback func(error) +} + +// LinkSubscribeWithOptions work like LinkSubscribe but enable to +// provide additional options to modify the behavior. Currently, the +// namespace can be provided as well as an error callback. 
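+//
+// A sketch of a typical subscription loop (illustrative only):
+//
+//	ch := make(chan LinkUpdate)
+//	done := make(chan struct{})
+//	defer close(done)
+//	opts := LinkSubscribeOptions{ErrorCallback: func(err error) { /* log err */ }}
+//	if err := LinkSubscribeWithOptions(ch, done, opts); err == nil {
+//		for update := range ch {
+//			// update.Link carries the new state of the changed device
+//		}
+//	}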
+func LinkSubscribeWithOptions(ch chan<- LinkUpdate, done <-chan struct{}, options LinkSubscribeOptions) error { + if options.Namespace == nil { + none := netns.None() + options.Namespace = &none + } + return linkSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback) +} + +func linkSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- LinkUpdate, done <-chan struct{}, cberr func(error)) error { + s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_LINK) + if err != nil { + return err + } + if done != nil { + go func() { + <-done + s.Close() + }() + } + go func() { + defer close(ch) + for { + msgs, err := s.Receive() + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + for _, m := range msgs { + ifmsg := nl.DeserializeIfInfomsg(m.Data) + link, err := LinkDeserialize(&m.Header, m.Data) + if err != nil { + if cberr != nil { + cberr(err) + } + return + } + ch <- LinkUpdate{IfInfomsg: *ifmsg, Header: m.Header, Link: link} + } + } + }() + + return nil +} + +func LinkSetHairpin(link Link, mode bool) error { + return pkgHandle.LinkSetHairpin(link, mode) +} + +func (h *Handle) LinkSetHairpin(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_MODE) +} + +func LinkSetGuard(link Link, mode bool) error { + return pkgHandle.LinkSetGuard(link, mode) +} + +func (h *Handle) LinkSetGuard(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_GUARD) +} + +func LinkSetFastLeave(link Link, mode bool) error { + return pkgHandle.LinkSetFastLeave(link, mode) +} + +func (h *Handle) LinkSetFastLeave(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_FAST_LEAVE) +} + +func LinkSetLearning(link Link, mode bool) error { + return pkgHandle.LinkSetLearning(link, mode) +} + +func (h *Handle) LinkSetLearning(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_LEARNING) +} + +func LinkSetRootBlock(link Link, mode bool) error { + return pkgHandle.LinkSetRootBlock(link, mode) +} + +func (h *Handle) LinkSetRootBlock(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROTECT) +} + +func LinkSetFlood(link Link, mode bool) error { + return pkgHandle.LinkSetFlood(link, mode) +} + +func (h *Handle) LinkSetFlood(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_UNICAST_FLOOD) +} + +func LinkSetBrProxyArp(link Link, mode bool) error { + return pkgHandle.LinkSetBrProxyArp(link, mode) +} + +func (h *Handle) LinkSetBrProxyArp(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP) +} + +func LinkSetBrProxyArpWiFi(link Link, mode bool) error { + return pkgHandle.LinkSetBrProxyArpWiFi(link, mode) +} + +func (h *Handle) LinkSetBrProxyArpWiFi(link Link, mode bool) error { + return h.setProtinfoAttr(link, mode, nl.IFLA_BRPORT_PROXYARP_WIFI) +} + +func (h *Handle) setProtinfoAttr(link Link, mode bool, attr int) error { + base := link.Attrs() + h.ensureIndex(base) + req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK) + + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + msg.Index = int32(base.Index) + req.AddData(msg) + + br := nl.NewRtAttr(syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED, nil) + nl.NewRtAttrChild(br, attr, boolToByte(mode)) + req.AddData(br) + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return err + } + return nil +} + +// LinkSetTxQLen sets the transaction queue length for the link. 
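+//
+// For example (illustrative; 1000 is an arbitrary queue length):
+//
+//	if eth0, err := LinkByName("eth0"); err == nil {
+//		LinkSetTxQLen(eth0, 1000)
+//	}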
+// Equivalent to: `ip link set $link txqlen $qlen`
+func LinkSetTxQLen(link Link, qlen int) error {
+	return pkgHandle.LinkSetTxQLen(link, qlen)
+}
+
+// LinkSetTxQLen sets the transaction queue length for the link.
+// Equivalent to: `ip link set $link txqlen $qlen`
+func (h *Handle) LinkSetTxQLen(link Link, qlen int) error {
+	base := link.Attrs()
+	h.ensureIndex(base)
+	req := h.newNetlinkRequest(syscall.RTM_SETLINK, syscall.NLM_F_ACK)
+
+	msg := nl.NewIfInfomsg(syscall.AF_UNSPEC)
+	msg.Index = int32(base.Index)
+	req.AddData(msg)
+
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(qlen))
+
+	data := nl.NewRtAttr(syscall.IFLA_TXQLEN, b)
+	req.AddData(data)
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+func parseVlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vlan := link.(*Vlan)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_VLAN_ID:
+			vlan.VlanId = int(native.Uint16(datum.Value[0:2]))
+		}
+	}
+}
+
+func parseVxlanData(link Link, data []syscall.NetlinkRouteAttr) {
+	vxlan := link.(*Vxlan)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_VXLAN_ID:
+			vxlan.VxlanId = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LINK:
+			vxlan.VtepDevIndex = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_LOCAL:
+			vxlan.SrcAddr = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_LOCAL6:
+			vxlan.SrcAddr = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_GROUP:
+			vxlan.Group = net.IP(datum.Value[0:4])
+		case nl.IFLA_VXLAN_GROUP6:
+			vxlan.Group = net.IP(datum.Value[0:16])
+		case nl.IFLA_VXLAN_TTL:
+			vxlan.TTL = int(datum.Value[0])
+		case nl.IFLA_VXLAN_TOS:
+			vxlan.TOS = int(datum.Value[0])
+		case nl.IFLA_VXLAN_LEARNING:
+			vxlan.Learning = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_PROXY:
+			vxlan.Proxy = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_RSC:
+			vxlan.RSC = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L2MISS:
+			vxlan.L2miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_L3MISS:
+			vxlan.L3miss = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_UDP_CSUM:
+			vxlan.UDPCSum = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_GBP:
+			vxlan.GBP = true
+		case nl.IFLA_VXLAN_FLOWBASED:
+			vxlan.FlowBased = int8(datum.Value[0]) != 0
+		case nl.IFLA_VXLAN_AGEING:
+			vxlan.Age = int(native.Uint32(datum.Value[0:4]))
+			vxlan.NoAge = vxlan.Age == 0
+		case nl.IFLA_VXLAN_LIMIT:
+			vxlan.Limit = int(native.Uint32(datum.Value[0:4]))
+		case nl.IFLA_VXLAN_PORT:
+			vxlan.Port = int(ntohs(datum.Value[0:2]))
+		case nl.IFLA_VXLAN_PORT_RANGE:
+			buf := bytes.NewBuffer(datum.Value[0:4])
+			var pr vxlanPortRange
+			// only use the decoded range when binary.Read succeeds
+			if binary.Read(buf, binary.BigEndian, &pr) == nil {
+				vxlan.PortLow = int(pr.Lo)
+				vxlan.PortHigh = int(pr.Hi)
+			}
+		}
+	}
+}
+
+func parseBondData(link Link, data []syscall.NetlinkRouteAttr) {
+	bond := link.(*Bond)
+	for i := range data {
+		switch data[i].Attr.Type {
+		case nl.IFLA_BOND_MODE:
+			bond.Mode = BondMode(data[i].Value[0])
+		case nl.IFLA_BOND_ACTIVE_SLAVE:
+			bond.ActiveSlave = int(native.Uint32(data[i].Value[0:4]))
+		case nl.IFLA_BOND_MIIMON:
+			bond.Miimon = int(native.Uint32(data[i].Value[0:4]))
+		case nl.IFLA_BOND_UPDELAY:
+			bond.UpDelay = int(native.Uint32(data[i].Value[0:4]))
+		case nl.IFLA_BOND_DOWNDELAY:
+			bond.DownDelay = int(native.Uint32(data[i].Value[0:4]))
+		case nl.IFLA_BOND_USE_CARRIER:
+			bond.UseCarrier = int(data[i].Value[0])
+		case nl.IFLA_BOND_ARP_INTERVAL:
+			bond.ArpInterval = int(native.Uint32(data[i].Value[0:4]))
+		case nl.IFLA_BOND_ARP_IP_TARGET:
+			// TODO: implement
+		case 
nl.IFLA_BOND_ARP_VALIDATE: + bond.ArpValidate = BondArpValidate(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_ARP_ALL_TARGETS: + bond.ArpAllTargets = BondArpAllTargets(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY: + bond.Primary = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PRIMARY_RESELECT: + bond.PrimaryReselect = BondPrimaryReselect(data[i].Value[0]) + case nl.IFLA_BOND_FAIL_OVER_MAC: + bond.FailOverMac = BondFailOverMac(data[i].Value[0]) + case nl.IFLA_BOND_XMIT_HASH_POLICY: + bond.XmitHashPolicy = BondXmitHashPolicy(data[i].Value[0]) + case nl.IFLA_BOND_RESEND_IGMP: + bond.ResendIgmp = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_NUM_PEER_NOTIF: + bond.NumPeerNotif = int(data[i].Value[0]) + case nl.IFLA_BOND_ALL_SLAVES_ACTIVE: + bond.AllSlavesActive = int(data[i].Value[0]) + case nl.IFLA_BOND_MIN_LINKS: + bond.MinLinks = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_LP_INTERVAL: + bond.LpInterval = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_PACKETS_PER_SLAVE: + bond.PackersPerSlave = int(native.Uint32(data[i].Value[0:4])) + case nl.IFLA_BOND_AD_LACP_RATE: + bond.LacpRate = BondLacpRate(data[i].Value[0]) + case nl.IFLA_BOND_AD_SELECT: + bond.AdSelect = BondAdSelect(data[i].Value[0]) + case nl.IFLA_BOND_AD_INFO: + // TODO: implement + case nl.IFLA_BOND_AD_ACTOR_SYS_PRIO: + bond.AdActorSysPrio = int(native.Uint16(data[i].Value[0:2])) + case nl.IFLA_BOND_AD_USER_PORT_KEY: + bond.AdUserPortKey = int(native.Uint16(data[i].Value[0:2])) + case nl.IFLA_BOND_AD_ACTOR_SYSTEM: + bond.AdActorSystem = net.HardwareAddr(data[i].Value[0:6]) + case nl.IFLA_BOND_TLB_DYNAMIC_LB: + bond.TlbDynamicLb = int(data[i].Value[0]) + } + } +} + +func parseIPVlanData(link Link, data []syscall.NetlinkRouteAttr) { + ipv := link.(*IPVlan) + for _, datum := range data { + if datum.Attr.Type == nl.IFLA_IPVLAN_MODE { + ipv.Mode = IPVlanMode(native.Uint32(datum.Value[0:4])) + return + } + } +} + +func parseMacvtapData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvtap) + parseMacvlanData(&macv.Macvlan, data) +} + +func parseMacvlanData(link Link, data []syscall.NetlinkRouteAttr) { + macv := link.(*Macvlan) + for _, datum := range data { + if datum.Attr.Type == nl.IFLA_MACVLAN_MODE { + switch native.Uint32(datum.Value[0:4]) { + case nl.MACVLAN_MODE_PRIVATE: + macv.Mode = MACVLAN_MODE_PRIVATE + case nl.MACVLAN_MODE_VEPA: + macv.Mode = MACVLAN_MODE_VEPA + case nl.MACVLAN_MODE_BRIDGE: + macv.Mode = MACVLAN_MODE_BRIDGE + case nl.MACVLAN_MODE_PASSTHRU: + macv.Mode = MACVLAN_MODE_PASSTHRU + case nl.MACVLAN_MODE_SOURCE: + macv.Mode = MACVLAN_MODE_SOURCE + } + return + } + } +} + +// copied from pkg/net_linux.go +func linkFlags(rawFlags uint32) net.Flags { + var f net.Flags + if rawFlags&syscall.IFF_UP != 0 { + f |= net.FlagUp + } + if rawFlags&syscall.IFF_BROADCAST != 0 { + f |= net.FlagBroadcast + } + if rawFlags&syscall.IFF_LOOPBACK != 0 { + f |= net.FlagLoopback + } + if rawFlags&syscall.IFF_POINTOPOINT != 0 { + f |= net.FlagPointToPoint + } + if rawFlags&syscall.IFF_MULTICAST != 0 { + f |= net.FlagMulticast + } + return f +} + +func addGretapAttrs(gretap *Gretap, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + + if gretap.FlowBased { + // In flow based mode, no other attributes need to be configured + nl.NewRtAttrChild(data, nl.IFLA_GRE_COLLECT_METADATA, boolAttr(gretap.FlowBased)) + return + } + + ip := gretap.Local.To4() + if ip != nil { + nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, 
[]byte(ip))
+	}
+	ip = gretap.Remote.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+	}
+
+	if gretap.IKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gretap.IKey))
+		gretap.IFlags |= uint16(nl.GRE_KEY)
+	}
+
+	if gretap.OKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gretap.OKey))
+		gretap.OFlags |= uint16(nl.GRE_KEY)
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gretap.IFlags))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gretap.OFlags))
+
+	if gretap.Link != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gretap.Link))
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gretap.PMtuDisc))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gretap.Ttl))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gretap.Tos))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_TYPE, nl.Uint16Attr(gretap.EncapType))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_FLAGS, nl.Uint16Attr(gretap.EncapFlags))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_SPORT, htons(gretap.EncapSport))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_ENCAP_DPORT, htons(gretap.EncapDport))
+}
+
+func parseGretapData(link Link, data []syscall.NetlinkRouteAttr) {
+	gre := link.(*Gretap)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_GRE_OKEY:
+			gre.OKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_IKEY:
+			gre.IKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_LOCAL:
+			gre.Local = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_REMOTE:
+			gre.Remote = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_ENCAP_SPORT:
+			gre.EncapSport = ntohs(datum.Value[0:2])
+		case nl.IFLA_GRE_ENCAP_DPORT:
+			gre.EncapDport = ntohs(datum.Value[0:2])
+		case nl.IFLA_GRE_IFLAGS:
+			gre.IFlags = ntohs(datum.Value[0:2])
+		case nl.IFLA_GRE_OFLAGS:
+			gre.OFlags = ntohs(datum.Value[0:2])
+
+		case nl.IFLA_GRE_TTL:
+			gre.Ttl = uint8(datum.Value[0])
+		case nl.IFLA_GRE_TOS:
+			gre.Tos = uint8(datum.Value[0])
+		case nl.IFLA_GRE_PMTUDISC:
+			gre.PMtuDisc = uint8(datum.Value[0])
+		case nl.IFLA_GRE_ENCAP_TYPE:
+			gre.EncapType = native.Uint16(datum.Value[0:2])
+		case nl.IFLA_GRE_ENCAP_FLAGS:
+			gre.EncapFlags = native.Uint16(datum.Value[0:2])
+		case nl.IFLA_GRE_COLLECT_METADATA:
+			gre.FlowBased = int8(datum.Value[0]) != 0
+		}
+	}
+}
+
+func addGretunAttrs(gre *Gretun, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+	ip := gre.Local.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_LOCAL, []byte(ip))
+	}
+	ip = gre.Remote.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_REMOTE, []byte(ip))
+	}
+
+	if gre.IKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_IKEY, htonl(gre.IKey))
+		gre.IFlags |= uint16(nl.GRE_KEY)
+	}
+
+	if gre.OKey != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_OKEY, htonl(gre.OKey))
+		gre.OFlags |= uint16(nl.GRE_KEY)
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_IFLAGS, htons(gre.IFlags))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_OFLAGS, htons(gre.OFlags))
+
+	if gre.Link != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_GRE_LINK, nl.Uint32Attr(gre.Link))
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_PMTUDISC, nl.Uint8Attr(gre.PMtuDisc))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TTL, nl.Uint8Attr(gre.Ttl))
+	nl.NewRtAttrChild(data, nl.IFLA_GRE_TOS, nl.Uint8Attr(gre.Tos))
+}
+
+func parseGretunData(link Link, data []syscall.NetlinkRouteAttr) {
+	gre := link.(*Gretun)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_GRE_OKEY:
+			gre.OKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_IKEY:
+			gre.IKey = ntohl(datum.Value[0:4])
+		case nl.IFLA_GRE_LOCAL:
+			gre.Local = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_REMOTE:
+			gre.Remote = net.IP(datum.Value[0:4])
+		case nl.IFLA_GRE_IFLAGS:
+			gre.IFlags = ntohs(datum.Value[0:2])
+		case nl.IFLA_GRE_OFLAGS:
+			gre.OFlags = ntohs(datum.Value[0:2])
+
+		case nl.IFLA_GRE_TTL:
+			gre.Ttl = uint8(datum.Value[0])
+		case nl.IFLA_GRE_TOS:
+			gre.Tos = uint8(datum.Value[0])
+		case nl.IFLA_GRE_PMTUDISC:
+			gre.PMtuDisc = uint8(datum.Value[0])
+		}
+	}
+}
+
+func parseLinkStats32(data []byte) *LinkStatistics {
+	return (*LinkStatistics)((*LinkStatistics32)(unsafe.Pointer(&data[0:SizeofLinkStats32][0])).to64())
+}
+
+func parseLinkStats64(data []byte) *LinkStatistics {
+	return (*LinkStatistics)((*LinkStatistics64)(unsafe.Pointer(&data[0:SizeofLinkStats64][0])))
+}
+
+func addXdpAttrs(xdp *LinkXdp, req *nl.NetlinkRequest) {
+	attrs := nl.NewRtAttr(nl.IFLA_XDP|syscall.NLA_F_NESTED, nil)
+	b := make([]byte, 4)
+	native.PutUint32(b, uint32(xdp.Fd))
+	nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FD, b)
+	if xdp.Flags != 0 {
+		native.PutUint32(b, xdp.Flags)
+		nl.NewRtAttrChild(attrs, nl.IFLA_XDP_FLAGS, b)
+	}
+	req.AddData(attrs)
+}
+
+func parseLinkXdp(data []byte) (*LinkXdp, error) {
+	attrs, err := nl.ParseRouteAttr(data)
+	if err != nil {
+		return nil, err
+	}
+	xdp := &LinkXdp{}
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case nl.IFLA_XDP_FD:
+			xdp.Fd = int(native.Uint32(attr.Value[0:4]))
+		case nl.IFLA_XDP_ATTACHED:
+			xdp.Attached = attr.Value[0] != 0
+		case nl.IFLA_XDP_FLAGS:
+			xdp.Flags = native.Uint32(attr.Value[0:4])
+		case nl.IFLA_XDP_PROG_ID:
+			xdp.ProgId = native.Uint32(attr.Value[0:4])
+		}
+	}
+	return xdp, nil
+}
+
+func addIptunAttrs(iptun *Iptun, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+	ip := iptun.Local.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LOCAL, []byte(ip))
+	}
+
+	ip = iptun.Remote.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_IPTUN_REMOTE, []byte(ip))
+	}
+
+	if iptun.Link != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_IPTUN_LINK, nl.Uint32Attr(iptun.Link))
+	}
+	nl.NewRtAttrChild(data, nl.IFLA_IPTUN_PMTUDISC, nl.Uint8Attr(iptun.PMtuDisc))
+	nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TTL, nl.Uint8Attr(iptun.Ttl))
+	nl.NewRtAttrChild(data, nl.IFLA_IPTUN_TOS, nl.Uint8Attr(iptun.Tos))
+}
+
+func parseIptunData(link Link, data []syscall.NetlinkRouteAttr) {
+	iptun := link.(*Iptun)
+	for _, datum := range data {
+		switch datum.Attr.Type {
+		case nl.IFLA_IPTUN_LOCAL:
+			iptun.Local = net.IP(datum.Value[0:4])
+		case nl.IFLA_IPTUN_REMOTE:
+			iptun.Remote = net.IP(datum.Value[0:4])
+		case nl.IFLA_IPTUN_TTL:
+			iptun.Ttl = uint8(datum.Value[0])
+		case nl.IFLA_IPTUN_TOS:
+			iptun.Tos = uint8(datum.Value[0])
+		case nl.IFLA_IPTUN_PMTUDISC:
+			iptun.PMtuDisc = uint8(datum.Value[0])
+		}
+	}
+}
+
+func addVtiAttrs(vti *Vti, linkInfo *nl.RtAttr) {
+	data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil)
+
+	ip := vti.Local.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_VTI_LOCAL, []byte(ip))
+	}
+
+	ip = vti.Remote.To4()
+	if ip != nil {
+		nl.NewRtAttrChild(data, nl.IFLA_VTI_REMOTE, []byte(ip))
+	}
+
+	if vti.Link != 0 {
+		nl.NewRtAttrChild(data, nl.IFLA_VTI_LINK, nl.Uint32Attr(vti.Link))
+	}
+
+	nl.NewRtAttrChild(data, nl.IFLA_VTI_IKEY, htonl(vti.IKey))
+	nl.NewRtAttrChild(data, nl.IFLA_VTI_OKEY, htonl(vti.OKey))
+}
+
+func parseVtiData(link Link, data []syscall.NetlinkRouteAttr) {
+	vti := link.(*Vti)
+	for _, datum := range data {
+		switch 
datum.Attr.Type { + case nl.IFLA_VTI_LOCAL: + vti.Local = net.IP(datum.Value[0:4]) + case nl.IFLA_VTI_REMOTE: + vti.Remote = net.IP(datum.Value[0:4]) + case nl.IFLA_VTI_IKEY: + vti.IKey = ntohl(datum.Value[0:4]) + case nl.IFLA_VTI_OKEY: + vti.OKey = ntohl(datum.Value[0:4]) + } + } +} + +func addVrfAttrs(vrf *Vrf, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + b := make([]byte, 4) + native.PutUint32(b, uint32(vrf.Table)) + nl.NewRtAttrChild(data, nl.IFLA_VRF_TABLE, b) +} + +func parseVrfData(link Link, data []syscall.NetlinkRouteAttr) { + vrf := link.(*Vrf) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_VRF_TABLE: + vrf.Table = native.Uint32(datum.Value[0:4]) + } + } +} + +func addBridgeAttrs(bridge *Bridge, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + if bridge.MulticastSnooping != nil { + nl.NewRtAttrChild(data, nl.IFLA_BR_MCAST_SNOOPING, boolToByte(*bridge.MulticastSnooping)) + } + if bridge.HelloTime != nil { + nl.NewRtAttrChild(data, nl.IFLA_BR_HELLO_TIME, nl.Uint32Attr(*bridge.HelloTime)) + } +} + +func parseBridgeData(bridge Link, data []syscall.NetlinkRouteAttr) { + br := bridge.(*Bridge) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_BR_HELLO_TIME: + helloTime := native.Uint32(datum.Value[0:4]) + br.HelloTime = &helloTime + case nl.IFLA_BR_MCAST_SNOOPING: + mcastSnooping := datum.Value[0] == 1 + br.MulticastSnooping = &mcastSnooping + } + } +} + +func addGTPAttrs(gtp *GTP, linkInfo *nl.RtAttr) { + data := nl.NewRtAttrChild(linkInfo, nl.IFLA_INFO_DATA, nil) + nl.NewRtAttrChild(data, nl.IFLA_GTP_FD0, nl.Uint32Attr(uint32(gtp.FD0))) + nl.NewRtAttrChild(data, nl.IFLA_GTP_FD1, nl.Uint32Attr(uint32(gtp.FD1))) + nl.NewRtAttrChild(data, nl.IFLA_GTP_PDP_HASHSIZE, nl.Uint32Attr(131072)) + if gtp.Role != nl.GTP_ROLE_GGSN { + nl.NewRtAttrChild(data, nl.IFLA_GTP_ROLE, nl.Uint32Attr(uint32(gtp.Role))) + } +} + +func parseGTPData(link Link, data []syscall.NetlinkRouteAttr) { + gtp := link.(*GTP) + for _, datum := range data { + switch datum.Attr.Type { + case nl.IFLA_GTP_FD0: + gtp.FD0 = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_FD1: + gtp.FD1 = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_PDP_HASHSIZE: + gtp.PDPHashsize = int(native.Uint32(datum.Value)) + case nl.IFLA_GTP_ROLE: + gtp.Role = int(native.Uint32(datum.Value)) + } + } +} diff --git a/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go new file mode 100644 index 0000000000..310bd33d8d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/link_tuntap_linux.go @@ -0,0 +1,14 @@ +package netlink + +// ideally golang.org/x/sys/unix would define IfReq but it only has +// IFNAMSIZ, hence this minimalistic implementation +const ( + SizeOfIfReq = 40 + IFNAMSIZ = 16 +) + +type ifReq struct { + Name [IFNAMSIZ]byte + Flags uint16 + pad [SizeOfIfReq - IFNAMSIZ - 2]byte +} diff --git a/vendor/github.com/vishvananda/netlink/neigh.go b/vendor/github.com/vishvananda/netlink/neigh.go new file mode 100644 index 0000000000..6a6f71ce86 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/neigh.go @@ -0,0 +1,23 @@ +package netlink + +import ( + "fmt" + "net" +) + +// Neigh represents a link layer neighbor from netlink. 
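+//
+// For example, a permanent ARP entry could be expressed as (all values
+// here are illustrative assumptions):
+//
+//	hw, _ := net.ParseMAC("aa:bb:cc:dd:ee:ff")
+//	n := Neigh{
+//		LinkIndex:    2, // index of the target device
+//		IP:           net.ParseIP("10.0.0.2"),
+//		HardwareAddr: hw,
+//		State:        NUD_PERMANENT,
+//	}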
+type Neigh struct {
+	LinkIndex    int
+	Family       int
+	State        int
+	Type         int
+	Flags        int
+	IP           net.IP
+	HardwareAddr net.HardwareAddr
+	LLIPAddr     net.IP //Used in the case of NHRP
+}
+
+// String returns $ip $hwaddr
+func (neigh *Neigh) String() string {
+	return fmt.Sprintf("%s %s", neigh.IP, neigh.HardwareAddr)
+}
diff --git a/vendor/github.com/vishvananda/netlink/neigh_linux.go b/vendor/github.com/vishvananda/netlink/neigh_linux.go
new file mode 100644
index 0000000000..5edc8b4125
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/neigh_linux.go
@@ -0,0 +1,275 @@
+package netlink
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+const (
+	NDA_UNSPEC = iota
+	NDA_DST
+	NDA_LLADDR
+	NDA_CACHEINFO
+	NDA_PROBES
+	NDA_VLAN
+	NDA_PORT
+	NDA_VNI
+	NDA_IFINDEX
+	NDA_MAX = NDA_IFINDEX
+)
+
+// Neighbor Cache Entry States.
+const (
+	NUD_NONE       = 0x00
+	NUD_INCOMPLETE = 0x01
+	NUD_REACHABLE  = 0x02
+	NUD_STALE      = 0x04
+	NUD_DELAY      = 0x08
+	NUD_PROBE      = 0x10
+	NUD_FAILED     = 0x20
+	NUD_NOARP      = 0x40
+	NUD_PERMANENT  = 0x80
+)
+
+// Neighbor Flags
+const (
+	NTF_USE    = 0x01
+	NTF_SELF   = 0x02
+	NTF_MASTER = 0x04
+	NTF_PROXY  = 0x08
+	NTF_ROUTER = 0x80
+)
+
+type Ndmsg struct {
+	Family uint8
+	Index  uint32
+	State  uint16
+	Flags  uint8
+	Type   uint8
+}
+
+func deserializeNdmsg(b []byte) *Ndmsg {
+	var dummy Ndmsg
+	return (*Ndmsg)(unsafe.Pointer(&b[0:unsafe.Sizeof(dummy)][0]))
+}
+
+func (msg *Ndmsg) Serialize() []byte {
+	return (*(*[unsafe.Sizeof(*msg)]byte)(unsafe.Pointer(msg)))[:]
+}
+
+func (msg *Ndmsg) Len() int {
+	return int(unsafe.Sizeof(*msg))
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func NeighAdd(neigh *Neigh) error {
+	return pkgHandle.NeighAdd(neigh)
+}
+
+// NeighAdd will add an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh add ....`
+func (h *Handle) NeighAdd(neigh *Neigh) error {
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func NeighSet(neigh *Neigh) error {
+	return pkgHandle.NeighSet(neigh)
+}
+
+// NeighSet will add or replace an IP to MAC mapping to the ARP table
+// Equivalent to: `ip neigh replace....`
+func (h *Handle) NeighSet(neigh *Neigh) error {
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE)
+}
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func NeighAppend(neigh *Neigh) error {
+	return pkgHandle.NeighAppend(neigh)
+}
+
+// NeighAppend will append an entry to FDB
+// Equivalent to: `bridge fdb append...`
+func (h *Handle) NeighAppend(neigh *Neigh) error {
+	return h.neighAdd(neigh, syscall.NLM_F_CREATE|syscall.NLM_F_APPEND)
+}
+
+// neighAdd adds a neighbor entry using the netlink flags in mode.
+func neighAdd(neigh *Neigh, mode int) error {
+	return pkgHandle.neighAdd(neigh, mode)
+}
+
+// neighAdd adds a neighbor entry using the netlink flags in mode.
+func (h *Handle) neighAdd(neigh *Neigh, mode int) error {
+	req := h.newNetlinkRequest(syscall.RTM_NEWNEIGH, mode|syscall.NLM_F_ACK)
+	return neighHandle(neigh, req)
+}
+
+// NeighDel will delete a neighbor entry from the ARP/FDB table.
+// Equivalent to: `ip neigh del ...`
+func NeighDel(neigh *Neigh) error {
+	return pkgHandle.NeighDel(neigh)
+}
+
+// NeighDel will delete a neighbor entry from the ARP/FDB table.
+// Equivalent to: `ip neigh del ...`
+func (h *Handle) NeighDel(neigh *Neigh) error {
+	req := h.newNetlinkRequest(syscall.RTM_DELNEIGH, syscall.NLM_F_ACK)
+	return neighHandle(neigh, req)
+}
+
+func neighHandle(neigh *Neigh, req *nl.NetlinkRequest) error {
+	var family int
+
+	if neigh.Family > 0 {
+		family = neigh.Family
+	} else {
+		family = nl.GetIPFamily(neigh.IP)
+	}
+
+	msg := Ndmsg{
+		Family: uint8(family),
+		Index:  uint32(neigh.LinkIndex),
+		State:  uint16(neigh.State),
+		Type:   uint8(neigh.Type),
+		Flags:  uint8(neigh.Flags),
+	}
+	req.AddData(&msg)
+
+	ipData := neigh.IP.To4()
+	if ipData == nil {
+		ipData = neigh.IP.To16()
+	}
+
+	dstData := nl.NewRtAttr(NDA_DST, ipData)
+	req.AddData(dstData)
+
+	if neigh.LLIPAddr != nil {
+		llIPData := nl.NewRtAttr(NDA_LLADDR, neigh.LLIPAddr.To4())
+		req.AddData(llIPData)
+	} else if neigh.Flags != NTF_PROXY || neigh.HardwareAddr != nil {
+		hwData := nl.NewRtAttr(NDA_LLADDR, []byte(neigh.HardwareAddr))
+		req.AddData(hwData)
+	}
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func NeighList(linkIndex, family int) ([]Neigh, error) {
+	return pkgHandle.NeighList(linkIndex, family)
+}
+
+// NeighProxyList gets a list of neighbor proxies in the system.
+// Equivalent to: `ip neighbor show proxy`.
+// The list can be filtered by link and ip family.
+func NeighProxyList(linkIndex, family int) ([]Neigh, error) {
+	return pkgHandle.NeighProxyList(linkIndex, family)
+}
+
+// NeighList gets a list of IP-MAC mappings in the system (ARP table).
+// Equivalent to: `ip neighbor show`.
+// The list can be filtered by link and ip family.
+func (h *Handle) NeighList(linkIndex, family int) ([]Neigh, error) {
+	return h.neighList(linkIndex, family, 0)
+}
+
+// NeighProxyList gets a list of neighbor proxies in the system.
+// Equivalent to: `ip neighbor show proxy`.
+// The list can be filtered by link and ip family.
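+//
+// A sketch of dumping every IPv4 neighbor on all interfaces
+// (illustrative only; error handling elided):
+//
+//	neighs, _ := NeighList(0, FAMILY_V4)
+//	for _, n := range neighs {
+//		// n.IP and n.HardwareAddr describe one ARP entry
+//	}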
+func (h *Handle) NeighProxyList(linkIndex, family int) ([]Neigh, error) {
+	return h.neighList(linkIndex, family, NTF_PROXY)
+}
+
+func (h *Handle) neighList(linkIndex, family, flags int) ([]Neigh, error) {
+	req := h.newNetlinkRequest(syscall.RTM_GETNEIGH, syscall.NLM_F_DUMP)
+	msg := Ndmsg{
+		Family: uint8(family),
+		Index:  uint32(linkIndex),
+		Flags:  uint8(flags),
+	}
+	req.AddData(&msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWNEIGH)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []Neigh
+	for _, m := range msgs {
+		ndm := deserializeNdmsg(m)
+		if linkIndex != 0 && int(ndm.Index) != linkIndex {
+			// Ignore messages from other interfaces
+			continue
+		}
+
+		neigh, err := NeighDeserialize(m)
+		if err != nil {
+			continue
+		}
+
+		res = append(res, *neigh)
+	}
+
+	return res, nil
+}
+
+func NeighDeserialize(m []byte) (*Neigh, error) {
+	msg := deserializeNdmsg(m)
+
+	neigh := Neigh{
+		LinkIndex: int(msg.Index),
+		Family:    int(msg.Family),
+		State:     int(msg.State),
+		Type:      int(msg.Type),
+		Flags:     int(msg.Flags),
+	}
+
+	attrs, err := nl.ParseRouteAttr(m[msg.Len():])
+	if err != nil {
+		return nil, err
+	}
+
+	// This should be cached for performance
+	// once per table dump
+	link, err := LinkByIndex(neigh.LinkIndex)
+	if err != nil {
+		return nil, err
+	}
+	encapType := link.Attrs().EncapType
+
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case NDA_DST:
+			neigh.IP = net.IP(attr.Value)
+		case NDA_LLADDR:
+			// BUG: Is this a bug in the netlink library?
+			// #define RTA_LENGTH(len) (RTA_ALIGN(sizeof(struct rtattr)) + (len))
+			// #define RTA_PAYLOAD(rta) ((int)((rta)->rta_len) - RTA_LENGTH(0))
+			attrLen := attr.Attr.Len - syscall.SizeofRtAttr
+			if attrLen == 4 && (encapType == "ipip" ||
+				encapType == "sit" ||
+				encapType == "gre") {
+				neigh.LLIPAddr = net.IP(attr.Value)
+			} else if attrLen == 16 &&
+				encapType == "tunnel6" {
+				neigh.IP = net.IP(attr.Value)
+			} else {
+				neigh.HardwareAddr = net.HardwareAddr(attr.Value)
+			}
+		}
+	}
+
+	return &neigh, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/netlink.go b/vendor/github.com/vishvananda/netlink/netlink.go
new file mode 100644
index 0000000000..fb159526e3
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/netlink.go
@@ -0,0 +1,39 @@
+// Package netlink provides a simple library for netlink. Netlink is
+// the interface a user-space program in linux uses to communicate with
+// the kernel. It can be used to add and remove interfaces, set up ip
+// addresses and routes, and configure ipsec. Netlink communication
+// requires elevated privileges, so in most cases this code needs to
+// be run as root. The low level primitives for netlink are contained
+// in the nl subpackage. This package attempts to provide a high-level
+// interface that is loosely modeled on the iproute2 cli.
+package netlink
+
+import (
+	"errors"
+	"net"
+)
+
+var (
+	// ErrNotImplemented is returned when a requested feature is not implemented.
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+// ParseIPNet parses a string in ip/net format and returns a net.IPNet.
+// This is valuable because addresses in netlink are often IPNets and
+// ParseCIDR returns an IPNet with the IP part set to the base IP of the
+// range.
+func ParseIPNet(s string) (*net.IPNet, error) {
+	ip, ipNet, err := net.ParseCIDR(s)
+	if err != nil {
+		return nil, err
+	}
+	return &net.IPNet{IP: ip, Mask: ipNet.Mask}, nil
+}
+
+// NewIPNet generates an IPNet from an ip address using a netmask of 32 or 128.
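+//
+// For example (illustrative):
+//
+//	NewIPNet(net.ParseIP("10.0.0.1"))    // -> 10.0.0.1/32
+//	NewIPNet(net.ParseIP("2001:db8::1")) // -> 2001:db8::1/128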
+func NewIPNet(ip net.IP) *net.IPNet { + if ip.To4() != nil { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(32, 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(128, 128)} +} diff --git a/vendor/github.com/vishvananda/netlink/netlink_linux.go b/vendor/github.com/vishvananda/netlink/netlink_linux.go new file mode 100644 index 0000000000..a20d293d87 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netlink_linux.go @@ -0,0 +1,11 @@ +package netlink + +import "github.com/vishvananda/netlink/nl" + +// Family type definitions +const ( + FAMILY_ALL = nl.FAMILY_ALL + FAMILY_V4 = nl.FAMILY_V4 + FAMILY_V6 = nl.FAMILY_V6 + FAMILY_MPLS = nl.FAMILY_MPLS +) diff --git a/vendor/github.com/vishvananda/netlink/netlink_unspecified.go b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go new file mode 100644 index 0000000000..86111b92ce --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/netlink_unspecified.go @@ -0,0 +1,225 @@ +// +build !linux + +package netlink + +import "net" + +func LinkSetUp(link Link) error { + return ErrNotImplemented +} + +func LinkSetDown(link Link) error { + return ErrNotImplemented +} + +func LinkSetMTU(link Link, mtu int) error { + return ErrNotImplemented +} + +func LinkSetMaster(link Link, master *Bridge) error { + return ErrNotImplemented +} + +func LinkSetNsPid(link Link, nspid int) error { + return ErrNotImplemented +} + +func LinkSetNsFd(link Link, fd int) error { + return ErrNotImplemented +} + +func LinkSetName(link Link, name string) error { + return ErrNotImplemented +} + +func LinkSetAlias(link Link, name string) error { + return ErrNotImplemented +} + +func LinkSetHardwareAddr(link Link, hwaddr net.HardwareAddr) error { + return ErrNotImplemented +} + +func LinkSetVfHardwareAddr(link Link, vf int, hwaddr net.HardwareAddr) error { + return ErrNotImplemented +} + +func LinkSetVfVlan(link Link, vf, vlan int) error { + return ErrNotImplemented +} + +func LinkSetVfTxRate(link Link, vf, rate int) error { + return ErrNotImplemented +} + +func LinkSetNoMaster(link Link) error { + return ErrNotImplemented +} + +func LinkSetMasterByIndex(link Link, masterIndex int) error { + return ErrNotImplemented +} + +func LinkSetXdpFd(link Link, fd int) error { + return ErrNotImplemented +} + +func LinkSetARPOff(link Link) error { + return ErrNotImplemented +} + +func LinkSetARPOn(link Link) error { + return ErrNotImplemented +} + +func LinkByName(name string) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkByAlias(alias string) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkByIndex(index int) (Link, error) { + return nil, ErrNotImplemented +} + +func LinkSetHairpin(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetGuard(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetFastLeave(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetLearning(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetRootBlock(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetFlood(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkSetTxQLen(link Link, qlen int) error { + return ErrNotImplemented +} + +func LinkAdd(link Link) error { + return ErrNotImplemented +} + +func LinkDel(link Link) error { + return ErrNotImplemented +} + +func SetHairpin(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetGuard(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFastLeave(link Link, 
mode bool) error { + return ErrNotImplemented +} + +func SetLearning(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetRootBlock(link Link, mode bool) error { + return ErrNotImplemented +} + +func SetFlood(link Link, mode bool) error { + return ErrNotImplemented +} + +func LinkList() ([]Link, error) { + return nil, ErrNotImplemented +} + +func AddrAdd(link Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrDel(link Link, addr *Addr) error { + return ErrNotImplemented +} + +func AddrList(link Link, family int) ([]Addr, error) { + return nil, ErrNotImplemented +} + +func RouteAdd(route *Route) error { + return ErrNotImplemented +} + +func RouteDel(route *Route) error { + return ErrNotImplemented +} + +func RouteList(link Link, family int) ([]Route, error) { + return nil, ErrNotImplemented +} + +func XfrmPolicyAdd(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyDel(policy *XfrmPolicy) error { + return ErrNotImplemented +} + +func XfrmPolicyList(family int) ([]XfrmPolicy, error) { + return nil, ErrNotImplemented +} + +func XfrmStateAdd(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateDel(policy *XfrmState) error { + return ErrNotImplemented +} + +func XfrmStateList(family int) ([]XfrmState, error) { + return nil, ErrNotImplemented +} + +func NeighAdd(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighSet(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighAppend(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighDel(neigh *Neigh) error { + return ErrNotImplemented +} + +func NeighList(linkIndex, family int) ([]Neigh, error) { + return nil, ErrNotImplemented +} + +func NeighDeserialize(m []byte) (*Neigh, error) { + return nil, ErrNotImplemented +} + +func SocketGet(local, remote net.Addr) (*Socket, error) { + return nil, ErrNotImplemented +} diff --git a/vendor/github.com/vishvananda/netlink/nl/addr_linux.go b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go new file mode 100644 index 0000000000..fe362e9fa7 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/addr_linux.go @@ -0,0 +1,76 @@ +package nl + +import ( + "syscall" + "unsafe" +) + +type IfAddrmsg struct { + syscall.IfAddrmsg +} + +func NewIfAddrmsg(family int) *IfAddrmsg { + return &IfAddrmsg{ + IfAddrmsg: syscall.IfAddrmsg{ + Family: uint8(family), + }, + } +} + +// struct ifaddrmsg { +// __u8 ifa_family; +// __u8 ifa_prefixlen; /* The prefix length */ +// __u8 ifa_flags; /* Flags */ +// __u8 ifa_scope; /* Address scope */ +// __u32 ifa_index; /* Link index */ +// }; + +// type IfAddrmsg struct { +// Family uint8 +// Prefixlen uint8 +// Flags uint8 +// Scope uint8 +// Index uint32 +// } +// SizeofIfAddrmsg = 0x8 + +func DeserializeIfAddrmsg(b []byte) *IfAddrmsg { + return (*IfAddrmsg)(unsafe.Pointer(&b[0:syscall.SizeofIfAddrmsg][0])) +} + +func (msg *IfAddrmsg) Serialize() []byte { + return (*(*[syscall.SizeofIfAddrmsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfAddrmsg) Len() int { + return syscall.SizeofIfAddrmsg +} + +// struct ifa_cacheinfo { +// __u32 ifa_prefered; +// __u32 ifa_valid; +// __u32 cstamp; /* created timestamp, hundredths of seconds */ +// __u32 tstamp; /* updated timestamp, hundredths of seconds */ +// }; + +const IFA_CACHEINFO = 6 +const SizeofIfaCacheInfo = 0x10 + +type IfaCacheInfo struct { + IfaPrefered uint32 + IfaValid uint32 + Cstamp uint32 + Tstamp uint32 +} + +func (msg *IfaCacheInfo) Len() int { + return SizeofIfaCacheInfo +} + +func 
DeserializeIfaCacheInfo(b []byte) *IfaCacheInfo { + return (*IfaCacheInfo)(unsafe.Pointer(&b[0:SizeofIfaCacheInfo][0])) +} + +func (msg *IfaCacheInfo) Serialize() []byte { + return (*(*[SizeofIfaCacheInfo]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go new file mode 100644 index 0000000000..6c0d333387 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/bridge_linux.go @@ -0,0 +1,74 @@ +package nl + +import ( + "fmt" + "unsafe" +) + +const ( + SizeofBridgeVlanInfo = 0x04 +) + +/* Bridge Flags */ +const ( + BRIDGE_FLAGS_MASTER = iota /* Bridge command to/from master */ + BRIDGE_FLAGS_SELF /* Bridge command to/from lowerdev */ +) + +/* Bridge management nested attributes + * [IFLA_AF_SPEC] = { + * [IFLA_BRIDGE_FLAGS] + * [IFLA_BRIDGE_MODE] + * [IFLA_BRIDGE_VLAN_INFO] + * } + */ +const ( + IFLA_BRIDGE_FLAGS = iota + IFLA_BRIDGE_MODE + IFLA_BRIDGE_VLAN_INFO +) + +const ( + BRIDGE_VLAN_INFO_MASTER = 1 << iota + BRIDGE_VLAN_INFO_PVID + BRIDGE_VLAN_INFO_UNTAGGED + BRIDGE_VLAN_INFO_RANGE_BEGIN + BRIDGE_VLAN_INFO_RANGE_END +) + +// struct bridge_vlan_info { +// __u16 flags; +// __u16 vid; +// }; + +type BridgeVlanInfo struct { + Flags uint16 + Vid uint16 +} + +func (b *BridgeVlanInfo) Serialize() []byte { + return (*(*[SizeofBridgeVlanInfo]byte)(unsafe.Pointer(b)))[:] +} + +func DeserializeBridgeVlanInfo(b []byte) *BridgeVlanInfo { + return (*BridgeVlanInfo)(unsafe.Pointer(&b[0:SizeofBridgeVlanInfo][0])) +} + +func (b *BridgeVlanInfo) PortVID() bool { + return b.Flags&BRIDGE_VLAN_INFO_PVID > 0 +} + +func (b *BridgeVlanInfo) EngressUntag() bool { + return b.Flags&BRIDGE_VLAN_INFO_UNTAGGED > 0 +} + +func (b *BridgeVlanInfo) String() string { + return fmt.Sprintf("%+v", *b) +} + +/* New extended info filters for IFLA_EXT_MASK */ +const ( + RTEXT_FILTER_VF = 1 << iota + RTEXT_FILTER_BRVLAN + RTEXT_FILTER_BRVLAN_COMPRESSED +) diff --git a/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go new file mode 100644 index 0000000000..380cc5967b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/conntrack_linux.go @@ -0,0 +1,189 @@ +package nl + +import "unsafe" + +// Track the message sizes for the correct serialization/deserialization +const ( + SizeofNfgenmsg = 4 + SizeofNfattr = 4 + SizeofNfConntrack = 376 + SizeofNfctTupleHead = 52 +) + +var L4ProtoMap = map[uint8]string{ + 6: "tcp", + 17: "udp", +} + +// All the following constants are coming from: +// https://github.com/torvalds/linux/blob/master/include/uapi/linux/netfilter/nfnetlink_conntrack.h + +// enum cntl_msg_types { +// IPCTNL_MSG_CT_NEW, +// IPCTNL_MSG_CT_GET, +// IPCTNL_MSG_CT_DELETE, +// IPCTNL_MSG_CT_GET_CTRZERO, +// IPCTNL_MSG_CT_GET_STATS_CPU, +// IPCTNL_MSG_CT_GET_STATS, +// IPCTNL_MSG_CT_GET_DYING, +// IPCTNL_MSG_CT_GET_UNCONFIRMED, +// +// IPCTNL_MSG_MAX +// }; +const ( + IPCTNL_MSG_CT_GET = 1 + IPCTNL_MSG_CT_DELETE = 2 +) + +// #define NFNETLINK_V0 0 +const ( + NFNETLINK_V0 = 0 +) + +// #define NLA_F_NESTED (1 << 15) +const ( + NLA_F_NESTED = (1 << 15) +) + +// enum ctattr_type { +// CTA_UNSPEC, +// CTA_TUPLE_ORIG, +// CTA_TUPLE_REPLY, +// CTA_STATUS, +// CTA_PROTOINFO, +// CTA_HELP, +// CTA_NAT_SRC, +// #define CTA_NAT CTA_NAT_SRC /* backwards compatibility */ +// CTA_TIMEOUT, +// CTA_MARK, +// CTA_COUNTERS_ORIG, +// CTA_COUNTERS_REPLY, +// CTA_USE, +// CTA_ID, +// CTA_NAT_DST, +// CTA_TUPLE_MASTER, +// CTA_SEQ_ADJ_ORIG, +// 
CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG, +// CTA_SEQ_ADJ_REPLY, +// CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY, +// CTA_SECMARK, /* obsolete */ +// CTA_ZONE, +// CTA_SECCTX, +// CTA_TIMESTAMP, +// CTA_MARK_MASK, +// CTA_LABELS, +// CTA_LABELS_MASK, +// __CTA_MAX +// }; +const ( + CTA_TUPLE_ORIG = 1 + CTA_TUPLE_REPLY = 2 + CTA_STATUS = 3 + CTA_TIMEOUT = 7 + CTA_MARK = 8 + CTA_PROTOINFO = 4 +) + +// enum ctattr_tuple { +// CTA_TUPLE_UNSPEC, +// CTA_TUPLE_IP, +// CTA_TUPLE_PROTO, +// CTA_TUPLE_ZONE, +// __CTA_TUPLE_MAX +// }; +// #define CTA_TUPLE_MAX (__CTA_TUPLE_MAX - 1) +const ( + CTA_TUPLE_IP = 1 + CTA_TUPLE_PROTO = 2 +) + +// enum ctattr_ip { +// CTA_IP_UNSPEC, +// CTA_IP_V4_SRC, +// CTA_IP_V4_DST, +// CTA_IP_V6_SRC, +// CTA_IP_V6_DST, +// __CTA_IP_MAX +// }; +// #define CTA_IP_MAX (__CTA_IP_MAX - 1) +const ( + CTA_IP_V4_SRC = 1 + CTA_IP_V4_DST = 2 + CTA_IP_V6_SRC = 3 + CTA_IP_V6_DST = 4 +) + +// enum ctattr_l4proto { +// CTA_PROTO_UNSPEC, +// CTA_PROTO_NUM, +// CTA_PROTO_SRC_PORT, +// CTA_PROTO_DST_PORT, +// CTA_PROTO_ICMP_ID, +// CTA_PROTO_ICMP_TYPE, +// CTA_PROTO_ICMP_CODE, +// CTA_PROTO_ICMPV6_ID, +// CTA_PROTO_ICMPV6_TYPE, +// CTA_PROTO_ICMPV6_CODE, +// __CTA_PROTO_MAX +// }; +// #define CTA_PROTO_MAX (__CTA_PROTO_MAX - 1) +const ( + CTA_PROTO_NUM = 1 + CTA_PROTO_SRC_PORT = 2 + CTA_PROTO_DST_PORT = 3 +) + +// enum ctattr_protoinfo { +// CTA_PROTOINFO_UNSPEC, +// CTA_PROTOINFO_TCP, +// CTA_PROTOINFO_DCCP, +// CTA_PROTOINFO_SCTP, +// __CTA_PROTOINFO_MAX +// }; +// #define CTA_PROTOINFO_MAX (__CTA_PROTOINFO_MAX - 1) +const ( + CTA_PROTOINFO_TCP = 1 +) + +// enum ctattr_protoinfo_tcp { +// CTA_PROTOINFO_TCP_UNSPEC, +// CTA_PROTOINFO_TCP_STATE, +// CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, +// CTA_PROTOINFO_TCP_WSCALE_REPLY, +// CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, +// CTA_PROTOINFO_TCP_FLAGS_REPLY, +// __CTA_PROTOINFO_TCP_MAX +// }; +// #define CTA_PROTOINFO_TCP_MAX (__CTA_PROTOINFO_TCP_MAX - 1) +const ( + CTA_PROTOINFO_TCP_STATE = 1 + CTA_PROTOINFO_TCP_WSCALE_ORIGINAL = 2 + CTA_PROTOINFO_TCP_WSCALE_REPLY = 3 + CTA_PROTOINFO_TCP_FLAGS_ORIGINAL = 4 + CTA_PROTOINFO_TCP_FLAGS_REPLY = 5 +) + +// /* General form of address family dependent message. 
+// */ +// struct nfgenmsg { +// __u8 nfgen_family; /* AF_xxx */ +// __u8 version; /* nfnetlink version */ +// __be16 res_id; /* resource id */ +// }; +type Nfgenmsg struct { + NfgenFamily uint8 + Version uint8 + ResId uint16 // big endian +} + +func (msg *Nfgenmsg) Len() int { + return SizeofNfgenmsg +} + +func DeserializeNfgenmsg(b []byte) *Nfgenmsg { + return (*Nfgenmsg)(unsafe.Pointer(&b[0:SizeofNfgenmsg][0])) +} + +func (msg *Nfgenmsg) Serialize() []byte { + return (*(*[SizeofNfgenmsg]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go new file mode 100644 index 0000000000..81b46f2c79 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/genetlink_linux.go @@ -0,0 +1,89 @@ +package nl + +import ( + "unsafe" +) + +const SizeofGenlmsg = 4 + +const ( + GENL_ID_CTRL = 0x10 + GENL_CTRL_VERSION = 2 + GENL_CTRL_NAME = "nlctrl" +) + +const ( + GENL_CTRL_CMD_GETFAMILY = 3 +) + +const ( + GENL_CTRL_ATTR_UNSPEC = iota + GENL_CTRL_ATTR_FAMILY_ID + GENL_CTRL_ATTR_FAMILY_NAME + GENL_CTRL_ATTR_VERSION + GENL_CTRL_ATTR_HDRSIZE + GENL_CTRL_ATTR_MAXATTR + GENL_CTRL_ATTR_OPS + GENL_CTRL_ATTR_MCAST_GROUPS +) + +const ( + GENL_CTRL_ATTR_OP_UNSPEC = iota + GENL_CTRL_ATTR_OP_ID + GENL_CTRL_ATTR_OP_FLAGS +) + +const ( + GENL_ADMIN_PERM = 1 << iota + GENL_CMD_CAP_DO + GENL_CMD_CAP_DUMP + GENL_CMD_CAP_HASPOL +) + +const ( + GENL_CTRL_ATTR_MCAST_GRP_UNSPEC = iota + GENL_CTRL_ATTR_MCAST_GRP_NAME + GENL_CTRL_ATTR_MCAST_GRP_ID +) + +const ( + GENL_GTP_VERSION = 0 + GENL_GTP_NAME = "gtp" +) + +const ( + GENL_GTP_CMD_NEWPDP = iota + GENL_GTP_CMD_DELPDP + GENL_GTP_CMD_GETPDP +) + +const ( + GENL_GTP_ATTR_UNSPEC = iota + GENL_GTP_ATTR_LINK + GENL_GTP_ATTR_VERSION + GENL_GTP_ATTR_TID + GENL_GTP_ATTR_PEER_ADDRESS + GENL_GTP_ATTR_MS_ADDRESS + GENL_GTP_ATTR_FLOW + GENL_GTP_ATTR_NET_NS_FD + GENL_GTP_ATTR_I_TEI + GENL_GTP_ATTR_O_TEI + GENL_GTP_ATTR_PAD +) + +type Genlmsg struct { + Command uint8 + Version uint8 +} + +func (msg *Genlmsg) Len() int { + return SizeofGenlmsg +} + +func DeserializeGenlmsg(b []byte) *Genlmsg { + return (*Genlmsg)(unsafe.Pointer(&b[0:SizeofGenlmsg][0])) +} + +func (msg *Genlmsg) Serialize() []byte { + return (*(*[SizeofGenlmsg]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/link_linux.go b/vendor/github.com/vishvananda/netlink/nl/link_linux.go new file mode 100644 index 0000000000..9ae65a12c2 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/link_linux.go @@ -0,0 +1,556 @@ +package nl + +import ( + "syscall" + "unsafe" +) + +const ( + DEFAULT_CHANGE = 0xFFFFFFFF + // doesn't exist in syscall + IFLA_VFINFO_LIST = syscall.IFLA_IFALIAS + 1 + iota + IFLA_STATS64 + IFLA_VF_PORTS + IFLA_PORT_SELF + IFLA_AF_SPEC + IFLA_GROUP + IFLA_NET_NS_FD + IFLA_EXT_MASK + IFLA_PROMISCUITY + IFLA_NUM_TX_QUEUES + IFLA_NUM_RX_QUEUES + IFLA_CARRIER + IFLA_PHYS_PORT_ID + IFLA_CARRIER_CHANGES + IFLA_PHYS_SWITCH_ID + IFLA_LINK_NETNSID + IFLA_PHYS_PORT_NAME + IFLA_PROTO_DOWN + IFLA_GSO_MAX_SEGS + IFLA_GSO_MAX_SIZE + IFLA_PAD + IFLA_XDP +) + +const ( + IFLA_INFO_UNSPEC = iota + IFLA_INFO_KIND + IFLA_INFO_DATA + IFLA_INFO_XSTATS + IFLA_INFO_MAX = IFLA_INFO_XSTATS +) + +const ( + IFLA_VLAN_UNSPEC = iota + IFLA_VLAN_ID + IFLA_VLAN_FLAGS + IFLA_VLAN_EGRESS_QOS + IFLA_VLAN_INGRESS_QOS + IFLA_VLAN_PROTOCOL + IFLA_VLAN_MAX = IFLA_VLAN_PROTOCOL +) + +const ( + VETH_INFO_UNSPEC = iota + VETH_INFO_PEER + VETH_INFO_MAX = VETH_INFO_PEER +) + +const ( + 
IFLA_VXLAN_UNSPEC = iota + IFLA_VXLAN_ID + IFLA_VXLAN_GROUP + IFLA_VXLAN_LINK + IFLA_VXLAN_LOCAL + IFLA_VXLAN_TTL + IFLA_VXLAN_TOS + IFLA_VXLAN_LEARNING + IFLA_VXLAN_AGEING + IFLA_VXLAN_LIMIT + IFLA_VXLAN_PORT_RANGE + IFLA_VXLAN_PROXY + IFLA_VXLAN_RSC + IFLA_VXLAN_L2MISS + IFLA_VXLAN_L3MISS + IFLA_VXLAN_PORT + IFLA_VXLAN_GROUP6 + IFLA_VXLAN_LOCAL6 + IFLA_VXLAN_UDP_CSUM + IFLA_VXLAN_UDP_ZERO_CSUM6_TX + IFLA_VXLAN_UDP_ZERO_CSUM6_RX + IFLA_VXLAN_REMCSUM_TX + IFLA_VXLAN_REMCSUM_RX + IFLA_VXLAN_GBP + IFLA_VXLAN_REMCSUM_NOPARTIAL + IFLA_VXLAN_FLOWBASED + IFLA_VXLAN_MAX = IFLA_VXLAN_FLOWBASED +) + +const ( + BRIDGE_MODE_UNSPEC = iota + BRIDGE_MODE_HAIRPIN +) + +const ( + IFLA_BRPORT_UNSPEC = iota + IFLA_BRPORT_STATE + IFLA_BRPORT_PRIORITY + IFLA_BRPORT_COST + IFLA_BRPORT_MODE + IFLA_BRPORT_GUARD + IFLA_BRPORT_PROTECT + IFLA_BRPORT_FAST_LEAVE + IFLA_BRPORT_LEARNING + IFLA_BRPORT_UNICAST_FLOOD + IFLA_BRPORT_PROXYARP + IFLA_BRPORT_LEARNING_SYNC + IFLA_BRPORT_PROXYARP_WIFI + IFLA_BRPORT_MAX = IFLA_BRPORT_PROXYARP_WIFI +) + +const ( + IFLA_IPVLAN_UNSPEC = iota + IFLA_IPVLAN_MODE + IFLA_IPVLAN_MAX = IFLA_IPVLAN_MODE +) + +const ( + IFLA_MACVLAN_UNSPEC = iota + IFLA_MACVLAN_MODE + IFLA_MACVLAN_FLAGS + IFLA_MACVLAN_MAX = IFLA_MACVLAN_FLAGS +) + +const ( + MACVLAN_MODE_PRIVATE = 1 + MACVLAN_MODE_VEPA = 2 + MACVLAN_MODE_BRIDGE = 4 + MACVLAN_MODE_PASSTHRU = 8 + MACVLAN_MODE_SOURCE = 16 +) + +const ( + IFLA_BOND_UNSPEC = iota + IFLA_BOND_MODE + IFLA_BOND_ACTIVE_SLAVE + IFLA_BOND_MIIMON + IFLA_BOND_UPDELAY + IFLA_BOND_DOWNDELAY + IFLA_BOND_USE_CARRIER + IFLA_BOND_ARP_INTERVAL + IFLA_BOND_ARP_IP_TARGET + IFLA_BOND_ARP_VALIDATE + IFLA_BOND_ARP_ALL_TARGETS + IFLA_BOND_PRIMARY + IFLA_BOND_PRIMARY_RESELECT + IFLA_BOND_FAIL_OVER_MAC + IFLA_BOND_XMIT_HASH_POLICY + IFLA_BOND_RESEND_IGMP + IFLA_BOND_NUM_PEER_NOTIF + IFLA_BOND_ALL_SLAVES_ACTIVE + IFLA_BOND_MIN_LINKS + IFLA_BOND_LP_INTERVAL + IFLA_BOND_PACKETS_PER_SLAVE + IFLA_BOND_AD_LACP_RATE + IFLA_BOND_AD_SELECT + IFLA_BOND_AD_INFO + IFLA_BOND_AD_ACTOR_SYS_PRIO + IFLA_BOND_AD_USER_PORT_KEY + IFLA_BOND_AD_ACTOR_SYSTEM + IFLA_BOND_TLB_DYNAMIC_LB +) + +const ( + IFLA_BOND_AD_INFO_UNSPEC = iota + IFLA_BOND_AD_INFO_AGGREGATOR + IFLA_BOND_AD_INFO_NUM_PORTS + IFLA_BOND_AD_INFO_ACTOR_KEY + IFLA_BOND_AD_INFO_PARTNER_KEY + IFLA_BOND_AD_INFO_PARTNER_MAC +) + +const ( + IFLA_BOND_SLAVE_UNSPEC = iota + IFLA_BOND_SLAVE_STATE + IFLA_BOND_SLAVE_MII_STATUS + IFLA_BOND_SLAVE_LINK_FAILURE_COUNT + IFLA_BOND_SLAVE_PERM_HWADDR + IFLA_BOND_SLAVE_QUEUE_ID + IFLA_BOND_SLAVE_AD_AGGREGATOR_ID +) + +const ( + IFLA_GRE_UNSPEC = iota + IFLA_GRE_LINK + IFLA_GRE_IFLAGS + IFLA_GRE_OFLAGS + IFLA_GRE_IKEY + IFLA_GRE_OKEY + IFLA_GRE_LOCAL + IFLA_GRE_REMOTE + IFLA_GRE_TTL + IFLA_GRE_TOS + IFLA_GRE_PMTUDISC + IFLA_GRE_ENCAP_LIMIT + IFLA_GRE_FLOWINFO + IFLA_GRE_FLAGS + IFLA_GRE_ENCAP_TYPE + IFLA_GRE_ENCAP_FLAGS + IFLA_GRE_ENCAP_SPORT + IFLA_GRE_ENCAP_DPORT + IFLA_GRE_COLLECT_METADATA + IFLA_GRE_MAX = IFLA_GRE_COLLECT_METADATA +) + +const ( + GRE_CSUM = 0x8000 + GRE_ROUTING = 0x4000 + GRE_KEY = 0x2000 + GRE_SEQ = 0x1000 + GRE_STRICT = 0x0800 + GRE_REC = 0x0700 + GRE_FLAGS = 0x00F8 + GRE_VERSION = 0x0007 +) + +const ( + IFLA_VF_INFO_UNSPEC = iota + IFLA_VF_INFO + IFLA_VF_INFO_MAX = IFLA_VF_INFO +) + +const ( + IFLA_VF_UNSPEC = iota + IFLA_VF_MAC /* Hardware queue specific attributes */ + IFLA_VF_VLAN + IFLA_VF_TX_RATE /* Max TX Bandwidth Allocation */ + IFLA_VF_SPOOFCHK /* Spoof Checking on/off switch */ + IFLA_VF_LINK_STATE /* link state enable/disable/auto switch */ + IFLA_VF_RATE /* Min and Max TX 
Bandwidth Allocation */ + IFLA_VF_RSS_QUERY_EN /* RSS Redirection Table and Hash Key query + * on/off switch + */ + IFLA_VF_STATS /* network device statistics */ + IFLA_VF_TRUST /* Trust state of VF */ + IFLA_VF_MAX = IFLA_VF_TRUST +) + +const ( + IFLA_VF_LINK_STATE_AUTO = iota /* link state of the uplink */ + IFLA_VF_LINK_STATE_ENABLE /* link always up */ + IFLA_VF_LINK_STATE_DISABLE /* link always down */ + IFLA_VF_LINK_STATE_MAX = IFLA_VF_LINK_STATE_DISABLE +) + +const ( + IFLA_VF_STATS_RX_PACKETS = iota + IFLA_VF_STATS_TX_PACKETS + IFLA_VF_STATS_RX_BYTES + IFLA_VF_STATS_TX_BYTES + IFLA_VF_STATS_BROADCAST + IFLA_VF_STATS_MULTICAST + IFLA_VF_STATS_MAX = IFLA_VF_STATS_MULTICAST +) + +const ( + SizeofVfMac = 0x24 + SizeofVfVlan = 0x0c + SizeofVfTxRate = 0x08 + SizeofVfRate = 0x0c + SizeofVfSpoofchk = 0x08 + SizeofVfLinkState = 0x08 + SizeofVfRssQueryEn = 0x08 + SizeofVfTrust = 0x08 +) + +// struct ifla_vf_mac { +// __u32 vf; +// __u8 mac[32]; /* MAX_ADDR_LEN */ +// }; + +type VfMac struct { + Vf uint32 + Mac [32]byte +} + +func (msg *VfMac) Len() int { + return SizeofVfMac +} + +func DeserializeVfMac(b []byte) *VfMac { + return (*VfMac)(unsafe.Pointer(&b[0:SizeofVfMac][0])) +} + +func (msg *VfMac) Serialize() []byte { + return (*(*[SizeofVfMac]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_vlan { +// __u32 vf; +// __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */ +// __u32 qos; +// }; + +type VfVlan struct { + Vf uint32 + Vlan uint32 + Qos uint32 +} + +func (msg *VfVlan) Len() int { + return SizeofVfVlan +} + +func DeserializeVfVlan(b []byte) *VfVlan { + return (*VfVlan)(unsafe.Pointer(&b[0:SizeofVfVlan][0])) +} + +func (msg *VfVlan) Serialize() []byte { + return (*(*[SizeofVfVlan]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_tx_rate { +// __u32 vf; +// __u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */ +// }; + +type VfTxRate struct { + Vf uint32 + Rate uint32 +} + +func (msg *VfTxRate) Len() int { + return SizeofVfTxRate +} + +func DeserializeVfTxRate(b []byte) *VfTxRate { + return (*VfTxRate)(unsafe.Pointer(&b[0:SizeofVfTxRate][0])) +} + +func (msg *VfTxRate) Serialize() []byte { + return (*(*[SizeofVfTxRate]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_rate { +// __u32 vf; +// __u32 min_tx_rate; /* Min Bandwidth in Mbps */ +// __u32 max_tx_rate; /* Max Bandwidth in Mbps */ +// }; + +type VfRate struct { + Vf uint32 + MinTxRate uint32 + MaxTxRate uint32 +} + +func (msg *VfRate) Len() int { + return SizeofVfRate +} + +func DeserializeVfRate(b []byte) *VfRate { + return (*VfRate)(unsafe.Pointer(&b[0:SizeofVfRate][0])) +} + +func (msg *VfRate) Serialize() []byte { + return (*(*[SizeofVfRate]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_spoofchk { +// __u32 vf; +// __u32 setting; +// }; + +type VfSpoofchk struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfSpoofchk) Len() int { + return SizeofVfSpoofchk +} + +func DeserializeVfSpoofchk(b []byte) *VfSpoofchk { + return (*VfSpoofchk)(unsafe.Pointer(&b[0:SizeofVfSpoofchk][0])) +} + +func (msg *VfSpoofchk) Serialize() []byte { + return (*(*[SizeofVfSpoofchk]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_link_state { +// __u32 vf; +// __u32 link_state; +// }; + +type VfLinkState struct { + Vf uint32 + LinkState uint32 +} + +func (msg *VfLinkState) Len() int { + return SizeofVfLinkState +} + +func DeserializeVfLinkState(b []byte) *VfLinkState { + return (*VfLinkState)(unsafe.Pointer(&b[0:SizeofVfLinkState][0])) +} + +func (msg *VfLinkState) Serialize() []byte { + return 
(*(*[SizeofVfLinkState]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_rss_query_en { +// __u32 vf; +// __u32 setting; +// }; + +type VfRssQueryEn struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfRssQueryEn) Len() int { + return SizeofVfRssQueryEn +} + +func DeserializeVfRssQueryEn(b []byte) *VfRssQueryEn { + return (*VfRssQueryEn)(unsafe.Pointer(&b[0:SizeofVfRssQueryEn][0])) +} + +func (msg *VfRssQueryEn) Serialize() []byte { + return (*(*[SizeofVfRssQueryEn]byte)(unsafe.Pointer(msg)))[:] +} + +// struct ifla_vf_trust { +// __u32 vf; +// __u32 setting; +// }; + +type VfTrust struct { + Vf uint32 + Setting uint32 +} + +func (msg *VfTrust) Len() int { + return SizeofVfTrust +} + +func DeserializeVfTrust(b []byte) *VfTrust { + return (*VfTrust)(unsafe.Pointer(&b[0:SizeofVfTrust][0])) +} + +func (msg *VfTrust) Serialize() []byte { + return (*(*[SizeofVfTrust]byte)(unsafe.Pointer(msg)))[:] +} + +const ( + XDP_FLAGS_UPDATE_IF_NOEXIST = 1 << iota + XDP_FLAGS_SKB_MODE + XDP_FLAGS_DRV_MODE + XDP_FLAGS_MASK = XDP_FLAGS_UPDATE_IF_NOEXIST | XDP_FLAGS_SKB_MODE | XDP_FLAGS_DRV_MODE +) + +const ( + IFLA_XDP_UNSPEC = iota + IFLA_XDP_FD /* fd of xdp program to attach, or -1 to remove */ + IFLA_XDP_ATTACHED /* read-only bool indicating if prog is attached */ + IFLA_XDP_FLAGS /* xdp prog related flags */ + IFLA_XDP_PROG_ID /* xdp prog id */ + IFLA_XDP_MAX = IFLA_XDP_PROG_ID +) + +const ( + IFLA_IPTUN_UNSPEC = iota + IFLA_IPTUN_LINK + IFLA_IPTUN_LOCAL + IFLA_IPTUN_REMOTE + IFLA_IPTUN_TTL + IFLA_IPTUN_TOS + IFLA_IPTUN_ENCAP_LIMIT + IFLA_IPTUN_FLOWINFO + IFLA_IPTUN_FLAGS + IFLA_IPTUN_PROTO + IFLA_IPTUN_PMTUDISC + IFLA_IPTUN_6RD_PREFIX + IFLA_IPTUN_6RD_RELAY_PREFIX + IFLA_IPTUN_6RD_PREFIXLEN + IFLA_IPTUN_6RD_RELAY_PREFIXLEN + IFLA_IPTUN_MAX = IFLA_IPTUN_6RD_RELAY_PREFIXLEN +) + +const ( + IFLA_VTI_UNSPEC = iota + IFLA_VTI_LINK + IFLA_VTI_IKEY + IFLA_VTI_OKEY + IFLA_VTI_LOCAL + IFLA_VTI_REMOTE + IFLA_VTI_MAX = IFLA_VTI_REMOTE +) + +const ( + IFLA_VRF_UNSPEC = iota + IFLA_VRF_TABLE +) + +const ( + IFLA_BR_UNSPEC = iota + IFLA_BR_FORWARD_DELAY + IFLA_BR_HELLO_TIME + IFLA_BR_MAX_AGE + IFLA_BR_AGEING_TIME + IFLA_BR_STP_STATE + IFLA_BR_PRIORITY + IFLA_BR_VLAN_FILTERING + IFLA_BR_VLAN_PROTOCOL + IFLA_BR_GROUP_FWD_MASK + IFLA_BR_ROOT_ID + IFLA_BR_BRIDGE_ID + IFLA_BR_ROOT_PORT + IFLA_BR_ROOT_PATH_COST + IFLA_BR_TOPOLOGY_CHANGE + IFLA_BR_TOPOLOGY_CHANGE_DETECTED + IFLA_BR_HELLO_TIMER + IFLA_BR_TCN_TIMER + IFLA_BR_TOPOLOGY_CHANGE_TIMER + IFLA_BR_GC_TIMER + IFLA_BR_GROUP_ADDR + IFLA_BR_FDB_FLUSH + IFLA_BR_MCAST_ROUTER + IFLA_BR_MCAST_SNOOPING + IFLA_BR_MCAST_QUERY_USE_IFADDR + IFLA_BR_MCAST_QUERIER + IFLA_BR_MCAST_HASH_ELASTICITY + IFLA_BR_MCAST_HASH_MAX + IFLA_BR_MCAST_LAST_MEMBER_CNT + IFLA_BR_MCAST_STARTUP_QUERY_CNT + IFLA_BR_MCAST_LAST_MEMBER_INTVL + IFLA_BR_MCAST_MEMBERSHIP_INTVL + IFLA_BR_MCAST_QUERIER_INTVL + IFLA_BR_MCAST_QUERY_INTVL + IFLA_BR_MCAST_QUERY_RESPONSE_INTVL + IFLA_BR_MCAST_STARTUP_QUERY_INTVL + IFLA_BR_NF_CALL_IPTABLES + IFLA_BR_NF_CALL_IP6TABLES + IFLA_BR_NF_CALL_ARPTABLES + IFLA_BR_VLAN_DEFAULT_PVID + IFLA_BR_PAD + IFLA_BR_VLAN_STATS_ENABLED + IFLA_BR_MCAST_STATS_ENABLED + IFLA_BR_MCAST_IGMP_VERSION + IFLA_BR_MCAST_MLD_VERSION + IFLA_BR_MAX = IFLA_BR_MCAST_MLD_VERSION +) + +const ( + IFLA_GTP_UNSPEC = iota + IFLA_GTP_FD0 + IFLA_GTP_FD1 + IFLA_GTP_PDP_HASHSIZE + IFLA_GTP_ROLE +) + +const ( + GTP_ROLE_GGSN = iota + GTP_ROLE_SGSN +) diff --git a/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go new file mode 100644 index 
0000000000..3915b7eec4 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/mpls_linux.go @@ -0,0 +1,36 @@ +package nl + +import "encoding/binary" + +const ( + MPLS_LS_LABEL_SHIFT = 12 + MPLS_LS_S_SHIFT = 8 +) + +func EncodeMPLSStack(labels ...int) []byte { + b := make([]byte, 4*len(labels)) + for idx, label := range labels { + l := label << MPLS_LS_LABEL_SHIFT + if idx == len(labels)-1 { + l |= 1 << MPLS_LS_S_SHIFT + } + binary.BigEndian.PutUint32(b[idx*4:], uint32(l)) + } + return b +} + +func DecodeMPLSStack(buf []byte) []int { + if len(buf)%4 != 0 { + return nil + } + stack := make([]int, 0, len(buf)/4) + for len(buf) > 0 { + l := binary.BigEndian.Uint32(buf[:4]) + buf = buf[4:] + stack = append(stack, int(l)>>MPLS_LS_LABEL_SHIFT) + if (l>>MPLS_LS_S_SHIFT)&1 > 0 { + break + } + } + return stack +} diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_linux.go b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go new file mode 100644 index 0000000000..72f7f6af3c --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/nl_linux.go @@ -0,0 +1,732 @@ +// Package nl has low level primitives for making Netlink calls. +package nl + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "runtime" + "sync" + "sync/atomic" + "syscall" + "unsafe" + + "github.com/vishvananda/netns" +) + +const ( + // Family type definitions + FAMILY_ALL = syscall.AF_UNSPEC + FAMILY_V4 = syscall.AF_INET + FAMILY_V6 = syscall.AF_INET6 + FAMILY_MPLS = AF_MPLS +) + +// SupportedNlFamilies contains the list of netlink families this netlink package supports +var SupportedNlFamilies = []int{syscall.NETLINK_ROUTE, syscall.NETLINK_XFRM, syscall.NETLINK_NETFILTER} + +var nextSeqNr uint32 + +// GetIPFamily returns the family type of a net.IP. +func GetIPFamily(ip net.IP) int { + if len(ip) <= net.IPv4len { + return FAMILY_V4 + } + if ip.To4() != nil { + return FAMILY_V4 + } + return FAMILY_V6 +} + +var nativeEndian binary.ByteOrder + +// Get native endianness for the system +func NativeEndian() binary.ByteOrder { + if nativeEndian == nil { + var x uint32 = 0x01020304 + if *(*byte)(unsafe.Pointer(&x)) == 0x01 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + } + return nativeEndian +} + +// Byte swap a 16 bit value if we aren't big endian +func Swap16(i uint16) uint16 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff00)>>8 | (i&0xff)<<8 +} + +// Byte swap a 32 bit value if aren't big endian +func Swap32(i uint32) uint32 { + if NativeEndian() == binary.BigEndian { + return i + } + return (i&0xff000000)>>24 | (i&0xff0000)>>8 | (i&0xff00)<<8 | (i&0xff)<<24 +} + +type NetlinkRequestData interface { + Len() int + Serialize() []byte +} + +// IfInfomsg is related to links, but it is used for list requests as well +type IfInfomsg struct { + syscall.IfInfomsg +} + +// Create an IfInfomsg with family specified +func NewIfInfomsg(family int) *IfInfomsg { + return &IfInfomsg{ + IfInfomsg: syscall.IfInfomsg{ + Family: uint8(family), + }, + } +} + +func DeserializeIfInfomsg(b []byte) *IfInfomsg { + return (*IfInfomsg)(unsafe.Pointer(&b[0:syscall.SizeofIfInfomsg][0])) +} + +func (msg *IfInfomsg) Serialize() []byte { + return (*(*[syscall.SizeofIfInfomsg]byte)(unsafe.Pointer(msg)))[:] +} + +func (msg *IfInfomsg) Len() int { + return syscall.SizeofIfInfomsg +} + +func (msg *IfInfomsg) EncapType() string { + switch msg.Type { + case 0: + return "generic" + case syscall.ARPHRD_ETHER: + return "ether" + case syscall.ARPHRD_EETHER: + return "eether" + case 
syscall.ARPHRD_AX25: + return "ax25" + case syscall.ARPHRD_PRONET: + return "pronet" + case syscall.ARPHRD_CHAOS: + return "chaos" + case syscall.ARPHRD_IEEE802: + return "ieee802" + case syscall.ARPHRD_ARCNET: + return "arcnet" + case syscall.ARPHRD_APPLETLK: + return "atalk" + case syscall.ARPHRD_DLCI: + return "dlci" + case syscall.ARPHRD_ATM: + return "atm" + case syscall.ARPHRD_METRICOM: + return "metricom" + case syscall.ARPHRD_IEEE1394: + return "ieee1394" + case syscall.ARPHRD_INFINIBAND: + return "infiniband" + case syscall.ARPHRD_SLIP: + return "slip" + case syscall.ARPHRD_CSLIP: + return "cslip" + case syscall.ARPHRD_SLIP6: + return "slip6" + case syscall.ARPHRD_CSLIP6: + return "cslip6" + case syscall.ARPHRD_RSRVD: + return "rsrvd" + case syscall.ARPHRD_ADAPT: + return "adapt" + case syscall.ARPHRD_ROSE: + return "rose" + case syscall.ARPHRD_X25: + return "x25" + case syscall.ARPHRD_HWX25: + return "hwx25" + case syscall.ARPHRD_PPP: + return "ppp" + case syscall.ARPHRD_HDLC: + return "hdlc" + case syscall.ARPHRD_LAPB: + return "lapb" + case syscall.ARPHRD_DDCMP: + return "ddcmp" + case syscall.ARPHRD_RAWHDLC: + return "rawhdlc" + case syscall.ARPHRD_TUNNEL: + return "ipip" + case syscall.ARPHRD_TUNNEL6: + return "tunnel6" + case syscall.ARPHRD_FRAD: + return "frad" + case syscall.ARPHRD_SKIP: + return "skip" + case syscall.ARPHRD_LOOPBACK: + return "loopback" + case syscall.ARPHRD_LOCALTLK: + return "ltalk" + case syscall.ARPHRD_FDDI: + return "fddi" + case syscall.ARPHRD_BIF: + return "bif" + case syscall.ARPHRD_SIT: + return "sit" + case syscall.ARPHRD_IPDDP: + return "ip/ddp" + case syscall.ARPHRD_IPGRE: + return "gre" + case syscall.ARPHRD_PIMREG: + return "pimreg" + case syscall.ARPHRD_HIPPI: + return "hippi" + case syscall.ARPHRD_ASH: + return "ash" + case syscall.ARPHRD_ECONET: + return "econet" + case syscall.ARPHRD_IRDA: + return "irda" + case syscall.ARPHRD_FCPP: + return "fcpp" + case syscall.ARPHRD_FCAL: + return "fcal" + case syscall.ARPHRD_FCPL: + return "fcpl" + case syscall.ARPHRD_FCFABRIC: + return "fcfb0" + case syscall.ARPHRD_FCFABRIC + 1: + return "fcfb1" + case syscall.ARPHRD_FCFABRIC + 2: + return "fcfb2" + case syscall.ARPHRD_FCFABRIC + 3: + return "fcfb3" + case syscall.ARPHRD_FCFABRIC + 4: + return "fcfb4" + case syscall.ARPHRD_FCFABRIC + 5: + return "fcfb5" + case syscall.ARPHRD_FCFABRIC + 6: + return "fcfb6" + case syscall.ARPHRD_FCFABRIC + 7: + return "fcfb7" + case syscall.ARPHRD_FCFABRIC + 8: + return "fcfb8" + case syscall.ARPHRD_FCFABRIC + 9: + return "fcfb9" + case syscall.ARPHRD_FCFABRIC + 10: + return "fcfb10" + case syscall.ARPHRD_FCFABRIC + 11: + return "fcfb11" + case syscall.ARPHRD_FCFABRIC + 12: + return "fcfb12" + case syscall.ARPHRD_IEEE802_TR: + return "tr" + case syscall.ARPHRD_IEEE80211: + return "ieee802.11" + case syscall.ARPHRD_IEEE80211_PRISM: + return "ieee802.11/prism" + case syscall.ARPHRD_IEEE80211_RADIOTAP: + return "ieee802.11/radiotap" + case syscall.ARPHRD_IEEE802154: + return "ieee802.15.4" + + case 65534: + return "none" + case 65535: + return "void" + } + return fmt.Sprintf("unknown%d", msg.Type) +} + +func rtaAlignOf(attrlen int) int { + return (attrlen + syscall.RTA_ALIGNTO - 1) & ^(syscall.RTA_ALIGNTO - 1) +} + +func NewIfInfomsgChild(parent *RtAttr, family int) *IfInfomsg { + msg := NewIfInfomsg(family) + parent.children = append(parent.children, msg) + return msg +} + +// Extend RtAttr to handle data and children +type RtAttr struct { + syscall.RtAttr + Data []byte + children []NetlinkRequestData +} + +// Create a 
new extended RtAttr object
+func NewRtAttr(attrType int, data []byte) *RtAttr {
+	return &RtAttr{
+		RtAttr: syscall.RtAttr{
+			Type: uint16(attrType),
+		},
+		children: []NetlinkRequestData{},
+		Data:     data,
+	}
+}
+
+// Create a new RtAttr object and add it as a child of an existing object
+func NewRtAttrChild(parent *RtAttr, attrType int, data []byte) *RtAttr {
+	attr := NewRtAttr(attrType, data)
+	parent.children = append(parent.children, attr)
+	return attr
+}
+
+func (a *RtAttr) Len() int {
+	if len(a.children) == 0 {
+		return (syscall.SizeofRtAttr + len(a.Data))
+	}
+
+	l := 0
+	for _, child := range a.children {
+		l += rtaAlignOf(child.Len())
+	}
+	l += syscall.SizeofRtAttr
+	return rtaAlignOf(l + len(a.Data))
+}
+
+// Serialize the RtAttr into a byte array.
+// This can't just be an unsafe cast because it must iterate through children.
+func (a *RtAttr) Serialize() []byte {
+	native := NativeEndian()
+
+	length := a.Len()
+	buf := make([]byte, rtaAlignOf(length))
+
+	next := 4
+	if a.Data != nil {
+		copy(buf[next:], a.Data)
+		next += rtaAlignOf(len(a.Data))
+	}
+	if len(a.children) > 0 {
+		for _, child := range a.children {
+			childBuf := child.Serialize()
+			copy(buf[next:], childBuf)
+			next += rtaAlignOf(len(childBuf))
+		}
+	}
+
+	if l := uint16(length); l != 0 {
+		native.PutUint16(buf[0:2], l)
+	}
+	native.PutUint16(buf[2:4], a.Type)
+	return buf
+}
+
+type NetlinkRequest struct {
+	syscall.NlMsghdr
+	Data    []NetlinkRequestData
+	RawData []byte
+	Sockets map[int]*SocketHandle
+}
+
+// Serialize the Netlink Request into a byte array
+func (req *NetlinkRequest) Serialize() []byte {
+	length := syscall.SizeofNlMsghdr
+	dataBytes := make([][]byte, len(req.Data))
+	for i, data := range req.Data {
+		dataBytes[i] = data.Serialize()
+		length = length + len(dataBytes[i])
+	}
+	length += len(req.RawData)
+
+	req.Len = uint32(length)
+	b := make([]byte, length)
+	hdr := (*(*[syscall.SizeofNlMsghdr]byte)(unsafe.Pointer(req)))[:]
+	next := syscall.SizeofNlMsghdr
+	copy(b[0:next], hdr)
+	for _, data := range dataBytes {
+		for _, dataByte := range data {
+			b[next] = dataByte
+			next = next + 1
+		}
+	}
+	// Add the raw data if any
+	if len(req.RawData) > 0 {
+		copy(b[next:length], req.RawData)
+	}
+	return b
+}
+
+func (req *NetlinkRequest) AddData(data NetlinkRequestData) {
+	if data != nil {
+		req.Data = append(req.Data, data)
+	}
+}
+
+// AddRawData adds raw bytes to the end of the NetlinkRequest object during serialization
+func (req *NetlinkRequest) AddRawData(data []byte) {
+	if data != nil {
+		req.RawData = append(req.RawData, data...)
+	}
+}
+
+// Execute the request against the given sockType.
+// Returns a list of netlink messages in serialized format, optionally filtered
+// by resType.
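+//
+// For example, a link dump might look like this (an illustrative sketch,
+// not part of the vendored code):
+//
+//	req := NewNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP)
+//	req.AddData(NewIfInfomsg(syscall.AF_UNSPEC))
+//	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWLINK)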
+func (req *NetlinkRequest) Execute(sockType int, resType uint16) ([][]byte, error) {
+	var (
+		s   *NetlinkSocket
+		err error
+	)
+
+	if req.Sockets != nil {
+		if sh, ok := req.Sockets[sockType]; ok {
+			s = sh.Socket
+			req.Seq = atomic.AddUint32(&sh.Seq, 1)
+		}
+	}
+	sharedSocket := s != nil
+
+	if s == nil {
+		s, err = getNetlinkSocket(sockType)
+		if err != nil {
+			return nil, err
+		}
+		defer s.Close()
+	} else {
+		s.Lock()
+		defer s.Unlock()
+	}
+
+	if err := s.Send(req); err != nil {
+		return nil, err
+	}
+
+	pid, err := s.GetPid()
+	if err != nil {
+		return nil, err
+	}
+
+	var res [][]byte
+
+done:
+	for {
+		msgs, err := s.Receive()
+		if err != nil {
+			return nil, err
+		}
+		for _, m := range msgs {
+			if m.Header.Seq != req.Seq {
+				if sharedSocket {
+					continue
+				}
+				return nil, fmt.Errorf("Wrong Seq nr %d, expected %d", m.Header.Seq, req.Seq)
+			}
+			if m.Header.Pid != pid {
+				return nil, fmt.Errorf("Wrong pid %d, expected %d", m.Header.Pid, pid)
+			}
+			if m.Header.Type == syscall.NLMSG_DONE {
+				break done
+			}
+			if m.Header.Type == syscall.NLMSG_ERROR {
+				native := NativeEndian()
+				error := int32(native.Uint32(m.Data[0:4]))
+				if error == 0 {
+					break done
+				}
+				return nil, syscall.Errno(-error)
+			}
+			if resType != 0 && m.Header.Type != resType {
+				continue
+			}
+			res = append(res, m.Data)
+			if m.Header.Flags&syscall.NLM_F_MULTI == 0 {
+				break done
+			}
+		}
+	}
+	return res, nil
+}
+
+// Create a new netlink request from proto and flags.
+// Note the Len value will be inaccurate once data is added until
+// the message is serialized
+func NewNetlinkRequest(proto, flags int) *NetlinkRequest {
+	return &NetlinkRequest{
+		NlMsghdr: syscall.NlMsghdr{
+			Len:   uint32(syscall.SizeofNlMsghdr),
+			Type:  uint16(proto),
+			Flags: syscall.NLM_F_REQUEST | uint16(flags),
+			Seq:   atomic.AddUint32(&nextSeqNr, 1),
+		},
+	}
+}
+
+type NetlinkSocket struct {
+	fd  int32
+	lsa syscall.SockaddrNetlink
+	sync.Mutex
+}
+
+func getNetlinkSocket(protocol int) (*NetlinkSocket, error) {
+	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW|syscall.SOCK_CLOEXEC, protocol)
+	if err != nil {
+		return nil, err
+	}
+	s := &NetlinkSocket{
+		fd: int32(fd),
+	}
+	s.lsa.Family = syscall.AF_NETLINK
+	if err := syscall.Bind(fd, &s.lsa); err != nil {
+		syscall.Close(fd)
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// GetNetlinkSocketAt opens a netlink socket in the network namespace newNs
+// and positions the thread back into the network namespace specified by curNs
+// when done. If curNs is closed, the function derives the current namespace and
+// moves back into it when done. If newNs is closed, the socket will be opened
+// in the current network namespace.
+func GetNetlinkSocketAt(newNs, curNs netns.NsHandle, protocol int) (*NetlinkSocket, error) {
+	c, err := executeInNetns(newNs, curNs)
+	if err != nil {
+		return nil, err
+	}
+	defer c()
+	return getNetlinkSocket(protocol)
+}
+
+// executeInNetns sets execution of the code following this call to the
+// network namespace newNs, then moves the thread back to curNs if open,
+// otherwise to the current netns at the time the function was invoked.
+// In case of success, the caller is expected to execute the returned function
+// at the end of the code that needs to be executed in the network namespace.
+// Example:
+// func jobAt(...) error {
+//      d, err := executeInNetns(...)
+//      if err != nil { return err}
+//      defer d()
+//      < code which needs to be executed in specific netns>
+//  }
+// TODO: this function probably belongs to the netns pkg.
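+//
+// Note that network namespaces apply per OS thread, which is why this
+// function locks the calling thread; the lock is released only when the
+// returned restore function runs.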
+func executeInNetns(newNs, curNs netns.NsHandle) (func(), error) {
+	var (
+		err       error
+		moveBack  func(netns.NsHandle) error
+		closeNs   func() error
+		unlockThd func()
+	)
+	restore := func() {
+		// order matters
+		if moveBack != nil {
+			moveBack(curNs)
+		}
+		if closeNs != nil {
+			closeNs()
+		}
+		if unlockThd != nil {
+			unlockThd()
+		}
+	}
+	if newNs.IsOpen() {
+		runtime.LockOSThread()
+		unlockThd = runtime.UnlockOSThread
+		if !curNs.IsOpen() {
+			if curNs, err = netns.Get(); err != nil {
+				restore()
+				return nil, fmt.Errorf("could not get current namespace while creating netlink socket: %v", err)
+			}
+			closeNs = curNs.Close
+		}
+		if err := netns.Set(newNs); err != nil {
+			restore()
+			return nil, fmt.Errorf("failed to set into network namespace %d while creating netlink socket: %v", newNs, err)
+		}
+		moveBack = netns.Set
+	}
+	return restore, nil
+}
+
+// Create a netlink socket with a given protocol (e.g. NETLINK_ROUTE)
+// and subscribe it to the multicast groups passed in the variable argument list.
+// Returns the netlink socket on which the Receive() method can be called
+// to retrieve the messages from the kernel.
+func Subscribe(protocol int, groups ...uint) (*NetlinkSocket, error) {
+	fd, err := syscall.Socket(syscall.AF_NETLINK, syscall.SOCK_RAW, protocol)
+	if err != nil {
+		return nil, err
+	}
+	s := &NetlinkSocket{
+		fd: int32(fd),
+	}
+	s.lsa.Family = syscall.AF_NETLINK
+
+	for _, g := range groups {
+		s.lsa.Groups |= (1 << (g - 1))
+	}
+
+	if err := syscall.Bind(fd, &s.lsa); err != nil {
+		syscall.Close(fd)
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// SubscribeAt works like Subscribe plus lets the caller choose the network
+// namespace in which the socket would be opened (newNs). Then control goes back
+// to curNs if open, otherwise to the netns at the time this function was called.
+func SubscribeAt(newNs, curNs netns.NsHandle, protocol int, groups ...uint) (*NetlinkSocket, error) {
+	c, err := executeInNetns(newNs, curNs)
+	if err != nil {
+		return nil, err
+	}
+	defer c()
+	return Subscribe(protocol, groups...)
+}
+
+func (s *NetlinkSocket) Close() {
+	fd := int(atomic.SwapInt32(&s.fd, -1))
+	syscall.Close(fd)
+}
+
+func (s *NetlinkSocket) GetFd() int {
+	return int(atomic.LoadInt32(&s.fd))
+}
+
+func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
+	fd := int(atomic.LoadInt32(&s.fd))
+	if fd < 0 {
+		return fmt.Errorf("Send called on a closed socket")
+	}
+	if err := syscall.Sendto(fd, request.Serialize(), 0, &s.lsa); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
+	fd := int(atomic.LoadInt32(&s.fd))
+	if fd < 0 {
+		return nil, fmt.Errorf("Receive called on a closed socket")
+	}
+	rb := make([]byte, syscall.Getpagesize())
+	nr, _, err := syscall.Recvfrom(fd, rb, 0)
+	if err != nil {
+		return nil, err
+	}
+	if nr < syscall.NLMSG_HDRLEN {
+		return nil, fmt.Errorf("Got short response from netlink")
+	}
+	rb = rb[:nr]
+	return syscall.ParseNetlinkMessage(rb)
+}
+
+// SetSendTimeout allows setting a send timeout on the socket
+func (s *NetlinkSocket) SetSendTimeout(timeout *syscall.Timeval) error {
+	// Set a send timeout of SOCKET_SEND_TIMEOUT; this allows Send to
+	// periodically unblock instead of leaving a routine stuck on a send
+	// on a closed fd
+	return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO, timeout)
+}
+
+// SetReceiveTimeout allows setting a receive timeout on the socket
+func (s *NetlinkSocket) SetReceiveTimeout(timeout *syscall.Timeval) error {
+	// Set a read timeout of SOCKET_READ_TIMEOUT; this allows the Read to
+	// periodically unblock instead of leaving a routine stuck on a recvmsg
+	// on a closed fd
+	return syscall.SetsockoptTimeval(int(s.fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, timeout)
+}
+
+func (s *NetlinkSocket) GetPid() (uint32, error) {
+	fd := int(atomic.LoadInt32(&s.fd))
+	lsa, err := syscall.Getsockname(fd)
+	if err != nil {
+		return 0, err
+	}
+	switch v := lsa.(type) {
+	case *syscall.SockaddrNetlink:
+		return v.Pid, nil
+	}
+	return 0, fmt.Errorf("Wrong socket type")
+}
+
+func ZeroTerminated(s string) []byte {
+	bytes := make([]byte, len(s)+1)
+	for i := 0; i < len(s); i++ {
+		bytes[i] = s[i]
+	}
+	bytes[len(s)] = 0
+	return bytes
+}
+
+func NonZeroTerminated(s string) []byte {
+	bytes := make([]byte, len(s))
+	for i := 0; i < len(s); i++ {
+		bytes[i] = s[i]
+	}
+	return bytes
+}
+
+func BytesToString(b []byte) string {
+	n := bytes.Index(b, []byte{0})
+	return string(b[:n])
+}
+
+func Uint8Attr(v uint8) []byte {
+	return []byte{byte(v)}
+}
+
+func Uint16Attr(v uint16) []byte {
+	native := NativeEndian()
+	bytes := make([]byte, 2)
+	native.PutUint16(bytes, v)
+	return bytes
+}
+
+func Uint32Attr(v uint32) []byte {
+	native := NativeEndian()
+	bytes := make([]byte, 4)
+	native.PutUint32(bytes, v)
+	return bytes
+}
+
+func Uint64Attr(v uint64) []byte {
+	native := NativeEndian()
+	bytes := make([]byte, 8)
+	native.PutUint64(bytes, v)
+	return bytes
+}
+
+func ParseRouteAttr(b []byte) ([]syscall.NetlinkRouteAttr, error) {
+	var attrs []syscall.NetlinkRouteAttr
+	for len(b) >= syscall.SizeofRtAttr {
+		a, vbuf, alen, err := netlinkRouteAttrAndValue(b)
+		if err != nil {
+			return nil, err
+		}
+		ra := syscall.NetlinkRouteAttr{Attr: *a, Value: vbuf[:int(a.Len)-syscall.SizeofRtAttr]}
+		attrs = append(attrs, ra)
+		b = b[alen:]
+	}
+	return attrs, nil
+}
+
+func netlinkRouteAttrAndValue(b []byte) (*syscall.RtAttr, []byte, int, error) {
+	a := (*syscall.RtAttr)(unsafe.Pointer(&b[0]))
+	if int(a.Len) < syscall.SizeofRtAttr || int(a.Len) > len(b) {
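+		// Reject malformed attributes: the recorded length must cover at
+		// least the rtattr header and must not run past the remaining buffer.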
+ return nil, nil, 0, syscall.EINVAL + } + return a, b[syscall.SizeofRtAttr:], rtaAlignOf(int(a.Len)), nil +} + +// SocketHandle contains the netlink socket and the associated +// sequence counter for a specific netlink family +type SocketHandle struct { + Seq uint32 + Socket *NetlinkSocket +} + +// Close closes the netlink socket +func (sh *SocketHandle) Close() { + if sh.Socket != nil { + sh.Socket.Close() + } +} diff --git a/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go new file mode 100644 index 0000000000..dfc0be6606 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/nl_unspecified.go @@ -0,0 +1,11 @@ +// +build !linux + +package nl + +import "encoding/binary" + +var SupportedNlFamilies = []int{} + +func NativeEndian() binary.ByteOrder { + return nil +} diff --git a/vendor/github.com/vishvananda/netlink/nl/route_linux.go b/vendor/github.com/vishvananda/netlink/nl/route_linux.go new file mode 100644 index 0000000000..1a064d65d2 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/route_linux.go @@ -0,0 +1,80 @@ +package nl + +import ( + "syscall" + "unsafe" +) + +type RtMsg struct { + syscall.RtMsg +} + +func NewRtMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_UNIVERSE, + Protocol: syscall.RTPROT_BOOT, + Type: syscall.RTN_UNICAST, + }, + } +} + +func NewRtDelMsg() *RtMsg { + return &RtMsg{ + RtMsg: syscall.RtMsg{ + Table: syscall.RT_TABLE_MAIN, + Scope: syscall.RT_SCOPE_NOWHERE, + }, + } +} + +func (msg *RtMsg) Len() int { + return syscall.SizeofRtMsg +} + +func DeserializeRtMsg(b []byte) *RtMsg { + return (*RtMsg)(unsafe.Pointer(&b[0:syscall.SizeofRtMsg][0])) +} + +func (msg *RtMsg) Serialize() []byte { + return (*(*[syscall.SizeofRtMsg]byte)(unsafe.Pointer(msg)))[:] +} + +type RtNexthop struct { + syscall.RtNexthop + Children []NetlinkRequestData +} + +func DeserializeRtNexthop(b []byte) *RtNexthop { + return (*RtNexthop)(unsafe.Pointer(&b[0:syscall.SizeofRtNexthop][0])) +} + +func (msg *RtNexthop) Len() int { + if len(msg.Children) == 0 { + return syscall.SizeofRtNexthop + } + + l := 0 + for _, child := range msg.Children { + l += rtaAlignOf(child.Len()) + } + l += syscall.SizeofRtNexthop + return rtaAlignOf(l) +} + +func (msg *RtNexthop) Serialize() []byte { + length := msg.Len() + msg.RtNexthop.Len = uint16(length) + buf := make([]byte, length) + copy(buf, (*(*[syscall.SizeofRtNexthop]byte)(unsafe.Pointer(msg)))[:]) + next := rtaAlignOf(syscall.SizeofRtNexthop) + if len(msg.Children) > 0 { + for _, child := range msg.Children { + childBuf := child.Serialize() + copy(buf[next:], childBuf) + next += rtaAlignOf(len(childBuf)) + } + } + return buf +} diff --git a/vendor/github.com/vishvananda/netlink/nl/syscall.go b/vendor/github.com/vishvananda/netlink/nl/syscall.go new file mode 100644 index 0000000000..3473e53638 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/syscall.go @@ -0,0 +1,68 @@ +package nl + +// syscall package lack of rule atributes type. 
+// Thus there are defined below +const ( + FRA_UNSPEC = iota + FRA_DST /* destination address */ + FRA_SRC /* source address */ + FRA_IIFNAME /* interface name */ + FRA_GOTO /* target to jump to (FR_ACT_GOTO) */ + FRA_UNUSED2 + FRA_PRIORITY /* priority/preference */ + FRA_UNUSED3 + FRA_UNUSED4 + FRA_UNUSED5 + FRA_FWMARK /* mark */ + FRA_FLOW /* flow/class id */ + FRA_TUN_ID + FRA_SUPPRESS_IFGROUP + FRA_SUPPRESS_PREFIXLEN + FRA_TABLE /* Extended table id */ + FRA_FWMASK /* mask for netfilter mark */ + FRA_OIFNAME +) + +// ip rule netlink request types +const ( + FR_ACT_UNSPEC = iota + FR_ACT_TO_TBL /* Pass to fixed table */ + FR_ACT_GOTO /* Jump to another rule */ + FR_ACT_NOP /* No operation */ + FR_ACT_RES3 + FR_ACT_RES4 + FR_ACT_BLACKHOLE /* Drop without notification */ + FR_ACT_UNREACHABLE /* Drop with ENETUNREACH */ + FR_ACT_PROHIBIT /* Drop with EACCES */ +) + +// socket diags related +const ( + SOCK_DIAG_BY_FAMILY = 20 /* linux.sock_diag.h */ + TCPDIAG_NOCOOKIE = 0xFFFFFFFF /* TCPDIAG_NOCOOKIE in net/ipv4/tcp_diag.h*/ +) + +const ( + AF_MPLS = 28 +) + +const ( + RTA_NEWDST = 0x13 + RTA_ENCAP_TYPE = 0x15 + RTA_ENCAP = 0x16 +) + +// RTA_ENCAP subtype +const ( + MPLS_IPTUNNEL_UNSPEC = iota + MPLS_IPTUNNEL_DST +) + +// light weight tunnel encap types +const ( + LWTUNNEL_ENCAP_NONE = iota + LWTUNNEL_ENCAP_MPLS + LWTUNNEL_ENCAP_IP + LWTUNNEL_ENCAP_ILA + LWTUNNEL_ENCAP_IP6 +) diff --git a/vendor/github.com/vishvananda/netlink/nl/tc_linux.go b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go new file mode 100644 index 0000000000..e91fb21c55 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/tc_linux.go @@ -0,0 +1,675 @@ +package nl + +import ( + "unsafe" +) + +// LinkLayer +const ( + LINKLAYER_UNSPEC = iota + LINKLAYER_ETHERNET + LINKLAYER_ATM +) + +// ATM +const ( + ATM_CELL_PAYLOAD = 48 + ATM_CELL_SIZE = 53 +) + +const TC_LINKLAYER_MASK = 0x0F + +// Police +const ( + TCA_POLICE_UNSPEC = iota + TCA_POLICE_TBF + TCA_POLICE_RATE + TCA_POLICE_PEAKRATE + TCA_POLICE_AVRATE + TCA_POLICE_RESULT + TCA_POLICE_MAX = TCA_POLICE_RESULT +) + +// Message types +const ( + TCA_UNSPEC = iota + TCA_KIND + TCA_OPTIONS + TCA_STATS + TCA_XSTATS + TCA_RATE + TCA_FCNT + TCA_STATS2 + TCA_STAB + TCA_MAX = TCA_STAB +) + +const ( + TCA_ACT_TAB = 1 + TCAA_MAX = 1 +) + +const ( + TCA_ACT_UNSPEC = iota + TCA_ACT_KIND + TCA_ACT_OPTIONS + TCA_ACT_INDEX + TCA_ACT_STATS + TCA_ACT_MAX +) + +const ( + TCA_PRIO_UNSPEC = iota + TCA_PRIO_MQ + TCA_PRIO_MAX = TCA_PRIO_MQ +) + +const ( + SizeofTcMsg = 0x14 + SizeofTcActionMsg = 0x04 + SizeofTcPrioMap = 0x14 + SizeofTcRateSpec = 0x0c + SizeofTcNetemQopt = 0x18 + SizeofTcNetemCorr = 0x0c + SizeofTcNetemReorder = 0x08 + SizeofTcNetemCorrupt = 0x08 + SizeofTcTbfQopt = 2*SizeofTcRateSpec + 0x0c + SizeofTcHtbCopt = 2*SizeofTcRateSpec + 0x14 + SizeofTcHtbGlob = 0x14 + SizeofTcU32Key = 0x10 + SizeofTcU32Sel = 0x10 // without keys + SizeofTcGen = 0x14 + SizeofTcMirred = SizeofTcGen + 0x08 + SizeofTcPolice = 2*SizeofTcRateSpec + 0x20 +) + +// struct tcmsg { +// unsigned char tcm_family; +// unsigned char tcm__pad1; +// unsigned short tcm__pad2; +// int tcm_ifindex; +// __u32 tcm_handle; +// __u32 tcm_parent; +// __u32 tcm_info; +// }; + +type TcMsg struct { + Family uint8 + Pad [3]byte + Ifindex int32 + Handle uint32 + Parent uint32 + Info uint32 +} + +func (msg *TcMsg) Len() int { + return SizeofTcMsg +} + +func DeserializeTcMsg(b []byte) *TcMsg { + return (*TcMsg)(unsafe.Pointer(&b[0:SizeofTcMsg][0])) +} + +func (x *TcMsg) Serialize() []byte { + return 
(*(*[SizeofTcMsg]byte)(unsafe.Pointer(x)))[:] +} + +// struct tcamsg { +// unsigned char tca_family; +// unsigned char tca__pad1; +// unsigned short tca__pad2; +// }; + +type TcActionMsg struct { + Family uint8 + Pad [3]byte +} + +func (msg *TcActionMsg) Len() int { + return SizeofTcActionMsg +} + +func DeserializeTcActionMsg(b []byte) *TcActionMsg { + return (*TcActionMsg)(unsafe.Pointer(&b[0:SizeofTcActionMsg][0])) +} + +func (x *TcActionMsg) Serialize() []byte { + return (*(*[SizeofTcActionMsg]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TC_PRIO_MAX = 15 +) + +// struct tc_prio_qopt { +// int bands; /* Number of bands */ +// __u8 priomap[TC_PRIO_MAX+1]; /* Map: logical priority -> PRIO band */ +// }; + +type TcPrioMap struct { + Bands int32 + Priomap [TC_PRIO_MAX + 1]uint8 +} + +func (msg *TcPrioMap) Len() int { + return SizeofTcPrioMap +} + +func DeserializeTcPrioMap(b []byte) *TcPrioMap { + return (*TcPrioMap)(unsafe.Pointer(&b[0:SizeofTcPrioMap][0])) +} + +func (x *TcPrioMap) Serialize() []byte { + return (*(*[SizeofTcPrioMap]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_TBF_UNSPEC = iota + TCA_TBF_PARMS + TCA_TBF_RTAB + TCA_TBF_PTAB + TCA_TBF_RATE64 + TCA_TBF_PRATE64 + TCA_TBF_BURST + TCA_TBF_PBURST + TCA_TBF_MAX = TCA_TBF_PBURST +) + +// struct tc_ratespec { +// unsigned char cell_log; +// __u8 linklayer; /* lower 4 bits */ +// unsigned short overhead; +// short cell_align; +// unsigned short mpu; +// __u32 rate; +// }; + +type TcRateSpec struct { + CellLog uint8 + Linklayer uint8 + Overhead uint16 + CellAlign int16 + Mpu uint16 + Rate uint32 +} + +func (msg *TcRateSpec) Len() int { + return SizeofTcRateSpec +} + +func DeserializeTcRateSpec(b []byte) *TcRateSpec { + return (*TcRateSpec)(unsafe.Pointer(&b[0:SizeofTcRateSpec][0])) +} + +func (x *TcRateSpec) Serialize() []byte { + return (*(*[SizeofTcRateSpec]byte)(unsafe.Pointer(x)))[:] +} + +/** +* NETEM + */ + +const ( + TCA_NETEM_UNSPEC = iota + TCA_NETEM_CORR + TCA_NETEM_DELAY_DIST + TCA_NETEM_REORDER + TCA_NETEM_CORRUPT + TCA_NETEM_LOSS + TCA_NETEM_RATE + TCA_NETEM_ECN + TCA_NETEM_RATE64 + TCA_NETEM_MAX = TCA_NETEM_RATE64 +) + +// struct tc_netem_qopt { +// __u32 latency; /* added delay (us) */ +// __u32 limit; /* fifo limit (packets) */ +// __u32 loss; /* random packet loss (0=none ~0=100%) */ +// __u32 gap; /* re-ordering gap (0 for none) */ +// __u32 duplicate; /* random packet dup (0=none ~0=100%) */ +// __u32 jitter; /* random jitter in latency (us) */ +// }; + +type TcNetemQopt struct { + Latency uint32 + Limit uint32 + Loss uint32 + Gap uint32 + Duplicate uint32 + Jitter uint32 +} + +func (msg *TcNetemQopt) Len() int { + return SizeofTcNetemQopt +} + +func DeserializeTcNetemQopt(b []byte) *TcNetemQopt { + return (*TcNetemQopt)(unsafe.Pointer(&b[0:SizeofTcNetemQopt][0])) +} + +func (x *TcNetemQopt) Serialize() []byte { + return (*(*[SizeofTcNetemQopt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corr { +// __u32 delay_corr; /* delay correlation */ +// __u32 loss_corr; /* packet loss correlation */ +// __u32 dup_corr; /* duplicate correlation */ +// }; + +type TcNetemCorr struct { + DelayCorr uint32 + LossCorr uint32 + DupCorr uint32 +} + +func (msg *TcNetemCorr) Len() int { + return SizeofTcNetemCorr +} + +func DeserializeTcNetemCorr(b []byte) *TcNetemCorr { + return (*TcNetemCorr)(unsafe.Pointer(&b[0:SizeofTcNetemCorr][0])) +} + +func (x *TcNetemCorr) Serialize() []byte { + return (*(*[SizeofTcNetemCorr]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_reorder { +// __u32 probability; +// __u32 correlation; 
+// }; + +type TcNetemReorder struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemReorder) Len() int { + return SizeofTcNetemReorder +} + +func DeserializeTcNetemReorder(b []byte) *TcNetemReorder { + return (*TcNetemReorder)(unsafe.Pointer(&b[0:SizeofTcNetemReorder][0])) +} + +func (x *TcNetemReorder) Serialize() []byte { + return (*(*[SizeofTcNetemReorder]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_netem_corrupt { +// __u32 probability; +// __u32 correlation; +// }; + +type TcNetemCorrupt struct { + Probability uint32 + Correlation uint32 +} + +func (msg *TcNetemCorrupt) Len() int { + return SizeofTcNetemCorrupt +} + +func DeserializeTcNetemCorrupt(b []byte) *TcNetemCorrupt { + return (*TcNetemCorrupt)(unsafe.Pointer(&b[0:SizeofTcNetemCorrupt][0])) +} + +func (x *TcNetemCorrupt) Serialize() []byte { + return (*(*[SizeofTcNetemCorrupt]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_tbf_qopt { +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// __u32 limit; +// __u32 buffer; +// __u32 mtu; +// }; + +type TcTbfQopt struct { + Rate TcRateSpec + Peakrate TcRateSpec + Limit uint32 + Buffer uint32 + Mtu uint32 +} + +func (msg *TcTbfQopt) Len() int { + return SizeofTcTbfQopt +} + +func DeserializeTcTbfQopt(b []byte) *TcTbfQopt { + return (*TcTbfQopt)(unsafe.Pointer(&b[0:SizeofTcTbfQopt][0])) +} + +func (x *TcTbfQopt) Serialize() []byte { + return (*(*[SizeofTcTbfQopt]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_HTB_UNSPEC = iota + TCA_HTB_PARMS + TCA_HTB_INIT + TCA_HTB_CTAB + TCA_HTB_RTAB + TCA_HTB_DIRECT_QLEN + TCA_HTB_RATE64 + TCA_HTB_CEIL64 + TCA_HTB_MAX = TCA_HTB_CEIL64 +) + +//struct tc_htb_opt { +// struct tc_ratespec rate; +// struct tc_ratespec ceil; +// __u32 buffer; +// __u32 cbuffer; +// __u32 quantum; +// __u32 level; /* out only */ +// __u32 prio; +//}; + +type TcHtbCopt struct { + Rate TcRateSpec + Ceil TcRateSpec + Buffer uint32 + Cbuffer uint32 + Quantum uint32 + Level uint32 + Prio uint32 +} + +func (msg *TcHtbCopt) Len() int { + return SizeofTcHtbCopt +} + +func DeserializeTcHtbCopt(b []byte) *TcHtbCopt { + return (*TcHtbCopt)(unsafe.Pointer(&b[0:SizeofTcHtbCopt][0])) +} + +func (x *TcHtbCopt) Serialize() []byte { + return (*(*[SizeofTcHtbCopt]byte)(unsafe.Pointer(x)))[:] +} + +type TcHtbGlob struct { + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 +} + +func (msg *TcHtbGlob) Len() int { + return SizeofTcHtbGlob +} + +func DeserializeTcHtbGlob(b []byte) *TcHtbGlob { + return (*TcHtbGlob)(unsafe.Pointer(&b[0:SizeofTcHtbGlob][0])) +} + +func (x *TcHtbGlob) Serialize() []byte { + return (*(*[SizeofTcHtbGlob]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_U32_UNSPEC = iota + TCA_U32_CLASSID + TCA_U32_HASH + TCA_U32_LINK + TCA_U32_DIVISOR + TCA_U32_SEL + TCA_U32_POLICE + TCA_U32_ACT + TCA_U32_INDEV + TCA_U32_PCNT + TCA_U32_MARK + TCA_U32_MAX = TCA_U32_MARK +) + +// struct tc_u32_key { +// __be32 mask; +// __be32 val; +// int off; +// int offmask; +// }; + +type TcU32Key struct { + Mask uint32 // big endian + Val uint32 // big endian + Off int32 + OffMask int32 +} + +func (msg *TcU32Key) Len() int { + return SizeofTcU32Key +} + +func DeserializeTcU32Key(b []byte) *TcU32Key { + return (*TcU32Key)(unsafe.Pointer(&b[0:SizeofTcU32Key][0])) +} + +func (x *TcU32Key) Serialize() []byte { + return (*(*[SizeofTcU32Key]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_u32_sel { +// unsigned char flags; +// unsigned char offshift; +// unsigned char nkeys; +// +// __be16 offmask; +// __u16 off; +// short offoff; 
+// +// short hoff; +// __be32 hmask; +// struct tc_u32_key keys[0]; +// }; + +const ( + TC_U32_TERMINAL = 1 << iota + TC_U32_OFFSET = 1 << iota + TC_U32_VAROFFSET = 1 << iota + TC_U32_EAT = 1 << iota +) + +type TcU32Sel struct { + Flags uint8 + Offshift uint8 + Nkeys uint8 + Pad uint8 + Offmask uint16 // big endian + Off uint16 + Offoff int16 + Hoff int16 + Hmask uint32 // big endian + Keys []TcU32Key +} + +func (msg *TcU32Sel) Len() int { + return SizeofTcU32Sel + int(msg.Nkeys)*SizeofTcU32Key +} + +func DeserializeTcU32Sel(b []byte) *TcU32Sel { + x := &TcU32Sel{} + copy((*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:], b) + next := SizeofTcU32Sel + var i uint8 + for i = 0; i < x.Nkeys; i++ { + x.Keys = append(x.Keys, *DeserializeTcU32Key(b[next:])) + next += SizeofTcU32Key + } + return x +} + +func (x *TcU32Sel) Serialize() []byte { + // This can't just unsafe.cast because it must iterate through keys. + buf := make([]byte, x.Len()) + copy(buf, (*(*[SizeofTcU32Sel]byte)(unsafe.Pointer(x)))[:]) + next := SizeofTcU32Sel + for _, key := range x.Keys { + keyBuf := key.Serialize() + copy(buf[next:], keyBuf) + next += SizeofTcU32Key + } + return buf +} + +type TcGen struct { + Index uint32 + Capab uint32 + Action int32 + Refcnt int32 + Bindcnt int32 +} + +func (msg *TcGen) Len() int { + return SizeofTcGen +} + +func DeserializeTcGen(b []byte) *TcGen { + return (*TcGen)(unsafe.Pointer(&b[0:SizeofTcGen][0])) +} + +func (x *TcGen) Serialize() []byte { + return (*(*[SizeofTcGen]byte)(unsafe.Pointer(x)))[:] +} + +// #define tc_gen \ +// __u32 index; \ +// __u32 capab; \ +// int action; \ +// int refcnt; \ +// int bindcnt + +const ( + TCA_ACT_GACT = 5 +) + +const ( + TCA_GACT_UNSPEC = iota + TCA_GACT_TM + TCA_GACT_PARMS + TCA_GACT_PROB + TCA_GACT_MAX = TCA_GACT_PROB +) + +type TcGact TcGen + +const ( + TCA_ACT_BPF = 13 +) + +const ( + TCA_ACT_BPF_UNSPEC = iota + TCA_ACT_BPF_TM + TCA_ACT_BPF_PARMS + TCA_ACT_BPF_OPS_LEN + TCA_ACT_BPF_OPS + TCA_ACT_BPF_FD + TCA_ACT_BPF_NAME + TCA_ACT_BPF_MAX = TCA_ACT_BPF_NAME +) + +const ( + TCA_BPF_FLAG_ACT_DIRECT uint32 = 1 << iota +) + +const ( + TCA_BPF_UNSPEC = iota + TCA_BPF_ACT + TCA_BPF_POLICE + TCA_BPF_CLASSID + TCA_BPF_OPS_LEN + TCA_BPF_OPS + TCA_BPF_FD + TCA_BPF_NAME + TCA_BPF_FLAGS + TCA_BPF_MAX = TCA_BPF_FLAGS +) + +type TcBpf TcGen + +const ( + TCA_ACT_MIRRED = 8 +) + +const ( + TCA_MIRRED_UNSPEC = iota + TCA_MIRRED_TM + TCA_MIRRED_PARMS + TCA_MIRRED_MAX = TCA_MIRRED_PARMS +) + +// struct tc_mirred { +// tc_gen; +// int eaction; /* one of IN/EGRESS_MIRROR/REDIR */ +// __u32 ifindex; /* ifindex of egress port */ +// }; + +type TcMirred struct { + TcGen + Eaction int32 + Ifindex uint32 +} + +func (msg *TcMirred) Len() int { + return SizeofTcMirred +} + +func DeserializeTcMirred(b []byte) *TcMirred { + return (*TcMirred)(unsafe.Pointer(&b[0:SizeofTcMirred][0])) +} + +func (x *TcMirred) Serialize() []byte { + return (*(*[SizeofTcMirred]byte)(unsafe.Pointer(x)))[:] +} + +// struct tc_police { +// __u32 index; +// int action; +// __u32 limit; +// __u32 burst; +// __u32 mtu; +// struct tc_ratespec rate; +// struct tc_ratespec peakrate; +// int refcnt; +// int bindcnt; +// __u32 capab; +// }; + +type TcPolice struct { + Index uint32 + Action int32 + Limit uint32 + Burst uint32 + Mtu uint32 + Rate TcRateSpec + PeakRate TcRateSpec + Refcnt int32 + Bindcnt int32 + Capab uint32 +} + +func (msg *TcPolice) Len() int { + return SizeofTcPolice +} + +func DeserializeTcPolice(b []byte) *TcPolice { + return (*TcPolice)(unsafe.Pointer(&b[0:SizeofTcPolice][0])) +} + +func 
(x *TcPolice) Serialize() []byte { + return (*(*[SizeofTcPolice]byte)(unsafe.Pointer(x)))[:] +} + +const ( + TCA_FW_UNSPEC = iota + TCA_FW_CLASSID + TCA_FW_POLICE + TCA_FW_INDEV + TCA_FW_ACT + TCA_FW_MASK + TCA_FW_MAX = TCA_FW_MASK +) diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go new file mode 100644 index 0000000000..09a2ffa10e --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_linux.go @@ -0,0 +1,296 @@ +package nl + +import ( + "bytes" + "net" + "unsafe" +) + +// Infinity for packet and byte counts +const ( + XFRM_INF = ^uint64(0) +) + +type XfrmMsgType uint8 + +type XfrmMsg interface { + Type() XfrmMsgType +} + +// Message Types +const ( + XFRM_MSG_BASE XfrmMsgType = 0x10 + XFRM_MSG_NEWSA = 0x10 + XFRM_MSG_DELSA = 0x11 + XFRM_MSG_GETSA = 0x12 + XFRM_MSG_NEWPOLICY = 0x13 + XFRM_MSG_DELPOLICY = 0x14 + XFRM_MSG_GETPOLICY = 0x15 + XFRM_MSG_ALLOCSPI = 0x16 + XFRM_MSG_ACQUIRE = 0x17 + XFRM_MSG_EXPIRE = 0x18 + XFRM_MSG_UPDPOLICY = 0x19 + XFRM_MSG_UPDSA = 0x1a + XFRM_MSG_POLEXPIRE = 0x1b + XFRM_MSG_FLUSHSA = 0x1c + XFRM_MSG_FLUSHPOLICY = 0x1d + XFRM_MSG_NEWAE = 0x1e + XFRM_MSG_GETAE = 0x1f + XFRM_MSG_REPORT = 0x20 + XFRM_MSG_MIGRATE = 0x21 + XFRM_MSG_NEWSADINFO = 0x22 + XFRM_MSG_GETSADINFO = 0x23 + XFRM_MSG_NEWSPDINFO = 0x24 + XFRM_MSG_GETSPDINFO = 0x25 + XFRM_MSG_MAPPING = 0x26 + XFRM_MSG_MAX = 0x26 + XFRM_NR_MSGTYPES = 0x17 +) + +// Attribute types +const ( + /* Netlink message attributes. */ + XFRMA_UNSPEC = 0x00 + XFRMA_ALG_AUTH = 0x01 /* struct xfrm_algo */ + XFRMA_ALG_CRYPT = 0x02 /* struct xfrm_algo */ + XFRMA_ALG_COMP = 0x03 /* struct xfrm_algo */ + XFRMA_ENCAP = 0x04 /* struct xfrm_algo + struct xfrm_encap_tmpl */ + XFRMA_TMPL = 0x05 /* 1 or more struct xfrm_user_tmpl */ + XFRMA_SA = 0x06 /* struct xfrm_usersa_info */ + XFRMA_POLICY = 0x07 /* struct xfrm_userpolicy_info */ + XFRMA_SEC_CTX = 0x08 /* struct xfrm_sec_ctx */ + XFRMA_LTIME_VAL = 0x09 + XFRMA_REPLAY_VAL = 0x0a + XFRMA_REPLAY_THRESH = 0x0b + XFRMA_ETIMER_THRESH = 0x0c + XFRMA_SRCADDR = 0x0d /* xfrm_address_t */ + XFRMA_COADDR = 0x0e /* xfrm_address_t */ + XFRMA_LASTUSED = 0x0f /* unsigned long */ + XFRMA_POLICY_TYPE = 0x10 /* struct xfrm_userpolicy_type */ + XFRMA_MIGRATE = 0x11 + XFRMA_ALG_AEAD = 0x12 /* struct xfrm_algo_aead */ + XFRMA_KMADDRESS = 0x13 /* struct xfrm_user_kmaddress */ + XFRMA_ALG_AUTH_TRUNC = 0x14 /* struct xfrm_algo_auth */ + XFRMA_MARK = 0x15 /* struct xfrm_mark */ + XFRMA_TFCPAD = 0x16 /* __u32 */ + XFRMA_REPLAY_ESN_VAL = 0x17 /* struct xfrm_replay_esn */ + XFRMA_SA_EXTRA_FLAGS = 0x18 /* __u32 */ + XFRMA_MAX = 0x18 +) + +const ( + SizeofXfrmAddress = 0x10 + SizeofXfrmSelector = 0x38 + SizeofXfrmLifetimeCfg = 0x40 + SizeofXfrmLifetimeCur = 0x20 + SizeofXfrmId = 0x18 + SizeofXfrmMark = 0x08 +) + +// Netlink groups +const ( + XFRMNLGRP_NONE = 0x0 + XFRMNLGRP_ACQUIRE = 0x1 + XFRMNLGRP_EXPIRE = 0x2 + XFRMNLGRP_SA = 0x3 + XFRMNLGRP_POLICY = 0x4 + XFRMNLGRP_AEVENTS = 0x5 + XFRMNLGRP_REPORT = 0x6 + XFRMNLGRP_MIGRATE = 0x7 + XFRMNLGRP_MAPPING = 0x8 + __XFRMNLGRP_MAX = 0x9 +) + +// typedef union { +// __be32 a4; +// __be32 a6[4]; +// } xfrm_address_t; + +type XfrmAddress [SizeofXfrmAddress]byte + +func (x *XfrmAddress) ToIP() net.IP { + var empty = [12]byte{} + ip := make(net.IP, net.IPv6len) + if bytes.Equal(x[4:16], empty[:]) { + ip[10] = 0xff + ip[11] = 0xff + copy(ip[12:16], x[0:4]) + } else { + copy(ip[:], x[:]) + } + return ip +} + +func (x *XfrmAddress) ToIPNet(prefixlen uint8) *net.IPNet { + ip := x.ToIP() + if 
GetIPFamily(ip) == FAMILY_V4 { + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 32)} + } + return &net.IPNet{IP: ip, Mask: net.CIDRMask(int(prefixlen), 128)} +} + +func (x *XfrmAddress) FromIP(ip net.IP) { + var empty = [16]byte{} + if len(ip) < net.IPv4len { + copy(x[4:16], empty[:]) + } else if GetIPFamily(ip) == FAMILY_V4 { + copy(x[0:4], ip.To4()[0:4]) + copy(x[4:16], empty[:12]) + } else { + copy(x[0:16], ip.To16()[0:16]) + } +} + +func DeserializeXfrmAddress(b []byte) *XfrmAddress { + return (*XfrmAddress)(unsafe.Pointer(&b[0:SizeofXfrmAddress][0])) +} + +func (x *XfrmAddress) Serialize() []byte { + return (*(*[SizeofXfrmAddress]byte)(unsafe.Pointer(x)))[:] +} + +// struct xfrm_selector { +// xfrm_address_t daddr; +// xfrm_address_t saddr; +// __be16 dport; +// __be16 dport_mask; +// __be16 sport; +// __be16 sport_mask; +// __u16 family; +// __u8 prefixlen_d; +// __u8 prefixlen_s; +// __u8 proto; +// int ifindex; +// __kernel_uid32_t user; +// }; + +type XfrmSelector struct { + Daddr XfrmAddress + Saddr XfrmAddress + Dport uint16 // big endian + DportMask uint16 // big endian + Sport uint16 // big endian + SportMask uint16 // big endian + Family uint16 + PrefixlenD uint8 + PrefixlenS uint8 + Proto uint8 + Pad [3]byte + Ifindex int32 + User uint32 +} + +func (msg *XfrmSelector) Len() int { + return SizeofXfrmSelector +} + +func DeserializeXfrmSelector(b []byte) *XfrmSelector { + return (*XfrmSelector)(unsafe.Pointer(&b[0:SizeofXfrmSelector][0])) +} + +func (msg *XfrmSelector) Serialize() []byte { + return (*(*[SizeofXfrmSelector]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cfg { +// __u64 soft_byte_limit; +// __u64 hard_byte_limit; +// __u64 soft_packet_limit; +// __u64 hard_packet_limit; +// __u64 soft_add_expires_seconds; +// __u64 hard_add_expires_seconds; +// __u64 soft_use_expires_seconds; +// __u64 hard_use_expires_seconds; +// }; +// + +type XfrmLifetimeCfg struct { + SoftByteLimit uint64 + HardByteLimit uint64 + SoftPacketLimit uint64 + HardPacketLimit uint64 + SoftAddExpiresSeconds uint64 + HardAddExpiresSeconds uint64 + SoftUseExpiresSeconds uint64 + HardUseExpiresSeconds uint64 +} + +func (msg *XfrmLifetimeCfg) Len() int { + return SizeofXfrmLifetimeCfg +} + +func DeserializeXfrmLifetimeCfg(b []byte) *XfrmLifetimeCfg { + return (*XfrmLifetimeCfg)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCfg][0])) +} + +func (msg *XfrmLifetimeCfg) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCfg]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_lifetime_cur { +// __u64 bytes; +// __u64 packets; +// __u64 add_time; +// __u64 use_time; +// }; + +type XfrmLifetimeCur struct { + Bytes uint64 + Packets uint64 + AddTime uint64 + UseTime uint64 +} + +func (msg *XfrmLifetimeCur) Len() int { + return SizeofXfrmLifetimeCur +} + +func DeserializeXfrmLifetimeCur(b []byte) *XfrmLifetimeCur { + return (*XfrmLifetimeCur)(unsafe.Pointer(&b[0:SizeofXfrmLifetimeCur][0])) +} + +func (msg *XfrmLifetimeCur) Serialize() []byte { + return (*(*[SizeofXfrmLifetimeCur]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u8 proto; +// }; + +type XfrmId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Proto uint8 + Pad [3]byte +} + +func (msg *XfrmId) Len() int { + return SizeofXfrmId +} + +func DeserializeXfrmId(b []byte) *XfrmId { + return (*XfrmId)(unsafe.Pointer(&b[0:SizeofXfrmId][0])) +} + +func (msg *XfrmId) Serialize() []byte { + return (*(*[SizeofXfrmId]byte)(unsafe.Pointer(msg)))[:] +} + +type XfrmMark struct { + 
Value uint32 + Mask uint32 +} + +func (msg *XfrmMark) Len() int { + return SizeofXfrmMark +} + +func DeserializeXfrmMark(b []byte) *XfrmMark { + return (*XfrmMark)(unsafe.Pointer(&b[0:SizeofXfrmMark][0])) +} + +func (msg *XfrmMark) Serialize() []byte { + return (*(*[SizeofXfrmMark]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go new file mode 100644 index 0000000000..715df4cc5f --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_monitor_linux.go @@ -0,0 +1,32 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUserExpire = 0xe8 +) + +// struct xfrm_user_expire { +// struct xfrm_usersa_info state; +// __u8 hard; +// }; + +type XfrmUserExpire struct { + XfrmUsersaInfo XfrmUsersaInfo + Hard uint8 + Pad [7]byte +} + +func (msg *XfrmUserExpire) Len() int { + return SizeofXfrmUserExpire +} + +func DeserializeXfrmUserExpire(b []byte) *XfrmUserExpire { + return (*XfrmUserExpire)(unsafe.Pointer(&b[0:SizeofXfrmUserExpire][0])) +} + +func (msg *XfrmUserExpire) Serialize() []byte { + return (*(*[SizeofXfrmUserExpire]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go new file mode 100644 index 0000000000..66f7e03d2d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go @@ -0,0 +1,119 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUserpolicyId = 0x40 + SizeofXfrmUserpolicyInfo = 0xa8 + SizeofXfrmUserTmpl = 0x40 +) + +// struct xfrm_userpolicy_id { +// struct xfrm_selector sel; +// __u32 index; +// __u8 dir; +// }; +// + +type XfrmUserpolicyId struct { + Sel XfrmSelector + Index uint32 + Dir uint8 + Pad [3]byte +} + +func (msg *XfrmUserpolicyId) Len() int { + return SizeofXfrmUserpolicyId +} + +func DeserializeXfrmUserpolicyId(b []byte) *XfrmUserpolicyId { + return (*XfrmUserpolicyId)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyId][0])) +} + +func (msg *XfrmUserpolicyId) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_userpolicy_info { +// struct xfrm_selector sel; +// struct xfrm_lifetime_cfg lft; +// struct xfrm_lifetime_cur curlft; +// __u32 priority; +// __u32 index; +// __u8 dir; +// __u8 action; +// #define XFRM_POLICY_ALLOW 0 +// #define XFRM_POLICY_BLOCK 1 +// __u8 flags; +// #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ +// /* Automatically expand selector to include matching ICMP payloads. 
*/ +// #define XFRM_POLICY_ICMP 2 +// __u8 share; +// }; + +type XfrmUserpolicyInfo struct { + Sel XfrmSelector + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Priority uint32 + Index uint32 + Dir uint8 + Action uint8 + Flags uint8 + Share uint8 + Pad [4]byte +} + +func (msg *XfrmUserpolicyInfo) Len() int { + return SizeofXfrmUserpolicyInfo +} + +func DeserializeXfrmUserpolicyInfo(b []byte) *XfrmUserpolicyInfo { + return (*XfrmUserpolicyInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserpolicyInfo][0])) +} + +func (msg *XfrmUserpolicyInfo) Serialize() []byte { + return (*(*[SizeofXfrmUserpolicyInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_user_tmpl { +// struct xfrm_id id; +// __u16 family; +// xfrm_address_t saddr; +// __u32 reqid; +// __u8 mode; +// __u8 share; +// __u8 optional; +// __u32 aalgos; +// __u32 ealgos; +// __u32 calgos; +// } + +type XfrmUserTmpl struct { + XfrmId XfrmId + Family uint16 + Pad1 [2]byte + Saddr XfrmAddress + Reqid uint32 + Mode uint8 + Share uint8 + Optional uint8 + Pad2 byte + Aalgos uint32 + Ealgos uint32 + Calgos uint32 +} + +func (msg *XfrmUserTmpl) Len() int { + return SizeofXfrmUserTmpl +} + +func DeserializeXfrmUserTmpl(b []byte) *XfrmUserTmpl { + return (*XfrmUserTmpl)(unsafe.Pointer(&b[0:SizeofXfrmUserTmpl][0])) +} + +func (msg *XfrmUserTmpl) Serialize() []byte { + return (*(*[SizeofXfrmUserTmpl]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go new file mode 100644 index 0000000000..b6290fd544 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/nl/xfrm_state_linux.go @@ -0,0 +1,334 @@ +package nl + +import ( + "unsafe" +) + +const ( + SizeofXfrmUsersaId = 0x18 + SizeofXfrmStats = 0x0c + SizeofXfrmUsersaInfo = 0xe0 + SizeofXfrmUserSpiInfo = 0xe8 + SizeofXfrmAlgo = 0x44 + SizeofXfrmAlgoAuth = 0x48 + SizeofXfrmAlgoAEAD = 0x48 + SizeofXfrmEncapTmpl = 0x18 + SizeofXfrmUsersaFlush = 0x8 + SizeofXfrmReplayStateEsn = 0x18 +) + +const ( + XFRM_STATE_NOECN = 1 + XFRM_STATE_DECAP_DSCP = 2 + XFRM_STATE_NOPMTUDISC = 4 + XFRM_STATE_WILDRECV = 8 + XFRM_STATE_ICMP = 16 + XFRM_STATE_AF_UNSPEC = 32 + XFRM_STATE_ALIGN4 = 64 + XFRM_STATE_ESN = 128 +) + +// struct xfrm_usersa_id { +// xfrm_address_t daddr; +// __be32 spi; +// __u16 family; +// __u8 proto; +// }; + +type XfrmUsersaId struct { + Daddr XfrmAddress + Spi uint32 // big endian + Family uint16 + Proto uint8 + Pad byte +} + +func (msg *XfrmUsersaId) Len() int { + return SizeofXfrmUsersaId +} + +func DeserializeXfrmUsersaId(b []byte) *XfrmUsersaId { + return (*XfrmUsersaId)(unsafe.Pointer(&b[0:SizeofXfrmUsersaId][0])) +} + +func (msg *XfrmUsersaId) Serialize() []byte { + return (*(*[SizeofXfrmUsersaId]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_stats { +// __u32 replay_window; +// __u32 replay; +// __u32 integrity_failed; +// }; + +type XfrmStats struct { + ReplayWindow uint32 + Replay uint32 + IntegrityFailed uint32 +} + +func (msg *XfrmStats) Len() int { + return SizeofXfrmStats +} + +func DeserializeXfrmStats(b []byte) *XfrmStats { + return (*XfrmStats)(unsafe.Pointer(&b[0:SizeofXfrmStats][0])) +} + +func (msg *XfrmStats) Serialize() []byte { + return (*(*[SizeofXfrmStats]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_usersa_info { +// struct xfrm_selector sel; +// struct xfrm_id id; +// xfrm_address_t saddr; +// struct xfrm_lifetime_cfg lft; +// struct xfrm_lifetime_cur curlft; +// struct xfrm_stats stats; +// __u32 seq; +// __u32 reqid; +// __u16 family; +// __u8 mode; 
/* XFRM_MODE_xxx */ +// __u8 replay_window; +// __u8 flags; +// #define XFRM_STATE_NOECN 1 +// #define XFRM_STATE_DECAP_DSCP 2 +// #define XFRM_STATE_NOPMTUDISC 4 +// #define XFRM_STATE_WILDRECV 8 +// #define XFRM_STATE_ICMP 16 +// #define XFRM_STATE_AF_UNSPEC 32 +// #define XFRM_STATE_ALIGN4 64 +// #define XFRM_STATE_ESN 128 +// }; +// +// #define XFRM_SA_XFLAG_DONT_ENCAP_DSCP 1 +// + +type XfrmUsersaInfo struct { + Sel XfrmSelector + Id XfrmId + Saddr XfrmAddress + Lft XfrmLifetimeCfg + Curlft XfrmLifetimeCur + Stats XfrmStats + Seq uint32 + Reqid uint32 + Family uint16 + Mode uint8 + ReplayWindow uint8 + Flags uint8 + Pad [7]byte +} + +func (msg *XfrmUsersaInfo) Len() int { + return SizeofXfrmUsersaInfo +} + +func DeserializeXfrmUsersaInfo(b []byte) *XfrmUsersaInfo { + return (*XfrmUsersaInfo)(unsafe.Pointer(&b[0:SizeofXfrmUsersaInfo][0])) +} + +func (msg *XfrmUsersaInfo) Serialize() []byte { + return (*(*[SizeofXfrmUsersaInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_userspi_info { +// struct xfrm_usersa_info info; +// __u32 min; +// __u32 max; +// }; + +type XfrmUserSpiInfo struct { + XfrmUsersaInfo XfrmUsersaInfo + Min uint32 + Max uint32 +} + +func (msg *XfrmUserSpiInfo) Len() int { + return SizeofXfrmUserSpiInfo +} + +func DeserializeXfrmUserSpiInfo(b []byte) *XfrmUserSpiInfo { + return (*XfrmUserSpiInfo)(unsafe.Pointer(&b[0:SizeofXfrmUserSpiInfo][0])) +} + +func (msg *XfrmUserSpiInfo) Serialize() []byte { + return (*(*[SizeofXfrmUserSpiInfo]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_algo { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgo struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgo) Len() int { + return SizeofXfrmAlgo + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgo(b []byte) *XfrmAlgo { + ret := XfrmAlgo{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgKey = b[68:ret.Len()] + return &ret +} + +func (msg *XfrmAlgo) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_auth { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_trunc_len; /* in bits */ +// char alg_key[0]; +// }; + +type XfrmAlgoAuth struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgTruncLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgoAuth) Len() int { + return SizeofXfrmAlgoAuth + int(msg.AlgKeyLen/8) +} + +func DeserializeXfrmAlgoAuth(b []byte) *XfrmAlgoAuth { + ret := XfrmAlgoAuth{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgTruncLen = *(*uint32)(unsafe.Pointer(&b[68])) + ret.AlgKey = b[72:ret.Len()] + return &ret +} + +func (msg *XfrmAlgoAuth) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgTruncLen)))[:]) + copy(b[72:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_algo_aead { +// char alg_name[64]; +// unsigned int alg_key_len; /* in bits */ +// unsigned int alg_icv_len; /* in bits */ +// char alg_key[0]; +// } + +type XfrmAlgoAEAD struct { + AlgName [64]byte + AlgKeyLen uint32 + AlgICVLen uint32 + AlgKey []byte +} + +func (msg *XfrmAlgoAEAD) Len() int { + return SizeofXfrmAlgoAEAD + int(msg.AlgKeyLen/8) +} + 
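+// The fixed-size xfrm structs above round-trip through plain unsafe casts,
+// but XfrmAlgo/XfrmAlgoAuth/XfrmAlgoAEAD are variable length: a 64-byte
+// algorithm name and a key length in bits, followed by the key bytes, which
+// Serialize and Deserialize copy manually. A minimal round-trip sketch,
+// illustrative only and not part of the upstream library (the helper name
+// is hypothetical):
+//
+//	func exampleXfrmAlgoRoundTrip() bool {
+//		orig := XfrmAlgo{AlgKeyLen: 128, AlgKey: make([]byte, 16)}
+//		copy(orig.AlgName[:], "cbc(aes)")
+//		buf := orig.Serialize()          // 64-byte name + 4-byte bit count + 16 key bytes
+//		back := DeserializeXfrmAlgo(buf) // back.Len() == SizeofXfrmAlgo+16 == len(buf)
+//		return back.AlgKeyLen == orig.AlgKeyLen
+//	}
+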
+func DeserializeXfrmAlgoAEAD(b []byte) *XfrmAlgoAEAD { + ret := XfrmAlgoAEAD{} + copy(ret.AlgName[:], b[0:64]) + ret.AlgKeyLen = *(*uint32)(unsafe.Pointer(&b[64])) + ret.AlgICVLen = *(*uint32)(unsafe.Pointer(&b[68])) + ret.AlgKey = b[72:ret.Len()] + return &ret +} + +func (msg *XfrmAlgoAEAD) Serialize() []byte { + b := make([]byte, msg.Len()) + copy(b[0:64], msg.AlgName[:]) + copy(b[64:68], (*(*[4]byte)(unsafe.Pointer(&msg.AlgKeyLen)))[:]) + copy(b[68:72], (*(*[4]byte)(unsafe.Pointer(&msg.AlgICVLen)))[:]) + copy(b[72:msg.Len()], msg.AlgKey[:]) + return b +} + +// struct xfrm_encap_tmpl { +// __u16 encap_type; +// __be16 encap_sport; +// __be16 encap_dport; +// xfrm_address_t encap_oa; +// }; + +type XfrmEncapTmpl struct { + EncapType uint16 + EncapSport uint16 // big endian + EncapDport uint16 // big endian + Pad [2]byte + EncapOa XfrmAddress +} + +func (msg *XfrmEncapTmpl) Len() int { + return SizeofXfrmEncapTmpl +} + +func DeserializeXfrmEncapTmpl(b []byte) *XfrmEncapTmpl { + return (*XfrmEncapTmpl)(unsafe.Pointer(&b[0:SizeofXfrmEncapTmpl][0])) +} + +func (msg *XfrmEncapTmpl) Serialize() []byte { + return (*(*[SizeofXfrmEncapTmpl]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_usersa_flush { +// __u8 proto; +// }; + +type XfrmUsersaFlush struct { + Proto uint8 +} + +func (msg *XfrmUsersaFlush) Len() int { + return SizeofXfrmUsersaFlush +} + +func DeserializeXfrmUsersaFlush(b []byte) *XfrmUsersaFlush { + return (*XfrmUsersaFlush)(unsafe.Pointer(&b[0:SizeofXfrmUsersaFlush][0])) +} + +func (msg *XfrmUsersaFlush) Serialize() []byte { + return (*(*[SizeofXfrmUsersaFlush]byte)(unsafe.Pointer(msg)))[:] +} + +// struct xfrm_replay_state_esn { +// unsigned int bmp_len; +// __u32 oseq; +// __u32 seq; +// __u32 oseq_hi; +// __u32 seq_hi; +// __u32 replay_window; +// __u32 bmp[0]; +// }; + +type XfrmReplayStateEsn struct { + BmpLen uint32 + OSeq uint32 + Seq uint32 + OSeqHi uint32 + SeqHi uint32 + ReplayWindow uint32 + Bmp []uint32 +} + +func (msg *XfrmReplayStateEsn) Serialize() []byte { + // We deliberately do not pass Bmp, as it gets set by the kernel. + return (*(*[SizeofXfrmReplayStateEsn]byte)(unsafe.Pointer(msg)))[:] +} diff --git a/vendor/github.com/vishvananda/netlink/order.go b/vendor/github.com/vishvananda/netlink/order.go new file mode 100644 index 0000000000..e28e153a1b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/order.go @@ -0,0 +1,32 @@ +package netlink + +import ( + "encoding/binary" + + "github.com/vishvananda/netlink/nl" +) + +var ( + native = nl.NativeEndian() + networkOrder = binary.BigEndian +) + +func htonl(val uint32) []byte { + bytes := make([]byte, 4) + binary.BigEndian.PutUint32(bytes, val) + return bytes +} + +func htons(val uint16) []byte { + bytes := make([]byte, 2) + binary.BigEndian.PutUint16(bytes, val) + return bytes +} + +func ntohl(buf []byte) uint32 { + return binary.BigEndian.Uint32(buf) +} + +func ntohs(buf []byte) uint16 { + return binary.BigEndian.Uint16(buf) +} diff --git a/vendor/github.com/vishvananda/netlink/protinfo.go b/vendor/github.com/vishvananda/netlink/protinfo.go new file mode 100644 index 0000000000..0087c4438b --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/protinfo.go @@ -0,0 +1,58 @@ +package netlink + +import ( + "strings" +) + +// Protinfo represents bridge flags from netlink. 
+type Protinfo struct { + Hairpin bool + Guard bool + FastLeave bool + RootBlock bool + Learning bool + Flood bool + ProxyArp bool + ProxyArpWiFi bool +} + +// String returns a list of enabled flags +func (prot *Protinfo) String() string { + var boolStrings []string + if prot.Hairpin { + boolStrings = append(boolStrings, "Hairpin") + } + if prot.Guard { + boolStrings = append(boolStrings, "Guard") + } + if prot.FastLeave { + boolStrings = append(boolStrings, "FastLeave") + } + if prot.RootBlock { + boolStrings = append(boolStrings, "RootBlock") + } + if prot.Learning { + boolStrings = append(boolStrings, "Learning") + } + if prot.Flood { + boolStrings = append(boolStrings, "Flood") + } + if prot.ProxyArp { + boolStrings = append(boolStrings, "ProxyArp") + } + if prot.ProxyArpWiFi { + boolStrings = append(boolStrings, "ProxyArpWiFi") + } + return strings.Join(boolStrings, " ") +} + +func boolToByte(x bool) []byte { + if x { + return []byte{1} + } + return []byte{0} +} + +func byteToBool(x byte) bool { + return uint8(x) != 0 +} diff --git a/vendor/github.com/vishvananda/netlink/protinfo_linux.go b/vendor/github.com/vishvananda/netlink/protinfo_linux.go new file mode 100644 index 0000000000..10dd0d5335 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/protinfo_linux.go @@ -0,0 +1,74 @@ +package netlink + +import ( + "fmt" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +func LinkGetProtinfo(link Link) (Protinfo, error) { + return pkgHandle.LinkGetProtinfo(link) +} + +func (h *Handle) LinkGetProtinfo(link Link) (Protinfo, error) { + base := link.Attrs() + h.ensureIndex(base) + var pi Protinfo + req := h.newNetlinkRequest(syscall.RTM_GETLINK, syscall.NLM_F_DUMP) + msg := nl.NewIfInfomsg(syscall.AF_BRIDGE) + req.AddData(msg) + msgs, err := req.Execute(syscall.NETLINK_ROUTE, 0) + if err != nil { + return pi, err + } + + for _, m := range msgs { + ans := nl.DeserializeIfInfomsg(m) + if int(ans.Index) != base.Index { + continue + } + attrs, err := nl.ParseRouteAttr(m[ans.Len():]) + if err != nil { + return pi, err + } + for _, attr := range attrs { + if attr.Attr.Type != syscall.IFLA_PROTINFO|syscall.NLA_F_NESTED { + continue + } + infos, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return pi, err + } + pi = *parseProtinfo(infos) + + return pi, nil + } + } + return pi, fmt.Errorf("Device with index %d not found", base.Index) +} + +func parseProtinfo(infos []syscall.NetlinkRouteAttr) *Protinfo { + var pi Protinfo + for _, info := range infos { + switch info.Attr.Type { + case nl.IFLA_BRPORT_MODE: + pi.Hairpin = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_GUARD: + pi.Guard = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_FAST_LEAVE: + pi.FastLeave = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROTECT: + pi.RootBlock = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_LEARNING: + pi.Learning = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_UNICAST_FLOOD: + pi.Flood = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROXYARP: + pi.ProxyArp = byteToBool(info.Value[0]) + case nl.IFLA_BRPORT_PROXYARP_WIFI: + pi.ProxyArpWiFi = byteToBool(info.Value[0]) + } + } + return &pi +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc.go b/vendor/github.com/vishvananda/netlink/qdisc.go new file mode 100644 index 0000000000..0ca86ebe89 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/qdisc.go @@ -0,0 +1,232 @@ +package netlink + +import ( + "fmt" + "math" +) + +const ( + HANDLE_NONE = 0 + HANDLE_INGRESS = 0xFFFFFFF1 + HANDLE_CLSACT = HANDLE_INGRESS + HANDLE_ROOT = 
0xFFFFFFFF + PRIORITY_MAP_LEN = 16 +) +const ( + HANDLE_MIN_INGRESS = 0xFFFFFFF2 + HANDLE_MIN_EGRESS = 0xFFFFFFF3 +) + +type Qdisc interface { + Attrs() *QdiscAttrs + Type() string +} + +// QdiscAttrs represents a netlink qdisc. A qdisc is associated with a link, +// has a handle, a parent and a refcnt. The root qdisc of a device should +// have parent == HANDLE_ROOT. +type QdiscAttrs struct { + LinkIndex int + Handle uint32 + Parent uint32 + Refcnt uint32 // read only +} + +func (q QdiscAttrs) String() string { + return fmt.Sprintf("{LinkIndex: %d, Handle: %s, Parent: %s, Refcnt: %d}", q.LinkIndex, HandleStr(q.Handle), HandleStr(q.Parent), q.Refcnt) +} + +func MakeHandle(major, minor uint16) uint32 { + return (uint32(major) << 16) | uint32(minor) +} + +func MajorMinor(handle uint32) (uint16, uint16) { + return uint16((handle & 0xFFFF0000) >> 16), uint16(handle & 0x0000FFFF) +} + +func HandleStr(handle uint32) string { + switch handle { + case HANDLE_NONE: + return "none" + case HANDLE_INGRESS: + return "ingress" + case HANDLE_ROOT: + return "root" + default: + major, minor := MajorMinor(handle) + return fmt.Sprintf("%x:%x", major, minor) + } +} + +func Percentage2u32(percentage float32) uint32 { + // FIXME this is most likely not the best way to convert from % to uint32 + if percentage == 100 { + return math.MaxUint32 + } + return uint32(math.MaxUint32 * (percentage / 100)) +} + +// PfifoFast is the default qdisc created by the kernel if one has not +// been defined for the interface +type PfifoFast struct { + QdiscAttrs + Bands uint8 + PriorityMap [PRIORITY_MAP_LEN]uint8 +} + +func (qdisc *PfifoFast) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *PfifoFast) Type() string { + return "pfifo_fast" +} + +// Prio is a basic qdisc that works just like PfifoFast +type Prio struct { + QdiscAttrs + Bands uint8 + PriorityMap [PRIORITY_MAP_LEN]uint8 +} + +func NewPrio(attrs QdiscAttrs) *Prio { + return &Prio{ + QdiscAttrs: attrs, + Bands: 3, + PriorityMap: [PRIORITY_MAP_LEN]uint8{1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1}, + } +} + +func (qdisc *Prio) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Prio) Type() string { + return "prio" +} + +// Htb is a classful qdisc that rate limits based on tokens +type Htb struct { + QdiscAttrs + Version uint32 + Rate2Quantum uint32 + Defcls uint32 + Debug uint32 + DirectPkts uint32 +} + +func NewHtb(attrs QdiscAttrs) *Htb { + return &Htb{ + QdiscAttrs: attrs, + Version: 3, + Defcls: 0, + Rate2Quantum: 10, + Debug: 0, + DirectPkts: 0, + } +} + +func (qdisc *Htb) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Htb) Type() string { + return "htb" +} + +// Netem is a classless qdisc that emulates network properties such as +// delay, packet loss, duplication and reordering + +type NetemQdiscAttrs struct { + Latency uint32 // in us + DelayCorr float32 // in % + Limit uint32 + Loss float32 // in % + LossCorr float32 // in % + Gap uint32 + Duplicate float32 // in % + DuplicateCorr float32 // in % + Jitter uint32 // in us + ReorderProb float32 // in % + ReorderCorr float32 // in % + CorruptProb float32 // in % + CorruptCorr float32 // in % +} + +func (q NetemQdiscAttrs) String() string { + return fmt.Sprintf( + "{Latency: %d, Limit: %d, Loss: %f, Gap: %d, Duplicate: %f, Jitter: %d}", + q.Latency, q.Limit, q.Loss, q.Gap, q.Duplicate, q.Jitter, + ) +} + +type Netem struct { + QdiscAttrs + Latency uint32 + DelayCorr uint32 + Limit uint32 + Loss uint32 + LossCorr uint32 + Gap uint32 + Duplicate uint32 + DuplicateCorr uint32 + Jitter uint32 + ReorderProb uint32 +
ReorderCorr uint32 + CorruptProb uint32 + CorruptCorr uint32 +} + +func (qdisc *Netem) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Netem) Type() string { + return "netem" +} + +// Tbf is a classless qdisc that rate limits based on tokens +type Tbf struct { + QdiscAttrs + Rate uint64 + Limit uint32 + Buffer uint32 + Peakrate uint64 + Minburst uint32 + // TODO: handle other settings +} + +func (qdisc *Tbf) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Tbf) Type() string { + return "tbf" +} + +// Ingress is a qdisc for adding ingress filters +type Ingress struct { + QdiscAttrs +} + +func (qdisc *Ingress) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *Ingress) Type() string { + return "ingress" +} + +// GenericQdisc qdiscs represent types that are not currently understood +// by this netlink library. +type GenericQdisc struct { + QdiscAttrs + QdiscType string +} + +func (qdisc *GenericQdisc) Attrs() *QdiscAttrs { + return &qdisc.QdiscAttrs +} + +func (qdisc *GenericQdisc) Type() string { + return qdisc.QdiscType +} diff --git a/vendor/github.com/vishvananda/netlink/qdisc_linux.go b/vendor/github.com/vishvananda/netlink/qdisc_linux.go new file mode 100644 index 0000000000..1123396e47 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/qdisc_linux.go @@ -0,0 +1,529 @@ +package netlink + +import ( + "fmt" + "io/ioutil" + "strconv" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +// NOTE: this function lives here because it uses other Linux-specific functions +func NewNetem(attrs QdiscAttrs, nattrs NetemQdiscAttrs) *Netem { + var limit uint32 = 1000 + var lossCorr, delayCorr, duplicateCorr uint32 + var reorderProb, reorderCorr uint32 + var corruptProb, corruptCorr uint32 + + latency := nattrs.Latency + loss := Percentage2u32(nattrs.Loss) + gap := nattrs.Gap + duplicate := Percentage2u32(nattrs.Duplicate) + jitter := nattrs.Jitter + + // Correlation + if latency > 0 && jitter > 0 { + delayCorr = Percentage2u32(nattrs.DelayCorr) + } + if loss > 0 { + lossCorr = Percentage2u32(nattrs.LossCorr) + } + if duplicate > 0 { + duplicateCorr = Percentage2u32(nattrs.DuplicateCorr) + } + // FIXME: should validate values (e.g. that loss/duplicate are percentages...) + latency = time2Tick(latency) + + if nattrs.Limit != 0 { + limit = nattrs.Limit + } + // Jitter is only valid if latency is > 0 + if latency > 0 { + jitter = time2Tick(jitter) + } + + reorderProb = Percentage2u32(nattrs.ReorderProb) + reorderCorr = Percentage2u32(nattrs.ReorderCorr) + + if reorderProb > 0 { + // ERROR if latency == 0 + if gap == 0 { + gap = 1 + } + } + + corruptProb = Percentage2u32(nattrs.CorruptProb) + corruptCorr = Percentage2u32(nattrs.CorruptCorr) + + return &Netem{ + QdiscAttrs: attrs, + Latency: latency, + DelayCorr: delayCorr, + Limit: limit, + Loss: loss, + LossCorr: lossCorr, + Gap: gap, + Duplicate: duplicate, + DuplicateCorr: duplicateCorr, + Jitter: jitter, + ReorderProb: reorderProb, + ReorderCorr: reorderCorr, + CorruptProb: corruptProb, + CorruptCorr: corruptCorr, + } +} + +// QdiscDel will delete a qdisc from the system. +// Equivalent to: `tc qdisc del $qdisc` +func QdiscDel(qdisc Qdisc) error { + return pkgHandle.QdiscDel(qdisc) +} + +// QdiscDel will delete a qdisc from the system.
+// Equivalent to: `tc qdisc del $qdisc` +func (h *Handle) QdiscDel(qdisc Qdisc) error { + return h.qdiscModify(syscall.RTM_DELQDISC, 0, qdisc) +} + +// QdiscChange will change a qdisc in place +// Equivalent to: `tc qdisc change $qdisc` +// The parent and handle MUST NOT be changed. +func QdiscChange(qdisc Qdisc) error { + return pkgHandle.QdiscChange(qdisc) +} + +// QdiscChange will change a qdisc in place +// Equivalent to: `tc qdisc change $qdisc` +// The parent and handle MUST NOT be changed. +func (h *Handle) QdiscChange(qdisc Qdisc) error { + return h.qdiscModify(syscall.RTM_NEWQDISC, 0, qdisc) +} + +// QdiscReplace will replace a qdisc in the system. +// Equivalent to: `tc qdisc replace $qdisc` +// The handle MUST change. +func QdiscReplace(qdisc Qdisc) error { + return pkgHandle.QdiscReplace(qdisc) +} + +// QdiscReplace will replace a qdisc in the system. +// Equivalent to: `tc qdisc replace $qdisc` +// The handle MUST change. +func (h *Handle) QdiscReplace(qdisc Qdisc) error { + return h.qdiscModify( + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_REPLACE, + qdisc) +} + +// QdiscAdd will add a qdisc to the system. +// Equivalent to: `tc qdisc add $qdisc` +func QdiscAdd(qdisc Qdisc) error { + return pkgHandle.QdiscAdd(qdisc) +} + +// QdiscAdd will add a qdisc to the system. +// Equivalent to: `tc qdisc add $qdisc` +func (h *Handle) QdiscAdd(qdisc Qdisc) error { + return h.qdiscModify( + syscall.RTM_NEWQDISC, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL, + qdisc) +} + +func (h *Handle) qdiscModify(cmd, flags int, qdisc Qdisc) error { + req := h.newNetlinkRequest(cmd, flags|syscall.NLM_F_ACK) + base := qdisc.Attrs() + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: int32(base.LinkIndex), + Handle: base.Handle, + Parent: base.Parent, + } + req.AddData(msg) + + // When deleting, don't bother building the rest of the netlink payload + if cmd != syscall.RTM_DELQDISC { + if err := qdiscPayload(req, qdisc); err != nil { + return err + } + } + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +func qdiscPayload(req *nl.NetlinkRequest, qdisc Qdisc) error { + + req.AddData(nl.NewRtAttr(nl.TCA_KIND, nl.ZeroTerminated(qdisc.Type()))) + + options := nl.NewRtAttr(nl.TCA_OPTIONS, nil) + + switch qdisc := qdisc.(type) { + case *Prio: + tcmap := nl.TcPrioMap{ + Bands: int32(qdisc.Bands), + Priomap: qdisc.PriorityMap, + } + options = nl.NewRtAttr(nl.TCA_OPTIONS, tcmap.Serialize()) + case *Tbf: + opt := nl.TcTbfQopt{} + opt.Rate.Rate = uint32(qdisc.Rate) + opt.Peakrate.Rate = uint32(qdisc.Peakrate) + opt.Limit = qdisc.Limit + opt.Buffer = qdisc.Buffer + nl.NewRtAttrChild(options, nl.TCA_TBF_PARMS, opt.Serialize()) + if qdisc.Rate >= uint64(1<<32) { + nl.NewRtAttrChild(options, nl.TCA_TBF_RATE64, nl.Uint64Attr(qdisc.Rate)) + } + if qdisc.Peakrate >= uint64(1<<32) { + nl.NewRtAttrChild(options, nl.TCA_TBF_PRATE64, nl.Uint64Attr(qdisc.Peakrate)) + } + if qdisc.Peakrate > 0 { + nl.NewRtAttrChild(options, nl.TCA_TBF_PBURST, nl.Uint32Attr(qdisc.Minburst)) + } + case *Htb: + opt := nl.TcHtbGlob{} + opt.Version = qdisc.Version + opt.Rate2Quantum = qdisc.Rate2Quantum + opt.Defcls = qdisc.Defcls + // TODO: Handle Debug properly.
For now default to 0 + opt.Debug = qdisc.Debug + opt.DirectPkts = qdisc.DirectPkts + nl.NewRtAttrChild(options, nl.TCA_HTB_INIT, opt.Serialize()) + // nl.NewRtAttrChild(options, nl.TCA_HTB_DIRECT_QLEN, opt.Serialize()) + case *Netem: + opt := nl.TcNetemQopt{} + opt.Latency = qdisc.Latency + opt.Limit = qdisc.Limit + opt.Loss = qdisc.Loss + opt.Gap = qdisc.Gap + opt.Duplicate = qdisc.Duplicate + opt.Jitter = qdisc.Jitter + options = nl.NewRtAttr(nl.TCA_OPTIONS, opt.Serialize()) + // Correlation + corr := nl.TcNetemCorr{} + corr.DelayCorr = qdisc.DelayCorr + corr.LossCorr = qdisc.LossCorr + corr.DupCorr = qdisc.DuplicateCorr + + if corr.DelayCorr > 0 || corr.LossCorr > 0 || corr.DupCorr > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_CORR, corr.Serialize()) + } + // Corruption + corruption := nl.TcNetemCorrupt{} + corruption.Probability = qdisc.CorruptProb + corruption.Correlation = qdisc.CorruptCorr + if corruption.Probability > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_CORRUPT, corruption.Serialize()) + } + // Reorder + reorder := nl.TcNetemReorder{} + reorder.Probability = qdisc.ReorderProb + reorder.Correlation = qdisc.ReorderCorr + if reorder.Probability > 0 { + nl.NewRtAttrChild(options, nl.TCA_NETEM_REORDER, reorder.Serialize()) + } + case *Ingress: + // ingress filters must use the proper handle + if qdisc.Attrs().Parent != HANDLE_INGRESS { + return fmt.Errorf("Ingress filters must set Parent to HANDLE_INGRESS") + } + } + + req.AddData(options) + return nil +} + +// QdiscList gets a list of qdiscs in the system. +// Equivalent to: `tc qdisc show`. +// The list can be filtered by link. +func QdiscList(link Link) ([]Qdisc, error) { + return pkgHandle.QdiscList(link) +} + +// QdiscList gets a list of qdiscs in the system. +// Equivalent to: `tc qdisc show`. +// The list can be filtered by link. 
+func (h *Handle) QdiscList(link Link) ([]Qdisc, error) { + req := h.newNetlinkRequest(syscall.RTM_GETQDISC, syscall.NLM_F_DUMP) + index := int32(0) + if link != nil { + base := link.Attrs() + h.ensureIndex(base) + index = int32(base.Index) + } + msg := &nl.TcMsg{ + Family: nl.FAMILY_ALL, + Ifindex: index, + } + req.AddData(msg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWQDISC) + if err != nil { + return nil, err + } + + var res []Qdisc + for _, m := range msgs { + msg := nl.DeserializeTcMsg(m) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + // skip qdiscs from other interfaces + if link != nil && msg.Ifindex != index { + continue + } + + base := QdiscAttrs{ + LinkIndex: int(msg.Ifindex), + Handle: msg.Handle, + Parent: msg.Parent, + Refcnt: msg.Info, + } + var qdisc Qdisc + qdiscType := "" + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.TCA_KIND: + qdiscType = string(attr.Value[:len(attr.Value)-1]) + switch qdiscType { + case "pfifo_fast": + qdisc = &PfifoFast{} + case "prio": + qdisc = &Prio{} + case "tbf": + qdisc = &Tbf{} + case "ingress": + qdisc = &Ingress{} + case "htb": + qdisc = &Htb{} + case "netem": + qdisc = &Netem{} + default: + qdisc = &GenericQdisc{QdiscType: qdiscType} + } + case nl.TCA_OPTIONS: + switch qdiscType { + case "pfifo_fast": + // pfifo returns TcPrioMap directly without wrapping it in rtattr + if err := parsePfifoFastData(qdisc, attr.Value); err != nil { + return nil, err + } + case "prio": + // prio returns TcPrioMap directly without wrapping it in rtattr + if err := parsePrioData(qdisc, attr.Value); err != nil { + return nil, err + } + case "tbf": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseTbfData(qdisc, data); err != nil { + return nil, err + } + case "htb": + data, err := nl.ParseRouteAttr(attr.Value) + if err != nil { + return nil, err + } + if err := parseHtbData(qdisc, data); err != nil { + return nil, err + } + case "netem": + if err := parseNetemData(qdisc, attr.Value); err != nil { + return nil, err + } + + // no options for ingress + } + } + } + *qdisc.Attrs() = base + res = append(res, qdisc) + } + + return res, nil +} + +func parsePfifoFastData(qdisc Qdisc, value []byte) error { + pfifo := qdisc.(*PfifoFast) + tcmap := nl.DeserializeTcPrioMap(value) + pfifo.PriorityMap = tcmap.Priomap + pfifo.Bands = uint8(tcmap.Bands) + return nil +} + +func parsePrioData(qdisc Qdisc, value []byte) error { + prio := qdisc.(*Prio) + tcmap := nl.DeserializeTcPrioMap(value) + prio.PriorityMap = tcmap.Priomap + prio.Bands = uint8(tcmap.Bands) + return nil +} + +func parseHtbData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + htb := qdisc.(*Htb) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_HTB_INIT: + opt := nl.DeserializeTcHtbGlob(datum.Value) + htb.Version = opt.Version + htb.Rate2Quantum = opt.Rate2Quantum + htb.Defcls = opt.Defcls + htb.Debug = opt.Debug + htb.DirectPkts = opt.DirectPkts + case nl.TCA_HTB_DIRECT_QLEN: + // TODO + //htb.DirectQlen = native.uint32(datum.Value) + } + } + return nil +} + +func parseNetemData(qdisc Qdisc, value []byte) error { + netem := qdisc.(*Netem) + opt := nl.DeserializeTcNetemQopt(value) + netem.Latency = opt.Latency + netem.Limit = opt.Limit + netem.Loss = opt.Loss + netem.Gap = opt.Gap + netem.Duplicate = opt.Duplicate + netem.Jitter = opt.Jitter + data, err := nl.ParseRouteAttr(value[nl.SizeofTcNetemQopt:]) + if err != 
nil { + return err + } + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_NETEM_CORR: + opt := nl.DeserializeTcNetemCorr(datum.Value) + netem.DelayCorr = opt.DelayCorr + netem.LossCorr = opt.LossCorr + netem.DuplicateCorr = opt.DupCorr + case nl.TCA_NETEM_CORRUPT: + opt := nl.DeserializeTcNetemCorrupt(datum.Value) + netem.CorruptProb = opt.Probability + netem.CorruptCorr = opt.Correlation + case nl.TCA_NETEM_REORDER: + opt := nl.DeserializeTcNetemReorder(datum.Value) + netem.ReorderProb = opt.Probability + netem.ReorderCorr = opt.Correlation + } + } + return nil +} + +func parseTbfData(qdisc Qdisc, data []syscall.NetlinkRouteAttr) error { + native = nl.NativeEndian() + tbf := qdisc.(*Tbf) + for _, datum := range data { + switch datum.Attr.Type { + case nl.TCA_TBF_PARMS: + opt := nl.DeserializeTcTbfQopt(datum.Value) + tbf.Rate = uint64(opt.Rate.Rate) + tbf.Peakrate = uint64(opt.Peakrate.Rate) + tbf.Limit = opt.Limit + tbf.Buffer = opt.Buffer + case nl.TCA_TBF_RATE64: + tbf.Rate = native.Uint64(datum.Value[0:8]) + case nl.TCA_TBF_PRATE64: + tbf.Peakrate = native.Uint64(datum.Value[0:8]) + case nl.TCA_TBF_PBURST: + tbf.Minburst = native.Uint32(datum.Value[0:4]) + } + } + return nil +} + +const ( + TIME_UNITS_PER_SEC = 1000000 +) + +var ( + tickInUsec float64 + clockFactor float64 + hz float64 +) + +func initClock() { + data, err := ioutil.ReadFile("/proc/net/psched") + if err != nil { + return + } + parts := strings.Split(strings.TrimSpace(string(data)), " ") + if len(parts) < 3 { + return + } + var vals [3]uint64 + for i := range vals { + val, err := strconv.ParseUint(parts[i], 16, 32) + if err != nil { + return + } + vals[i] = val + } + // compatibility + if vals[2] == 1000000000 { + vals[0] = vals[1] + } + clockFactor = float64(vals[2]) / TIME_UNITS_PER_SEC + tickInUsec = float64(vals[0]) / float64(vals[1]) * clockFactor + hz = float64(vals[0]) +} + +func TickInUsec() float64 { + if tickInUsec == 0.0 { + initClock() + } + return tickInUsec +} + +func ClockFactor() float64 { + if clockFactor == 0.0 { + initClock() + } + return clockFactor +} + +func Hz() float64 { + if hz == 0.0 { + initClock() + } + return hz +} + +func time2Tick(time uint32) uint32 { + return uint32(float64(time) * TickInUsec()) +} + +func tick2Time(tick uint32) uint32 { + return uint32(float64(tick) / TickInUsec()) +} + +func time2Ktime(time uint32) uint32 { + return uint32(float64(time) * ClockFactor()) +} + +func ktime2Time(ktime uint32) uint32 { + return uint32(float64(ktime) / ClockFactor()) +} + +func burst(rate uint64, buffer uint32) uint32 { + return uint32(float64(rate) * float64(tick2Time(buffer)) / TIME_UNITS_PER_SEC) +} + +func latency(rate uint64, limit, buffer uint32) float64 { + return TIME_UNITS_PER_SEC*(float64(limit)/float64(rate)) - float64(tick2Time(buffer)) +} + +func Xmittime(rate uint64, size uint32) float64 { + return TickInUsec() * TIME_UNITS_PER_SEC * (float64(size) / float64(rate)) +} diff --git a/vendor/github.com/vishvananda/netlink/route.go b/vendor/github.com/vishvananda/netlink/route.go new file mode 100644 index 0000000000..68c6a2230d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/route.go @@ -0,0 +1,176 @@ +package netlink + +import ( + "fmt" + "net" + "strings" +) + +// Scope is an enum representing a route scope. 
+type Scope uint8 + +type NextHopFlag int + +type Destination interface { + Family() int + Decode([]byte) error + Encode() ([]byte, error) + String() string + Equal(Destination) bool +} + +type Encap interface { + Type() int + Decode([]byte) error + Encode() ([]byte, error) + String() string + Equal(Encap) bool +} + +// Route represents a netlink route. +type Route struct { + LinkIndex int + ILinkIndex int + Scope Scope + Dst *net.IPNet + Src net.IP + Gw net.IP + MultiPath []*NexthopInfo + Protocol int + Priority int + Table int + Type int + Tos int + Flags int + MPLSDst *int + NewDst Destination + Encap Encap +} + +func (r Route) String() string { + elems := []string{} + if len(r.MultiPath) == 0 { + elems = append(elems, fmt.Sprintf("Ifindex: %d", r.LinkIndex)) + } + if r.MPLSDst != nil { + elems = append(elems, fmt.Sprintf("Dst: %d", *r.MPLSDst)) + } else { + elems = append(elems, fmt.Sprintf("Dst: %s", r.Dst)) + } + if r.NewDst != nil { + elems = append(elems, fmt.Sprintf("NewDst: %s", r.NewDst)) + } + if r.Encap != nil { + elems = append(elems, fmt.Sprintf("Encap: %s", r.Encap)) + } + elems = append(elems, fmt.Sprintf("Src: %s", r.Src)) + if len(r.MultiPath) > 0 { + elems = append(elems, fmt.Sprintf("Gw: %s", r.MultiPath)) + } else { + elems = append(elems, fmt.Sprintf("Gw: %s", r.Gw)) + } + elems = append(elems, fmt.Sprintf("Flags: %s", r.ListFlags())) + elems = append(elems, fmt.Sprintf("Table: %d", r.Table)) + return fmt.Sprintf("{%s}", strings.Join(elems, " ")) +} + +func (r Route) Equal(x Route) bool { + return r.LinkIndex == x.LinkIndex && + r.ILinkIndex == x.ILinkIndex && + r.Scope == x.Scope && + ipNetEqual(r.Dst, x.Dst) && + r.Src.Equal(x.Src) && + r.Gw.Equal(x.Gw) && + nexthopInfoSlice(r.MultiPath).Equal(x.MultiPath) && + r.Protocol == x.Protocol && + r.Priority == x.Priority && + r.Table == x.Table && + r.Type == x.Type && + r.Tos == x.Tos && + r.Flags == x.Flags && + (r.MPLSDst == x.MPLSDst || (r.MPLSDst != nil && x.MPLSDst != nil && *r.MPLSDst == *x.MPLSDst)) && + (r.NewDst == x.NewDst || (r.NewDst != nil && r.NewDst.Equal(x.NewDst))) && + (r.Encap == x.Encap || (r.Encap != nil && r.Encap.Equal(x.Encap))) +} + +func (r *Route) SetFlag(flag NextHopFlag) { + r.Flags |= int(flag) +} + +func (r *Route) ClearFlag(flag NextHopFlag) { + r.Flags &^= int(flag) +} + +type flagString struct { + f NextHopFlag + s string +} + +// RouteUpdate is sent when a route changes - type is RTM_NEWROUTE or RTM_DELROUTE +type RouteUpdate struct { + Type uint16 + Route +} + +type NexthopInfo struct { + LinkIndex int + Hops int + Gw net.IP + Flags int + NewDst Destination + Encap Encap +} + +func (n *NexthopInfo) String() string { + elems := []string{} + elems = append(elems, fmt.Sprintf("Ifindex: %d", n.LinkIndex)) + if n.NewDst != nil { + elems = append(elems, fmt.Sprintf("NewDst: %s", n.NewDst)) + } + if n.Encap != nil { + elems = append(elems, fmt.Sprintf("Encap: %s", n.Encap)) + } + elems = append(elems, fmt.Sprintf("Weight: %d", n.Hops+1)) + elems = append(elems, fmt.Sprintf("Gw: %s", n.Gw)) + elems = append(elems, fmt.Sprintf("Flags: %s", n.ListFlags())) + return fmt.Sprintf("{%s}", strings.Join(elems, " ")) +} + +func (n NexthopInfo) Equal(x NexthopInfo) bool { + return n.LinkIndex == x.LinkIndex && + n.Hops == x.Hops && + n.Gw.Equal(x.Gw) && + n.Flags == x.Flags && + (n.NewDst == x.NewDst || (n.NewDst != nil && n.NewDst.Equal(x.NewDst))) && + (n.Encap == x.Encap || (n.Encap != nil && n.Encap.Equal(x.Encap))) +} + +type nexthopInfoSlice []*NexthopInfo + +func (n nexthopInfoSlice) Equal(x
[]*NexthopInfo) bool { + if len(n) != len(x) { + return false + } + for i := range n { + if n[i] == nil || x[i] == nil { + return false + } + if !n[i].Equal(*x[i]) { + return false + } + } + return true +} + +// ipNetEqual returns true iff both IPNet are equal +func ipNetEqual(ipn1 *net.IPNet, ipn2 *net.IPNet) bool { + if ipn1 == ipn2 { + return true + } + if ipn1 == nil || ipn2 == nil { + return false + } + m1, _ := ipn1.Mask.Size() + m2, _ := ipn2.Mask.Size() + return m1 == m2 && ipn1.IP.Equal(ipn2.IP) +} diff --git a/vendor/github.com/vishvananda/netlink/route_linux.go b/vendor/github.com/vishvananda/netlink/route_linux.go new file mode 100644 index 0000000000..9234c6986d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/route_linux.go @@ -0,0 +1,743 @@ +package netlink + +import ( + "fmt" + "net" + "strings" + "syscall" + + "github.com/vishvananda/netlink/nl" + "github.com/vishvananda/netns" +) + +// RtAttr is shared so it is in netlink_linux.go + +const ( + SCOPE_UNIVERSE Scope = syscall.RT_SCOPE_UNIVERSE + SCOPE_SITE Scope = syscall.RT_SCOPE_SITE + SCOPE_LINK Scope = syscall.RT_SCOPE_LINK + SCOPE_HOST Scope = syscall.RT_SCOPE_HOST + SCOPE_NOWHERE Scope = syscall.RT_SCOPE_NOWHERE +) + +const ( + RT_FILTER_PROTOCOL uint64 = 1 << (1 + iota) + RT_FILTER_SCOPE + RT_FILTER_TYPE + RT_FILTER_TOS + RT_FILTER_IIF + RT_FILTER_OIF + RT_FILTER_DST + RT_FILTER_SRC + RT_FILTER_GW + RT_FILTER_TABLE +) + +const ( + FLAG_ONLINK NextHopFlag = syscall.RTNH_F_ONLINK + FLAG_PERVASIVE NextHopFlag = syscall.RTNH_F_PERVASIVE +) + +var testFlags = []flagString{ + {f: FLAG_ONLINK, s: "onlink"}, + {f: FLAG_PERVASIVE, s: "pervasive"}, +} + +func listFlags(flag int) []string { + var flags []string + for _, tf := range testFlags { + if flag&int(tf.f) != 0 { + flags = append(flags, tf.s) + } + } + return flags +} + +func (r *Route) ListFlags() []string { + return listFlags(r.Flags) +} + +func (n *NexthopInfo) ListFlags() []string { + return listFlags(n.Flags) +} + +type MPLSDestination struct { + Labels []int +} + +func (d *MPLSDestination) Family() int { + return nl.FAMILY_MPLS +} + +func (d *MPLSDestination) Decode(buf []byte) error { + d.Labels = nl.DecodeMPLSStack(buf) + return nil +} + +func (d *MPLSDestination) Encode() ([]byte, error) { + return nl.EncodeMPLSStack(d.Labels...), nil +} + +func (d *MPLSDestination) String() string { + s := make([]string, 0, len(d.Labels)) + for _, l := range d.Labels { + s = append(s, fmt.Sprintf("%d", l)) + } + return strings.Join(s, "/") +} + +func (d *MPLSDestination) Equal(x Destination) bool { + o, ok := x.(*MPLSDestination) + if !ok { + return false + } + if d == nil && o == nil { + return true + } + if d == nil || o == nil { + return false + } + if d.Labels == nil && o.Labels == nil { + return true + } + if d.Labels == nil || o.Labels == nil { + return false + } + if len(d.Labels) != len(o.Labels) { + return false + } + for i := range d.Labels { + if d.Labels[i] != o.Labels[i] { + return false + } + } + return true +} + +type MPLSEncap struct { + Labels []int +} + +func (e *MPLSEncap) Type() int { + return nl.LWTUNNEL_ENCAP_MPLS +} + +func (e *MPLSEncap) Decode(buf []byte) error { + if len(buf) < 4 { + return fmt.Errorf("Lack of bytes") + } + native := nl.NativeEndian() + l := native.Uint16(buf) + if len(buf) < int(l) { + return fmt.Errorf("Lack of bytes") + } + buf = buf[:l] + typ := native.Uint16(buf[2:]) + if typ != nl.MPLS_IPTUNNEL_DST { + return fmt.Errorf("Unknown MPLS Encap Type: %d", typ) + } + e.Labels = nl.DecodeMPLSStack(buf[4:]) + return nil +} + 
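+// Decode above parses the same attribute layout that Encode below emits: a
+// native-endian u16 total length, a u16 type tag (MPLS_IPTUNNEL_DST), then
+// the MPLS label stack. A minimal round-trip sketch, illustrative only and
+// not part of the upstream source (the function name is hypothetical):
+//
+//	func exampleMPLSEncapRoundTrip() error {
+//		in := MPLSEncap{Labels: []int{100, 200}}
+//		buf, err := in.Encode() // 4 header bytes + 4 bytes per label
+//		if err != nil {
+//			return err
+//		}
+//		var out MPLSEncap
+//		return out.Decode(buf) // out.Labels is now []int{100, 200}
+//	}
+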
+func (e *MPLSEncap) Encode() ([]byte, error) { + s := nl.EncodeMPLSStack(e.Labels...) + native := nl.NativeEndian() + hdr := make([]byte, 4) + native.PutUint16(hdr, uint16(len(s)+4)) + native.PutUint16(hdr[2:], nl.MPLS_IPTUNNEL_DST) + return append(hdr, s...), nil +} + +func (e *MPLSEncap) String() string { + s := make([]string, 0, len(e.Labels)) + for _, l := range e.Labels { + s = append(s, fmt.Sprintf("%d", l)) + } + return strings.Join(s, "/") +} + +func (e *MPLSEncap) Equal(x Encap) bool { + o, ok := x.(*MPLSEncap) + if !ok { + return false + } + if e == nil && o == nil { + return true + } + if e == nil || o == nil { + return false + } + if e.Labels == nil && o.Labels == nil { + return true + } + if e.Labels == nil || o.Labels == nil { + return false + } + if len(e.Labels) != len(o.Labels) { + return false + } + for i := range e.Labels { + if e.Labels[i] != o.Labels[i] { + return false + } + } + return true +} + +// RouteAdd will add a route to the system. +// Equivalent to: `ip route add $route` +func RouteAdd(route *Route) error { + return pkgHandle.RouteAdd(route) +} + +// RouteAdd will add a route to the system. +// Equivalent to: `ip route add $route` +func (h *Handle) RouteAdd(route *Route) error { + flags := syscall.NLM_F_CREATE | syscall.NLM_F_EXCL | syscall.NLM_F_ACK + req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + return h.routeHandle(route, req, nl.NewRtMsg()) +} + +// RouteReplace will add a route to the system, replacing it if it already exists. +// Equivalent to: `ip route replace $route` +func RouteReplace(route *Route) error { + return pkgHandle.RouteReplace(route) +} + +// RouteReplace will add a route to the system, replacing it if it already exists. +// Equivalent to: `ip route replace $route` +func (h *Handle) RouteReplace(route *Route) error { + flags := syscall.NLM_F_CREATE | syscall.NLM_F_REPLACE | syscall.NLM_F_ACK + req := h.newNetlinkRequest(syscall.RTM_NEWROUTE, flags) + return h.routeHandle(route, req, nl.NewRtMsg()) +} + +// RouteDel will delete a route from the system. +// Equivalent to: `ip route del $route` +func RouteDel(route *Route) error { + return pkgHandle.RouteDel(route) +} + +// RouteDel will delete a route from the system.
+// Equivalent to: `ip route del $route` +func (h *Handle) RouteDel(route *Route) error { + req := h.newNetlinkRequest(syscall.RTM_DELROUTE, syscall.NLM_F_ACK) + return h.routeHandle(route, req, nl.NewRtDelMsg()) +} + +func (h *Handle) routeHandle(route *Route, req *nl.NetlinkRequest, msg *nl.RtMsg) error { + if (route.Dst == nil || route.Dst.IP == nil) && route.Src == nil && route.Gw == nil && route.MPLSDst == nil { + return fmt.Errorf("one of Dst.IP, Src, or Gw must not be nil") + } + + family := -1 + var rtAttrs []*nl.RtAttr + + if route.Dst != nil && route.Dst.IP != nil { + dstLen, _ := route.Dst.Mask.Size() + msg.Dst_len = uint8(dstLen) + dstFamily := nl.GetIPFamily(route.Dst.IP) + family = dstFamily + var dstData []byte + if dstFamily == FAMILY_V4 { + dstData = route.Dst.IP.To4() + } else { + dstData = route.Dst.IP.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData)) + } else if route.MPLSDst != nil { + family = nl.FAMILY_MPLS + msg.Dst_len = uint8(20) + msg.Type = syscall.RTN_UNICAST + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, nl.EncodeMPLSStack(*route.MPLSDst))) + } + + if route.NewDst != nil { + if family != -1 && family != route.NewDst.Family() { + return fmt.Errorf("new destination and destination are not the same address family") + } + buf, err := route.NewDst.Encode() + if err != nil { + return err + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + } + + if route.Encap != nil { + buf := make([]byte, 2) + native.PutUint16(buf, uint16(route.Encap.Type())) + rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + buf, err := route.Encap.Encode() + if err != nil { + return err + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + } + + if route.Src != nil { + srcFamily := nl.GetIPFamily(route.Src) + if family != -1 && family != srcFamily { + return fmt.Errorf("source and destination ip are not the same IP family") + } + family = srcFamily + var srcData []byte + if srcFamily == FAMILY_V4 { + srcData = route.Src.To4() + } else { + srcData = route.Src.To16() + } + // The commonly used src ip for routes is actually PREFSRC + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PREFSRC, srcData)) + } + + if route.Gw != nil { + gwFamily := nl.GetIPFamily(route.Gw) + if family != -1 && family != gwFamily { + return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + } + family = gwFamily + var gwData []byte + if gwFamily == FAMILY_V4 { + gwData = route.Gw.To4() + } else { + gwData = route.Gw.To16() + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_GATEWAY, gwData)) + } + + if len(route.MultiPath) > 0 { + buf := []byte{} + for _, nh := range route.MultiPath { + rtnh := &nl.RtNexthop{ + RtNexthop: syscall.RtNexthop{ + Hops: uint8(nh.Hops), + Ifindex: int32(nh.LinkIndex), + Flags: uint8(nh.Flags), + }, + } + children := []nl.NetlinkRequestData{} + if nh.Gw != nil { + gwFamily := nl.GetIPFamily(nh.Gw) + if family != -1 && family != gwFamily { + return fmt.Errorf("gateway, source, and destination ip are not the same IP family") + } + if gwFamily == FAMILY_V4 { + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To4()))) + } else { + children = append(children, nl.NewRtAttr(syscall.RTA_GATEWAY, []byte(nh.Gw.To16()))) + } + } + if nh.NewDst != nil { + if family != -1 && family != nh.NewDst.Family() { + return fmt.Errorf("new destination and destination are not the same address family") + } + buf, err := nh.NewDst.Encode() + if err != nil { + return err + } + 
children = append(children, nl.NewRtAttr(nl.RTA_NEWDST, buf)) + } + if nh.Encap != nil { + buf := make([]byte, 2) + native.PutUint16(buf, uint16(nh.Encap.Type())) + rtAttrs = append(rtAttrs, nl.NewRtAttr(nl.RTA_ENCAP_TYPE, buf)) + buf, err := nh.Encap.Encode() + if err != nil { + return err + } + children = append(children, nl.NewRtAttr(nl.RTA_ENCAP, buf)) + } + rtnh.Children = children + buf = append(buf, rtnh.Serialize()...) + } + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_MULTIPATH, buf)) + } + + if route.Table > 0 { + if route.Table >= 256 { + msg.Table = syscall.RT_TABLE_UNSPEC + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Table)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_TABLE, b)) + } else { + msg.Table = uint8(route.Table) + } + } + + if route.Priority > 0 { + b := make([]byte, 4) + native.PutUint32(b, uint32(route.Priority)) + rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_PRIORITY, b)) + } + if route.Tos > 0 { + msg.Tos = uint8(route.Tos) + } + if route.Protocol > 0 { + msg.Protocol = uint8(route.Protocol) + } + if route.Type > 0 { + msg.Type = uint8(route.Type) + } + + msg.Flags = uint32(route.Flags) + msg.Scope = uint8(route.Scope) + msg.Family = uint8(family) + req.AddData(msg) + for _, attr := range rtAttrs { + req.AddData(attr) + } + + var ( + b = make([]byte, 4) + native = nl.NativeEndian() + ) + native.PutUint32(b, uint32(route.LinkIndex)) + + req.AddData(nl.NewRtAttr(syscall.RTA_OIF, b)) + + _, err := req.Execute(syscall.NETLINK_ROUTE, 0) + return err +} + +// RouteList gets a list of routes in the system. +// Equivalent to: `ip route show`. +// The list can be filtered by link and ip family. +func RouteList(link Link, family int) ([]Route, error) { + return pkgHandle.RouteList(link, family) +} + +// RouteList gets a list of routes in the system. +// Equivalent to: `ip route show`. +// The list can be filtered by link and ip family. +func (h *Handle) RouteList(link Link, family int) ([]Route, error) { + var routeFilter *Route + if link != nil { + routeFilter = &Route{ + LinkIndex: link.Attrs().Index, + } + } + return h.RouteListFiltered(family, routeFilter, RT_FILTER_OIF) +} + +// RouteListFiltered gets a list of routes in the system filtered with specified rules. +// All rules must be defined in RouteFilter struct +func RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + return pkgHandle.RouteListFiltered(family, filter, filterMask) +} + +// RouteListFiltered gets a list of routes in the system filtered with specified rules. 
+// All rules must be defined in RouteFilter struct +func (h *Handle) RouteListFiltered(family int, filter *Route, filterMask uint64) ([]Route, error) { + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_DUMP) + infmsg := nl.NewIfInfomsg(family) + req.AddData(infmsg) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + if err != nil { + return nil, err + } + + var res []Route + for _, m := range msgs { + msg := nl.DeserializeRtMsg(m) + if msg.Flags&syscall.RTM_F_CLONED != 0 { + // Ignore cloned routes + continue + } + if msg.Table != syscall.RT_TABLE_MAIN { + if filter == nil || filter != nil && filterMask&RT_FILTER_TABLE == 0 { + // Ignore non-main tables + continue + } + } + route, err := deserializeRoute(m) + if err != nil { + return nil, err + } + if filter != nil { + switch { + case filterMask&RT_FILTER_TABLE != 0 && filter.Table != syscall.RT_TABLE_UNSPEC && route.Table != filter.Table: + continue + case filterMask&RT_FILTER_PROTOCOL != 0 && route.Protocol != filter.Protocol: + continue + case filterMask&RT_FILTER_SCOPE != 0 && route.Scope != filter.Scope: + continue + case filterMask&RT_FILTER_TYPE != 0 && route.Type != filter.Type: + continue + case filterMask&RT_FILTER_TOS != 0 && route.Tos != filter.Tos: + continue + case filterMask&RT_FILTER_OIF != 0 && route.LinkIndex != filter.LinkIndex: + continue + case filterMask&RT_FILTER_IIF != 0 && route.ILinkIndex != filter.ILinkIndex: + continue + case filterMask&RT_FILTER_GW != 0 && !route.Gw.Equal(filter.Gw): + continue + case filterMask&RT_FILTER_SRC != 0 && !route.Src.Equal(filter.Src): + continue + case filterMask&RT_FILTER_DST != 0: + if filter.MPLSDst == nil || route.MPLSDst == nil || (*filter.MPLSDst) != (*route.MPLSDst) { + if !ipNetEqual(route.Dst, filter.Dst) { + continue + } + } + } + } + res = append(res, route) + } + return res, nil +} + +// deserializeRoute decodes a binary netlink message into a Route struct +func deserializeRoute(m []byte) (Route, error) { + msg := nl.DeserializeRtMsg(m) + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return Route{}, err + } + route := Route{ + Scope: Scope(msg.Scope), + Protocol: int(msg.Protocol), + Table: int(msg.Table), + Type: int(msg.Type), + Tos: int(msg.Tos), + Flags: int(msg.Flags), + } + + native := nl.NativeEndian() + var encap, encapType syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_GATEWAY: + route.Gw = net.IP(attr.Value) + case syscall.RTA_PREFSRC: + route.Src = net.IP(attr.Value) + case syscall.RTA_DST: + if msg.Family == nl.FAMILY_MPLS { + stack := nl.DecodeMPLSStack(attr.Value) + if len(stack) == 0 || len(stack) > 1 { + return route, fmt.Errorf("invalid MPLS RTA_DST") + } + route.MPLSDst = &stack[0] + } else { + route.Dst = &net.IPNet{ + IP: attr.Value, + Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attr.Value)), + } + } + case syscall.RTA_OIF: + route.LinkIndex = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_IIF: + route.ILinkIndex = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_PRIORITY: + route.Priority = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_TABLE: + route.Table = int(native.Uint32(attr.Value[0:4])) + case syscall.RTA_MULTIPATH: + parseRtNexthop := func(value []byte) (*NexthopInfo, []byte, error) { + if len(value) < syscall.SizeofRtNexthop { + return nil, nil, fmt.Errorf("Lack of bytes") + } + nh := nl.DeserializeRtNexthop(value) + if len(value) < int(nh.RtNexthop.Len) { + return nil, nil, fmt.Errorf("Lack of bytes") + } 
+ info := &NexthopInfo{ + LinkIndex: int(nh.RtNexthop.Ifindex), + Hops: int(nh.RtNexthop.Hops), + Flags: int(nh.RtNexthop.Flags), + } + attrs, err := nl.ParseRouteAttr(value[syscall.SizeofRtNexthop:int(nh.RtNexthop.Len)]) + if err != nil { + return nil, nil, err + } + var encap, encapType syscall.NetlinkRouteAttr + for _, attr := range attrs { + switch attr.Attr.Type { + case syscall.RTA_GATEWAY: + info.Gw = net.IP(attr.Value) + case nl.RTA_NEWDST: + var d Destination + switch msg.Family { + case nl.FAMILY_MPLS: + d = &MPLSDestination{} + } + if err := d.Decode(attr.Value); err != nil { + return nil, nil, err + } + info.NewDst = d + case nl.RTA_ENCAP_TYPE: + encapType = attr + case nl.RTA_ENCAP: + encap = attr + } + } + + if len(encap.Value) != 0 && len(encapType.Value) != 0 { + typ := int(native.Uint16(encapType.Value[0:2])) + var e Encap + switch typ { + case nl.LWTUNNEL_ENCAP_MPLS: + e = &MPLSEncap{} + if err := e.Decode(encap.Value); err != nil { + return nil, nil, err + } + } + info.Encap = e + } + + return info, value[int(nh.RtNexthop.Len):], nil + } + rest := attr.Value + for len(rest) > 0 { + info, buf, err := parseRtNexthop(rest) + if err != nil { + return route, err + } + route.MultiPath = append(route.MultiPath, info) + rest = buf + } + case nl.RTA_NEWDST: + var d Destination + switch msg.Family { + case nl.FAMILY_MPLS: + d = &MPLSDestination{} + } + if err := d.Decode(attr.Value); err != nil { + return route, err + } + route.NewDst = d + case nl.RTA_ENCAP_TYPE: + encapType = attr + case nl.RTA_ENCAP: + encap = attr + } + } + + if len(encap.Value) != 0 && len(encapType.Value) != 0 { + typ := int(native.Uint16(encapType.Value[0:2])) + var e Encap + switch typ { + case nl.LWTUNNEL_ENCAP_MPLS: + e = &MPLSEncap{} + if err := e.Decode(encap.Value); err != nil { + return route, err + } + } + route.Encap = e + } + + return route, nil +} + +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. +func RouteGet(destination net.IP) ([]Route, error) { + return pkgHandle.RouteGet(destination) +} + +// RouteGet gets a route to a specific destination from the host system. +// Equivalent to: 'ip route get'. +func (h *Handle) RouteGet(destination net.IP) ([]Route, error) { + req := h.newNetlinkRequest(syscall.RTM_GETROUTE, syscall.NLM_F_REQUEST) + family := nl.GetIPFamily(destination) + var destinationData []byte + var bitlen uint8 + if family == FAMILY_V4 { + destinationData = destination.To4() + bitlen = 32 + } else { + destinationData = destination.To16() + bitlen = 128 + } + msg := &nl.RtMsg{} + msg.Family = uint8(family) + msg.Dst_len = bitlen + req.AddData(msg) + + rtaDst := nl.NewRtAttr(syscall.RTA_DST, destinationData) + req.AddData(rtaDst) + + msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWROUTE) + if err != nil { + return nil, err + } + + var res []Route + for _, m := range msgs { + route, err := deserializeRoute(m) + if err != nil { + return nil, err + } + res = append(res, route) + } + return res, nil + +} + +// RouteSubscribe takes a chan down which notifications will be sent +// when routes are added or deleted. Close the 'done' chan to stop subscription. +func RouteSubscribe(ch chan<- RouteUpdate, done <-chan struct{}) error { + return routeSubscribeAt(netns.None(), netns.None(), ch, done, nil) +} + +// RouteSubscribeAt works like RouteSubscribe plus it allows the caller +// to choose the network namespace in which to subscribe (ns). 
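The doc comment above introduces the namespace-aware RouteSubscribeAt, implemented just below; it shares RouteSubscribe's contract of pushing RouteUpdate values down a caller-owned channel until the done channel is closed. A minimal consumer sketch, not part of the vendored source (the buffer size and update count are illustrative):

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
)

func watchRoutes() error {
	updates := make(chan netlink.RouteUpdate, 16) // buffered so the receiver may lag briefly
	done := make(chan struct{})
	defer close(done) // closing done tears down the subscription socket

	if err := netlink.RouteSubscribe(updates, done); err != nil {
		return err
	}
	for i := 0; i < 4; i++ { // handle a few updates, then stop
		u := <-updates
		// u.Type is the raw netlink message type:
		// RTM_NEWROUTE for adds, RTM_DELROUTE for deletes.
		fmt.Printf("route update type=%d dst=%v\n", u.Type, u.Route.Dst)
	}
	return nil
}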
+func RouteSubscribeAt(ns netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}) error {
+	return routeSubscribeAt(ns, netns.None(), ch, done, nil)
+}
+
+// RouteSubscribeOptions contains a set of options to use with
+// RouteSubscribeWithOptions.
+type RouteSubscribeOptions struct {
+	Namespace     *netns.NsHandle
+	ErrorCallback func(error)
+}
+
+// RouteSubscribeWithOptions works like RouteSubscribe but allows the
+// caller to provide additional options that modify the behavior. Currently,
+// a namespace and an error callback can be provided.
+func RouteSubscribeWithOptions(ch chan<- RouteUpdate, done <-chan struct{}, options RouteSubscribeOptions) error {
+	if options.Namespace == nil {
+		none := netns.None()
+		options.Namespace = &none
+	}
+	return routeSubscribeAt(*options.Namespace, netns.None(), ch, done, options.ErrorCallback)
+}
+
+func routeSubscribeAt(newNs, curNs netns.NsHandle, ch chan<- RouteUpdate, done <-chan struct{}, cberr func(error)) error {
+	s, err := nl.SubscribeAt(newNs, curNs, syscall.NETLINK_ROUTE, syscall.RTNLGRP_IPV4_ROUTE, syscall.RTNLGRP_IPV6_ROUTE)
+	if err != nil {
+		return err
+	}
+	if done != nil {
+		go func() {
+			<-done
+			s.Close()
+		}()
+	}
+	go func() {
+		defer close(ch)
+		for {
+			msgs, err := s.Receive()
+			if err != nil {
+				if cberr != nil {
+					cberr(err)
+				}
+				return
+			}
+			for _, m := range msgs {
+				route, err := deserializeRoute(m.Data)
+				if err != nil {
+					if cberr != nil {
+						cberr(err)
+					}
+					return
+				}
+				ch <- RouteUpdate{Type: m.Header.Type, Route: route}
+			}
+		}
+	}()
+
+	return nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/route_unspecified.go b/vendor/github.com/vishvananda/netlink/route_unspecified.go
new file mode 100644
index 0000000000..2701862b4b
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/route_unspecified.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package netlink
+
+func (r *Route) ListFlags() []string {
+	return []string{}
+}
+
+func (n *NexthopInfo) ListFlags() []string {
+	return []string{}
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule.go b/vendor/github.com/vishvananda/netlink/rule.go
new file mode 100644
index 0000000000..e4d9168d6c
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rule.go
@@ -0,0 +1,41 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+// Rule represents a netlink rule.
+type Rule struct {
+	Priority          int
+	Family            int
+	Table             int
+	Mark              int
+	Mask              int
+	TunID             uint
+	Goto              int
+	Src               *net.IPNet
+	Dst               *net.IPNet
+	Flow              int
+	IifName           string
+	OifName           string
+	SuppressIfgroup   int
+	SuppressPrefixlen int
+}
+
+func (r Rule) String() string {
+	return fmt.Sprintf("ip rule %d: from %s table %d", r.Priority, r.Src, r.Table)
+}
+
+// NewRule returns a Rule with all optional fields set to their unset sentinels.
+func NewRule() *Rule {
+	return &Rule{
+		SuppressIfgroup:   -1,
+		SuppressPrefixlen: -1,
+		Priority:          -1,
+		Mark:              -1,
+		Mask:              -1,
+		Goto:              -1,
+		Flow:              -1,
+	}
+}
diff --git a/vendor/github.com/vishvananda/netlink/rule_linux.go b/vendor/github.com/vishvananda/netlink/rule_linux.go
new file mode 100644
index 0000000000..cbd91a56bb
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/rule_linux.go
@@ -0,0 +1,224 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+// RuleAdd adds a rule to the system.
+// Equivalent to: ip rule add
+func RuleAdd(rule *Rule) error {
+	return pkgHandle.RuleAdd(rule)
+}
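With the Rule type, NewRule, and the RuleAdd wrapper above in place, a typical call site looks roughly like the following sketch (not part of the vendored source; the prefix, table number, and preference are illustrative):

package main

import (
	"net"

	"github.com/vishvananda/netlink"
)

// addSourceRule is a hypothetical helper, equivalent to:
// ip rule add from 192.0.2.0/24 lookup 100 pref 32000
func addSourceRule() error {
	_, prefix, err := net.ParseCIDR("192.0.2.0/24")
	if err != nil {
		return err
	}
	rule := netlink.NewRule() // unset fields stay at their -1 sentinels
	rule.Src = prefix
	rule.Table = 100
	rule.Priority = 32000
	return netlink.RuleAdd(rule)
}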
+
+// RuleAdd adds a rule to the system.
+// Equivalent to: ip rule add
+func (h *Handle) RuleAdd(rule *Rule) error {
+	req := h.newNetlinkRequest(syscall.RTM_NEWRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+	return ruleHandle(rule, req)
+}
+
+// RuleDel deletes a rule from the system.
+// Equivalent to: ip rule del
+func RuleDel(rule *Rule) error {
+	return pkgHandle.RuleDel(rule)
+}
+
+// RuleDel deletes a rule from the system.
+// Equivalent to: ip rule del
+func (h *Handle) RuleDel(rule *Rule) error {
+	req := h.newNetlinkRequest(syscall.RTM_DELRULE, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+	return ruleHandle(rule, req)
+}
+
+func ruleHandle(rule *Rule, req *nl.NetlinkRequest) error {
+	msg := nl.NewRtMsg()
+	msg.Family = syscall.AF_INET
+	if rule.Family != 0 {
+		msg.Family = uint8(rule.Family)
+	}
+	var dstFamily uint8
+
+	var rtAttrs []*nl.RtAttr
+	if rule.Dst != nil && rule.Dst.IP != nil {
+		dstLen, _ := rule.Dst.Mask.Size()
+		msg.Dst_len = uint8(dstLen)
+		msg.Family = uint8(nl.GetIPFamily(rule.Dst.IP))
+		dstFamily = msg.Family
+		var dstData []byte
+		if msg.Family == syscall.AF_INET {
+			dstData = rule.Dst.IP.To4()
+		} else {
+			dstData = rule.Dst.IP.To16()
+		}
+		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_DST, dstData))
+	}
+
+	if rule.Src != nil && rule.Src.IP != nil {
+		msg.Family = uint8(nl.GetIPFamily(rule.Src.IP))
+		if dstFamily != 0 && dstFamily != msg.Family {
+			return fmt.Errorf("source and destination ip are not the same IP family")
+		}
+		srcLen, _ := rule.Src.Mask.Size()
+		msg.Src_len = uint8(srcLen)
+		var srcData []byte
+		if msg.Family == syscall.AF_INET {
+			srcData = rule.Src.IP.To4()
+		} else {
+			srcData = rule.Src.IP.To16()
+		}
+		rtAttrs = append(rtAttrs, nl.NewRtAttr(syscall.RTA_SRC, srcData))
+	}
+
+	if rule.Table >= 0 {
+		msg.Table = uint8(rule.Table)
+		if rule.Table >= 256 {
+			msg.Table = syscall.RT_TABLE_UNSPEC
+		}
+	}
+
+	req.AddData(msg)
+	for i := range rtAttrs {
+		req.AddData(rtAttrs[i])
+	}
+
+	native := nl.NativeEndian()
+
+	if rule.Priority >= 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Priority))
+		req.AddData(nl.NewRtAttr(nl.FRA_PRIORITY, b))
+	}
+	if rule.Mark >= 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Mark))
+		req.AddData(nl.NewRtAttr(nl.FRA_FWMARK, b))
+	}
+	if rule.Mask >= 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Mask))
+		req.AddData(nl.NewRtAttr(nl.FRA_FWMASK, b))
+	}
+	if rule.Flow >= 0 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Flow))
+		req.AddData(nl.NewRtAttr(nl.FRA_FLOW, b))
+	}
+	if rule.TunID > 0 {
+		b := make([]byte, 8) // FRA_TUN_ID carries a 64-bit id
+		native.PutUint64(b, uint64(rule.TunID))
+		req.AddData(nl.NewRtAttr(nl.FRA_TUN_ID, b))
+	}
+	if rule.Table >= 256 {
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Table))
+		req.AddData(nl.NewRtAttr(nl.FRA_TABLE, b))
+	}
+	if msg.Table > 0 {
+		if rule.SuppressPrefixlen >= 0 {
+			b := make([]byte, 4)
+			native.PutUint32(b, uint32(rule.SuppressPrefixlen))
+			req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_PREFIXLEN, b))
+		}
+		if rule.SuppressIfgroup >= 0 {
+			b := make([]byte, 4)
+			native.PutUint32(b, uint32(rule.SuppressIfgroup))
+			req.AddData(nl.NewRtAttr(nl.FRA_SUPPRESS_IFGROUP, b))
+		}
+	}
+	if rule.IifName != "" {
+		req.AddData(nl.NewRtAttr(nl.FRA_IIFNAME, []byte(rule.IifName)))
+	}
+	if rule.OifName != "" {
+		req.AddData(nl.NewRtAttr(nl.FRA_OIFNAME, []byte(rule.OifName)))
+	}
+	if rule.Goto >= 0 {
+		msg.Type = nl.FR_ACT_NOP
+		b := make([]byte, 4)
+		native.PutUint32(b, uint32(rule.Goto))
+		req.AddData(nl.NewRtAttr(nl.FRA_GOTO, b))
+	}
+
+	_, err := req.Execute(syscall.NETLINK_ROUTE, 0)
+	return err
+}
+
+// RuleList lists rules in the system.
+// Equivalent to: ip rule list
+func RuleList(family int) ([]Rule, error) {
+	return pkgHandle.RuleList(family)
+}
+
+// RuleList lists rules in the system.
+// Equivalent to: ip rule list
+func (h *Handle) RuleList(family int) ([]Rule, error) {
+	req := h.newNetlinkRequest(syscall.RTM_GETRULE, syscall.NLM_F_DUMP|syscall.NLM_F_REQUEST)
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_ROUTE, syscall.RTM_NEWRULE)
+	if err != nil {
+		return nil, err
+	}
+
+	native := nl.NativeEndian()
+	var res = make([]Rule, 0)
+	for i := range msgs {
+		msg := nl.DeserializeRtMsg(msgs[i])
+		attrs, err := nl.ParseRouteAttr(msgs[i][msg.Len():])
+		if err != nil {
+			return nil, err
+		}
+
+		rule := NewRule()
+
+		for j := range attrs {
+			switch attrs[j].Attr.Type {
+			case syscall.RTA_TABLE:
+				rule.Table = int(native.Uint32(attrs[j].Value[0:4]))
+			case nl.FRA_SRC:
+				rule.Src = &net.IPNet{
+					IP:   attrs[j].Value,
+					Mask: net.CIDRMask(int(msg.Src_len), 8*len(attrs[j].Value)),
+				}
+			case nl.FRA_DST:
+				rule.Dst = &net.IPNet{
+					IP:   attrs[j].Value,
+					Mask: net.CIDRMask(int(msg.Dst_len), 8*len(attrs[j].Value)),
+				}
+			case nl.FRA_FWMARK:
+				rule.Mark = int(native.Uint32(attrs[j].Value[0:4]))
+			case nl.FRA_FWMASK:
+				rule.Mask = int(native.Uint32(attrs[j].Value[0:4]))
+			case nl.FRA_TUN_ID:
+				rule.TunID = uint(native.Uint64(attrs[j].Value[0:8])) // 64-bit attribute; reading 4 bytes would panic
+			case nl.FRA_IIFNAME:
+				rule.IifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
+			case nl.FRA_OIFNAME:
+				rule.OifName = string(attrs[j].Value[:len(attrs[j].Value)-1])
+			case nl.FRA_SUPPRESS_PREFIXLEN:
+				i := native.Uint32(attrs[j].Value[0:4])
+				if i != 0xffffffff {
+					rule.SuppressPrefixlen = int(i)
+				}
+			case nl.FRA_SUPPRESS_IFGROUP:
+				i := native.Uint32(attrs[j].Value[0:4])
+				if i != 0xffffffff {
+					rule.SuppressIfgroup = int(i)
+				}
+			case nl.FRA_FLOW:
+				rule.Flow = int(native.Uint32(attrs[j].Value[0:4]))
+			case nl.FRA_GOTO:
+				rule.Goto = int(native.Uint32(attrs[j].Value[0:4]))
+			case nl.FRA_PRIORITY:
+				rule.Priority = int(native.Uint32(attrs[j].Value[0:4]))
+			}
+		}
+		res = append(res, *rule)
+	}
+
+	return res, nil
+}
diff --git a/vendor/github.com/vishvananda/netlink/socket.go b/vendor/github.com/vishvananda/netlink/socket.go
new file mode 100644
index 0000000000..41aa726245
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/socket.go
@@ -0,0 +1,27 @@
+package netlink
+
+import "net"
+
+// SocketID identifies a single socket.
+type SocketID struct {
+	SourcePort      uint16
+	DestinationPort uint16
+	Source          net.IP
+	Destination     net.IP
+	Interface       uint32
+	Cookie          [2]uint32
+}
+
+// Socket represents a netlink socket.
+type Socket struct { + Family uint8 + State uint8 + Timer uint8 + Retrans uint8 + ID SocketID + Expires uint32 + RQueue uint32 + WQueue uint32 + UID uint32 + INode uint32 +} diff --git a/vendor/github.com/vishvananda/netlink/socket_linux.go b/vendor/github.com/vishvananda/netlink/socket_linux.go new file mode 100644 index 0000000000..b42b84f0cf --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/socket_linux.go @@ -0,0 +1,159 @@ +package netlink + +import ( + "errors" + "fmt" + "net" + "syscall" + + "github.com/vishvananda/netlink/nl" +) + +const ( + sizeofSocketID = 0x30 + sizeofSocketRequest = sizeofSocketID + 0x8 + sizeofSocket = sizeofSocketID + 0x18 +) + +type socketRequest struct { + Family uint8 + Protocol uint8 + Ext uint8 + pad uint8 + States uint32 + ID SocketID +} + +type writeBuffer struct { + Bytes []byte + pos int +} + +func (b *writeBuffer) Write(c byte) { + b.Bytes[b.pos] = c + b.pos++ +} + +func (b *writeBuffer) Next(n int) []byte { + s := b.Bytes[b.pos : b.pos+n] + b.pos += n + return s +} + +func (r *socketRequest) Serialize() []byte { + b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)} + b.Write(r.Family) + b.Write(r.Protocol) + b.Write(r.Ext) + b.Write(r.pad) + native.PutUint32(b.Next(4), r.States) + networkOrder.PutUint16(b.Next(2), r.ID.SourcePort) + networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort) + copy(b.Next(4), r.ID.Source.To4()) + b.Next(12) + copy(b.Next(4), r.ID.Destination.To4()) + b.Next(12) + native.PutUint32(b.Next(4), r.ID.Interface) + native.PutUint32(b.Next(4), r.ID.Cookie[0]) + native.PutUint32(b.Next(4), r.ID.Cookie[1]) + return b.Bytes +} + +func (r *socketRequest) Len() int { return sizeofSocketRequest } + +type readBuffer struct { + Bytes []byte + pos int +} + +func (b *readBuffer) Read() byte { + c := b.Bytes[b.pos] + b.pos++ + return c +} + +func (b *readBuffer) Next(n int) []byte { + s := b.Bytes[b.pos : b.pos+n] + b.pos += n + return s +} + +func (s *Socket) deserialize(b []byte) error { + if len(b) < sizeofSocket { + return fmt.Errorf("socket data short read (%d); want %d", len(b), sizeofSocket) + } + rb := readBuffer{Bytes: b} + s.Family = rb.Read() + s.State = rb.Read() + s.Timer = rb.Read() + s.Retrans = rb.Read() + s.ID.SourcePort = networkOrder.Uint16(rb.Next(2)) + s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2)) + s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read()) + rb.Next(12) + s.ID.Interface = native.Uint32(rb.Next(4)) + s.ID.Cookie[0] = native.Uint32(rb.Next(4)) + s.ID.Cookie[1] = native.Uint32(rb.Next(4)) + s.Expires = native.Uint32(rb.Next(4)) + s.RQueue = native.Uint32(rb.Next(4)) + s.WQueue = native.Uint32(rb.Next(4)) + s.UID = native.Uint32(rb.Next(4)) + s.INode = native.Uint32(rb.Next(4)) + return nil +} + +// SocketGet returns the Socket identified by its local and remote addresses. 
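SocketGet, implemented below, only supports established IPv4 TCP endpoints in this version. A sketch of a typical call site (not part of the vendored source; socketInode is a hypothetical helper):

// socketInode resolves the kernel inode backing an established
// IPv4 TCP connection via the sock_diag netlink interface.
func socketInode(conn *net.TCPConn) (uint32, error) {
	s, err := netlink.SocketGet(conn.LocalAddr(), conn.RemoteAddr())
	if err != nil {
		return 0, err
	}
	return s.INode, nil
}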
+func SocketGet(local, remote net.Addr) (*Socket, error) { + localTCP, ok := local.(*net.TCPAddr) + if !ok { + return nil, ErrNotImplemented + } + remoteTCP, ok := remote.(*net.TCPAddr) + if !ok { + return nil, ErrNotImplemented + } + localIP := localTCP.IP.To4() + if localIP == nil { + return nil, ErrNotImplemented + } + remoteIP := remoteTCP.IP.To4() + if remoteIP == nil { + return nil, ErrNotImplemented + } + + s, err := nl.Subscribe(syscall.NETLINK_INET_DIAG) + if err != nil { + return nil, err + } + defer s.Close() + req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, 0) + req.AddData(&socketRequest{ + Family: syscall.AF_INET, + Protocol: syscall.IPPROTO_TCP, + ID: SocketID{ + SourcePort: uint16(localTCP.Port), + DestinationPort: uint16(remoteTCP.Port), + Source: localIP, + Destination: remoteIP, + Cookie: [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE}, + }, + }) + s.Send(req) + msgs, err := s.Receive() + if err != nil { + return nil, err + } + if len(msgs) == 0 { + return nil, errors.New("no message nor error from netlink") + } + if len(msgs) > 2 { + return nil, fmt.Errorf("multiple (%d) matching sockets", len(msgs)) + } + sock := &Socket{} + if err := sock.deserialize(msgs[0].Data); err != nil { + return nil, err + } + return sock, nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm.go b/vendor/github.com/vishvananda/netlink/xfrm.go new file mode 100644 index 0000000000..9962dcf700 --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm.go @@ -0,0 +1,74 @@ +package netlink + +import ( + "fmt" + "syscall" +) + +// Proto is an enum representing an ipsec protocol. +type Proto uint8 + +const ( + XFRM_PROTO_ROUTE2 Proto = syscall.IPPROTO_ROUTING + XFRM_PROTO_ESP Proto = syscall.IPPROTO_ESP + XFRM_PROTO_AH Proto = syscall.IPPROTO_AH + XFRM_PROTO_HAO Proto = syscall.IPPROTO_DSTOPTS + XFRM_PROTO_COMP Proto = 0x6c // NOTE not defined on darwin + XFRM_PROTO_IPSEC_ANY Proto = syscall.IPPROTO_RAW +) + +func (p Proto) String() string { + switch p { + case XFRM_PROTO_ROUTE2: + return "route2" + case XFRM_PROTO_ESP: + return "esp" + case XFRM_PROTO_AH: + return "ah" + case XFRM_PROTO_HAO: + return "hao" + case XFRM_PROTO_COMP: + return "comp" + case XFRM_PROTO_IPSEC_ANY: + return "ipsec-any" + } + return fmt.Sprintf("%d", p) +} + +// Mode is an enum representing an ipsec transport. 
+type Mode uint8
+
+const (
+	XFRM_MODE_TRANSPORT Mode = iota
+	XFRM_MODE_TUNNEL
+	XFRM_MODE_ROUTEOPTIMIZATION
+	XFRM_MODE_IN_TRIGGER
+	XFRM_MODE_BEET
+	XFRM_MODE_MAX
+)
+
+func (m Mode) String() string {
+	switch m {
+	case XFRM_MODE_TRANSPORT:
+		return "transport"
+	case XFRM_MODE_TUNNEL:
+		return "tunnel"
+	case XFRM_MODE_ROUTEOPTIMIZATION:
+		return "ro"
+	case XFRM_MODE_IN_TRIGGER:
+		return "in_trigger"
+	case XFRM_MODE_BEET:
+		return "beet"
+	}
+	return fmt.Sprintf("%d", m)
+}
+
+// XfrmMark represents the mark associated to the state or policy
+type XfrmMark struct {
+	Value uint32
+	Mask  uint32
+}
+
+func (m *XfrmMark) String() string {
+	return fmt.Sprintf("(0x%x,0x%x)", m.Value, m.Mask)
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
new file mode 100644
index 0000000000..7b98c9cb6d
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_monitor_linux.go
@@ -0,0 +1,98 @@
+package netlink
+
+import (
+	"fmt"
+	"syscall"
+
+	"github.com/vishvananda/netns"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+type XfrmMsg interface {
+	Type() nl.XfrmMsgType
+}
+
+type XfrmMsgExpire struct {
+	XfrmState *XfrmState
+	Hard      bool
+}
+
+func (ue *XfrmMsgExpire) Type() nl.XfrmMsgType {
+	return nl.XFRM_MSG_EXPIRE
+}
+
+func parseXfrmMsgExpire(b []byte) *XfrmMsgExpire {
+	var e XfrmMsgExpire
+
+	msg := nl.DeserializeXfrmUserExpire(b)
+	e.XfrmState = xfrmStateFromXfrmUsersaInfo(&msg.XfrmUsersaInfo)
+	e.Hard = msg.Hard == 1
+
+	return &e
+}
+
+func XfrmMonitor(ch chan<- XfrmMsg, done <-chan struct{}, errorChan chan<- error,
+	types ...nl.XfrmMsgType) error {
+
+	groups, err := xfrmMcastGroups(types)
+	if err != nil {
+		return err // propagate the error instead of silently returning nil
+	}
+	s, err := nl.SubscribeAt(netns.None(), netns.None(), syscall.NETLINK_XFRM, groups...)
+	if err != nil {
+		return err
+	}
+
+	if done != nil {
+		go func() {
+			<-done
+			s.Close()
+		}()
+
+	}
+
+	go func() {
+		defer close(ch)
+		for {
+			msgs, err := s.Receive()
+			if err != nil {
+				errorChan <- err
+				return
+			}
+			for _, m := range msgs {
+				switch m.Header.Type {
+				case nl.XFRM_MSG_EXPIRE:
+					ch <- parseXfrmMsgExpire(m.Data)
+				default:
+					errorChan <- fmt.Errorf("unsupported msg type: %x", m.Header.Type)
+				}
+			}
+		}
+	}()
+
+	return nil
+}
+
+func xfrmMcastGroups(types []nl.XfrmMsgType) ([]uint, error) {
+	groups := make([]uint, 0)
+
+	if len(types) == 0 {
+		return nil, fmt.Errorf("no xfrm msg type specified")
+	}
+
+	for _, t := range types {
+		var group uint
+
+		switch t {
+		case nl.XFRM_MSG_EXPIRE:
+			group = nl.XFRMNLGRP_EXPIRE
+		default:
+			return nil, fmt.Errorf("unsupported group: %x", t)
+		}
+
+		groups = append(groups, group)
+	}
+
+	return groups, nil
+}
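A sketch of driving XfrmMonitor above (not part of the vendored source). Since XFRM_MSG_EXPIRE is the only supported type, the type assertion on the received message is safe in this version:

package main

import (
	"fmt"

	"github.com/vishvananda/netlink"
	"github.com/vishvananda/netlink/nl"
)

func waitForExpire() error {
	msgs := make(chan netlink.XfrmMsg)
	errs := make(chan error, 1)
	done := make(chan struct{})
	defer close(done)

	if err := netlink.XfrmMonitor(msgs, done, errs, nl.XFRM_MSG_EXPIRE); err != nil {
		return err
	}
	select {
	case m := <-msgs:
		exp := m.(*netlink.XfrmMsgExpire) // only expire messages were subscribed
		fmt.Printf("SA expired (hard=%t): %v\n", exp.Hard, exp.XfrmState)
		return nil
	case err := <-errs:
		return err
	}
}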
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy.go b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
new file mode 100644
index 0000000000..c97ec43a25
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy.go
@@ -0,0 +1,74 @@
+package netlink
+
+import (
+	"fmt"
+	"net"
+)
+
+// Dir is an enum representing an ipsec template direction.
+type Dir uint8
+
+const (
+	XFRM_DIR_IN Dir = iota
+	XFRM_DIR_OUT
+	XFRM_DIR_FWD
+	XFRM_SOCKET_IN
+	XFRM_SOCKET_OUT
+	XFRM_SOCKET_FWD
+)
+
+func (d Dir) String() string {
+	switch d {
+	case XFRM_DIR_IN:
+		return "dir in"
+	case XFRM_DIR_OUT:
+		return "dir out"
+	case XFRM_DIR_FWD:
+		return "dir fwd"
+	case XFRM_SOCKET_IN:
+		return "socket in"
+	case XFRM_SOCKET_OUT:
+		return "socket out"
+	case XFRM_SOCKET_FWD:
+		return "socket fwd"
+	}
+	return fmt.Sprintf("socket %d", d-XFRM_SOCKET_IN)
+}
+
+// XfrmPolicyTmpl encapsulates a rule for the base addresses of an ipsec
+// policy. These rules are matched with XfrmState to determine encryption
+// and authentication algorithms.
+type XfrmPolicyTmpl struct {
+	Dst   net.IP
+	Src   net.IP
+	Proto Proto
+	Mode  Mode
+	Spi   int
+	Reqid int
+}
+
+func (t XfrmPolicyTmpl) String() string {
+	return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, Mode: %s, Spi: 0x%x, Reqid: 0x%x}",
+		t.Dst, t.Src, t.Proto, t.Mode, t.Spi, t.Reqid)
+}
+
+// XfrmPolicy represents an ipsec policy. It represents the overlay network
+// and has a list of XfrmPolicyTmpls representing the base addresses of
+// the policy.
+type XfrmPolicy struct {
+	Dst      *net.IPNet
+	Src      *net.IPNet
+	Proto    Proto
+	DstPort  int
+	SrcPort  int
+	Dir      Dir
+	Priority int
+	Index    int
+	Mark     *XfrmMark
+	Tmpls    []XfrmPolicyTmpl
+}
+
+func (p XfrmPolicy) String() string {
+	return fmt.Sprintf("{Dst: %v, Src: %v, Proto: %s, DstPort: %d, SrcPort: %d, Dir: %s, Priority: %d, Index: %d, Mark: %s, Tmpls: %s}",
+		p.Dst, p.Src, p.Proto, p.DstPort, p.SrcPort, p.Dir, p.Priority, p.Index, p.Mark, p.Tmpls)
+}
diff --git a/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
new file mode 100644
index 0000000000..c3d4e42227
--- /dev/null
+++ b/vendor/github.com/vishvananda/netlink/xfrm_policy_linux.go
@@ -0,0 +1,257 @@
+package netlink
+
+import (
+	"syscall"
+
+	"github.com/vishvananda/netlink/nl"
+)
+
+func selFromPolicy(sel *nl.XfrmSelector, policy *XfrmPolicy) {
+	sel.Family = uint16(nl.FAMILY_V4)
+	if policy.Dst != nil {
+		sel.Family = uint16(nl.GetIPFamily(policy.Dst.IP))
+		sel.Daddr.FromIP(policy.Dst.IP)
+		prefixlenD, _ := policy.Dst.Mask.Size()
+		sel.PrefixlenD = uint8(prefixlenD)
+	}
+	if policy.Src != nil {
+		sel.Saddr.FromIP(policy.Src.IP)
+		prefixlenS, _ := policy.Src.Mask.Size()
+		sel.PrefixlenS = uint8(prefixlenS)
+	}
+	sel.Proto = uint8(policy.Proto)
+	sel.Dport = nl.Swap16(uint16(policy.DstPort))
+	sel.Sport = nl.Swap16(uint16(policy.SrcPort))
+	if sel.Dport != 0 {
+		sel.DportMask = ^uint16(0)
+	}
+	if sel.Sport != 0 {
+		sel.SportMask = ^uint16(0)
+	}
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func XfrmPolicyAdd(policy *XfrmPolicy) error {
+	return pkgHandle.XfrmPolicyAdd(policy)
+}
+
+// XfrmPolicyAdd will add an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy add $policy`
+func (h *Handle) XfrmPolicyAdd(policy *XfrmPolicy) error {
+	return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_NEWPOLICY)
+}
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func XfrmPolicyUpdate(policy *XfrmPolicy) error {
+	return pkgHandle.XfrmPolicyUpdate(policy)
+}
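A sketch of building a policy for the XfrmPolicyAdd/XfrmPolicyUpdate calls above (not part of the vendored source; the subnets, tunnel endpoints, and reqid are illustrative):

func addOutboundTunnelPolicy() error {
	_, src, err := net.ParseCIDR("10.0.1.0/24") // overlay subnets
	if err != nil {
		return err
	}
	_, dst, err := net.ParseCIDR("10.0.2.0/24")
	if err != nil {
		return err
	}
	policy := &netlink.XfrmPolicy{
		Src: src,
		Dst: dst,
		Dir: netlink.XFRM_DIR_OUT,
		Tmpls: []netlink.XfrmPolicyTmpl{{
			Src:   net.ParseIP("192.0.2.1"), // tunnel endpoints
			Dst:   net.ParseIP("192.0.2.2"),
			Proto: netlink.XFRM_PROTO_ESP,
			Mode:  netlink.XFRM_MODE_TUNNEL,
			Reqid: 1, // ties the policy to a matching state
		}},
	}
	return netlink.XfrmPolicyAdd(policy)
}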
+
+// XfrmPolicyUpdate will update an xfrm policy to the system.
+// Equivalent to: `ip xfrm policy update $policy`
+func (h *Handle) XfrmPolicyUpdate(policy *XfrmPolicy) error {
+	return h.xfrmPolicyAddOrUpdate(policy, nl.XFRM_MSG_UPDPOLICY)
+}
+
+func (h *Handle) xfrmPolicyAddOrUpdate(policy *XfrmPolicy, nlProto int) error {
+	req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUserpolicyInfo{}
+	selFromPolicy(&msg.Sel, policy)
+	msg.Priority = uint32(policy.Priority)
+	msg.Index = uint32(policy.Index)
+	msg.Dir = uint8(policy.Dir)
+	msg.Lft.SoftByteLimit = nl.XFRM_INF
+	msg.Lft.HardByteLimit = nl.XFRM_INF
+	msg.Lft.SoftPacketLimit = nl.XFRM_INF
+	msg.Lft.HardPacketLimit = nl.XFRM_INF
+	req.AddData(msg)
+
+	tmplData := make([]byte, nl.SizeofXfrmUserTmpl*len(policy.Tmpls))
+	for i, tmpl := range policy.Tmpls {
+		start := i * nl.SizeofXfrmUserTmpl
+		userTmpl := nl.DeserializeXfrmUserTmpl(tmplData[start : start+nl.SizeofXfrmUserTmpl])
+		userTmpl.XfrmId.Daddr.FromIP(tmpl.Dst)
+		userTmpl.Saddr.FromIP(tmpl.Src)
+		userTmpl.XfrmId.Proto = uint8(tmpl.Proto)
+		userTmpl.XfrmId.Spi = nl.Swap32(uint32(tmpl.Spi))
+		userTmpl.Mode = uint8(tmpl.Mode)
+		userTmpl.Reqid = uint32(tmpl.Reqid)
+		userTmpl.Aalgos = ^uint32(0)
+		userTmpl.Ealgos = ^uint32(0)
+		userTmpl.Calgos = ^uint32(0)
+	}
+	if len(tmplData) > 0 {
+		tmpls := nl.NewRtAttr(nl.XFRMA_TMPL, tmplData)
+		req.AddData(tmpls)
+	}
+	if policy.Mark != nil {
+		out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark))
+		req.AddData(out)
+	}
+
+	_, err := req.Execute(syscall.NETLINK_XFRM, 0)
+	return err
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func XfrmPolicyDel(policy *XfrmPolicy) error {
+	return pkgHandle.XfrmPolicyDel(policy)
+}
+
+// XfrmPolicyDel will delete an xfrm policy from the system. Note that
+// the Tmpls are ignored when matching the policy to delete.
+// Equivalent to: `ip xfrm policy del $policy`
+func (h *Handle) XfrmPolicyDel(policy *XfrmPolicy) error {
+	_, err := h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_DELPOLICY)
+	return err
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+	return pkgHandle.XfrmPolicyList(family)
+}
+
+// XfrmPolicyList gets a list of xfrm policies in the system.
+// Equivalent to: `ip xfrm policy show`.
+// The list can be filtered by ip family.
+func (h *Handle) XfrmPolicyList(family int) ([]XfrmPolicy, error) {
+	req := h.newNetlinkRequest(nl.XFRM_MSG_GETPOLICY, syscall.NLM_F_DUMP)
+
+	msg := nl.NewIfInfomsg(family)
+	req.AddData(msg)
+
+	msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWPOLICY)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []XfrmPolicy
+	for _, m := range msgs {
+		if policy, err := parseXfrmPolicy(m, family); err == nil {
+			res = append(res, *policy)
+		} else if err == familyError {
+			continue
+		} else {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`.
+func XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) {
+	return pkgHandle.XfrmPolicyGet(policy)
+}
+
+// XfrmPolicyGet gets the policy described by the index or selector, if found.
+// Equivalent to: `ip xfrm policy get { SELECTOR | index INDEX } dir DIR [ctx CTX ] [ mark MARK [ mask MASK ] ] [ ptype PTYPE ]`. +func (h *Handle) XfrmPolicyGet(policy *XfrmPolicy) (*XfrmPolicy, error) { + return h.xfrmPolicyGetOrDelete(policy, nl.XFRM_MSG_GETPOLICY) +} + +// XfrmPolicyFlush will flush the policies on the system. +// Equivalent to: `ip xfrm policy flush` +func XfrmPolicyFlush() error { + return pkgHandle.XfrmPolicyFlush() +} + +// XfrmPolicyFlush will flush the policies on the system. +// Equivalent to: `ip xfrm policy flush` +func (h *Handle) XfrmPolicyFlush() error { + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHPOLICY, syscall.NLM_F_ACK) + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +func (h *Handle) xfrmPolicyGetOrDelete(policy *XfrmPolicy, nlProto int) (*XfrmPolicy, error) { + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK) + + msg := &nl.XfrmUserpolicyId{} + selFromPolicy(&msg.Sel, policy) + msg.Index = uint32(policy.Index) + msg.Dir = uint8(policy.Dir) + req.AddData(msg) + + if policy.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(policy.Mark)) + req.AddData(out) + } + + resType := nl.XFRM_MSG_NEWPOLICY + if nlProto == nl.XFRM_MSG_DELPOLICY { + resType = 0 + } + + msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType)) + if err != nil { + return nil, err + } + + if nlProto == nl.XFRM_MSG_DELPOLICY { + return nil, err + } + + p, err := parseXfrmPolicy(msgs[0], FAMILY_ALL) + if err != nil { + return nil, err + } + + return p, nil +} + +func parseXfrmPolicy(m []byte, family int) (*XfrmPolicy, error) { + msg := nl.DeserializeXfrmUserpolicyInfo(m) + + // This is mainly for the policy dump + if family != FAMILY_ALL && family != int(msg.Sel.Family) { + return nil, familyError + } + + var policy XfrmPolicy + + policy.Dst = msg.Sel.Daddr.ToIPNet(msg.Sel.PrefixlenD) + policy.Src = msg.Sel.Saddr.ToIPNet(msg.Sel.PrefixlenS) + policy.Proto = Proto(msg.Sel.Proto) + policy.DstPort = int(nl.Swap16(msg.Sel.Dport)) + policy.SrcPort = int(nl.Swap16(msg.Sel.Sport)) + policy.Priority = int(msg.Priority) + policy.Index = int(msg.Index) + policy.Dir = Dir(msg.Dir) + + attrs, err := nl.ParseRouteAttr(m[msg.Len():]) + if err != nil { + return nil, err + } + + for _, attr := range attrs { + switch attr.Attr.Type { + case nl.XFRMA_TMPL: + max := len(attr.Value) + for i := 0; i < max; i += nl.SizeofXfrmUserTmpl { + var resTmpl XfrmPolicyTmpl + tmpl := nl.DeserializeXfrmUserTmpl(attr.Value[i : i+nl.SizeofXfrmUserTmpl]) + resTmpl.Dst = tmpl.XfrmId.Daddr.ToIP() + resTmpl.Src = tmpl.Saddr.ToIP() + resTmpl.Proto = Proto(tmpl.XfrmId.Proto) + resTmpl.Mode = Mode(tmpl.Mode) + resTmpl.Spi = int(nl.Swap32(tmpl.XfrmId.Spi)) + resTmpl.Reqid = int(tmpl.Reqid) + policy.Tmpls = append(policy.Tmpls, resTmpl) + } + case nl.XFRMA_MARK: + mark := nl.DeserializeXfrmMark(attr.Value[:]) + policy.Mark = new(XfrmMark) + policy.Mark.Value = mark.Value + policy.Mark.Mask = mark.Mask + } + } + + return &policy, nil +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state.go b/vendor/github.com/vishvananda/netlink/xfrm_state.go new file mode 100644 index 0000000000..368a9b986d --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_state.go @@ -0,0 +1,108 @@ +package netlink + +import ( + "fmt" + "net" +) + +// XfrmStateAlgo represents the algorithm to use for the ipsec encryption. 
+type XfrmStateAlgo struct { + Name string + Key []byte + TruncateLen int // Auth only + ICVLen int // AEAD only +} + +func (a XfrmStateAlgo) String() string { + base := fmt.Sprintf("{Name: %s, Key: 0x%x", a.Name, a.Key) + if a.TruncateLen != 0 { + base = fmt.Sprintf("%s, Truncate length: %d", base, a.TruncateLen) + } + if a.ICVLen != 0 { + base = fmt.Sprintf("%s, ICV length: %d", base, a.ICVLen) + } + return fmt.Sprintf("%s}", base) +} + +// EncapType is an enum representing the optional packet encapsulation. +type EncapType uint8 + +const ( + XFRM_ENCAP_ESPINUDP_NONIKE EncapType = iota + 1 + XFRM_ENCAP_ESPINUDP +) + +func (e EncapType) String() string { + switch e { + case XFRM_ENCAP_ESPINUDP_NONIKE: + return "espinudp-non-ike" + case XFRM_ENCAP_ESPINUDP: + return "espinudp" + } + return "unknown" +} + +// XfrmStateEncap represents the encapsulation to use for the ipsec encryption. +type XfrmStateEncap struct { + Type EncapType + SrcPort int + DstPort int + OriginalAddress net.IP +} + +func (e XfrmStateEncap) String() string { + return fmt.Sprintf("{Type: %s, Srcport: %d, DstPort: %d, OriginalAddress: %v}", + e.Type, e.SrcPort, e.DstPort, e.OriginalAddress) +} + +// XfrmStateLimits represents the configured limits for the state. +type XfrmStateLimits struct { + ByteSoft uint64 + ByteHard uint64 + PacketSoft uint64 + PacketHard uint64 + TimeSoft uint64 + TimeHard uint64 + TimeUseSoft uint64 + TimeUseHard uint64 +} + +// XfrmState represents the state of an ipsec policy. It optionally +// contains an XfrmStateAlgo for encryption and one for authentication. +type XfrmState struct { + Dst net.IP + Src net.IP + Proto Proto + Mode Mode + Spi int + Reqid int + ReplayWindow int + Limits XfrmStateLimits + Mark *XfrmMark + Auth *XfrmStateAlgo + Crypt *XfrmStateAlgo + Aead *XfrmStateAlgo + Encap *XfrmStateEncap + ESN bool +} + +func (sa XfrmState) String() string { + return fmt.Sprintf("Dst: %v, Src: %v, Proto: %s, Mode: %s, SPI: 0x%x, ReqID: 0x%x, ReplayWindow: %d, Mark: %v, Auth: %v, Crypt: %v, Aead: %v, Encap: %v, ESN: %t", + sa.Dst, sa.Src, sa.Proto, sa.Mode, sa.Spi, sa.Reqid, sa.ReplayWindow, sa.Mark, sa.Auth, sa.Crypt, sa.Aead, sa.Encap, sa.ESN) +} +func (sa XfrmState) Print(stats bool) string { + if !stats { + return sa.String() + } + + return fmt.Sprintf("%s, ByteSoft: %s, ByteHard: %s, PacketSoft: %s, PacketHard: %s, TimeSoft: %d, TimeHard: %d, TimeUseSoft: %d, TimeUseHard: %d", + sa.String(), printLimit(sa.Limits.ByteSoft), printLimit(sa.Limits.ByteHard), printLimit(sa.Limits.PacketSoft), printLimit(sa.Limits.PacketHard), + sa.Limits.TimeSoft, sa.Limits.TimeHard, sa.Limits.TimeUseSoft, sa.Limits.TimeUseHard) +} + +func printLimit(lmt uint64) string { + if lmt == ^uint64(0) { + return "(INF)" + } + return fmt.Sprintf("%d", lmt) +} diff --git a/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go new file mode 100644 index 0000000000..6a7bc0deca --- /dev/null +++ b/vendor/github.com/vishvananda/netlink/xfrm_state_linux.go @@ -0,0 +1,444 @@ +package netlink + +import ( + "fmt" + "syscall" + "unsafe" + + "github.com/vishvananda/netlink/nl" +) + +func writeStateAlgo(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgo{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeStateAlgoAuth(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgoAuth{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgTruncLen: 
uint32(a.TruncateLen), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeStateAlgoAead(a *XfrmStateAlgo) []byte { + algo := nl.XfrmAlgoAEAD{ + AlgKeyLen: uint32(len(a.Key) * 8), + AlgICVLen: uint32(a.ICVLen), + AlgKey: a.Key, + } + end := len(a.Name) + if end > 64 { + end = 64 + } + copy(algo.AlgName[:end], a.Name) + return algo.Serialize() +} + +func writeMark(m *XfrmMark) []byte { + mark := &nl.XfrmMark{ + Value: m.Value, + Mask: m.Mask, + } + if mark.Mask == 0 { + mark.Mask = ^uint32(0) + } + return mark.Serialize() +} + +func writeReplayEsn(replayWindow int) []byte { + replayEsn := &nl.XfrmReplayStateEsn{ + OSeq: 0, + Seq: 0, + OSeqHi: 0, + SeqHi: 0, + ReplayWindow: uint32(replayWindow), + } + + // taken from iproute2/ip/xfrm_state.c: + replayEsn.BmpLen = uint32((replayWindow + (4 * 8) - 1) / (4 * 8)) + + return replayEsn.Serialize() +} + +// XfrmStateAdd will add an xfrm state to the system. +// Equivalent to: `ip xfrm state add $state` +func XfrmStateAdd(state *XfrmState) error { + return pkgHandle.XfrmStateAdd(state) +} + +// XfrmStateAdd will add an xfrm state to the system. +// Equivalent to: `ip xfrm state add $state` +func (h *Handle) XfrmStateAdd(state *XfrmState) error { + return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_NEWSA) +} + +// XfrmStateAllocSpi will allocate an xfrm state in the system. +// Equivalent to: `ip xfrm state allocspi` +func XfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { + return pkgHandle.xfrmStateAllocSpi(state) +} + +// XfrmStateUpdate will update an xfrm state to the system. +// Equivalent to: `ip xfrm state update $state` +func XfrmStateUpdate(state *XfrmState) error { + return pkgHandle.XfrmStateUpdate(state) +} + +// XfrmStateUpdate will update an xfrm state to the system. 
+// Equivalent to: `ip xfrm state update $state` +func (h *Handle) XfrmStateUpdate(state *XfrmState) error { + return h.xfrmStateAddOrUpdate(state, nl.XFRM_MSG_UPDSA) +} + +func (h *Handle) xfrmStateAddOrUpdate(state *XfrmState, nlProto int) error { + + // A state with spi 0 can't be deleted so don't allow it to be set + if state.Spi == 0 { + return fmt.Errorf("Spi must be set when adding xfrm state.") + } + req := h.newNetlinkRequest(nlProto, syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := xfrmUsersaInfoFromXfrmState(state) + + if state.ESN { + if state.ReplayWindow == 0 { + return fmt.Errorf("ESN flag set without ReplayWindow") + } + msg.Flags |= nl.XFRM_STATE_ESN + msg.ReplayWindow = 0 + } + + limitsToLft(state.Limits, &msg.Lft) + req.AddData(msg) + + if state.Auth != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_AUTH_TRUNC, writeStateAlgoAuth(state.Auth)) + req.AddData(out) + } + if state.Crypt != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_CRYPT, writeStateAlgo(state.Crypt)) + req.AddData(out) + } + if state.Aead != nil { + out := nl.NewRtAttr(nl.XFRMA_ALG_AEAD, writeStateAlgoAead(state.Aead)) + req.AddData(out) + } + if state.Encap != nil { + encapData := make([]byte, nl.SizeofXfrmEncapTmpl) + encap := nl.DeserializeXfrmEncapTmpl(encapData) + encap.EncapType = uint16(state.Encap.Type) + encap.EncapSport = nl.Swap16(uint16(state.Encap.SrcPort)) + encap.EncapDport = nl.Swap16(uint16(state.Encap.DstPort)) + encap.EncapOa.FromIP(state.Encap.OriginalAddress) + out := nl.NewRtAttr(nl.XFRMA_ENCAP, encapData) + req.AddData(out) + } + if state.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) + req.AddData(out) + } + if state.ESN { + out := nl.NewRtAttr(nl.XFRMA_REPLAY_ESN_VAL, writeReplayEsn(state.ReplayWindow)) + req.AddData(out) + } + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + return err +} + +func (h *Handle) xfrmStateAllocSpi(state *XfrmState) (*XfrmState, error) { + req := h.newNetlinkRequest(nl.XFRM_MSG_ALLOCSPI, + syscall.NLM_F_CREATE|syscall.NLM_F_EXCL|syscall.NLM_F_ACK) + + msg := &nl.XfrmUserSpiInfo{} + msg.XfrmUsersaInfo = *(xfrmUsersaInfoFromXfrmState(state)) + // 1-255 is reserved by IANA for future use + msg.Min = 0x100 + msg.Max = 0xffffffff + req.AddData(msg) + + if state.Mark != nil { + out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark)) + req.AddData(out) + } + + msgs, err := req.Execute(syscall.NETLINK_XFRM, 0) + if err != nil { + return nil, err + } + + s, err := parseXfrmState(msgs[0], FAMILY_ALL) + if err != nil { + return nil, err + } + + return s, err +} + +// XfrmStateDel will delete an xfrm state from the system. Note that +// the Algos are ignored when matching the state to delete. +// Equivalent to: `ip xfrm state del $state` +func XfrmStateDel(state *XfrmState) error { + return pkgHandle.XfrmStateDel(state) +} + +// XfrmStateDel will delete an xfrm state from the system. Note that +// the Algos are ignored when matching the state to delete. +// Equivalent to: `ip xfrm state del $state` +func (h *Handle) XfrmStateDel(state *XfrmState) error { + _, err := h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_DELSA) + return err +} + +// XfrmStateList gets a list of xfrm states in the system. +// Equivalent to: `ip [-4|-6] xfrm state show`. +// The list can be filtered by ip family. +func XfrmStateList(family int) ([]XfrmState, error) { + return pkgHandle.XfrmStateList(family) +} + +// XfrmStateList gets a list of xfrm states in the system. +// Equivalent to: `ip xfrm state show`. +// The list can be filtered by ip family. 
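The add/update path above enforces a non-zero SPI, and a ReplayWindow whenever ESN is set. A sketch of a complete state for XfrmStateAdd, placed here before the list/get plumbing below (not part of the vendored source; addresses, SPI, and key are illustrative):

// key must match the algorithm: rfc4106(gcm(aes)) with AES-128 takes
// 20 bytes (a 16-byte key plus a 4-byte salt).
func addEspState(key []byte) error {
	state := &netlink.XfrmState{
		Src:   net.ParseIP("192.0.2.1"),
		Dst:   net.ParseIP("192.0.2.2"),
		Proto: netlink.XFRM_PROTO_ESP,
		Mode:  netlink.XFRM_MODE_TUNNEL,
		Spi:   0x100, // must be non-zero, as enforced above
		Reqid: 1,     // pairs with the policy template's Reqid
		Aead: &netlink.XfrmStateAlgo{
			Name:   "rfc4106(gcm(aes))",
			Key:    key,
			ICVLen: 128,
		},
	}
	return netlink.XfrmStateAdd(state)
}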
+func (h *Handle) XfrmStateList(family int) ([]XfrmState, error) {
+	req := h.newNetlinkRequest(nl.XFRM_MSG_GETSA, syscall.NLM_F_DUMP)
+
+	msgs, err := req.Execute(syscall.NETLINK_XFRM, nl.XFRM_MSG_NEWSA)
+	if err != nil {
+		return nil, err
+	}
+
+	var res []XfrmState
+	for _, m := range msgs {
+		if state, err := parseXfrmState(m, family); err == nil {
+			res = append(res, *state)
+		} else if err == familyError {
+			continue
+		} else {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+	return pkgHandle.XfrmStateGet(state)
+}
+
+// XfrmStateGet gets the xfrm state described by the ID, if found.
+// Equivalent to: `ip xfrm state get ID [ mark MARK [ mask MASK ] ]`.
+// Only the fields which constitute the SA ID must be filled in:
+// ID := [ src ADDR ] [ dst ADDR ] [ proto XFRM-PROTO ] [ spi SPI ]
+// mark is optional
+func (h *Handle) XfrmStateGet(state *XfrmState) (*XfrmState, error) {
+	return h.xfrmStateGetOrDelete(state, nl.XFRM_MSG_GETSA)
+}
+
+func (h *Handle) xfrmStateGetOrDelete(state *XfrmState, nlProto int) (*XfrmState, error) {
+	req := h.newNetlinkRequest(nlProto, syscall.NLM_F_ACK)
+
+	msg := &nl.XfrmUsersaId{}
+	msg.Family = uint16(nl.GetIPFamily(state.Dst))
+	msg.Daddr.FromIP(state.Dst)
+	msg.Proto = uint8(state.Proto)
+	msg.Spi = nl.Swap32(uint32(state.Spi))
+	req.AddData(msg)
+
+	if state.Mark != nil {
+		out := nl.NewRtAttr(nl.XFRMA_MARK, writeMark(state.Mark))
+		req.AddData(out)
+	}
+	if state.Src != nil {
+		out := nl.NewRtAttr(nl.XFRMA_SRCADDR, state.Src.To16())
+		req.AddData(out)
+	}
+
+	resType := nl.XFRM_MSG_NEWSA
+	if nlProto == nl.XFRM_MSG_DELSA {
+		resType = 0
+	}
+
+	msgs, err := req.Execute(syscall.NETLINK_XFRM, uint16(resType))
+	if err != nil {
+		return nil, err
+	}
+
+	if nlProto == nl.XFRM_MSG_DELSA {
+		return nil, nil
+	}
+
+	s, err := parseXfrmState(msgs[0], FAMILY_ALL)
+	if err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+var familyError = fmt.Errorf("family error")
+
+func xfrmStateFromXfrmUsersaInfo(msg *nl.XfrmUsersaInfo) *XfrmState {
+	var state XfrmState
+
+	state.Dst = msg.Id.Daddr.ToIP()
+	state.Src = msg.Saddr.ToIP()
+	state.Proto = Proto(msg.Id.Proto)
+	state.Mode = Mode(msg.Mode)
+	state.Spi = int(nl.Swap32(msg.Id.Spi))
+	state.Reqid = int(msg.Reqid)
+	state.ReplayWindow = int(msg.ReplayWindow)
+	lftToLimits(&msg.Lft, &state.Limits)
+
+	return &state
+}
+
+func parseXfrmState(m []byte, family int) (*XfrmState, error) {
+	msg := nl.DeserializeXfrmUsersaInfo(m)
+
+	// This is mainly for the state dump
+	if family != FAMILY_ALL && family != int(msg.Family) {
+		return nil, familyError
+	}
+
+	state := xfrmStateFromXfrmUsersaInfo(msg)
+
+	attrs, err := nl.ParseRouteAttr(m[nl.SizeofXfrmUsersaInfo:])
+	if err != nil {
+		return nil, err
+	}
+
+	for _, attr := range attrs {
+		switch attr.Attr.Type {
+		case nl.XFRMA_ALG_AUTH, nl.XFRMA_ALG_CRYPT:
+			var resAlgo *XfrmStateAlgo
+			if attr.Attr.Type == nl.XFRMA_ALG_AUTH {
+				if state.Auth == nil {
+					state.Auth = new(XfrmStateAlgo)
+				}
+				resAlgo = state.Auth
+			} else {
+				state.Crypt = new(XfrmStateAlgo)
+				resAlgo = state.Crypt
+			}
+			algo := nl.DeserializeXfrmAlgo(attr.Value[:])
+			(*resAlgo).Name = nl.BytesToString(algo.AlgName[:])
+			(*resAlgo).Key = algo.AlgKey
+ case nl.XFRMA_ALG_AUTH_TRUNC: + if state.Auth == nil { + state.Auth = new(XfrmStateAlgo) + } + algo := nl.DeserializeXfrmAlgoAuth(attr.Value[:]) + state.Auth.Name = nl.BytesToString(algo.AlgName[:]) + state.Auth.Key = algo.AlgKey + state.Auth.TruncateLen = int(algo.AlgTruncLen) + case nl.XFRMA_ALG_AEAD: + state.Aead = new(XfrmStateAlgo) + algo := nl.DeserializeXfrmAlgoAEAD(attr.Value[:]) + state.Aead.Name = nl.BytesToString(algo.AlgName[:]) + state.Aead.Key = algo.AlgKey + state.Aead.ICVLen = int(algo.AlgICVLen) + case nl.XFRMA_ENCAP: + encap := nl.DeserializeXfrmEncapTmpl(attr.Value[:]) + state.Encap = new(XfrmStateEncap) + state.Encap.Type = EncapType(encap.EncapType) + state.Encap.SrcPort = int(nl.Swap16(encap.EncapSport)) + state.Encap.DstPort = int(nl.Swap16(encap.EncapDport)) + state.Encap.OriginalAddress = encap.EncapOa.ToIP() + case nl.XFRMA_MARK: + mark := nl.DeserializeXfrmMark(attr.Value[:]) + state.Mark = new(XfrmMark) + state.Mark.Value = mark.Value + state.Mark.Mask = mark.Mask + } + } + + return state, nil +} + +// XfrmStateFlush will flush the xfrm state on the system. +// proto = 0 means any transformation protocols +// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` +func XfrmStateFlush(proto Proto) error { + return pkgHandle.XfrmStateFlush(proto) +} + +// XfrmStateFlush will flush the xfrm state on the system. +// proto = 0 means any transformation protocols +// Equivalent to: `ip xfrm state flush [ proto XFRM-PROTO ]` +func (h *Handle) XfrmStateFlush(proto Proto) error { + req := h.newNetlinkRequest(nl.XFRM_MSG_FLUSHSA, syscall.NLM_F_ACK) + + req.AddData(&nl.XfrmUsersaFlush{Proto: uint8(proto)}) + + _, err := req.Execute(syscall.NETLINK_XFRM, 0) + if err != nil { + return err + } + + return nil +} + +func limitsToLft(lmts XfrmStateLimits, lft *nl.XfrmLifetimeCfg) { + if lmts.ByteSoft != 0 { + lft.SoftByteLimit = lmts.ByteSoft + } else { + lft.SoftByteLimit = nl.XFRM_INF + } + if lmts.ByteHard != 0 { + lft.HardByteLimit = lmts.ByteHard + } else { + lft.HardByteLimit = nl.XFRM_INF + } + if lmts.PacketSoft != 0 { + lft.SoftPacketLimit = lmts.PacketSoft + } else { + lft.SoftPacketLimit = nl.XFRM_INF + } + if lmts.PacketHard != 0 { + lft.HardPacketLimit = lmts.PacketHard + } else { + lft.HardPacketLimit = nl.XFRM_INF + } + lft.SoftAddExpiresSeconds = lmts.TimeSoft + lft.HardAddExpiresSeconds = lmts.TimeHard + lft.SoftUseExpiresSeconds = lmts.TimeUseSoft + lft.HardUseExpiresSeconds = lmts.TimeUseHard +} + +func lftToLimits(lft *nl.XfrmLifetimeCfg, lmts *XfrmStateLimits) { + *lmts = *(*XfrmStateLimits)(unsafe.Pointer(lft)) +} + +func xfrmUsersaInfoFromXfrmState(state *XfrmState) *nl.XfrmUsersaInfo { + msg := &nl.XfrmUsersaInfo{} + msg.Family = uint16(nl.GetIPFamily(state.Dst)) + msg.Id.Daddr.FromIP(state.Dst) + msg.Saddr.FromIP(state.Src) + msg.Id.Proto = uint8(state.Proto) + msg.Mode = uint8(state.Mode) + msg.Id.Spi = nl.Swap32(uint32(state.Spi)) + msg.Reqid = uint32(state.Reqid) + msg.ReplayWindow = uint8(state.ReplayWindow) + + return msg +} diff --git a/vendor/github.com/vishvananda/netns/LICENSE b/vendor/github.com/vishvananda/netns/LICENSE new file mode 100644 index 0000000000..9f64db8582 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014 Vishvananda Ishaya. + Copyright 2014 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/vishvananda/netns/netns.go b/vendor/github.com/vishvananda/netns/netns.go new file mode 100644 index 0000000000..dd2f21570a --- /dev/null +++ b/vendor/github.com/vishvananda/netns/netns.go @@ -0,0 +1,80 @@ +// Package netns allows ultra-simple network namespace handling. NsHandles +// can be retrieved and set. Note that the current namespace is thread +// local so actions that set and reset namespaces should use LockOSThread +// to make sure the namespace doesn't change due to a goroutine switch. +// It is best to close NsHandles when you are done with them. 
This can be +// accomplished via a `defer ns.Close()` on the handle. Changing namespaces +// requires elevated privileges, so in most cases this code needs to be run +// as root. +package netns + +import ( + "fmt" + "syscall" +) + +// NsHandle is a handle to a network namespace. It can be cast directly +// to an int and used as a file descriptor. +type NsHandle int + +// Equal determines if two network handles refer to the same network +// namespace. This is done by comparing the device and inode that the +// file descriptors point to. +func (ns NsHandle) Equal(other NsHandle) bool { + if ns == other { + return true + } + var s1, s2 syscall.Stat_t + if err := syscall.Fstat(int(ns), &s1); err != nil { + return false + } + if err := syscall.Fstat(int(other), &s2); err != nil { + return false + } + return (s1.Dev == s2.Dev) && (s1.Ino == s2.Ino) +} + +// String shows the file descriptor number and its dev and inode. +func (ns NsHandle) String() string { + var s syscall.Stat_t + if ns == -1 { + return "NS(None)" + } + if err := syscall.Fstat(int(ns), &s); err != nil { + return fmt.Sprintf("NS(%d: unknown)", ns) + } + return fmt.Sprintf("NS(%d: %d, %d)", ns, s.Dev, s.Ino) +} + +// UniqueId returns a string which uniquely identifies the namespace +// associated with the network handle. +func (ns NsHandle) UniqueId() string { + var s syscall.Stat_t + if ns == -1 { + return "NS(none)" + } + if err := syscall.Fstat(int(ns), &s); err != nil { + return "NS(unknown)" + } + return fmt.Sprintf("NS(%d:%d)", s.Dev, s.Ino) +} + +// IsOpen returns true if Close() has not been called. +func (ns NsHandle) IsOpen() bool { + return ns != -1 +} + +// Close closes the NsHandle and resets its file descriptor to -1. +// It is not safe to use an NsHandle after Close() is called. +func (ns *NsHandle) Close() error { + if err := syscall.Close(int(*ns)); err != nil { + return err + } + (*ns) = -1 + return nil +} + +// None gets an empty (closed) NsHandle. +func None() NsHandle { + return NsHandle(-1) +} diff --git a/vendor/github.com/vishvananda/netns/netns_linux.go b/vendor/github.com/vishvananda/netns/netns_linux.go new file mode 100644 index 0000000000..34af6e28b3 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/netns_linux.go @@ -0,0 +1,226 @@ +// +build linux + +package netns + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" +) + +// SYS_SETNS syscall allows changing the namespace of the current process. +var SYS_SETNS = map[string]uintptr{ + "386": 346, + "amd64": 308, + "arm64": 268, + "arm": 375, + "mips": 4344, + "mipsle": 4344, + "ppc64": 350, + "ppc64le": 350, + "s390x": 339, +}[runtime.GOARCH] + +// Deprecated: use syscall pkg instead (go >= 1.5 needed). +const ( + CLONE_NEWUTS = 0x04000000 /* New utsname group? */ + CLONE_NEWIPC = 0x08000000 /* New ipcs */ + CLONE_NEWUSER = 0x10000000 /* New user namespace */ + CLONE_NEWPID = 0x20000000 /* New pid namespace */ + CLONE_NEWNET = 0x40000000 /* New network namespace */ + CLONE_IO = 0x80000000 /* Get io context */ +) + +// Setns sets namespace using syscall. Note that this should be a method +// in syscall but it has not been added. +func Setns(ns NsHandle, nstype int) (err error) { + _, _, e1 := syscall.Syscall(SYS_SETNS, uintptr(ns), uintptr(nstype), 0) + if e1 != 0 { + err = e1 + } + return +} + +// Set sets the current network namespace to the namespace represented +// by NsHandle. 
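+//
+// A minimal usage sketch (an editor's illustration, not part of the upstream file; it assumes Linux and root privileges):
+//
+//	runtime.LockOSThread()          // namespaces are thread-local
+//	defer runtime.UnlockOSThread()
+//	orig, _ := netns.Get()          // remember the original namespace
+//	defer orig.Close()
+//	ns, _ := netns.New()            // create and enter a new namespace
+//	defer ns.Close()
+//	netns.Set(orig)                 // switch back when done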
+func Set(ns NsHandle) (err error) { + return Setns(ns, CLONE_NEWNET) +} + +// New creates a new network namespace and returns a handle to it. +func New() (ns NsHandle, err error) { + if err := syscall.Unshare(CLONE_NEWNET); err != nil { + return -1, err + } + return Get() +} + +// Get gets a handle to the current thread's network namespace. +func Get() (NsHandle, error) { + return GetFromThread(os.Getpid(), syscall.Gettid()) +} + +// GetFromPath gets a handle to a network namespace +// identified by the path. +func GetFromPath(path string) (NsHandle, error) { + fd, err := syscall.Open(path, syscall.O_RDONLY, 0) + if err != nil { + return -1, err + } + return NsHandle(fd), nil +} + +// GetFromName gets a handle to a named network namespace such as one +// created by `ip netns add`. +func GetFromName(name string) (NsHandle, error) { + return GetFromPath(fmt.Sprintf("/var/run/netns/%s", name)) +} + +// GetFromPid gets a handle to the network namespace of a given pid. +func GetFromPid(pid int) (NsHandle, error) { + return GetFromPath(fmt.Sprintf("/proc/%d/ns/net", pid)) +} + +// GetFromThread gets a handle to the network namespace of a given pid and tid. +func GetFromThread(pid, tid int) (NsHandle, error) { + return GetFromPath(fmt.Sprintf("/proc/%d/task/%d/ns/net", pid, tid)) +} + +// GetFromDocker gets a handle to the network namespace of a docker container. +// Id is prefix-matched against the running docker containers, so a short +// identifier can be used as long as it isn't ambiguous. +func GetFromDocker(id string) (NsHandle, error) { + pid, err := getPidForContainer(id) + if err != nil { + return -1, err + } + return GetFromPid(pid) +} + +// borrowed from docker/utils/utils.go +func findCgroupMountpoint(cgroupType string) (string, error) { + output, err := ioutil.ReadFile("/proc/mounts") + if err != nil { + return "", err + } + + // /proc/mounts has 6 fields per line, one mount per line, e.g. + // cgroup /sys/fs/cgroup/devices cgroup rw,relatime,devices 0 0 + for _, line := range strings.Split(string(output), "\n") { + parts := strings.Split(line, " ") + if len(parts) == 6 && parts[2] == "cgroup" { + for _, opt := range strings.Split(parts[3], ",") { + if opt == cgroupType { + return parts[1], nil + } + } + } + } + + return "", fmt.Errorf("cgroup mountpoint not found for %s", cgroupType) +} + +// Returns the relative path to the cgroup docker is running in. +// borrowed from docker/utils/utils.go +// modified to get the docker pid instead of using /proc/self +func getThisCgroup(cgroupType string) (string, error) { + dockerpid, err := ioutil.ReadFile("/var/run/docker.pid") + if err != nil { + return "", err + } + result := strings.Split(string(dockerpid), "\n") + if len(result) == 0 || len(result[0]) == 0 { + return "", fmt.Errorf("docker pid not found in /var/run/docker.pid") + } + pid, err := strconv.Atoi(result[0]) + if err != nil { + return "", err + } + output, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cgroup", pid)) + if err != nil { + return "", err + } + for _, line := range strings.Split(string(output), "\n") { + parts := strings.Split(line, ":") + // any type used by docker should work + if parts[1] == cgroupType { + return parts[2], nil + } + } + return "", fmt.Errorf("cgroup '%s' not found in /proc/%d/cgroup", cgroupType, pid) +} + +// Returns the first pid in a container.
+// borrowed from docker/utils/utils.go +// modified to only return the first pid +// modified to glob with id +// modified to search for newer docker containers +func getPidForContainer(id string) (int, error) { + pid := 0 + + // memory is chosen randomly, any cgroup used by docker works + cgroupType := "memory" + + cgroupRoot, err := findCgroupMountpoint(cgroupType) + if err != nil { + return pid, err + } + + cgroupThis, err := getThisCgroup(cgroupType) + if err != nil { + return pid, err + } + + id += "*" + + attempts := []string{ + filepath.Join(cgroupRoot, cgroupThis, id, "tasks"), + // With more recent lxc versions, cgroup will be in lxc/ + filepath.Join(cgroupRoot, cgroupThis, "lxc", id, "tasks"), + // With more recent docker, cgroup will be in docker/ + filepath.Join(cgroupRoot, cgroupThis, "docker", id, "tasks"), + // Even more recent docker versions under systemd use docker-<id>.scope/ + filepath.Join(cgroupRoot, "system.slice", "docker-"+id+".scope", "tasks"), + // Even more recent docker versions under cgroup/systemd/docker/<id>/ + filepath.Join(cgroupRoot, "..", "systemd", "docker", id, "tasks"), + // Kubernetes with docker and CNI is even more different + filepath.Join(cgroupRoot, "..", "systemd", "kubepods", "*", "pod*", id, "tasks"), + } + + var filename string + for _, attempt := range attempts { + filenames, _ := filepath.Glob(attempt) + if len(filenames) > 1 { + return pid, fmt.Errorf("Ambiguous id supplied: %v", filenames) + } else if len(filenames) == 1 { + filename = filenames[0] + break + } + } + + if filename == "" { + return pid, fmt.Errorf("Unable to find container: %v", id[:len(id)-1]) + } + + output, err := ioutil.ReadFile(filename) + if err != nil { + return pid, err + } + + result := strings.Split(string(output), "\n") + if len(result) == 0 || len(result[0]) == 0 { + return pid, fmt.Errorf("No pid found for container") + } + + pid, err = strconv.Atoi(result[0]) + if err != nil { + return pid, fmt.Errorf("Invalid pid '%s': %s", result[0], err) + } + + return pid, nil +} diff --git a/vendor/github.com/vishvananda/netns/netns_unspecified.go b/vendor/github.com/vishvananda/netns/netns_unspecified.go new file mode 100644 index 0000000000..d06af62b68 --- /dev/null +++ b/vendor/github.com/vishvananda/netns/netns_unspecified.go @@ -0,0 +1,43 @@ +// +build !linux + +package netns + +import ( + "errors" +) + +var ( + ErrNotImplemented = errors.New("not implemented") +) + +func Set(ns NsHandle) (err error) { + return ErrNotImplemented +} + +func New() (ns NsHandle, err error) { + return -1, ErrNotImplemented +} + +func Get() (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromPath(path string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromName(name string) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromPid(pid int) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromThread(pid, tid int) (NsHandle, error) { + return -1, ErrNotImplemented +} + +func GetFromDocker(id string) (NsHandle, error) { + return -1, ErrNotImplemented +} diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index fce1f02dc2..ec8c72dcf1 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License.
-package internal +package internal // import "go.opencensus.io/internal" import "time" diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go index d9f23abee2..3b1af8b4b8 100644 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -15,7 +15,7 @@ // Package tagencoding contains the tag encoding // used internally by the stats collector. -package tagencoding +package tagencoding // import "go.opencensus.io/internal/tagencoding" type Values struct { Buffer []byte diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index ddc99460a2..168d81e759 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -15,7 +15,7 @@ // Package b3 contains a propagation.HTTPFormat implementation // for B3 propagation. See https://github.com/openzipkin/b3-propagation // for more details. -package b3 +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" import ( "encoding/hex" diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index a92c7c1ed6..554e05ffaf 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -76,20 +76,19 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - opts := trace.StartOptions{ - Sampler: h.StartOptions.Sampler, - SpanKind: trace.SpanKindServer, - } - name := spanNameFromURL(r.URL) ctx := r.Context() var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { - span = trace.NewSpanWithRemoteParent(name, sc, opts) - ctx = trace.WithSpan(ctx, span) + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(h.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) } else { - span = trace.NewSpan(name, nil, opts) + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(h.StartOptions.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) if ok { span.AddLink(trace.Link{ TraceID: sc.TraceID, @@ -99,9 +98,8 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ }) } } - ctx = trace.WithSpan(ctx, span) span.AddAttributes(requestAttrs(r)...)
- return r.WithContext(trace.WithSpan(r.Context(), span)), span.End + return r.WithContext(ctx), span.End } func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { @@ -137,6 +135,7 @@ type trackingResponseWriter struct { respSize int64 start time.Time statusCode int + statusLine string endOnce sync.Once writer http.ResponseWriter } @@ -159,6 +158,10 @@ func (t *trackingResponseWriter) end() { if t.statusCode == 0 { t.statusCode = 200 } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + m := []stats.Measurement{ ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), ServerResponseBytes.M(t.respSize), @@ -184,6 +187,7 @@ func (t *trackingResponseWriter) Write(data []byte) (int, error) { func (t *trackingResponseWriter) WriteHeader(statusCode int) { t.writer.WriteHeader(statusCode) t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) } func (t *trackingResponseWriter) Flush() { diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 803a606f23..2bd11f6ddc 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -22,7 +22,7 @@ import ( // The following client HTTP measures are supported for use in custom views. var ( - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitNone) + ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) @@ -30,7 +30,7 @@ var ( // The following server HTTP measures are supported for use in custom views: var ( - ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitNone) + ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index 10d4a70609..80ee86c7a1 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -53,10 +53,11 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { name := spanNameFromURL(req.URL) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. 
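//
// A hedged caller-side sketch (an editor's illustration, assuming the package's exported ochttp.Transport wrapper around this round tripper; the URL is made up):
//
//	client := &http.Client{Transport: &ochttp.Transport{}}
//	resp, err := client.Get("https://example.org/") // each request is traced by this RoundTrip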
- parent := trace.FromContext(req.Context()) - span := trace.NewSpan(name, parent, t.startOptions) - req = req.WithContext(trace.WithSpan(req.Context(), span)) + _, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + req = req.WithContext(trace.WithSpan(req.Context(), span)) if t.format != nil { t.format.SpanContextToRequest(span.SpanContext(), req) } @@ -64,13 +65,13 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { span.AddAttributes(requestAttrs(req)...) resp, err := t.base.RoundTrip(req) if err != nil { - span.SetStatus(trace.Status{Code: 2, Message: err.Error()}) + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) span.End() return resp, err } span.AddAttributes(responseAttrs(resp)...) - span.SetStatus(status(resp.StatusCode)) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) // span.End() will be invoked after // a read from resp.Body returns io.EOF or when @@ -145,71 +146,54 @@ func responseAttrs(resp *http.Response) []trace.Attribute { } } -func status(statusCode int) trace.Status { +// TraceStatus converts the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { var code int32 - if statusCode < 200 || statusCode >= 400 { - code = codeUnknown + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown } - switch statusCode { + switch httpStatusCode { case 499: - code = codeCancelled + code = trace.StatusCodeCancelled case http.StatusBadRequest: - code = codeInvalidArgument + code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: - code = codeDeadlineExceeded + code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: - code = codeNotFound + code = trace.StatusCodeNotFound case http.StatusForbidden: - code = codePermissionDenied + code = trace.StatusCodePermissionDenied case http.StatusUnauthorized: // 401 is actually unauthenticated. - code = codeUnathenticated + code = trace.StatusCodeUnauthenticated case http.StatusTooManyRequests: - code = codeResourceExhausted + code = trace.StatusCodeResourceExhausted case http.StatusNotImplemented: - code = codeUnimplemented + code = trace.StatusCodeUnimplemented case http.StatusServiceUnavailable: - code = codeUnavailable + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK } return trace.Status{Code: code, Message: codeToStr[code]} } -// TODO(jbd): Provide status codes from trace package.
-const ( - codeOK = 0 - codeCancelled = 1 - codeUnknown = 2 - codeInvalidArgument = 3 - codeDeadlineExceeded = 4 - codeNotFound = 5 - codeAlreadyExists = 6 - codePermissionDenied = 7 - codeResourceExhausted = 8 - codeFailedPrecondition = 9 - codeAborted = 10 - codeOutOfRange = 11 - codeUnimplemented = 12 - codeInternal = 13 - codeUnavailable = 14 - codeDataLoss = 15 - codeUnathenticated = 16 -) - var codeToStr = map[int32]string{ - codeOK: `"OK"`, - codeCancelled: `"CANCELLED"`, - codeUnknown: `"UNKNOWN"`, - codeInvalidArgument: `"INVALID_ARGUMENT"`, - codeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, - codeNotFound: `"NOT_FOUND"`, - codeAlreadyExists: `"ALREADY_EXISTS"`, - codePermissionDenied: `"PERMISSION_DENIED"`, - codeResourceExhausted: `"RESOURCE_EXHAUSTED"`, - codeFailedPrecondition: `"FAILED_PRECONDITION"`, - codeAborted: `"ABORTED"`, - codeOutOfRange: `"OUT_OF_RANGE"`, - codeUnimplemented: `"UNIMPLEMENTED"`, - codeInternal: `"INTERNAL"`, - codeUnavailable: `"UNAVAILABLE"`, - codeDataLoss: `"DATA_LOSS"`, - codeUnathenticated: `"UNAUTHENTICATED"`, + trace.StatusCodeOK: `"OK"`, + trace.StatusCodeCancelled: `"CANCELLED"`, + trace.StatusCodeUnknown: `"UNKNOWN"`, + trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`, + trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, + trace.StatusCodeNotFound: `"NOT_FOUND"`, + trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`, + trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`, + trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`, + trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`, + trace.StatusCodeAborted: `"ABORTED"`, + trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`, + trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`, + trace.StatusCodeInternal: `"INTERNAL"`, + trace.StatusCodeUnavailable: `"UNAVAILABLE"`, + trace.StatusCodeDataLoss: `"DATA_LOSS"`, + trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, } diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/stats/internal/validation.go index b962d524f2..b946667f96 100644 --- a/vendor/go.opencensus.io/stats/internal/validation.go +++ b/vendor/go.opencensus.io/stats/internal/validation.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal +package internal // import "go.opencensus.io/stats/internal" const ( MaxNameLength = 255 diff --git a/vendor/go.opencensus.io/stats/units.go b/vendor/go.opencensus.io/stats/units.go index d37e2152fe..6931a5f296 100644 --- a/vendor/go.opencensus.io/stats/units.go +++ b/vendor/go.opencensus.io/stats/units.go @@ -18,7 +18,8 @@ package stats // Units are encoded according to the case-sensitive abbreviations from the // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html const ( - UnitNone = "1" - UnitBytes = "By" - UnitMilliseconds = "ms" + UnitNone = "1" // Deprecated: Use UnitDimensionless. 
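+ // Note: "1" is the UCUM code for a dimensionless quantity.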
+ UnitDimensionless = "1" + UnitBytes = "By" + UnitMilliseconds = "ms" ) diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 2c9b621260..c19de05716 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -86,7 +86,7 @@ func Unregister(views ...*View) { for i := range views { names[i] = views[i].Name } - req := &unsubscribeFromViewReq{ + req := &unregisterFromViewReq{ views: names, done: make(chan struct{}), } diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index 0d244ac7e7..ef79ec383c 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -73,15 +73,15 @@ func (cmd *registerViewReq) handleCommand(w *worker) { } } -// unsubscribeFromViewReq is the command to unsubscribe to a view. Has no +// unregisterFromViewReq is the command to unregister a view. Has no // impact on the data collection for clients that are pulling data from the // library. -type unsubscribeFromViewReq struct { +type unregisterFromViewReq struct { views []string done chan struct{} } -func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) { +func (cmd *unregisterFromViewReq) handleCommand(w *worker) { for _, name := range cmd.views { vi, ok := w.views[name] if !ok { @@ -94,6 +94,7 @@ func (cmd *unsubscribeFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. vi.clearRows() } + delete(w.views, name) } cmd.done <- struct{}{} } diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 2501271ef6..a2b54e58ca 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -13,19 +13,18 @@ // limitations under the License. /* -Package trace contains types for representing trace information, and -functions for global configuration of tracing. +Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io -Enabling Tracing for a Program +Exporting Traces -To use OpenCensus tracing, register at least one Exporter. You can use +To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(anExporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler @@ -43,13 +42,10 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "your choice of name") + ctx, span := trace.StartSpan(ctx, "my.org/Run") defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. -As a suggestion, use the fully-qualified function name as the span name, e.g. -"github.com/me/mypackage.Run".
*/ package trace // import "go.opencensus.io/trace" diff --git a/vendor/go.opencensus.io/trace/propagation/propagation.go b/vendor/go.opencensus.io/trace/propagation/propagation.go index a3b00f474f..1eb190a96a 100644 --- a/vendor/go.opencensus.io/trace/propagation/propagation.go +++ b/vendor/go.opencensus.io/trace/propagation/propagation.go @@ -13,7 +13,7 @@ // limitations under the License. // Package propagation implements the binary trace context format. -package propagation +package propagation // import "go.opencensus.io/trace/propagation" // TODO: link to external spec document. diff --git a/vendor/go.opencensus.io/trace/status_codes.go b/vendor/go.opencensus.io/trace/status_codes.go new file mode 100644 index 0000000000..ec60effd10 --- /dev/null +++ b/vendor/go.opencensus.io/trace/status_codes.go @@ -0,0 +1,37 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +// Status codes for use with Span.SetStatus. These correspond to the status +// codes used by gRPC defined here: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +const ( + StatusCodeOK = 0 + StatusCodeCancelled = 1 + StatusCodeUnknown = 2 + StatusCodeInvalidArgument = 3 + StatusCodeDeadlineExceeded = 4 + StatusCodeNotFound = 5 + StatusCodeAlreadyExists = 6 + StatusCodePermissionDenied = 7 + StatusCodeResourceExhausted = 8 + StatusCodeFailedPrecondition = 9 + StatusCodeAborted = 10 + StatusCodeOutOfRange = 11 + StatusCodeUnimplemented = 12 + StatusCodeInternal = 13 + StatusCodeUnavailable = 14 + StatusCodeDataLoss = 15 + StatusCodeUnauthenticated = 16 +) diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 49a1c01965..19c6930efa 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -43,7 +43,9 @@ type Span struct { spanContext SpanContext // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore - exportOnce sync.Once + endOnce sync.Once + + executionTracerTaskEnd func() // ends the execution tracer span } // IsRecordingEvents returns true if events are being recorded for this span. @@ -97,7 +99,14 @@ func FromContext(ctx context.Context) *Span { } // WithSpan returns a new context with the given Span attached. +// +// Deprecated: Use NewContext. func WithSpan(parent context.Context, s *Span) context.Context { + return NewContext(parent, s) +} + +// NewContext returns a new context with the given Span attached. +func NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) } @@ -125,32 +134,73 @@ type StartOptions struct { SpanKind int } +// StartOption applies changes to StartOptions. +type StartOption func(*StartOptions) + +// WithSpanKind configures new spans to be created with the given kind.
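+//
+// A usage sketch (an editor's illustration; the span name "example.org/Work" and the sampler choice are made up):
+//
+//	ctx, span := trace.StartSpan(ctx, "example.org/Work",
+//		trace.WithSpanKind(trace.SpanKindServer),
+//		trace.WithSampler(trace.AlwaysSample()))
+//	defer span.End()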
+func WithSpanKind(spanKind int) StartOption { + return func(o *StartOptions) { + o.SpanKind = spanKind + } +} + +// WithSampler configures new spans to be created with a custom sampler. +// Otherwise, the global sampler is used. +func WithSampler(sampler Sampler) StartOption { + return func(o *StartOptions) { + o.Sampler = sampler + } +} + // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. +func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + var parent SpanContext + if p := FromContext(ctx); p != nil { + parent = p.spanContext + } + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span +} + +// StartSpanWithRemoteParent starts a new child span of the span from the given parent. // -// This is provided as a convenience for WithSpan(ctx, NewSpan(...)). Use it -// if you require custom spans in addition to the default spans provided by -// ocgrpc, ochttp or similar framework integration. -func StartSpan(ctx context.Context, name string) (context.Context, *Span) { - parentSpan, _ := ctx.Value(contextKey{}).(*Span) - span := NewSpan(name, parentSpan, StartOptions{}) - return WithSpan(ctx, span), span +// If the incoming context contains a parent, it is ignored. StartSpanWithRemoteParent is +// preferred for cases where the parent is propagated via an incoming request. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { + var opts StartOptions + for _, op := range o { + op(&opts) + } + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + ctx, end := startExecutionTracerTask(ctx, name) + span.executionTracerTaskEnd = end + return NewContext(ctx, span), span } // NewSpan returns a new span. // // If parent is not nil, created span will be a child of the parent. +// +// Deprecated: Use StartSpan. func NewSpan(name string, parent *Span, o StartOptions) *Span { - hasParent := false var parentSpanContext SpanContext if parent != nil { - hasParent = true parentSpanContext = parent.SpanContext() } - return startSpanInternal(name, hasParent, parentSpanContext, false, o) + return startSpanInternal(name, parent != nil, parentSpanContext, false, o) } // NewSpanWithRemoteParent returns a new span with the given parent SpanContext. +// +// Deprecated: Use StartSpanWithRemoteParent. func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span { return startSpanInternal(name, true, parent, true, o) } @@ -215,7 +265,10 @@ func (s *Span) End() { if !s.IsRecordingEvents() { return } - s.exportOnce.Do(func() { + s.endOnce.Do(func() { + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } // TODO: optimize to avoid this call if sd won't be used. sd := s.makeSpanData() sd.EndTime = internal.MonotonicEndTime(sd.StartTime) diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go new file mode 100644 index 0000000000..8b1715f25d --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -0,0 +1,31 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.11 + +package trace + +import ( + "context" + t "runtime/trace" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + if !t.IsEnabled() { + // Avoid additional overhead if + // runtime/trace is not enabled. + return ctx, func() {} + } + return t.NewContext(ctx, name) +} diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go new file mode 100644 index 0000000000..e25419859c --- /dev/null +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.11 + +package trace + +import ( + "context" +) + +func startExecutionTracerTask(ctx context.Context, name string) (context.Context, func()) { + return ctx, func() {} +} diff --git a/vendor/golang.org/x/crypto/cast5/cast5.go b/vendor/golang.org/x/crypto/cast5/cast5.go deleted file mode 100644 index 0b4af37bdc..0000000000 --- a/vendor/golang.org/x/crypto/cast5/cast5.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package cast5 implements CAST5, as defined in RFC 2144. CAST5 is a common -// OpenPGP cipher. 
-package cast5 // import "golang.org/x/crypto/cast5" - -import "errors" - -const BlockSize = 8 -const KeySize = 16 - -type Cipher struct { - masking [16]uint32 - rotate [16]uint8 -} - -func NewCipher(key []byte) (c *Cipher, err error) { - if len(key) != KeySize { - return nil, errors.New("CAST5: keys must be 16 bytes") - } - - c = new(Cipher) - c.keySchedule(key) - return -} - -func (c *Cipher) BlockSize() int { - return BlockSize -} - -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - - l, r = r, l^f1(r, c.masking[15], c.rotate[15]) - l, r = r, l^f3(r, c.masking[14], c.rotate[14]) - l, r = r, l^f2(r, c.masking[13], c.rotate[13]) - l, r = r, l^f1(r, c.masking[12], c.rotate[12]) - - l, r = r, l^f3(r, c.masking[11], c.rotate[11]) - l, r = r, l^f2(r, c.masking[10], c.rotate[10]) - l, r = r, l^f1(r, c.masking[9], c.rotate[9]) - l, r = r, l^f3(r, c.masking[8], c.rotate[8]) - - l, r = r, l^f2(r, c.masking[7], c.rotate[7]) - l, r = r, l^f1(r, c.masking[6], c.rotate[6]) - l, r = r, l^f3(r, c.masking[5], c.rotate[5]) - l, r = r, l^f2(r, c.masking[4], c.rotate[4]) - - l, r = r, l^f1(r, c.masking[3], c.rotate[3]) - l, r = r, l^f3(r, c.masking[2], c.rotate[2]) - l, r = r, l^f2(r, c.masking[1], c.rotate[1]) - l, r = r, l^f1(r, c.masking[0], c.rotate[0]) - - dst[0] = uint8(r >> 24) - dst[1] = uint8(r >> 16) - dst[2] = uint8(r >> 8) - dst[3] = uint8(r) - dst[4] = uint8(l >> 24) - dst[5] = uint8(l >> 16) - dst[6] = uint8(l >> 8) - dst[7] = uint8(l) -} - -type keyScheduleA [4][7]uint8 -type keyScheduleB [4][5]uint8 - -// keyScheduleRound contains the magic values for a round of the key schedule. -// The keyScheduleA deals with the lines like: -// z0z1z2z3 = x0x1x2x3 ^ S5[xD] ^ S6[xF] ^ S7[xC] ^ S8[xE] ^ S7[x8] -// Conceptually, both x and z are in the same array, x first. The first -// element describes which word of this array gets written to and the -// second, which word gets read. So, for the line above, it's "4, 0", because -// it's writing to the first word of z, which, being after x, is word 4, and -// reading from the first word of x: word 0. -// -// Next are the indexes into the S-boxes. Now the array is treated as bytes. So -// "xD" is 0xd. 
The first byte of z is written as "16 + 0", just to be clear -// that it's z that we're indexing. -// -// keyScheduleB deals with lines like: -// K1 = S5[z8] ^ S6[z9] ^ S7[z7] ^ S8[z6] ^ S5[z2] -// "K1" is ignored because key words are always written in order. So the five -// elements are the S-box indexes. They use the same form as in keyScheduleA, -// above. - -type keyScheduleRound struct{} -type keySchedule []keyScheduleRound - -var schedule = []struct { - a keyScheduleA - b keyScheduleB -}{ - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 0x8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 8, 16 + 9, 16 + 7, 16 + 6, 16 + 2}, - {16 + 0xa, 16 + 0xb, 16 + 5, 16 + 4, 16 + 6}, - {16 + 0xc, 16 + 0xd, 16 + 3, 16 + 2, 16 + 9}, - {16 + 0xe, 16 + 0xf, 16 + 1, 16 + 0, 16 + 0xc}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {3, 2, 0xc, 0xd, 8}, - {1, 0, 0xe, 0xf, 0xd}, - {7, 6, 8, 9, 3}, - {5, 4, 0xa, 0xb, 7}, - }, - }, - { - keyScheduleA{ - {4, 0, 0xd, 0xf, 0xc, 0xe, 8}, - {5, 2, 16 + 0, 16 + 2, 16 + 1, 16 + 3, 0xa}, - {6, 3, 16 + 7, 16 + 6, 16 + 5, 16 + 4, 9}, - {7, 1, 16 + 0xa, 16 + 9, 16 + 0xb, 16 + 8, 0xb}, - }, - keyScheduleB{ - {16 + 3, 16 + 2, 16 + 0xc, 16 + 0xd, 16 + 9}, - {16 + 1, 16 + 0, 16 + 0xe, 16 + 0xf, 16 + 0xc}, - {16 + 7, 16 + 6, 16 + 8, 16 + 9, 16 + 2}, - {16 + 5, 16 + 4, 16 + 0xa, 16 + 0xb, 16 + 6}, - }, - }, - { - keyScheduleA{ - {0, 6, 16 + 5, 16 + 7, 16 + 4, 16 + 6, 16 + 0}, - {1, 4, 0, 2, 1, 3, 16 + 2}, - {2, 5, 7, 6, 5, 4, 16 + 1}, - {3, 7, 0xa, 9, 0xb, 8, 16 + 3}, - }, - keyScheduleB{ - {8, 9, 7, 6, 3}, - {0xa, 0xb, 5, 4, 7}, - {0xc, 0xd, 3, 2, 8}, - {0xe, 0xf, 1, 0, 0xd}, - }, - }, -} - -func (c *Cipher) keySchedule(in []byte) { - var t [8]uint32 - var k [32]uint32 - - for i := 0; i < 4; i++ { - j := i * 4 - t[i] = uint32(in[j])<<24 | uint32(in[j+1])<<16 | uint32(in[j+2])<<8 | uint32(in[j+3]) - } - - x := []byte{6, 7, 4, 5} - ki := 0 - - for half := 0; half < 2; half++ { - for _, round := range schedule { - for j := 0; j < 4; j++ { - var a [7]uint8 - copy(a[:], round.a[j][:]) - w := t[a[1]] - w ^= sBox[4][(t[a[2]>>2]>>(24-8*(a[2]&3)))&0xff] - w ^= sBox[5][(t[a[3]>>2]>>(24-8*(a[3]&3)))&0xff] - w ^= sBox[6][(t[a[4]>>2]>>(24-8*(a[4]&3)))&0xff] - w ^= sBox[7][(t[a[5]>>2]>>(24-8*(a[5]&3)))&0xff] - w ^= sBox[x[j]][(t[a[6]>>2]>>(24-8*(a[6]&3)))&0xff] - t[a[0]] = w - } - - for j := 0; j < 4; j++ { - var b [5]uint8 - copy(b[:], round.b[j][:]) - w := sBox[4][(t[b[0]>>2]>>(24-8*(b[0]&3)))&0xff] - w ^= sBox[5][(t[b[1]>>2]>>(24-8*(b[1]&3)))&0xff] - w ^= sBox[6][(t[b[2]>>2]>>(24-8*(b[2]&3)))&0xff] - w ^= sBox[7][(t[b[3]>>2]>>(24-8*(b[3]&3)))&0xff] - w ^= sBox[4+j][(t[b[4]>>2]>>(24-8*(b[4]&3)))&0xff] - k[ki] = w - ki++ - } - } - } - - for i := 0; i < 16; i++ { - c.masking[i] = k[i] - c.rotate[i] = uint8(k[16+i] & 0x1f) - } -} - -// These are the three 'f' functions. See RFC 2144, section 2.2. 
-func f1(d, m uint32, r uint8) uint32 { - t := m + d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] ^ sBox[1][(I>>16)&0xff]) - sBox[2][(I>>8)&0xff]) + sBox[3][I&0xff] -} - -func f2(d, m uint32, r uint8) uint32 { - t := m ^ d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] - sBox[1][(I>>16)&0xff]) + sBox[2][(I>>8)&0xff]) ^ sBox[3][I&0xff] -} - -func f3(d, m uint32, r uint8) uint32 { - t := m - d - I := (t << r) | (t >> (32 - r)) - return ((sBox[0][I>>24] + sBox[1][(I>>16)&0xff]) ^ sBox[2][(I>>8)&0xff]) - sBox[3][I&0xff] -} - -var sBox = [8][256]uint32{ - { - 0x30fb40d4, 0x9fa0ff0b, 0x6beccd2f, 0x3f258c7a, 0x1e213f2f, 0x9c004dd3, 0x6003e540, 0xcf9fc949, - 0xbfd4af27, 0x88bbbdb5, 0xe2034090, 0x98d09675, 0x6e63a0e0, 0x15c361d2, 0xc2e7661d, 0x22d4ff8e, - 0x28683b6f, 0xc07fd059, 0xff2379c8, 0x775f50e2, 0x43c340d3, 0xdf2f8656, 0x887ca41a, 0xa2d2bd2d, - 0xa1c9e0d6, 0x346c4819, 0x61b76d87, 0x22540f2f, 0x2abe32e1, 0xaa54166b, 0x22568e3a, 0xa2d341d0, - 0x66db40c8, 0xa784392f, 0x004dff2f, 0x2db9d2de, 0x97943fac, 0x4a97c1d8, 0x527644b7, 0xb5f437a7, - 0xb82cbaef, 0xd751d159, 0x6ff7f0ed, 0x5a097a1f, 0x827b68d0, 0x90ecf52e, 0x22b0c054, 0xbc8e5935, - 0x4b6d2f7f, 0x50bb64a2, 0xd2664910, 0xbee5812d, 0xb7332290, 0xe93b159f, 0xb48ee411, 0x4bff345d, - 0xfd45c240, 0xad31973f, 0xc4f6d02e, 0x55fc8165, 0xd5b1caad, 0xa1ac2dae, 0xa2d4b76d, 0xc19b0c50, - 0x882240f2, 0x0c6e4f38, 0xa4e4bfd7, 0x4f5ba272, 0x564c1d2f, 0xc59c5319, 0xb949e354, 0xb04669fe, - 0xb1b6ab8a, 0xc71358dd, 0x6385c545, 0x110f935d, 0x57538ad5, 0x6a390493, 0xe63d37e0, 0x2a54f6b3, - 0x3a787d5f, 0x6276a0b5, 0x19a6fcdf, 0x7a42206a, 0x29f9d4d5, 0xf61b1891, 0xbb72275e, 0xaa508167, - 0x38901091, 0xc6b505eb, 0x84c7cb8c, 0x2ad75a0f, 0x874a1427, 0xa2d1936b, 0x2ad286af, 0xaa56d291, - 0xd7894360, 0x425c750d, 0x93b39e26, 0x187184c9, 0x6c00b32d, 0x73e2bb14, 0xa0bebc3c, 0x54623779, - 0x64459eab, 0x3f328b82, 0x7718cf82, 0x59a2cea6, 0x04ee002e, 0x89fe78e6, 0x3fab0950, 0x325ff6c2, - 0x81383f05, 0x6963c5c8, 0x76cb5ad6, 0xd49974c9, 0xca180dcf, 0x380782d5, 0xc7fa5cf6, 0x8ac31511, - 0x35e79e13, 0x47da91d0, 0xf40f9086, 0xa7e2419e, 0x31366241, 0x051ef495, 0xaa573b04, 0x4a805d8d, - 0x548300d0, 0x00322a3c, 0xbf64cddf, 0xba57a68e, 0x75c6372b, 0x50afd341, 0xa7c13275, 0x915a0bf5, - 0x6b54bfab, 0x2b0b1426, 0xab4cc9d7, 0x449ccd82, 0xf7fbf265, 0xab85c5f3, 0x1b55db94, 0xaad4e324, - 0xcfa4bd3f, 0x2deaa3e2, 0x9e204d02, 0xc8bd25ac, 0xeadf55b3, 0xd5bd9e98, 0xe31231b2, 0x2ad5ad6c, - 0x954329de, 0xadbe4528, 0xd8710f69, 0xaa51c90f, 0xaa786bf6, 0x22513f1e, 0xaa51a79b, 0x2ad344cc, - 0x7b5a41f0, 0xd37cfbad, 0x1b069505, 0x41ece491, 0xb4c332e6, 0x032268d4, 0xc9600acc, 0xce387e6d, - 0xbf6bb16c, 0x6a70fb78, 0x0d03d9c9, 0xd4df39de, 0xe01063da, 0x4736f464, 0x5ad328d8, 0xb347cc96, - 0x75bb0fc3, 0x98511bfb, 0x4ffbcc35, 0xb58bcf6a, 0xe11f0abc, 0xbfc5fe4a, 0xa70aec10, 0xac39570a, - 0x3f04442f, 0x6188b153, 0xe0397a2e, 0x5727cb79, 0x9ceb418f, 0x1cacd68d, 0x2ad37c96, 0x0175cb9d, - 0xc69dff09, 0xc75b65f0, 0xd9db40d8, 0xec0e7779, 0x4744ead4, 0xb11c3274, 0xdd24cb9e, 0x7e1c54bd, - 0xf01144f9, 0xd2240eb1, 0x9675b3fd, 0xa3ac3755, 0xd47c27af, 0x51c85f4d, 0x56907596, 0xa5bb15e6, - 0x580304f0, 0xca042cf1, 0x011a37ea, 0x8dbfaadb, 0x35ba3e4a, 0x3526ffa0, 0xc37b4d09, 0xbc306ed9, - 0x98a52666, 0x5648f725, 0xff5e569d, 0x0ced63d0, 0x7c63b2cf, 0x700b45e1, 0xd5ea50f1, 0x85a92872, - 0xaf1fbda7, 0xd4234870, 0xa7870bf3, 0x2d3b4d79, 0x42e04198, 0x0cd0ede7, 0x26470db8, 0xf881814c, - 0x474d6ad7, 0x7c0c5e5c, 0xd1231959, 0x381b7298, 0xf5d2f4db, 0xab838653, 0x6e2f1e23, 0x83719c9e, - 0xbd91e046, 0x9a56456e, 
0xdc39200c, 0x20c8c571, 0x962bda1c, 0xe1e696ff, 0xb141ab08, 0x7cca89b9, - 0x1a69e783, 0x02cc4843, 0xa2f7c579, 0x429ef47d, 0x427b169c, 0x5ac9f049, 0xdd8f0f00, 0x5c8165bf, - }, - { - 0x1f201094, 0xef0ba75b, 0x69e3cf7e, 0x393f4380, 0xfe61cf7a, 0xeec5207a, 0x55889c94, 0x72fc0651, - 0xada7ef79, 0x4e1d7235, 0xd55a63ce, 0xde0436ba, 0x99c430ef, 0x5f0c0794, 0x18dcdb7d, 0xa1d6eff3, - 0xa0b52f7b, 0x59e83605, 0xee15b094, 0xe9ffd909, 0xdc440086, 0xef944459, 0xba83ccb3, 0xe0c3cdfb, - 0xd1da4181, 0x3b092ab1, 0xf997f1c1, 0xa5e6cf7b, 0x01420ddb, 0xe4e7ef5b, 0x25a1ff41, 0xe180f806, - 0x1fc41080, 0x179bee7a, 0xd37ac6a9, 0xfe5830a4, 0x98de8b7f, 0x77e83f4e, 0x79929269, 0x24fa9f7b, - 0xe113c85b, 0xacc40083, 0xd7503525, 0xf7ea615f, 0x62143154, 0x0d554b63, 0x5d681121, 0xc866c359, - 0x3d63cf73, 0xcee234c0, 0xd4d87e87, 0x5c672b21, 0x071f6181, 0x39f7627f, 0x361e3084, 0xe4eb573b, - 0x602f64a4, 0xd63acd9c, 0x1bbc4635, 0x9e81032d, 0x2701f50c, 0x99847ab4, 0xa0e3df79, 0xba6cf38c, - 0x10843094, 0x2537a95e, 0xf46f6ffe, 0xa1ff3b1f, 0x208cfb6a, 0x8f458c74, 0xd9e0a227, 0x4ec73a34, - 0xfc884f69, 0x3e4de8df, 0xef0e0088, 0x3559648d, 0x8a45388c, 0x1d804366, 0x721d9bfd, 0xa58684bb, - 0xe8256333, 0x844e8212, 0x128d8098, 0xfed33fb4, 0xce280ae1, 0x27e19ba5, 0xd5a6c252, 0xe49754bd, - 0xc5d655dd, 0xeb667064, 0x77840b4d, 0xa1b6a801, 0x84db26a9, 0xe0b56714, 0x21f043b7, 0xe5d05860, - 0x54f03084, 0x066ff472, 0xa31aa153, 0xdadc4755, 0xb5625dbf, 0x68561be6, 0x83ca6b94, 0x2d6ed23b, - 0xeccf01db, 0xa6d3d0ba, 0xb6803d5c, 0xaf77a709, 0x33b4a34c, 0x397bc8d6, 0x5ee22b95, 0x5f0e5304, - 0x81ed6f61, 0x20e74364, 0xb45e1378, 0xde18639b, 0x881ca122, 0xb96726d1, 0x8049a7e8, 0x22b7da7b, - 0x5e552d25, 0x5272d237, 0x79d2951c, 0xc60d894c, 0x488cb402, 0x1ba4fe5b, 0xa4b09f6b, 0x1ca815cf, - 0xa20c3005, 0x8871df63, 0xb9de2fcb, 0x0cc6c9e9, 0x0beeff53, 0xe3214517, 0xb4542835, 0x9f63293c, - 0xee41e729, 0x6e1d2d7c, 0x50045286, 0x1e6685f3, 0xf33401c6, 0x30a22c95, 0x31a70850, 0x60930f13, - 0x73f98417, 0xa1269859, 0xec645c44, 0x52c877a9, 0xcdff33a6, 0xa02b1741, 0x7cbad9a2, 0x2180036f, - 0x50d99c08, 0xcb3f4861, 0xc26bd765, 0x64a3f6ab, 0x80342676, 0x25a75e7b, 0xe4e6d1fc, 0x20c710e6, - 0xcdf0b680, 0x17844d3b, 0x31eef84d, 0x7e0824e4, 0x2ccb49eb, 0x846a3bae, 0x8ff77888, 0xee5d60f6, - 0x7af75673, 0x2fdd5cdb, 0xa11631c1, 0x30f66f43, 0xb3faec54, 0x157fd7fa, 0xef8579cc, 0xd152de58, - 0xdb2ffd5e, 0x8f32ce19, 0x306af97a, 0x02f03ef8, 0x99319ad5, 0xc242fa0f, 0xa7e3ebb0, 0xc68e4906, - 0xb8da230c, 0x80823028, 0xdcdef3c8, 0xd35fb171, 0x088a1bc8, 0xbec0c560, 0x61a3c9e8, 0xbca8f54d, - 0xc72feffa, 0x22822e99, 0x82c570b4, 0xd8d94e89, 0x8b1c34bc, 0x301e16e6, 0x273be979, 0xb0ffeaa6, - 0x61d9b8c6, 0x00b24869, 0xb7ffce3f, 0x08dc283b, 0x43daf65a, 0xf7e19798, 0x7619b72f, 0x8f1c9ba4, - 0xdc8637a0, 0x16a7d3b1, 0x9fc393b7, 0xa7136eeb, 0xc6bcc63e, 0x1a513742, 0xef6828bc, 0x520365d6, - 0x2d6a77ab, 0x3527ed4b, 0x821fd216, 0x095c6e2e, 0xdb92f2fb, 0x5eea29cb, 0x145892f5, 0x91584f7f, - 0x5483697b, 0x2667a8cc, 0x85196048, 0x8c4bacea, 0x833860d4, 0x0d23e0f9, 0x6c387e8a, 0x0ae6d249, - 0xb284600c, 0xd835731d, 0xdcb1c647, 0xac4c56ea, 0x3ebd81b3, 0x230eabb0, 0x6438bc87, 0xf0b5b1fa, - 0x8f5ea2b3, 0xfc184642, 0x0a036b7a, 0x4fb089bd, 0x649da589, 0xa345415e, 0x5c038323, 0x3e5d3bb9, - 0x43d79572, 0x7e6dd07c, 0x06dfdf1e, 0x6c6cc4ef, 0x7160a539, 0x73bfbe70, 0x83877605, 0x4523ecf1, - }, - { - 0x8defc240, 0x25fa5d9f, 0xeb903dbf, 0xe810c907, 0x47607fff, 0x369fe44b, 0x8c1fc644, 0xaececa90, - 0xbeb1f9bf, 0xeefbcaea, 0xe8cf1950, 0x51df07ae, 0x920e8806, 0xf0ad0548, 0xe13c8d83, 0x927010d5, - 0x11107d9f, 0x07647db9, 
0xb2e3e4d4, 0x3d4f285e, 0xb9afa820, 0xfade82e0, 0xa067268b, 0x8272792e, - 0x553fb2c0, 0x489ae22b, 0xd4ef9794, 0x125e3fbc, 0x21fffcee, 0x825b1bfd, 0x9255c5ed, 0x1257a240, - 0x4e1a8302, 0xbae07fff, 0x528246e7, 0x8e57140e, 0x3373f7bf, 0x8c9f8188, 0xa6fc4ee8, 0xc982b5a5, - 0xa8c01db7, 0x579fc264, 0x67094f31, 0xf2bd3f5f, 0x40fff7c1, 0x1fb78dfc, 0x8e6bd2c1, 0x437be59b, - 0x99b03dbf, 0xb5dbc64b, 0x638dc0e6, 0x55819d99, 0xa197c81c, 0x4a012d6e, 0xc5884a28, 0xccc36f71, - 0xb843c213, 0x6c0743f1, 0x8309893c, 0x0feddd5f, 0x2f7fe850, 0xd7c07f7e, 0x02507fbf, 0x5afb9a04, - 0xa747d2d0, 0x1651192e, 0xaf70bf3e, 0x58c31380, 0x5f98302e, 0x727cc3c4, 0x0a0fb402, 0x0f7fef82, - 0x8c96fdad, 0x5d2c2aae, 0x8ee99a49, 0x50da88b8, 0x8427f4a0, 0x1eac5790, 0x796fb449, 0x8252dc15, - 0xefbd7d9b, 0xa672597d, 0xada840d8, 0x45f54504, 0xfa5d7403, 0xe83ec305, 0x4f91751a, 0x925669c2, - 0x23efe941, 0xa903f12e, 0x60270df2, 0x0276e4b6, 0x94fd6574, 0x927985b2, 0x8276dbcb, 0x02778176, - 0xf8af918d, 0x4e48f79e, 0x8f616ddf, 0xe29d840e, 0x842f7d83, 0x340ce5c8, 0x96bbb682, 0x93b4b148, - 0xef303cab, 0x984faf28, 0x779faf9b, 0x92dc560d, 0x224d1e20, 0x8437aa88, 0x7d29dc96, 0x2756d3dc, - 0x8b907cee, 0xb51fd240, 0xe7c07ce3, 0xe566b4a1, 0xc3e9615e, 0x3cf8209d, 0x6094d1e3, 0xcd9ca341, - 0x5c76460e, 0x00ea983b, 0xd4d67881, 0xfd47572c, 0xf76cedd9, 0xbda8229c, 0x127dadaa, 0x438a074e, - 0x1f97c090, 0x081bdb8a, 0x93a07ebe, 0xb938ca15, 0x97b03cff, 0x3dc2c0f8, 0x8d1ab2ec, 0x64380e51, - 0x68cc7bfb, 0xd90f2788, 0x12490181, 0x5de5ffd4, 0xdd7ef86a, 0x76a2e214, 0xb9a40368, 0x925d958f, - 0x4b39fffa, 0xba39aee9, 0xa4ffd30b, 0xfaf7933b, 0x6d498623, 0x193cbcfa, 0x27627545, 0x825cf47a, - 0x61bd8ba0, 0xd11e42d1, 0xcead04f4, 0x127ea392, 0x10428db7, 0x8272a972, 0x9270c4a8, 0x127de50b, - 0x285ba1c8, 0x3c62f44f, 0x35c0eaa5, 0xe805d231, 0x428929fb, 0xb4fcdf82, 0x4fb66a53, 0x0e7dc15b, - 0x1f081fab, 0x108618ae, 0xfcfd086d, 0xf9ff2889, 0x694bcc11, 0x236a5cae, 0x12deca4d, 0x2c3f8cc5, - 0xd2d02dfe, 0xf8ef5896, 0xe4cf52da, 0x95155b67, 0x494a488c, 0xb9b6a80c, 0x5c8f82bc, 0x89d36b45, - 0x3a609437, 0xec00c9a9, 0x44715253, 0x0a874b49, 0xd773bc40, 0x7c34671c, 0x02717ef6, 0x4feb5536, - 0xa2d02fff, 0xd2bf60c4, 0xd43f03c0, 0x50b4ef6d, 0x07478cd1, 0x006e1888, 0xa2e53f55, 0xb9e6d4bc, - 0xa2048016, 0x97573833, 0xd7207d67, 0xde0f8f3d, 0x72f87b33, 0xabcc4f33, 0x7688c55d, 0x7b00a6b0, - 0x947b0001, 0x570075d2, 0xf9bb88f8, 0x8942019e, 0x4264a5ff, 0x856302e0, 0x72dbd92b, 0xee971b69, - 0x6ea22fde, 0x5f08ae2b, 0xaf7a616d, 0xe5c98767, 0xcf1febd2, 0x61efc8c2, 0xf1ac2571, 0xcc8239c2, - 0x67214cb8, 0xb1e583d1, 0xb7dc3e62, 0x7f10bdce, 0xf90a5c38, 0x0ff0443d, 0x606e6dc6, 0x60543a49, - 0x5727c148, 0x2be98a1d, 0x8ab41738, 0x20e1be24, 0xaf96da0f, 0x68458425, 0x99833be5, 0x600d457d, - 0x282f9350, 0x8334b362, 0xd91d1120, 0x2b6d8da0, 0x642b1e31, 0x9c305a00, 0x52bce688, 0x1b03588a, - 0xf7baefd5, 0x4142ed9c, 0xa4315c11, 0x83323ec5, 0xdfef4636, 0xa133c501, 0xe9d3531c, 0xee353783, - }, - { - 0x9db30420, 0x1fb6e9de, 0xa7be7bef, 0xd273a298, 0x4a4f7bdb, 0x64ad8c57, 0x85510443, 0xfa020ed1, - 0x7e287aff, 0xe60fb663, 0x095f35a1, 0x79ebf120, 0xfd059d43, 0x6497b7b1, 0xf3641f63, 0x241e4adf, - 0x28147f5f, 0x4fa2b8cd, 0xc9430040, 0x0cc32220, 0xfdd30b30, 0xc0a5374f, 0x1d2d00d9, 0x24147b15, - 0xee4d111a, 0x0fca5167, 0x71ff904c, 0x2d195ffe, 0x1a05645f, 0x0c13fefe, 0x081b08ca, 0x05170121, - 0x80530100, 0xe83e5efe, 0xac9af4f8, 0x7fe72701, 0xd2b8ee5f, 0x06df4261, 0xbb9e9b8a, 0x7293ea25, - 0xce84ffdf, 0xf5718801, 0x3dd64b04, 0xa26f263b, 0x7ed48400, 0x547eebe6, 0x446d4ca0, 0x6cf3d6f5, - 0x2649abdf, 0xaea0c7f5, 0x36338cc1, 
0x503f7e93, 0xd3772061, 0x11b638e1, 0x72500e03, 0xf80eb2bb, - 0xabe0502e, 0xec8d77de, 0x57971e81, 0xe14f6746, 0xc9335400, 0x6920318f, 0x081dbb99, 0xffc304a5, - 0x4d351805, 0x7f3d5ce3, 0xa6c866c6, 0x5d5bcca9, 0xdaec6fea, 0x9f926f91, 0x9f46222f, 0x3991467d, - 0xa5bf6d8e, 0x1143c44f, 0x43958302, 0xd0214eeb, 0x022083b8, 0x3fb6180c, 0x18f8931e, 0x281658e6, - 0x26486e3e, 0x8bd78a70, 0x7477e4c1, 0xb506e07c, 0xf32d0a25, 0x79098b02, 0xe4eabb81, 0x28123b23, - 0x69dead38, 0x1574ca16, 0xdf871b62, 0x211c40b7, 0xa51a9ef9, 0x0014377b, 0x041e8ac8, 0x09114003, - 0xbd59e4d2, 0xe3d156d5, 0x4fe876d5, 0x2f91a340, 0x557be8de, 0x00eae4a7, 0x0ce5c2ec, 0x4db4bba6, - 0xe756bdff, 0xdd3369ac, 0xec17b035, 0x06572327, 0x99afc8b0, 0x56c8c391, 0x6b65811c, 0x5e146119, - 0x6e85cb75, 0xbe07c002, 0xc2325577, 0x893ff4ec, 0x5bbfc92d, 0xd0ec3b25, 0xb7801ab7, 0x8d6d3b24, - 0x20c763ef, 0xc366a5fc, 0x9c382880, 0x0ace3205, 0xaac9548a, 0xeca1d7c7, 0x041afa32, 0x1d16625a, - 0x6701902c, 0x9b757a54, 0x31d477f7, 0x9126b031, 0x36cc6fdb, 0xc70b8b46, 0xd9e66a48, 0x56e55a79, - 0x026a4ceb, 0x52437eff, 0x2f8f76b4, 0x0df980a5, 0x8674cde3, 0xedda04eb, 0x17a9be04, 0x2c18f4df, - 0xb7747f9d, 0xab2af7b4, 0xefc34d20, 0x2e096b7c, 0x1741a254, 0xe5b6a035, 0x213d42f6, 0x2c1c7c26, - 0x61c2f50f, 0x6552daf9, 0xd2c231f8, 0x25130f69, 0xd8167fa2, 0x0418f2c8, 0x001a96a6, 0x0d1526ab, - 0x63315c21, 0x5e0a72ec, 0x49bafefd, 0x187908d9, 0x8d0dbd86, 0x311170a7, 0x3e9b640c, 0xcc3e10d7, - 0xd5cad3b6, 0x0caec388, 0xf73001e1, 0x6c728aff, 0x71eae2a1, 0x1f9af36e, 0xcfcbd12f, 0xc1de8417, - 0xac07be6b, 0xcb44a1d8, 0x8b9b0f56, 0x013988c3, 0xb1c52fca, 0xb4be31cd, 0xd8782806, 0x12a3a4e2, - 0x6f7de532, 0x58fd7eb6, 0xd01ee900, 0x24adffc2, 0xf4990fc5, 0x9711aac5, 0x001d7b95, 0x82e5e7d2, - 0x109873f6, 0x00613096, 0xc32d9521, 0xada121ff, 0x29908415, 0x7fbb977f, 0xaf9eb3db, 0x29c9ed2a, - 0x5ce2a465, 0xa730f32c, 0xd0aa3fe8, 0x8a5cc091, 0xd49e2ce7, 0x0ce454a9, 0xd60acd86, 0x015f1919, - 0x77079103, 0xdea03af6, 0x78a8565e, 0xdee356df, 0x21f05cbe, 0x8b75e387, 0xb3c50651, 0xb8a5c3ef, - 0xd8eeb6d2, 0xe523be77, 0xc2154529, 0x2f69efdf, 0xafe67afb, 0xf470c4b2, 0xf3e0eb5b, 0xd6cc9876, - 0x39e4460c, 0x1fda8538, 0x1987832f, 0xca007367, 0xa99144f8, 0x296b299e, 0x492fc295, 0x9266beab, - 0xb5676e69, 0x9bd3ddda, 0xdf7e052f, 0xdb25701c, 0x1b5e51ee, 0xf65324e6, 0x6afce36c, 0x0316cc04, - 0x8644213e, 0xb7dc59d0, 0x7965291f, 0xccd6fd43, 0x41823979, 0x932bcdf6, 0xb657c34d, 0x4edfd282, - 0x7ae5290c, 0x3cb9536b, 0x851e20fe, 0x9833557e, 0x13ecf0b0, 0xd3ffb372, 0x3f85c5c1, 0x0aef7ed2, - }, - { - 0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff, 0x1dd358f5, 0x44dd9d44, 0x1731167f, - 0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8, 0x386381cb, 0xacf6243a, 0x69befd7a, - 0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640, 0x15b0a848, 0xe68b18cb, 0x4caadeff, - 0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d, 0x248eb6fb, 0x8dba1cfe, 0x41a99b02, - 0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7, 0x97a5980a, 0xc539b9aa, 0x4d79fe6a, - 0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88, 0x8709e6b0, 0xd7e07156, 0x4e29fea7, - 0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a, 0x578535f2, 0x2261be02, 0xd642a0c9, - 0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8, 0xc8adedb3, 0x28a87fc9, 0x3d959981, - 0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1, 0x4fb96976, 0x90c79505, 0xb0a8a774, - 0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f, 0x0ec50966, 0xdfdd55bc, 0x29de0655, - 0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 
0x0d01e980, 0x524755f4, 0x03b63cc9, 0x0cc844b2, - 0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449, 0x64ee2d7e, 0xcddbb1da, 0x01c94910, - 0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6, 0x50f5b616, 0xf24766e3, 0x8eca36c1, - 0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9, 0x3063fcdf, 0xb6f589de, 0xec2941da, - 0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401, 0xc1bacb7f, 0xe5ff550f, 0xb6083049, - 0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd, 0x9e0885f9, 0x68cb3e47, 0x086c010f, - 0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3, 0xcbb3d550, 0x1793084d, 0xb0d70eba, - 0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56, 0x0f5755d1, 0xe0e1e56e, 0x6184b5be, - 0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280, 0x05687715, 0x646c6bd7, 0x44904db3, - 0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f, 0x2cb6356a, 0x85808573, 0x4991f840, - 0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8, 0xc1092910, 0x8bc95fc6, 0x7d869cf4, - 0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717, 0x7d161bba, 0x9cad9010, 0xaf462ba2, - 0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e, 0x176d486f, 0x097c13ea, 0x631da5c7, - 0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72, 0x6e5dd2f3, 0x20936079, 0x459b80a5, - 0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572, 0xf6721b2c, 0x1ad2fff3, 0x8c25404e, - 0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e, 0x75922283, 0x784d6b17, 0x58ebb16e, - 0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf, 0xaaf47556, 0x5f46b02a, 0x2b092801, - 0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874, 0x95055110, 0x1b5ad7a8, 0xf61ed5ad, - 0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826, 0x0ff6f8f3, 0xa09c7f70, 0x5346aba0, - 0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9, 0x17e3fe2a, 0x24b79767, 0xf5a96b20, - 0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a, 0xeeb9491d, 0x34010718, 0xbb30cab8, - 0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8, 0xb1534546, 0x6d47de08, 0xefe9e7d4, - }, - { - 0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7, 0x016843b4, 0xeced5cbc, 0x325553ac, - 0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8, 0xde5ebe39, 0xf38ff732, 0x8989b138, - 0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99, 0x4e23e33c, 0x79cbd7cc, 0x48a14367, - 0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d, 0x09a8486f, 0xa888614a, 0x2900af98, - 0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932, 0xcf0fec14, 0xf7ca07d2, 0xd0a82072, - 0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c, 0x4c7f4448, 0xdab5d440, 0x6dba0ec3, - 0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01, 0x64bdb941, 0x2c0e636a, 0xba7dd9cd, - 0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c, 0xb88153e2, 0x08a19866, 0x1ae2eac8, - 0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3, 0x9aea3906, 0xefe8c36e, 0xf890cdd9, - 0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc, 0x221db3a6, 0x9a69a02f, 0x68818a54, - 0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc, 0xcf222ebf, 0x25ac6f48, 0xa9a99387, - 0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1, 0xe8a11be9, 0x4980740d, 0xc8087dfc, - 0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f, 0x9528cd89, 0xfd339fed, 0xb87834bf, - 0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa, 0x57f55ec5, 0xe2220abe, 0xd2916ebf, - 0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff, 
0xa8dc8af0, 0x7345c106, 0xf41e232f, - 0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af, 0x692573e4, 0xe9a9d848, 0xf3160289, - 0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063, 0x4576698d, 0xb6fad407, 0x592af950, - 0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8, 0xc50dfe5d, 0xfcd707ab, 0x0921c42f, - 0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d, 0x48b9d585, 0xdc049441, 0xc8098f9b, - 0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6, 0x890072d6, 0x28207682, 0xa9a9f7be, - 0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a, 0x1f8fb214, 0xd372cf08, 0xcc3c4a13, - 0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a, 0xb6c85283, 0x3cc2acfb, 0x3fc06976, - 0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0, 0x513021a5, 0x6c5b68b7, 0x822f8aa0, - 0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9, 0x0c5ec241, 0x8809286c, 0xf592d891, - 0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98, 0xb173ecc0, 0xbc60b42a, 0x953498da, - 0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123, 0x257f0c3d, 0x9348af49, 0x361400bc, - 0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57, 0xda41e7f9, 0xc25ad33a, 0x54f4a084, - 0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5, 0xb6f6deaf, 0x3a479c3a, 0x5302da25, - 0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88, 0x44136c76, 0x0404a8c8, 0xb8e5a121, - 0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913, 0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5, - 0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1, 0xf544edeb, 0xb0e93524, 0xbebb8fbd, - 0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905, 0xa65b1db8, 0x851c97bd, 0xd675cf2f, - }, - { - 0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f, 0xab9bc912, 0xde6008a1, 0x2028da1f, - 0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11, 0xb232e75c, 0x4b3695f2, 0xb28707de, - 0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381, 0xfde4e789, 0x5c79b0d8, 0x1e8bfd43, - 0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be, 0xbaeeadf4, 0x1286becf, 0xb6eacb19, - 0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66, 0x28136086, 0x0bd8dfa8, 0x356d1cf2, - 0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a, 0xeb12ff82, 0xe3486911, 0xd34d7516, - 0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce, 0x8c9341b7, 0xd0d854c0, 0xcb3a6c88, - 0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa, 0x4437f107, 0xb6e79962, 0x42d2d816, - 0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7, 0xf9583745, 0xcf19df58, 0xbec3f756, - 0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511, 0x38bc46e9, 0xc6e6fa14, 0xbae8584a, - 0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f, 0xaff60ff4, 0xea2c4e6d, 0x16e39264, - 0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a, 0xb2856e6e, 0x1aec3ca9, 0xbe838688, - 0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85, 0x61fe033c, 0x16746233, 0x3c034c28, - 0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a, 0x1626a49f, 0xeed82b29, 0x1d382fe3, - 0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c, 0xd45230c7, 0x2bd1408b, 0x60c03eb7, - 0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32, 0xebd4e7be, 0xbe8b9d2d, 0x7979fb06, - 0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f, 0x5a6317a6, 0xfa5cf7a0, 0x5dda0033, - 0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0, 0x79d34217, 0x021a718d, 0x9ac6336a, - 0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef, 0x4eeb8476, 
0x488dcf25, 0x36c9d566, - 0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6, 0x92aeaf64, 0x3ac7d5e6, 0x9ea80509, - 0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887, 0x2b9f4fd5, 0x625aba82, 0x6a017962, - 0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22, 0xe32dbf9a, 0x058745b9, 0x3453dc1e, - 0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1, 0x19de7eae, 0x053e561a, 0x15ad6f8c, - 0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0, 0x58d4f2ae, 0x9ea294fb, 0x52cf564c, - 0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108, 0xa1e7160e, 0xe4f2dfa6, 0x693ed285, - 0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f, 0x3d321c5d, 0xc3f5e194, 0x4b269301, - 0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e, 0x296693f4, 0x3d1fce6f, 0xc61e45be, - 0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d, 0xb5229301, 0xcfd2a87f, 0x60aeb767, - 0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b, 0x589dd390, 0x5479f8e6, 0x1cb8d647, - 0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad, 0x462e1b78, 0x6580f87e, 0xf3817914, - 0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc, 0x3d40f021, 0xc3c0bdae, 0x4958c24c, - 0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7, 0x94e01be8, 0x90716f4b, 0x954b8aa3, - }, - { - 0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7, 0xe6c1121b, 0x0e241600, 0x052ce8b5, - 0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c, 0x76e38111, 0xb12def3a, 0x37ddddfc, - 0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f, 0xb4d137cf, 0xb44e79f0, 0x049eedfd, - 0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831, 0x3f8f95e7, 0x72df191b, 0x7580330d, - 0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a, 0x02e7d1ca, 0x53571dae, 0x7a3182a2, - 0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022, 0xce949ad4, 0xb84769ad, 0x965bd862, - 0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f, 0xc28ec4b8, 0x57e8726e, 0x647a78fc, - 0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3, 0xae63aff2, 0x7e8bd632, 0x70108c0c, - 0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53, 0x06918548, 0x58cb7e07, 0x3b74ef2e, - 0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2, 0x19b47a38, 0x424f7618, 0x35856039, - 0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd, 0xc18910b1, 0xe11dbf7b, 0x06cd1af8, - 0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c, 0x3dd00db3, 0x708f8f34, 0x77d51b42, - 0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e, 0x3e378160, 0x7895cda5, 0x859c15a5, - 0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e, 0x31842e7b, 0x24259fd7, 0xf8bef472, - 0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c, 0xe2506d3d, 0x4f9b12ea, 0xf215f225, - 0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187, 0xea7a6e98, 0x7cd16efc, 0x1436876c, - 0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899, 0x92ecbae6, 0xdd67016d, 0x151682eb, - 0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e, 0xe139673b, 0xefa63fb8, 0x71873054, - 0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d, 0x844a1be5, 0xbae7dfdc, 0x42cbda70, - 0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428, 0x79d130a4, 0x3486ebfb, 0x33d3cddc, - 0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4, 0xc5c8b37e, 0x0d809ea2, 0x398feb7c, - 0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2, 0x37df932b, 0xc4248289, 0xacf3ebc3, - 0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e, 0x5e410fab, 0xb48a2465, 
0x2eda7fa4, - 0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b, 0xdb485694, 0x38d7e5b2, 0x57720101, - 0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282, 0x7523d24a, 0xe0779695, 0xf9c17a8f, - 0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f, 0xad1163ed, 0xea7b5965, 0x1a00726e, - 0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0, 0x9eedc364, 0x22ebe6a8, 0xcee7d28a, - 0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca, 0x8951570f, 0xdf09822b, 0xbd691a6c, - 0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f, 0x0d771c2b, 0x67cdb156, 0x350d8384, - 0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61, 0x8360d87b, 0x1fa98b0c, 0x1149382c, - 0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82, 0x0d2059d1, 0xa466bb1e, 0xf8da0a82, - 0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80, 0xeaee6801, 0x8db2a283, 0xea8bf59e, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 0000000000..a57771a1ed --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,188 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. +package ed25519 + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +import ( + "bytes" + "crypto" + cryptorand "crypto/rand" + "crypto/sha512" + "errors" + "io" + "strconv" + + "golang.org/x/crypto/ed25519/internal/edwards25519" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 +) + +// PublicKey is the type of Ed25519 public keys. +type PublicKey []byte + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +type PrivateKey []byte + +// Public returns the PublicKey corresponding to priv. +func (priv PrivateKey) Public() crypto.PublicKey { + publicKey := make([]byte, PublicKeySize) + copy(publicKey, priv[32:]) + return PublicKey(publicKey) +} + +// Sign signs the given message with priv. +// Ed25519 performs two passes over messages to be signed and therefore cannot +// handle pre-hashed messages. Thus opts.HashFunc() must return zero to +// indicate the message hasn't been hashed. This can be achieved by passing +// crypto.Hash(0) as the value for opts. +func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) { + if opts.HashFunc() != crypto.Hash(0) { + return nil, errors.New("ed25519: cannot sign hashed message") + } + + return Sign(priv, message), nil +} + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. 
+func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) { + if rand == nil { + rand = cryptorand.Reader + } + + privateKey = make([]byte, PrivateKeySize) + publicKey = make([]byte, PublicKeySize) + _, err = io.ReadFull(rand, privateKey[:32]) + if err != nil { + return nil, nil, err + } + + digest := sha512.Sum512(privateKey[:32]) + digest[0] &= 248 + digest[31] &= 127 + digest[31] |= 64 + + var A edwards25519.ExtendedGroupElement + var hBytes [32]byte + copy(hBytes[:], digest[:]) + edwards25519.GeScalarMultBase(&A, &hBytes) + var publicKeyBytes [32]byte + A.ToBytes(&publicKeyBytes) + + copy(privateKey[32:], publicKeyBytes[:]) + copy(publicKey, publicKeyBytes[:]) + + return publicKey, privateKey, nil +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + if l := len(privateKey); l != PrivateKeySize { + panic("ed25519: bad private key length: " + strconv.Itoa(l)) + } + + h := sha512.New() + h.Write(privateKey[:32]) + + var digest1, messageDigest, hramDigest [64]byte + var expandedSecretKey [32]byte + h.Sum(digest1[:0]) + copy(expandedSecretKey[:], digest1[:]) + expandedSecretKey[0] &= 248 + expandedSecretKey[31] &= 63 + expandedSecretKey[31] |= 64 + + h.Reset() + h.Write(digest1[32:]) + h.Write(message) + h.Sum(messageDigest[:0]) + + var messageDigestReduced [32]byte + edwards25519.ScReduce(&messageDigestReduced, &messageDigest) + var R edwards25519.ExtendedGroupElement + edwards25519.GeScalarMultBase(&R, &messageDigestReduced) + + var encodedR [32]byte + R.ToBytes(&encodedR) + + h.Reset() + h.Write(encodedR[:]) + h.Write(privateKey[32:]) + h.Write(message) + h.Sum(hramDigest[:0]) + var hramDigestReduced [32]byte + edwards25519.ScReduce(&hramDigestReduced, &hramDigest) + + var s [32]byte + edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) + + signature := make([]byte, SignatureSize) + copy(signature[:], encodedR[:]) + copy(signature[32:], s[:]) + + return signature +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + if l := len(publicKey); l != PublicKeySize { + panic("ed25519: bad public key length: " + strconv.Itoa(l)) + } + + if len(sig) != SignatureSize || sig[63]&224 != 0 { + return false + } + + var A edwards25519.ExtendedGroupElement + var publicKeyBytes [32]byte + copy(publicKeyBytes[:], publicKey) + if !A.FromBytes(&publicKeyBytes) { + return false + } + edwards25519.FeNeg(&A.X, &A.X) + edwards25519.FeNeg(&A.T, &A.T) + + h := sha512.New() + h.Write(sig[:32]) + h.Write(publicKey[:]) + h.Write(message) + var digest [64]byte + h.Sum(digest[:0]) + + var hReduced [32]byte + edwards25519.ScReduce(&hReduced, &digest) + + var R edwards25519.ProjectiveGroupElement + var s [32]byte + copy(s[:], sig[32:]) + + // https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in + // the range [0, order) in order to prevent signature malleability. 
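+	// (Explanatory note: ScMinimal reports whether s, read as a little-endian
+	// integer, is strictly less than the group order. Rejecting any larger s
+	// rules out the trivially malleable signatures an attacker could build by
+	// adding a multiple of the order to s.)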
+ if !edwards25519.ScMinimal(&s) { + return false + } + + edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s) + + var checkR [32]byte + R.ToBytes(&checkR) + return bytes.Equal(sig[:32], checkR[:]) +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go new file mode 100644 index 0000000000..e39f086c1d --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go @@ -0,0 +1,1422 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +// These values are from the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// d is a constant in the Edwards curve equation. +var d = FieldElement{ + -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, +} + +// d2 is 2*d. +var d2 = FieldElement{ + -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, +} + +// SqrtM1 is the square-root of -1 in the field. +var SqrtM1 = FieldElement{ + -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, +} + +// A is a constant in the Montgomery-form of curve25519. +var A = FieldElement{ + 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, +} + +// bi contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var bi = [8]PreComputedGroupElement{ + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, + FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, + FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, + }, + { + FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, + FieldElement{25576264, -2703214, 7349804, 
-11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, + FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, + }, + { + FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, + FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, + FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, + }, + { + FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, + FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, + FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, + }, +} + +// base contains precomputed multiples of the base-point. See the Ed25519 paper +// for a discussion about how these values are used. +var base = [32][8]PreComputedGroupElement{ + { + { + FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, + FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, + FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, + }, + { + FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, + FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, + FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, + }, + { + FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, + FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, + FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, + }, + { + FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, + FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, + FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, + }, + { + FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, + FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, + FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, + }, + { + FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, + FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, + FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, + }, + { + FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, + FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, + FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, 
-31400660, 1370708, 29794553, -1409300}, + }, + { + FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, + FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, + FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, + }, + }, + { + { + FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, + FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, + FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, + }, + { + FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, + FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, + FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, + }, + { + FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, + FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, + FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, + }, + { + FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, + FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, + FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, + }, + { + FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, + FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, + FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, + }, + { + FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, + FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, + FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, + }, + { + FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, + FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, + FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, + }, + { + FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, + FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, + FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, + }, + }, + { + { + FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, + FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, + FieldElement{-4421239, 7169619, 4982368, 
-2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, + }, + { + FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, + FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, + FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, + }, + { + FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, + FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, + FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, + }, + { + FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, + FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, + FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, + }, + { + FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, + FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, + FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, + }, + { + FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, + FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, + FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, + }, + { + FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, + FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, + FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, + }, + { + FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, + FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, + FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, + }, + }, + { + { + FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, + FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, + FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, + }, + { + FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, + FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, + FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, + }, + { + FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, + FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, + FieldElement{-4143277, 
-12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, + }, + { + FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, + FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, + FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, + }, + { + FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, + FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, + FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, + }, + { + FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, + FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, + FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, + }, + { + FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, + FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, + FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, + }, + { + FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, + FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, + FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, + }, + }, + { + { + FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, + FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, + FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, + }, + { + FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, + FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, + FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, + }, + { + FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, + FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, + FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, + }, + { + FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, + FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, + FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, + }, + { + FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, + FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, + 
FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, + }, + { + FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, + FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, + FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, + }, + { + FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, + FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, + FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, + }, + { + FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, + FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, + FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, + }, + }, + { + { + FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, + FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, + FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, + }, + { + FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, + FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, + FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, + }, + { + FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, + FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, + FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, + }, + { + FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, + FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, + FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, + }, + { + FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, + FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, + FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, + }, + { + FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, + FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, + FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, + }, + { + FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, + FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 
26396185, 3731949, 345228, -5462949}, + FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, + }, + { + FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, + FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, + FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, + }, + }, + { + { + FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, + FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, + FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, + }, + { + FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, + FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, + FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, + }, + { + FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, + FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, + FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, + }, + { + FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, + FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, + FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, + }, + { + FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, + FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, + FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, + }, + { + FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, + FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, + FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, + }, + { + FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, + FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, + FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, + }, + { + FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, + FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, + FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, + }, + }, + { + { + FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, + FieldElement{26514989, 4740088, 27912651, 
3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, + FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, + }, + { + FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, + FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, + FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, + }, + { + FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, + FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, + FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, + }, + { + FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, + FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, + FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, + }, + { + FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, + FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, + FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, + }, + { + FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, + FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, + FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, + }, + { + FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, + FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, + FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, + }, + { + FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, + FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, + FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, + }, + }, + { + { + FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, + FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, + FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, + }, + { + FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, + FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, + FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, + }, + { + FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, + FieldElement{-21911077, 
-5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, + FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, + }, + { + FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, + FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, + FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, + }, + { + FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, + FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, + FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, + }, + { + FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, + FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, + FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, + }, + { + FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, + FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, + FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, + }, + { + FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, + FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, + FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, + }, + }, + { + { + FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, + FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, + FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, + }, + { + FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, + FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, + FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, + }, + { + FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, + FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, + FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, + }, + { + FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, + FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, + FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, + }, + { + FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, 
-11985298, 12422646}, + FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, + FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, + }, + { + FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, + FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, + FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, + }, + { + FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, + FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, + FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, + }, + { + FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, + FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, + FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, + }, + }, + { + { + FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, + FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, + FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, + }, + { + FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, + FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, + FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, + }, + { + FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, + FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, + FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, + }, + { + FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, + FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, + FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, + }, + { + FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, + FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, + FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, + }, + { + FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, + FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, + FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, + }, + { + FieldElement{-22751231, -5303997, -12907607, -12768866, 
-15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, + FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, + FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, + }, + { + FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, + FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, + FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, + }, + }, + { + { + FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, + FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, + FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, + }, + { + FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, + FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, + FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, + }, + { + FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, + FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, + FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, + }, + { + FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, + FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, + FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, + }, + { + FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, + FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, + FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, + }, + { + FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, + FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, + FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, + }, + { + FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, + FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, + FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, + }, + { + FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, + FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, + FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, + }, + }, + { + { + FieldElement{-19025756, 1632005, 
13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, + FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, + FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, + }, + { + FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, + FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, + FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, + }, + { + FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, + FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, + FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, + }, + { + FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, + FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, + FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, + }, + { + FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, + FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, + FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, + }, + { + FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, + FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, + FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, + }, + { + FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, + FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, + FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, + }, + { + FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, + FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, + FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, + }, + }, + { + { + FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, + FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, + FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, + }, + { + FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, + FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, + FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, + }, + { + 
FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, + FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, + FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, + }, + { + FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, + FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, + FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, + }, + { + FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, + FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, + FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, + }, + { + FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, + FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, + FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, + }, + { + FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, + FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, + FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, + }, + { + FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, + FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, + FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, + }, + }, + { + { + FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, + FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, + FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, + }, + { + FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, + FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, + FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, + }, + { + FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, + FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, + FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, + }, + { + FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, + FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, + FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, 
-10820713, -27162222, -14030531}, + }, + { + FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, + FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, + FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, + }, + { + FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, + FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, + FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, + }, + { + FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, + FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, + FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, + }, + { + FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, + FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, + FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, + }, + }, + { + { + FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, + FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, + FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, + }, + { + FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, + FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, + FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, + }, + { + FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, + FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, + FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, + }, + { + FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, + FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, + FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, + }, + { + FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, + FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, + FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, + }, + { + FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, + FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, + FieldElement{10637467, -5688064, 5674781, 1072708, 
-26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, + }, + { + FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, + FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, + FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, + }, + { + FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, + FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, + FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, + }, + }, + { + { + FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, + FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, + FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, + }, + { + FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, + FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, + FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, + }, + { + FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, + FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, + FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, + }, + { + FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, + FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, + FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, + }, + { + FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, + FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, + FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, + }, + { + FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, + FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, + FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, + }, + { + FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, + FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, + FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, + }, + { + FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, + FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, + FieldElement{-1409668, 
12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, + }, + }, + { + { + FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, + FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, + FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, + }, + { + FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, + FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, + FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, + }, + { + FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, + FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, + FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, + }, + { + FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, + FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, + FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, + }, + { + FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, + FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, + FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, + }, + { + FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, + FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, + FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, + }, + { + FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, + FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, + FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, + }, + { + FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, + FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, + FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, + }, + }, + { + { + FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, + FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, + FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, + }, + { + FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, + FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, + 
FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, + }, + { + FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, + FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, + FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, + }, + { + FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, + FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, + FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, + }, + { + FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, + FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, + FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, + }, + { + FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, + FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, + FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, + }, + { + FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, + FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, + FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, + }, + { + FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, + FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, + FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, + }, + }, + { + { + FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, + FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, + FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, + }, + { + FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, + FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, + FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, + }, + { + FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, + FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, + FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, + }, + { + FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, + FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 
31123697, -10958981}, + FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, + }, + { + FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, + FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, + FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, + }, + { + FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, + FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, + FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, + }, + { + FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, + FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, + FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, + }, + { + FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, + FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, + FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, + }, + }, + { + { + FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, + FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, + FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, + }, + { + FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, + FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, + FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, + }, + { + FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, + FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, + FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, + }, + { + FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, + FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, + FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, + }, + { + FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, + FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, + FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, + }, + { + FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, + FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 
6985184, -9940361, 2854096}, + FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, + }, + { + FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, + FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, + FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, + }, + { + FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, + FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, + FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, + }, + }, + { + { + FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, + FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, + FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, + }, + { + FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, + FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, + FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, + }, + { + FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, + FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, + FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, + }, + { + FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, + FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, + FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, + }, + { + FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, + FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, + FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, + }, + { + FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, + FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, + FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, + }, + { + FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, + FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, + FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, + }, + { + FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, + FieldElement{-26205683, 11014233, -9842651, 
-2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, + FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, + }, + }, + { + { + FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, + FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, + FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, + }, + { + FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, + FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, + FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, + }, + { + FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, + FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, + FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, + }, + { + FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, + FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, + FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, + }, + { + FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, + FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, + FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, + }, + { + FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, + FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, + FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, + }, + { + FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, + FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, + FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, + }, + { + FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, + FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, + FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, + }, + }, + { + { + FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, + FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, + FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, + }, + { + FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, + 
FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, + FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, + }, + { + FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, + FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, + FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, + }, + { + FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, + FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, + FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, + }, + { + FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, + FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, + FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, + }, + { + FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, + FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, + FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, + }, + { + FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, + FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, + FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, + }, + { + FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, + FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, + FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, + }, + }, + { + { + FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, + FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, + FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, + }, + { + FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, + FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, + FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, + }, + { + FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, + FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, + FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, + }, + { + FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 
32477045, -9017955, 5002294, -15550259}, + FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, + FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, + }, + { + FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, + FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, + FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, + }, + { + FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, + FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, + FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, + }, + { + FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, + FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, + FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, + }, + { + FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, + FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, + FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, + }, + }, + { + { + FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, + FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, + FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, + }, + { + FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, + FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, + FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, + }, + { + FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, + FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, + FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, + }, + { + FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, + FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, + FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, + }, + { + FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, + FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, + FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, + }, + { + FieldElement{-3362323, 
-7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, + FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, + FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, + }, + { + FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, + FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, + FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, + }, + { + FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, + FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, + FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, + }, + }, + { + { + FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, + FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, + FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, + }, + { + FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, + FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, + FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, + }, + { + FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, + FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, + FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, + }, + { + FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, + FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, + FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, + }, + { + FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, + FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, + FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, + }, + { + FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, + FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, + FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, + }, + { + FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, + FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, + FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, + }, + 
{ + FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, + FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, + FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, + }, + }, + { + { + FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, + FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, + FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, + }, + { + FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, + FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, + FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, + }, + { + FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, + FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, + FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, + }, + { + FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, + FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, + FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, + }, + { + FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, + FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, + FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, + }, + { + FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, + FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, + FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, + }, + { + FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, + FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, + FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, + }, + { + FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, + FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, + FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, + }, + }, + { + { + FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, + FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, + FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, 
-7689309, -16335821, -24568481, 11788948}, + }, + { + FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, + FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, + FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, + }, + { + FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, + FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, + FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, + }, + { + FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, + FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, + FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, + }, + { + FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, + FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, + FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, + }, + { + FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, + FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, + FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, + }, + { + FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, + FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, + FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, + }, + { + FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, + FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, + FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, + }, + }, + { + { + FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, + FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, + FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, + }, + { + FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, + FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, + FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, + }, + { + FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, + FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, + FieldElement{33184999, 11180355, 15832085, -11385430, 
-1633671, 225884, 15089336, -11023903, -6135662, 14480053}, + }, + { + FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, + FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, + FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, + }, + { + FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, + FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, + FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, + }, + { + FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, + FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, + FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, + }, + { + FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, + FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, + FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, + }, + { + FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, + FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, + FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, + }, + }, + { + { + FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, + FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, + FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, + }, + { + FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, + FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, + FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, + }, + { + FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, + FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, + FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, + }, + { + FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, + FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, + FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, + }, + { + FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, + FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, + 
FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, + }, + { + FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, + FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, + FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, + }, + { + FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, + FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, + FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, + }, + { + FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, + FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, + FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, + }, + }, + { + { + FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, + FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, + FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, + }, + { + FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, + FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, + FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, + }, + { + FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, + FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, + FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, + }, + { + FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, + FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, + FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, + }, + { + FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, + FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, + FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, + }, + { + FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, + FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, + FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, + }, + { + FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, + FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, 
-16049879, 10928917, 3011958}, + FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, + }, + { + FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, + FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, + FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, + }, + }, +} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go new file mode 100644 index 0000000000..fd03c252af --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go @@ -0,0 +1,1793 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package edwards25519 + +import "encoding/binary" + +// This code is a port of the public domain, “ref10” implementation of ed25519 +// from SUPERCOP. + +// FieldElement represents an element of the field GF(2^255 - 19). An element +// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 +// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on +// context. +type FieldElement [10]int32 + +var zero FieldElement + +func FeZero(fe *FieldElement) { + copy(fe[:], zero[:]) +} + +func FeOne(fe *FieldElement) { + FeZero(fe) + fe[0] = 1 +} + +func FeAdd(dst, a, b *FieldElement) { + dst[0] = a[0] + b[0] + dst[1] = a[1] + b[1] + dst[2] = a[2] + b[2] + dst[3] = a[3] + b[3] + dst[4] = a[4] + b[4] + dst[5] = a[5] + b[5] + dst[6] = a[6] + b[6] + dst[7] = a[7] + b[7] + dst[8] = a[8] + b[8] + dst[9] = a[9] + b[9] +} + +func FeSub(dst, a, b *FieldElement) { + dst[0] = a[0] - b[0] + dst[1] = a[1] - b[1] + dst[2] = a[2] - b[2] + dst[3] = a[3] - b[3] + dst[4] = a[4] - b[4] + dst[5] = a[5] - b[5] + dst[6] = a[6] - b[6] + dst[7] = a[7] - b[7] + dst[8] = a[8] - b[8] + dst[9] = a[9] - b[9] +} + +func FeCopy(dst, src *FieldElement) { + copy(dst[:], src[:]) +} + +// Replace (f,g) with (g,g) if b == 1; +// replace (f,g) with (f,g) if b == 0. +// +// Preconditions: b in {0,1}. +func FeCMove(f, g *FieldElement, b int32) { + b = -b + f[0] ^= b & (f[0] ^ g[0]) + f[1] ^= b & (f[1] ^ g[1]) + f[2] ^= b & (f[2] ^ g[2]) + f[3] ^= b & (f[3] ^ g[3]) + f[4] ^= b & (f[4] ^ g[4]) + f[5] ^= b & (f[5] ^ g[5]) + f[6] ^= b & (f[6] ^ g[6]) + f[7] ^= b & (f[7] ^ g[7]) + f[8] ^= b & (f[8] ^ g[8]) + f[9] ^= b & (f[9] ^ g[9]) +} + +func load3(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + return r +} + +func load4(in []byte) int64 { + var r int64 + r = int64(in[0]) + r |= int64(in[1]) << 8 + r |= int64(in[2]) << 16 + r |= int64(in[3]) << 24 + return r +} + +func FeFromBytes(dst *FieldElement, src *[32]byte) { + h0 := load4(src[:]) + h1 := load3(src[4:]) << 6 + h2 := load3(src[7:]) << 5 + h3 := load3(src[10:]) << 3 + h4 := load3(src[13:]) << 2 + h5 := load4(src[16:]) + h6 := load3(src[20:]) << 7 + h7 := load3(src[23:]) << 5 + h8 := load3(src[26:]) << 4 + h9 := (load3(src[29:]) & 8388607) << 2 + + FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeToBytes marshals h to s. +// Preconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Write p=2^255-19; q=floor(h/p). 
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func FeToBytes(s *[32]byte, h *FieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+
+	s[0] = byte(h[0] >> 0)
+	s[1] = byte(h[0] >> 8)
+	s[2] = byte(h[0] >> 16)
+	s[3] = byte((h[0] >> 24) | (h[1] << 2))
+	s[4] = byte(h[1] >> 6)
+	s[5] = byte(h[1] >> 14)
+	s[6] = byte((h[1] >> 22) | (h[2] << 3))
+	s[7] = byte(h[2] >> 5)
+	s[8] = byte(h[2] >> 13)
+	s[9] = byte((h[2] >> 21) | (h[3] << 5))
+	s[10] = byte(h[3] >> 3)
+	s[11] = byte(h[3] >> 11)
+	s[12] = byte((h[3] >> 19) | (h[4] << 6))
+	s[13] = byte(h[4] >> 2)
+	s[14] = byte(h[4] >> 10)
+	s[15] = byte(h[4] >> 18)
+	s[16] = byte(h[5] >> 0)
+	s[17] = byte(h[5] >> 8)
+	s[18] = byte(h[5] >> 16)
+	s[19] = byte((h[5] >> 24) | (h[6] << 1))
+	s[20] = byte(h[6] >> 7)
+	s[21] = byte(h[6] >> 15)
+	s[22] = byte((h[6] >> 23) | (h[7] << 3))
+	s[23] = byte(h[7] >> 5)
+	s[24] = byte(h[7] >> 13)
+	s[25] = byte((h[7] >> 21) | (h[8] << 4))
+	s[26] = byte(h[8] >> 4)
+	s[27] = byte(h[8] >> 12)
+	s[28] = byte((h[8] >> 20) | (h[9] << 6))
+	s[29] = byte(h[9] >> 2)
+	s[30] = byte(h[9] >> 10)
+	s[31] = byte(h[9] >> 18)
+}
+
+func FeIsNegative(f *FieldElement) byte {
+	var s [32]byte
+	FeToBytes(&s, f)
+	return s[0] & 1
+}
+
+func FeIsNonZero(f *FieldElement) int32 {
+	var s [32]byte
+	FeToBytes(&s, f)
+	var x uint8
+	for _, b := range s {
+		x |= b
+	}
+	x |= x >> 4
+	x |= x >> 2
+	x |= x >> 1
+	return int32(x & 1)
+}
+
+// FeNeg sets h = -f
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func FeNeg(h, f *FieldElement) {
+	h[0] = -f[0]
+	h[1] = -f[1]
+	h[2] = -f[2]
+	h[3] = -f[3]
+	h[4] = -f[4]
+	h[5] = -f[5]
+	h[6] = -f[6]
+	h[7] = -f[7]
+	h[8] = -f[8]
+	h[9] = -f[9]
+}
+
+func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) {
+	var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64
+
+	/*
+	  |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+	    i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+	  |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+	    i.e.
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 + */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + /* |h0| <= 2^25 */ + /* |h4| <= 2^25 */ + /* |h1| <= 1.51*2^58 */ + /* |h5| <= 1.51*2^58 */ + + c1 = (h1 + (1 << 24)) >> 25 + h2 += c1 + h1 -= c1 << 25 + c5 = (h5 + (1 << 24)) >> 25 + h6 += c5 + h5 -= c5 << 25 + /* |h1| <= 2^24; from now on fits into int32 */ + /* |h5| <= 2^24; from now on fits into int32 */ + /* |h2| <= 1.21*2^59 */ + /* |h6| <= 1.21*2^59 */ + + c2 = (h2 + (1 << 25)) >> 26 + h3 += c2 + h2 -= c2 << 26 + c6 = (h6 + (1 << 25)) >> 26 + h7 += c6 + h6 -= c6 << 26 + /* |h2| <= 2^25; from now on fits into int32 unchanged */ + /* |h6| <= 2^25; from now on fits into int32 unchanged */ + /* |h3| <= 1.51*2^58 */ + /* |h7| <= 1.51*2^58 */ + + c3 = (h3 + (1 << 24)) >> 25 + h4 += c3 + h3 -= c3 << 25 + c7 = (h7 + (1 << 24)) >> 25 + h8 += c7 + h7 -= c7 << 25 + /* |h3| <= 2^24; from now on fits into int32 unchanged */ + /* |h7| <= 2^24; from now on fits into int32 unchanged */ + /* |h4| <= 1.52*2^33 */ + /* |h8| <= 1.52*2^33 */ + + c4 = (h4 + (1 << 25)) >> 26 + h5 += c4 + h4 -= c4 << 26 + c8 = (h8 + (1 << 25)) >> 26 + h9 += c8 + h8 -= c8 << 26 + /* |h4| <= 2^25; from now on fits into int32 unchanged */ + /* |h8| <= 2^25; from now on fits into int32 unchanged */ + /* |h5| <= 1.01*2^24 */ + /* |h9| <= 1.51*2^58 */ + + c9 = (h9 + (1 << 24)) >> 25 + h0 += c9 * 19 + h9 -= c9 << 25 + /* |h9| <= 2^24; from now on fits into int32 unchanged */ + /* |h0| <= 1.8*2^37 */ + + c0 = (h0 + (1 << 25)) >> 26 + h1 += c0 + h0 -= c0 << 26 + /* |h0| <= 2^25; from now on fits into int32 unchanged */ + /* |h1| <= 1.01*2^24 */ + + h[0] = int32(h0) + h[1] = int32(h1) + h[2] = int32(h2) + h[3] = int32(h3) + h[4] = int32(h4) + h[5] = int32(h5) + h[6] = int32(h6) + h[7] = int32(h7) + h[8] = int32(h8) + h[9] = int32(h9) +} + +// FeMul calculates h = f * g +// Can overlap h with f or g. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. +// +// Notes on implementation strategy: +// +// Using schoolbook multiplication. +// Karatsuba would save a little in some cost models. +// +// Most multiplications by 2 and 19 are 32-bit precomputations; +// cheaper than 64-bit postcomputations. +// +// There is one remaining multiplication by 19 in the carry chain; +// one *19 precomputation can be merged into this, +// but the resulting data flow is considerably less clean. +// +// There are 12 carries below. +// 10 of them are 2-way parallelizable and vectorizable. +// Can get away with 11 carries, but then data flow is much deeper. +// +// With tighter constraints on inputs, can squeeze carries into int32. 
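+// +// Illustrative note (annotation, not part of the upstream source): a worked +// instance of the reduction: limb t[i] has radix weight 2^ceil(25.5*i), so the +// cross term f[1]*g[9] lands at weight 2^(26+230) = 2^256. Since 2^255 = 19 +// (mod 2^255-19), 2^256 = 2*19 = 38 at weight 2^0, which is why h0 below +// carries the term f1_2*g9_19 = (2*f[1]) * (19*g[9]).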
+func FeMul(h, f, g *FieldElement) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + + f1_2 := int64(2 * f[1]) + f3_2 := int64(2 * f[3]) + f5_2 := int64(2 * f[5]) + f7_2 := int64(2 * f[7]) + f9_2 := int64(2 * f[9]) + + g0 := int64(g[0]) + g1 := int64(g[1]) + g2 := int64(g[2]) + g3 := int64(g[3]) + g4 := int64(g[4]) + g5 := int64(g[5]) + g6 := int64(g[6]) + g7 := int64(g[7]) + g8 := int64(g[8]) + g9 := int64(g[9]) + + g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ + g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ + g3_19 := int64(19 * g[3]) + g4_19 := int64(19 * g[4]) + g5_19 := int64(19 * g[5]) + g6_19 := int64(19 * g[6]) + g7_19 := int64(19 * g[7]) + g8_19 := int64(19 * g[8]) + g9_19 := int64(19 * g[9]) + + h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 + h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 + h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 + h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 + h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 + h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 + h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 + h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 + h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 + h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { + f0 := int64(f[0]) + f1 := int64(f[1]) + f2 := int64(f[2]) + f3 := int64(f[3]) + f4 := int64(f[4]) + f5 := int64(f[5]) + f6 := int64(f[6]) + f7 := int64(f[7]) + f8 := int64(f[8]) + f9 := int64(f[9]) + f0_2 := int64(2 * f[0]) + f1_2 := int64(2 * f[1]) + f2_2 := int64(2 * f[2]) + f3_2 := int64(2 * f[3]) + f4_2 := int64(2 * f[4]) + f5_2 := int64(2 * f[5]) + f6_2 := int64(2 * f[6]) + f7_2 := int64(2 * f[7]) + f5_38 := 38 * f5 // 1.31*2^30 + f6_19 := 19 * f6 // 1.31*2^30 + f7_38 := 38 * f7 // 1.31*2^30 + f8_19 := 19 * f8 // 1.31*2^30 + f9_38 := 38 * f9 // 1.31*2^30 + + h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 + h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 + h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 + h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 + h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 + h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 + h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 + h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 + h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 + h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 + + return +} + +// FeSquare calculates h = f*f. Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
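+// +// Illustrative note (annotation, not part of the upstream source): squaring +// folds the symmetric cross terms of schoolbook multiplication, so in feSquare +// above h1 = 2*f0*f1 is written f0_2*f1, and the top limb wraps with the same +// factor 19: f9*f9 sits at weight 2^460 = 2^205 * 2^255 = 19 * 2^205 (mod p), +// i.e. 38*f9*f9 at limb weight 2^204, matching the f9*f9_38 term of h8.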
+func FeSquare(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +// FeSquare2 sets h = 2 * f * f +// +// Can overlap h with f. +// +// Preconditions: +// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. +// +// Postconditions: +// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. +// See fe_mul.c for discussion of implementation strategy. +func FeSquare2(h, f *FieldElement) { + h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) + + h0 += h0 + h1 += h1 + h2 += h2 + h3 += h3 + h4 += h4 + h5 += h5 + h6 += h6 + h7 += h7 + h8 += h8 + h9 += h9 + + FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) +} + +func FeInvert(out, z *FieldElement) { + var t0, t1, t2, t3 FieldElement + var i int + + FeSquare(&t0, z) // 2^1 + FeSquare(&t1, &t0) // 2^2 + for i = 1; i < 2; i++ { // 2^3 + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) // 2^3 + 2^0 + FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 + FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 + FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 + FeSquare(&t2, &t1) // 5,4,3,2,1 + for i = 1; i < 5; i++ { // 9,8,7,6,5 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 + FeSquare(&t2, &t1) // 10..1 + for i = 1; i < 10; i++ { // 19..10 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 19..0 + FeSquare(&t3, &t2) // 20..1 + for i = 1; i < 20; i++ { // 39..20 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 39..0 + FeSquare(&t2, &t2) // 40..1 + for i = 1; i < 10; i++ { // 49..10 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 49..0 + FeSquare(&t2, &t1) // 50..1 + for i = 1; i < 50; i++ { // 99..50 + FeSquare(&t2, &t2) + } + FeMul(&t2, &t2, &t1) // 99..0 + FeSquare(&t3, &t2) // 100..1 + for i = 1; i < 100; i++ { // 199..100 + FeSquare(&t3, &t3) + } + FeMul(&t2, &t3, &t2) // 199..0 + FeSquare(&t2, &t2) // 200..1 + for i = 1; i < 50; i++ { // 249..50 + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) // 249..0 + FeSquare(&t1, &t1) // 250..1 + for i = 1; i < 5; i++ { // 254..5 + FeSquare(&t1, &t1) + } + FeMul(out, &t1, &t0) // 254..5,3,1,0 +} + +func fePow22523(out, z *FieldElement) { + var t0, t1, t2 FieldElement + var i int + + FeSquare(&t0, z) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeSquare(&t1, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, z, &t1) + FeMul(&t0, &t0, &t1) + FeSquare(&t0, &t0) + for i = 1; i < 1; i++ { + FeSquare(&t0, &t0) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 5; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 20; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 10; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t1, &t0) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t1, &t1, &t0) + FeSquare(&t2, &t1) + for i = 1; i < 100; i++ { + FeSquare(&t2, &t2) + } + FeMul(&t1, &t2, &t1) + FeSquare(&t1, &t1) + for i = 1; i < 50; i++ { + FeSquare(&t1, &t1) + } + FeMul(&t0, &t1, &t0) + FeSquare(&t0, &t0) + for i = 1; i < 2; i++ { + FeSquare(&t0, &t0) + } + FeMul(out, &t0, z) +} + +// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * +// y^2 where d = -121665/121666. 
+// +// Several representations are used: +// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z +// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT +// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T +// PreComputedGroupElement: (y+x,y-x,2dxy) + +type ProjectiveGroupElement struct { + X, Y, Z FieldElement +} + +type ExtendedGroupElement struct { + X, Y, Z, T FieldElement +} + +type CompletedGroupElement struct { + X, Y, Z, T FieldElement +} + +type PreComputedGroupElement struct { + yPlusX, yMinusX, xy2d FieldElement +} + +type CachedGroupElement struct { + yPlusX, yMinusX, Z, T2d FieldElement +} + +func (p *ProjectiveGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) +} + +func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { + var t0 FieldElement + + FeSquare(&r.X, &p.X) + FeSquare(&r.Z, &p.Y) + FeSquare2(&r.T, &p.Z) + FeAdd(&r.Y, &p.X, &p.Y) + FeSquare(&t0, &r.Y) + FeAdd(&r.Y, &r.Z, &r.X) + FeSub(&r.Z, &r.Z, &r.X) + FeSub(&r.X, &t0, &r.Y) + FeSub(&r.T, &r.T, &r.Z) +} + +func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) Zero() { + FeZero(&p.X) + FeOne(&p.Y) + FeOne(&p.Z) + FeZero(&p.T) +} + +func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { + var q ProjectiveGroupElement + p.ToProjective(&q) + q.Double(r) +} + +func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { + FeAdd(&r.yPlusX, &p.Y, &p.X) + FeSub(&r.yMinusX, &p.Y, &p.X) + FeCopy(&r.Z, &p.Z) + FeMul(&r.T2d, &p.T, &d2) +} + +func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeCopy(&r.X, &p.X) + FeCopy(&r.Y, &p.Y) + FeCopy(&r.Z, &p.Z) +} + +func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { + var recip, x, y FieldElement + + FeInvert(&recip, &p.Z) + FeMul(&x, &p.X, &recip) + FeMul(&y, &p.Y, &recip) + FeToBytes(s, &y) + s[31] ^= FeIsNegative(&x) << 7 +} + +func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { + var u, v, v3, vxx, check FieldElement + + FeFromBytes(&p.Y, s) + FeOne(&p.Z) + FeSquare(&u, &p.Y) + FeMul(&v, &u, &d) + FeSub(&u, &u, &p.Z) // y = y^2-1 + FeAdd(&v, &v, &p.Z) // v = dy^2+1 + + FeSquare(&v3, &v) + FeMul(&v3, &v3, &v) // v3 = v^3 + FeSquare(&p.X, &v3) + FeMul(&p.X, &p.X, &v) + FeMul(&p.X, &p.X, &u) // x = uv^7 + + fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) + FeMul(&p.X, &p.X, &v3) + FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) + + var tmpX, tmp2 [32]byte + + FeSquare(&vxx, &p.X) + FeMul(&vxx, &vxx, &v) + FeSub(&check, &vxx, &u) // vx^2-u + if FeIsNonZero(&check) == 1 { + FeAdd(&check, &vxx, &u) // vx^2+u + if FeIsNonZero(&check) == 1 { + return false + } + FeMul(&p.X, &p.X, &SqrtM1) + + FeToBytes(&tmpX, &p.X) + for i, v := range tmpX { + tmp2[31-i] = v + } + } + + if FeIsNegative(&p.X) != (s[31] >> 7) { + FeNeg(&p.X, &p.X) + } + + FeMul(&p.T, &p.X, &p.Y) + return true +} + +func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) +} + +func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { + FeMul(&r.X, &p.X, &p.T) + FeMul(&r.Y, &p.Y, &p.Z) + FeMul(&r.Z, &p.Z, &p.T) + FeMul(&r.T, &p.X, &p.Y) +} + +func (p *PreComputedGroupElement) Zero() { + FeOne(&p.yPlusX) + FeOne(&p.yMinusX) + FeZero(&p.xy2d) +} + +func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.T2d, &p.T) + FeMul(&r.X, &p.Z, &q.Z) + FeAdd(&t0, &r.X, &r.X) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yPlusX) + FeMul(&r.Y, &r.Y, &q.yMinusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeAdd(&r.Z, &t0, &r.T) + FeSub(&r.T, &t0, &r.T) +} + +func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { + var t0 FieldElement + + FeAdd(&r.X, &p.Y, &p.X) + FeSub(&r.Y, &p.Y, &p.X) + FeMul(&r.Z, &r.X, &q.yMinusX) + FeMul(&r.Y, &r.Y, &q.yPlusX) + FeMul(&r.T, &q.xy2d, &p.T) + FeAdd(&t0, &p.Z, &p.Z) + FeSub(&r.X, &r.Z, &r.Y) + FeAdd(&r.Y, &r.Z, &r.Y) + FeSub(&r.Z, &t0, &r.T) + FeAdd(&r.T, &t0, &r.T) +} + +func slide(r *[256]int8, a *[32]byte) { + for i := range r { + r[i] = int8(1 & (a[i>>3] >> uint(i&7))) + } + + for i := range r { + if r[i] != 0 { + for b := 1; b <= 6 && i+b < 256; b++ { + if r[i+b] != 0 { + if r[i]+(r[i+b]<<uint(b)) <= 15 { + r[i] += r[i+b] << uint(b) + r[i+b] = 0 + } else if r[i]-(r[i+b]<<uint(b)) >= -15 { + r[i] -= r[i+b] << uint(b) + for k := i + b; k < 256; k++ { + if r[k] == 0 { + r[k] = 1 + break + } + r[k] = 0 + } + } else { + break + } + } + } + } + } +} + +// GeDoubleScalarMultVartime sets r = a*A + b*B +// where a = a[0]+256*a[1]+...+256^31 a[31]. +// and b = b[0]+256*b[1]+...+256^31 b[31]. +// B is the Ed25519 base point (x,4/5) with x positive. +func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { + var aSlide, bSlide [256]int8 + var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A + var t CompletedGroupElement + var u, A2 ExtendedGroupElement + var i int + + slide(&aSlide, a) + slide(&bSlide, b) + + A.ToCached(&Ai[0]) + A.Double(&t) + t.ToExtended(&A2) + + for i := 0; i < 7; i++ { + geAdd(&t, &A2, &Ai[i]) + t.ToExtended(&u) + u.ToCached(&Ai[i+1]) + } + + r.Zero() + + for i = 255; i >= 0; i-- { + if aSlide[i] != 0 || bSlide[i] != 0 { + break + } + } + + for ; i >= 0; i-- { + r.Double(&t) + + if aSlide[i] > 0 { + t.ToExtended(&u) + geAdd(&t, &u, &Ai[aSlide[i]/2]) + } else if aSlide[i] < 0 { + t.ToExtended(&u) + geSub(&t, &u, &Ai[(-aSlide[i])/2]) + } + + if bSlide[i] > 0 { + t.ToExtended(&u) + geMixedAdd(&t, &u, &bi[bSlide[i]/2]) + } else if bSlide[i] < 0 { + t.ToExtended(&u) + geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) + } + + t.ToProjective(r) + } +} + +// equal returns 1 if b == c and 0 otherwise, assuming that b and c are +// non-negative. +func equal(b, c int32) int32 { + x := uint32(b ^ c) + x-- + return int32(x >> 31) +} + +// negative returns 1 if b < 0 and 0 otherwise.
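+// +// Illustrative note (annotation, not part of the upstream source): both this +// helper and equal above are branch-free, so secret data never selects a code +// path. Worked through: equal(3, 3) gives x = uint32(3^3) = 0, x-- wraps to +// 0xffffffff, and x>>31 = 1; equal(3, 5) gives x = 6, x-- = 5, x>>31 = 0. +// negative simply extracts the two's-complement sign bit of b.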
+func negative(b int32) int32 { + return (b >> 31) & 1 +} + +func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { + FeCMove(&t.yPlusX, &u.yPlusX, b) + FeCMove(&t.yMinusX, &u.yMinusX, b) + FeCMove(&t.xy2d, &u.xy2d, b) +} + +func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { + var minusT PreComputedGroupElement + bNegative := negative(b) + bAbs := b - (((-bNegative) & b) << 1) + + t.Zero() + for i := int32(0); i < 8; i++ { + PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) + } + FeCopy(&minusT.yPlusX, &t.yMinusX) + FeCopy(&minusT.yMinusX, &t.yPlusX) + FeNeg(&minusT.xy2d, &t.xy2d) + PreComputedGroupElementCMove(t, &minusT, bNegative) +} + +// GeScalarMultBase computes h = a*B, where +// a = a[0]+256*a[1]+...+256^31 a[31] +// B is the Ed25519 base point (x,4/5) with x positive. +// +// Preconditions: +// a[31] <= 127 +func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { + var e [64]int8 + + for i, v := range a { + e[2*i] = int8(v & 15) + e[2*i+1] = int8((v >> 4) & 15) + } + + // each e[i] is between 0 and 15 and e[63] is between 0 and 7. + + carry := int8(0) + for i := 0; i < 63; i++ { + e[i] += carry + carry = (e[i] + 8) >> 4 + e[i] -= carry << 4 + } + e[63] += carry + // each e[i] is between -8 and 8. + + h.Zero() + var t PreComputedGroupElement + var r CompletedGroupElement + for i := int32(1); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } + + var s ProjectiveGroupElement + + h.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToProjective(&s) + s.Double(&r) + r.ToExtended(h) + + for i := int32(0); i < 64; i += 2 { + selectPoint(&t, i/2, int32(e[i])) + geMixedAdd(&r, h, &t) + r.ToExtended(h) + } +} + +// The scalars are GF(2^252 + 27742317777372353535851937790883648493). + +// Input: +// a[0]+256*a[1]+...+256^31*a[31] = a +// b[0]+256*b[1]+...+256^31*b[31] = b +// c[0]+256*c[1]+...+256^31*c[31] = c +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l +// where l = 2^252 + 27742317777372353535851937790883648493. 
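+// +// Illustrative note (annotation, not part of the upstream source): this is the +// scalar step of Ed25519 signing, where the second half of a signature is +// S = (H(R,A,M)*a + r) mod l. A call shaped like (names hypothetical) +// ScMulAdd(&sOut, &hReduced, &aScalar, &rScalar) computes that value; all four +// arguments are 32-byte little-endian scalars.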
+func ScMulAdd(s, a, b, c *[32]byte) { + a0 := 2097151 & load3(a[:]) + a1 := 2097151 & (load4(a[2:]) >> 5) + a2 := 2097151 & (load3(a[5:]) >> 2) + a3 := 2097151 & (load4(a[7:]) >> 7) + a4 := 2097151 & (load4(a[10:]) >> 4) + a5 := 2097151 & (load3(a[13:]) >> 1) + a6 := 2097151 & (load4(a[15:]) >> 6) + a7 := 2097151 & (load3(a[18:]) >> 3) + a8 := 2097151 & load3(a[21:]) + a9 := 2097151 & (load4(a[23:]) >> 5) + a10 := 2097151 & (load3(a[26:]) >> 2) + a11 := (load4(a[28:]) >> 7) + b0 := 2097151 & load3(b[:]) + b1 := 2097151 & (load4(b[2:]) >> 5) + b2 := 2097151 & (load3(b[5:]) >> 2) + b3 := 2097151 & (load4(b[7:]) >> 7) + b4 := 2097151 & (load4(b[10:]) >> 4) + b5 := 2097151 & (load3(b[13:]) >> 1) + b6 := 2097151 & (load4(b[15:]) >> 6) + b7 := 2097151 & (load3(b[18:]) >> 3) + b8 := 2097151 & load3(b[21:]) + b9 := 2097151 & (load4(b[23:]) >> 5) + b10 := 2097151 & (load3(b[26:]) >> 2) + b11 := (load4(b[28:]) >> 7) + c0 := 2097151 & load3(c[:]) + c1 := 2097151 & (load4(c[2:]) >> 5) + c2 := 2097151 & (load3(c[5:]) >> 2) + c3 := 2097151 & (load4(c[7:]) >> 7) + c4 := 2097151 & (load4(c[10:]) >> 4) + c5 := 2097151 & (load3(c[13:]) >> 1) + c6 := 2097151 & (load4(c[15:]) >> 6) + c7 := 2097151 & (load3(c[18:]) >> 3) + c8 := 2097151 & load3(c[21:]) + c9 := 2097151 & (load4(c[23:]) >> 5) + c10 := 2097151 & (load3(c[26:]) >> 2) + c11 := (load4(c[28:]) >> 7) + var carry [23]int64 + + s0 := c0 + a0*b0 + s1 := c1 + a0*b1 + a1*b0 + s2 := c2 + a0*b2 + a1*b1 + a2*b0 + s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 + s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 + s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 + s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 + s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 + s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 + s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 + s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 + s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 + s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 + s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 + s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 + s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 + s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 + s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 + s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 + s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 + s20 := a9*b11 + a10*b10 + a11*b9 + s21 := a10*b11 + a11*b10 + s22 := a11 * b11 + s23 := int64(0) + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + carry[18] = (s18 + (1 
<< 20)) >> 21 + s19 += carry[18] + s18 -= carry[18] << 21 + carry[20] = (s20 + (1 << 20)) >> 21 + s21 += carry[20] + s20 -= carry[20] << 21 + carry[22] = (s22 + (1 << 20)) >> 21 + s23 += carry[22] + s22 -= carry[22] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + carry[17] = (s17 + (1 << 20)) >> 21 + s18 += carry[17] + s17 -= carry[17] << 21 + carry[19] = (s19 + (1 << 20)) >> 21 + s20 += carry[19] + s19 -= carry[19] << 21 + carry[21] = (s21 + (1 << 20)) >> 21 + s22 += carry[21] + s21 -= carry[21] << 21 + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 + s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 
+= s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + s[0] = byte(s0 >> 0) + s[1] = byte(s0 >> 8) + s[2] = byte((s0 >> 16) | (s1 << 5)) + s[3] = byte(s1 >> 3) + s[4] = byte(s1 >> 11) + s[5] = byte((s1 >> 19) | (s2 << 2)) + s[6] = byte(s2 >> 6) + s[7] = byte((s2 >> 14) | (s3 << 7)) + s[8] = byte(s3 >> 1) + s[9] = byte(s3 >> 9) + s[10] = byte((s3 >> 17) | (s4 << 4)) + s[11] = byte(s4 >> 4) + s[12] = byte(s4 >> 12) + s[13] = byte((s4 >> 20) | (s5 << 1)) + s[14] = byte(s5 >> 7) + s[15] = byte((s5 >> 15) | (s6 << 6)) + s[16] = byte(s6 >> 2) + s[17] = byte(s6 >> 10) + s[18] = byte((s6 >> 18) | (s7 << 3)) + s[19] = byte(s7 >> 5) + s[20] = byte(s7 >> 13) + s[21] = byte(s8 >> 0) + s[22] = byte(s8 >> 8) + s[23] = byte((s8 >> 16) | (s9 << 5)) + s[24] = byte(s9 >> 3) + s[25] = byte(s9 >> 11) + s[26] = byte((s9 >> 19) | (s10 << 2)) + s[27] 
= byte(s10 >> 6) + s[28] = byte((s10 >> 14) | (s11 << 7)) + s[29] = byte(s11 >> 1) + s[30] = byte(s11 >> 9) + s[31] = byte(s11 >> 17) +} + +// Input: +// s[0]+256*s[1]+...+256^63*s[63] = s +// +// Output: +// s[0]+256*s[1]+...+256^31*s[31] = s mod l +// where l = 2^252 + 27742317777372353535851937790883648493. +func ScReduce(out *[32]byte, s *[64]byte) { + s0 := 2097151 & load3(s[:]) + s1 := 2097151 & (load4(s[2:]) >> 5) + s2 := 2097151 & (load3(s[5:]) >> 2) + s3 := 2097151 & (load4(s[7:]) >> 7) + s4 := 2097151 & (load4(s[10:]) >> 4) + s5 := 2097151 & (load3(s[13:]) >> 1) + s6 := 2097151 & (load4(s[15:]) >> 6) + s7 := 2097151 & (load3(s[18:]) >> 3) + s8 := 2097151 & load3(s[21:]) + s9 := 2097151 & (load4(s[23:]) >> 5) + s10 := 2097151 & (load3(s[26:]) >> 2) + s11 := 2097151 & (load4(s[28:]) >> 7) + s12 := 2097151 & (load4(s[31:]) >> 4) + s13 := 2097151 & (load3(s[34:]) >> 1) + s14 := 2097151 & (load4(s[36:]) >> 6) + s15 := 2097151 & (load3(s[39:]) >> 3) + s16 := 2097151 & load3(s[42:]) + s17 := 2097151 & (load4(s[44:]) >> 5) + s18 := 2097151 & (load3(s[47:]) >> 2) + s19 := 2097151 & (load4(s[49:]) >> 7) + s20 := 2097151 & (load4(s[52:]) >> 4) + s21 := 2097151 & (load3(s[55:]) >> 1) + s22 := 2097151 & (load4(s[57:]) >> 6) + s23 := (load4(s[60:]) >> 3) + + s11 += s23 * 666643 + s12 += s23 * 470296 + s13 += s23 * 654183 + s14 -= s23 * 997805 + s15 += s23 * 136657 + s16 -= s23 * 683901 + s23 = 0 + + s10 += s22 * 666643 + s11 += s22 * 470296 + s12 += s22 * 654183 + s13 -= s22 * 997805 + s14 += s22 * 136657 + s15 -= s22 * 683901 + s22 = 0 + + s9 += s21 * 666643 + s10 += s21 * 470296 + s11 += s21 * 654183 + s12 -= s21 * 997805 + s13 += s21 * 136657 + s14 -= s21 * 683901 + s21 = 0 + + s8 += s20 * 666643 + s9 += s20 * 470296 + s10 += s20 * 654183 + s11 -= s20 * 997805 + s12 += s20 * 136657 + s13 -= s20 * 683901 + s20 = 0 + + s7 += s19 * 666643 + s8 += s19 * 470296 + s9 += s19 * 654183 + s10 -= s19 * 997805 + s11 += s19 * 136657 + s12 -= s19 * 683901 + s19 = 0 + + s6 += s18 * 666643 + s7 += s18 * 470296 + s8 += s18 * 654183 + s9 -= s18 * 997805 + s10 += s18 * 136657 + s11 -= s18 * 683901 + s18 = 0 + + var carry [17]int64 + + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[12] = (s12 + (1 << 20)) >> 21 + s13 += carry[12] + s12 -= carry[12] << 21 + carry[14] = (s14 + (1 << 20)) >> 21 + s15 += carry[14] + s14 -= carry[14] << 21 + carry[16] = (s16 + (1 << 20)) >> 21 + s17 += carry[16] + s16 -= carry[16] << 21 + + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + carry[13] = (s13 + (1 << 20)) >> 21 + s14 += carry[13] + s13 -= carry[13] << 21 + carry[15] = (s15 + (1 << 20)) >> 21 + s16 += carry[15] + s15 -= carry[15] << 21 + + s5 += s17 * 666643 + s6 += s17 * 470296 + s7 += s17 * 654183 + s8 -= s17 * 997805 + s9 += s17 * 136657 + s10 -= s17 * 683901 + s17 = 0 + + s4 += s16 * 666643 + s5 += s16 * 470296 + s6 += s16 * 654183 + s7 -= s16 * 997805 + s8 += s16 * 136657 + s9 -= s16 * 683901 + s16 = 0 + + s3 += s15 * 666643 + s4 += s15 * 470296 + s5 += s15 * 654183 + s6 -= s15 * 997805 + s7 += s15 * 136657 + s8 -= s15 * 683901 + s15 = 0 + + s2 += s14 * 666643 + s3 += s14 * 470296 + s4 += s14 * 654183 + s5 -= s14 * 997805 + s6 += s14 * 136657 
+ s7 -= s14 * 683901 + s14 = 0 + + s1 += s13 * 666643 + s2 += s13 * 470296 + s3 += s13 * 654183 + s4 -= s13 * 997805 + s5 += s13 * 136657 + s6 -= s13 * 683901 + s13 = 0 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = (s0 + (1 << 20)) >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[2] = (s2 + (1 << 20)) >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[4] = (s4 + (1 << 20)) >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[6] = (s6 + (1 << 20)) >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[8] = (s8 + (1 << 20)) >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[10] = (s10 + (1 << 20)) >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + carry[1] = (s1 + (1 << 20)) >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[3] = (s3 + (1 << 20)) >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[5] = (s5 + (1 << 20)) >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[7] = (s7 + (1 << 20)) >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[9] = (s9 + (1 << 20)) >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[11] = (s11 + (1 << 20)) >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + carry[11] = s11 >> 21 + s12 += carry[11] + s11 -= carry[11] << 21 + + s0 += s12 * 666643 + s1 += s12 * 470296 + s2 += s12 * 654183 + s3 -= s12 * 997805 + s4 += s12 * 136657 + s5 -= s12 * 683901 + s12 = 0 + + carry[0] = s0 >> 21 + s1 += carry[0] + s0 -= carry[0] << 21 + carry[1] = s1 >> 21 + s2 += carry[1] + s1 -= carry[1] << 21 + carry[2] = s2 >> 21 + s3 += carry[2] + s2 -= carry[2] << 21 + carry[3] = s3 >> 21 + s4 += carry[3] + s3 -= carry[3] << 21 + carry[4] = s4 >> 21 + s5 += carry[4] + s4 -= carry[4] << 21 + carry[5] = s5 >> 21 + s6 += carry[5] + s5 -= carry[5] << 21 + carry[6] = s6 >> 21 + s7 += carry[6] + s6 -= carry[6] << 21 + carry[7] = s7 >> 21 + s8 += carry[7] + s7 -= carry[7] << 21 + carry[8] = s8 >> 21 + s9 += carry[8] + s8 -= carry[8] << 21 + carry[9] = s9 >> 21 + s10 += carry[9] + s9 -= carry[9] << 21 + carry[10] = s10 >> 21 + s11 += carry[10] + s10 -= carry[10] << 21 + + out[0] = byte(s0 >> 0) + out[1] = byte(s0 >> 8) + out[2] = byte((s0 >> 16) | (s1 << 5)) + out[3] = byte(s1 >> 3) + out[4] = byte(s1 >> 11) + out[5] = byte((s1 >> 19) | (s2 << 2)) + out[6] = byte(s2 >> 6) + out[7] = byte((s2 >> 14) | (s3 << 7)) + out[8] = byte(s3 >> 1) + out[9] = byte(s3 >> 9) + out[10] = byte((s3 >> 17) | (s4 << 4)) + out[11] = byte(s4 >> 4) + out[12] = byte(s4 >> 12) + out[13] = byte((s4 >> 20) | (s5 << 1)) + out[14] = byte(s5 >> 7) + out[15] = byte((s5 >> 15) | (s6 << 6)) + out[16] = byte(s6 >> 2) + out[17] = byte(s6 >> 10) + out[18] = byte((s6 >> 18) | (s7 << 
3)) + out[19] = byte(s7 >> 5) + out[20] = byte(s7 >> 13) + out[21] = byte(s8 >> 0) + out[22] = byte(s8 >> 8) + out[23] = byte((s8 >> 16) | (s9 << 5)) + out[24] = byte(s9 >> 3) + out[25] = byte(s9 >> 11) + out[26] = byte((s9 >> 19) | (s10 << 2)) + out[27] = byte(s10 >> 6) + out[28] = byte((s10 >> 14) | (s11 << 7)) + out[29] = byte(s11 >> 1) + out[30] = byte(s11 >> 9) + out[31] = byte(s11 >> 17) +} + +// order is the order of Curve25519 in little-endian form. +var order = [4]uint64{0x5812631a5cf5d3ed, 0x14def9dea2f79cd6, 0, 0x1000000000000000} + +// ScMinimal returns true if the given scalar is less than the order of the +// curve. +func ScMinimal(scalar *[32]byte) bool { + for i := 3; ; i-- { + v := binary.LittleEndian.Uint64(scalar[i*8:]) + if v > order[i] { + return false + } else if v < order[i] { + break + } else if i == 0 { + return false + } + } + + return true +} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/armor.go b/vendor/golang.org/x/crypto/openpgp/armor/armor.go deleted file mode 100644 index 592d186436..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/armor.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is -// very similar to PEM except that it has an additional CRC checksum. -package armor // import "golang.org/x/crypto/openpgp/armor" - -import ( - "bufio" - "bytes" - "encoding/base64" - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// A Block represents an OpenPGP armored structure. -// -// The encoded form is: -// -----BEGIN Type----- -// Headers -// -// base64-encoded Bytes -// '=' base64 encoded checksum -// -----END Type----- -// where Headers is a possibly empty sequence of Key: Value lines. -// -// Since the armored data can be very large, this package presents a streaming -// interface. -type Block struct { - Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). - Header map[string]string // Optional headers. - Body io.Reader // A Reader from which the contents can be read - lReader lineReader - oReader openpgpReader -} - -var ArmorCorrupt error = errors.StructuralError("armor invalid") - -const crc24Init = 0xb704ce -const crc24Poly = 0x1864cfb -const crc24Mask = 0xffffff - -// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 -func crc24(crc uint32, d []byte) uint32 { - for _, b := range d { - crc ^= uint32(b) << 16 - for i := 0; i < 8; i++ { - crc <<= 1 - if crc&0x1000000 != 0 { - crc ^= crc24Poly - } - } - } - return crc -} - -var armorStart = []byte("-----BEGIN ") -var armorEnd = []byte("-----END ") -var armorEndOfLine = []byte("-----") - -// lineReader wraps a line based reader. It watches for the end of an armor -// block and records the expected CRC value. 
-type lineReader struct { - in *bufio.Reader - buf []byte - eof bool - crc uint32 -} - -func (l *lineReader) Read(p []byte) (n int, err error) { - if l.eof { - return 0, io.EOF - } - - if len(l.buf) > 0 { - n = copy(p, l.buf) - l.buf = l.buf[n:] - return - } - - line, isPrefix, err := l.in.ReadLine() - if err != nil { - return - } - if isPrefix { - return 0, ArmorCorrupt - } - - if len(line) == 5 && line[0] == '=' { - // This is the checksum line - var expectedBytes [3]byte - var m int - m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) - if m != 3 || err != nil { - return - } - l.crc = uint32(expectedBytes[0])<<16 | - uint32(expectedBytes[1])<<8 | - uint32(expectedBytes[2]) - - line, _, err = l.in.ReadLine() - if err != nil && err != io.EOF { - return - } - if !bytes.HasPrefix(line, armorEnd) { - return 0, ArmorCorrupt - } - - l.eof = true - return 0, io.EOF - } - - if len(line) > 96 { - return 0, ArmorCorrupt - } - - n = copy(p, line) - bytesToSave := len(line) - n - if bytesToSave > 0 { - if cap(l.buf) < bytesToSave { - l.buf = make([]byte, 0, bytesToSave) - } - l.buf = l.buf[0:bytesToSave] - copy(l.buf, line[n:]) - } - - return -} - -// openpgpReader passes Read calls to the underlying base64 decoder, but keeps -// a running CRC of the resulting data and checks the CRC against the value -// found by the lineReader at EOF. -type openpgpReader struct { - lReader *lineReader - b64Reader io.Reader - currentCRC uint32 -} - -func (r *openpgpReader) Read(p []byte) (n int, err error) { - n, err = r.b64Reader.Read(p) - r.currentCRC = crc24(r.currentCRC, p[:n]) - - if err == io.EOF { - if r.lReader.crc != uint32(r.currentCRC&crc24Mask) { - return 0, ArmorCorrupt - } - } - - return -} - -// Decode reads a PGP armored block from the given Reader. It will ignore -// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The -// given Reader is not usable after calling this function: an arbitrary amount -// of data may have been read past the end of the block. 
-func Decode(in io.Reader) (p *Block, err error) { - r := bufio.NewReaderSize(in, 100) - var line []byte - ignoreNext := false - -TryNextBlock: - p = nil - - // Skip leading garbage - for { - ignoreThis := ignoreNext - line, ignoreNext, err = r.ReadLine() - if err != nil { - return - } - if ignoreNext || ignoreThis { - continue - } - line = bytes.TrimSpace(line) - if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { - break - } - } - - p = new(Block) - p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) - p.Header = make(map[string]string) - nextIsContinuation := false - var lastKey string - - // Read headers - for { - isContinuation := nextIsContinuation - line, nextIsContinuation, err = r.ReadLine() - if err != nil { - p = nil - return - } - if isContinuation { - p.Header[lastKey] += string(line) - continue - } - line = bytes.TrimSpace(line) - if len(line) == 0 { - break - } - - i := bytes.Index(line, []byte(": ")) - if i == -1 { - goto TryNextBlock - } - lastKey = string(line[:i]) - p.Header[lastKey] = string(line[i+2:]) - } - - p.lReader.in = r - p.oReader.currentCRC = crc24Init - p.oReader.lReader = &p.lReader - p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) - p.Body = &p.oReader - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/armor/encode.go b/vendor/golang.org/x/crypto/openpgp/armor/encode.go deleted file mode 100644 index 6f07582c37..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/armor/encode.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package armor - -import ( - "encoding/base64" - "io" -) - -var armorHeaderSep = []byte(": ") -var blockEnd = []byte("\n=") -var newline = []byte("\n") -var armorEndOfLineOut = []byte("-----\n") - -// writeSlices writes its arguments to the given Writer. -func writeSlices(out io.Writer, slices ...[]byte) (err error) { - for _, s := range slices { - _, err = out.Write(s) - if err != nil { - return err - } - } - return -} - -// lineBreaker breaks data across several lines, all of the same byte length -// (except possibly the last). Lines are broken with a single '\n'. -type lineBreaker struct { - lineLength int - line []byte - used int - out io.Writer - haveWritten bool -} - -func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { - return &lineBreaker{ - lineLength: lineLength, - line: make([]byte, lineLength), - used: 0, - out: out, - } -} - -func (l *lineBreaker) Write(b []byte) (n int, err error) { - n = len(b) - - if n == 0 { - return - } - - if l.used == 0 && l.haveWritten { - _, err = l.out.Write([]byte{'\n'}) - if err != nil { - return - } - } - - if l.used+len(b) < l.lineLength { - l.used += copy(l.line[l.used:], b) - return - } - - l.haveWritten = true - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - excess := l.lineLength - l.used - l.used = 0 - - _, err = l.out.Write(b[0:excess]) - if err != nil { - return - } - - _, err = l.Write(b[excess:]) - return -} - -func (l *lineBreaker) Close() (err error) { - if l.used > 0 { - _, err = l.out.Write(l.line[0:l.used]) - if err != nil { - return - } - } - - return -} - -// encoding keeps track of a running CRC24 over the data which has been written -// to it and outputs a OpenPGP checksum when closed, followed by an armor -// trailer. 
-// -// It's built into a stack of io.Writers: -// encoding -> base64 encoder -> lineBreaker -> out -type encoding struct { - out io.Writer - breaker *lineBreaker - b64 io.WriteCloser - crc uint32 - blockType []byte -} - -func (e *encoding) Write(data []byte) (n int, err error) { - e.crc = crc24(e.crc, data) - return e.b64.Write(data) -} - -func (e *encoding) Close() (err error) { - err = e.b64.Close() - if err != nil { - return - } - e.breaker.Close() - - var checksumBytes [3]byte - checksumBytes[0] = byte(e.crc >> 16) - checksumBytes[1] = byte(e.crc >> 8) - checksumBytes[2] = byte(e.crc) - - var b64ChecksumBytes [4]byte - base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) - - return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) -} - -// Encode returns a WriteCloser which will encode the data written to it in -// OpenPGP armor. -func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { - bType := []byte(blockType) - err = writeSlices(out, armorStart, bType, armorEndOfLineOut) - if err != nil { - return - } - - for k, v := range headers { - err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) - if err != nil { - return - } - } - - _, err = out.Write(newline) - if err != nil { - return - } - - e := &encoding{ - out: out, - breaker: newLineBreaker(out, 64), - crc: crc24Init, - blockType: bType, - } - e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) - return e, nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/canonical_text.go b/vendor/golang.org/x/crypto/openpgp/canonical_text.go deleted file mode 100644 index e601e389f1..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/canonical_text.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import "hash" - -// NewCanonicalTextHash reformats text written to it into the canonical -// form and then applies the hash h. See RFC 4880, section 5.2.1. -func NewCanonicalTextHash(h hash.Hash) hash.Hash { - return &canonicalTextHash{h, 0} -} - -type canonicalTextHash struct { - h hash.Hash - s int -} - -var newline = []byte{'\r', '\n'} - -func (cth *canonicalTextHash) Write(buf []byte) (int, error) { - start := 0 - - for i, c := range buf { - switch cth.s { - case 0: - if c == '\r' { - cth.s = 1 - } else if c == '\n' { - cth.h.Write(buf[start:i]) - cth.h.Write(newline) - start = i + 1 - } - case 1: - cth.s = 0 - } - } - - cth.h.Write(buf[start:]) - return len(buf), nil -} - -func (cth *canonicalTextHash) Sum(in []byte) []byte { - return cth.h.Sum(in) -} - -func (cth *canonicalTextHash) Reset() { - cth.h.Reset() - cth.s = 0 -} - -func (cth *canonicalTextHash) Size() int { - return cth.h.Size() -} - -func (cth *canonicalTextHash) BlockSize() int { - return cth.h.BlockSize() -} diff --git a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go b/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go deleted file mode 100644 index 73f4fe3785..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// Package elgamal implements ElGamal encryption, suitable for OpenPGP, -// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on -// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, -// n. 4, 1985, pp. 469-472. -// -// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it -// unsuitable for other protocols. RSA should be used in preference in any -// case. -package elgamal // import "golang.org/x/crypto/openpgp/elgamal" - -import ( - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" -) - -// PublicKey represents an ElGamal public key. -type PublicKey struct { - G, P, Y *big.Int -} - -// PrivateKey represents an ElGamal private key. -type PrivateKey struct { - PublicKey - X *big.Int -} - -// Encrypt encrypts the given message to the given public key. The result is a -// pair of integers. Errors can result from reading random, or because msg is -// too large to be encrypted to the public key. -func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { - pLen := (pub.P.BitLen() + 7) / 8 - if len(msg) > pLen-11 { - err = errors.New("elgamal: message too long") - return - } - - // EM = 0x02 || PS || 0x00 || M - em := make([]byte, pLen-1) - em[0] = 2 - ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] - err = nonZeroRandomBytes(ps, random) - if err != nil { - return - } - em[len(em)-len(msg)-1] = 0 - copy(mm, msg) - - m := new(big.Int).SetBytes(em) - - k, err := rand.Int(random, pub.P) - if err != nil { - return - } - - c1 = new(big.Int).Exp(pub.G, k, pub.P) - s := new(big.Int).Exp(pub.Y, k, pub.P) - c2 = s.Mul(s, m) - c2.Mod(c2, pub.P) - - return -} - -// Decrypt takes two integers, resulting from an ElGamal encryption, and -// returns the plaintext of the message. An error can result only if the -// ciphertext is invalid. Users should keep in mind that this is a padding -// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can -// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks -// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel -// Bleichenbacher, Advances in Cryptology (Crypto '98), -func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { - s := new(big.Int).Exp(c1, priv.X, priv.P) - s.ModInverse(s, priv.P) - s.Mul(s, c2) - s.Mod(s, priv.P) - em := s.Bytes() - - firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) - - // The remainder of the plaintext must be a string of non-zero random - // octets, followed by a 0, followed by the message. - // lookingForIndex: 1 iff we are still looking for the zero. - // index: the offset of the first zero byte. - var lookingForIndex, index int - lookingForIndex = 1 - - for i := 1; i < len(em); i++ { - equals0 := subtle.ConstantTimeByteEq(em[i], 0) - index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) - lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) - } - - if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { - return nil, errors.New("elgamal: decryption error") - } - return em[index+1:], nil -} - -// nonZeroRandomBytes fills the given slice with non-zero random octets. 
-func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { - _, err = io.ReadFull(rand, s) - if err != nil { - return - } - - for i := 0; i < len(s); i++ { - for s[i] == 0 { - _, err = io.ReadFull(rand, s[i:i+1]) - if err != nil { - return - } - } - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/errors/errors.go b/vendor/golang.org/x/crypto/openpgp/errors/errors.go deleted file mode 100644 index eb0550b2d0..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/errors/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errors contains common error types for the OpenPGP packages. -package errors // import "golang.org/x/crypto/openpgp/errors" - -import ( - "strconv" -) - -// A StructuralError is returned when OpenPGP data is found to be syntactically -// invalid. -type StructuralError string - -func (s StructuralError) Error() string { - return "openpgp: invalid data: " + string(s) -} - -// UnsupportedError indicates that, although the OpenPGP data is valid, it -// makes use of currently unimplemented features. -type UnsupportedError string - -func (s UnsupportedError) Error() string { - return "openpgp: unsupported feature: " + string(s) -} - -// InvalidArgumentError indicates that the caller is in error and passed an -// incorrect value. -type InvalidArgumentError string - -func (i InvalidArgumentError) Error() string { - return "openpgp: invalid argument: " + string(i) -} - -// SignatureError indicates that a syntactically valid signature failed to -// validate. -type SignatureError string - -func (b SignatureError) Error() string { - return "openpgp: invalid signature: " + string(b) -} - -type keyIncorrectError int - -func (ki keyIncorrectError) Error() string { - return "openpgp: incorrect key" -} - -var ErrKeyIncorrect error = keyIncorrectError(0) - -type unknownIssuerError int - -func (unknownIssuerError) Error() string { - return "openpgp: signature made by unknown entity" -} - -var ErrUnknownIssuer error = unknownIssuerError(0) - -type keyRevokedError int - -func (keyRevokedError) Error() string { - return "openpgp: signature made by revoked key" -} - -var ErrKeyRevoked error = keyRevokedError(0) - -type UnknownPacketTypeError uint8 - -func (upte UnknownPacketTypeError) Error() string { - return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) -} diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go deleted file mode 100644 index fd582a89c0..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package openpgp - -import ( - "crypto/rsa" - "io" - "time" - - "golang.org/x/crypto/openpgp/armor" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/packet" -) - -// PublicKeyType is the armor type for a PGP public key. -var PublicKeyType = "PGP PUBLIC KEY BLOCK" - -// PrivateKeyType is the armor type for a PGP private key. -var PrivateKeyType = "PGP PRIVATE KEY BLOCK" - -// An Entity represents the components of an OpenPGP key: a primary public key -// (which must be a signing key), one or more identities claimed by that key, -// and zero or more subkeys, which may be encryption keys. 
-type Entity struct { - PrimaryKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Identities map[string]*Identity // indexed by Identity.Name - Revocations []*packet.Signature - Subkeys []Subkey -} - -// An Identity represents an identity claimed by an Entity and zero or more -// assertions by other entities about that claim. -type Identity struct { - Name string // by convention, has the form "Full Name (comment) <email@example.com>" - UserId *packet.UserId - SelfSignature *packet.Signature - Signatures []*packet.Signature -} - -// A Subkey is an additional public key in an Entity. Subkeys can be used for -// encryption. -type Subkey struct { - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - Sig *packet.Signature -} - -// A Key identifies a specific public key in an Entity. This is either the -// Entity's primary key or a subkey. -type Key struct { - Entity *Entity - PublicKey *packet.PublicKey - PrivateKey *packet.PrivateKey - SelfSignature *packet.Signature -} - -// A KeyRing provides access to public and private keys. -type KeyRing interface { - // KeysById returns the set of keys that have the given key id. - KeysById(id uint64) []Key - // KeysByIdAndUsage returns the set of keys with the given id - // that also meet the key usage given by requiredUsage. - // The requiredUsage is expressed as the bitwise-OR of - // packet.KeyFlag* values. - KeysByIdUsage(id uint64, requiredUsage byte) []Key - // DecryptionKeys returns all private keys that are valid for - // decryption. - DecryptionKeys() []Key -} - -// primaryIdentity returns the Identity marked as primary or the first identity -// if none are so marked. -func (e *Entity) primaryIdentity() *Identity { - var firstIdentity *Identity - for _, ident := range e.Identities { - if firstIdentity == nil { - firstIdentity = ident - } - if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - return ident - } - } - return firstIdentity -} - -// encryptionKey returns the best candidate Key for encrypting a message to the -// given Entity. -func (e *Entity) encryptionKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - // Iterate the keys to find the newest key - var maxTime time.Time - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagEncryptCommunications && - subkey.PublicKey.PubKeyAlgo.CanEncrypt() && - !subkey.Sig.KeyExpired(now) && - (maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) { - candidateSubkey = i - maxTime = subkey.Sig.CreationTime - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we don't have any candidate subkeys for encryption and - // the primary key doesn't have any usage metadata then we - // assume that the primary key is ok. Or, if the primary key is - // marked as ok to encrypt to, then we can obviously use it. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications && - e.PrimaryKey.PubKeyAlgo.CanEncrypt() && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - // This Entity appears to be signing only. - return Key{}, false -} - -// signingKey return the best candidate Key for signing a message with this -// Entity.
-func (e *Entity) signingKey(now time.Time) (Key, bool) { - candidateSubkey := -1 - - for i, subkey := range e.Subkeys { - if subkey.Sig.FlagsValid && - subkey.Sig.FlagSign && - subkey.PublicKey.PubKeyAlgo.CanSign() && - !subkey.Sig.KeyExpired(now) { - candidateSubkey = i - break - } - } - - if candidateSubkey != -1 { - subkey := e.Subkeys[candidateSubkey] - return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true - } - - // If we have no candidate subkey then we assume that it's ok to sign - // with the primary key. - i := e.primaryIdentity() - if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign && - !i.SelfSignature.KeyExpired(now) { - return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true - } - - return Key{}, false -} - -// An EntityList contains one or more Entities. -type EntityList []*Entity - -// KeysById returns the set of keys that have the given key id. -func (el EntityList) KeysById(id uint64) (keys []Key) { - for _, e := range el { - if e.PrimaryKey.KeyId == id { - var selfSig *packet.Signature - for _, ident := range e.Identities { - if selfSig == nil { - selfSig = ident.SelfSignature - } else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId { - selfSig = ident.SelfSignature - break - } - } - keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig}) - } - - for _, subKey := range e.Subkeys { - if subKey.PublicKey.KeyId == id { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// KeysByIdAndUsage returns the set of keys with the given id that also meet -// the key usage given by requiredUsage. The requiredUsage is expressed as -// the bitwise-OR of packet.KeyFlag* values. -func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) { - for _, key := range el.KeysById(id) { - if len(key.Entity.Revocations) > 0 { - continue - } - - if key.SelfSignature.RevocationReason != nil { - continue - } - - if key.SelfSignature.FlagsValid && requiredUsage != 0 { - var usage byte - if key.SelfSignature.FlagCertify { - usage |= packet.KeyFlagCertify - } - if key.SelfSignature.FlagSign { - usage |= packet.KeyFlagSign - } - if key.SelfSignature.FlagEncryptCommunications { - usage |= packet.KeyFlagEncryptCommunications - } - if key.SelfSignature.FlagEncryptStorage { - usage |= packet.KeyFlagEncryptStorage - } - if usage&requiredUsage != requiredUsage { - continue - } - } - - keys = append(keys, key) - } - return -} - -// DecryptionKeys returns all private keys that are valid for decryption. -func (el EntityList) DecryptionKeys() (keys []Key) { - for _, e := range el { - for _, subKey := range e.Subkeys { - if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { - keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) - } - } - } - return -} - -// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. -func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { - block, err := armor.Decode(r) - if err == io.EOF { - return nil, errors.InvalidArgumentError("no armored data found") - } - if err != nil { - return nil, err - } - if block.Type != PublicKeyType && block.Type != PrivateKeyType { - return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) - } - - return ReadKeyRing(block.Body) -} - -// ReadKeyRing reads one or more public/private keys. 
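For context, a typical use of the ReadArmoredKeyRing function defined above, assuming the upstream openpgp import path; pubring.asc is a hypothetical armored key file:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, err := os.Open("pubring.asc") // hypothetical armored keyring
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	el, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		log.Fatal(err) // InvalidArgumentError when no armored data is found
	}
	for _, e := range el {
		fmt.Printf("primary key %016X: %d identities, %d subkeys\n",
			e.PrimaryKey.KeyId, len(e.Identities), len(e.Subkeys))
	}
}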
Unsupported keys are -// ignored as long as at least a single valid key is found. -func ReadKeyRing(r io.Reader) (el EntityList, err error) { - packets := packet.NewReader(r) - var lastUnsupportedError error - - for { - var e *Entity - e, err = ReadEntity(packets) - if err != nil { - // TODO: warn about skipped unsupported/unreadable keys - if _, ok := err.(errors.UnsupportedError); ok { - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } else if _, ok := err.(errors.StructuralError); ok { - // Skip unreadable, badly-formatted keys - lastUnsupportedError = err - err = readToNextPublicKey(packets) - } - if err == io.EOF { - err = nil - break - } - if err != nil { - el = nil - break - } - } else { - el = append(el, e) - } - } - - if len(el) == 0 && err == nil { - err = lastUnsupportedError - } - return -} - -// readToNextPublicKey reads packets until the start of the entity and leaves -// the first packet of the new entity in the Reader. -func readToNextPublicKey(packets *packet.Reader) (err error) { - var p packet.Packet - for { - p, err = packets.Next() - if err == io.EOF { - return - } else if err != nil { - if _, ok := err.(errors.UnsupportedError); ok { - err = nil - continue - } - return - } - - if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { - packets.Unread(p) - return - } - } -} - -// ReadEntity reads an entity (public key, identities, subkeys etc) from the -// given Reader. -func ReadEntity(packets *packet.Reader) (*Entity, error) { - e := new(Entity) - e.Identities = make(map[string]*Identity) - - p, err := packets.Next() - if err != nil { - return nil, err - } - - var ok bool - if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { - if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { - packets.Unread(p) - return nil, errors.StructuralError("first packet was not a public/private key") - } - e.PrimaryKey = &e.PrivateKey.PublicKey - } - - if !e.PrimaryKey.PubKeyAlgo.CanSign() { - return nil, errors.StructuralError("primary key cannot be used for signatures") - } - - var current *Identity - var revocations []*packet.Signature -EachPacket: - for { - p, err := packets.Next() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - - switch pkt := p.(type) { - case *packet.UserId: - current = new(Identity) - current.Name = pkt.Id - current.UserId = pkt - e.Identities[pkt.Id] = current - - for { - p, err = packets.Next() - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } else if err != nil { - return nil, err - } - - sig, ok := p.(*packet.Signature) - if !ok { - return nil, errors.StructuralError("user ID packet not followed by self-signature") - } - - if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.IssuerKeyId != nil && *sig.IssuerKeyId == e.PrimaryKey.KeyId { - if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { - return nil, errors.StructuralError("user ID self-signature invalid: " + err.Error()) - } - current.SelfSignature = sig - break - } - current.Signatures = append(current.Signatures, sig) - } - case *packet.Signature: - if pkt.SigType == packet.SigTypeKeyRevocation { - revocations = append(revocations, pkt) - } else if pkt.SigType == packet.SigTypeDirectSignature { - // TODO: RFC4880 5.2.1 permits signatures - // directly on keys (eg. to bind additional - // revocation keys). 
- } else if current == nil { - return nil, errors.StructuralError("signature packet found before user id packet") - } else { - current.Signatures = append(current.Signatures, pkt) - } - case *packet.PrivateKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, &pkt.PublicKey, pkt) - if err != nil { - return nil, err - } - case *packet.PublicKey: - if pkt.IsSubkey == false { - packets.Unread(p) - break EachPacket - } - err = addSubkey(e, packets, pkt, nil) - if err != nil { - return nil, err - } - default: - // we ignore unknown packets - } - } - - if len(e.Identities) == 0 { - return nil, errors.StructuralError("entity without any identities") - } - - for _, revocation := range revocations { - err = e.PrimaryKey.VerifyRevocationSignature(revocation) - if err == nil { - e.Revocations = append(e.Revocations, revocation) - } else { - // TODO: RFC 4880 5.2.3.15 defines revocation keys. - return nil, errors.StructuralError("revocation signature signed by alternate key") - } - } - - return e, nil -} - -func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { - var subKey Subkey - subKey.PublicKey = pub - subKey.PrivateKey = priv - p, err := packets.Next() - if err == io.EOF { - return io.ErrUnexpectedEOF - } - if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - var ok bool - subKey.Sig, ok = p.(*packet.Signature) - if !ok { - return errors.StructuralError("subkey packet not followed by signature") - } - if subKey.Sig.SigType != packet.SigTypeSubkeyBinding && subKey.Sig.SigType != packet.SigTypeSubkeyRevocation { - return errors.StructuralError("subkey signature with wrong type") - } - err = e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, subKey.Sig) - if err != nil { - return errors.StructuralError("subkey signature invalid: " + err.Error()) - } - e.Subkeys = append(e.Subkeys, subKey) - return nil -} - -const defaultRSAKeyBits = 2048 - -// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a -// single identity composed of the given full name, comment and email, any of -// which may be empty but must not contain any of "()<>\x00". -// If config is nil, sensible defaults will be used. 
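As an aside, the NewEntity constructor defined just below builds exactly one identity (with a positive self-certification) and one encryption subkey. A quick usage sketch, assuming the upstream openpgp import path:

package main

import (
	"log"

	"golang.org/x/crypto/openpgp"
)

func main() {
	// A nil config selects the defaults documented on packet.Config:
	// 2048-bit RSA keys and SHA-256.
	e, err := openpgp.NewEntity("Alice Example", "sketch", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}
	for name := range e.Identities {
		log.Printf("identity: %s", name) // "Alice Example (sketch) <alice@example.com>"
	}
	log.Printf("subkeys: %d", len(e.Subkeys)) // 1: the encryption subkey built below
}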
-func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { - currentTime := config.Now() - - bits := defaultRSAKeyBits - if config != nil && config.RSABits != 0 { - bits = config.RSABits - } - - uid := packet.NewUserId(name, comment, email) - if uid == nil { - return nil, errors.InvalidArgumentError("user id field contained invalid characters") - } - signingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - encryptingPriv, err := rsa.GenerateKey(config.Random(), bits) - if err != nil { - return nil, err - } - - e := &Entity{ - PrimaryKey: packet.NewRSAPublicKey(currentTime, &signingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, signingPriv), - Identities: make(map[string]*Identity), - } - isPrimaryId := true - e.Identities[uid.Id] = &Identity{ - Name: uid.Id, - UserId: uid, - SelfSignature: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypePositiveCert, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - IsPrimaryId: &isPrimaryId, - FlagsValid: true, - FlagSign: true, - FlagCertify: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - - // If the user passes in a DefaultHash via packet.Config, - // set the PreferredHash for the SelfSignature. - if config != nil && config.DefaultHash != 0 { - e.Identities[uid.Id].SelfSignature.PreferredHash = []uint8{hashToHashId(config.DefaultHash)} - } - - // Likewise for DefaultCipher. - if config != nil && config.DefaultCipher != 0 { - e.Identities[uid.Id].SelfSignature.PreferredSymmetric = []uint8{uint8(config.DefaultCipher)} - } - - e.Subkeys = make([]Subkey, 1) - e.Subkeys[0] = Subkey{ - PublicKey: packet.NewRSAPublicKey(currentTime, &encryptingPriv.PublicKey), - PrivateKey: packet.NewRSAPrivateKey(currentTime, encryptingPriv), - Sig: &packet.Signature{ - CreationTime: currentTime, - SigType: packet.SigTypeSubkeyBinding, - PubKeyAlgo: packet.PubKeyAlgoRSA, - Hash: config.Hash(), - FlagsValid: true, - FlagEncryptStorage: true, - FlagEncryptCommunications: true, - IssuerKeyId: &e.PrimaryKey.KeyId, - }, - } - e.Subkeys[0].PublicKey.IsSubkey = true - e.Subkeys[0].PrivateKey.IsSubkey = true - - return e, nil -} - -// SerializePrivate serializes an Entity, including private key material, to -// the given Writer. For now, it must only be used on an Entity returned from -// NewEntity. -// If config is nil, sensible defaults will be used. -func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { - err = e.PrivateKey.Serialize(w) - if err != nil { - return - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return - } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return - } - } - for _, subkey := range e.Subkeys { - err = subkey.PrivateKey.Serialize(w) - if err != nil { - return - } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } - err = subkey.Sig.Serialize(w) - if err != nil { - return - } - } - return nil -} - -// Serialize writes the public part of the given Entity to w. (No private -// key material will be output). 
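For context, SerializePrivate above pairs naturally with the armor package and the PrivateKeyType constant from earlier in this file. A sketch, assuming the upstream openpgp and openpgp/armor import paths:

package main

import (
	"bytes"
	"log"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/armor"
)

func main() {
	e, err := openpgp.NewEntity("Alice Example", "", "alice@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	w, err := armor.Encode(&buf, openpgp.PrivateKeyType, nil)
	if err != nil {
		log.Fatal(err)
	}
	// SerializePrivate re-signs the identities and subkeys as it writes,
	// which is why it is restricted to entities fresh from NewEntity.
	if err := e.SerializePrivate(w, nil); err != nil {
		log.Fatal(err)
	}
	w.Close() // writes the armor footer

	log.Println(buf.String()) // -----BEGIN PGP PRIVATE KEY BLOCK----- ...
}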
-func (e *Entity) Serialize(w io.Writer) error { - err := e.PrimaryKey.Serialize(w) - if err != nil { - return err - } - for _, ident := range e.Identities { - err = ident.UserId.Serialize(w) - if err != nil { - return err - } - err = ident.SelfSignature.Serialize(w) - if err != nil { - return err - } - for _, sig := range ident.Signatures { - err = sig.Serialize(w) - if err != nil { - return err - } - } - } - for _, subkey := range e.Subkeys { - err = subkey.PublicKey.Serialize(w) - if err != nil { - return err - } - err = subkey.Sig.Serialize(w) - if err != nil { - return err - } - } - return nil -} - -// SignIdentity adds a signature to e, from signer, attesting that identity is -// associated with e. The provided identity must already be an element of -// e.Identities and the private key of signer must have been decrypted if -// necessary. -// If config is nil, sensible defaults will be used. -func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing Entity must have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing Entity's private key must be decrypted") - } - ident, ok := e.Identities[identity] - if !ok { - return errors.InvalidArgumentError("given identity string not found in Entity") - } - - sig := &packet.Signature{ - SigType: packet.SigTypeGenericCert, - PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, - Hash: config.Hash(), - CreationTime: config.Now(), - IssuerKeyId: &signer.PrivateKey.KeyId, - } - if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { - return err - } - ident.Signatures = append(ident.Signatures, sig) - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go b/vendor/golang.org/x/crypto/openpgp/packet/compressed.go deleted file mode 100644 index e8f0b5caa7..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/compressed.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "compress/bzip2" - "compress/flate" - "compress/zlib" - "golang.org/x/crypto/openpgp/errors" - "io" - "strconv" -) - -// Compressed represents a compressed OpenPGP packet. The decompressed contents -// will contain more OpenPGP packets. See RFC 4880, section 5.6. -type Compressed struct { - Body io.Reader -} - -const ( - NoCompression = flate.NoCompression - BestSpeed = flate.BestSpeed - BestCompression = flate.BestCompression - DefaultCompression = flate.DefaultCompression -) - -// CompressionConfig contains compressor configuration settings. -type CompressionConfig struct { - // Level is the compression level to use. It must be set to - // between -1 and 9, with -1 causing the compressor to use the - // default compression level, 0 causing the compressor to use - // no compression and 1 to 9 representing increasing (better, - // slower) compression levels. If Level is less than -1 or - // more than 9, a non-nil error will be returned during - // encryption. See the constants above for convenient common - // settings for Level.
- Level int -} - -func (c *Compressed) parse(r io.Reader) error { - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - - switch buf[0] { - case 1: - c.Body = flate.NewReader(r) - case 2: - c.Body, err = zlib.NewReader(r) - case 3: - c.Body = bzip2.NewReader(r) - default: - err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0]))) - } - - return err -} - -// compressedWriterCloser represents the serialized compression stream -// header and the compressor. Its Close() method ensures that both the -// compressor and serialized stream header are closed. Its Write() -// method writes to the compressor. -type compressedWriteCloser struct { - sh io.Closer // Stream Header - c io.WriteCloser // Compressor -} - -func (cwc compressedWriteCloser) Write(p []byte) (int, error) { - return cwc.c.Write(p) -} - -func (cwc compressedWriteCloser) Close() (err error) { - err = cwc.c.Close() - if err != nil { - return err - } - - return cwc.sh.Close() -} - -// SerializeCompressed serializes a compressed data packet to w and -// returns a WriteCloser to which the literal data packets themselves -// can be written and which MUST be closed on completion. If cc is -// nil, sensible defaults will be used to configure the compression -// algorithm. -func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) { - compressed, err := serializeStreamHeader(w, packetTypeCompressed) - if err != nil { - return - } - - _, err = compressed.Write([]byte{uint8(algo)}) - if err != nil { - return - } - - level := DefaultCompression - if cc != nil { - level = cc.Level - } - - var compressor io.WriteCloser - switch algo { - case CompressionZIP: - compressor, err = flate.NewWriter(compressed, level) - case CompressionZLIB: - compressor, err = zlib.NewWriterLevel(compressed, level) - default: - s := strconv.Itoa(int(algo)) - err = errors.UnsupportedError("Unsupported compression algorithm: " + s) - } - if err != nil { - return - } - - literaldata = compressedWriteCloser{compressed, compressor} - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/config.go b/vendor/golang.org/x/crypto/openpgp/packet/config.go deleted file mode 100644 index c76eecc963..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "crypto/rand" - "io" - "time" -) - -// Config collects a number of parameters along with sensible defaults. -// A nil *Config is valid and results in all default values. -type Config struct { - // Rand provides the source of entropy. - // If nil, the crypto/rand Reader is used. - Rand io.Reader - // DefaultHash is the default hash function to be used. - // If zero, SHA-256 is used. - DefaultHash crypto.Hash - // DefaultCipher is the cipher to be used. - // If zero, AES-128 is used. - DefaultCipher CipherFunction - // Time returns the current time as the number of seconds since the - // epoch. If Time is nil, time.Now is used. - Time func() time.Time - // DefaultCompressionAlgo is the compression algorithm to be - // applied to the plaintext before encryption. If zero, no - // compression is done. - DefaultCompressionAlgo CompressionAlgo - // CompressionConfig configures the compression settings. 
- CompressionConfig *CompressionConfig - // S2KCount is only used for symmetric encryption. It - // determines the strength of the passphrase stretching when - // the said passphrase is hashed to produce a key. S2KCount - // should be between 1024 and 65011712, inclusive. If Config - // is nil or S2KCount is 0, the value 65536 is used. Not all - // values in the above range can be represented. S2KCount will - // be rounded up to the next representable value if it cannot - // be encoded exactly. When set, it is strongly encouraged to - // use a value that is at least 65536. See RFC 4880 Section - // 3.7.1.3. - S2KCount int - // RSABits is the number of bits in new RSA keys made with NewEntity. - // If zero, then 2048 bit keys are created. - RSABits int -} - -func (c *Config) Random() io.Reader { - if c == nil || c.Rand == nil { - return rand.Reader - } - return c.Rand -} - -func (c *Config) Hash() crypto.Hash { - if c == nil || uint(c.DefaultHash) == 0 { - return crypto.SHA256 - } - return c.DefaultHash -} - -func (c *Config) Cipher() CipherFunction { - if c == nil || uint8(c.DefaultCipher) == 0 { - return CipherAES128 - } - return c.DefaultCipher -} - -func (c *Config) Now() time.Time { - if c == nil || c.Time == nil { - return time.Now() - } - return c.Time() -} - -func (c *Config) Compression() CompressionAlgo { - if c == nil { - return CompressionNone - } - return c.DefaultCompressionAlgo -} - -func (c *Config) PasswordHashIterations() int { - if c == nil || c.S2KCount == 0 { - return 0 - } - return c.S2KCount -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go deleted file mode 100644 index 02b372cf37..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/rsa" - "encoding/binary" - "io" - "math/big" - "strconv" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -const encryptedKeyVersion = 3 - -// EncryptedKey represents a public-key encrypted session key. See RFC 4880, -// section 5.1. -type EncryptedKey struct { - KeyId uint64 - Algo PublicKeyAlgorithm - CipherFunc CipherFunction // only valid after a successful Decrypt - Key []byte // only valid after a successful Decrypt - - encryptedMPI1, encryptedMPI2 parsedMPI -} - -func (e *EncryptedKey) parse(r io.Reader) (err error) { - var buf [10]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != encryptedKeyVersion { - return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) - } - e.KeyId = binary.BigEndian.Uint64(buf[1:9]) - e.Algo = PublicKeyAlgorithm(buf[9]) - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - case PubKeyAlgoElGamal: - e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) - if err != nil { - return - } - e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) - if err != nil { - return - } - } - _, err = consumeAll(r) - return -} - -func checksumKeyMaterial(key []byte) uint16 { - var checksum uint16 - for _, v := range key { - checksum += uint16(v) - } - return checksum -} - -// Decrypt decrypts an encrypted session key with the given private key.
The -// private key must have been decrypted first. -// If config is nil, sensible defaults will be used. -func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { - var err error - var b []byte - - // TODO(agl): use session key decryption routines here to avoid - // padding oracle attacks. - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - k := priv.PrivateKey.(*rsa.PrivateKey) - b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) - case PubKeyAlgoElGamal: - c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) - c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) - b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) - default: - err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) - } - - if err != nil { - return err - } - - e.CipherFunc = CipherFunction(b[0]) - e.Key = b[1 : len(b)-2] - expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) - checksum := checksumKeyMaterial(e.Key) - if checksum != expectedChecksum { - return errors.StructuralError("EncryptedKey checksum incorrect") - } - - return nil -} - -// Serialize writes the encrypted key packet, e, to w. -func (e *EncryptedKey) Serialize(w io.Writer) error { - var mpiLen int - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - mpiLen = 2 + len(e.encryptedMPI1.bytes) - case PubKeyAlgoElGamal: - mpiLen = 2 + len(e.encryptedMPI1.bytes) + 2 + len(e.encryptedMPI2.bytes) - default: - return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) - } - - serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) - - w.Write([]byte{encryptedKeyVersion}) - binary.Write(w, binary.BigEndian, e.KeyId) - w.Write([]byte{byte(e.Algo)}) - - switch e.Algo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - writeMPIs(w, e.encryptedMPI1) - case PubKeyAlgoElGamal: - writeMPIs(w, e.encryptedMPI1, e.encryptedMPI2) - default: - panic("internal error") - } - - return nil -} - -// SerializeEncryptedKey serializes an encrypted key packet to w that contains -// key, encrypted to pub. -// If config is nil, sensible defaults will be used.
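For context: Decrypt above unwraps a session-key block laid out as one cipher-algorithm octet, the session key itself, and a two-octet additive checksum. A sketch reproducing that layout for illustration; sessionKeyBlock is not part of the deleted API:

package pgpsketch

// sessionKeyBlock builds the plaintext that EncryptedKey.Decrypt expects
// after public-key decryption: cipher byte, key bytes, then a 16-bit sum
// of the key bytes, big-endian. Mirrors checksumKeyMaterial above.
func sessionKeyBlock(cipherFunc byte, key []byte) []byte {
	var checksum uint16
	for _, v := range key {
		checksum += uint16(v)
	}
	b := make([]byte, 0, 1+len(key)+2)
	b = append(b, cipherFunc)
	b = append(b, key...)
	return append(b, byte(checksum>>8), byte(checksum))
}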
-func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { - var buf [10]byte - buf[0] = encryptedKeyVersion - binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) - buf[9] = byte(pub.PubKeyAlgo) - - keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) - keyBlock[0] = byte(cipherFunc) - copy(keyBlock[1:], key) - checksum := checksumKeyMaterial(key) - keyBlock[1+len(key)] = byte(checksum >> 8) - keyBlock[1+len(key)+1] = byte(checksum) - - switch pub.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) - case PubKeyAlgoElGamal: - return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) - case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: - return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) - } - - return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) -} - -func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { - cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ + 2 /* mpi size */ + len(cipherText) - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - return writeMPI(w, 8*uint16(len(cipherText)), cipherText) -} - -func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { - c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) - if err != nil { - return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) - } - - packetLen := 10 /* header length */ - packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 - packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 - - err = serializeHeader(w, packetTypeEncryptedKey, packetLen) - if err != nil { - return err - } - _, err = w.Write(header[:]) - if err != nil { - return err - } - err = writeBig(w, c1) - if err != nil { - return err - } - return writeBig(w, c2) -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/literal.go b/vendor/golang.org/x/crypto/openpgp/packet/literal.go deleted file mode 100644 index 1a9ec6e51e..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/literal.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "encoding/binary" - "io" -) - -// LiteralData represents an encrypted file. See RFC 4880, section 5.9. -type LiteralData struct { - IsBinary bool - FileName string - Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. - Body io.Reader -} - -// ForEyesOnly returns whether the contents of the LiteralData have been marked -// as especially sensitive. 
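As an aside, a usage sketch for the SerializeEncryptedKey function above, encrypting a fresh AES-128 session key to an entity's encryption subkey; assumes the upstream openpgp and openpgp/packet import paths:

package main

import (
	"bytes"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/openpgp"
	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	e, err := openpgp.NewEntity("Bob Example", "", "bob@example.com", nil)
	if err != nil {
		log.Fatal(err)
	}

	// A random AES-128 session key, encrypted to Bob's encryption subkey.
	key := make([]byte, packet.CipherAES128.KeySize())
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	sub := e.Subkeys[0].PublicKey // NewEntity creates one encryption subkey
	if err := packet.SerializeEncryptedKey(&buf, sub, packet.CipherAES128, key, nil); err != nil {
		log.Fatal(err)
	}
	log.Printf("encrypted-key packet: %d bytes", buf.Len())
}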
-func (l *LiteralData) ForEyesOnly() bool { - return l.FileName == "_CONSOLE" -} - -func (l *LiteralData) parse(r io.Reader) (err error) { - var buf [256]byte - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - - l.IsBinary = buf[0] == 'b' - fileNameLen := int(buf[1]) - - _, err = readFull(r, buf[:fileNameLen]) - if err != nil { - return - } - - l.FileName = string(buf[:fileNameLen]) - - _, err = readFull(r, buf[:4]) - if err != nil { - return - } - - l.Time = binary.BigEndian.Uint32(buf[:4]) - l.Body = r - return -} - -// SerializeLiteral serializes a literal data packet to w and returns a -// WriteCloser to which the data itself can be written and which MUST be closed -// on completion. The fileName is truncated to 255 bytes. -func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { - var buf [4]byte - buf[0] = 't' - if isBinary { - buf[0] = 'b' - } - if len(fileName) > 255 { - fileName = fileName[:255] - } - buf[1] = byte(len(fileName)) - - inner, err := serializeStreamHeader(w, packetTypeLiteralData) - if err != nil { - return - } - - _, err = inner.Write(buf[:2]) - if err != nil { - return - } - _, err = inner.Write([]byte(fileName)) - if err != nil { - return - } - binary.BigEndian.PutUint32(buf[:], time) - _, err = inner.Write(buf[:]) - if err != nil { - return - } - - plaintext = inner - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go b/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go deleted file mode 100644 index ce2a33a547..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/ocfb.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 - -package packet - -import ( - "crypto/cipher" -) - -type ocfbEncrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// An OCFBResyncOption determines if the "resynchronization step" of OCFB is -// performed. -type OCFBResyncOption bool - -const ( - OCFBResync OCFBResyncOption = true - OCFBNoResync OCFBResyncOption = false -) - -// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block, and an initial amount of -// ciphertext. randData must be random bytes and be the same length as the -// cipher.Block's block size. Resync determines if the "resynchronization step" -// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on -// this point. 
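For context: SerializeLiteral above takes an io.WriteCloser, and its result must be closed to finish the packet. A usage sketch; the nopCloser adapter is illustrative, not part of the package:

package main

import (
	"bytes"
	"io"
	"log"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

// nopCloser adapts a bytes.Buffer to the io.WriteCloser that
// SerializeLiteral requires.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w, err := packet.SerializeLiteral(nopCloser{&buf}, true, "hello.txt", uint32(time.Now().Unix()))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write([]byte("hello, world\n")); err != nil {
		log.Fatal(err)
	}
	w.Close() // MUST be closed on completion, per the doc comment above
	log.Printf("literal data packet: %d bytes", buf.Len())
}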
-func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { - blockSize := block.BlockSize() - if len(randData) != blockSize { - return nil, nil - } - - x := &ocfbEncrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefix := make([]byte, blockSize+2) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefix[i] = randData[i] ^ x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] - prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - return x, prefix -} - -func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - x.fre[x.outUsed] ^= src[i] - dst[i] = x.fre[x.outUsed] - x.outUsed++ - } -} - -type ocfbDecrypter struct { - b cipher.Block - fre []byte - outUsed int -} - -// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's -// cipher feedback mode using the given cipher.Block. Prefix must be the first -// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's -// block size. If an incorrect key is detected then nil is returned. On -// successful exit, blockSize+2 bytes of decrypted data are written into -// prefix. Resync determines if the "resynchronization step" from RFC 4880, -// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. -func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { - blockSize := block.BlockSize() - if len(prefix) != blockSize+2 { - return nil - } - - x := &ocfbDecrypter{ - b: block, - fre: make([]byte, blockSize), - outUsed: 0, - } - prefixCopy := make([]byte, len(prefix)) - copy(prefixCopy, prefix) - - block.Encrypt(x.fre, x.fre) - for i := 0; i < blockSize; i++ { - prefixCopy[i] ^= x.fre[i] - } - - block.Encrypt(x.fre, prefix[:blockSize]) - prefixCopy[blockSize] ^= x.fre[0] - prefixCopy[blockSize+1] ^= x.fre[1] - - if prefixCopy[blockSize-2] != prefixCopy[blockSize] || - prefixCopy[blockSize-1] != prefixCopy[blockSize+1] { - return nil - } - - if resync { - block.Encrypt(x.fre, prefix[2:]) - } else { - x.fre[0] = prefix[blockSize] - x.fre[1] = prefix[blockSize+1] - x.outUsed = 2 - } - copy(prefix, prefixCopy) - return x -} - -func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { - for i := 0; i < len(src); i++ { - if x.outUsed == len(x.fre) { - x.b.Encrypt(x.fre, x.fre) - x.outUsed = 0 - } - - c := src[i] - dst[i] = x.fre[x.outUsed] ^ src[i] - x.fre[x.outUsed] = c - x.outUsed++ - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go b/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go deleted file mode 100644 index 1713503395..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" - "io" - "strconv" -) - -// OnePassSignature represents a one-pass signature packet. See RFC 4880, -// section 5.4. 
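As an aside, an encrypt/decrypt round trip through the OCFB stream above, assuming the upstream openpgp/packet import path; the seed block stands in for the random data a real caller must supply:

package main

import (
	"crypto/aes"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	key := make([]byte, 16)
	seed := make([]byte, aes.BlockSize) // one block of random data, per the doc comment
	if _, err := rand.Read(key); err != nil {
		log.Fatal(err)
	}
	if _, err := rand.Read(seed); err != nil {
		log.Fatal(err)
	}

	block, _ := aes.NewCipher(key) // 16-byte key, so no error
	enc, prefix := packet.NewOCFBEncrypter(block, seed, packet.OCFBResync)

	msg := []byte("attack at dawn")
	ct := make([]byte, len(msg))
	enc.XORKeyStream(ct, msg)

	// The blockSize+2 byte prefix doubles as a quick key check;
	// a wrong key yields nil.
	dec := packet.NewOCFBDecrypter(block, prefix, packet.OCFBResync)
	if dec == nil {
		log.Fatal("incorrect key detected")
	}
	pt := make([]byte, len(ct))
	dec.XORKeyStream(pt, ct)
	log.Printf("%s", pt) // attack at dawn
}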
-type OnePassSignature struct { - SigType SignatureType - Hash crypto.Hash - PubKeyAlgo PublicKeyAlgorithm - KeyId uint64 - IsLast bool -} - -const onePassSignatureVersion = 3 - -func (ops *OnePassSignature) parse(r io.Reader) (err error) { - var buf [13]byte - - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != onePassSignatureVersion { - err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) - } - - var ok bool - ops.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) - } - - ops.SigType = SignatureType(buf[1]) - ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) - ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) - ops.IsLast = buf[12] != 0 - return -} - -// Serialize marshals the given OnePassSignature to w. -func (ops *OnePassSignature) Serialize(w io.Writer) error { - var buf [13]byte - buf[0] = onePassSignatureVersion - buf[1] = uint8(ops.SigType) - var ok bool - buf[2], ok = s2k.HashToHashId(ops.Hash) - if !ok { - return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) - } - buf[3] = uint8(ops.PubKeyAlgo) - binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) - if ops.IsLast { - buf[12] = 1 - } - - if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { - return err - } - _, err := w.Write(buf[:]) - return err -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go b/vendor/golang.org/x/crypto/openpgp/packet/opaque.go deleted file mode 100644 index 456d807f25..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/opaque.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "io" - "io/ioutil" - - "golang.org/x/crypto/openpgp/errors" -) - -// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is -// useful for splitting and storing the original packet contents separately, -// handling unsupported packet types or accessing parts of the packet not yet -// implemented by this package. -type OpaquePacket struct { - // Packet type - Tag uint8 - // Reason why the packet was parsed opaquely - Reason error - // Binary contents of the packet data - Contents []byte -} - -func (op *OpaquePacket) parse(r io.Reader) (err error) { - op.Contents, err = ioutil.ReadAll(r) - return -} - -// Serialize marshals the packet to a writer in its original form, including -// the packet header. -func (op *OpaquePacket) Serialize(w io.Writer) (err error) { - err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) - if err == nil { - _, err = w.Write(op.Contents) - } - return -} - -// Parse attempts to parse the opaque contents into a structure supported by -// this package. If the packet is not known then the result will be another -// OpaquePacket. -func (op *OpaquePacket) Parse() (p Packet, err error) { - hdr := bytes.NewBuffer(nil) - err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) - if err != nil { - op.Reason = err - return op, err - } - p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) - if err != nil { - op.Reason = err - p = op - } - return -} - -// OpaqueReader reads OpaquePackets from an io.Reader. -type OpaqueReader struct { - r io.Reader -} - -func NewOpaqueReader(r io.Reader) *OpaqueReader { - return &OpaqueReader{r: r} -} - -// Read the next OpaquePacket. 
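For context, the opaque packet reader defined here iterates raw packets without interpreting them, which is handy for splitting or debugging streams. A usage sketch; keyring.gpg is a hypothetical binary keyring file:

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("keyring.gpg") // hypothetical binary keyring
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	or := packet.NewOpaqueReader(f)
	for {
		op, err := or.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("tag %d, %d content bytes", op.Tag, len(op.Contents))
	}
}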
-func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { - tag, _, contents, err := readHeader(or.r) - if err != nil { - return - } - op = &OpaquePacket{Tag: uint8(tag), Reason: err} - err = op.parse(contents) - if err != nil { - consumeAll(contents) - } - return -} - -// OpaqueSubpacket represents an unparsed OpenPGP subpacket, -// as found in signature and user attribute packets. -type OpaqueSubpacket struct { - SubType uint8 - Contents []byte -} - -// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from -// their byte representation. -func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { - var ( - subHeaderLen int - subPacket *OpaqueSubpacket - ) - for len(contents) > 0 { - subHeaderLen, subPacket, err = nextSubpacket(contents) - if err != nil { - break - } - result = append(result, subPacket) - contents = contents[subHeaderLen+len(subPacket.Contents):] - } - return -} - -func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { - // RFC 4880, section 5.2.3.1 - var subLen uint32 - if len(contents) < 1 { - goto Truncated - } - subPacket = &OpaqueSubpacket{} - switch { - case contents[0] < 192: - subHeaderLen = 2 // 1 length byte, 1 subtype byte - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]) - contents = contents[1:] - case contents[0] < 255: - subHeaderLen = 3 // 2 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 - contents = contents[2:] - default: - subHeaderLen = 6 // 5 length bytes, 1 subtype - if len(contents) < subHeaderLen { - goto Truncated - } - subLen = uint32(contents[1])<<24 | - uint32(contents[2])<<16 | - uint32(contents[3])<<8 | - uint32(contents[4]) - contents = contents[5:] - } - if subLen > uint32(len(contents)) || subLen == 0 { - goto Truncated - } - subPacket.SubType = contents[0] - subPacket.Contents = contents[1:subLen] - return -Truncated: - err = errors.StructuralError("subpacket truncated") - return -} - -func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { - buf := make([]byte, 6) - n := serializeSubpacketLength(buf, len(osp.Contents)+1) - buf[n] = osp.SubType - if _, err = w.Write(buf[:n+1]); err != nil { - return - } - _, err = w.Write(osp.Contents) - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go deleted file mode 100644 index 625bb5ac80..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ /dev/null @@ -1,549 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package packet implements parsing and serialization of OpenPGP packets, as -// specified in RFC 4880. -package packet // import "golang.org/x/crypto/openpgp/packet" - -import ( - "bufio" - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rsa" - "io" - "math/big" - - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" -) - -// readFull is the same as io.ReadFull except that reading zero bytes returns -// ErrUnexpectedEOF rather than EOF. -func readFull(r io.Reader, buf []byte) (n int, err error) { - n, err = io.ReadFull(r, buf) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. 
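As an aside, nextSubpacket above implements the RFC 4880 section 5.2.3.1 length prefix. A standalone sketch of just that decoding; subpacketLength is illustrative, not part of the deleted API:

package pgpsketch

import "errors"

// subpacketLength decodes a subpacket length prefix: values below 192
// are one octet, 192..254 introduce a two-octet form, and 255 introduces
// a five-octet big-endian form. Returns the length and the prefix size.
func subpacketLength(b []byte) (length uint32, n int, err error) {
	switch {
	case len(b) >= 1 && b[0] < 192:
		return uint32(b[0]), 1, nil
	case len(b) >= 2 && b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, 2, nil
	case len(b) >= 5:
		return uint32(b[1])<<24 | uint32(b[2])<<16 | uint32(b[3])<<8 | uint32(b[4]), 5, nil
	}
	return 0, 0, errors.New("subpacket length truncated")
}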
-func readLength(r io.Reader) (length int64, isPartial bool, err error) { - var buf [4]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - switch { - case buf[0] < 192: - length = int64(buf[0]) - case buf[0] < 224: - length = int64(buf[0]-192) << 8 - _, err = readFull(r, buf[0:1]) - if err != nil { - return - } - length += int64(buf[0]) + 192 - case buf[0] < 255: - length = int64(1) << (buf[0] & 0x1f) - isPartial = true - default: - _, err = readFull(r, buf[0:4]) - if err != nil { - return - } - length = int64(buf[0])<<24 | - int64(buf[1])<<16 | - int64(buf[2])<<8 | - int64(buf[3]) - } - return -} - -// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. -// The continuation lengths are parsed and removed from the stream and EOF is -// returned at the end of the packet. See RFC 4880, section 4.2.2.4. -type partialLengthReader struct { - r io.Reader - remaining int64 - isPartial bool -} - -func (r *partialLengthReader) Read(p []byte) (n int, err error) { - for r.remaining == 0 { - if !r.isPartial { - return 0, io.EOF - } - r.remaining, r.isPartial, err = readLength(r.r) - if err != nil { - return 0, err - } - } - - toRead := int64(len(p)) - if toRead > r.remaining { - toRead = r.remaining - } - - n, err = r.r.Read(p[:int(toRead)]) - r.remaining -= int64(n) - if n < int(toRead) && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// partialLengthWriter writes a stream of data using OpenPGP partial lengths. -// See RFC 4880, section 4.2.2.4. -type partialLengthWriter struct { - w io.WriteCloser - lengthByte [1]byte -} - -func (w *partialLengthWriter) Write(p []byte) (n int, err error) { - for len(p) > 0 { - for power := uint(14); power < 32; power-- { - l := 1 << power - if len(p) >= l { - w.lengthByte[0] = 224 + uint8(power) - _, err = w.w.Write(w.lengthByte[:]) - if err != nil { - return - } - var m int - m, err = w.w.Write(p[:l]) - n += m - if err != nil { - return - } - p = p[l:] - break - } - } - } - return -} - -func (w *partialLengthWriter) Close() error { - w.lengthByte[0] = 0 - _, err := w.w.Write(w.lengthByte[:]) - if err != nil { - return err - } - return w.w.Close() -} - -// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the -// underlying Reader returns EOF before the limit has been reached. -type spanReader struct { - r io.Reader - n int64 -} - -func (l *spanReader) Read(p []byte) (n int, err error) { - if l.n <= 0 { - return 0, io.EOF - } - if int64(len(p)) > l.n { - p = p[0:l.n] - } - n, err = l.r.Read(p) - l.n -= int64(n) - if l.n > 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return -} - -// readHeader parses a packet header and returns an io.Reader which will return -// the contents of the packet. See RFC 4880, section 4.2. 
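For context: the partial-length octets handled by readLength and partialLengthReader above encode power-of-two chunk sizes. A one-function sketch of the decoding rule; partialBodyLen is illustrative:

package pgpsketch

// partialBodyLen decodes a partial body length octet: a value in
// [224, 254] means a chunk of 1 << (octet & 0x1f) bytes follows, after
// which another length octet appears. E.g. 0xE2 (226) means a 4-byte
// chunk, and 0xEE (238) means 16 KiB.
func partialBodyLen(octet byte) (n int64, isPartial bool) {
	if octet >= 224 && octet < 255 {
		return int64(1) << (octet & 0x1f), true
	}
	return 0, false
}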
-func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { - var buf [4]byte - _, err = io.ReadFull(r, buf[:1]) - if err != nil { - return - } - if buf[0]&0x80 == 0 { - err = errors.StructuralError("tag byte does not have MSB set") - return - } - if buf[0]&0x40 == 0 { - // Old format packet - tag = packetType((buf[0] & 0x3f) >> 2) - lengthType := buf[0] & 3 - if lengthType == 3 { - length = -1 - contents = r - return - } - lengthBytes := 1 << lengthType - _, err = readFull(r, buf[0:lengthBytes]) - if err != nil { - return - } - for i := 0; i < lengthBytes; i++ { - length <<= 8 - length |= int64(buf[i]) - } - contents = &spanReader{r, length} - return - } - - // New format packet - tag = packetType(buf[0] & 0x3f) - length, isPartial, err := readLength(r) - if err != nil { - return - } - if isPartial { - contents = &partialLengthReader{ - remaining: length, - isPartial: true, - r: r, - } - length = -1 - } else { - contents = &spanReader{r, length} - } - return -} - -// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section -// 4.2. -func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { - var buf [6]byte - var n int - - buf[0] = 0x80 | 0x40 | byte(ptype) - if length < 192 { - buf[1] = byte(length) - n = 2 - } else if length < 8384 { - length -= 192 - buf[1] = 192 + byte(length>>8) - buf[2] = byte(length) - n = 3 - } else { - buf[1] = 255 - buf[2] = byte(length >> 24) - buf[3] = byte(length >> 16) - buf[4] = byte(length >> 8) - buf[5] = byte(length) - n = 6 - } - - _, err = w.Write(buf[:n]) - return -} - -// serializeStreamHeader writes an OpenPGP packet header to w where the -// length of the packet is unknown. It returns a io.WriteCloser which can be -// used to write the contents of the packet. See RFC 4880, section 4.2. -func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { - var buf [1]byte - buf[0] = 0x80 | 0x40 | byte(ptype) - _, err = w.Write(buf[:]) - if err != nil { - return - } - out = &partialLengthWriter{w: w} - return -} - -// Packet represents an OpenPGP packet. Users are expected to try casting -// instances of this interface to specific packet types. -type Packet interface { - parse(io.Reader) error -} - -// consumeAll reads from the given Reader until error, returning the number of -// bytes read. -func consumeAll(r io.Reader) (n int64, err error) { - var m int - var buf [1024]byte - - for { - m, err = r.Read(buf[:]) - n += int64(m) - if err == io.EOF { - err = nil - return - } - if err != nil { - return - } - } -} - -// packetType represents the numeric ids of the different OpenPGP packet types. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 -type packetType uint8 - -const ( - packetTypeEncryptedKey packetType = 1 - packetTypeSignature packetType = 2 - packetTypeSymmetricKeyEncrypted packetType = 3 - packetTypeOnePassSignature packetType = 4 - packetTypePrivateKey packetType = 5 - packetTypePublicKey packetType = 6 - packetTypePrivateSubkey packetType = 7 - packetTypeCompressed packetType = 8 - packetTypeSymmetricallyEncrypted packetType = 9 - packetTypeLiteralData packetType = 11 - packetTypeUserId packetType = 13 - packetTypePublicSubkey packetType = 14 - packetTypeUserAttribute packetType = 17 - packetTypeSymmetricallyEncryptedMDC packetType = 18 -) - -// peekVersion detects the version of a public key packet about to -// be read. A bufio.Reader at the original position of the io.Reader -// is returned. 
-func peekVersion(r io.Reader) (bufr *bufio.Reader, ver byte, err error) { - bufr = bufio.NewReader(r) - var verBuf []byte - if verBuf, err = bufr.Peek(1); err != nil { - return - } - ver = verBuf[0] - return -} - -// Read reads a single OpenPGP packet from the given io.Reader. If there is an -// error parsing a packet, the whole packet is consumed from the input. -func Read(r io.Reader) (p Packet, err error) { - tag, _, contents, err := readHeader(r) - if err != nil { - return - } - - switch tag { - case packetTypeEncryptedKey: - p = new(EncryptedKey) - case packetTypeSignature: - var version byte - // Detect signature version - if contents, version, err = peekVersion(contents); err != nil { - return - } - if version < 4 { - p = new(SignatureV3) - } else { - p = new(Signature) - } - case packetTypeSymmetricKeyEncrypted: - p = new(SymmetricKeyEncrypted) - case packetTypeOnePassSignature: - p = new(OnePassSignature) - case packetTypePrivateKey, packetTypePrivateSubkey: - pk := new(PrivateKey) - if tag == packetTypePrivateSubkey { - pk.IsSubkey = true - } - p = pk - case packetTypePublicKey, packetTypePublicSubkey: - var version byte - if contents, version, err = peekVersion(contents); err != nil { - return - } - isSubkey := tag == packetTypePublicSubkey - if version < 4 { - p = &PublicKeyV3{IsSubkey: isSubkey} - } else { - p = &PublicKey{IsSubkey: isSubkey} - } - case packetTypeCompressed: - p = new(Compressed) - case packetTypeSymmetricallyEncrypted: - p = new(SymmetricallyEncrypted) - case packetTypeLiteralData: - p = new(LiteralData) - case packetTypeUserId: - p = new(UserId) - case packetTypeUserAttribute: - p = new(UserAttribute) - case packetTypeSymmetricallyEncryptedMDC: - se := new(SymmetricallyEncrypted) - se.MDC = true - p = se - default: - err = errors.UnknownPacketTypeError(tag) - } - if p != nil { - err = p.parse(contents) - } - if err != nil { - consumeAll(contents) - } - return -} - -// SignatureType represents the different semantic meanings of an OpenPGP -// signature. See RFC 4880, section 5.2.1. -type SignatureType uint8 - -const ( - SigTypeBinary SignatureType = 0 - SigTypeText = 1 - SigTypeGenericCert = 0x10 - SigTypePersonaCert = 0x11 - SigTypeCasualCert = 0x12 - SigTypePositiveCert = 0x13 - SigTypeSubkeyBinding = 0x18 - SigTypePrimaryKeyBinding = 0x19 - SigTypeDirectSignature = 0x1F - SigTypeKeyRevocation = 0x20 - SigTypeSubkeyRevocation = 0x28 -) - -// PublicKeyAlgorithm represents the different public key system specified for -// OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 -type PublicKeyAlgorithm uint8 - -const ( - PubKeyAlgoRSA PublicKeyAlgorithm = 1 - PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2 - PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3 - PubKeyAlgoElGamal PublicKeyAlgorithm = 16 - PubKeyAlgoDSA PublicKeyAlgorithm = 17 - // RFC 6637, Section 5. - PubKeyAlgoECDH PublicKeyAlgorithm = 18 - PubKeyAlgoECDSA PublicKeyAlgorithm = 19 -) - -// CanEncrypt returns true if it's possible to encrypt a message to a public -// key of the given type. -func (pka PublicKeyAlgorithm) CanEncrypt() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal: - return true - } - return false -} - -// CanSign returns true if it's possible for a public key of the given type to -// sign a message. 
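As an aside, the Read function above is the usual entry point for walking a raw packet stream, with callers type-switching on the result. A usage sketch, assuming the upstream openpgp/packet import path; message.gpg is a hypothetical binary OpenPGP file:

package main

import (
	"io"
	"log"
	"os"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("message.gpg") // hypothetical binary OpenPGP stream
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	for {
		p, err := packet.Read(f)
		if err == io.EOF {
			break
		}
		if err != nil {
			// Read has already drained the offending packet's contents,
			// so it is safe to continue with the next one.
			log.Printf("skipping packet: %v", err)
			continue
		}
		switch pkt := p.(type) {
		case *packet.EncryptedKey:
			log.Printf("session key encrypted to key %016X", pkt.KeyId)
		case *packet.OnePassSignature:
			log.Printf("one-pass signature by key %016X", pkt.KeyId)
		case *packet.LiteralData:
			log.Printf("literal data %q", pkt.FileName)
		default:
			log.Printf("packet %T", pkt)
		}
	}
}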
-func (pka PublicKeyAlgorithm) CanSign() bool { - switch pka { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - return true - } - return false -} - -// CipherFunction represents the different block ciphers specified for OpenPGP. See -// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 -type CipherFunction uint8 - -const ( - Cipher3DES CipherFunction = 2 - CipherCAST5 CipherFunction = 3 - CipherAES128 CipherFunction = 7 - CipherAES192 CipherFunction = 8 - CipherAES256 CipherFunction = 9 -) - -// KeySize returns the key size, in bytes, of cipher. -func (cipher CipherFunction) KeySize() int { - switch cipher { - case Cipher3DES: - return 24 - case CipherCAST5: - return cast5.KeySize - case CipherAES128: - return 16 - case CipherAES192: - return 24 - case CipherAES256: - return 32 - } - return 0 -} - -// blockSize returns the block size, in bytes, of cipher. -func (cipher CipherFunction) blockSize() int { - switch cipher { - case Cipher3DES: - return des.BlockSize - case CipherCAST5: - return 8 - case CipherAES128, CipherAES192, CipherAES256: - return 16 - } - return 0 -} - -// new returns a fresh instance of the given cipher. -func (cipher CipherFunction) new(key []byte) (block cipher.Block) { - switch cipher { - case Cipher3DES: - block, _ = des.NewTripleDESCipher(key) - case CipherCAST5: - block, _ = cast5.NewCipher(key) - case CipherAES128, CipherAES192, CipherAES256: - block, _ = aes.NewCipher(key) - } - return -} - -// readMPI reads a big integer from r. The bit length returned is the bit -// length that was specified in r. This is preserved so that the integer can be -// reserialized exactly. -func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { - var buf [2]byte - _, err = readFull(r, buf[0:]) - if err != nil { - return - } - bitLength = uint16(buf[0])<<8 | uint16(buf[1]) - numBytes := (int(bitLength) + 7) / 8 - mpi = make([]byte, numBytes) - _, err = readFull(r, mpi) - // According to RFC 4880 3.2. we should check that the MPI has no leading - // zeroes (at least when not an encrypted MPI?), but this implementation - // does generate leading zeroes, so we keep accepting them. - return -} - -// writeMPI serializes a big integer to w. -func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { - // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. - // Implementations seem to be tolerant of them, and stripping them would - // make it complex to guarantee matching re-serialization. - _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) - if err == nil { - _, err = w.Write(mpiBytes) - } - return -} - -// writeBig serializes a *big.Int to w. -func writeBig(w io.Writer, i *big.Int) error { - return writeMPI(w, uint16(i.BitLen()), i.Bytes()) -} - -// padToKeySize left-pads a MPI with zeroes to match the length of the -// specified RSA public. -func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { - k := (pub.N.BitLen() + 7) / 8 - if len(b) >= k { - return b - } - bb := make([]byte, k) - copy(bb[len(bb)-len(b):], b) - return bb -} - -// CompressionAlgo Represents the different compression algorithms -// supported by OpenPGP (except for BZIP2, which is not currently -// supported). See Section 9.3 of RFC 4880. 
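For context: readMPI and writeMPI above use the OpenPGP MPI wire form, a two-octet big-endian bit count followed by the magnitude bytes. A sketch; appendMPI is illustrative. For example, big.NewInt(511) has BitLen 9 and encodes as 00 09 01 ff:

package pgpsketch

import "math/big"

// appendMPI appends the MPI encoding of i: 16-bit bit length, then the
// big-endian magnitude, matching what writeBig/writeMPI above produce.
func appendMPI(out []byte, i *big.Int) []byte {
	bitLen := uint16(i.BitLen())
	out = append(out, byte(bitLen>>8), byte(bitLen))
	return append(out, i.Bytes()...)
}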
-type CompressionAlgo uint8 - -const ( - CompressionNone CompressionAlgo = 0 - CompressionZIP CompressionAlgo = 1 - CompressionZLIB CompressionAlgo = 2 -) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go b/vendor/golang.org/x/crypto/openpgp/packet/private_key.go deleted file mode 100644 index 34734cc63d..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/private_key.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/sha1" - "io" - "io/ioutil" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// PrivateKey represents a possibly encrypted private key. See RFC 4880, -// section 5.5.3. -type PrivateKey struct { - PublicKey - Encrypted bool // if true then the private key is unavailable until Decrypt has been called. - encryptedData []byte - cipher CipherFunction - s2k func(out, in []byte) - PrivateKey interface{} // An *{rsa|dsa|ecdsa}.PrivateKey or a crypto.Signer. - sha1Checksum bool - iv []byte -} - -func NewRSAPrivateKey(currentTime time.Time, priv *rsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewRSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewDSAPrivateKey(currentTime time.Time, priv *dsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewElGamalPrivateKey(currentTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewElGamalPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -func NewECDSAPrivateKey(currentTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { - pk := new(PrivateKey) - pk.PublicKey = *NewECDSAPublicKey(currentTime, &priv.PublicKey) - pk.PrivateKey = priv - return pk -} - -// NewSignerPrivateKey creates a sign-only PrivateKey from a crypto.Signer that -// implements RSA or ECDSA. 
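As an aside, a usage sketch for the NewRSAPrivateKey constructor above together with the Serialize method a little further down; assumes the upstream openpgp/packet import path. Note that Serialize currently writes the key material unencrypted (the s2k octet is zero), as the TODO in the deleted code points out:

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"log"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	pk := packet.NewRSAPrivateKey(time.Now(), rsaKey)

	var buf bytes.Buffer
	if err := pk.Serialize(&buf); err != nil {
		log.Fatal(err)
	}
	log.Printf("private key packet: %d bytes", buf.Len())
}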
-func NewSignerPrivateKey(currentTime time.Time, signer crypto.Signer) *PrivateKey { - pk := new(PrivateKey) - switch pubkey := signer.Public().(type) { - case rsa.PublicKey: - pk.PublicKey = *NewRSAPublicKey(currentTime, &pubkey) - pk.PubKeyAlgo = PubKeyAlgoRSASignOnly - case ecdsa.PublicKey: - pk.PublicKey = *NewECDSAPublicKey(currentTime, &pubkey) - default: - panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") - } - pk.PrivateKey = signer - return pk -} - -func (pk *PrivateKey) parse(r io.Reader) (err error) { - err = (&pk.PublicKey).parse(r) - if err != nil { - return - } - var buf [1]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - - s2kType := buf[0] - - switch s2kType { - case 0: - pk.s2k = nil - pk.Encrypted = false - case 254, 255: - _, err = readFull(r, buf[:]) - if err != nil { - return - } - pk.cipher = CipherFunction(buf[0]) - pk.Encrypted = true - pk.s2k, err = s2k.Parse(r) - if err != nil { - return - } - if s2kType == 254 { - pk.sha1Checksum = true - } - default: - return errors.UnsupportedError("deprecated s2k function in private key") - } - - if pk.Encrypted { - blockSize := pk.cipher.blockSize() - if blockSize == 0 { - return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) - } - pk.iv = make([]byte, blockSize) - _, err = readFull(r, pk.iv) - if err != nil { - return - } - } - - pk.encryptedData, err = ioutil.ReadAll(r) - if err != nil { - return - } - - if !pk.Encrypted { - return pk.parsePrivateKey(pk.encryptedData) - } - - return -} - -func mod64kHash(d []byte) uint16 { - var h uint16 - for _, b := range d { - h += uint16(b) - } - return h -} - -func (pk *PrivateKey) Serialize(w io.Writer) (err error) { - // TODO(agl): support encrypted private keys - buf := bytes.NewBuffer(nil) - err = pk.PublicKey.serializeWithoutHeaders(buf) - if err != nil { - return - } - buf.WriteByte(0 /* no encryption */) - - privateKeyBuf := bytes.NewBuffer(nil) - - switch priv := pk.PrivateKey.(type) { - case *rsa.PrivateKey: - err = serializeRSAPrivateKey(privateKeyBuf, priv) - case *dsa.PrivateKey: - err = serializeDSAPrivateKey(privateKeyBuf, priv) - case *elgamal.PrivateKey: - err = serializeElGamalPrivateKey(privateKeyBuf, priv) - case *ecdsa.PrivateKey: - err = serializeECDSAPrivateKey(privateKeyBuf, priv) - default: - err = errors.InvalidArgumentError("unknown private key type") - } - if err != nil { - return - } - - ptype := packetTypePrivateKey - contents := buf.Bytes() - privateKeyBytes := privateKeyBuf.Bytes() - if pk.IsSubkey { - ptype = packetTypePrivateSubkey - } - err = serializeHeader(w, ptype, len(contents)+len(privateKeyBytes)+2) - if err != nil { - return - } - _, err = w.Write(contents) - if err != nil { - return - } - _, err = w.Write(privateKeyBytes) - if err != nil { - return - } - - checksum := mod64kHash(privateKeyBytes) - var checksumBytes [2]byte - checksumBytes[0] = byte(checksum >> 8) - checksumBytes[1] = byte(checksum) - _, err = w.Write(checksumBytes[:]) - - return -} - -func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { - err := writeBig(w, priv.D) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[1]) - if err != nil { - return err - } - err = writeBig(w, priv.Primes[0]) - if err != nil { - return err - } - return writeBig(w, priv.Precomputed.Qinv) -} - -func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { - return writeBig(w, priv.X) -} - -func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { - 
return writeBig(w, priv.X) -} - -func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { - return writeBig(w, priv.D) -} - -// Decrypt decrypts an encrypted private key using a passphrase. -func (pk *PrivateKey) Decrypt(passphrase []byte) error { - if !pk.Encrypted { - return nil - } - - key := make([]byte, pk.cipher.KeySize()) - pk.s2k(key, passphrase) - block := pk.cipher.new(key) - cfb := cipher.NewCFBDecrypter(block, pk.iv) - - data := make([]byte, len(pk.encryptedData)) - cfb.XORKeyStream(data, pk.encryptedData) - - if pk.sha1Checksum { - if len(data) < sha1.Size { - return errors.StructuralError("truncated private key data") - } - h := sha1.New() - h.Write(data[:len(data)-sha1.Size]) - sum := h.Sum(nil) - if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-sha1.Size] - } else { - if len(data) < 2 { - return errors.StructuralError("truncated private key data") - } - var sum uint16 - for i := 0; i < len(data)-2; i++ { - sum += uint16(data[i]) - } - if data[len(data)-2] != uint8(sum>>8) || - data[len(data)-1] != uint8(sum) { - return errors.StructuralError("private key checksum failure") - } - data = data[:len(data)-2] - } - - return pk.parsePrivateKey(data) -} - -func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { - switch pk.PublicKey.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: - return pk.parseRSAPrivateKey(data) - case PubKeyAlgoDSA: - return pk.parseDSAPrivateKey(data) - case PubKeyAlgoElGamal: - return pk.parseElGamalPrivateKey(data) - case PubKeyAlgoECDSA: - return pk.parseECDSAPrivateKey(data) - } - panic("impossible") -} - -func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { - rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) - rsaPriv := new(rsa.PrivateKey) - rsaPriv.PublicKey = *rsaPub - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - p, _, err := readMPI(buf) - if err != nil { - return - } - q, _, err := readMPI(buf) - if err != nil { - return - } - - rsaPriv.D = new(big.Int).SetBytes(d) - rsaPriv.Primes = make([]*big.Int, 2) - rsaPriv.Primes[0] = new(big.Int).SetBytes(p) - rsaPriv.Primes[1] = new(big.Int).SetBytes(q) - if err := rsaPriv.Validate(); err != nil { - return err - } - rsaPriv.Precompute() - pk.PrivateKey = rsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { - dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) - dsaPriv := new(dsa.PrivateKey) - dsaPriv.PublicKey = *dsaPub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - dsaPriv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = dsaPriv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { - pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) - priv := new(elgamal.PrivateKey) - priv.PublicKey = *pub - - buf := bytes.NewBuffer(data) - x, _, err := readMPI(buf) - if err != nil { - return - } - - priv.X = new(big.Int).SetBytes(x) - pk.PrivateKey = priv - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} - -func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { - ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) - - buf := bytes.NewBuffer(data) - d, _, err := readMPI(buf) - if err != nil { - return - } - - pk.PrivateKey = &ecdsa.PrivateKey{ - 
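Decrypt above is normally reached through the higher-level openpgp package rather than called on a hand-built packet. A sketch of that path; the file name and passphrase are placeholders:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, err := os.Open("secret.asc") // placeholder: an armored private keyring
	if err != nil {
		panic(err)
	}
	defer f.Close()

	entities, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	pk := entities[0].PrivateKey
	if pk.Encrypted {
		// Decrypt runs the stored S2K over the passphrase, verifies the
		// checksum, and parses the algorithm-specific key material.
		if err := pk.Decrypt([]byte("correct horse battery staple")); err != nil {
			panic(err)
		}
	}
	fmt.Println("key ready:", pk.KeyIdString())
}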
PublicKey: *ecdsaPub, - D: new(big.Int).SetBytes(d), - } - pk.Encrypted = false - pk.encryptedData = nil - - return nil -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go deleted file mode 100644 index fcd5f52519..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ /dev/null @@ -1,753 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/elgamal" - "golang.org/x/crypto/openpgp/errors" -) - -var ( - // NIST curve P-256 - oidCurveP256 []byte = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07} - // NIST curve P-384 - oidCurveP384 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x22} - // NIST curve P-521 - oidCurveP521 []byte = []byte{0x2B, 0x81, 0x04, 0x00, 0x23} -) - -const maxOIDLength = 8 - -// ecdsaKey stores the algorithm-specific fields for ECDSA keys. -// as defined in RFC 6637, Section 9. -type ecdsaKey struct { - // oid contains the OID byte sequence identifying the elliptic curve used - oid []byte - // p contains the elliptic curve point that represents the public key - p parsedMPI -} - -// parseOID reads the OID for the curve as defined in RFC 6637, Section 9. -func parseOID(r io.Reader) (oid []byte, err error) { - buf := make([]byte, maxOIDLength) - if _, err = readFull(r, buf[:1]); err != nil { - return - } - oidLen := buf[0] - if int(oidLen) > len(buf) { - err = errors.UnsupportedError("invalid oid length: " + strconv.Itoa(int(oidLen))) - return - } - oid = buf[:oidLen] - _, err = readFull(r, oid) - return -} - -func (f *ecdsaKey) parse(r io.Reader) (err error) { - if f.oid, err = parseOID(r); err != nil { - return err - } - f.p.bytes, f.p.bitLength, err = readMPI(r) - return -} - -func (f *ecdsaKey) serialize(w io.Writer) (err error) { - buf := make([]byte, maxOIDLength+1) - buf[0] = byte(len(f.oid)) - copy(buf[1:], f.oid) - if _, err = w.Write(buf[:len(f.oid)+1]); err != nil { - return - } - return writeMPIs(w, f.p) -} - -func (f *ecdsaKey) newECDSA() (*ecdsa.PublicKey, error) { - var c elliptic.Curve - if bytes.Equal(f.oid, oidCurveP256) { - c = elliptic.P256() - } else if bytes.Equal(f.oid, oidCurveP384) { - c = elliptic.P384() - } else if bytes.Equal(f.oid, oidCurveP521) { - c = elliptic.P521() - } else { - return nil, errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", f.oid)) - } - x, y := elliptic.Unmarshal(c, f.p.bytes) - if x == nil { - return nil, errors.UnsupportedError("failed to parse EC point") - } - return &ecdsa.PublicKey{Curve: c, X: x, Y: y}, nil -} - -func (f *ecdsaKey) byteLen() int { - return 1 + len(f.oid) + 2 + len(f.p.bytes) -} - -type kdfHashFunction byte -type kdfAlgorithm byte - -// ecdhKdf stores key derivation function parameters -// used for ECDH encryption. See RFC 6637, Section 9. 
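newECDSA above maps the three NIST curve OIDs declared in this file to Go's elliptic package. The same table, extracted as a runnable helper:

package main

import (
	"bytes"
	"crypto/elliptic"
	"fmt"
)

// Curve OIDs from RFC 6637, matching the table in public_key.go.
var (
	oidP256 = []byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}
	oidP384 = []byte{0x2B, 0x81, 0x04, 0x00, 0x22}
	oidP521 = []byte{0x2B, 0x81, 0x04, 0x00, 0x23}
)

func curveForOID(oid []byte) (elliptic.Curve, bool) {
	switch {
	case bytes.Equal(oid, oidP256):
		return elliptic.P256(), true
	case bytes.Equal(oid, oidP384):
		return elliptic.P384(), true
	case bytes.Equal(oid, oidP521):
		return elliptic.P521(), true
	}
	return nil, false
}

func main() {
	c, ok := curveForOID(oidP384)
	fmt.Println(ok, c.Params().Name) // true P-384
}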
-type ecdhKdf struct { - KdfHash kdfHashFunction - KdfAlgo kdfAlgorithm -} - -func (f *ecdhKdf) parse(r io.Reader) (err error) { - buf := make([]byte, 1) - if _, err = readFull(r, buf); err != nil { - return - } - kdfLen := int(buf[0]) - if kdfLen < 3 { - return errors.UnsupportedError("Unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) - } - buf = make([]byte, kdfLen) - if _, err = readFull(r, buf); err != nil { - return - } - reserved := int(buf[0]) - f.KdfHash = kdfHashFunction(buf[1]) - f.KdfAlgo = kdfAlgorithm(buf[2]) - if reserved != 0x01 { - return errors.UnsupportedError("Unsupported KDF reserved field: " + strconv.Itoa(reserved)) - } - return -} - -func (f *ecdhKdf) serialize(w io.Writer) (err error) { - buf := make([]byte, 4) - // See RFC 6637, Section 9, Algorithm-Specific Fields for ECDH keys. - buf[0] = byte(0x03) // Length of the following fields - buf[1] = byte(0x01) // Reserved for future extensions, must be 1 for now - buf[2] = byte(f.KdfHash) - buf[3] = byte(f.KdfAlgo) - _, err = w.Write(buf[:]) - return -} - -func (f *ecdhKdf) byteLen() int { - return 4 -} - -// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. -type PublicKey struct { - CreationTime time.Time - PubKeyAlgo PublicKeyAlgorithm - PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey or *ecdsa.PublicKey - Fingerprint [20]byte - KeyId uint64 - IsSubkey bool - - n, e, p, q, g, y parsedMPI - - // RFC 6637 fields - ec *ecdsaKey - ecdh *ecdhKdf -} - -// signingKey provides a convenient abstraction over signature verification -// for v3 and v4 public keys. -type signingKey interface { - SerializeSignaturePrefix(io.Writer) - serializeWithoutHeaders(io.Writer) error -} - -func fromBig(n *big.Int) parsedMPI { - return parsedMPI{ - bytes: n.Bytes(), - bitLength: uint16(n.BitLen()), - } -} - -// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. -func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoRSA, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. -func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoDSA, - PublicKey: pub, - p: fromBig(pub.P), - q: fromBig(pub.Q), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
-func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoElGamal, - PublicKey: pub, - p: fromBig(pub.P), - g: fromBig(pub.G), - y: fromBig(pub.Y), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { - pk := &PublicKey{ - CreationTime: creationTime, - PubKeyAlgo: PubKeyAlgoECDSA, - PublicKey: pub, - ec: new(ecdsaKey), - } - - switch pub.Curve { - case elliptic.P256(): - pk.ec.oid = oidCurveP256 - case elliptic.P384(): - pk.ec.oid = oidCurveP384 - case elliptic.P521(): - pk.ec.oid = oidCurveP521 - default: - panic("unknown elliptic curve") - } - - pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - - // The bit length is 3 (for the 0x04 specifying an uncompressed key) - // plus two field elements (for x and y), which are rounded up to the - // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 - fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 - pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKey) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [6]byte - _, err = readFull(r, buf[:]) - if err != nil { - return - } - if buf[0] != 4 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - case PubKeyAlgoDSA: - err = pk.parseDSA(r) - case PubKeyAlgoElGamal: - err = pk.parseElGamal(r) - case PubKeyAlgoECDSA: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return err - } - pk.PublicKey, err = pk.ec.newECDSA() - case PubKeyAlgoECDH: - pk.ec = new(ecdsaKey) - if err = pk.ec.parse(r); err != nil { - return - } - pk.ecdh = new(ecdhKdf) - if err = pk.ecdh.parse(r); err != nil { - return - } - // The ECDH key is stored in an ecdsa.PublicKey for convenience. - pk.PublicKey, err = pk.ec.newECDSA() - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKey) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := sha1.New() - pk.SerializeSignaturePrefix(fingerPrint) - pk.serializeWithoutHeaders(fingerPrint) - copy(pk.Fingerprint[:], fingerPrint.Sum(nil)) - pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKey) parseRSA(r io.Reader) (err error) { - pk.n.bytes, pk.n.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.e.bytes, pk.e.bitLength, err = readMPI(r) - if err != nil { - return - } - - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{ - N: new(big.Int).SetBytes(pk.n.bytes), - E: 0, - } - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// parseDSA parses DSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. 
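Every constructor above ends by calling setFingerPrintAndKeyId, which hashes the serialized key with SHA-1 (RFC 4880, section 12.2) and takes the key ID from the fingerprint's low 64 bits. A sketch observing this through the exported fields, with a throwaway key:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// The constructor fills Fingerprint (SHA-1 over the serialized key) and
	// KeyId (its low 64 bits) via setFingerPrintAndKeyId.
	pub := packet.NewRSAPublicKey(time.Now(), &rsaKey.PublicKey)
	fmt.Printf("fingerprint: %X\n", pub.Fingerprint)
	fmt.Println("key id:     ", pub.KeyIdString())
}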
-func (pk *PublicKey) parseDSA(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.q.bytes, pk.q.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - dsa := new(dsa.PublicKey) - dsa.P = new(big.Int).SetBytes(pk.p.bytes) - dsa.Q = new(big.Int).SetBytes(pk.q.bytes) - dsa.G = new(big.Int).SetBytes(pk.g.bytes) - dsa.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = dsa - return -} - -// parseElGamal parses ElGamal public key material from the given Reader. See -// RFC 4880, section 5.5.2. -func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { - pk.p.bytes, pk.p.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.g.bytes, pk.g.bitLength, err = readMPI(r) - if err != nil { - return - } - pk.y.bytes, pk.y.bitLength, err = readMPI(r) - if err != nil { - return - } - - elgamal := new(elgamal.PublicKey) - elgamal.P = new(big.Int).SetBytes(pk.p.bytes) - elgamal.G = new(big.Int).SetBytes(pk.g.bytes) - elgamal.Y = new(big.Int).SetBytes(pk.y.bytes) - pk.PublicKey = elgamal - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. -func (pk *PublicKey) SerializeSignaturePrefix(h io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - case PubKeyAlgoDSA: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.q.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoElGamal: - pLength += 2 + uint16(len(pk.p.bytes)) - pLength += 2 + uint16(len(pk.g.bytes)) - pLength += 2 + uint16(len(pk.y.bytes)) - case PubKeyAlgoECDSA: - pLength += uint16(pk.ec.byteLen()) - case PubKeyAlgoECDH: - pLength += uint16(pk.ec.byteLen()) - pLength += uint16(pk.ecdh.byteLen()) - default: - panic("unknown public key algorithm") - } - pLength += 6 - h.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKey) Serialize(w io.Writer) (err error) { - length := 6 // 6 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - case PubKeyAlgoDSA: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.q.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoElGamal: - length += 2 + len(pk.p.bytes) - length += 2 + len(pk.g.bytes) - length += 2 + len(pk.y.bytes) - case PubKeyAlgoECDSA: - length += pk.ec.byteLen() - case PubKeyAlgoECDH: - length += pk.ec.byteLen() - length += pk.ecdh.byteLen() - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - err = serializeHeader(w, packetType, length) - if err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. 
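All of these parsers share the OpenPGP MPI wire format: a two-octet big-endian bit count followed by the magnitude bytes (RFC 4880, section 3.2). A self-contained sketch of the writer side, mirroring the writeMPI/writeBig helpers these files rely on:

package main

import (
	"bytes"
	"fmt"
	"math/big"
)

// writeMPI encodes n as an OpenPGP multiprecision integer: a two-octet
// big-endian bit length, then the magnitude with no leading zero bytes.
func writeMPI(w *bytes.Buffer, n *big.Int) {
	bits := uint16(n.BitLen())
	w.WriteByte(byte(bits >> 8))
	w.WriteByte(byte(bits))
	w.Write(n.Bytes())
}

func main() {
	var buf bytes.Buffer
	writeMPI(&buf, big.NewInt(511)) // 9 bits, two magnitude bytes
	fmt.Printf("% x\n", buf.Bytes()) // 00 09 01 ff
}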
-func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [6]byte - buf[0] = 4 - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - buf[5] = byte(pk.PubKeyAlgo) - - _, err = w.Write(buf[:]) - if err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - case PubKeyAlgoDSA: - return writeMPIs(w, pk.p, pk.q, pk.g, pk.y) - case PubKeyAlgoElGamal: - return writeMPIs(w, pk.p, pk.g, pk.y) - case PubKeyAlgoECDSA: - return pk.ec.serialize(w) - case PubKeyAlgoECDH: - if err = pk.ec.serialize(w); err != nil { - return - } - return pk.ecdh.serialize(w) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKey) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal -} - -// VerifySignature returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - signed.Write(sig.HashSuffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) - if err != nil { - return errors.SignatureError("RSA verification failure") - } - return nil - case PubKeyAlgoDSA: - dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - case PubKeyAlgoECDSA: - ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) - if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.bytes), new(big.Int).SetBytes(sig.ECDSASigS.bytes)) { - return errors.SignatureError("ECDSA verification failure") - } - return nil - default: - return errors.SignatureError("Unsupported public key algorithm used in signature") - } -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. 
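VerifySignature is usually driven by the top-level openpgp package when checking a detached signature. A sketch of that flow; all three file names are placeholders:

package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/openpgp"
)

func check(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	keyf, err := os.Open("pubring.asc") // armored keyring
	check(err)
	defer keyf.Close()
	keyring, err := openpgp.ReadArmoredKeyRing(keyf)
	check(err)

	data, err := os.Open("release.tar.gz") // the signed data
	check(err)
	defer data.Close()
	sig, err := os.Open("release.tar.gz.asc") // armored detached signature
	check(err)
	defer sig.Close()

	// Internally this hashes the data and ends up in PublicKey.VerifySignature.
	signer, err := openpgp.CheckArmoredDetachedSignature(keyring, data, sig)
	check(err)
	fmt.Println("good signature from", signer.PrimaryKey.KeyIdString())
}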
-func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - case PubKeyAlgoDSA: - dsaPublicKey := pk.PublicKey.(*dsa.PublicKey) - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 - if len(hashBytes) > subgroupSize { - hashBytes = hashBytes[:subgroupSize] - } - if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.bytes), new(big.Int).SetBytes(sig.DSASigS.bytes)) { - return errors.SignatureError("DSA verification failure") - } - return nil - default: - panic("shouldn't happen") - } -} - -// keySignatureHash returns a Hash of the message that needs to be signed for -// pk to assert a subkey relationship to signed. -func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - signed.SerializeSignaturePrefix(h) - signed.serializeWithoutHeaders(h) - return -} - -// VerifyKeySignature returns nil iff sig is a valid signature, made by this -// public key, of signed. -func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - if err = pk.VerifySignature(h, sig); err != nil { - return err - } - - if sig.FlagSign { - // Signing subkeys must be cross-signed. See - // https://www.gnupg.org/faq/subkey-cross-certify.html. - if sig.EmbeddedSignature == nil { - return errors.StructuralError("signing subkey is missing cross-signature") - } - // Verify the cross-signature. This is calculated over the same - // data as the main signature, so we cannot just recursively - // call signed.VerifyKeySignature(...) - if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { - return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) - } - if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { - return errors.StructuralError("error while verifying cross-signature: " + err.Error()) - } - } - - return nil -} - -func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - return -} - -// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this -// public key. 
-func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { - h, err := keyRevocationHash(pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// userIdSignatureHash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { - if !hashFunc.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hashFunc.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - var buf [5]byte - buf[0] = 0xb4 - buf[1] = byte(len(id) >> 24) - buf[2] = byte(len(id) >> 16) - buf[3] = byte(len(id) >> 8) - buf[4] = byte(len(id)) - h.Write(buf[:]) - h.Write([]byte(id)) - - return -} - -// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignature(h, sig) -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKey) VerifyUserIdSignatureV3(id string, pub *PublicKey, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pub, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKey) KeyIdString() string { - return fmt.Sprintf("%X", pk.Fingerprint[12:20]) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKey) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.Fingerprint[16:20]) -} - -// A parsedMPI is used to store the contents of a big integer, along with the -// bit length that was specified in the original input. This allows the MPI to -// be reserialized exactly. -type parsedMPI struct { - bytes []byte - bitLength uint16 -} - -// writeMPIs is a utility function for serializing several big integers to the -// given Writer. -func writeMPIs(w io.Writer, mpis ...parsedMPI) (err error) { - for _, mpi := range mpis { - err = writeMPI(w, mpi.bitLength, mpi.bytes) - if err != nil { - return - } - } - return -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKey) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - case PubKeyAlgoDSA: - bitLength = pk.p.bitLength - case PubKeyAlgoElGamal: - bitLength = pk.p.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go deleted file mode 100644 index 5daf7b6cfd..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package packet - -import ( - "crypto" - "crypto/md5" - "crypto/rsa" - "encoding/binary" - "fmt" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" -) - -// PublicKeyV3 represents older, version 3 public keys. These keys are less secure and -// should not be used for signing or encrypting. They are supported here only for -// parsing version 3 key material and validating signatures. -// See RFC 4880, section 5.5.2. -type PublicKeyV3 struct { - CreationTime time.Time - DaysToExpire uint16 - PubKeyAlgo PublicKeyAlgorithm - PublicKey *rsa.PublicKey - Fingerprint [16]byte - KeyId uint64 - IsSubkey bool - - n, e parsedMPI -} - -// newRSAPublicKeyV3 returns a PublicKey that wraps the given rsa.PublicKey. -// Included here for testing purposes only. RFC 4880, section 5.5.2: -// "an implementation MUST NOT generate a V3 key, but MAY accept it." -func newRSAPublicKeyV3(creationTime time.Time, pub *rsa.PublicKey) *PublicKeyV3 { - pk := &PublicKeyV3{ - CreationTime: creationTime, - PublicKey: pub, - n: fromBig(pub.N), - e: fromBig(big.NewInt(int64(pub.E))), - } - - pk.setFingerPrintAndKeyId() - return pk -} - -func (pk *PublicKeyV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.5.2 - var buf [8]byte - if _, err = readFull(r, buf[:]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - return errors.UnsupportedError("public key version") - } - pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) - pk.DaysToExpire = binary.BigEndian.Uint16(buf[5:7]) - pk.PubKeyAlgo = PublicKeyAlgorithm(buf[7]) - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - err = pk.parseRSA(r) - default: - err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) - } - if err != nil { - return - } - - pk.setFingerPrintAndKeyId() - return -} - -func (pk *PublicKeyV3) setFingerPrintAndKeyId() { - // RFC 4880, section 12.2 - fingerPrint := md5.New() - fingerPrint.Write(pk.n.bytes) - fingerPrint.Write(pk.e.bytes) - fingerPrint.Sum(pk.Fingerprint[:0]) - pk.KeyId = binary.BigEndian.Uint64(pk.n.bytes[len(pk.n.bytes)-8:]) -} - -// parseRSA parses RSA public key material from the given Reader. See RFC 4880, -// section 5.5.2. -func (pk *PublicKeyV3) parseRSA(r io.Reader) (err error) { - if pk.n.bytes, pk.n.bitLength, err = readMPI(r); err != nil { - return - } - if pk.e.bytes, pk.e.bitLength, err = readMPI(r); err != nil { - return - } - - // RFC 4880 Section 12.2 requires the low 8 bytes of the - // modulus to form the key id. - if len(pk.n.bytes) < 8 { - return errors.StructuralError("v3 public key modulus is too short") - } - if len(pk.e.bytes) > 3 { - err = errors.UnsupportedError("large public exponent") - return - } - rsa := &rsa.PublicKey{N: new(big.Int).SetBytes(pk.n.bytes)} - for i := 0; i < len(pk.e.bytes); i++ { - rsa.E <<= 8 - rsa.E |= int(pk.e.bytes[i]) - } - pk.PublicKey = rsa - return -} - -// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. -// The prefix is used when calculating a signature over this public key. See -// RFC 4880, section 5.2.4. 
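Unlike v4 keys, the v3 fingerprint above is MD5 over the raw modulus and exponent bytes, and the key ID comes from the modulus rather than the fingerprint. A standalone re-derivation with stand-in key material:

package main

import (
	"crypto/md5"
	"encoding/binary"
	"fmt"
)

// v3Fingerprint mirrors PublicKeyV3.setFingerPrintAndKeyId: MD5 over the raw
// modulus and exponent bytes (no headers), with the key ID taken from the low
// eight bytes of the modulus (RFC 4880, section 12.2).
func v3Fingerprint(n, e []byte) ([16]byte, uint64) {
	h := md5.New()
	h.Write(n)
	h.Write(e)
	var fp [16]byte
	copy(fp[:], h.Sum(nil))
	keyID := binary.BigEndian.Uint64(n[len(n)-8:])
	return fp, keyID
}

func main() {
	n := make([]byte, 256) // stand-in modulus bytes
	n[len(n)-1] = 0x42
	fp, id := v3Fingerprint(n, []byte{0x01, 0x00, 0x01})
	fmt.Printf("fingerprint %X, key id %016X\n", fp, id)
}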
-func (pk *PublicKeyV3) SerializeSignaturePrefix(w io.Writer) { - var pLength uint16 - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - pLength += 2 + uint16(len(pk.n.bytes)) - pLength += 2 + uint16(len(pk.e.bytes)) - default: - panic("unknown public key algorithm") - } - pLength += 6 - w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) - return -} - -func (pk *PublicKeyV3) Serialize(w io.Writer) (err error) { - length := 8 // 8 byte header - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - length += 2 + len(pk.n.bytes) - length += 2 + len(pk.e.bytes) - default: - panic("unknown public key algorithm") - } - - packetType := packetTypePublicKey - if pk.IsSubkey { - packetType = packetTypePublicSubkey - } - if err = serializeHeader(w, packetType, length); err != nil { - return - } - return pk.serializeWithoutHeaders(w) -} - -// serializeWithoutHeaders marshals the PublicKey to w in the form of an -// OpenPGP public key packet, not including the packet header. -func (pk *PublicKeyV3) serializeWithoutHeaders(w io.Writer) (err error) { - var buf [8]byte - // Version 3 - buf[0] = 3 - // Creation time - t := uint32(pk.CreationTime.Unix()) - buf[1] = byte(t >> 24) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 8) - buf[4] = byte(t) - // Days to expire - buf[5] = byte(pk.DaysToExpire >> 8) - buf[6] = byte(pk.DaysToExpire) - // Public key algorithm - buf[7] = byte(pk.PubKeyAlgo) - - if _, err = w.Write(buf[:]); err != nil { - return - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - return writeMPIs(w, pk.n, pk.e) - } - return errors.InvalidArgumentError("bad public-key algorithm") -} - -// CanSign returns true iff this public key can generate signatures -func (pk *PublicKeyV3) CanSign() bool { - return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly -} - -// VerifySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of the data hashed into signed. signed is mutated by this call. -func (pk *PublicKeyV3) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err error) { - if !pk.CanSign() { - return errors.InvalidArgumentError("public key cannot generate signatures") - } - - suffix := make([]byte, 5) - suffix[0] = byte(sig.SigType) - binary.BigEndian.PutUint32(suffix[1:], uint32(sig.CreationTime.Unix())) - signed.Write(suffix) - hashBytes := signed.Sum(nil) - - if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { - return errors.SignatureError("hash tag doesn't match") - } - - if pk.PubKeyAlgo != sig.PubKeyAlgo { - return errors.InvalidArgumentError("public key and signature use different algorithms") - } - - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - if err = rsa.VerifyPKCS1v15(pk.PublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { - return errors.SignatureError("RSA verification failure") - } - return - default: - // V3 public keys only support RSA. - panic("shouldn't happen") - } -} - -// VerifyUserIdSignatureV3 returns nil iff sig is a valid signature, made by this -// public key, that id is the identity of pub. -func (pk *PublicKeyV3) VerifyUserIdSignatureV3(id string, pub *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := userIdSignatureV3Hash(id, pk, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// VerifyKeySignatureV3 returns nil iff sig is a valid signature, made by this -// public key, of signed. 
-func (pk *PublicKeyV3) VerifyKeySignatureV3(signed *PublicKeyV3, sig *SignatureV3) (err error) { - h, err := keySignatureHash(pk, signed, sig.Hash) - if err != nil { - return err - } - return pk.VerifySignatureV3(h, sig) -} - -// userIdSignatureV3Hash returns a Hash of the message that needs to be signed -// to assert that pk is a valid key for id. -func userIdSignatureV3Hash(id string, pk signingKey, hfn crypto.Hash) (h hash.Hash, err error) { - if !hfn.Available() { - return nil, errors.UnsupportedError("hash function") - } - h = hfn.New() - - // RFC 4880, section 5.2.4 - pk.SerializeSignaturePrefix(h) - pk.serializeWithoutHeaders(h) - - h.Write([]byte(id)) - - return -} - -// KeyIdString returns the public key's fingerprint in capital hex -// (e.g. "6C7EE1B8621CC013"). -func (pk *PublicKeyV3) KeyIdString() string { - return fmt.Sprintf("%X", pk.KeyId) -} - -// KeyIdShortString returns the short form of public key's fingerprint -// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). -func (pk *PublicKeyV3) KeyIdShortString() string { - return fmt.Sprintf("%X", pk.KeyId&0xFFFFFFFF) -} - -// BitLength returns the bit length for the given public key. -func (pk *PublicKeyV3) BitLength() (bitLength uint16, err error) { - switch pk.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: - bitLength = pk.n.bitLength - default: - err = errors.InvalidArgumentError("bad public-key algorithm") - } - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/reader.go b/vendor/golang.org/x/crypto/openpgp/packet/reader.go deleted file mode 100644 index 34bc7c613e..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/reader.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "golang.org/x/crypto/openpgp/errors" - "io" -) - -// Reader reads packets from an io.Reader and allows packets to be 'unread' so -// that they result from the next call to Next. -type Reader struct { - q []Packet - readers []io.Reader -} - -// New io.Readers are pushed when a compressed or encrypted packet is processed -// and recursively treated as a new source of packets. However, a carefully -// crafted packet can trigger an infinite recursive sequence of packets. See -// http://mumble.net/~campbell/misc/pgp-quine -// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 -// This constant limits the number of recursive packets that may be pushed. -const maxReaders = 32 - -// Next returns the most recently unread Packet, or reads another packet from -// the top-most io.Reader. Unknown packet types are skipped. -func (r *Reader) Next() (p Packet, err error) { - if len(r.q) > 0 { - p = r.q[len(r.q)-1] - r.q = r.q[:len(r.q)-1] - return - } - - for len(r.readers) > 0 { - p, err = Read(r.readers[len(r.readers)-1]) - if err == nil { - return - } - if err == io.EOF { - r.readers = r.readers[:len(r.readers)-1] - continue - } - if _, ok := err.(errors.UnknownPacketTypeError); !ok { - return nil, err - } - } - - return nil, io.EOF -} - -// Push causes the Reader to start reading from a new io.Reader. When an EOF -// error is seen from the new io.Reader, it is popped and the Reader continues -// to read from the next most recent io.Reader. Push returns a StructuralError -// if pushing the reader would exceed the maximum recursion level, otherwise it -// returns nil. 
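reader.go's Reader above is the entry point for walking a raw packet stream; Next yields one parsed packet at a time and pops nested readers on EOF. A sketch that lists packet types from a binary (non-armored) file, with the file name as a placeholder:

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/openpgp/packet"
)

func main() {
	f, err := os.Open("message.gpg") // placeholder: a binary OpenPGP stream
	if err != nil {
		panic(err)
	}
	defer f.Close()

	r := packet.NewReader(f)
	for {
		p, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Each packet arrives as a concrete type: *packet.PublicKey,
		// *packet.Signature, *packet.LiteralData, and so on.
		fmt.Printf("%T\n", p)
	}
}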
-func (r *Reader) Push(reader io.Reader) (err error) { - if len(r.readers) >= maxReaders { - return errors.StructuralError("too many layers of packets") - } - r.readers = append(r.readers, reader) - return nil -} - -// Unread causes the given Packet to be returned from the next call to Next. -func (r *Reader) Unread(p Packet) { - r.q = append(r.q, p) -} - -func NewReader(r io.Reader) *Reader { - return &Reader{ - q: nil, - readers: []io.Reader{r}, - } -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature.go b/vendor/golang.org/x/crypto/openpgp/packet/signature.go deleted file mode 100644 index 6ce0cbedbe..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "encoding/asn1" - "encoding/binary" - "hash" - "io" - "math/big" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -const ( - // See RFC 4880, section 5.2.3.21 for details. - KeyFlagCertify = 1 << iota - KeyFlagSign - KeyFlagEncryptCommunications - KeyFlagEncryptStorage -) - -// Signature represents a signature. See RFC 4880, section 5.2. -type Signature struct { - SigType SignatureType - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - - // HashSuffix is extra data that is hashed in after the signed data. - HashSuffix []byte - // HashTag contains the first two bytes of the hash for fast rejection - // of bad signed data. - HashTag [2]byte - CreationTime time.Time - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI - ECDSASigR, ECDSASigS parsedMPI - - // rawSubpackets contains the unparsed subpackets, in order. - rawSubpackets []outputSubpacket - - // The following are optional so are nil when not included in the - // signature. - - SigLifetimeSecs, KeyLifetimeSecs *uint32 - PreferredSymmetric, PreferredHash, PreferredCompression []uint8 - IssuerKeyId *uint64 - IsPrimaryId *bool - - // FlagsValid is set if any flags were given. See RFC 4880, section - // 5.2.3.21 for details. - FlagsValid bool - FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool - - // RevocationReason is set if this signature has been revoked. - // See RFC 4880, section 5.2.3.23 for details. - RevocationReason *uint8 - RevocationReasonText string - - // MDC is set if this signature has a feature packet that indicates - // support for MDC subpackets. - MDC bool - - // EmbeddedSignature, if non-nil, is a signature of the parent key, by - // this key. This prevents an attacker from claiming another's signing - // subkey as their own. 
- EmbeddedSignature *Signature - - outSubpackets []outputSubpacket -} - -func (sig *Signature) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.3 - var buf [5]byte - _, err = readFull(r, buf[:1]) - if err != nil { - return - } - if buf[0] != 4 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - - _, err = readFull(r, buf[:5]) - if err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA: - default: - err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) - return - } - - var ok bool - sig.Hash, ok = s2k.HashIdToHash(buf[2]) - if !ok { - return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) - } - - hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) - l := 6 + hashedSubpacketsLength - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - copy(sig.HashSuffix[1:], buf[:5]) - hashedSubpackets := sig.HashSuffix[6:l] - _, err = readFull(r, hashedSubpackets) - if err != nil { - return - } - // See RFC 4880, section 5.2.4 - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = uint8(l >> 24) - trailer[3] = uint8(l >> 16) - trailer[4] = uint8(l >> 8) - trailer[5] = uint8(l) - - err = parseSignatureSubpackets(sig, hashedSubpackets, true) - if err != nil { - return - } - - _, err = readFull(r, buf[:2]) - if err != nil { - return - } - unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) - unhashedSubpackets := make([]byte, unhashedSubpacketsLength) - _, err = readFull(r, unhashedSubpackets) - if err != nil { - return - } - err = parseSignatureSubpackets(sig, unhashedSubpackets, false) - if err != nil { - return - } - - _, err = readFull(r, sig.HashTag[:2]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r) - case PubKeyAlgoDSA: - sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r) - } - case PubKeyAlgoECDSA: - sig.ECDSASigR.bytes, sig.ECDSASigR.bitLength, err = readMPI(r) - if err == nil { - sig.ECDSASigS.bytes, sig.ECDSASigS.bitLength, err = readMPI(r) - } - default: - panic("unreachable") - } - return -} - -// parseSignatureSubpackets parses subpackets of the main signature packet. See -// RFC 4880, section 5.2.3.1. 
-func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { - for len(subpackets) > 0 { - subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) - if err != nil { - return - } - } - - if sig.CreationTime.IsZero() { - err = errors.StructuralError("no creation time in signature") - } - - return -} - -type signatureSubpacketType uint8 - -const ( - creationTimeSubpacket signatureSubpacketType = 2 - signatureExpirationSubpacket signatureSubpacketType = 3 - keyExpirationSubpacket signatureSubpacketType = 9 - prefSymmetricAlgosSubpacket signatureSubpacketType = 11 - issuerSubpacket signatureSubpacketType = 16 - prefHashAlgosSubpacket signatureSubpacketType = 21 - prefCompressionSubpacket signatureSubpacketType = 22 - primaryUserIdSubpacket signatureSubpacketType = 25 - keyFlagsSubpacket signatureSubpacketType = 27 - reasonForRevocationSubpacket signatureSubpacketType = 29 - featuresSubpacket signatureSubpacketType = 30 - embeddedSignatureSubpacket signatureSubpacketType = 32 -) - -// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. -func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { - // RFC 4880, section 5.2.3.1 - var ( - length uint32 - packetType signatureSubpacketType - isCritical bool - ) - switch { - case subpacket[0] < 192: - length = uint32(subpacket[0]) - subpacket = subpacket[1:] - case subpacket[0] < 255: - if len(subpacket) < 2 { - goto Truncated - } - length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 - subpacket = subpacket[2:] - default: - if len(subpacket) < 5 { - goto Truncated - } - length = uint32(subpacket[1])<<24 | - uint32(subpacket[2])<<16 | - uint32(subpacket[3])<<8 | - uint32(subpacket[4]) - subpacket = subpacket[5:] - } - if length > uint32(len(subpacket)) { - goto Truncated - } - rest = subpacket[length:] - subpacket = subpacket[:length] - if len(subpacket) == 0 { - err = errors.StructuralError("zero length signature subpacket") - return - } - packetType = signatureSubpacketType(subpacket[0] & 0x7f) - isCritical = subpacket[0]&0x80 == 0x80 - subpacket = subpacket[1:] - sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) - switch packetType { - case creationTimeSubpacket: - if !isHashed { - err = errors.StructuralError("signature creation time in non-hashed area") - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("signature creation time not four bytes") - return - } - t := binary.BigEndian.Uint32(subpacket) - sig.CreationTime = time.Unix(int64(t), 0) - case signatureExpirationSubpacket: - // Signature expiration time, section 5.2.3.10 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("expiration subpacket with bad length") - return - } - sig.SigLifetimeSecs = new(uint32) - *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case keyExpirationSubpacket: - // Key expiration time, section 5.2.3.6 - if !isHashed { - return - } - if len(subpacket) != 4 { - err = errors.StructuralError("key expiration subpacket with bad length") - return - } - sig.KeyLifetimeSecs = new(uint32) - *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) - case prefSymmetricAlgosSubpacket: - // Preferred symmetric algorithms, section 5.2.3.7 - if !isHashed { - return - } - sig.PreferredSymmetric = make([]byte, len(subpacket)) - copy(sig.PreferredSymmetric, subpacket) - case issuerSubpacket: - // Issuer, section 5.2.3.5 - if 
len(subpacket) != 8 { - err = errors.StructuralError("issuer subpacket with bad length") - return - } - sig.IssuerKeyId = new(uint64) - *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) - case prefHashAlgosSubpacket: - // Preferred hash algorithms, section 5.2.3.8 - if !isHashed { - return - } - sig.PreferredHash = make([]byte, len(subpacket)) - copy(sig.PreferredHash, subpacket) - case prefCompressionSubpacket: - // Preferred compression algorithms, section 5.2.3.9 - if !isHashed { - return - } - sig.PreferredCompression = make([]byte, len(subpacket)) - copy(sig.PreferredCompression, subpacket) - case primaryUserIdSubpacket: - // Primary User ID, section 5.2.3.19 - if !isHashed { - return - } - if len(subpacket) != 1 { - err = errors.StructuralError("primary user id subpacket with bad length") - return - } - sig.IsPrimaryId = new(bool) - if subpacket[0] > 0 { - *sig.IsPrimaryId = true - } - case keyFlagsSubpacket: - // Key flags, section 5.2.3.21 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty key flags subpacket") - return - } - sig.FlagsValid = true - if subpacket[0]&KeyFlagCertify != 0 { - sig.FlagCertify = true - } - if subpacket[0]&KeyFlagSign != 0 { - sig.FlagSign = true - } - if subpacket[0]&KeyFlagEncryptCommunications != 0 { - sig.FlagEncryptCommunications = true - } - if subpacket[0]&KeyFlagEncryptStorage != 0 { - sig.FlagEncryptStorage = true - } - case reasonForRevocationSubpacket: - // Reason For Revocation, section 5.2.3.23 - if !isHashed { - return - } - if len(subpacket) == 0 { - err = errors.StructuralError("empty revocation reason subpacket") - return - } - sig.RevocationReason = new(uint8) - *sig.RevocationReason = subpacket[0] - sig.RevocationReasonText = string(subpacket[1:]) - case featuresSubpacket: - // Features subpacket, section 5.2.3.24 specifies a very general - // mechanism for OpenPGP implementations to signal support for new - // features. In practice, the subpacket is used exclusively to - // indicate support for MDC-protected encryption. - sig.MDC = len(subpacket) >= 1 && subpacket[0]&1 == 1 - case embeddedSignatureSubpacket: - // Only usage is in signatures that cross-certify - // signing subkeys. section 5.2.3.26 describes the - // format, with its usage described in section 11.1 - if sig.EmbeddedSignature != nil { - err = errors.StructuralError("Cannot have multiple embedded signatures") - return - } - sig.EmbeddedSignature = new(Signature) - // Embedded signatures are required to be v4 signatures see - // section 12.1. However, we only parse v4 signatures in this - // file anyway. - if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { - return nil, err - } - if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { - return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) - } - default: - if isCritical { - err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) - return - } - } - return - -Truncated: - err = errors.StructuralError("signature subpacket truncated") - return -} - -// subpacketLengthLength returns the length, in bytes, of an encoded length value. -func subpacketLengthLength(length int) int { - if length < 192 { - return 1 - } - if length < 16320 { - return 2 - } - return 5 -} - -// serializeSubpacketLength marshals the given length into to. -func serializeSubpacketLength(to []byte, length int) int { - // RFC 4880, Section 4.2.2. 
- if length < 192 { - to[0] = byte(length) - return 1 - } - if length < 16320 { - length -= 192 - to[0] = byte((length >> 8) + 192) - to[1] = byte(length) - return 2 - } - to[0] = 255 - to[1] = byte(length >> 24) - to[2] = byte(length >> 16) - to[3] = byte(length >> 8) - to[4] = byte(length) - return 5 -} - -// subpacketsLength returns the serialized length, in bytes, of the given -// subpackets. -func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - length += subpacketLengthLength(len(subpacket.contents) + 1) - length += 1 // type byte - length += len(subpacket.contents) - } - } - return -} - -// serializeSubpackets marshals the given subpackets into to. -func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { - for _, subpacket := range subpackets { - if subpacket.hashed == hashed { - n := serializeSubpacketLength(to, len(subpacket.contents)+1) - to[n] = byte(subpacket.subpacketType) - to = to[1+n:] - n = copy(to, subpacket.contents) - to = to[n:] - } - } - return -} - -// KeyExpired returns whether sig is a self-signature of a key that has -// expired. -func (sig *Signature) KeyExpired(currentTime time.Time) bool { - if sig.KeyLifetimeSecs == nil { - return false - } - expiry := sig.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) - return currentTime.After(expiry) -} - -// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing. -func (sig *Signature) buildHashSuffix() (err error) { - hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) - - var ok bool - l := 6 + hashedSubpacketsLen - sig.HashSuffix = make([]byte, l+6) - sig.HashSuffix[0] = 4 - sig.HashSuffix[1] = uint8(sig.SigType) - sig.HashSuffix[2] = uint8(sig.PubKeyAlgo) - sig.HashSuffix[3], ok = s2k.HashToHashId(sig.Hash) - if !ok { - sig.HashSuffix = nil - return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) - } - sig.HashSuffix[4] = byte(hashedSubpacketsLen >> 8) - sig.HashSuffix[5] = byte(hashedSubpacketsLen) - serializeSubpackets(sig.HashSuffix[6:l], sig.outSubpackets, true) - trailer := sig.HashSuffix[l:] - trailer[0] = 4 - trailer[1] = 0xff - trailer[2] = byte(l >> 24) - trailer[3] = byte(l >> 16) - trailer[4] = byte(l >> 8) - trailer[5] = byte(l) - return -} - -func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { - err = sig.buildHashSuffix() - if err != nil { - return - } - - h.Write(sig.HashSuffix) - digest = h.Sum(nil) - copy(sig.HashTag[:], digest) - return -} - -// Sign signs a message with a private key. The hash, h, must contain -// the hash of the message to be signed and will be mutated by this function. -// On success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. 
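parseSignatureSubpacket and serializeSubpacketLength above implement the one-, two-, and five-octet length encoding of RFC 4880, section 5.2.3.1. The decoder side as a standalone sketch (bounds checks omitted; the real parser guards against truncation):

package main

import "fmt"

// parseSubpacketLength decodes a subpacket length prefix, matching the switch
// at the top of parseSignatureSubpacket.
func parseSubpacketLength(b []byte) (length uint32, rest []byte) {
	switch {
	case b[0] < 192:
		return uint32(b[0]), b[1:]
	case b[0] < 255:
		return uint32(b[0]-192)<<8 + uint32(b[1]) + 192, b[2:]
	default:
		return uint32(b[1])<<24 | uint32(b[2])<<16 |
			uint32(b[3])<<8 | uint32(b[4]), b[5:]
	}
}

func main() {
	// 0xC0 0x00 encodes 192, the smallest two-octet length.
	n, rest := parseSubpacketLength([]byte{0xC0, 0x00, 0xAA})
	fmt.Println(n, len(rest)) // 192 1
}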
-func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { - sig.outSubpackets = sig.buildSubpackets() - digest, err := sig.signPrepareHash(h) - if err != nil { - return - } - - switch priv.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - // supports both *rsa.PrivateKey and crypto.Signer - sig.RSASignature.bytes, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) - sig.RSASignature.bitLength = uint16(8 * len(sig.RSASignature.bytes)) - case PubKeyAlgoDSA: - dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) - - // Need to truncate hashBytes to match FIPS 186-3 section 4.6. - subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 - if len(digest) > subgroupSize { - digest = digest[:subgroupSize] - } - r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) - if err == nil { - sig.DSASigR.bytes = r.Bytes() - sig.DSASigR.bitLength = uint16(8 * len(sig.DSASigR.bytes)) - sig.DSASigS.bytes = s.Bytes() - sig.DSASigS.bitLength = uint16(8 * len(sig.DSASigS.bytes)) - } - case PubKeyAlgoECDSA: - var r, s *big.Int - if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { - // direct support, avoid asn1 wrapping/unwrapping - r, s, err = ecdsa.Sign(config.Random(), pk, digest) - } else { - var b []byte - b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, nil) - if err == nil { - r, s, err = unwrapECDSASig(b) - } - } - if err == nil { - sig.ECDSASigR = fromBig(r) - sig.ECDSASigS = fromBig(s) - } - default: - err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) - } - - return -} - -// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA -// signature. -func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { - var ecsdaSig struct { - R, S *big.Int - } - _, err = asn1.Unmarshal(b, &ecsdaSig) - if err != nil { - return - } - return ecsdaSig.R, ecsdaSig.S, nil -} - -// SignUserId computes a signature from priv, asserting that pub is a valid -// key for the identity id. On success, the signature is stored in sig. Call -// Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := userIdSignatureHash(id, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// SignKey computes a signature from priv, asserting that pub is a subkey. On -// success, the signature is stored in sig. Call Serialize to write it out. -// If config is nil, sensible defaults will be used. -func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { - h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) - if err != nil { - return err - } - return sig.Sign(h, priv, config) -} - -// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been -// called first. 
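Sign, SignUserId and SignKey above are typically reached through openpgp.ArmoredDetachSign, which hashes the message and then serializes the resulting signature packet. A sketch, assuming a keyring whose first entity holds a usable (decrypted) signing key:

package main

import (
	"os"
	"strings"

	"golang.org/x/crypto/openpgp"
)

func main() {
	f, err := os.Open("secring.asc") // placeholder keyring
	if err != nil {
		panic(err)
	}
	defer f.Close()
	entities, err := openpgp.ReadArmoredKeyRing(f)
	if err != nil {
		panic(err)
	}

	// ArmoredDetachSign hashes the message, calls Signature.Sign with the
	// entity's signing key, and writes the armored signature packet.
	msg := strings.NewReader("hello, world\n")
	if err := openpgp.ArmoredDetachSign(os.Stdout, entities[0], msg, nil); err != nil {
		panic(err)
	}
}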
-func (sig *Signature) Serialize(w io.Writer) (err error) { - if len(sig.outSubpackets) == 0 { - sig.outSubpackets = sig.rawSubpackets - } - if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil && sig.ECDSASigR.bytes == nil { - return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") - } - - sigLength := 0 - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - sigLength = 2 + len(sig.RSASignature.bytes) - case PubKeyAlgoDSA: - sigLength = 2 + len(sig.DSASigR.bytes) - sigLength += 2 + len(sig.DSASigS.bytes) - case PubKeyAlgoECDSA: - sigLength = 2 + len(sig.ECDSASigR.bytes) - sigLength += 2 + len(sig.ECDSASigS.bytes) - default: - panic("impossible") - } - - unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) - length := len(sig.HashSuffix) - 6 /* trailer not included */ + - 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + - 2 /* hash tag */ + sigLength - err = serializeHeader(w, packetTypeSignature, length) - if err != nil { - return - } - - _, err = w.Write(sig.HashSuffix[:len(sig.HashSuffix)-6]) - if err != nil { - return - } - - unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) - unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) - unhashedSubpackets[1] = byte(unhashedSubpacketsLen) - serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) - - _, err = w.Write(unhashedSubpackets) - if err != nil { - return - } - _, err = w.Write(sig.HashTag[:]) - if err != nil { - return - } - - switch sig.PubKeyAlgo { - case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: - err = writeMPIs(w, sig.RSASignature) - case PubKeyAlgoDSA: - err = writeMPIs(w, sig.DSASigR, sig.DSASigS) - case PubKeyAlgoECDSA: - err = writeMPIs(w, sig.ECDSASigR, sig.ECDSASigS) - default: - panic("impossible") - } - return -} - -// outputSubpacket represents a subpacket to be marshaled. -type outputSubpacket struct { - hashed bool // true if this subpacket is in the hashed area. - subpacketType signatureSubpacketType - isCritical bool - contents []byte -} - -func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) { - creationTime := make([]byte, 4) - binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) - subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) - - if sig.IssuerKeyId != nil { - keyId := make([]byte, 8) - binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) - subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, false, keyId}) - } - - if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { - sigLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) - } - - // Key flags may only appear in self-signatures or certification signatures. 
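The flag octet assembled just below packs the four KeyFlag bits defined at the top of this file. Reading such an octet back is the mirror image:

package main

import "fmt"

// The four flag bits from RFC 4880, section 5.2.3.21, matching the constants
// declared at the top of signature.go.
const (
	KeyFlagCertify = 1 << iota
	KeyFlagSign
	KeyFlagEncryptCommunications
	KeyFlagEncryptStorage
)

func main() {
	flags := byte(KeyFlagSign | KeyFlagEncryptCommunications)
	fmt.Println("certify:", flags&KeyFlagCertify != 0)
	fmt.Println("sign:   ", flags&KeyFlagSign != 0)
	fmt.Println("comms:  ", flags&KeyFlagEncryptCommunications != 0)
	fmt.Println("storage:", flags&KeyFlagEncryptStorage != 0)
}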
- - if sig.FlagsValid { - var flags byte - if sig.FlagCertify { - flags |= KeyFlagCertify - } - if sig.FlagSign { - flags |= KeyFlagSign - } - if sig.FlagEncryptCommunications { - flags |= KeyFlagEncryptCommunications - } - if sig.FlagEncryptStorage { - flags |= KeyFlagEncryptStorage - } - subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) - } - - // The following subpackets may only appear in self-signatures - - if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { - keyLifetime := make([]byte, 4) - binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) - subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) - } - - if sig.IsPrimaryId != nil && *sig.IsPrimaryId { - subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) - } - - if len(sig.PreferredSymmetric) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) - } - - if len(sig.PreferredHash) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) - } - - if len(sig.PreferredCompression) > 0 { - subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) - } - - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go b/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go deleted file mode 100644 index 6edff88934..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto" - "encoding/binary" - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/crypto/openpgp/errors" - "golang.org/x/crypto/openpgp/s2k" -) - -// SignatureV3 represents older version 3 signatures. These signatures are less secure -// than version 4 and should not be used to create new signatures. They are included -// here for backwards compatibility to read and validate with older key material. -// See RFC 4880, section 5.2.2. -type SignatureV3 struct { - SigType SignatureType - CreationTime time.Time - IssuerKeyId uint64 - PubKeyAlgo PublicKeyAlgorithm - Hash crypto.Hash - HashTag [2]byte - - RSASignature parsedMPI - DSASigR, DSASigS parsedMPI -} - -func (sig *SignatureV3) parse(r io.Reader) (err error) { - // RFC 4880, section 5.2.2 - var buf [8]byte - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] < 2 || buf[0] > 3 { - err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) - return - } - if _, err = readFull(r, buf[:1]); err != nil { - return - } - if buf[0] != 5 { - err = errors.UnsupportedError( - "invalid hashed material length " + strconv.Itoa(int(buf[0]))) - return - } - - // Read hashed material: signature type + creation time - if _, err = readFull(r, buf[:5]); err != nil { - return - } - sig.SigType = SignatureType(buf[0]) - t := binary.BigEndian.Uint32(buf[1:5]) - sig.CreationTime = time.Unix(int64(t), 0) - - // Eight-octet Key ID of signer. 
-	if _, err = readFull(r, buf[:8]); err != nil {
-		return
-	}
-	sig.IssuerKeyId = binary.BigEndian.Uint64(buf[:])
-
-	// Public-key and hash algorithm
-	if _, err = readFull(r, buf[:2]); err != nil {
-		return
-	}
-	sig.PubKeyAlgo = PublicKeyAlgorithm(buf[0])
-	switch sig.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA:
-	default:
-		err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo)))
-		return
-	}
-	var ok bool
-	if sig.Hash, ok = s2k.HashIdToHash(buf[1]); !ok {
-		return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[1])))
-	}
-
-	// Two-octet field holding left 16 bits of signed hash value.
-	if _, err = readFull(r, sig.HashTag[:2]); err != nil {
-		return
-	}
-
-	switch sig.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
-		sig.RSASignature.bytes, sig.RSASignature.bitLength, err = readMPI(r)
-	case PubKeyAlgoDSA:
-		if sig.DSASigR.bytes, sig.DSASigR.bitLength, err = readMPI(r); err != nil {
-			return
-		}
-		sig.DSASigS.bytes, sig.DSASigS.bitLength, err = readMPI(r)
-	default:
-		panic("unreachable")
-	}
-	return
-}
-
-// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been
-// called first.
-func (sig *SignatureV3) Serialize(w io.Writer) (err error) {
-	buf := make([]byte, 8)
-
-	// Write the sig type and creation time
-	buf[0] = byte(sig.SigType)
-	binary.BigEndian.PutUint32(buf[1:5], uint32(sig.CreationTime.Unix()))
-	if _, err = w.Write(buf[:5]); err != nil {
-		return
-	}
-
-	// Write the issuer long key ID
-	binary.BigEndian.PutUint64(buf[:8], sig.IssuerKeyId)
-	if _, err = w.Write(buf[:8]); err != nil {
-		return
-	}
-
-	// Write public key algorithm, hash ID, and hash value
-	buf[0] = byte(sig.PubKeyAlgo)
-	hashId, ok := s2k.HashToHashId(sig.Hash)
-	if !ok {
-		return errors.UnsupportedError(fmt.Sprintf("hash function %v", sig.Hash))
-	}
-	buf[1] = hashId
-	copy(buf[2:4], sig.HashTag[:])
-	if _, err = w.Write(buf[:4]); err != nil {
-		return
-	}
-
-	if sig.RSASignature.bytes == nil && sig.DSASigR.bytes == nil {
-		return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize")
-	}
-
-	switch sig.PubKeyAlgo {
-	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly:
-		err = writeMPIs(w, sig.RSASignature)
-	case PubKeyAlgoDSA:
-		err = writeMPIs(w, sig.DSASigR, sig.DSASigS)
-	default:
-		panic("impossible")
-	}
-	return
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
deleted file mode 100644
index 744c2d2c42..0000000000
--- a/vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
-	"bytes"
-	"crypto/cipher"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/errors"
-	"golang.org/x/crypto/openpgp/s2k"
-)
-
-// This is the largest session key that we'll support. Since no 512-bit cipher
-// has even been seriously used, this is comfortably large.
-const maxSessionKeySizeInBytes = 64
-
-// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC
-// 4880, section 5.3.
-type SymmetricKeyEncrypted struct { - CipherFunc CipherFunction - s2k func(out, in []byte) - encryptedKey []byte -} - -const symmetricKeyEncryptedVersion = 4 - -func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { - // RFC 4880, section 5.3. - var buf [2]byte - if _, err := readFull(r, buf[:]); err != nil { - return err - } - if buf[0] != symmetricKeyEncryptedVersion { - return errors.UnsupportedError("SymmetricKeyEncrypted version") - } - ske.CipherFunc = CipherFunction(buf[1]) - - if ske.CipherFunc.KeySize() == 0 { - return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) - } - - var err error - ske.s2k, err = s2k.Parse(r) - if err != nil { - return err - } - - encryptedKey := make([]byte, maxSessionKeySizeInBytes) - // The session key may follow. We just have to try and read to find - // out. If it exists then we limit it to maxSessionKeySizeInBytes. - n, err := readFull(r, encryptedKey) - if err != nil && err != io.ErrUnexpectedEOF { - return err - } - - if n != 0 { - if n == maxSessionKeySizeInBytes { - return errors.UnsupportedError("oversized encrypted session key") - } - ske.encryptedKey = encryptedKey[:n] - } - - return nil -} - -// Decrypt attempts to decrypt an encrypted session key and returns the key and -// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data -// packet. -func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { - key := make([]byte, ske.CipherFunc.KeySize()) - ske.s2k(key, passphrase) - - if len(ske.encryptedKey) == 0 { - return key, ske.CipherFunc, nil - } - - // the IV is all zeros - iv := make([]byte, ske.CipherFunc.blockSize()) - c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) - plaintextKey := make([]byte, len(ske.encryptedKey)) - c.XORKeyStream(plaintextKey, ske.encryptedKey) - cipherFunc := CipherFunction(plaintextKey[0]) - if cipherFunc.blockSize() == 0 { - return nil, ske.CipherFunc, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - plaintextKey = plaintextKey[1:] - if l, cipherKeySize := len(plaintextKey), cipherFunc.KeySize(); l != cipherFunc.KeySize() { - return nil, cipherFunc, errors.StructuralError("length of decrypted key (" + strconv.Itoa(l) + ") " + - "not equal to cipher keysize (" + strconv.Itoa(cipherKeySize) + ")") - } - return plaintextKey, cipherFunc, nil -} - -// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w. The -// packet contains a random session key, encrypted by a key derived from the -// given passphrase. The session key is returned and must be passed to -// SerializeSymmetricallyEncrypted. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { - cipherFunc := config.Cipher() - keySize := cipherFunc.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) - } - - s2kBuf := new(bytes.Buffer) - keyEncryptingKey := make([]byte, keySize) - // s2k.Serialize salts and stretches the passphrase, and writes the - // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
- err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) - if err != nil { - return - } - s2kBytes := s2kBuf.Bytes() - - packetLength := 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize - err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) - if err != nil { - return - } - - var buf [2]byte - buf[0] = symmetricKeyEncryptedVersion - buf[1] = byte(cipherFunc) - _, err = w.Write(buf[:]) - if err != nil { - return - } - _, err = w.Write(s2kBytes) - if err != nil { - return - } - - sessionKey := make([]byte, keySize) - _, err = io.ReadFull(config.Random(), sessionKey) - if err != nil { - return - } - iv := make([]byte, cipherFunc.blockSize()) - c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) - encryptedCipherAndKey := make([]byte, keySize+1) - c.XORKeyStream(encryptedCipherAndKey, buf[1:]) - c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) - _, err = w.Write(encryptedCipherAndKey) - if err != nil { - return - } - - key = sessionKey - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go deleted file mode 100644 index 6126030eb9..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go +++ /dev/null @@ -1,290 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "crypto/cipher" - "crypto/sha1" - "crypto/subtle" - "golang.org/x/crypto/openpgp/errors" - "hash" - "io" - "strconv" -) - -// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The -// encrypted contents will consist of more OpenPGP packets. See RFC 4880, -// sections 5.7 and 5.13. -type SymmetricallyEncrypted struct { - MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. - contents io.Reader - prefix []byte -} - -const symmetricallyEncryptedVersion = 1 - -func (se *SymmetricallyEncrypted) parse(r io.Reader) error { - if se.MDC { - // See RFC 4880, section 5.13. - var buf [1]byte - _, err := readFull(r, buf[:]) - if err != nil { - return err - } - if buf[0] != symmetricallyEncryptedVersion { - return errors.UnsupportedError("unknown SymmetricallyEncrypted version") - } - } - se.contents = r - return nil -} - -// Decrypt returns a ReadCloser, from which the decrypted contents of the -// packet can be read. An incorrect key can, with high probability, be detected -// immediately and this will result in a KeyIncorrect error being returned. -func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { - keySize := c.KeySize() - if keySize == 0 { - return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) - } - if len(key) != keySize { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") - } - - if se.prefix == nil { - se.prefix = make([]byte, c.blockSize()+2) - _, err := readFull(se.contents, se.prefix) - if err != nil { - return nil, err - } - } else if len(se.prefix) != c.blockSize()+2 { - return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") - } - - ocfbResync := OCFBResync - if se.MDC { - // MDC packets use a different form of OCFB mode. 
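SerializeSymmetricKeyEncrypted and the SymmetricallyEncrypted packets above are normally driven through the package's high-level API rather than directly. A minimal passphrase round-trip sketch (strings illustrative; panics in place of real error handling):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"

        "golang.org/x/crypto/openpgp"
    )

    func main() {
        passphrase := []byte("correct horse battery staple")

        var buf bytes.Buffer
        pt, err := openpgp.SymmetricallyEncrypt(&buf, passphrase, nil, nil)
        if err != nil {
            panic(err)
        }
        io.WriteString(pt, "hello")
        pt.Close()

        // The prompt callback supplies the passphrase on the read side.
        prompt := func(keys []openpgp.Key, symmetric bool) ([]byte, error) {
            return passphrase, nil
        }
        md, err := openpgp.ReadMessage(&buf, nil, prompt, nil)
        if err != nil {
            panic(err)
        }
        body, err := ioutil.ReadAll(md.UnverifiedBody) // draining also runs the MDC check
        if err != nil {
            panic(err)
        }
        fmt.Println(string(body)) // hello
    }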
- ocfbResync = OCFBNoResync - } - - s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) - if s == nil { - return nil, errors.ErrKeyIncorrect - } - - plaintext := cipher.StreamReader{S: s, R: se.contents} - - if se.MDC { - // MDC packets have an embedded hash that we need to check. - h := sha1.New() - h.Write(se.prefix) - return &seMDCReader{in: plaintext, h: h}, nil - } - - // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. - return seReader{plaintext}, nil -} - -// seReader wraps an io.Reader with a no-op Close method. -type seReader struct { - in io.Reader -} - -func (ser seReader) Read(buf []byte) (int, error) { - return ser.in.Read(buf) -} - -func (ser seReader) Close() error { - return nil -} - -const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size - -// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold -// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an -// MDC packet containing a hash of the previous contents which is checked -// against the running hash. See RFC 4880, section 5.13. -type seMDCReader struct { - in io.Reader - h hash.Hash - trailer [mdcTrailerSize]byte - scratch [mdcTrailerSize]byte - trailerUsed int - error bool - eof bool -} - -func (ser *seMDCReader) Read(buf []byte) (n int, err error) { - if ser.error { - err = io.ErrUnexpectedEOF - return - } - if ser.eof { - err = io.EOF - return - } - - // If we haven't yet filled the trailer buffer then we must do that - // first. - for ser.trailerUsed < mdcTrailerSize { - n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) - ser.trailerUsed += n - if err == io.EOF { - if ser.trailerUsed != mdcTrailerSize { - n = 0 - err = io.ErrUnexpectedEOF - ser.error = true - return - } - ser.eof = true - n = 0 - return - } - - if err != nil { - n = 0 - return - } - } - - // If it's a short read then we read into a temporary buffer and shift - // the data into the caller's buffer. - if len(buf) <= mdcTrailerSize { - n, err = readFull(ser.in, ser.scratch[:len(buf)]) - copy(buf, ser.trailer[:n]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], ser.trailer[n:]) - copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) - if n < len(buf) { - ser.eof = true - err = io.EOF - } - return - } - - n, err = ser.in.Read(buf[mdcTrailerSize:]) - copy(buf, ser.trailer[:]) - ser.h.Write(buf[:n]) - copy(ser.trailer[:], buf[n:]) - - if err == io.EOF { - ser.eof = true - } - return -} - -// This is a new-format packet tag byte for a type 19 (MDC) packet. -const mdcPacketTagByte = byte(0x80) | 0x40 | 19 - -func (ser *seMDCReader) Close() error { - if ser.error { - return errors.SignatureError("error during reading") - } - - for !ser.eof { - // We haven't seen EOF so we need to read to the end - var buf [1024]byte - _, err := ser.Read(buf[:]) - if err == io.EOF { - break - } - if err != nil { - return errors.SignatureError("error during reading") - } - } - - if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { - return errors.SignatureError("MDC packet not found") - } - ser.h.Write(ser.trailer[:2]) - - final := ser.h.Sum(nil) - if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { - return errors.SignatureError("hash mismatch") - } - return nil -} - -// An seMDCWriter writes through to an io.WriteCloser while maintains a running -// hash of the data written. On close, it emits an MDC packet containing the -// running hash. 
-type seMDCWriter struct { - w io.WriteCloser - h hash.Hash -} - -func (w *seMDCWriter) Write(buf []byte) (n int, err error) { - w.h.Write(buf) - return w.w.Write(buf) -} - -func (w *seMDCWriter) Close() (err error) { - var buf [mdcTrailerSize]byte - - buf[0] = mdcPacketTagByte - buf[1] = sha1.Size - w.h.Write(buf[:2]) - digest := w.h.Sum(nil) - copy(buf[2:], digest) - - _, err = w.w.Write(buf[:]) - if err != nil { - return - } - return w.w.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} - -// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet -// to w and returns a WriteCloser to which the to-be-encrypted packets can be -// written. -// If config is nil, sensible defaults will be used. -func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (contents io.WriteCloser, err error) { - if c.KeySize() != len(key) { - return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") - } - writeCloser := noOpCloser{w} - ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) - if err != nil { - return - } - - _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) - if err != nil { - return - } - - block := c.new(key) - blockSize := block.BlockSize() - iv := make([]byte, blockSize) - _, err = config.Random().Read(iv) - if err != nil { - return - } - s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) - _, err = ciphertext.Write(prefix) - if err != nil { - return - } - plaintext := cipher.StreamWriter{S: s, W: ciphertext} - - h := sha1.New() - h.Write(iv) - h.Write(iv[blockSize-2:]) - contents = &seMDCWriter{w: plaintext, h: h} - return -} diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go b/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go deleted file mode 100644 index 96a2b382a1..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/packet/userattribute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package packet - -import ( - "bytes" - "image" - "image/jpeg" - "io" - "io/ioutil" -) - -const UserAttrImageSubpacket = 1 - -// UserAttribute is capable of storing other types of data about a user -// beyond name, email and a text comment. In practice, user attributes are typically used -// to store a signed thumbnail photo JPEG image of the user. -// See RFC 4880, section 5.12. -type UserAttribute struct { - Contents []*OpaqueSubpacket -} - -// NewUserAttributePhoto creates a user attribute packet -// containing the given images. -func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { - uat = new(UserAttribute) - for _, photo := range photos { - var buf bytes.Buffer - // RFC 4880, Section 5.12.1. - data := []byte{ - 0x10, 0x00, // Little-endian image header length (16 bytes) - 0x01, // Image header version 1 - 0x01, // JPEG - 0, 0, 0, 0, // 12 reserved octets, must be all zero. 
-			0, 0, 0, 0,
-			0, 0, 0, 0}
-		if _, err = buf.Write(data); err != nil {
-			return
-		}
-		if err = jpeg.Encode(&buf, photo, nil); err != nil {
-			return
-		}
-		uat.Contents = append(uat.Contents, &OpaqueSubpacket{
-			SubType:  UserAttrImageSubpacket,
-			Contents: buf.Bytes()})
-	}
-	return
-}
-
-// NewUserAttribute creates a new user attribute packet containing the given subpackets.
-func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute {
-	return &UserAttribute{Contents: contents}
-}
-
-func (uat *UserAttribute) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.12
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return
-	}
-	uat.Contents, err = OpaqueSubpackets(b)
-	return
-}
-
-// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including
-// header.
-func (uat *UserAttribute) Serialize(w io.Writer) (err error) {
-	var buf bytes.Buffer
-	for _, sp := range uat.Contents {
-		sp.Serialize(&buf)
-	}
-	if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil {
-		return err
-	}
-	_, err = w.Write(buf.Bytes())
-	return
-}
-
-// ImageData returns zero or more byte slices, each containing
-// JPEG File Interchange Format (JFIF), for each photo in the
-// user attribute packet.
-func (uat *UserAttribute) ImageData() (imageData [][]byte) {
-	for _, sp := range uat.Contents {
-		if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 {
-			imageData = append(imageData, sp.Contents[16:])
-		}
-	}
-	return
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/packet/userid.go b/vendor/golang.org/x/crypto/openpgp/packet/userid.go
deleted file mode 100644
index d6bea7d4ac..0000000000
--- a/vendor/golang.org/x/crypto/openpgp/packet/userid.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packet
-
-import (
-	"io"
-	"io/ioutil"
-	"strings"
-)
-
-// UserId contains text that is intended to represent the name and email
-// address of the key holder. See RFC 4880, section 5.11. By convention, this
-// takes the form "Full Name (Comment) <email@example.com>"
-type UserId struct {
-	Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
-
-	Name, Comment, Email string
-}
-
-func hasInvalidCharacters(s string) bool {
-	for _, c := range s {
-		switch c {
-		case '(', ')', '<', '>', 0:
-			return true
-		}
-	}
-	return false
-}
-
-// NewUserId returns a UserId or nil if any of the arguments contain invalid
-// characters. The invalid characters are '\x00', '(', ')', '<' and '>'
-func NewUserId(name, comment, email string) *UserId {
-	// RFC 4880 doesn't deal with the structure of userid strings; the
-	// name, comment and email form is just a convention. However, there's
-	// no convention about escaping the metacharacters and GPG just refuses
-	// to create user ids where, say, the name contains a '('. We mirror
-	// this behaviour.
-
-	if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) {
-		return nil
-	}
-
-	uid := new(UserId)
-	uid.Name, uid.Comment, uid.Email = name, comment, email
-	uid.Id = name
-	if len(comment) > 0 {
-		if len(uid.Id) > 0 {
-			uid.Id += " "
-		}
-		uid.Id += "("
-		uid.Id += comment
-		uid.Id += ")"
-	}
-	if len(email) > 0 {
-		if len(uid.Id) > 0 {
-			uid.Id += " "
-		}
-		uid.Id += "<"
-		uid.Id += email
-		uid.Id += ">"
-	}
-	return uid
-}
-
-func (uid *UserId) parse(r io.Reader) (err error) {
-	// RFC 4880, section 5.11
-	b, err := ioutil.ReadAll(r)
-	if err != nil {
-		return
-	}
-	uid.Id = string(b)
-	uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id)
-	return
-}
-
-// Serialize marshals uid to w in the form of an OpenPGP packet, including
-// header.
-func (uid *UserId) Serialize(w io.Writer) error {
-	err := serializeHeader(w, packetTypeUserId, len(uid.Id))
-	if err != nil {
-		return err
-	}
-	_, err = w.Write([]byte(uid.Id))
-	return err
-}
-
-// parseUserId extracts the name, comment and email from a user id string that
-// is formatted as "Full Name (Comment) <email@example.com>".
-func parseUserId(id string) (name, comment, email string) {
-	var n, c, e struct {
-		start, end int
-	}
-	var state int
-
-	for offset, rune := range id {
-		switch state {
-		case 0:
-			// Entering name
-			n.start = offset
-			state = 1
-			fallthrough
-		case 1:
-			// In name
-			if rune == '(' {
-				state = 2
-				n.end = offset
-			} else if rune == '<' {
-				state = 5
-				n.end = offset
-			}
-		case 2:
-			// Entering comment
-			c.start = offset
-			state = 3
-			fallthrough
-		case 3:
-			// In comment
-			if rune == ')' {
-				state = 4
-				c.end = offset
-			}
-		case 4:
-			// Between comment and email
-			if rune == '<' {
-				state = 5
-			}
-		case 5:
-			// Entering email
-			e.start = offset
-			state = 6
-			fallthrough
-		case 6:
-			// In email
-			if rune == '>' {
-				state = 7
-				e.end = offset
-			}
-		default:
-			// After email
-		}
-	}
-	switch state {
-	case 1:
-		// ended in the name
-		n.end = len(id)
-	case 3:
-		// ended in comment
-		c.end = len(id)
-	case 6:
-		// ended in email
-		e.end = len(id)
-	}
-
-	name = strings.TrimSpace(id[n.start:n.end])
-	comment = strings.TrimSpace(id[c.start:c.end])
-	email = strings.TrimSpace(id[e.start:e.end])
-	return
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/read.go b/vendor/golang.org/x/crypto/openpgp/read.go
deleted file mode 100644
index 6ec664f44a..0000000000
--- a/vendor/golang.org/x/crypto/openpgp/read.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package openpgp implements high level operations on OpenPGP messages.
-package openpgp // import "golang.org/x/crypto/openpgp"
-
-import (
-	"crypto"
-	_ "crypto/sha256"
-	"hash"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/armor"
-	"golang.org/x/crypto/openpgp/errors"
-	"golang.org/x/crypto/openpgp/packet"
-)
-
-// SignatureType is the armor type for a PGP signature.
-var SignatureType = "PGP SIGNATURE"
-
-// readArmored reads an armored block with the given type.
-func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) {
-	block, err := armor.Decode(r)
-	if err != nil {
-		return
-	}
-
-	if block.Type != expectedType {
-		return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type)
-	}
-
-	return block.Body, nil
-}
-
-// MessageDetails contains the result of parsing an OpenPGP encrypted and/or
-// signed message.
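The user id and user attribute conventions above are easiest to see through the exported API. A small sketch (all values illustrative):

    package main

    import (
        "fmt"
        "image"

        "golang.org/x/crypto/openpgp/packet"
    )

    func main() {
        // User ID convention: "Full Name (Comment) <email>".
        uid := packet.NewUserId("Alice Example", "work", "alice@example.com")
        fmt.Println(uid.Id) // Alice Example (work) <alice@example.com>

        // Metacharacters in any field would make the id ambiguous, so nil comes back.
        fmt.Println(packet.NewUserId("Alice <oops>", "", "") == nil) // true

        // User attribute: wrap a tiny placeholder photo; ImageData strips the
        // 16-byte image header again, leaving the raw JPEG bytes.
        uat, err := packet.NewUserAttributePhoto(image.NewRGBA(image.Rect(0, 0, 8, 8)))
        if err != nil {
            panic(err)
        }
        for _, jpg := range uat.ImageData() {
            fmt.Printf("embedded JPEG: %d bytes\n", len(jpg))
        }
    }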
-type MessageDetails struct { - IsEncrypted bool // true if the message was encrypted. - EncryptedToKeyIds []uint64 // the list of recipient key ids. - IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. - DecryptedWith Key // the private key used to decrypt the message, if any. - IsSigned bool // true if the message is signed. - SignedByKeyId uint64 // the key id of the signer, if any. - SignedBy *Key // the key of the signer, if available. - LiteralData *packet.LiteralData // the metadata of the contents - UnverifiedBody io.Reader // the contents of the message. - - // If IsSigned is true and SignedBy is non-zero then the signature will - // be verified as UnverifiedBody is read. The signature cannot be - // checked until the whole of UnverifiedBody is read so UnverifiedBody - // must be consumed until EOF before the data can be trusted. Even if a - // message isn't signed (or the signer is unknown) the data may contain - // an authentication code that is only checked once UnverifiedBody has - // been consumed. Once EOF has been seen, the following fields are - // valid. (An authentication code failure is reported as a - // SignatureError error when reading from UnverifiedBody.) - SignatureError error // nil if the signature is good. - Signature *packet.Signature // the signature packet itself, if v4 (default) - SignatureV3 *packet.SignatureV3 // the signature packet if it is a v2 or v3 signature - - decrypted io.ReadCloser -} - -// A PromptFunction is used as a callback by functions that may need to decrypt -// a private key, or prompt for a passphrase. It is called with a list of -// acceptable, encrypted private keys and a boolean that indicates whether a -// passphrase is usable. It should either decrypt a private key or return a -// passphrase to try. If the decrypted private key or given passphrase isn't -// correct, the function will be called again, forever. Any error returned will -// be passed up. -type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) - -// A keyEnvelopePair is used to store a private key with the envelope that -// contains a symmetric key, encrypted with that key. -type keyEnvelopePair struct { - key Key - encryptedKey *packet.EncryptedKey -} - -// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. -// The given KeyRing should contain both public keys (for signature -// verification) and, possibly encrypted, private keys for decrypting. -// If config is nil, sensible defaults will be used. -func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { - var p packet.Packet - - var symKeys []*packet.SymmetricKeyEncrypted - var pubKeys []keyEnvelopePair - var se *packet.SymmetricallyEncrypted - - packets := packet.NewReader(r) - md = new(MessageDetails) - md.IsEncrypted = true - - // The message, if encrypted, starts with a number of packets - // containing an encrypted decryption key. The decryption key is either - // encrypted to a public key, or with a passphrase. This loop - // collects these packets. -ParsePackets: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.SymmetricKeyEncrypted: - // This packet contains the decryption key encrypted with a passphrase. - md.IsSymmetricallyEncrypted = true - symKeys = append(symKeys, p) - case *packet.EncryptedKey: - // This packet contains the decryption key encrypted to a public key. 
- md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) - switch p.Algo { - case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal: - break - default: - continue - } - var keys []Key - if p.KeyId == 0 { - keys = keyring.DecryptionKeys() - } else { - keys = keyring.KeysById(p.KeyId) - } - for _, k := range keys { - pubKeys = append(pubKeys, keyEnvelopePair{k, p}) - } - case *packet.SymmetricallyEncrypted: - se = p - break ParsePackets - case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: - // This message isn't encrypted. - if len(symKeys) != 0 || len(pubKeys) != 0 { - return nil, errors.StructuralError("key material not followed by encrypted message") - } - packets.Unread(p) - return readSignedMessage(packets, nil, keyring) - } - } - - var candidates []Key - var decrypted io.ReadCloser - - // Now that we have the list of encrypted keys we need to decrypt at - // least one of them or, if we cannot, we need to call the prompt - // function so that it can decrypt a key or give us a passphrase. -FindKey: - for { - // See if any of the keys already have a private key available - candidates = candidates[:0] - candidateFingerprints := make(map[string]bool) - - for _, pk := range pubKeys { - if pk.key.PrivateKey == nil { - continue - } - if !pk.key.PrivateKey.Encrypted { - if len(pk.encryptedKey.Key) == 0 { - pk.encryptedKey.Decrypt(pk.key.PrivateKey, config) - } - if len(pk.encryptedKey.Key) == 0 { - continue - } - decrypted, err = se.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - md.DecryptedWith = pk.key - break FindKey - } - } else { - fpr := string(pk.key.PublicKey.Fingerprint[:]) - if v := candidateFingerprints[fpr]; v { - continue - } - candidates = append(candidates, pk.key) - candidateFingerprints[fpr] = true - } - } - - if len(candidates) == 0 && len(symKeys) == 0 { - return nil, errors.ErrKeyIncorrect - } - - if prompt == nil { - return nil, errors.ErrKeyIncorrect - } - - passphrase, err := prompt(candidates, len(symKeys) != 0) - if err != nil { - return nil, err - } - - // Try the symmetric passphrase first - if len(symKeys) != 0 && passphrase != nil { - for _, s := range symKeys { - key, cipherFunc, err := s.Decrypt(passphrase) - if err == nil { - decrypted, err = se.Decrypt(cipherFunc, key) - if err != nil && err != errors.ErrKeyIncorrect { - return nil, err - } - if decrypted != nil { - break FindKey - } - } - - } - } - } - - md.decrypted = decrypted - if err := packets.Push(decrypted); err != nil { - return nil, err - } - return readSignedMessage(packets, md, keyring) -} - -// readSignedMessage reads a possibly signed message if mdin is non-zero then -// that structure is updated and returned. Otherwise a fresh MessageDetails is -// used. 
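The UnverifiedBody contract described above (consume to EOF before trusting the signature or MDC result) is easy to get wrong. A sketch of an encrypt-sign-read round trip against this API (entity parameters illustrative; panics in place of real error handling):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"

        "golang.org/x/crypto/openpgp"
    )

    func main() {
        ent, err := openpgp.NewEntity("Test User", "", "test@example.com", nil)
        if err != nil {
            panic(err)
        }
        keyring := openpgp.EntityList{ent}

        // Encrypt to ourselves and sign.
        var msg bytes.Buffer
        w, err := openpgp.Encrypt(&msg, keyring, ent, nil, nil)
        if err != nil {
            panic(err)
        }
        io.WriteString(w, "attack at dawn")
        w.Close()

        md, err := openpgp.ReadMessage(&msg, keyring, nil, nil)
        if err != nil {
            panic(err)
        }
        body, err := ioutil.ReadAll(md.UnverifiedBody) // must hit EOF first
        if err != nil {
            panic(err)
        }
        if md.SignatureError != nil { // only meaningful after EOF
            panic(md.SignatureError)
        }
        fmt.Printf("%q signed by key %X\n", body, md.SignedByKeyId)
    }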
-func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing) (md *MessageDetails, err error) { - if mdin == nil { - mdin = new(MessageDetails) - } - md = mdin - - var p packet.Packet - var h hash.Hash - var wrappedHash hash.Hash -FindLiteralData: - for { - p, err = packets.Next() - if err != nil { - return nil, err - } - switch p := p.(type) { - case *packet.Compressed: - if err := packets.Push(p.Body); err != nil { - return nil, err - } - case *packet.OnePassSignature: - if !p.IsLast { - return nil, errors.UnsupportedError("nested signatures") - } - - h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) - if err != nil { - md = nil - return - } - - md.IsSigned = true - md.SignedByKeyId = p.KeyId - keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) - if len(keys) > 0 { - md.SignedBy = &keys[0] - } - case *packet.LiteralData: - md.LiteralData = p - break FindLiteralData - } - } - - if md.SignedBy != nil { - md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md} - } else if md.decrypted != nil { - md.UnverifiedBody = checkReader{md} - } else { - md.UnverifiedBody = md.LiteralData.Body - } - - return md, nil -} - -// hashForSignature returns a pair of hashes that can be used to verify a -// signature. The signature may specify that the contents of the signed message -// should be preprocessed (i.e. to normalize line endings). Thus this function -// returns two hashes. The second should be used to hash the message itself and -// performs any needed preprocessing. -func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { - if !hashId.Available() { - return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) - } - h := hashId.New() - - switch sigType { - case packet.SigTypeBinary: - return h, h, nil - case packet.SigTypeText: - return h, NewCanonicalTextHash(h), nil - } - - return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) -} - -// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF -// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger -// MDC checks. -type checkReader struct { - md *MessageDetails -} - -func (cr checkReader) Read(buf []byte) (n int, err error) { - n, err = cr.md.LiteralData.Body.Read(buf) - if err == io.EOF { - mdcErr := cr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - return -} - -// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes -// the data as it is read. When it sees an EOF from the underlying io.Reader -// it parses and checks a trailing Signature packet and triggers any MDC checks. 
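hashForSignature's text mode wraps the hash in NewCanonicalTextHash, so LF and CRLF line endings hash identically. A quick sketch of that normalization:

    package main

    import (
        "crypto/sha256"
        "fmt"

        "golang.org/x/crypto/openpgp"
    )

    func main() {
        h1 := openpgp.NewCanonicalTextHash(sha256.New())
        h2 := openpgp.NewCanonicalTextHash(sha256.New())
        h1.Write([]byte("line one\nline two\n"))
        h2.Write([]byte("line one\r\nline two\r\n"))
        fmt.Printf("%x\n%x\n", h1.Sum(nil), h2.Sum(nil)) // identical digests
    }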
-type signatureCheckReader struct { - packets *packet.Reader - h, wrappedHash hash.Hash - md *MessageDetails -} - -func (scr *signatureCheckReader) Read(buf []byte) (n int, err error) { - n, err = scr.md.LiteralData.Body.Read(buf) - scr.wrappedHash.Write(buf[:n]) - if err == io.EOF { - var p packet.Packet - p, scr.md.SignatureError = scr.packets.Next() - if scr.md.SignatureError != nil { - return - } - - var ok bool - if scr.md.Signature, ok = p.(*packet.Signature); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) - } else if scr.md.SignatureV3, ok = p.(*packet.SignatureV3); ok { - scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignatureV3(scr.h, scr.md.SignatureV3) - } else { - scr.md.SignatureError = errors.StructuralError("LiteralData not followed by Signature") - return - } - - // The SymmetricallyEncrypted packet, if any, might have an - // unsigned hash of its own. In order to check this we need to - // close that Reader. - if scr.md.decrypted != nil { - mdcErr := scr.md.decrypted.Close() - if mdcErr != nil { - err = mdcErr - } - } - } - return -} - -// CheckDetachedSignature takes a signed file and a detached signature and -// returns the signer if the signature is valid. If the signer isn't known, -// ErrUnknownIssuer is returned. -func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - var issuerKeyId uint64 - var hashFunc crypto.Hash - var sigType packet.SignatureType - var keys []Key - var p packet.Packet - - packets := packet.NewReader(signature) - for { - p, err = packets.Next() - if err == io.EOF { - return nil, errors.ErrUnknownIssuer - } - if err != nil { - return nil, err - } - - switch sig := p.(type) { - case *packet.Signature: - if sig.IssuerKeyId == nil { - return nil, errors.StructuralError("signature doesn't have an issuer") - } - issuerKeyId = *sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - case *packet.SignatureV3: - issuerKeyId = sig.IssuerKeyId - hashFunc = sig.Hash - sigType = sig.SigType - default: - return nil, errors.StructuralError("non signature packet found") - } - - keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) - if len(keys) > 0 { - break - } - } - - if len(keys) == 0 { - panic("unreachable") - } - - h, wrappedHash, err := hashForSignature(hashFunc, sigType) - if err != nil { - return nil, err - } - - if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { - return nil, err - } - - for _, key := range keys { - switch sig := p.(type) { - case *packet.Signature: - err = key.PublicKey.VerifySignature(h, sig) - case *packet.SignatureV3: - err = key.PublicKey.VerifySignatureV3(h, sig) - default: - panic("unreachable") - } - - if err == nil { - return key.Entity, nil - } - } - - return nil, err -} - -// CheckArmoredDetachedSignature performs the same actions as -// CheckDetachedSignature but expects the signature to be armored. -func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader) (signer *Entity, err error) { - body, err := readArmored(signature, SignatureType) - if err != nil { - return - } - - return CheckDetachedSignature(keyring, signed, body) -} diff --git a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go b/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go deleted file mode 100644 index 4b9a44ca26..0000000000 --- a/vendor/golang.org/x/crypto/openpgp/s2k/s2k.go +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
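CheckArmoredDetachedSignature pairs with ArmoredDetachSign from write.go further down in this diff. A round-trip sketch (key parameters illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "strings"

        "golang.org/x/crypto/openpgp"
    )

    func main() {
        signer, err := openpgp.NewEntity("Test User", "", "test@example.com", nil)
        if err != nil {
            panic(err)
        }

        const message = "hello world"
        var sig bytes.Buffer
        if err := openpgp.ArmoredDetachSign(&sig, signer, strings.NewReader(message), nil); err != nil {
            panic(err)
        }

        ent, err := openpgp.CheckArmoredDetachedSignature(
            openpgp.EntityList{signer}, strings.NewReader(message), &sig)
        if err != nil {
            panic(err)
        }
        fmt.Printf("good signature from key %X\n", ent.PrimaryKey.KeyId)
    }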
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package s2k implements the various OpenPGP string-to-key transforms as
-// specified in RFC 4880 section 3.7.1.
-package s2k // import "golang.org/x/crypto/openpgp/s2k"
-
-import (
-	"crypto"
-	"hash"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/openpgp/errors"
-)
-
-// Config collects configuration parameters for s2k key-stretching
-// transformations. A nil *Config is valid and results in all default
-// values. Currently, Config is used only by the Serialize function in
-// this package.
-type Config struct {
-	// Hash is the default hash function to be used. If
-	// nil, SHA1 is used.
-	Hash crypto.Hash
-	// S2KCount is only used for symmetric encryption. It
-	// determines the strength of the passphrase stretching when
-	// the said passphrase is hashed to produce a key. S2KCount
-	// should be between 1024 and 65011712, inclusive. If Config
-	// is nil or S2KCount is 0, the value 65536 is used. Not all
-	// values in the above range can be represented. S2KCount will
-	// be rounded up to the next representable value if it cannot
-	// be encoded exactly. When set, it is strongly encouraged to
-	// use a value that is at least 65536. See RFC 4880 Section
-	// 3.7.1.3.
-	S2KCount int
-}
-
-func (c *Config) hash() crypto.Hash {
-	if c == nil || uint(c.Hash) == 0 {
-		// SHA1 is the historical default in this package.
-		return crypto.SHA1
-	}
-
-	return c.Hash
-}
-
-func (c *Config) encodedCount() uint8 {
-	if c == nil || c.S2KCount == 0 {
-		return 96 // The common case. Corresponding to 65536
-	}
-
-	i := c.S2KCount
-	switch {
-	// Behave like GPG. Should we make 65536 the lowest value used?
-	case i < 1024:
-		i = 1024
-	case i > 65011712:
-		i = 65011712
-	}
-
-	return encodeCount(i)
-}
-
-// encodeCount converts an iterative "count" in the range 1024 to
-// 65011712, inclusive, to an encoded count. The return value is the
-// octet that is actually stored in the GPG file. encodeCount panics
-// if i is not in the above range (encodedCount above takes care to
-// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
-func encodeCount(i int) uint8 {
-	if i < 1024 || i > 65011712 {
-		panic("count arg i outside the required range")
-	}
-
-	for encoded := 0; encoded < 256; encoded++ {
-		count := decodeCount(uint8(encoded))
-		if count >= i {
-			return uint8(encoded)
-		}
-	}
-
-	return 255
-}
-
-// decodeCount returns the s2k mode 3 iterative "count" corresponding to
-// the encoded octet c.
-func decodeCount(c uint8) int {
-	return (16 + int(c&15)) << (uint32(c>>4) + 6)
-}
-
-// Simple writes to out the result of computing the Simple S2K function (RFC
-// 4880, section 3.7.1.1) using the given hash and input passphrase.
-func Simple(out []byte, h hash.Hash, in []byte) {
-	Salted(out, h, in, nil)
-}
-
-var zero [1]byte
-
-// Salted writes to out the result of computing the Salted S2K function (RFC
-// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
-func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
-	done := 0
-	var digest []byte
-
-	for i := 0; done < len(out); i++ {
-		h.Reset()
-		for j := 0; j < i; j++ {
-			h.Write(zero[:])
-		}
-		h.Write(salt)
-		h.Write(in)
-		digest = h.Sum(digest[:0])
-		n := copy(out[done:], digest)
-		done += n
-	}
-}
-
-// Iterated writes to out the result of computing the Iterated and Salted S2K
-// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
-// salt and iteration count.
-func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) { - combined := make([]byte, len(in)+len(salt)) - copy(combined, salt) - copy(combined[len(salt):], in) - - if count < len(combined) { - count = len(combined) - } - - done := 0 - var digest []byte - for i := 0; done < len(out); i++ { - h.Reset() - for j := 0; j < i; j++ { - h.Write(zero[:]) - } - written := 0 - for written < count { - if written+len(combined) > count { - todo := count - written - h.Write(combined[:todo]) - written = count - } else { - h.Write(combined) - written += len(combined) - } - } - digest = h.Sum(digest[:0]) - n := copy(out[done:], digest) - done += n - } -} - -// Parse reads a binary specification for a string-to-key transformation from r -// and returns a function which performs that transform. -func Parse(r io.Reader) (f func(out, in []byte), err error) { - var buf [9]byte - - _, err = io.ReadFull(r, buf[:2]) - if err != nil { - return - } - - hash, ok := HashIdToHash(buf[1]) - if !ok { - return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(buf[1]))) - } - if !hash.Available() { - return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hash))) - } - h := hash.New() - - switch buf[0] { - case 0: - f := func(out, in []byte) { - Simple(out, h, in) - } - return f, nil - case 1: - _, err = io.ReadFull(r, buf[:8]) - if err != nil { - return - } - f := func(out, in []byte) { - Salted(out, h, in, buf[:8]) - } - return f, nil - case 3: - _, err = io.ReadFull(r, buf[:9]) - if err != nil { - return - } - count := decodeCount(buf[8]) - f := func(out, in []byte) { - Iterated(out, h, in, buf[:8], count) - } - return f, nil - } - - return nil, errors.UnsupportedError("S2K function") -} - -// Serialize salts and stretches the given passphrase and writes the -// resulting key into key. It also serializes an S2K descriptor to -// w. The key stretching can be configured with c, which may be -// nil. In that case, sensible defaults will be used. -func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error { - var buf [11]byte - buf[0] = 3 /* iterated and salted */ - buf[1], _ = HashToHashId(c.hash()) - salt := buf[2:10] - if _, err := io.ReadFull(rand, salt); err != nil { - return err - } - encodedCount := c.encodedCount() - count := decodeCount(encodedCount) - buf[10] = encodedCount - if _, err := w.Write(buf[:]); err != nil { - return err - } - - Iterated(key, c.hash().New(), passphrase, salt, count) - return nil -} - -// hashToHashIdMapping contains pairs relating OpenPGP's hash identifier with -// Go's crypto.Hash type. See RFC 4880, section 9.4. -var hashToHashIdMapping = []struct { - id byte - hash crypto.Hash - name string -}{ - {1, crypto.MD5, "MD5"}, - {2, crypto.SHA1, "SHA1"}, - {3, crypto.RIPEMD160, "RIPEMD160"}, - {8, crypto.SHA256, "SHA256"}, - {9, crypto.SHA384, "SHA384"}, - {10, crypto.SHA512, "SHA512"}, - {11, crypto.SHA224, "SHA224"}, -} - -// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP -// hash id. -func HashIdToHash(id byte) (h crypto.Hash, ok bool) { - for _, m := range hashToHashIdMapping { - if m.id == id { - return m.hash, true - } - } - return 0, false -} - -// HashIdToString returns the name of the hash function corresponding to the -// given OpenPGP hash id. 
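Serialize and Parse above are inverses: one writes an S2K descriptor and derives a key, the other reads the descriptor back and returns the matching derivation function. A round-trip sketch (the default count octet 96 decodes, per decodeCount, to (16+0)<<(6+6) = 65536 iterations):

    package main

    import (
        "bytes"
        "crypto"
        "crypto/rand"
        _ "crypto/sha256" // register SHA-256 so the hash is Available
        "fmt"

        "golang.org/x/crypto/openpgp/s2k"
    )

    func main() {
        passphrase := []byte("password")

        // Serialize writes an iterated+salted descriptor to desc and derives key1.
        var desc bytes.Buffer
        key1 := make([]byte, 32)
        cfg := &s2k.Config{Hash: crypto.SHA256}
        if err := s2k.Serialize(&desc, key1, rand.Reader, passphrase, cfg); err != nil {
            panic(err)
        }

        // Parse reads the descriptor back and returns the same derivation.
        f, err := s2k.Parse(&desc)
        if err != nil {
            panic(err)
        }
        key2 := make([]byte, 32)
        f(key2, passphrase)

        fmt.Println(bytes.Equal(key1, key2)) // true
    }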
-func HashIdToString(id byte) (name string, ok bool) {
-	for _, m := range hashToHashIdMapping {
-		if m.id == id {
-			return m.name, true
-		}
-	}
-
-	return "", false
-}
-
-// HashToHashId returns an OpenPGP hash id which corresponds to the given Hash.
-func HashToHashId(h crypto.Hash) (id byte, ok bool) {
-	for _, m := range hashToHashIdMapping {
-		if m.hash == h {
-			return m.id, true
-		}
-	}
-	return 0, false
-}
diff --git a/vendor/golang.org/x/crypto/openpgp/write.go b/vendor/golang.org/x/crypto/openpgp/write.go
deleted file mode 100644
index 65a304cc86..0000000000
--- a/vendor/golang.org/x/crypto/openpgp/write.go
+++ /dev/null
@@ -1,378 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package openpgp
-
-import (
-	"crypto"
-	"hash"
-	"io"
-	"strconv"
-	"time"
-
-	"golang.org/x/crypto/openpgp/armor"
-	"golang.org/x/crypto/openpgp/errors"
-	"golang.org/x/crypto/openpgp/packet"
-	"golang.org/x/crypto/openpgp/s2k"
-)
-
-// DetachSign signs message with the private key from signer (which must
-// already have been decrypted) and writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
-	return detachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// ArmoredDetachSign signs message with the private key from signer (which
-// must already have been decrypted) and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) {
-	return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config)
-}
-
-// DetachSignText signs message (after canonicalising the line endings) with
-// the private key from signer (which must already have been decrypted) and
-// writes the signature to w.
-// If config is nil, sensible defaults will be used.
-func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error {
-	return detachSign(w, signer, message, packet.SigTypeText, config)
-}
-
-// ArmoredDetachSignText signs message (after canonicalising the line endings)
-// with the private key from signer (which must already have been decrypted)
-// and writes an armored signature to w.
-// If config is nil, sensible defaults will be used.
-func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { - return armoredDetachSign(w, signer, message, packet.SigTypeText, config) -} - -func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - out, err := armor.Encode(w, SignatureType, nil) - if err != nil { - return - } - err = detachSign(out, signer, message, sigType, config) - if err != nil { - return - } - return out.Close() -} - -func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { - if signer.PrivateKey == nil { - return errors.InvalidArgumentError("signing key doesn't have a private key") - } - if signer.PrivateKey.Encrypted { - return errors.InvalidArgumentError("signing key is encrypted") - } - - sig := new(packet.Signature) - sig.SigType = sigType - sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo - sig.Hash = config.Hash() - sig.CreationTime = config.Now() - sig.IssuerKeyId = &signer.PrivateKey.KeyId - - h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) - if err != nil { - return - } - io.Copy(wrappedHash, message) - - err = sig.Sign(h, signer.PrivateKey, config) - if err != nil { - return - } - - return sig.Serialize(w) -} - -// FileHints contains metadata about encrypted files. This metadata is, itself, -// encrypted. -type FileHints struct { - // IsBinary can be set to hint that the contents are binary data. - IsBinary bool - // FileName hints at the name of the file that should be written. It's - // truncated to 255 bytes if longer. It may be empty to suggest that the - // file should not be written to disk. It may be equal to "_CONSOLE" to - // suggest the data should not be written to disk. - FileName string - // ModTime contains the modification time of the file, or the zero time if not applicable. - ModTime time.Time -} - -// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. -// The resulting WriteCloser must be closed after the contents of the file have -// been written. -// If config is nil, sensible defaults will be used. -func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - if hints == nil { - hints = &FileHints{} - } - - key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) - if err != nil { - return - } - w, err := packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) - if err != nil { - return - } - - literaldata := w - if algo := config.Compression(); algo != packet.CompressionNone { - var compConfig *packet.CompressionConfig - if config != nil { - compConfig = config.CompressionConfig - } - literaldata, err = packet.SerializeCompressed(w, algo, compConfig) - if err != nil { - return - } - } - - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - return packet.SerializeLiteral(literaldata, hints.IsBinary, hints.FileName, epochSeconds) -} - -// intersectPreferences mutates and returns a prefix of a that contains only -// the values in the intersection of a and b. The order of a is preserved. 
-func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { - var j int - for _, v := range a { - for _, v2 := range b { - if v == v2 { - a[j] = v - j++ - break - } - } - } - - return a[:j] -} - -func hashToHashId(h crypto.Hash) uint8 { - v, ok := s2k.HashToHashId(h) - if !ok { - panic("tried to convert unknown hash") - } - return v -} - -// Encrypt encrypts a message to a number of recipients and, optionally, signs -// it. hints contains optional information, that is also encrypted, that aids -// the recipients in processing the message. The resulting WriteCloser must -// be closed after the contents of the file have been written. -// If config is nil, sensible defaults will be used. -func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { - var signer *packet.PrivateKey - if signed != nil { - signKey, ok := signed.signingKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("no valid signing keys") - } - signer = signKey.PrivateKey - if signer == nil { - return nil, errors.InvalidArgumentError("no private key in signing key") - } - if signer.Encrypted { - return nil, errors.InvalidArgumentError("signing key must be decrypted") - } - } - - // These are the possible ciphers that we'll use for the message. - candidateCiphers := []uint8{ - uint8(packet.CipherAES128), - uint8(packet.CipherAES256), - uint8(packet.CipherCAST5), - } - // These are the possible hash functions that we'll use for the signature. - candidateHashes := []uint8{ - hashToHashId(crypto.SHA256), - hashToHashId(crypto.SHA512), - hashToHashId(crypto.SHA1), - hashToHashId(crypto.RIPEMD160), - } - // In the event that a recipient doesn't specify any supported ciphers - // or hash functions, these are the ones that we assume that every - // implementation supports. - defaultCiphers := candidateCiphers[len(candidateCiphers)-1:] - defaultHashes := candidateHashes[len(candidateHashes)-1:] - - encryptKeys := make([]Key, len(to)) - for i := range to { - var ok bool - encryptKeys[i], ok = to[i].encryptionKey(config.Now()) - if !ok { - return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") - } - - sig := to[i].primaryIdentity().SelfSignature - - preferredSymmetric := sig.PreferredSymmetric - if len(preferredSymmetric) == 0 { - preferredSymmetric = defaultCiphers - } - preferredHashes := sig.PreferredHash - if len(preferredHashes) == 0 { - preferredHashes = defaultHashes - } - candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) - candidateHashes = intersectPreferences(candidateHashes, preferredHashes) - } - - if len(candidateCiphers) == 0 || len(candidateHashes) == 0 { - return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") - } - - cipher := packet.CipherFunction(candidateCiphers[0]) - // If the cipher specified by config is a candidate, we'll use that. - configuredCipher := config.Cipher() - for _, c := range candidateCiphers { - cipherFunc := packet.CipherFunction(c) - if cipherFunc == configuredCipher { - cipher = cipherFunc - break - } - } - - var hash crypto.Hash - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { - hash = h - break - } - } - - // If the hash specified by config is a candidate, we'll use that. 
- if configuredHash := config.Hash(); configuredHash.Available() { - for _, hashId := range candidateHashes { - if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { - hash = h - break - } - } - } - - if hash == 0 { - hashId := candidateHashes[0] - name, ok := s2k.HashIdToString(hashId) - if !ok { - name = "#" + strconv.Itoa(int(hashId)) - } - return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") - } - - symKey := make([]byte, cipher.KeySize()) - if _, err := io.ReadFull(config.Random(), symKey); err != nil { - return nil, err - } - - for _, key := range encryptKeys { - if err := packet.SerializeEncryptedKey(ciphertext, key.PublicKey, cipher, symKey, config); err != nil { - return nil, err - } - } - - encryptedData, err := packet.SerializeSymmetricallyEncrypted(ciphertext, cipher, symKey, config) - if err != nil { - return - } - - if signer != nil { - ops := &packet.OnePassSignature{ - SigType: packet.SigTypeBinary, - Hash: hash, - PubKeyAlgo: signer.PubKeyAlgo, - KeyId: signer.KeyId, - IsLast: true, - } - if err := ops.Serialize(encryptedData); err != nil { - return nil, err - } - } - - if hints == nil { - hints = &FileHints{} - } - - w := encryptedData - if signer != nil { - // If we need to write a signature packet after the literal - // data then we need to stop literalData from closing - // encryptedData. - w = noOpCloser{encryptedData} - - } - var epochSeconds uint32 - if !hints.ModTime.IsZero() { - epochSeconds = uint32(hints.ModTime.Unix()) - } - literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) - if err != nil { - return nil, err - } - - if signer != nil { - return signatureWriter{encryptedData, literalData, hash, hash.New(), signer, config}, nil - } - return literalData, nil -} - -// signatureWriter hashes the contents of a message while passing it along to -// literalData. When closed, it closes literalData, writes a signature packet -// to encryptedData and then also closes encryptedData. -type signatureWriter struct { - encryptedData io.WriteCloser - literalData io.WriteCloser - hashType crypto.Hash - h hash.Hash - signer *packet.PrivateKey - config *packet.Config -} - -func (s signatureWriter) Write(data []byte) (int, error) { - s.h.Write(data) - return s.literalData.Write(data) -} - -func (s signatureWriter) Close() error { - sig := &packet.Signature{ - SigType: packet.SigTypeBinary, - PubKeyAlgo: s.signer.PubKeyAlgo, - Hash: s.hashType, - CreationTime: s.config.Now(), - IssuerKeyId: &s.signer.KeyId, - } - - if err := sig.Sign(s.h, s.signer, s.config); err != nil { - return err - } - if err := s.literalData.Close(); err != nil { - return err - } - if err := sig.Serialize(s.encryptedData); err != nil { - return err - } - return s.encryptedData.Close() -} - -// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. -// TODO: we have two of these in OpenPGP packages alone. This probably needs -// to be promoted somewhere more common. -type noOpCloser struct { - w io.Writer -} - -func (c noOpCloser) Write(data []byte) (n int, err error) { - return c.w.Write(data) -} - -func (c noOpCloser) Close() error { - return nil -} diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go new file mode 100644 index 0000000000..15e21b1812 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/asm.go @@ -0,0 +1,41 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
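The preference negotiation in Encrypt above boils down to repeated calls of the unexported intersectPreferences helper. A standalone sketch of the same logic (algorithm ids as in RFC 4880):

    package main

    import "fmt"

    // intersect mirrors intersectPreferences above: keep the elements of a,
    // in a's order, that also occur in b.
    func intersect(a, b []uint8) []uint8 {
        var j int
        for _, v := range a {
            for _, v2 := range b {
                if v == v2 {
                    a[j] = v
                    j++
                    break
                }
            }
        }
        return a[:j]
    }

    func main() {
        ours := []uint8{7, 9, 3} // AES-128, AES-256, CAST5
        theirs := []uint8{9, 3}  // a recipient's stated preferences
        fmt.Println(intersect(ours, theirs)) // [9 3]
    }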
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +import "fmt" + +// Assemble converts insts into raw instructions suitable for loading +// into a BPF virtual machine. +// +// Currently, no optimization is attempted, the assembled program flow +// is exactly as provided. +func Assemble(insts []Instruction) ([]RawInstruction, error) { + ret := make([]RawInstruction, len(insts)) + var err error + for i, inst := range insts { + ret[i], err = inst.Assemble() + if err != nil { + return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) + } + } + return ret, nil +} + +// Disassemble attempts to parse raw back into +// Instructions. Unrecognized RawInstructions are assumed to be an +// extension not implemented by this package, and are passed through +// unchanged to the output. The allDecoded value reports whether insts +// contains no RawInstructions. +func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { + insts = make([]Instruction, len(raw)) + allDecoded = true + for i, r := range raw { + insts[i] = r.Disassemble() + if _, ok := insts[i].(RawInstruction); ok { + allDecoded = false + } + } + return insts, allDecoded +} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go new file mode 100644 index 0000000000..b89ca35239 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/constants.go @@ -0,0 +1,218 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package bpf + +// A Register is a register of the BPF virtual machine. +type Register uint16 + +const ( + // RegA is the accumulator register. RegA is always the + // destination register of ALU operations. + RegA Register = iota + // RegX is the indirection register, used by LoadIndirect + // operations. + RegX +) + +// An ALUOp is an arithmetic or logic operation. +type ALUOp uint16 + +// ALU binary operation types. +const ( + ALUOpAdd ALUOp = iota << 4 + ALUOpSub + ALUOpMul + ALUOpDiv + ALUOpOr + ALUOpAnd + ALUOpShiftLeft + ALUOpShiftRight + aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. + ALUOpMod + ALUOpXor +) + +// A JumpTest is a comparison operator used in conditional jumps. +type JumpTest uint16 + +// Supported operators for conditional jumps. +const ( + // K == A + JumpEqual JumpTest = iota + // K != A + JumpNotEqual + // K > A + JumpGreaterThan + // K < A + JumpLessThan + // K >= A + JumpGreaterOrEqual + // K <= A + JumpLessOrEqual + // K & A != 0 + JumpBitsSet + // K & A == 0 + JumpBitsNotSet +) + +// An Extension is a function call provided by the kernel that +// performs advanced operations that are expensive or impossible +// within the BPF virtual machine. +// +// Extensions are only implemented by the Linux kernel. +// +// TODO: should we prune this list? Some of these extensions seem +// either broken or near-impossible to use correctly, whereas other +// (len, random, ifindex) are quite useful. +type Extension int + +// Extension functions available in the Linux kernel. +const ( + // extOffset is the negative maximum number of instructions used + // to load instructions by overloading the K argument. + extOffset = -0x1000 + // ExtLen returns the length of the packet. + ExtLen Extension = 1 + // ExtProto returns the packet's L3 protocol type. 
+	ExtProto Extension = 0
+	// ExtType returns the packet's type (skb->pkt_type in the kernel)
+	//
+	// TODO: better documentation. How nice an API do we want to
+	// provide for these esoteric extensions?
+	ExtType Extension = 4
+	// ExtPayloadOffset returns the offset of the packet payload, or
+	// the first protocol header that the kernel does not know how to
+	// parse.
+	ExtPayloadOffset Extension = 52
+	// ExtInterfaceIndex returns the index of the interface on which
+	// the packet was received.
+	ExtInterfaceIndex Extension = 8
+	// ExtNetlinkAttr returns the netlink attribute of type X at
+	// offset A.
+	ExtNetlinkAttr Extension = 12
+	// ExtNetlinkAttrNested returns the nested netlink attribute of
+	// type X at offset A.
+	ExtNetlinkAttrNested Extension = 16
+	// ExtMark returns the packet's mark value.
+	ExtMark Extension = 20
+	// ExtQueue returns the packet's assigned hardware queue.
+	ExtQueue Extension = 24
+	// ExtLinkLayerType returns the packet's hardware address type
+	// (e.g. Ethernet, Infiniband).
+	ExtLinkLayerType Extension = 28
+	// ExtRXHash returns the packet's receive hash.
+	//
+	// TODO: figure out what this rxhash actually is.
+	ExtRXHash Extension = 32
+	// ExtCPUID returns the ID of the CPU processing the current
+	// packet.
+	ExtCPUID Extension = 36
+	// ExtVLANTag returns the packet's VLAN tag.
+	ExtVLANTag Extension = 44
+	// ExtVLANTagPresent returns non-zero if the packet has a VLAN
+	// tag.
+	//
+	// TODO: I think this might be a lie: it reads bit 0x1000 of the
+	// VLAN header, which changed meaning in recent revisions of the
+	// spec - this extension may now return meaningless information.
+	ExtVLANTagPresent Extension = 48
+	// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
+	// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
+	// other value if no VLAN information is present.
+	ExtVLANProto Extension = 60
+	// ExtRand returns a uniformly random uint32.
+	ExtRand Extension = 56
+)
+
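+// On the wire, an extension is encoded as a 32-bit absolute load
+// whose K offset is extOffset plus the extension number. A minimal
+// sketch of that encoding (illustrative only, not part of the
+// upstream file):
+//
+//	// LoadExtension{Num: ExtMark} assembles to an absolute 4-byte
+//	// load at the "negative" offset extOffset + 20:
+//	ri, _ := LoadExtension{Num: ExtMark}.Assemble()
+//	// ri.Op == opClsLoadA|opLoadWidth4|opAddrModeAbsolute (0x20)
+//	// ri.K  == uint32(extOffset + 20) // 0xfffff014
+
+// The following gives names to various bit patterns used in opcode construction.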
+ +const ( + opMaskCls uint16 = 0x7 + // opClsLoad masks + opMaskLoadDest = 0x01 + opMaskLoadWidth = 0x18 + opMaskLoadMode = 0xe0 + // opClsALU + opMaskOperandSrc = 0x08 + opMaskOperator = 0xf0 + // opClsJump + opMaskJumpConst = 0x0f + opMaskJumpCond = 0xf0 +) + +const ( + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsLoadA uint16 = iota + // +---------------+-----------------+---+---+---+ + // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | + // +---------------+-----------------+---+---+---+ + opClsLoadX + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | + // +---+---+---+---+---+---+---+---+ + opClsStoreA + // +---+---+---+---+---+---+---+---+ + // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | + // +---+---+---+---+---+---+---+---+ + opClsStoreX + // +---------------+-----------------+---+---+---+ + // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | + // +---------------+-----------------+---+---+---+ + opClsALU + // +-----------------------------+---+---+---+---+ + // | TestOperator (4b) | 0 | 1 | 0 | 1 | + // +-----------------------------+---+---+---+---+ + opClsJump + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | + // +---+-------------------------+---+---+---+---+ + opClsReturn + // +---+-------------------------+---+---+---+---+ + // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | + // +---+-------------------------+---+---+---+---+ + opClsMisc +) + +const ( + opAddrModeImmediate uint16 = iota << 5 + opAddrModeAbsolute + opAddrModeIndirect + opAddrModeScratch + opAddrModePacketLen // actually an extension, not an addressing mode. + opAddrModeMemShift +) + +const ( + opLoadWidth4 uint16 = iota << 3 + opLoadWidth2 + opLoadWidth1 +) + +// Operator defined by ALUOp* + +const ( + opALUSrcConstant uint16 = iota << 3 + opALUSrcX +) + +const ( + opJumpAlways = iota << 4 + opJumpEqual + opJumpGT + opJumpGE + opJumpSet +) + +const ( + opRetSrcConstant uint16 = iota << 4 + opRetSrcA +) + +const ( + opMiscTAX = 0x00 + opMiscTXA = 0x80 +) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go new file mode 100644 index 0000000000..ae62feb534 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/doc.go @@ -0,0 +1,82 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + +Package bpf implements marshaling and unmarshaling of programs for the +Berkeley Packet Filter virtual machine, and provides a Go implementation +of the virtual machine. + +BPF's main use is to specify a packet filter for network taps, so that +the kernel doesn't have to expensively copy every packet it sees to +userspace. However, it's been repurposed to other areas where running +user code in-kernel is needed. For example, Linux's seccomp uses BPF +to apply security policies to system calls. For simplicity, this +documentation refers only to packets, but other uses of BPF have their +own data payloads. + +BPF programs run in a restricted virtual machine. It has almost no +access to kernel functions, and while conditional branches are +allowed, they can only jump forwards, to guarantee that there are no +infinite loops. + +The virtual machine + +The BPF VM is an accumulator machine. Its main register, called +register A, is an implicit source and destination in all arithmetic +and logic operations. 
The machine also has 16 scratch registers for
+temporary storage, and an indirection register (register X) for
+indirect memory access. All registers are 32 bits wide.
+
+Each run of a BPF program is given one packet, which is placed in the
+VM's read-only "main memory". LoadAbsolute and LoadIndirect
+instructions can fetch up to 32 bits at a time into register A for
+examination.
+
+The goal of a BPF program is to produce and return a verdict (uint32),
+which tells the kernel what to do with the packet. In the context of
+packet filtering, the returned value is the number of bytes of the
+packet to forward to userspace, or 0 to ignore the packet. Other
+contexts like seccomp define their own return values.
+
+In order to simplify programs, attempts to read past the end of the
+packet terminate the program execution with a verdict of 0 (ignore
+packet). This means that the vast majority of BPF programs don't need
+to do any explicit bounds checking.
+
+In addition to the bytes of the packet, some BPF programs have access
+to extensions, which are essentially calls to kernel utility
+functions. Currently, the only extensions supported by this package
+are the Linux packet filter extensions.
+
+Examples
+
+This packet filter selects all ARP packets.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Load "EtherType" field from the ethernet header.
+		bpf.LoadAbsolute{Off: 12, Size: 2},
+		// Skip over the next instruction if EtherType is not ARP.
+		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+		// Verdict is "send up to 4k of the packet to userspace."
+		bpf.RetConstant{Val: 4096},
+		// Verdict is "ignore packet."
+		bpf.RetConstant{Val: 0},
+	})
+
+This packet filter captures a random 1% sample of traffic.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Get a 32-bit random number from the Linux kernel.
+		bpf.LoadExtension{Num: bpf.ExtRand},
+		// 1% dice roll? (Note: ^ is XOR in Go, so the constant is
+		// spelled with a shift rather than "2^32".)
+		bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
+		// Capture.
+		bpf.RetConstant{Val: 4096},
+		// Ignore.
+		bpf.RetConstant{Val: 0},
+	})
+
+*/
+package bpf // import "golang.org/x/net/bpf"
diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go
new file mode 100644
index 0000000000..f9dc0e8ee7
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/instructions.go
@@ -0,0 +1,704 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// An Instruction is one instruction executed by the BPF virtual
+// machine.
+type Instruction interface {
+	// Assemble assembles the Instruction into a RawInstruction.
+	Assemble() (RawInstruction, error)
+}
+
+// A RawInstruction is a raw BPF virtual machine instruction.
+type RawInstruction struct {
+	// Operation to execute.
+	Op uint16
+	// For conditional jump instructions, the number of instructions
+	// to skip if the condition is true/false.
+	Jt uint8
+	Jf uint8
+	// Constant parameter. The meaning depends on the Op.
+	K uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
+
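+// As a quick sanity check of the two directions, an assembled
+// instruction should disassemble back to itself. A minimal sketch
+// (illustrative only, not part of the upstream file):
+//
+//	ins := LoadAbsolute{Off: 12, Size: 2}
+//	ri, err := ins.Assemble() // RawInstruction{Op: 0x28, K: 12}, i.e. "ldh [12]"
+//	if err == nil {
+//		_ = ri.Disassemble() // yields LoadAbsolute{Off: 12, Size: 2} again
+//	}
+
+// Disassemble parses ri into an Instruction and returns it. If ri is
+// not recognized by this package, ri itself is returned.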
+func (ri RawInstruction) Disassemble() Instruction { + switch ri.Op & opMaskCls { + case opClsLoadA, opClsLoadX: + reg := Register(ri.Op & opMaskLoadDest) + sz := 0 + switch ri.Op & opMaskLoadWidth { + case opLoadWidth4: + sz = 4 + case opLoadWidth2: + sz = 2 + case opLoadWidth1: + sz = 1 + default: + return ri + } + switch ri.Op & opMaskLoadMode { + case opAddrModeImmediate: + if sz != 4 { + return ri + } + return LoadConstant{Dst: reg, Val: ri.K} + case opAddrModeScratch: + if sz != 4 || ri.K > 15 { + return ri + } + return LoadScratch{Dst: reg, N: int(ri.K)} + case opAddrModeAbsolute: + if ri.K > extOffset+0xffffffff { + return LoadExtension{Num: Extension(-extOffset + ri.K)} + } + return LoadAbsolute{Size: sz, Off: ri.K} + case opAddrModeIndirect: + return LoadIndirect{Size: sz, Off: ri.K} + case opAddrModePacketLen: + if sz != 4 { + return ri + } + return LoadExtension{Num: ExtLen} + case opAddrModeMemShift: + return LoadMemShift{Off: ri.K} + default: + return ri + } + + case opClsStoreA: + if ri.Op != opClsStoreA || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegA, N: int(ri.K)} + + case opClsStoreX: + if ri.Op != opClsStoreX || ri.K > 15 { + return ri + } + return StoreScratch{Src: RegX, N: int(ri.K)} + + case opClsALU: + switch op := ALUOp(ri.Op & opMaskOperator); op { + case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: + if ri.Op&opMaskOperandSrc != 0 { + return ALUOpX{Op: op} + } + return ALUOpConstant{Op: op, Val: ri.K} + case aluOpNeg: + return NegateA{} + default: + return ri + } + + case opClsJump: + if ri.Op&opMaskJumpConst != opClsJump { + return ri + } + switch ri.Op & opMaskJumpCond { + case opJumpAlways: + return Jump{Skip: ri.K} + case opJumpEqual: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpNotEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGT: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessOrEqual, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterThan, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpGE: + if ri.Jt == 0 { + return JumpIf{ + Cond: JumpLessThan, + Val: ri.K, + SkipTrue: ri.Jf, + SkipFalse: 0, + } + } + return JumpIf{ + Cond: JumpGreaterOrEqual, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + case opJumpSet: + return JumpIf{ + Cond: JumpBitsSet, + Val: ri.K, + SkipTrue: ri.Jt, + SkipFalse: ri.Jf, + } + default: + return ri + } + + case opClsReturn: + switch ri.Op { + case opClsReturn | opRetSrcA: + return RetA{} + case opClsReturn | opRetSrcConstant: + return RetConstant{Val: ri.K} + default: + return ri + } + + case opClsMisc: + switch ri.Op { + case opClsMisc | opMiscTAX: + return TAX{} + case opClsMisc | opMiscTXA: + return TXA{} + default: + return ri + } + + default: + panic("unreachable") // switch is exhaustive on the bit pattern + } +} + +// LoadConstant loads Val into register Dst. +type LoadConstant struct { + Dst Register + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadConstant) Assemble() (RawInstruction, error) { + return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) +} + +// String returns the instruction in assembler notation. 
+func (a LoadConstant) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld #%d", a.Val) + case RegX: + return fmt.Sprintf("ldx #%d", a.Val) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadScratch loads scratch[N] into register Dst. +type LoadScratch struct { + Dst Register + N int // 0-15 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadScratch) Assemble() (RawInstruction, error) { + if a.N < 0 || a.N > 15 { + return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) + } + return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) +} + +// String returns the instruction in assembler notation. +func (a LoadScratch) String() string { + switch a.Dst { + case RegA: + return fmt.Sprintf("ld M[%d]", a.N) + case RegX: + return fmt.Sprintf("ldx M[%d]", a.N) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadAbsolute loads packet[Off:Off+Size] as an integer value into +// register A. +type LoadAbsolute struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadAbsolute) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadAbsolute) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [%d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [%d]", a.Off) + case 4: // word + if a.Off > extOffset+0xffffffff { + return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() + } + return fmt.Sprintf("ld [%d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value +// into register A. +type LoadIndirect struct { + Off uint32 + Size int // 1, 2 or 4 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadIndirect) Assemble() (RawInstruction, error) { + return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadIndirect) String() string { + switch a.Size { + case 1: // byte + return fmt.Sprintf("ldb [x + %d]", a.Off) + case 2: // half word + return fmt.Sprintf("ldh [x + %d]", a.Off) + case 4: // word + return fmt.Sprintf("ld [x + %d]", a.Off) + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] +// by 4 and stores the result in register X. +// +// This instruction is mainly useful to load into X the length of an +// IPv4 packet header in a single instruction, rather than have to do +// the arithmetic on the header's first byte by hand. +type LoadMemShift struct { + Off uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a LoadMemShift) Assemble() (RawInstruction, error) { + return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) +} + +// String returns the instruction in assembler notation. +func (a LoadMemShift) String() string { + return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) +} + +// LoadExtension invokes a linux-specific extension and stores the +// result in register A. +type LoadExtension struct { + Num Extension +} + +// Assemble implements the Instruction Assemble method. 
+func (a LoadExtension) Assemble() (RawInstruction, error) {
+	if a.Num == ExtLen {
+		return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
+	}
+	return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadExtension) String() string {
+	switch a.Num {
+	case ExtLen:
+		return "ld #len"
+	case ExtProto:
+		return "ld #proto"
+	case ExtType:
+		return "ld #type"
+	case ExtPayloadOffset:
+		return "ld #poff"
+	case ExtInterfaceIndex:
+		return "ld #ifidx"
+	case ExtNetlinkAttr:
+		return "ld #nla"
+	case ExtNetlinkAttrNested:
+		return "ld #nlan"
+	case ExtMark:
+		return "ld #mark"
+	case ExtQueue:
+		return "ld #queue"
+	case ExtLinkLayerType:
+		return "ld #hatype"
+	case ExtRXHash:
+		return "ld #rxhash"
+	case ExtCPUID:
+		return "ld #cpu"
+	case ExtVLANTag:
+		return "ld #vlan_tci"
+	case ExtVLANTagPresent:
+		return "ld #vlan_avail"
+	case ExtVLANProto:
+		return "ld #vlan_tpid"
+	case ExtRand:
+		return "ld #rand"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// StoreScratch stores register Src into scratch[N].
+type StoreScratch struct {
+	Src Register
+	N   int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a StoreScratch) Assemble() (RawInstruction, error) {
+	if a.N < 0 || a.N > 15 {
+		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+	}
+	var op uint16
+	switch a.Src {
+	case RegA:
+		op = opClsStoreA
+	case RegX:
+		op = opClsStoreX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
+	}
+
+	return RawInstruction{
+		Op: op,
+		K:  uint32(a.N),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a StoreScratch) String() string {
+	switch a.Src {
+	case RegA:
+		return fmt.Sprintf("st M[%d]", a.N)
+	case RegX:
+		return fmt.Sprintf("stx M[%d]", a.N)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpConstant executes A = A <op> Val.
+type ALUOpConstant struct {
+	Op  ALUOp
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpConstant) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcConstant | uint16(a.Op),
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpConstant) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return fmt.Sprintf("add #%d", a.Val)
+	case ALUOpSub:
+		return fmt.Sprintf("sub #%d", a.Val)
+	case ALUOpMul:
+		return fmt.Sprintf("mul #%d", a.Val)
+	case ALUOpDiv:
+		return fmt.Sprintf("div #%d", a.Val)
+	case ALUOpMod:
+		return fmt.Sprintf("mod #%d", a.Val)
+	case ALUOpAnd:
+		return fmt.Sprintf("and #%d", a.Val)
+	case ALUOpOr:
+		return fmt.Sprintf("or #%d", a.Val)
+	case ALUOpXor:
+		return fmt.Sprintf("xor #%d", a.Val)
+	case ALUOpShiftLeft:
+		return fmt.Sprintf("lsh #%d", a.Val)
+	case ALUOpShiftRight:
+		return fmt.Sprintf("rsh #%d", a.Val)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpX executes A = A <op> X
+type ALUOpX struct {
+	Op ALUOp
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpX) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcX | uint16(a.Op),
+	}, nil
+}
+
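+// Both ALU forms share the same operator encoding and differ only in
+// the operand-source bit. A minimal sketch of the two encodings
+// (illustrative only, not part of the upstream file):
+//
+//	// A = A & 0xff, constant operand:
+//	ri1, _ := ALUOpConstant{Op: ALUOpAnd, Val: 0xff}.Assemble()
+//	// A = A & X, register operand; only the operand-source bit differs:
+//	ri2, _ := ALUOpX{Op: ALUOpAnd}.Assemble()
+//	_ = ri1.Op ^ ri2.Op // == opALUSrcX (0x08)
+
+// String returns the instruction in assembler notation.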
+func (a ALUOpX) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return "add x"
+	case ALUOpSub:
+		return "sub x"
+	case ALUOpMul:
+		return "mul x"
+	case ALUOpDiv:
+		return "div x"
+	case ALUOpMod:
+		return "mod x"
+	case ALUOpAnd:
+		return "and x"
+	case ALUOpOr:
+		return "or x"
+	case ALUOpXor:
+		return "xor x"
+	case ALUOpShiftLeft:
+		return "lsh x"
+	case ALUOpShiftRight:
+		return "rsh x"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// NegateA executes A = -A.
+type NegateA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a NegateA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | uint16(aluOpNeg),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a NegateA) String() string {
+	return fmt.Sprintf("neg")
+}
+
+// Jump skips the following Skip instructions in the program.
+type Jump struct {
+	Skip uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a Jump) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsJump | opJumpAlways,
+		K:  a.Skip,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a Jump) String() string {
+	return fmt.Sprintf("ja %d", a.Skip)
+}
+
+// JumpIf skips the following Skip instructions in the program if A
+// <cond> Val is true.
+type JumpIf struct {
+	Cond      JumpTest
+	Val       uint32
+	SkipTrue  uint8
+	SkipFalse uint8
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a JumpIf) Assemble() (RawInstruction, error) {
+	var (
+		cond uint16
+		flip bool
+	)
+	switch a.Cond {
+	case JumpEqual:
+		cond = opJumpEqual
+	case JumpNotEqual:
+		cond, flip = opJumpEqual, true
+	case JumpGreaterThan:
+		cond = opJumpGT
+	case JumpLessThan:
+		cond, flip = opJumpGE, true
+	case JumpGreaterOrEqual:
+		cond = opJumpGE
+	case JumpLessOrEqual:
+		cond, flip = opJumpGT, true
+	case JumpBitsSet:
+		cond = opJumpSet
+	case JumpBitsNotSet:
+		cond, flip = opJumpSet, true
+	default:
+		return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
+	}
+	jt, jf := a.SkipTrue, a.SkipFalse
+	if flip {
+		jt, jf = jf, jt
+	}
+	return RawInstruction{
+		Op: opClsJump | cond,
+		Jt: jt,
+		Jf: jf,
+		K:  a.Val,
+	}, nil
+}
+
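+// Negated conditions (JumpNotEqual, JumpLessThan, JumpLessOrEqual,
+// JumpBitsNotSet) have no dedicated opcode, so Assemble encodes the
+// positive test and swaps the skip counts. A minimal sketch
+// (illustrative only, not part of the upstream file):
+//
+//	// "skip 1 if A != 0x45" assembles as "jeq" with Jt/Jf flipped:
+//	ri, _ := JumpIf{Cond: JumpNotEqual, Val: 0x45, SkipTrue: 1}.Assemble()
+//	// ri.Op == opClsJump|opJumpEqual, ri.Jt == 0, ri.Jf == 1
+
+// String returns the instruction in assembler notation.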
+func (a JumpIf) String() string { + switch a.Cond { + // K == A + case JumpEqual: + return conditionalJump(a, "jeq", "jneq") + // K != A + case JumpNotEqual: + return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue) + // K > A + case JumpGreaterThan: + return conditionalJump(a, "jgt", "jle") + // K < A + case JumpLessThan: + return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue) + // K >= A + case JumpGreaterOrEqual: + return conditionalJump(a, "jge", "jlt") + // K <= A + case JumpLessOrEqual: + return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue) + // K & A != 0 + case JumpBitsSet: + if a.SkipFalse > 0 { + return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse) + } + return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue) + // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert skips + case JumpBitsNotSet: + return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String() + default: + return fmt.Sprintf("unknown instruction: %#v", a) + } +} + +func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string { + if inst.SkipTrue > 0 { + if inst.SkipFalse > 0 { + return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse) + } + return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue) + } + return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse) +} + +// RetA exits the BPF program, returning the value of register A. +type RetA struct{} + +// Assemble implements the Instruction Assemble method. +func (a RetA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetA) String() string { + return fmt.Sprintf("ret a") +} + +// RetConstant exits the BPF program, returning a constant value. +type RetConstant struct { + Val uint32 +} + +// Assemble implements the Instruction Assemble method. +func (a RetConstant) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsReturn | opRetSrcConstant, + K: a.Val, + }, nil +} + +// String returns the instruction in assembler notation. +func (a RetConstant) String() string { + return fmt.Sprintf("ret #%d", a.Val) +} + +// TXA copies the value of register X to register A. +type TXA struct{} + +// Assemble implements the Instruction Assemble method. +func (a TXA) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTXA, + }, nil +} + +// String returns the instruction in assembler notation. +func (a TXA) String() string { + return fmt.Sprintf("txa") +} + +// TAX copies the value of register A to register X. +type TAX struct{} + +// Assemble implements the Instruction Assemble method. +func (a TAX) Assemble() (RawInstruction, error) { + return RawInstruction{ + Op: opClsMisc | opMiscTAX, + }, nil +} + +// String returns the instruction in assembler notation. 
+func (a TAX) String() string {
+	return fmt.Sprintf("tax")
+}
+
+func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
+	var (
+		cls uint16
+		sz  uint16
+	)
+	switch dst {
+	case RegA:
+		cls = opClsLoadA
+	case RegX:
+		cls = opClsLoadX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
+	}
+	switch loadSize {
+	case 1:
+		sz = opLoadWidth1
+	case 2:
+		sz = opLoadWidth2
+	case 4:
+		sz = opLoadWidth4
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid load byte length %d", loadSize)
+	}
+	return RawInstruction{
+		Op: cls | sz | mode,
+		K:  k,
+	}, nil
+}
diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go
new file mode 100644
index 0000000000..43e35f0ac2
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/setter.go
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Setter is a type which can attach a compiled BPF filter to itself.
+type Setter interface {
+	SetBPF(filter []RawInstruction) error
+}
diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go
new file mode 100644
index 0000000000..4c656f1e12
--- /dev/null
+++ b/vendor/golang.org/x/net/bpf/vm.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A VM is an emulated BPF virtual machine.
+type VM struct {
+	filter []Instruction
+}
+
+// NewVM returns a new VM using the input BPF program.
+func NewVM(filter []Instruction) (*VM, error) {
+	if len(filter) == 0 {
+		return nil, errors.New("one or more Instructions must be specified")
+	}
+
+	for i, ins := range filter {
+		check := len(filter) - (i + 1)
+		switch ins := ins.(type) {
+		// Check for out-of-bounds jumps in instructions
+		case Jump:
+			if check <= int(ins.Skip) {
+				return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
+			}
+		case JumpIf:
+			if check <= int(ins.SkipTrue) {
+				return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
+			}
+			if check <= int(ins.SkipFalse) {
+				return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
+			}
+		// Check for division or modulus by zero
+		case ALUOpConstant:
+			if ins.Val != 0 {
+				break
+			}
+
+			switch ins.Op {
+			case ALUOpDiv, ALUOpMod:
+				return nil, errors.New("cannot divide by zero using ALUOpConstant")
+			}
+		// Check for unknown extensions
+		case LoadExtension:
+			switch ins.Num {
+			case ExtLen:
+			default:
+				return nil, fmt.Errorf("extension %d not implemented", ins.Num)
+			}
+		}
+	}
+
+	// Make sure last instruction is a return instruction
+	switch filter[len(filter)-1].(type) {
+	case RetA, RetConstant:
+	default:
+		return nil, errors.New("BPF program must end with RetA or RetConstant")
+	}
+
+	// Though our VM works using disassembled instructions, we
+	// attempt to assemble the input filter anyway to ensure it is compatible
+	// with an operating system VM.
+	_, err := Assemble(filter)
+
+	return &VM{
+		filter: filter,
+	}, err
+}
+
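+// A minimal end-to-end sketch of the emulator (illustrative only,
+// not part of the upstream file):
+//
+//	vm, err := bpf.NewVM([]bpf.Instruction{
+//		// Load the first byte of the packet into A.
+//		bpf.LoadAbsolute{Off: 0, Size: 1},
+//		// Accept 4 bytes if it is 0x01, otherwise drop.
+//		bpf.JumpIf{Cond: bpf.JumpEqual, Val: 0x01, SkipFalse: 1},
+//		bpf.RetConstant{Val: 4},
+//		bpf.RetConstant{Val: 0},
+//	})
+//	if err != nil {
+//		// program failed validation
+//	}
+//	out, err := vm.Run([]byte{0x01, 0xff}) // out == 4
+
+// Run runs the VM's BPF program against the input bytes.
+// Run returns the number of bytes accepted by the BPF program, and any errors
+// which occurred while processing the program.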
+func (v *VM) Run(in []byte) (int, error) { + var ( + // Registers of the virtual machine + regA uint32 + regX uint32 + regScratch [16]uint32 + + // OK is true if the program should continue processing the next + // instruction, or false if not, causing the loop to break + ok = true + ) + + // TODO(mdlayher): implement: + // - NegateA: + // - would require a change from uint32 registers to int32 + // registers + + // TODO(mdlayher): add interop tests that check signedness of ALU + // operations against kernel implementation, and make sure Go + // implementation matches behavior + + for i := 0; i < len(v.filter) && ok; i++ { + ins := v.filter[i] + + switch ins := ins.(type) { + case ALUOpConstant: + regA = aluOpConstant(ins, regA) + case ALUOpX: + regA, ok = aluOpX(ins, regA, regX) + case Jump: + i += int(ins.Skip) + case JumpIf: + jump := jumpIf(ins, regA) + i += jump + case LoadAbsolute: + regA, ok = loadAbsolute(ins, in) + case LoadConstant: + regA, regX = loadConstant(ins, regA, regX) + case LoadExtension: + regA = loadExtension(ins, in) + case LoadIndirect: + regA, ok = loadIndirect(ins, in, regX) + case LoadMemShift: + regX, ok = loadMemShift(ins, in) + case LoadScratch: + regA, regX = loadScratch(ins, regScratch, regA, regX) + case RetA: + return int(regA), nil + case RetConstant: + return int(ins.Val), nil + case StoreScratch: + regScratch = storeScratch(ins, regScratch, regA, regX) + case TAX: + regX = regA + case TXA: + regA = regX + default: + return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) + } + } + + return 0, nil +} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go new file mode 100644 index 0000000000..516f9462b9 --- /dev/null +++ b/vendor/golang.org/x/net/bpf/vm_instructions.go @@ -0,0 +1,174 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package bpf + +import ( + "encoding/binary" + "fmt" +) + +func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { + return aluOpCommon(ins.Op, regA, ins.Val) +} + +func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { + // Guard against division or modulus by zero by terminating + // the program, as the OS BPF VM does + if regX == 0 { + switch ins.Op { + case ALUOpDiv, ALUOpMod: + return 0, false + } + } + + return aluOpCommon(ins.Op, regA, regX), true +} + +func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { + switch op { + case ALUOpAdd: + return regA + value + case ALUOpSub: + return regA - value + case ALUOpMul: + return regA * value + case ALUOpDiv: + // Division by zero not permitted by NewVM and aluOpX checks + return regA / value + case ALUOpOr: + return regA | value + case ALUOpAnd: + return regA & value + case ALUOpShiftLeft: + return regA << value + case ALUOpShiftRight: + return regA >> value + case ALUOpMod: + // Modulus by zero not permitted by NewVM and aluOpX checks + return regA % value + case ALUOpXor: + return regA ^ value + default: + return regA + } +} + +func jumpIf(ins JumpIf, value uint32) int { + var ok bool + inV := uint32(ins.Val) + + switch ins.Cond { + case JumpEqual: + ok = value == inV + case JumpNotEqual: + ok = value != inV + case JumpGreaterThan: + ok = value > inV + case JumpLessThan: + ok = value < inV + case JumpGreaterOrEqual: + ok = value >= inV + case JumpLessOrEqual: + ok = value <= inV + case JumpBitsSet: + ok = (value & inV) != 0 + case JumpBitsNotSet: + ok = (value & inV) == 0 + } + + if ok { + return int(ins.SkipTrue) + } + + return int(ins.SkipFalse) +} + +func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { + offset := int(ins.Off) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = ins.Val + case RegX: + regX = ins.Val + } + + return regA, regX +} + +func loadExtension(ins LoadExtension, in []byte) uint32 { + switch ins.Num { + case ExtLen: + return uint32(len(in)) + default: + panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) + } +} + +func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { + offset := int(ins.Off) + int(regX) + size := int(ins.Size) + + return loadCommon(in, offset, size) +} + +func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { + offset := int(ins.Off) + + if !inBounds(len(in), offset, 0) { + return 0, false + } + + // Mask off high 4 bits and multiply low 4 bits by 4 + return uint32(in[offset]&0x0f) * 4, true +} + +func inBounds(inLen int, offset int, size int) bool { + return offset+size <= inLen +} + +func loadCommon(in []byte, offset int, size int) (uint32, bool) { + if !inBounds(len(in), offset, size) { + return 0, false + } + + switch size { + case 1: + return uint32(in[offset]), true + case 2: + return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true + case 4: + return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true + default: + panic(fmt.Sprintf("invalid load size: %d", size)) + } +} + +func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) { + switch ins.Dst { + case RegA: + regA = regScratch[ins.N] + case RegX: + regX = regScratch[ins.N] + } + + return regA, regX +} + +func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { + switch ins.Src { + case RegA: + regScratch[ins.N] = 
regA + case RegX: + regScratch[ins.N] = regX + } + + return regScratch +} diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 0000000000..8255fd49b4 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// SniffedContentType reports whether ct is a Content-Type that is known +// to cause client-side content sniffing. +// +// This provides just a partial implementation of mime.ParseMediaType +// with the assumption that the Content-Type is not attacker controlled. +func SniffedContentType(ct string) bool { + if i := strings.Index(ct, ";"); i != -1 { + ct = ct[:i] + } + ct = strings.ToLower(strings.TrimSpace(ct)) + return ct == "text/plain" || ct == "application/octet-stream" || + ct == "application/unknown" || ct == "unknown/unknown" || ct == "*/*" || + !strings.Contains(ct, "/") +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. +// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go similarity index 97% rename from vendor/golang.org/x/net/lex/httplex/httplex.go rename to vendor/golang.org/x/net/http/httpguts/httplex.go index 20f2b8940b..e7de24ee64 100644 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -2,12 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. 
-package httplex +package httpguts import ( "net" diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 3b14890728..e32500779a 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -14,8 +14,8 @@ import ( "strings" "sync" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -1462,7 +1462,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if VerboseLogs && fr.logReads { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } - if !httplex.ValidHeaderFieldValue(hf.Value) { + if !httpguts.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 176644acda..166788ceec 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -389,6 +389,12 @@ func (d *Decoder) callEmit(hf HeaderField) error { // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. + if d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 71db28a873..c82428254a 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -29,7 +29,7 @@ import ( "strings" "sync" - "golang.org/x/net/lex/httplex" + "golang.org/x/net/http/httpguts" ) var ( @@ -179,7 +179,7 @@ var ( ) // validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. +// name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII @@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool { return false } for _, r := range v { - if !httplex.IsTokenRune(r) { + if !httpguts.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 39ed755a86..abf94e8d36 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -46,6 +46,7 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" ) @@ -1817,7 +1818,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { + if !httpguts.ValidTrailerHeader(key) { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. @@ -2284,7 +2285,7 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. 
func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { + if !httpguts.ValidTrailerHeader(k) { // Forbidden by RFC 7230, section 4.1.2. rws.conn.logf("ignoring invalid trailer %q", k) return @@ -2308,6 +2309,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true + var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") @@ -2321,10 +2323,33 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } + _, hasContentType := rws.snapHeader["Content-Type"] if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { - ctype = http.DetectContentType(p) + if cto := rws.snapHeader.Get("X-Content-Type-Options"); strings.EqualFold("nosniff", cto) { + // nosniff is an explicit directive not to guess a content-type. + // Content-sniffing is no less susceptible to polyglot attacks via + // hosted content when done on the server. + ctype = "application/octet-stream" + rws.conn.logf("http2: WriteHeader called with X-Content-Type-Options:nosniff but no Content-Type") + } else { + ctype = http.DetectContentType(p) + } } + + var noSniff bool + if bodyAllowedForStatus(rws.status) && (rws.sentContentLen > 0 || len(p) > 0) { + // If the content type triggers client-side sniffing on old browsers, + // attach a X-Content-Type-Options header if not present (or explicitly nil). + if _, ok := rws.snapHeader["X-Content-Type-Options"]; !ok { + if hasContentType { + noSniff = httpguts.SniffedContentType(rws.snapHeader.Get("Content-Type")) + } else if ctype != "" { + noSniff = httpguts.SniffedContentType(ctype) + } + } + } + var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. @@ -2343,6 +2368,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { endStream: endStream, contentType: ctype, contentLength: clen, + noSniff: noSniff, date: date, }) if err != nil { @@ -2838,41 +2864,6 @@ func new400Handler(err error) http.HandlerFunc { } } -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. 
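The writeChunk changes above tie the http2 server's Content-Type sniffing to X-Content-Type-Options, mirroring net/http. A minimal sketch of the two new behaviors, using hypothetical handlers and assuming the usual net/http and io imports (not code from this patch):

	// Served by an http2-enabled server.
	http.HandleFunc("/blob", func(w http.ResponseWriter, r *http.Request) {
		// nosniff set but no Content-Type: instead of sniffing the
		// payload with http.DetectContentType, the server now sends
		// Content-Type: application/octet-stream and logs a warning.
		w.Header().Set("X-Content-Type-Options", "nosniff")
		w.Write([]byte{0xde, 0xad, 0xbe, 0xef})
	})
	http.HandleFunc("/text", func(w http.ResponseWriter, r *http.Request) {
		// A sniffable Content-Type with no explicit directive: the
		// server appends x-content-type-options: nosniff itself.
		w.Header().Set("Content-Type", "text/plain")
		io.WriteString(w, "hello")
	})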
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index e6b321f4bb..d23a226251 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -27,9 +27,9 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" ) const ( @@ -567,6 +567,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + if t.AllowHTTP { + cc.nextStreamID = 3 + } + if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -951,6 +955,9 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { for { cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } return errClientConnUnusable } if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { @@ -1174,7 +1181,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if host == "" { host = req.URL.Host } - host, err := httplex.PunycodeHostPort(host) + host, err := httpguts.PunycodeHostPort(host) if err != nil { return nil, err } @@ -1199,11 +1206,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } @@ -2244,7 +2251,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body } s.delay = t.expectContinueTimeout() if s.delay == 0 || - !httplex.HeaderValuesContainsToken( + !httpguts.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return @@ -2299,5 +2306,5 @@ func (s bodyWriterState) scheduleBodyWrite() { // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") } diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 54ab4a88e7..a5120412e6 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -11,8 +11,8 @@ import ( "net/http" "net/url" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. 
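For context on the AllowHTTP hunk in the transport diff above: in the HTTP/1.1 Upgrade handshake (RFC 7540, section 3.2) the upgraded request occupies stream 1, so a cleartext connection starts allocating client stream IDs at 3. A client sketch under that assumption, with req standing in for an *http.Request to an http:// URL (not code from this patch):

	tr := &http2.Transport{
		// Allow plain "http://" URLs.
		AllowHTTP: true,
		// h2c: dial raw TCP even though the hook is named DialTLS.
		DialTLS: func(network, addr string, _ *tls.Config) (net.Conn, error) {
			return net.Dial(network, addr)
		},
	}
	resp, err := tr.RoundTrip(req)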
@@ -186,6 +186,7 @@ type writeResHeaders struct { date string contentType string contentLength string + noSniff bool } func encKV(enc *hpack.Encoder, k, v string) { @@ -222,6 +223,9 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { if w.contentLength != "" { encKV(enc, "content-length", w.contentLength) } + if w.noSniff { + encKV(enc, "x-content-type-options", "nosniff") + } if w.date != "" { encKV(enc, "date", w.date) } @@ -350,7 +354,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } isTE := k == "transfer-encoding" for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { // TODO: return an error? golang.org/issue/14048 // For now just omit it. continue diff --git a/vendor/golang.org/x/net/internal/iana/const.go b/vendor/golang.org/x/net/internal/iana/const.go new file mode 100644 index 0000000000..826633e1b8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/const.go @@ -0,0 +1,227 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. + +// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA). +package iana // import "golang.org/x/net/internal/iana" + +// Differentiated Services Field Codepoints (DSCP), Updated: 2017-05-12 +const ( + DiffServCS0 = 0x0 // CS0 + DiffServCS1 = 0x20 // CS1 + DiffServCS2 = 0x40 // CS2 + DiffServCS3 = 0x60 // CS3 + DiffServCS4 = 0x80 // CS4 + DiffServCS5 = 0xa0 // CS5 + DiffServCS6 = 0xc0 // CS6 + DiffServCS7 = 0xe0 // CS7 + DiffServAF11 = 0x28 // AF11 + DiffServAF12 = 0x30 // AF12 + DiffServAF13 = 0x38 // AF13 + DiffServAF21 = 0x48 // AF21 + DiffServAF22 = 0x50 // AF22 + DiffServAF23 = 0x58 // AF23 + DiffServAF31 = 0x68 // AF31 + DiffServAF32 = 0x70 // AF32 + DiffServAF33 = 0x78 // AF33 + DiffServAF41 = 0x88 // AF41 + DiffServAF42 = 0x90 // AF42 + DiffServAF43 = 0x98 // AF43 + DiffServEF = 0xb8 // EF + DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT +) + +// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06 +const ( + NotECNTransport = 0x0 // Not-ECT (Not ECN-Capable Transport) + ECNTransport1 = 0x1 // ECT(1) (ECN-Capable Transport(1)) + ECNTransport0 = 0x2 // ECT(0) (ECN-Capable Transport(0)) + CongestionExperienced = 0x3 // CE (Congestion Experienced) +) + +// Protocol Numbers, Updated: 2017-10-13 +const ( + ProtocolIP = 0 // IPv4 encapsulation, pseudo protocol number + ProtocolHOPOPT = 0 // IPv6 Hop-by-Hop Option + ProtocolICMP = 1 // Internet Control Message + ProtocolIGMP = 2 // Internet Group Management + ProtocolGGP = 3 // Gateway-to-Gateway + ProtocolIPv4 = 4 // IPv4 encapsulation + ProtocolST = 5 // Stream + ProtocolTCP = 6 // Transmission Control + ProtocolCBT = 7 // CBT + ProtocolEGP = 8 // Exterior Gateway Protocol + ProtocolIGP = 9 // any private interior gateway (used by Cisco for their IGRP) + ProtocolBBNRCCMON = 10 // BBN RCC Monitoring + ProtocolNVPII = 11 // Network Voice Protocol + ProtocolPUP = 12 // PUP + ProtocolEMCON = 14 // EMCON + ProtocolXNET = 15 // Cross Net Debugger + ProtocolCHAOS = 16 // Chaos + ProtocolUDP = 17 // User Datagram + ProtocolMUX = 18 // Multiplexing + ProtocolDCNMEAS = 19 // DCN Measurement Subsystems + ProtocolHMP = 20 // Host Monitoring + ProtocolPRM = 21 // Packet Radio Measurement + ProtocolXNSIDP = 22 // XEROX NS IDP + ProtocolTRUNK1 = 23 // Trunk-1 + ProtocolTRUNK2 = 24 // Trunk-2 + ProtocolLEAF1 = 25 // Leaf-1 + ProtocolLEAF2 = 26 // Leaf-2 + ProtocolRDP = 27 // Reliable Data Protocol + ProtocolIRTP = 28 // Internet Reliable 
Transaction + ProtocolISOTP4 = 29 // ISO Transport Protocol Class 4 + ProtocolNETBLT = 30 // Bulk Data Transfer Protocol + ProtocolMFENSP = 31 // MFE Network Services Protocol + ProtocolMERITINP = 32 // MERIT Internodal Protocol + ProtocolDCCP = 33 // Datagram Congestion Control Protocol + Protocol3PC = 34 // Third Party Connect Protocol + ProtocolIDPR = 35 // Inter-Domain Policy Routing Protocol + ProtocolXTP = 36 // XTP + ProtocolDDP = 37 // Datagram Delivery Protocol + ProtocolIDPRCMTP = 38 // IDPR Control Message Transport Proto + ProtocolTPPP = 39 // TP++ Transport Protocol + ProtocolIL = 40 // IL Transport Protocol + ProtocolIPv6 = 41 // IPv6 encapsulation + ProtocolSDRP = 42 // Source Demand Routing Protocol + ProtocolIPv6Route = 43 // Routing Header for IPv6 + ProtocolIPv6Frag = 44 // Fragment Header for IPv6 + ProtocolIDRP = 45 // Inter-Domain Routing Protocol + ProtocolRSVP = 46 // Reservation Protocol + ProtocolGRE = 47 // Generic Routing Encapsulation + ProtocolDSR = 48 // Dynamic Source Routing Protocol + ProtocolBNA = 49 // BNA + ProtocolESP = 50 // Encap Security Payload + ProtocolAH = 51 // Authentication Header + ProtocolINLSP = 52 // Integrated Net Layer Security TUBA + ProtocolNARP = 54 // NBMA Address Resolution Protocol + ProtocolMOBILE = 55 // IP Mobility + ProtocolTLSP = 56 // Transport Layer Security Protocol using Kryptonet key management + ProtocolSKIP = 57 // SKIP + ProtocolIPv6ICMP = 58 // ICMP for IPv6 + ProtocolIPv6NoNxt = 59 // No Next Header for IPv6 + ProtocolIPv6Opts = 60 // Destination Options for IPv6 + ProtocolCFTP = 62 // CFTP + ProtocolSATEXPAK = 64 // SATNET and Backroom EXPAK + ProtocolKRYPTOLAN = 65 // Kryptolan + ProtocolRVD = 66 // MIT Remote Virtual Disk Protocol + ProtocolIPPC = 67 // Internet Pluribus Packet Core + ProtocolSATMON = 69 // SATNET Monitoring + ProtocolVISA = 70 // VISA Protocol + ProtocolIPCV = 71 // Internet Packet Core Utility + ProtocolCPNX = 72 // Computer Protocol Network Executive + ProtocolCPHB = 73 // Computer Protocol Heart Beat + ProtocolWSN = 74 // Wang Span Network + ProtocolPVP = 75 // Packet Video Protocol + ProtocolBRSATMON = 76 // Backroom SATNET Monitoring + ProtocolSUNND = 77 // SUN ND PROTOCOL-Temporary + ProtocolWBMON = 78 // WIDEBAND Monitoring + ProtocolWBEXPAK = 79 // WIDEBAND EXPAK + ProtocolISOIP = 80 // ISO Internet Protocol + ProtocolVMTP = 81 // VMTP + ProtocolSECUREVMTP = 82 // SECURE-VMTP + ProtocolVINES = 83 // VINES + ProtocolTTP = 84 // Transaction Transport Protocol + ProtocolIPTM = 84 // Internet Protocol Traffic Manager + ProtocolNSFNETIGP = 85 // NSFNET-IGP + ProtocolDGP = 86 // Dissimilar Gateway Protocol + ProtocolTCF = 87 // TCF + ProtocolEIGRP = 88 // EIGRP + ProtocolOSPFIGP = 89 // OSPFIGP + ProtocolSpriteRPC = 90 // Sprite RPC Protocol + ProtocolLARP = 91 // Locus Address Resolution Protocol + ProtocolMTP = 92 // Multicast Transport Protocol + ProtocolAX25 = 93 // AX.25 Frames + ProtocolIPIP = 94 // IP-within-IP Encapsulation Protocol + ProtocolSCCSP = 96 // Semaphore Communications Sec. Pro. 
+ ProtocolETHERIP = 97 // Ethernet-within-IP Encapsulation + ProtocolENCAP = 98 // Encapsulation Header + ProtocolGMTP = 100 // GMTP + ProtocolIFMP = 101 // Ipsilon Flow Management Protocol + ProtocolPNNI = 102 // PNNI over IP + ProtocolPIM = 103 // Protocol Independent Multicast + ProtocolARIS = 104 // ARIS + ProtocolSCPS = 105 // SCPS + ProtocolQNX = 106 // QNX + ProtocolAN = 107 // Active Networks + ProtocolIPComp = 108 // IP Payload Compression Protocol + ProtocolSNP = 109 // Sitara Networks Protocol + ProtocolCompaqPeer = 110 // Compaq Peer Protocol + ProtocolIPXinIP = 111 // IPX in IP + ProtocolVRRP = 112 // Virtual Router Redundancy Protocol + ProtocolPGM = 113 // PGM Reliable Transport Protocol + ProtocolL2TP = 115 // Layer Two Tunneling Protocol + ProtocolDDX = 116 // D-II Data Exchange (DDX) + ProtocolIATP = 117 // Interactive Agent Transfer Protocol + ProtocolSTP = 118 // Schedule Transfer Protocol + ProtocolSRP = 119 // SpectraLink Radio Protocol + ProtocolUTI = 120 // UTI + ProtocolSMP = 121 // Simple Message Protocol + ProtocolPTP = 123 // Performance Transparency Protocol + ProtocolISIS = 124 // ISIS over IPv4 + ProtocolFIRE = 125 // FIRE + ProtocolCRTP = 126 // Combat Radio Transport Protocol + ProtocolCRUDP = 127 // Combat Radio User Datagram + ProtocolSSCOPMCE = 128 // SSCOPMCE + ProtocolIPLT = 129 // IPLT + ProtocolSPS = 130 // Secure Packet Shield + ProtocolPIPE = 131 // Private IP Encapsulation within IP + ProtocolSCTP = 132 // Stream Control Transmission Protocol + ProtocolFC = 133 // Fibre Channel + ProtocolRSVPE2EIGNORE = 134 // RSVP-E2E-IGNORE + ProtocolMobilityHeader = 135 // Mobility Header + ProtocolUDPLite = 136 // UDPLite + ProtocolMPLSinIP = 137 // MPLS-in-IP + ProtocolMANET = 138 // MANET Protocols + ProtocolHIP = 139 // Host Identity Protocol + ProtocolShim6 = 140 // Shim6 Protocol + ProtocolWESP = 141 // Wrapped Encapsulating Security Payload + ProtocolROHC = 142 // Robust Header Compression + ProtocolReserved = 255 // Reserved +) + +// Address Family Numbers, Updated: 2016-10-25 +const ( + AddrFamilyIPv4 = 1 // IP (IP version 4) + AddrFamilyIPv6 = 2 // IP6 (IP version 6) + AddrFamilyNSAP = 3 // NSAP + AddrFamilyHDLC = 4 // HDLC (8-bit multidrop) + AddrFamilyBBN1822 = 5 // BBN 1822 + AddrFamily802 = 6 // 802 (includes all 802 media plus Ethernet "canonical format") + AddrFamilyE163 = 7 // E.163 + AddrFamilyE164 = 8 // E.164 (SMDS, Frame Relay, ATM) + AddrFamilyF69 = 9 // F.69 (Telex) + AddrFamilyX121 = 10 // X.121 (X.25, Frame Relay) + AddrFamilyIPX = 11 // IPX + AddrFamilyAppletalk = 12 // Appletalk + AddrFamilyDecnetIV = 13 // Decnet IV + AddrFamilyBanyanVines = 14 // Banyan Vines + AddrFamilyE164withSubaddress = 15 // E.164 with NSAP format subaddress + AddrFamilyDNS = 16 // DNS (Domain Name System) + AddrFamilyDistinguishedName = 17 // Distinguished Name + AddrFamilyASNumber = 18 // AS Number + AddrFamilyXTPoverIPv4 = 19 // XTP over IP version 4 + AddrFamilyXTPoverIPv6 = 20 // XTP over IP version 6 + AddrFamilyXTPnativemodeXTP = 21 // XTP native mode XTP + AddrFamilyFibreChannelWorldWidePortName = 22 // Fibre Channel World-Wide Port Name + AddrFamilyFibreChannelWorldWideNodeName = 23 // Fibre Channel World-Wide Node Name + AddrFamilyGWID = 24 // GWID + AddrFamilyL2VPN = 25 // AFI for L2VPN information + AddrFamilyMPLSTPSectionEndpointID = 26 // MPLS-TP Section Endpoint Identifier + AddrFamilyMPLSTPLSPEndpointID = 27 // MPLS-TP LSP Endpoint Identifier + AddrFamilyMPLSTPPseudowireEndpointID = 28 // MPLS-TP Pseudowire Endpoint Identifier + 
AddrFamilyMTIPv4 = 29 // MT IP: Multi-Topology IP version 4 + AddrFamilyMTIPv6 = 30 // MT IPv6: Multi-Topology IP version 6 + AddrFamilyEIGRPCommonServiceFamily = 16384 // EIGRP Common Service Family + AddrFamilyEIGRPIPv4ServiceFamily = 16385 // EIGRP IPv4 Service Family + AddrFamilyEIGRPIPv6ServiceFamily = 16386 // EIGRP IPv6 Service Family + AddrFamilyLISPCanonicalAddressFormat = 16387 // LISP Canonical Address Format (LCAF) + AddrFamilyBGPLS = 16388 // BGP-LS + AddrFamily48bitMAC = 16389 // 48-bit MAC + AddrFamily64bitMAC = 16390 // 64-bit MAC + AddrFamilyOUI = 16391 // OUI + AddrFamilyMACFinal24bits = 16392 // MAC/24 + AddrFamilyMACFinal40bits = 16393 // MAC/40 + AddrFamilyIPv6Initial64bits = 16394 // IPv6/64 + AddrFamilyRBridgePortID = 16395 // RBridge Port ID + AddrFamilyTRILLNickname = 16396 // TRILL Nickname +) diff --git a/vendor/golang.org/x/net/internal/iana/gen.go b/vendor/golang.org/x/net/internal/iana/gen.go new file mode 100644 index 0000000000..2227e09e89 --- /dev/null +++ b/vendor/golang.org/x/net/internal/iana/gen.go @@ -0,0 +1,387 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates internet protocol constants and tables by +// reading IANA protocol registries. +package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "strconv" + "strings" +) + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml", + parseDSCPRegistry, + }, + { + "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", + parseTOSTCByte, + }, + { + "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", + parseProtocolNumbers, + }, + { + "http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml", + parseAddrFamilyNumbers, + }, +} + +func main() { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n") + fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url) + os.Exit(1) + } + if err := r.parse(&bb, resp.Body); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := ioutil.WriteFile("const.go", b, 0644); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func parseDSCPRegistry(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var dr dscpRegistry + if err := dec.Decode(&dr); err != nil { + return err + } + drs := dr.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated) + fmt.Fprintf(w, "const (\n") + for _, dr := range drs { + fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value) + fmt.Fprintf(w, "// %s\n", dr.OrigName) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type dscpRegistry 
struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + PoolRecords []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>record"` + Records []struct { + Name string `xml:"name"` + Space string `xml:"space"` + } `xml:"registry>registry>record"` +} + +type canonDSCPRecord struct { + OrigName string + Name string + Value int +} + +func (drr *dscpRegistry) escape() []canonDSCPRecord { + drs := make([]canonDSCPRecord, len(drr.Records)) + sr := strings.NewReplacer( + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, dr := range drr.Records { + s := strings.TrimSpace(dr.Name) + drs[i].OrigName = s + drs[i].Name = sr.Replace(s) + n, err := strconv.ParseUint(dr.Space, 2, 8) + if err != nil { + continue + } + drs[i].Value = int(n) << 2 + } + return drs +} + +func parseTOSTCByte(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var ttb tosTCByte + if err := dec.Decode(&ttb); err != nil { + return err + } + trs := ttb.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated) + fmt.Fprintf(w, "const (\n") + for _, tr := range trs { + fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value) + fmt.Fprintf(w, "// %s\n", tr.OrigKeyword) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type tosTCByte struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Note string `xml:"note"` + RegTitle string `xml:"registry>title"` + Records []struct { + Binary string `xml:"binary"` + Keyword string `xml:"keyword"` + } `xml:"registry>record"` +} + +type canonTOSTCByteRecord struct { + OrigKeyword string + Keyword string + Value int +} + +func (ttb *tosTCByte) escape() []canonTOSTCByteRecord { + trs := make([]canonTOSTCByteRecord, len(ttb.Records)) + sr := strings.NewReplacer( + "Capable", "", + "(", "", + ")", "", + "+", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, tr := range ttb.Records { + s := strings.TrimSpace(tr.Keyword) + trs[i].OrigKeyword = s + ss := strings.Split(s, " ") + if len(ss) > 1 { + trs[i].Keyword = strings.Join(ss[1:], " ") + } else { + trs[i].Keyword = ss[0] + } + trs[i].Keyword = sr.Replace(trs[i].Keyword) + n, err := strconv.ParseUint(tr.Binary, 2, 8) + if err != nil { + continue + } + trs[i].Value = int(n) + } + return trs +} + +func parseProtocolNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var pn protocolNumbers + if err := dec.Decode(&pn); err != nil { + return err + } + prs := pn.escape() + prs = append([]canonProtocolRecord{{ + Name: "IP", + Descr: "IPv4 encapsulation, pseudo protocol number", + Value: 0, + }}, prs...) 
+ fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value) + s := pr.Descr + if s == "" { + s = pr.OrigName + } + fmt.Fprintf(w, "// %s\n", s) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type protocolNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonProtocolRecord struct { + OrigName string + Name string + Descr string + Value int +} + +func (pn *protocolNumbers) escape() []canonProtocolRecord { + prs := make([]canonProtocolRecord, len(pn.Records)) + sr := strings.NewReplacer( + "-in-", "in", + "-within-", "within", + "-over-", "over", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, pr := range pn.Records { + if strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "deprecated") { + continue + } + prs[i].OrigName = pr.Name + s := strings.TrimSpace(pr.Name) + switch pr.Name { + case "ISIS over IPv4": + prs[i].Name = "ISIS" + case "manet": + prs[i].Name = "MANET" + default: + prs[i].Name = sr.Replace(s) + } + ss := strings.Split(pr.Descr, "\n") + for i := range ss { + ss[i] = strings.TrimSpace(ss[i]) + } + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} + +func parseAddrFamilyNumbers(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var afn addrFamilylNumbers + if err := dec.Decode(&afn); err != nil { + return err + } + afrs := afn.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", afn.Title, afn.Updated) + fmt.Fprintf(w, "const (\n") + for _, afr := range afrs { + if afr.Name == "" { + continue + } + fmt.Fprintf(w, "AddrFamily%s = %d", afr.Name, afr.Value) + fmt.Fprintf(w, "// %s\n", afr.Descr) + } + fmt.Fprintf(w, ")\n") + return nil +} + +type addrFamilylNumbers struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + RegTitle string `xml:"registry>title"` + Note string `xml:"registry>note"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"registry>record"` +} + +type canonAddrFamilyRecord struct { + Name string + Descr string + Value int +} + +func (afn *addrFamilylNumbers) escape() []canonAddrFamilyRecord { + afrs := make([]canonAddrFamilyRecord, len(afn.Records)) + sr := strings.NewReplacer( + "IP version 4", "IPv4", + "IP version 6", "IPv6", + "Identifier", "ID", + "-", "", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for i, afr := range afn.Records { + if strings.Contains(afr.Descr, "Unassigned") || + strings.Contains(afr.Descr, "Reserved") { + continue + } + afrs[i].Descr = afr.Descr + s := strings.TrimSpace(afr.Descr) + switch s { + case "IP (IP version 4)": + afrs[i].Name = "IPv4" + case "IP6 (IP version 6)": + afrs[i].Name = "IPv6" + case "AFI for L2VPN information": + afrs[i].Name = "L2VPN" + case "E.164 with NSAP format subaddress": + afrs[i].Name = "E164withSubaddress" + case "MT IP: Multi-Topology IP version 4": + afrs[i].Name = "MTIPv4" + case "MAC/24": + afrs[i].Name = "MACFinal24bits" + case "MAC/40": + afrs[i].Name = "MACFinal40bits" + case "IPv6/64": + afrs[i].Name = 
"IPv6Initial64bits" + default: + n := strings.Index(s, "(") + if n > 0 { + s = s[:n] + } + n = strings.Index(s, ":") + if n > 0 { + s = s[:n] + } + afrs[i].Name = sr.Replace(s) + } + afrs[i].Value, _ = strconv.Atoi(afr.Value) + } + return afrs +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr.go b/vendor/golang.org/x/net/internal/socket/cmsghdr.go new file mode 100644 index 0000000000..1eb07d26de --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +func (h *cmsghdr) len() int { return int(h.Len) } +func (h *cmsghdr) lvl() int { return int(h.Level) } +func (h *cmsghdr) typ() int { return int(h.Type) } diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go new file mode 100644 index 0000000000..d1d0c2de54 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go new file mode 100644 index 0000000000..bac66811dd --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go new file mode 100644 index 0000000000..63f0534fa7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint64(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go new file mode 100644 index 0000000000..7dedd430eb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build amd64 +// +build solaris + +package socket + +func (h *cmsghdr) set(l, lvl, typ int) { + h.Len = uint32(l) + h.Level = int32(lvl) + h.Type = int32(typ) +} diff --git a/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go new file mode 100644 index 0000000000..a4e71226f8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type cmsghdr struct{} + +const sizeofCmsghdr = 0 + +func (h *cmsghdr) len() int { return 0 } +func (h *cmsghdr) lvl() int { return 0 } +func (h *cmsghdr) typ() int { return 0 } + +func (h *cmsghdr) set(l, lvl, typ int) {} diff --git a/vendor/golang.org/x/net/internal/socket/defs_darwin.go b/vendor/golang.org/x/net/internal/socket/defs_darwin.go new file mode 100644 index 0000000000..14e28c0b45 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_darwin.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go new file mode 100644 index 0000000000..14e28c0b45 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_dragonfly.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
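A note on the two file families just shown. The cmsghdr_*.go variants differ only in the width of the Len field (uint32 on the BSDs and 32-bit Linux, uint64 on 64-bit Linux); every caller goes through the build-tagged set/len methods, so nothing else in the package branches on platform. The defs_*.go files, by contrast, are never compiled into the package (note the "// +build ignore" tag): they are inputs for "go tool cgo -godefs", which resolves each C.struct_* against the platform headers and emits the checked-in zsys_*.go files. A sketch of roughly what that generation produces on linux/amd64; the field names and sizes below are assumptions inferred from the C types, not copied from this diff:

// Illustrative godefs output for linux/amd64 (assumed, not from the diff).
type iovec struct {
	Base *byte  // C's void *iov_base
	Len  uint64 // C's size_t iov_len
}

const (
	sizeofIovec  = 0x10 // unsafe.Sizeof(iovec{}) on amd64
	sizeofMsghdr = 0x38
)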
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_freebsd.go b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go new file mode 100644 index 0000000000..14e28c0b45 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_freebsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_linux.go b/vendor/golang.org/x/net/internal/socket/defs_linux.go new file mode 100644 index 0000000000..ce9ec2f6d7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_linux.go @@ -0,0 +1,49 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <linux/in.h> +#include <linux/in6.h> + +#define _GNU_SOURCE +#include <sys/socket.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_netbsd.go b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go new file mode 100644 index 0000000000..3f84335699 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_netbsd.go @@ -0,0 +1,47 @@ +// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type mmsghdr C.struct_mmsghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofMmsghdr = C.sizeof_struct_mmsghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_openbsd.go b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go new file mode 100644 index 0000000000..14e28c0b45 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_openbsd.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/defs_solaris.go b/vendor/golang.org/x/net/internal/socket/defs_solaris.go new file mode 100644 index 0000000000..14e28c0b45 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/defs_solaris.go @@ -0,0 +1,44 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package socket + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysAF_UNSPEC = C.AF_UNSPEC + sysAF_INET = C.AF_INET + sysAF_INET6 = C.AF_INET6 + + sysSOCK_RAW = C.SOCK_RAW +) + +type iovec C.struct_iovec + +type msghdr C.struct_msghdr + +type cmsghdr C.struct_cmsghdr + +type sockaddrInet C.struct_sockaddr_in + +type sockaddrInet6 C.struct_sockaddr_in6 + +const ( + sizeofIovec = C.sizeof_struct_iovec + sizeofMsghdr = C.sizeof_struct_msghdr + sizeofCmsghdr = C.sizeof_struct_cmsghdr + + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 +) diff --git a/vendor/golang.org/x/net/internal/socket/error_unix.go b/vendor/golang.org/x/net/internal/socket/error_unix.go new file mode 100644 index 0000000000..93dff91802 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_unix.go @@ -0,0 +1,31 @@ +// Copyright 2017 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package socket + +import "syscall" + +var ( + errEAGAIN error = syscall.EAGAIN + errEINVAL error = syscall.EINVAL + errENOENT error = syscall.ENOENT +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.EAGAIN: + return errEAGAIN + case syscall.EINVAL: + return errEINVAL + case syscall.ENOENT: + return errENOENT + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/error_windows.go b/vendor/golang.org/x/net/internal/socket/error_windows.go new file mode 100644 index 0000000000..6a6379a8b0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/error_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "syscall" + +var ( + errERROR_IO_PENDING error = syscall.ERROR_IO_PENDING + errEINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent allocations +// at runtime. +func errnoErr(errno syscall.Errno) error { + switch errno { + case 0: + return nil + case syscall.ERROR_IO_PENDING: + return errERROR_IO_PENDING + case syscall.EINVAL: + return errEINVAL + } + return errno +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_32bit.go b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go new file mode 100644 index 0000000000..05d6082d14 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_32bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go new file mode 100644 index 0000000000..afb34ad58e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build darwin dragonfly freebsd linux netbsd openbsd + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*byte)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go new file mode 100644 index 0000000000..8d17a40c40 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go @@ -0,0 +1,19 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
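The pre-boxed errEAGAIN, errEINVAL and errENOENT variables in error_unix.go above exist because converting a syscall.Errno into the error interface boxes the value, which has historically cost an allocation on every failed call; non-blocking receive loops hit EAGAIN constantly, so the common errnos are converted once at package init and reused. A minimal sketch of the two paths, assuming the same package-level variable:

// Sketch: why errnoErr returns shared variables for common errnos.
func fresh(errno syscall.Errno) error { return errno } // builds a new interface value per call

func shared(errno syscall.Errno) error { // common case reuses the boxed package variable
	if errno == syscall.EAGAIN {
		return errEAGAIN
	}
	return errno
}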
+ +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (v *iovec) set(b []byte) { + l := len(b) + if l == 0 { + return + } + v.Base = (*int8)(unsafe.Pointer(&b[0])) + v.Len = uint64(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/iovec_stub.go b/vendor/golang.org/x/net/internal/socket/iovec_stub.go new file mode 100644 index 0000000000..c87d2a9339 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/iovec_stub.go @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type iovec struct{} + +func (v *iovec) set(b []byte) {} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go new file mode 100644 index 0000000000..2e80a9cb74 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux,!netbsd + +package socket + +import "net" + +type mmsghdr struct{} + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go new file mode 100644 index 0000000000..3c42ea7ad8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go @@ -0,0 +1,42 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux netbsd + +package socket + +import "net" + +type mmsghdrs []mmsghdr + +func (hs mmsghdrs) pack(ms []Message, parseFn func([]byte, string) (net.Addr, error), marshalFn func(net.Addr) []byte) error { + for i := range hs { + vs := make([]iovec, len(ms[i].Buffers)) + var sa []byte + if parseFn != nil { + sa = make([]byte, sizeofSockaddrInet6) + } + if marshalFn != nil { + sa = marshalFn(ms[i].Addr) + } + hs[i].Hdr.pack(vs, ms[i].Buffers, ms[i].OOB, sa) + } + return nil +} + +func (hs mmsghdrs) unpack(ms []Message, parseFn func([]byte, string) (net.Addr, error), hint string) error { + for i := range hs { + ms[i].N = int(hs[i].Len) + ms[i].NN = hs[i].Hdr.controllen() + ms[i].Flags = hs[i].Hdr.flags() + if parseFn != nil { + var err error + ms[i].Addr, err = parseFn(hs[i].Hdr.name(), hint) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go new file mode 100644 index 0000000000..5567afc88d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsd.go @@ -0,0 +1,39 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
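The pack and unpack methods above bridge the exported Message type (defined in socket.go further down in this diff) and the kernel's mmsghdr array: pack turns each entry of Message.Buffers into one iovec and reserves a sockaddr buffer when source addresses are wanted, and unpack copies byte counts, control-data lengths and flags back out after the syscall returns. A hedged caller's-eye sketch of what one batch receive sets up; the buffer sizes are illustrative, and because the package is internal to x/net only packages such as ipv4 and ipv6 can actually write this:

// Sketch: preparing 8 messages for one RecvMsgs batch (Linux only).
ms := make([]socket.Message, 8)
for i := range ms {
	ms[i].Buffers = [][]byte{make([]byte, 2), make([]byte, 1500)} // scatter: short header, then payload
	ms[i].OOB = socket.NewControlMessage([]int{4})                // room for one 4-byte ancillary item
}
// n, err := conn.RecvMsgs(ms, 0); ms[:n] then carry N, NN, Flags and Addr.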
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.Control = (*byte)(unsafe.Pointer(&oob[0])) + h.Controllen = uint32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go new file mode 100644 index 0000000000..b8c87b72b9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = int32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go new file mode 100644 index 0000000000..5a38798cc0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + h.setIov(vs) + if len(oob) > 0 { + h.setControl(oob) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) name() []byte { + if h.Name != nil && h.Namelen > 0 { + return (*[sizeofSockaddrInet6]byte)(unsafe.Pointer(h.Name))[:h.Namelen] + } + return nil +} + +func (h *msghdr) controllen() int { + return int(h.Controllen) +} + +func (h *msghdr) flags() int { + return int(h.Flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go new file mode 100644 index 0000000000..a7a5987c88 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm mips mipsle 386 +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint32(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go new file mode 100644 index 0000000000..610fc4f3bb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go @@ -0,0 +1,24 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm64 amd64 ppc64 ppc64le mips64 mips64le s390x +// +build linux + +package socket + +import "unsafe" + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint64(l) +} + +func (h *msghdr) setControl(b []byte) { + h.Control = (*byte)(unsafe.Pointer(&b[0])) + h.Controllen = uint64(len(b)) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go new file mode 100644 index 0000000000..71a69e2513 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func (h *msghdr) setIov(vs []iovec) { + l := len(vs) + if l == 0 { + return + } + h.Iov = &vs[0] + h.Iovlen = uint32(l) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go new file mode 100644 index 0000000000..6465b20732 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go @@ -0,0 +1,36 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64 +// +build solaris + +package socket + +import "unsafe" + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) { + for i := range vs { + vs[i].set(bs[i]) + } + if len(vs) > 0 { + h.Iov = &vs[0] + h.Iovlen = int32(len(vs)) + } + if len(oob) > 0 { + h.Accrights = (*int8)(unsafe.Pointer(&oob[0])) + h.Accrightslen = int32(len(oob)) + } + if sa != nil { + h.Name = (*byte)(unsafe.Pointer(&sa[0])) + h.Namelen = uint32(len(sa)) + } +} + +func (h *msghdr) controllen() int { + return int(h.Accrightslen) +} + +func (h *msghdr) flags() int { + return int(NativeEndian.Uint32(h.Pad_cgo_2[:])) +} diff --git a/vendor/golang.org/x/net/internal/socket/msghdr_stub.go b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go new file mode 100644 index 0000000000..64e8173352 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/msghdr_stub.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris + +package socket + +type msghdr struct{} + +func (h *msghdr) pack(vs []iovec, bs [][]byte, oob []byte, sa []byte) {} +func (h *msghdr) name() []byte { return nil } +func (h *msghdr) controllen() int { return 0 } +func (h *msghdr) flags() int { return 0 } diff --git a/vendor/golang.org/x/net/internal/socket/rawconn.go b/vendor/golang.org/x/net/internal/socket/rawconn.go new file mode 100644 index 0000000000..d6871d55f7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn.go @@ -0,0 +1,66 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "syscall" +) + +// A Conn represents a raw connection. +type Conn struct { + network string + c syscall.RawConn +} + +// NewConn returns a new raw connection. 
+func NewConn(c net.Conn) (*Conn, error) { + var err error + var cc Conn + switch c := c.(type) { + case *net.TCPConn: + cc.network = "tcp" + cc.c, err = c.SyscallConn() + case *net.UDPConn: + cc.network = "udp" + cc.c, err = c.SyscallConn() + case *net.IPConn: + cc.network = "ip" + cc.c, err = c.SyscallConn() + default: + return nil, errors.New("unknown connection type") + } + if err != nil { + return nil, err + } + return &cc, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + var operr error + var n int + fn := func(s uintptr) { + n, operr = getsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return 0, err + } + return n, os.NewSyscallError("getsockopt", operr) +} + +func (o *Option) set(c *Conn, b []byte) error { + var operr error + fn := func(s uintptr) { + operr = setsockopt(s, o.Level, o.Name, b) + } + if err := c.c.Control(fn); err != nil { + return err + } + return os.NewSyscallError("setsockopt", operr) +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go new file mode 100644 index 0000000000..499164a3fb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go @@ -0,0 +1,74 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build linux + +package socket + +import ( + "net" + "os" + "syscall" +) + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var parseFn func([]byte, string) (net.Addr, error) + if c.network != "tcp" { + parseFn = parseInetAddr + } + if err := hs.pack(ms, parseFn, nil); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("recvmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], parseFn, c.network); err != nil { + return n, err + } + return n, nil +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + hs := make(mmsghdrs, len(ms)) + var marshalFn func(net.Addr) []byte + if c.network != "tcp" { + marshalFn = marshalInetAddr + } + if err := hs.pack(ms, nil, marshalFn); err != nil { + return 0, err + } + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmmsg(s, hs, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return n, err + } + if operr != nil { + return n, os.NewSyscallError("sendmmsg", operr) + } + if err := hs[:n].unpack(ms[:n], nil, ""); err != nil { + return n, err + } + return n, nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_msg.go b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go new file mode 100644 index 0000000000..b21d2e6418 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_msg.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
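The closures handed to c.c.Read and c.c.Write in rawconn_mmsg.go above encode the retry contract of syscall.RawConn: returning false after EAGAIN parks the goroutine on the runtime's network poller, and the closure is re-invoked once the descriptor is ready again, so the loop never spins. Restated in isolation as a hedged sketch, where rc is any syscall.RawConn and recv is a hypothetical stand-in for the recvmmsg wrapper:

// Sketch of the non-blocking retry contract used above.
var n int
var operr error
err := rc.Read(func(fd uintptr) bool {
	n, operr = recv(fd)            // hypothetical stand-in for recvmmsg(fd, hs, flags)
	return operr != syscall.EAGAIN // false means: wait until readable, then call me again
})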
+ +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "os" + "syscall" +) + +func (c *Conn) recvMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if c.network != "tcp" { + sa = make([]byte, sizeofSockaddrInet6) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = recvmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Read(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("recvmsg", operr) + } + if c.network != "tcp" { + var err error + m.Addr, err = parseInetAddr(sa[:], c.network) + if err != nil { + return err + } + } + m.N = n + m.NN = h.controllen() + m.Flags = h.flags() + return nil +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + var h msghdr + vs := make([]iovec, len(m.Buffers)) + var sa []byte + if m.Addr != nil { + sa = marshalInetAddr(m.Addr) + } + h.pack(vs, m.Buffers, m.OOB, sa) + var operr error + var n int + fn := func(s uintptr) bool { + n, operr = sendmsg(s, &h, flags) + if operr == syscall.EAGAIN { + return false + } + return true + } + if err := c.c.Write(fn); err != nil { + return err + } + if operr != nil { + return os.NewSyscallError("sendmsg", operr) + } + m.N = n + m.NN = len(m.OOB) + return nil +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go new file mode 100644 index 0000000000..f78832aa4a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !linux + +package socket + +import "errors" + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go new file mode 100644 index 0000000000..96733cbe1b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go @@ -0,0 +1,18 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/rawconn_stub.go b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go new file mode 100644 index 0000000000..d2add1a0aa --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/rawconn_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
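Everything above is gated on "+build go1.9", the first release that exposes syscall.RawConn; the rawconn_stub.go and reflect.go files that follow are the pre-1.9 fallback, which returns "not implemented" for the message APIs and has to dig the descriptor out of net's unexported conn.fd fields by reflection. On go1.9 and later the supported route is short; a hedged sketch, with IP_TTL chosen only as an illustrative Unix option:

c, err := net.ListenUDP("udp", nil)
if err != nil {
	panic(err)
}
rc, _ := c.SyscallConn()
var ttl int
rc.Control(func(fd uintptr) {
	ttl, _ = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_IP, syscall.IP_TTL)
})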
+ +// +build !go1.9 + +package socket + +import "errors" + +func (c *Conn) recvMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) sendMsg(m *Message, flags int) error { + return errors.New("not implemented") +} + +func (c *Conn) recvMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func (c *Conn) sendMsgs(ms []Message, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/reflect.go b/vendor/golang.org/x/net/internal/socket/reflect.go new file mode 100644 index 0000000000..bb179f11d8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/reflect.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package socket + +import ( + "errors" + "net" + "os" + "reflect" + "runtime" +) + +// A Conn represents a raw connection. +type Conn struct { + c net.Conn +} + +// NewConn returns a new raw connection. +func NewConn(c net.Conn) (*Conn, error) { + return &Conn{c: c}, nil +} + +func (o *Option) get(c *Conn, b []byte) (int, error) { + s, err := socketOf(c.c) + if err != nil { + return 0, err + } + n, err := getsockopt(s, o.Level, o.Name, b) + return n, os.NewSyscallError("getsockopt", err) +} + +func (o *Option) set(c *Conn, b []byte) error { + s, err := socketOf(c.c) + if err != nil { + return err + } + return os.NewSyscallError("setsockopt", setsockopt(s, o.Level, o.Name, b)) +} + +func socketOf(c net.Conn) (uintptr, error) { + switch c.(type) { + case *net.TCPConn, *net.UDPConn, *net.IPConn: + v := reflect.ValueOf(c) + switch e := v.Elem(); e.Kind() { + case reflect.Struct: + fd := e.FieldByName("conn").FieldByName("fd") + switch e := fd.Elem(); e.Kind() { + case reflect.Struct: + sysfd := e.FieldByName("sysfd") + if runtime.GOOS == "windows" { + return uintptr(sysfd.Uint()), nil + } + return uintptr(sysfd.Int()), nil + } + } + } + return 0, errors.New("invalid type") +} diff --git a/vendor/golang.org/x/net/internal/socket/socket.go b/vendor/golang.org/x/net/internal/socket/socket.go new file mode 100644 index 0000000000..5f9730e6d9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/socket.go @@ -0,0 +1,285 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socket provides a portable interface for socket system +// calls. +package socket // import "golang.org/x/net/internal/socket" + +import ( + "errors" + "net" + "unsafe" +) + +// An Option represents a sticky socket option. +type Option struct { + Level int // level + Name int // name; must be equal or greater than 1 + Len int // length of value in bytes; must be equal or greater than 1 +} + +// Get reads a value for the option from the kernel. +// It returns the number of bytes written into b. +func (o *Option) Get(c *Conn, b []byte) (int, error) { + if o.Name < 1 || o.Len < 1 { + return 0, errors.New("invalid option") + } + if len(b) < o.Len { + return 0, errors.New("short buffer") + } + return o.get(c, b) +} + +// GetInt returns an integer value for the option. +// +// The Len field of Option must be either 1 or 4. 
+func (o *Option) GetInt(c *Conn) (int, error) { + if o.Len != 1 && o.Len != 4 { + return 0, errors.New("invalid option") + } + var b []byte + var bb [4]byte + if o.Len == 1 { + b = bb[:1] + } else { + b = bb[:4] + } + n, err := o.get(c, b) + if err != nil { + return 0, err + } + if n != o.Len { + return 0, errors.New("invalid option length") + } + if o.Len == 1 { + return int(b[0]), nil + } + return int(NativeEndian.Uint32(b[:4])), nil +} + +// Set writes the option and value to the kernel. +func (o *Option) Set(c *Conn, b []byte) error { + if o.Name < 1 || o.Len < 1 { + return errors.New("invalid option") + } + if len(b) < o.Len { + return errors.New("short buffer") + } + return o.set(c, b) +} + +// SetInt writes the option and value to the kernel. +// +// The Len field of Option must be either 1 or 4. +func (o *Option) SetInt(c *Conn, v int) error { + if o.Len != 1 && o.Len != 4 { + return errors.New("invalid option") + } + var b []byte + if o.Len == 1 { + b = []byte{byte(v)} + } else { + var bb [4]byte + NativeEndian.PutUint32(bb[:o.Len], uint32(v)) + b = bb[:4] + } + return o.set(c, b) +} + +func controlHeaderLen() int { + return roundup(sizeofCmsghdr) +} + +func controlMessageLen(dataLen int) int { + return roundup(sizeofCmsghdr) + dataLen +} + +// ControlMessageSpace returns the whole length of control message. +func ControlMessageSpace(dataLen int) int { + return roundup(sizeofCmsghdr) + roundup(dataLen) +} + +// A ControlMessage represents the head message in a stream of control +// messages. +// +// A control message comprises of a header, data and a few padding +// fields to conform to the interface to the kernel. +// +// See RFC 3542 for further information. +type ControlMessage []byte + +// Data returns the data field of the control message at the head on +// m. +func (m ControlMessage) Data(dataLen int) []byte { + l := controlHeaderLen() + if len(m) < l || len(m) < l+dataLen { + return nil + } + return m[l : l+dataLen] +} + +// Next returns the control message at the next on m. +// +// Next works only for standard control messages. +func (m ControlMessage) Next(dataLen int) ControlMessage { + l := ControlMessageSpace(dataLen) + if len(m) < l { + return nil + } + return m[l:] +} + +// MarshalHeader marshals the header fields of the control message at +// the head on m. +func (m ControlMessage) MarshalHeader(lvl, typ, dataLen int) error { + if len(m) < controlHeaderLen() { + return errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(dataLen), lvl, typ) + return nil +} + +// ParseHeader parses and returns the header fields of the control +// message at the head on m. +func (m ControlMessage) ParseHeader() (lvl, typ, dataLen int, err error) { + l := controlHeaderLen() + if len(m) < l { + return 0, 0, 0, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + return h.lvl(), h.typ(), int(uint64(h.len()) - uint64(l)), nil +} + +// Marshal marshals the control message at the head on m, and returns +// the next control message. +func (m ControlMessage) Marshal(lvl, typ int, data []byte) (ControlMessage, error) { + l := len(data) + if len(m) < ControlMessageSpace(l) { + return nil, errors.New("short message") + } + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + h.set(controlMessageLen(l), lvl, typ) + if l > 0 { + copy(m.Data(l), data) + } + return m.Next(l), nil +} + +// Parse parses m as a single or multiple control messages. +// +// Parse works for both standard and compatible messages. 
+func (m ControlMessage) Parse() ([]ControlMessage, error) { + var ms []ControlMessage + for len(m) >= controlHeaderLen() { + h := (*cmsghdr)(unsafe.Pointer(&m[0])) + l := h.len() + if l <= 0 { + return nil, errors.New("invalid header length") + } + if uint64(l) < uint64(controlHeaderLen()) { + return nil, errors.New("invalid message length") + } + if uint64(l) > uint64(len(m)) { + return nil, errors.New("short buffer") + } + // On message reception: + // + // |<- ControlMessageSpace --------------->| + // |<- controlMessageLen ---------->| | + // |<- controlHeaderLen ->| | | + // +---------------+------+---------+------+ + // | Header | PadH | Data | PadD | + // +---------------+------+---------+------+ + // + // On compatible message reception: + // + // | ... |<- controlMessageLen ----------->| + // | ... |<- controlHeaderLen ->| | + // +-----+---------------+------+----------+ + // | ... | Header | PadH | Data | + // +-----+---------------+------+----------+ + ms = append(ms, ControlMessage(m[:l])) + ll := l - controlHeaderLen() + if len(m) >= ControlMessageSpace(ll) { + m = m[ControlMessageSpace(ll):] + } else { + m = m[controlMessageLen(ll):] + } + } + return ms, nil +} + +// NewControlMessage returns a new stream of control messages. +func NewControlMessage(dataLen []int) ControlMessage { + var l int + for i := range dataLen { + l += ControlMessageSpace(dataLen[i]) + } + return make([]byte, l) +} + +// A Message represents an IO message. +type Message struct { + // When writing, the Buffers field must contain at least one + // byte to write. + // When reading, the Buffers field will always contain a byte + // to read. + Buffers [][]byte + + // OOB contains protocol-specific control or miscellaneous + // ancillary data known as out-of-band data. + OOB []byte + + // Addr specifies a destination address when writing. + // It can be nil when the underlying protocol of the raw + // connection uses connection-oriented communication. + // After a successful read, it may contain the source address + // on the received packet. + Addr net.Addr + + N int // # of bytes read or written from/to Buffers + NN int // # of bytes read or written from/to OOB + Flags int // protocol-specific information on the received message +} + +// RecvMsg wraps recvmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +func (c *Conn) RecvMsg(m *Message, flags int) error { + return c.recvMsg(m, flags) +} + +// SendMsg wraps sendmsg system call. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +func (c *Conn) SendMsg(m *Message, flags int) error { + return c.sendMsg(m, flags) +} + +// RecvMsgs wraps recvmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// Only Linux supports this. +func (c *Conn) RecvMsgs(ms []Message, flags int) (int, error) { + return c.recvMsgs(ms, flags) +} + +// SendMsgs wraps sendmmsg system call. +// +// It returns the number of processed messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// Only Linux supports this. 
+func (c *Conn) SendMsgs(ms []Message, flags int) (int, error) { + return c.sendMsgs(ms, flags) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys.go b/vendor/golang.org/x/net/internal/socket/sys.go new file mode 100644 index 0000000000..4f0eead138 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "encoding/binary" + "unsafe" +) + +var ( + // NativeEndian is the machine native endian implementation of + // ByteOrder. + NativeEndian binary.ByteOrder + + kernelAlign int +) + +func init() { + i := uint32(1) + b := (*[4]byte)(unsafe.Pointer(&i)) + if b[0] == 1 { + NativeEndian = binary.LittleEndian + } else { + NativeEndian = binary.BigEndian + } + kernelAlign = probeProtocolStack() +} + +func roundup(l int) int { + return (l + kernelAlign - 1) & ^(kernelAlign - 1) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsd.go b/vendor/golang.org/x/net/internal/socket/sys_bsd.go new file mode 100644 index 0000000000..f13e14ff36 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsd.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd openbsd + +package socket + +import "errors" + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go new file mode 100644 index 0000000000..f723fa36af --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_bsdvar.go @@ -0,0 +1,14 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd netbsd openbsd + +package socket + +import "unsafe" + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_darwin.go b/vendor/golang.org/x/net/internal/socket/sys_darwin.go new file mode 100644 index 0000000000..b17d223bff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_darwin.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go new file mode 100644 index 0000000000..b17d223bff --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_dragonfly.go @@ -0,0 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
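The ControlMessage helpers in socket.go above can be exercised without touching a socket at all, since they only read and write the byte layout drawn in the Parse diagrams. A hedged round-trip sketch, marshalling one 4-byte item and parsing it back; the level and type values are Linux's IPPROTO_IP/IP_TTL, chosen only for illustration, and as before this compiles only inside x/net because the package is internal:

m := socket.NewControlMessage([]int{4}) // space for one item carrying 4 data bytes
if _, err := m.Marshal(syscall.IPPROTO_IP, syscall.IP_TTL, []byte{64, 0, 0, 0}); err != nil {
	panic(err)
}
ms, _ := m.Parse()
lvl, typ, n, _ := ms[0].ParseHeader() // lvl == IPPROTO_IP, typ == IP_TTL, n == 4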
+ +package socket + +func probeProtocolStack() int { return 4 } diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux.go b/vendor/golang.org/x/net/internal/socket/sys_linux.go new file mode 100644 index 0000000000..1559521e03 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux,!s390x,!386 + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.go b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go new file mode 100644 index 0000000000..235b2cc08a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 4 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_386.s b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s new file mode 100644 index 0000000000..93e7d75ec0 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_386.s @@ -0,0 +1,11 @@ +// Copyright 2014 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-36 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-36 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go new file mode 100644 index 0000000000..9decee2e59 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x12b + sysSENDMMSG = 0x133 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go new file mode 100644 index 0000000000..d753b436df --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x16d + sysSENDMMSG = 0x176 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go new file mode 100644 index 0000000000..b670894366 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0xf3 + sysSENDMMSG = 0x10d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go new file mode 100644 index 0000000000..9c0d74014f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go new file mode 100644 index 0000000000..071a4aba8b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go new file mode 100644 index 0000000000..071a4aba8b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
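
The per-GOARCH files that follow (amd64, arm, arm64, mips, and so on) supply nothing but the raw recvmmsg(2)/sendmmsg(2) trap numbers that sys_linux.go feeds to syscall.Syscall6. A quick sanity check of that correspondence on one platform, assuming linux/amd64; this snippet is illustrative and not part of the vendored code:

// +build linux,amd64

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// The constants above are kernel syscall numbers, so they line up with
	// what the standard syscall package already exports on linux/amd64.
	fmt.Printf("recvmmsg=%#x sendmmsg=%#x\n",
		syscall.SYS_RECVMMSG, // 0x12b, matching sysRECVMMSG above
		syscall.SYS_SENDMMSG) // 0x133, matching sysSENDMMSG above
}
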
+ +package socket + +const ( + sysRECVMMSG = 0x14ae + sysSENDMMSG = 0x14b6 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go new file mode 100644 index 0000000000..9c0d74014f --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x10ef + sysSENDMMSG = 0x10f7 +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go new file mode 100644 index 0000000000..21c1e3f004 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go new file mode 100644 index 0000000000..21c1e3f004 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go @@ -0,0 +1,10 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package socket + +const ( + sysRECVMMSG = 0x157 + sysSENDMMSG = 0x15d +) diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go new file mode 100644 index 0000000000..327979efbb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go @@ -0,0 +1,55 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package socket + +import ( + "syscall" + "unsafe" +) + +func probeProtocolStack() int { return 8 } + +const ( + sysSETSOCKOPT = 0xe + sysGETSOCKOPT = 0xf + sysSENDMSG = 0x10 + sysRECVMSG = 0x11 + sysRECVMMSG = 0x13 + sysSENDMMSG = 0x14 +) + +func socketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) +func rawsocketcall(call, a0, a1, a2, a3, a4, a5 uintptr) (uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, errno := socketcall(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s new file mode 100644 index 0000000000..06d75628c9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s @@ -0,0 +1,11 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·socketcall(SB),NOSPLIT,$0-72 + JMP syscall·socketcall(SB) + +TEXT ·rawsocketcall(SB),NOSPLIT,$0-72 + JMP syscall·rawsocketcall(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_netbsd.go b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go new file mode 100644 index 0000000000..431851c12e --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_netbsd.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
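
On linux/386 and linux/s390x there are no separate socket syscalls; the assembly trampolines above jump into the runtime's socketcall support, which multiplexes every socket operation through SYS_SOCKETCALL with a small call number (sysSETSOCKOPT = 0xe, sysGETSOCKOPT = 0xf, and so on) and a pointer to an argument block. A hedged sketch of the same idea for 386 using only the standard syscall package; the helper name is illustrative:

// +build linux,386

package main

import (
	"log"
	"syscall"
	"unsafe"
)

// setsockoptViaSocketcall shows the multiplexing: the real arguments are
// packed into an array and SYS_SOCKETCALL dispatches on the call number.
func setsockoptViaSocketcall(s uintptr, level, name int, b []byte) error {
	const sysSETSOCKOPT = 0xe // same call number as in the vendored files
	args := [5]uintptr{s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b))}
	_, _, errno := syscall.Syscall(syscall.SYS_SOCKETCALL, sysSETSOCKOPT, uintptr(unsafe.Pointer(&args[0])), 0)
	if errno != 0 {
		return errno
	}
	return nil
}

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)
	one := [4]byte{1} // int 1; 386 is little-endian
	if err := setsockoptViaSocketcall(uintptr(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, one[:]); err != nil {
		log.Fatal(err)
	}
	log.Println("SO_REUSEADDR set via socketcall")
}
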
+ +package socket + +import ( + "syscall" + "unsafe" +) + +const ( + sysRECVMMSG = 0x1db + sysSENDMMSG = 0x1dc +) + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysRECVMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall6(sysSENDMMSG, s, uintptr(unsafe.Pointer(&hs[0])), uintptr(len(hs)), uintptr(flags), 0, 0) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_posix.go b/vendor/golang.org/x/net/internal/socket/sys_posix.go new file mode 100644 index 0000000000..dc130c27eb --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_posix.go @@ -0,0 +1,168 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package socket + +import ( + "encoding/binary" + "errors" + "net" + "runtime" + "strconv" + "sync" + "time" +) + +func marshalInetAddr(a net.Addr) []byte { + switch a := a.(type) { + case *net.TCPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.UDPAddr: + return marshalSockaddr(a.IP, a.Port, a.Zone) + case *net.IPAddr: + return marshalSockaddr(a.IP, 0, a.Zone) + default: + return nil + } +} + +func marshalSockaddr(ip net.IP, port int, zone string) []byte { + if ip4 := ip.To4(); ip4 != nil { + b := make([]byte, sizeofSockaddrInet) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET)) + default: + b[0] = sizeofSockaddrInet + b[1] = sysAF_INET + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[4:8], ip4) + return b + } + if ip6 := ip.To16(); ip6 != nil && ip.To4() == nil { + b := make([]byte, sizeofSockaddrInet6) + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + NativeEndian.PutUint16(b[:2], uint16(sysAF_INET6)) + default: + b[0] = sizeofSockaddrInet6 + b[1] = sysAF_INET6 + } + binary.BigEndian.PutUint16(b[2:4], uint16(port)) + copy(b[8:24], ip6) + if zone != "" { + NativeEndian.PutUint32(b[24:28], uint32(zoneCache.index(zone))) + } + return b + } + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + if len(b) < 2 { + return nil, errors.New("invalid address") + } + var af int + switch runtime.GOOS { + case "android", "linux", "solaris", "windows": + af = int(NativeEndian.Uint16(b[:2])) + default: + af = int(b[1]) + } + var ip net.IP + var zone string + if af == sysAF_INET { + if len(b) < sizeofSockaddrInet { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv4len) + copy(ip, b[4:8]) + } + if af == sysAF_INET6 { + if len(b) < sizeofSockaddrInet6 { + return nil, errors.New("short address") + } + ip = make(net.IP, net.IPv6len) + copy(ip, b[8:24]) + if id := int(NativeEndian.Uint32(b[24:28])); id > 0 { + zone = zoneCache.name(id) + } + } + switch network { + case "tcp", "tcp4", "tcp6": + return &net.TCPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + case "udp", "udp4", "udp6": + return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4])), Zone: zone}, nil + default: + return &net.IPAddr{IP: ip, Zone: zone}, nil + } +} + +// An ipv6ZoneCache represents a cache holding partial network +// interface information. 
It is used to reduce the cost of IPv6 +// addressing scope zone resolution. +// +// Multiple names sharing the index are managed on a first-come, +// first-served basis for consistency. +type ipv6ZoneCache struct { + sync.RWMutex // guard the following + lastFetched time.Time // last time routing information was fetched + toIndex map[string]int // interface name to its index + toName map[int]string // interface index to its name +} + +var zoneCache = ipv6ZoneCache{ + toIndex: make(map[string]int), + toName: make(map[int]string), +} + +func (zc *ipv6ZoneCache) update(ift []net.Interface) { + zc.Lock() + defer zc.Unlock() + now := time.Now() + if zc.lastFetched.After(now.Add(-60 * time.Second)) { + return + } + zc.lastFetched = now + if len(ift) == 0 { + var err error + if ift, err = net.Interfaces(); err != nil { + return + } + } + zc.toIndex = make(map[string]int, len(ift)) + zc.toName = make(map[int]string, len(ift)) + for _, ifi := range ift { + zc.toIndex[ifi.Name] = ifi.Index + if _, ok := zc.toName[ifi.Index]; !ok { + zc.toName[ifi.Index] = ifi.Name + } + } +} + +func (zc *ipv6ZoneCache) name(zone int) string { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + name, ok := zoneCache.toName[zone] + if !ok { + name = strconv.Itoa(zone) + } + return name +} + +func (zc *ipv6ZoneCache) index(zone string) int { + zoneCache.update(nil) + zoneCache.RLock() + defer zoneCache.RUnlock() + index, ok := zoneCache.toIndex[zone] + if !ok { + index, _ = strconv.Atoi(zone) + } + return index +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris.go b/vendor/golang.org/x/net/internal/socket/sys_solaris.go new file mode 100644 index 0000000000..cced74e60d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris.go @@ -0,0 +1,71 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
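
marshalSockaddr and parseInetAddr above hand-roll the platform sockaddr layout: on Linux-style stacks the first two bytes hold the address family in native byte order (the BSDs instead use a length byte followed by a family byte), the port sits at offset 2 in big-endian, and the address bytes follow. A self-contained round trip of the IPv4 case under that Linux-style layout, assuming a little-endian host for brevity; the helper names are illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

const sizeofSockaddrInet = 0x10 // matches the vendored zsys_linux_*.go files

func marshal(a *net.UDPAddr) []byte {
	b := make([]byte, sizeofSockaddrInet)
	// Family occupies bytes 0..2 in native endianness on Linux-style stacks;
	// little-endian assumed here (the vendored code uses NativeEndian).
	binary.LittleEndian.PutUint16(b[:2], 2 /* AF_INET */)
	binary.BigEndian.PutUint16(b[2:4], uint16(a.Port)) // port is always big-endian
	copy(b[4:8], a.IP.To4())
	return b
}

func parse(b []byte) *net.UDPAddr {
	// A real parser validates the family first, as parseInetAddr does.
	ip := make(net.IP, net.IPv4len)
	copy(ip, b[4:8])
	return &net.UDPAddr{IP: ip, Port: int(binary.BigEndian.Uint16(b[2:4]))}
}

func main() {
	in := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 4242}
	fmt.Println(parse(marshal(in))) // 192.0.2.1:4242
}
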
+ +package socket + +import ( + "errors" + "runtime" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so" +//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" +//go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so" +//go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so" + +//go:linkname procGetsockopt libc___xnet_getsockopt +//go:linkname procSetsockopt libc_setsockopt +//go:linkname procRecvmsg libc___xnet_recvmsg +//go:linkname procSendmsg libc___xnet_sendmsg + +var ( + procGetsockopt uintptr + procSetsockopt uintptr + procRecvmsg uintptr + procSendmsg uintptr +) + +func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) +func rawSysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procRecvmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSendmsg)), 3, s, uintptr(unsafe.Pointer(h)), uintptr(flags), 0, 0, 0) + return int(n), errnoErr(errno) +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s new file mode 100644 index 0000000000..a18ac5ed75 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s @@ -0,0 +1,11 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·sysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·sysvicall6(SB) + +TEXT ·rawSysvicall6(SB),NOSPLIT,$0-88 + JMP syscall·rawSysvicall6(SB) diff --git a/vendor/golang.org/x/net/internal/socket/sys_stub.go b/vendor/golang.org/x/net/internal/socket/sys_stub.go new file mode 100644 index 0000000000..d9f06d00e9 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_stub.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package socket + +import ( + "errors" + "net" + "runtime" + "unsafe" +) + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +func probeProtocolStack() int { + switch runtime.GOARCH { + case "amd64p32", "mips64p32": + return 4 + default: + var p uintptr + return int(unsafe.Sizeof(p)) + } +} + +func marshalInetAddr(ip net.IP, port int, zone string) []byte { + return nil +} + +func parseInetAddr(b []byte, network string) (net.Addr, error) { + return nil, errors.New("not implemented") +} + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + return 0, errors.New("not implemented") +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return errors.New("not implemented") +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_unix.go b/vendor/golang.org/x/net/internal/socket/sys_unix.go new file mode 100644 index 0000000000..18eba30853 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_unix.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux,!s390x,!386 netbsd openbsd + +package socket + +import ( + "syscall" + "unsafe" +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&l)), 0) + return int(l), errnoErr(errno) +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0) + return errnoErr(errno) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_RECVMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + n, _, errno := syscall.Syscall(syscall.SYS_SENDMSG, s, uintptr(unsafe.Pointer(h)), uintptr(flags)) + return int(n), errnoErr(errno) +} diff --git a/vendor/golang.org/x/net/internal/socket/sys_windows.go b/vendor/golang.org/x/net/internal/socket/sys_windows.go new file mode 100644 index 0000000000..54a470ebe3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/sys_windows.go @@ -0,0 +1,70 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
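
sys_unix.go above issues SYS_GETSOCKOPT and SYS_SETSOCKOPT directly against a raw descriptor. The standard library wraps the very same calls in typed helpers, which is an easy way to watch the plumbing work; a sketch for Unix-like platforms, illustrative and not part of the vendored code:

// +build darwin dragonfly freebsd linux netbsd openbsd

package main

import (
	"fmt"
	"log"
	"syscall"
)

func main() {
	fd, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer syscall.Close(fd)

	// Read the kernel's receive-buffer size for the socket; this is the same
	// getsockopt(2) round trip the vendored code performs with raw Syscall6.
	v, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("SO_RCVBUF:", v)
}
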
+ +package socket + +import ( + "errors" + "syscall" + "unsafe" +) + +func probeProtocolStack() int { + var p uintptr + return int(unsafe.Sizeof(p)) +} + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x17 + + sysSOCK_RAW = 0x3 +) + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) + +func getsockopt(s uintptr, level, name int, b []byte) (int, error) { + l := uint32(len(b)) + err := syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), (*int32)(unsafe.Pointer(&l))) + return int(l), err +} + +func setsockopt(s uintptr, level, name int, b []byte) error { + return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(unsafe.Pointer(&b[0])), int32(len(b))) +} + +func recvmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmsg(s uintptr, h *msghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func recvmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} + +func sendmmsg(s uintptr, hs []mmsghdr, flags int) (int, error) { + return 0, errors.New("not implemented") +} diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go new file mode 100644 index 0000000000..26f8feff3a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go new file mode 100644 index 0000000000..e2987f7db8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte 
/* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go new file mode 100644 index 0000000000..26f8feff3a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go new file mode 100644 index 0000000000..e2987f7db8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1e + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go new file mode 100644 index 0000000000..c582abd57d --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + 
sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go new file mode 100644 index 0000000000..04a24886c7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go new file mode 100644 index 0000000000..35c7cb9c95 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go new file mode 100644 index 0000000000..04a24886c7 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1c + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git 
a/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go new file mode 100644 index 0000000000..430206930b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_386.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go new file mode 100644 index 0000000000..430206930b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + 
sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go new file mode 100644 index 0000000000..430206930b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 
0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go new file mode 100644 index 0000000000..430206930b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go @@ -0,0 +1,63 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family 
uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go new file mode 100644 index 0000000000..1502f6c552 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go @@ -0,0 +1,66 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0xa + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint64 + Control *byte + Controllen uint64 + Flags int32 + Pad_cgo_1 [4]byte +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint64 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x38 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0x10 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go new file mode 100644 index 0000000000..db60491fe3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len 
uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go new file mode 100644 index 0000000000..2a1a79985a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go @@ -0,0 +1,68 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 + Pad_cgo_0 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofMmsghdr = 0x40 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go new file mode 100644 index 0000000000..db60491fe3 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go @@ -0,0 +1,65 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen int32 + Control *byte + Controllen uint32 + Flags int32 +} + +type mmsghdr struct { + Hdr msghdr + Len uint32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofMmsghdr = 0x20 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go new file mode 100644 index 0000000000..1c836361e8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + 
Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go new file mode 100644 index 0000000000..a6c0bf464a --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go @@ -0,0 +1,61 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen uint32 + Pad_cgo_1 [4]byte + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go new file mode 100644 index 0000000000..1c836361e8 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go @@ -0,0 +1,59 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x18 + + sysSOCK_RAW = 0x3 +) + +type iovec struct { + Base *byte + Len uint32 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x8 + sizeofMsghdr = 0x1c + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go new file mode 100644 index 0000000000..327c63290c --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go @@ -0,0 +1,60 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package socket + +const ( + sysAF_UNSPEC = 0x0 + sysAF_INET = 0x2 + sysAF_INET6 = 0x1a + + sysSOCK_RAW = 0x4 +) + +type iovec struct { + Base *int8 + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Pad_cgo_0 [4]byte + Iov *iovec + Iovlen int32 + Pad_cgo_1 [4]byte + Accrights *int8 + Accrightslen int32 + Pad_cgo_2 [4]byte +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr 
[4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + sizeofCmsghdr = 0xc + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x20 +) diff --git a/vendor/golang.org/x/net/ipv4/batch.go b/vendor/golang.org/x/net/ipv4/batch.go new file mode 100644 index 0000000000..b445499288 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/batch.go @@ -0,0 +1,191 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv4 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// RawConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers field represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +// +// Unlike the ReadFrom method, it doesn't strip the IPv4 header +// followed by option headers from the received IPv4 datagram when the +// underlying transport is net.IPConn. Each Buffers field of Message +// must be large enough to accommodate an IPv4 header and option +// headers. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. 
+// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *packetHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. +func (c *packetHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv4/control.go b/vendor/golang.org/x/net/ipv4/control.go new file mode 100644 index 0000000000..a2b02ca95b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
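
batch.go surfaces these RecvMsgs/SendMsgs paths through the exported ReadBatch and WriteBatch methods on ipv4.PacketConn. A small end-to-end sketch over loopback UDP; on Linux the reads and writes collapse into recvmmsg(2)/sendmmsg(2), elsewhere they degrade to one message per call, exactly as the fallback branches above do:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)

	// Queue three datagrams to ourselves with one WriteBatch call.
	dst := c.LocalAddr()
	wms := make([]ipv4.Message, 3)
	for i := range wms {
		wms[i] = ipv4.Message{Buffers: [][]byte{[]byte("ping")}, Addr: dst}
	}
	if _, err := p.WriteBatch(wms, 0); err != nil {
		log.Fatal(err)
	}

	// Drain whatever arrived; N reports the bytes read into each message.
	rms := make([]ipv4.Message, 3)
	for i := range rms {
		rms[i] = ipv4.Message{Buffers: [][]byte{make([]byte, 128)}}
	}
	n, err := p.ReadBatch(rms, 0)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d message(s); first: %q", n, rms[0].Buffers[0][:rms[0].N])
}
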
+ +package ipv4 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +type ControlFlags uint + +const ( + FlagTTL ControlFlags = 1 << iota // pass the TTL on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet +) + +// A ControlMessage represents per-packet IP-level socket options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows the + // application to receive the options from the protocol stack + // using the ReadFrom method of PacketConn or RawConn. + // + // Specifying socket options: passing a ControlMessage to the + // WriteTo method of PacketConn or RawConn allows the + // application to send the options to the protocol stack. + // + TTL int // time-to-live, receiving only + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex) +} + +// Marshal returns the binary encoding of cm. +func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var m socket.ControlMessage + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) { + m = socket.NewControlMessage([]int{ctlOpts[ctlPacketInfo].length}) + } + if len(m) > 0 { + ctlOpts[ctlPacketInfo].marshal(m, cm) + } + return m +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIP { + continue + } + switch { + case typ == ctlOpts[ctlTTL].name && l >= ctlOpts[ctlTTL].length: + ctlOpts[ctlTTL].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlDst].name && l >= ctlOpts[ctlDst].length: + ctlOpts[ctlDst].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlInterface].name && l >= ctlOpts[ctlInterface].length: + ctlOpts[ctlInterface].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf.
+func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTTL].length) + } + if ctlOpts[ctlPacketInfo].name > 0 { + if opt.isset(FlagSrc | FlagDst | FlagInterface) { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + } else { + if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlDst].length) + } + if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlInterface].length) + } + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTTL = iota // header field + ctlSrc // header field + ctlDst // header field + ctlInterface // inbound or outbound interface + ctlPacketInfo // inbound or outbound packet path + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. +type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv4/control_bsd.go b/vendor/golang.org/x/net/ipv4/control_bsd.go new file mode 100644 index 0000000000..77e7ad5bed --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_bsd.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalDst(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVDSTADDR, net.IPv4len) + return m.Next(net.IPv4len) +} + +func parseDst(cm *ControlMessage, b []byte) { + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, b[:net.IPv4len]) +} + +func marshalInterface(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVIF, syscall.SizeofSockaddrDatalink) + return m.Next(syscall.SizeofSockaddrDatalink) +} + +func parseInterface(cm *ControlMessage, b []byte) { + sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(sadl.Index) +} diff --git a/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/vendor/golang.org/x/net/ipv4/control_pktinfo.go new file mode 100644 index 0000000000..425338f35b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_pktinfo.go @@ -0,0 +1,39 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
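Putting NewControlMessage, Parse and the control flags together: a hedged sketch that requests per-packet metadata from the kernel and decodes the out-of-band bytes by hand. It assumes p is an *ipv4.PacketConn wrapping a UDP socket, and the flag choice is illustrative. ReadFrom parses control data for the caller, but ReadBatch leaves the OOB buffer raw:

```go
// Editor's sketch; p is assumed to be an *ipv4.PacketConn.
cf := ipv4.FlagTTL | ipv4.FlagDst | ipv4.FlagInterface
if err := p.SetControlMessage(cf, true); err != nil {
	// error handling
}
ms := []ipv4.Message{{
	Buffers: [][]byte{make([]byte, 1500)},
	OOB:     ipv4.NewControlMessage(cf), // sized for the flags above
}}
if _, err := p.ReadBatch(ms, 0); err != nil {
	// error handling
}
var cm ipv4.ControlMessage
// NN reports how many OOB bytes the kernel actually filled in.
if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil {
	// error handling
}
// cm.TTL, cm.Dst and cm.IfIndex now describe the received packet.
```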
+ +// +build darwin linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_PKTINFO, sizeofInetPktinfo) + if cm != nil { + pi := (*inetPktinfo)(unsafe.Pointer(&m.Data(sizeofInetPktinfo)[0])) + if ip := cm.Src.To4(); ip != nil { + copy(pi.Spec_dst[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInetPktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inetPktinfo)(unsafe.Pointer(&b[0])) + cm.IfIndex = int(pi.Ifindex) + if len(cm.Dst) < net.IPv4len { + cm.Dst = make(net.IP, net.IPv4len) + } + copy(cm.Dst, pi.Addr[:]) +} diff --git a/vendor/golang.org/x/net/ipv4/control_stub.go b/vendor/golang.org/x/net/ipv4/control_stub.go new file mode 100644 index 0000000000..5a2f7d8d3c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/control_unix.go b/vendor/golang.org/x/net/ipv4/control_unix.go new file mode 100644 index 0000000000..e1ae8167b3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_unix.go @@ -0,0 +1,73 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTTL]; ok && cf&FlagTTL != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTTL) + } else { + opt.clear(FlagTTL) + } + } + if so, ok := sockOpts[ssoPacketInfo]; ok { + if cf&(FlagSrc|FlagDst|FlagInterface) != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & (FlagSrc | FlagDst | FlagInterface)) + } else { + opt.clear(cf & (FlagSrc | FlagDst | FlagInterface)) + } + } + } else { + if so, ok := sockOpts[ssoReceiveDst]; ok && cf&FlagDst != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagDst) + } else { + opt.clear(FlagDst) + } + } + if so, ok := sockOpts[ssoReceiveInterface]; ok && cf&FlagInterface != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagInterface) + } else { + opt.clear(FlagInterface) + } + } + } + return nil +} + +func marshalTTL(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIP, sysIP_RECVTTL, 1) + return m.Next(1) +} + +func parseTTL(cm *ControlMessage, b []byte) { + cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0]))) +} diff --git a/vendor/golang.org/x/net/ipv4/control_windows.go b/vendor/golang.org/x/net/ipv4/control_windows.go new file mode 100644 index 0000000000..ce55c66447 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv4/defs_darwin.go b/vendor/golang.org/x/net/ipv4/defs_darwin.go new file mode 100644 index 0000000000..c8f2e05b81 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_darwin.go @@ -0,0 +1,77 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_STRIPHDR = C.IP_STRIPHDR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_MULTICAST_IFINDEX = C.IP_MULTICAST_IFINDEX + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go new file mode 100644 index 0000000000..f30544ea24 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_dragonfly.go @@ -0,0 +1,38 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/vendor/golang.org/x/net/ipv4/defs_freebsd.go new file mode 100644 index 0000000000..4dd57d8653 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_freebsd.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_SENDSRCADDR = C.IP_SENDSRCADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_ONESBCAST = C.IP_ONESBCAST + sysIP_BINDANY = C.IP_BINDANY + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_MINTTL = C.IP_MINTTL + sysIP_DONTFRAG = C.IP_DONTFRAG + sysIP_RECVTOS = C.IP_RECVTOS + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_MULTICAST_VIF = C.IP_MULTICAST_VIF + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/defs_linux.go b/vendor/golang.org/x/net/ipv4/defs_linux.go new file mode 100644 index 0000000000..beb11071ad --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_linux.go @@ -0,0 +1,122 @@ +// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <time.h> + +#include <linux/errqueue.h> +#include <linux/icmp.h> +#include <linux/in.h> +#include <linux/filter.h> +#include <sys/socket.h> +*/ +import "C" + +const ( + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_ROUTER_ALERT = C.IP_ROUTER_ALERT + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_PKTOPTIONS = C.IP_PKTOPTIONS + sysIP_MTU_DISCOVER = C.IP_MTU_DISCOVER + sysIP_RECVERR = C.IP_RECVERR + sysIP_RECVTTL = C.IP_RECVTTL + sysIP_RECVTOS = C.IP_RECVTOS + sysIP_MTU = C.IP_MTU + sysIP_FREEBIND = C.IP_FREEBIND + sysIP_TRANSPARENT = C.IP_TRANSPARENT + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_ORIGDSTADDR = C.IP_ORIGDSTADDR + sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR + sysIP_MINTTL = C.IP_MINTTL + sysIP_NODEFRAG = C.IP_NODEFRAG + sysIP_UNICAST_IF = C.IP_UNICAST_IF + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_MSFILTER = C.IP_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIP_MULTICAST_ALL = C.IP_MULTICAST_ALL + + //sysIP_PMTUDISC_DONT = C.IP_PMTUDISC_DONT + //sysIP_PMTUDISC_WANT = C.IP_PMTUDISC_WANT + //sysIP_PMTUDISC_DO = C.IP_PMTUDISC_DO + //sysIP_PMTUDISC_PROBE = C.IP_PMTUDISC_PROBE + //sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE + //sysIP_PMTUDISC_OMIT = C.IP_PMTUDISC_OMIT + + sysICMP_FILTER = C.ICMP_FILTER + + sysSO_EE_ORIGIN_NONE = C.SO_EE_ORIGIN_NONE + sysSO_EE_ORIGIN_LOCAL = C.SO_EE_ORIGIN_LOCAL + sysSO_EE_ORIGIN_ICMP = C.SO_EE_ORIGIN_ICMP + sysSO_EE_ORIGIN_ICMP6 = C.SO_EE_ORIGIN_ICMP6 + sysSO_EE_ORIGIN_TXSTATUS = C.SO_EE_ORIGIN_TXSTATUS + sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING + + sysSOL_SOCKET = C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + sizeofSockExtendedErr = C.sizeof_struct_sock_extended_err + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqn = C.sizeof_struct_ip_mreqn + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPFilter = C.sizeof_struct_icmp_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type sockExtendedErr C.struct_sock_extended_err + +type ipMreq C.struct_ip_mreq + +type ipMreqn C.struct_ip_mreqn + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type
icmpFilter C.struct_icmp_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/vendor/golang.org/x/net/ipv4/defs_netbsd.go new file mode 100644 index 0000000000..8f8af1b899 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_netbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/vendor/golang.org/x/net/ipv4/defs_openbsd.go new file mode 100644 index 0000000000..8f8af1b899 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_openbsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + + sizeofIPMreq = C.sizeof_struct_ip_mreq +) + +type ipMreq C.struct_ip_mreq diff --git a/vendor/golang.org/x/net/ipv4/defs_solaris.go b/vendor/golang.org/x/net/ipv4/defs_solaris.go new file mode 100644 index 0000000000..aeb33e9c8f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/defs_solaris.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in_addr [4]byte /* in_addr */ + +package ipv4 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +*/ +import "C" + +const ( + sysIP_OPTIONS = C.IP_OPTIONS + sysIP_HDRINCL = C.IP_HDRINCL + sysIP_TOS = C.IP_TOS + sysIP_TTL = C.IP_TTL + sysIP_RECVOPTS = C.IP_RECVOPTS + sysIP_RECVRETOPTS = C.IP_RECVRETOPTS + sysIP_RECVDSTADDR = C.IP_RECVDSTADDR + sysIP_RETOPTS = C.IP_RETOPTS + sysIP_RECVIF = C.IP_RECVIF + sysIP_RECVSLLA = C.IP_RECVSLLA + sysIP_RECVTTL = C.IP_RECVTTL + + sysIP_MULTICAST_IF = C.IP_MULTICAST_IF + sysIP_MULTICAST_TTL = C.IP_MULTICAST_TTL + sysIP_MULTICAST_LOOP = C.IP_MULTICAST_LOOP + sysIP_ADD_MEMBERSHIP = C.IP_ADD_MEMBERSHIP + sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP + sysIP_BLOCK_SOURCE = C.IP_BLOCK_SOURCE + sysIP_UNBLOCK_SOURCE = C.IP_UNBLOCK_SOURCE + sysIP_ADD_SOURCE_MEMBERSHIP = C.IP_ADD_SOURCE_MEMBERSHIP + sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP + sysIP_NEXTHOP = C.IP_NEXTHOP + + sysIP_PKTINFO = C.IP_PKTINFO + sysIP_RECVPKTINFO = C.IP_RECVPKTINFO + sysIP_DONTFRAG = C.IP_DONTFRAG + + sysIP_BOUND_IF = C.IP_BOUND_IF + sysIP_UNSPEC_SRC = C.IP_UNSPEC_SRC + sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL + sysIP_DHCPINIT_IF = C.IP_DHCPINIT_IF + + sysIP_REUSEADDR = C.IP_REUSEADDR + sysIP_DONTROUTE = C.IP_DONTROUTE + sysIP_BROADCAST = C.IP_BROADCAST + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet = C.sizeof_struct_sockaddr_in + sizeofInetPktinfo = C.sizeof_struct_in_pktinfo + + sizeofIPMreq = C.sizeof_struct_ip_mreq + sizeofIPMreqSource = C.sizeof_struct_ip_mreq_source + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet C.struct_sockaddr_in + +type inetPktinfo C.struct_in_pktinfo + +type ipMreq C.struct_ip_mreq + +type ipMreqSource C.struct_ip_mreq_source + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv4/dgramopt.go b/vendor/golang.org/x/net/ipv4/dgramopt.go new file mode 100644 index 0000000000..54d77d5fed --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/dgramopt.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastTTL returns the time-to-live field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastTTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastTTL sets the time-to-live field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group. +func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP4(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP4(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ICMPFilter returns an ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +// Currently only Linux supports this. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv4/doc.go b/vendor/golang.org/x/net/ipv4/doc.go new file mode 100644 index 0000000000..b43935a5ae --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/doc.go @@ -0,0 +1,244 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv4 implements IP-level socket options for the Internet +// Protocol version 4. +// +// The package provides IP-level socket options that allow +// manipulation of IPv4 facilities. +// +// The IPv4 protocol and basic host requirements for IPv4 are defined +// in RFC 791 and RFC 1122. +// Host extensions for multicasting and socket interface extensions +// for multicast source filters are defined in RFC 1112 and RFC 3678. +// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC +// 3376. +// Source-specific multicast is defined in RFC 4607. 
+// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv4 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the type-of-service field on the +// IPv4 header for each packet. +// +// ln, err := net.Listen("tcp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv4.NewConn(c).SetTOS(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv4 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups. +// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.IPv4(224, 0, 0, 250) +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups and starts listening +// to the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv4 and Ethernet. +// +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might exchange per-packet control messages with the +// protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, cm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if cm.Dst.IsMulticast() { +// if cm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets.
+// +// p.SetTOS(0x0) +// p.SetTTL(16) +// if _, err := p.WriteTo(data, nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// for _, ifi := range []*net.Interface{en0, en1} { +// if err := p.SetMulticastInterface(ifi); err != nil { +// // error handling +// } +// p.SetMulticastTTL(2) +// if _, err := p.WriteTo(data, nil, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn or RawConn may join multiple +// multicast groups. For example, a UDP listener with port 1024 might +// join two different groups across two different network +// interfaces by using: +// +// c, err := net.ListenPacket("udp4", "0.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv4.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with a reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. +// +// c1, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp4", "224.0.0.0:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv4.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// p2 := ipv4.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn or RawConn on an IGMPv3 +// supported platform is able to join source-specific multicast +// groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)} +// ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode.
+// +// exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that what happens when an application running on a platform +// without IGMPv3 support uses JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup depends on the platform implementation. +// In general the platform tries to fall back to conversations using +// IGMPv1 or IGMPv2 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv4 // import "golang.org/x/net/ipv4" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv4/endpoint.go b/vendor/golang.org/x/net/ipv4/endpoint.go new file mode 100644 index 0000000000..2ab8773630 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/endpoint.go @@ -0,0 +1,187 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn and RawConn are +// not implemented. + +// A Conn represents a network endpoint that uses the IPv4 transport. +// It is used to control basic IP-level socket options such as TOS and +// TTL. +type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses the +// IPv4 transport. It is used to control several IP-level socket +// options including multicasting. It also provides datagram based +// network I/O methods specific to the IPv4 and higher layer protocols +// such as UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage sets the per packet IP-level socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.PacketConn.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + p := &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } + return p +} + +// A RawConn represents a packet network endpoint that uses the IPv4 +// transport. It is used to control several IP-level socket options +// including IPv4 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv4 and higher layer +// protocols that handle IPv4 datagram directly such as OSPF, GRE. +type RawConn struct { + genericOpt + dgramOpt + packetHandler +} + +// SetControlMessage sets the per packet IP-level socket options. +func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.packetHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *RawConn) SetDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *RawConn) SetReadDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *RawConn) SetWriteDeadline(t time.Time) error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *RawConn) Close() error { + if !c.packetHandler.ok() { + return syscall.EINVAL + } + return c.packetHandler.IPConn.Close() +} + +// NewRawConn returns a new RawConn using c as its underlying +// transport. +func NewRawConn(c net.PacketConn) (*RawConn, error) { + cc, err := socket.NewConn(c.(net.Conn)) + if err != nil { + return nil, err + } + r := &RawConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + packetHandler: packetHandler{IPConn: c.(*net.IPConn), Conn: cc}, + } + so, ok := sockOpts[ssoHeaderPrepend] + if !ok { + return nil, errOpNoSupport + } + if err := so.SetInt(r.dgramOpt.Conn, boolint(true)); err != nil { + return nil, err + } + return r, nil +} diff --git a/vendor/golang.org/x/net/ipv4/gen.go b/vendor/golang.org/x/net/ipv4/gen.go new file mode 100644 index 0000000000..1bb1737f67 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", + parseICMPv4Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv4\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv4Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv4Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigDescr) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Descr == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv4Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Descr string `xml:"description"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv4ParamRecord struct { + OrigDescr string + Descr string + Value int +} + +func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", 
+ ) + for i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Descr, "Reserved") || + strings.Contains(pr.Descr, "Unassigned") || + strings.Contains(pr.Descr, "Deprecated") || + strings.Contains(pr.Descr, "Experiment") || + strings.Contains(pr.Descr, "experiment") { + continue + } + ss := strings.Split(pr.Descr, "\n") + if len(ss) > 1 { + prs[i].Descr = strings.Join(ss, " ") + } else { + prs[i].Descr = ss[0] + } + s := strings.TrimSpace(prs[i].Descr) + prs[i].OrigDescr = s + prs[i].Descr = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv4/genericopt.go b/vendor/golang.org/x/net/ipv4/genericopt.go new file mode 100644 index 0000000000..119bf841b6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/genericopt.go @@ -0,0 +1,57 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import "syscall" + +// TOS returns the type-of-service field value for outgoing packets. +func (c *genericOpt) TOS() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTOS sets the type-of-service field value for future outgoing +// packets. +func (c *genericOpt) SetTOS(tos int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTOS] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tos) +} + +// TTL returns the time-to-live field value for outgoing packets. +func (c *genericOpt) TTL() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTTL sets the time-to-live field value for future outgoing +// packets. +func (c *genericOpt) SetTTL(ttl int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTTL] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, ttl) +} diff --git a/vendor/golang.org/x/net/ipv4/header.go b/vendor/golang.org/x/net/ipv4/header.go new file mode 100644 index 0000000000..8bb0f0f4d4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/header.go @@ -0,0 +1,159 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "encoding/binary" + "fmt" + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +const ( + Version = 4 // protocol version + HeaderLen = 20 // header length without extension headers + maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields +) + +type HeaderFlags int + +const ( + MoreFragments HeaderFlags = 1 << iota // more fragments flag + DontFragment // don't fragment flag +) + +// A Header represents an IPv4 header. 
+type Header struct { + Version int // protocol version + Len int // header length + TOS int // type-of-service + TotalLen int // packet total length + ID int // identification + Flags HeaderFlags // flags + FragOff int // fragment offset + TTL int // time-to-live + Protocol int // next protocol + Checksum int // checksum + Src net.IP // source address + Dst net.IP // destination address + Options []byte // options, extension headers +} + +func (h *Header) String() string { + if h == nil { + return "" + } + return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst) +} + +// Marshal returns the binary encoding of h. +func (h *Header) Marshal() ([]byte, error) { + if h == nil { + return nil, syscall.EINVAL + } + if h.Len < HeaderLen { + return nil, errHeaderTooShort + } + hdrlen := HeaderLen + len(h.Options) + b := make([]byte, hdrlen) + b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f)) + b[1] = byte(h.TOS) + flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + case "freebsd": + if freebsdVersion < 1100000 { + socket.NativeEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + socket.NativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } else { + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + default: + binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen)) + binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff)) + } + binary.BigEndian.PutUint16(b[4:6], uint16(h.ID)) + b[8] = byte(h.TTL) + b[9] = byte(h.Protocol) + binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum)) + if ip := h.Src.To4(); ip != nil { + copy(b[12:16], ip[:net.IPv4len]) + } + if ip := h.Dst.To4(); ip != nil { + copy(b[16:20], ip[:net.IPv4len]) + } else { + return nil, errMissingAddress + } + if len(h.Options) > 0 { + copy(b[HeaderLen:], h.Options) + } + return b, nil +} + +// Parse parses b as an IPv4 header and stores the result in h.
+func (h *Header) Parse(b []byte) error { + if h == nil || len(b) < HeaderLen { + return errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + if hdrlen > len(b) { + return errBufferTooShort + } + h.Version = int(b[0] >> 4) + h.Len = hdrlen + h.TOS = int(b[1]) + h.ID = int(binary.BigEndian.Uint16(b[4:6])) + h.TTL = int(b[8]) + h.Protocol = int(b[9]) + h.Checksum = int(binary.BigEndian.Uint16(b[10:12])) + h.Src = net.IPv4(b[12], b[13], b[14], b[15]) + h.Dst = net.IPv4(b[16], b[17], b[18], b[19]) + switch runtime.GOOS { + case "darwin", "dragonfly", "netbsd": + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + hdrlen + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + case "freebsd": + if freebsdVersion < 1100000 { + h.TotalLen = int(socket.NativeEndian.Uint16(b[2:4])) + if freebsdVersion < 1000000 { + h.TotalLen += hdrlen + } + h.FragOff = int(socket.NativeEndian.Uint16(b[6:8])) + } else { + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + default: + h.TotalLen = int(binary.BigEndian.Uint16(b[2:4])) + h.FragOff = int(binary.BigEndian.Uint16(b[6:8])) + } + h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13 + h.FragOff = h.FragOff & 0x1fff + optlen := hdrlen - HeaderLen + if optlen > 0 && len(b) >= hdrlen { + if cap(h.Options) < optlen { + h.Options = make([]byte, optlen) + } else { + h.Options = h.Options[:optlen] + } + copy(h.Options, b[HeaderLen:hdrlen]) + } + return nil +} + +// ParseHeader parses b as an IPv4 header. +func ParseHeader(b []byte) (*Header, error) { + h := new(Header) + if err := h.Parse(b); err != nil { + return nil, err + } + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv4/helper.go b/vendor/golang.org/x/net/ipv4/helper.go new file mode 100644 index 0000000000..a5052e3249 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/helper.go @@ -0,0 +1,63 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errMissingHeader = errors.New("missing header") + errHeaderTooShort = errors.New("header too short") + errBufferTooShort = errors.New("buffer too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") + errNoSuchMulticastInterface = errors.New("no such multicast interface") + + // See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html. + freebsdVersion uint32 +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP4(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To4(); ip != nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv4/iana.go b/vendor/golang.org/x/net/ipv4/iana.go new file mode 100644 index 0000000000..4375b4099b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/iana.go @@ -0,0 +1,38 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
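A short round-trip sketch for the header codec above: Marshal requires Len >= HeaderLen and a destination address, and ParseHeader reverses the encoding. All field values are illustrative assumptions, and the fragment follows the package documentation's example style:

```go
// Editor's sketch, not part of the vendored sources.
h := &ipv4.Header{
	Version:  ipv4.Version,
	Len:      ipv4.HeaderLen, // Marshal rejects anything shorter
	TOS:      0x28,
	TotalLen: ipv4.HeaderLen + 64,
	TTL:      64,
	Protocol: 1, // ICMP
	Src:      net.IPv4(192, 0, 2, 1),
	Dst:      net.IPv4(198, 51, 100, 1), // Marshal fails without a destination
}
b, err := h.Marshal()
if err != nil {
	// error handling
}
parsed, err := ipv4.ParseHeader(b)
if err != nil {
	// error handling
}
fmt.Println(parsed) // ver=4 hdrlen=20 tos=0x28 ...
```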
+
+package ipv4
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26
+const (
+	ICMPTypeEchoReply              ICMPType = 0  // Echo Reply
+	ICMPTypeDestinationUnreachable ICMPType = 3  // Destination Unreachable
+	ICMPTypeRedirect               ICMPType = 5  // Redirect
+	ICMPTypeEcho                   ICMPType = 8  // Echo
+	ICMPTypeRouterAdvertisement    ICMPType = 9  // Router Advertisement
+	ICMPTypeRouterSolicitation     ICMPType = 10 // Router Solicitation
+	ICMPTypeTimeExceeded           ICMPType = 11 // Time Exceeded
+	ICMPTypeParameterProblem       ICMPType = 12 // Parameter Problem
+	ICMPTypeTimestamp              ICMPType = 13 // Timestamp
+	ICMPTypeTimestampReply         ICMPType = 14 // Timestamp Reply
+	ICMPTypePhoturis               ICMPType = 40 // Photuris
+	ICMPTypeExtendedEchoRequest    ICMPType = 42 // Extended Echo Request
+	ICMPTypeExtendedEchoReply      ICMPType = 43 // Extended Echo Reply
+)
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2018-02-26
+var icmpTypes = map[ICMPType]string{
+	0:  "echo reply",
+	3:  "destination unreachable",
+	5:  "redirect",
+	8:  "echo",
+	9:  "router advertisement",
+	10: "router solicitation",
+	11: "time exceeded",
+	12: "parameter problem",
+	13: "timestamp",
+	14: "timestamp reply",
+	40: "photuris",
+	42: "extended echo request",
+	43: "extended echo reply",
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp.go b/vendor/golang.org/x/net/ipv4/icmp.go
new file mode 100644
index 0000000000..9902bb3d2a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "golang.org/x/net/internal/iana"
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+	s, ok := icmpTypes[typ]
+	if !ok {
+		return "<nil>"
+	}
+	return s
+}
+
+// Protocol returns the ICMPv4 protocol number.
+func (typ ICMPType) Protocol() int {
+	return iana.ProtocolICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarded packets or tunnel-outer packets.
+//
+// Note: RFC 8200 defines a reasonable role model and it works not
+// only for IPv6 but also for IPv4. A node means a device that
+// implements IP. A router means a node that forwards IP packets not
+// explicitly addressed to itself, and a host means a node that is not
+// a router.
+type ICMPFilter struct {
+	icmpFilter
+}
+
+// Accept accepts incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+	f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets including the type field value
+// typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+	f.block(typ)
+}
+
+// SetAll sets the filter action for all ICMP types.
+func (f *ICMPFilter) SetAll(block bool) {
+	f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+	return f.willBlock(typ)
+}
diff --git a/vendor/golang.org/x/net/ipv4/icmp_linux.go b/vendor/golang.org/x/net/ipv4/icmp_linux.go
new file mode 100644
index 0000000000..6e1c5c80ad
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/icmp_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
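// Editor's note: a short sketch of composing the ICMPFilter API shown
// above. The Accept/Block/SetAll calls only take effect where the
// platform implements them (Linux, per icmp_linux.go below); elsewhere
// the stub makes them no-ops and WillBlock always reports false.
package main

import (
	"fmt"

	"golang.org/x/net/ipv4"
)

func main() {
	// Start from "block everything", then punch holes for the types we want.
	var f ipv4.ICMPFilter
	f.SetAll(true)
	f.Accept(ipv4.ICMPTypeEchoReply)
	f.Accept(ipv4.ICMPTypeDestinationUnreachable)
	f.Accept(ipv4.ICMPTypeTimeExceeded)

	for _, typ := range []ipv4.ICMPType{ipv4.ICMPTypeEcho, ipv4.ICMPTypeEchoReply} {
		fmt.Printf("%v blocked: %v\n", typ, f.WillBlock(typ))
	}
}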
+ +package ipv4 + +func (f *icmpFilter) accept(typ ICMPType) { + f.Data &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) block(typ ICMPType) { + f.Data |= 1 << (uint32(typ) & 31) +} + +func (f *icmpFilter) setAll(block bool) { + if block { + f.Data = 1<<32 - 1 + } else { + f.Data = 0 + } +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return f.Data&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv4/icmp_stub.go b/vendor/golang.org/x/net/ipv4/icmp_stub.go new file mode 100644 index 0000000000..21bb29ab36 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/icmp_stub.go @@ -0,0 +1,25 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +const sizeofICMPFilter = 0x0 + +type icmpFilter struct { +} + +func (f *icmpFilter) accept(typ ICMPType) { +} + +func (f *icmpFilter) block(typ ICMPType) { +} + +func (f *icmpFilter) setAll(block bool) { +} + +func (f *icmpFilter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv4/packet.go b/vendor/golang.org/x/net/ipv4/packet.go new file mode 100644 index 0000000000..f00f5b052f --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet.go @@ -0,0 +1,69 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn +// are not implemented. + +// A packetHandler represents the IPv4 datagram handler. +type packetHandler struct { + *net.IPConn + *socket.Conn + rawOpt +} + +func (c *packetHandler) ok() bool { return c != nil && c.IPConn != nil && c.Conn != nil } + +// ReadFrom reads an IPv4 datagram from the endpoint c, copying the +// datagram into b. It returns the received datagram as the IPv4 +// header h, the payload p and the control message cm. +func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + if !c.ok() { + return nil, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +func slicePacket(b []byte) (h, p []byte, err error) { + if len(b) < HeaderLen { + return nil, nil, errHeaderTooShort + } + hdrlen := int(b[0]&0x0f) << 2 + return b[:hdrlen], b[hdrlen:], nil +} + +// WriteTo writes an IPv4 datagram through the endpoint c, copying the +// datagram from the IPv4 header h and the payload p. The control +// message cm allows the datagram path and the outgoing interface to be +// specified. Currently only Darwin and Linux support this. The cm +// may be nil if control of the outgoing datagram is not required. 
+// +// The IPv4 header h must contain appropriate fields that include: +// +// Version = +// Len = +// TOS = +// TotalLen = +// ID = platform sets an appropriate value if ID is zero +// FragOff = +// TTL = +// Protocol = +// Checksum = platform sets an appropriate value if Checksum is zero +// Src = platform sets an appropriate value if Src is nil +// Dst = +// Options = optional +func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error { + if !c.ok() { + return syscall.EINVAL + } + return c.writeTo(h, p, cm) +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_8.go b/vendor/golang.org/x/net/ipv4/packet_go1_8.go new file mode 100644 index 0000000000..b47d186834 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_8.go @@ -0,0 +1,56 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package ipv4 + +import "net" + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + n, nn, _, src, err := c.ReadMsgIP(b, oob) + if err != nil { + return nil, nil, nil, err + } + var hs []byte + if hs, p, err = slicePacket(b[:n]); err != nil { + return nil, nil, nil, err + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, err + } + if nn > 0 { + cm = new(ControlMessage) + if err := cm.Parse(oob[:nn]); err != nil { + return nil, nil, nil, err + } + } + if src != nil && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + oob := cm.Marshal() + wh, err := h.Marshal() + if err != nil { + return err + } + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + wh = append(wh, p...) + _, _, err = c.WriteMsgIP(wh, oob, dst) + return err +} diff --git a/vendor/golang.org/x/net/ipv4/packet_go1_9.go b/vendor/golang.org/x/net/ipv4/packet_go1_9.go new file mode 100644 index 0000000000..082c36d73e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/packet_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.9 + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *packetHandler) readFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + if err := c.RecvMsg(&m, 0); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + var hs []byte + if hs, p, err = slicePacket(b[:m.N]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if h, err = ParseHeader(hs); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return nil, nil, nil, &net.OpError{Op: "read", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Err: err} + } + } + if src, ok := m.Addr.(*net.IPAddr); ok && cm != nil { + cm.Src = src.IP + } + return +} + +func (c *packetHandler) writeTo(h *Header, p []byte, cm *ControlMessage) error { + m := socket.Message{ + OOB: cm.Marshal(), + } + wh, err := h.Marshal() + if err != nil { + return err + } + m.Buffers = [][]byte{wh, p} + dst := new(net.IPAddr) + if cm != nil { + if ip := cm.Dst.To4(); ip != nil { + dst.IP = ip + } + } + if dst.IP == nil { + dst.IP = h.Dst + } + m.Addr = dst + if err := c.SendMsg(&m, 0); err != nil { + return &net.OpError{Op: "write", Net: c.IPConn.LocalAddr().Network(), Source: c.IPConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv4/payload.go b/vendor/golang.org/x/net/ipv4/payload.go new file mode 100644 index 0000000000..f95f811acd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload.go @@ -0,0 +1,23 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv4 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/vendor/golang.org/x/net/ipv4/payload_cmsg.go new file mode 100644 index 0000000000..3f06d76063 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. 
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go new file mode 100644 index 0000000000..d26ccd90c4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go @@ -0,0 +1,59 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + nb := make([]byte, maxHeaderLen+len(b)) + if n, nn, _, src, err = c.ReadMsgIP(nb, oob); err != nil { + return 0, nil, nil, err + } + hdrlen := int(nb[0]&0x0f) << 2 + copy(b, nb[hdrlen:]) + n -= hdrlen + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP4(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go new file mode 100644 index 0000000000..2f19311839 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go @@ -0,0 +1,67 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
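// Editor's note: the exported entry point for the readFrom variants above
// is PacketConn. A minimal sketch, assuming a local UDP listener; the
// flags request the per-packet metadata that the control-message plumbing
// above decodes, and availability varies by platform.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	// Ask the kernel to attach TTL, destination and interface info to each read.
	if err := p.SetControlMessage(ipv4.FlagTTL|ipv4.FlagDst|ipv4.FlagInterface, true); err != nil {
		log.Print(err) // not supported everywhere; see the nocmsg build below
	}
	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b) // blocks until a datagram arrives
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, cm=%v", n, src, cm)
}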
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + m.Buffers = [][]byte{b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + h := make([]byte, HeaderLen) + m.Buffers = [][]byte{h, b} + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + hdrlen := int(h[0]&0x0f) << 2 + if hdrlen > len(h) { + d := hdrlen - len(h) + copy(b, b[d:]) + m.N -= d + } else { + m.N -= hdrlen + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP4(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go new file mode 100644 index 0000000000..3926de70b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/payload_nocmsg.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv4 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv4 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv4 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the datagram path and the outgoing interface to be specified. +// Currently only Darwin and Linux support this. The cm may be nil if +// control of the outgoing datagram is not required. 
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	if dst == nil {
+		return 0, errMissingAddress
+	}
+	return c.PacketConn.WriteTo(b, dst)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt.go b/vendor/golang.org/x/net/ipv4/sockopt.go
new file mode 100644
index 0000000000..22e90c0392
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt.go
@@ -0,0 +1,44 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "golang.org/x/net/internal/socket"
+
+// Sticky socket options
+const (
+	ssoTOS                = iota // header field for unicast packet
+	ssoTTL                       // header field for unicast packet
+	ssoMulticastTTL              // header field for multicast packet
+	ssoMulticastInterface        // outbound interface for multicast packet
+	ssoMulticastLoopback         // loopback for multicast packet
+	ssoReceiveTTL                // header field on received packet
+	ssoReceiveDst                // header field on received packet
+	ssoReceiveInterface          // inbound interface on received packet
+	ssoPacketInfo                // inbound or outbound packet path
+	ssoHeaderPrepend             // ipv4 header prepend
+	ssoStripHeader               // strip ipv4 header
+	ssoICMPFilter                // icmp filter
+	ssoJoinGroup                 // any-source multicast
+	ssoLeaveGroup                // any-source multicast
+	ssoJoinSourceGroup           // source-specific multicast
+	ssoLeaveSourceGroup          // source-specific multicast
+	ssoBlockSourceGroup          // any-source or source-specific multicast
+	ssoUnblockSourceGroup        // any-source or source-specific multicast
+	ssoAttachFilter              // attach BPF for filtering inbound traffic
+)
+
+// Sticky socket option value types
+const (
+	ssoTypeIPMreq = iota + 1
+	ssoTypeIPMreqn
+	ssoTypeGroupReq
+	ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for a sticky socket option.
+type sockOpt struct {
+	socket.Option
+	typ int // hint for option value type; optional
+}
diff --git a/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/vendor/golang.org/x/net/ipv4/sockopt_posix.go
new file mode 100644
index 0000000000..e96955bc18
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sockopt_posix.go
@@ -0,0 +1,71 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
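// Editor's note: the sticky options enumerated above surface as plain
// setters on PacketConn. A small sketch with illustrative values; each
// setter resolves to one sockOpt entry in the per-OS tables that follow.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	if err := p.SetTTL(64); err != nil { // ssoTTL
		log.Print(err)
	}
	if err := p.SetTOS(0x10); err != nil { // ssoTOS, low-delay
		log.Print(err)
	}
	if err := p.SetMulticastTTL(2); err != nil { // ssoMulticastTTL
		log.Print(err)
	}
	if err := p.SetMulticastLoopback(false); err != nil { // ssoMulticastLoopback
		log.Print(err)
	}
}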
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + switch so.typ { + case ssoTypeIPMreqn: + return so.getIPMreqn(c) + default: + return so.getMulticastIf(c) + } +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + switch so.typ { + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, nil) + default: + return so.setMulticastIf(c, ifi) + } +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPFilter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPFilter]byte)(unsafe.Pointer(f))[:sizeofICMPFilter] + return so.Set(c, b) +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeIPMreqn: + return so.setIPMreqn(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/vendor/golang.org/x/net/ipv4/sockopt_stub.go new file mode 100644 index 0000000000..23249b782e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sockopt_stub.go @@ -0,0 +1,42 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq.go b/vendor/golang.org/x/net/ipv4/sys_asmreq.go new file mode 100644 index 0000000000..0388cba00c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq.go @@ -0,0 +1,119 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
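// Editor's note: the setGroup dispatch above is reached through
// JoinGroup/LeaveGroup on PacketConn. A sketch joining an any-source
// multicast group; the interface name and group address are illustrative
// assumptions (224.0.0.251:5353 happens to be the mDNS rendezvous).
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	en0, err := net.InterfaceByName("en0") // assumption: adjust for your host
	if err != nil {
		log.Fatal(err)
	}
	c, err := net.ListenPacket("udp4", "0.0.0.0:5353")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	grp := &net.UDPAddr{IP: net.IPv4(224, 0, 0, 251)}
	// Ends up in setIPMreq, setIPMreqn or setGroupReq depending on the OS table.
	if err := p.JoinGroup(en0, grp); err != nil {
		log.Fatal(err)
	}
	defer p.LeaveGroup(en0, grp)
}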
+ +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}} + if err := setIPMreqInterface(&mreq, ifi); err != nil { + return err + } + b := (*[sizeofIPMreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPMreq] + return so.Set(c, b) +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + var b [4]byte + if _, err := so.Get(c, b[:]); err != nil { + return nil, err + } + ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3])) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + ip, err := netInterfaceToIP4(ifi) + if err != nil { + return err + } + var b [4]byte + copy(b[:], ip) + return so.Set(c, b[:]) +} + +func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error { + if ifi == nil { + return nil + } + ifat, err := ifi.Addrs() + if err != nil { + return err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + copy(mreq.Interface[:], ip) + return nil + } + } + } + return errNoSuchInterface +} + +func netIP4ToInterface(ip net.IP) (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, err + } + for _, ifi := range ift { + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + case *net.IPNet: + if ip.Equal(ifa.IP) { + return &ifi, nil + } + } + } + } + return nil, errNoSuchInterface +} + +func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) { + if ifi == nil { + return net.IPv4zero.To4(), nil + } + ifat, err := ifi.Addrs() + if err != nil { + return nil, err + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + case *net.IPNet: + if ip := ifa.IP.To4(); ip != nil { + return ip, nil + } + } + } + return nil, errNoSuchInterface +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go new file mode 100644 index 0000000000..f3919208b6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go @@ -0,0 +1,25 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) getMulticastIf(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastIf(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go new file mode 100644 index 0000000000..1f24f69f3b --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + b := make([]byte, so.Len) + if _, err := so.Get(c, b); err != nil { + return nil, err + } + mreqn := (*ipMreqn)(unsafe.Pointer(&b[0])) + if mreqn.Ifindex == 0 { + return nil, nil + } + ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex)) + if err != nil { + return nil, err + } + return ifi, nil +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreqn ipMreqn + if ifi != nil { + mreqn.Ifindex = int32(ifi.Index) + } + if grp != nil { + mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]} + } + b := (*[sizeofIPMreqn]byte)(unsafe.Pointer(&mreqn))[:sizeofIPMreqn] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go new file mode 100644 index 0000000000..0711d3d786 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getIPMreqn(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setIPMreqn(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf.go b/vendor/golang.org/x/net/ipv4/sys_bpf.go new file mode 100644 index 0000000000..9f30b7308e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package ipv4 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go new file mode 100644 index 0000000000..9a2132093d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv4 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_bsd.go b/vendor/golang.org/x/net/ipv4/sys_bsd.go new file mode 100644 index 0000000000..58256dd9d6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_bsd.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
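// Editor's note: setAttachFilter above is exposed as SetBPF on PacketConn
// (Linux only, per sys_bpf_stub.go). A sketch attaching a trivial
// classic-BPF program that accepts every packet; the program itself is
// illustrative.
package main

import (
	"log"
	"net"

	"golang.org/x/net/bpf"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	// "Return up to 0xffff bytes of every packet", i.e. accept all.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.RetConstant{Val: 0xffff},
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := p.SetBPF(prog); err != nil {
		log.Print(err) // errOpNoSupport on non-Linux builds
	}
}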
+ +// +build netbsd openbsd + +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_darwin.go b/vendor/golang.org/x/net/ipv4/sys_darwin.go new file mode 100644 index 0000000000..e8fb191692 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_darwin.go @@ -0,0 +1,93 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
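// Editor's note: the ssoReceiveDst/IP_RECVDSTADDR plumbing in the table
// above is what lets a multihomed UDP server learn which local address a
// datagram was addressed to and reply from that same address. A sketch;
// pinning the reply source via ControlMessage.Src only takes effect where
// a send-side packet path exists (per the WriteTo docs earlier, Darwin
// and Linux), so this is a best-effort pattern.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:9999")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
		log.Fatal(err)
	}
	b := make([]byte, 1500)
	for {
		n, cm, src, err := p.ReadFrom(b)
		if err != nil {
			log.Fatal(err)
		}
		// Echo back, pinning our source address to the one the client used.
		var wcm *ipv4.ControlMessage
		if cm != nil {
			wcm = &ipv4.ControlMessage{Src: cm.Dst}
		}
		if _, err := p.WriteTo(b[:n], wcm, src); err != nil {
			log.Print(err)
		}
	}
}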
+ +package ipv4 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoStripHeader: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_STRIPHDR, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO + ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo + ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo + ctlOpts[ctlPacketInfo].parse = parsePacketInfo + sockOpts[ssoPacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}} + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_dragonfly.go b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go new file mode 100644 index 0000000000..859764f33a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_dragonfly.go @@ -0,0 +1,35 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
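// Editor's note: the setSourceGroup plumbing above backs the
// source-specific multicast calls on PacketConn. A sketch of an SSM join;
// the group, source and interface names are illustrative assumptions, and
// support depends on the ssoJoinSourceGroup entries in these OS tables.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	ifi, err := net.InterfaceByName("eth0") // assumption: adjust for your host
	if err != nil {
		log.Fatal(err)
	}
	c, err := net.ListenPacket("udp4", "0.0.0.0:1234")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv4.NewPacketConn(c)
	grp := &net.UDPAddr{IP: net.IPv4(232, 1, 1, 1)}    // 232/8 is the SSM range
	src := &net.UDPAddr{IP: net.IPv4(198, 51, 100, 1)} // accept only this sender
	if err := p.JoinSourceSpecificGroup(ifi, grp, src); err != nil {
		log.Fatal(err) // errOpNoSupport where ssoJoinSourceGroup is absent
	}
	defer p.LeaveSourceSpecificGroup(ifi, grp, src)
}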
+ +package ipv4 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) diff --git a/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/vendor/golang.org/x/net/ipv4/sys_freebsd.go new file mode 100644 index 0000000000..b80032454a --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_freebsd.go @@ -0,0 +1,76 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
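// Editor's note: the init() in sys_freebsd.go below populates
// freebsdVersion, which the Header Marshal/Parse code earlier consults to
// choose between native- and big-endian TotalLen/FragOff. A sketch of the
// same probe; syscall.SysctlUint32 only exists on BSD-family builds.
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// kern.osreldate is a numeric release date; 1100000 (FreeBSD 11) is the
	// cutoff at or above which raw sockets expect big-endian ip_len/ip_off.
	v, err := syscall.SysctlUint32("kern.osreldate")
	if err != nil {
		fmt.Println("sysctl unavailable:", err)
		return
	}
	fmt.Println("kern.osreldate =", v)
}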
+ +package ipv4 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_RECVTTL, 1, marshalTTL, parseTTL}, + ctlDst: {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst}, + ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoReceiveDst: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVDSTADDR, Len: 4}}, + ssoReceiveInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVIF, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate") + if freebsdVersion >= 1000000 { + sockOpts[ssoMulticastInterface] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn} + } + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_linux.go b/vendor/golang.org/x/net/ipv4/sys_linux.go new file mode 100644 index 0000000000..60defe1326 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_linux.go @@ -0,0 +1,59 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTTL: {sysIP_TTL, 1, marshalTTL, parseTTL}, + ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: sizeofIPMreqn}, typ: ssoTypeIPMreqn}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}}, + ssoPacketInfo: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_PKTINFO, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysICMP_FILTER, Len: sizeofICMPFilter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], grp) + sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_solaris.go b/vendor/golang.org/x/net/ipv4/sys_solaris.go new file mode 100644 index 0000000000..832fef1e2e --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_solaris.go @@ -0,0 +1,57 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
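// Editor's note: ssoICMPFilter in the Linux table above (ICMP_FILTER) is
// exposed as SetICMPFilter on RawConn. A sketch wiring a filter like the
// earlier icmp.go example onto a raw ICMP socket; this assumes Linux and
// sufficient privileges.
package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // root or CAP_NET_RAW
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	r, err := ipv4.NewRawConn(c)
	if err != nil {
		log.Fatal(err)
	}
	var f ipv4.ICMPFilter
	f.SetAll(true)
	f.Accept(ipv4.ICMPTypeEchoReply)
	if err := r.SetICMPFilter(&f); err != nil {
		log.Fatal(err) // errOpNoSupport off Linux
	}
}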
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/internal/socket"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:        {sysIP_RECVTTL, 4, marshalTTL, parseTTL},
+		ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+	}
+
+	sockOpts = map[int]*sockOpt{
+		ssoTOS:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}},
+		ssoTTL:                {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}},
+		ssoMulticastTTL:       {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 1}},
+		ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}},
+		ssoMulticastLoopback:  {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 1}},
+		ssoReceiveTTL:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVTTL, Len: 4}},
+		ssoPacketInfo:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_RECVPKTINFO, Len: 4}},
+		ssoHeaderPrepend:      {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}},
+		ssoJoinGroup:          {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoLeaveGroup:         {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq},
+		ssoJoinSourceGroup:    {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:   {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq},
+	}
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], src)
+}
diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go
new file mode 100644
index 0000000000..ae5704e77a
--- /dev/null
+++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq.go
@@ -0,0 +1,54 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +// +build darwin freebsd linux solaris + +package ipv4 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go new file mode 100644 index 0000000000..e6b7623d0d --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!freebsd,!linux,!solaris + +package ipv4 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv4/sys_stub.go b/vendor/golang.org/x/net/ipv4/sys_stub.go new file mode 100644 index 0000000000..4f076473bd --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv4 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv4/sys_windows.go b/vendor/golang.org/x/net/ipv4/sys_windows.go new file mode 100644 index 0000000000..b0913d539c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/sys_windows.go @@ -0,0 +1,67 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv4 + +import ( + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. 
+ sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_DONTFRAGMENT = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0xf + sysIP_DROP_SOURCE_MEMBERSHIP = 0x10 + sysIP_PKTINFO = 0x13 + + sizeofInetPktinfo = 0x8 + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc +) + +type inetPktinfo struct { + Addr [4]byte + Ifindex int32 +} + +type ipMreq struct { + Multiaddr [4]byte + Interface [4]byte +} + +type ipMreqSource struct { + Multiaddr [4]byte + Sourceaddr [4]byte + Interface [4]byte +} + +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoTOS: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TOS, Len: 4}}, + ssoTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_TTL, Len: 4}}, + ssoMulticastTTL: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_TTL, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_IF, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_MULTICAST_LOOP, Len: 4}}, + ssoHeaderPrepend: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_HDRINCL, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_ADD_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIP, Name: sysIP_DROP_MEMBERSHIP, Len: sizeofIPMreq}, typ: ssoTypeIPMreq}, + } +) + +func (pi *inetPktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/vendor/golang.org/x/net/ipv4/zsys_darwin.go new file mode 100644 index 0000000000..c07cc883fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_darwin.go @@ -0,0 +1,99 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_STRIPHDR = 0x17 + sysIP_RECVTTL = 0x18 + sysIP_BOUND_IF = 0x19 + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_MULTICAST_IFINDEX = 0x42 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* 
in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go new file mode 100644 index 0000000000..c4365e9e71 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go @@ -0,0 +1,31 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x41 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go new file mode 100644 index 0000000000..8c4aec94c8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go 
b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go new file mode 100644 index 0000000000..4b10b7c575 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go new file mode 100644 index 0000000000..4b10b7c575 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go @@ -0,0 +1,95 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_SENDSRCADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_ONESBCAST = 0x17 + sysIP_BINDANY = 0x18 + sysIP_RECVTTL = 0x41 + sysIP_MINTTL = 0x42 + sysIP_DONTFRAG = 0x43 + sysIP_RECVTOS = 0x44 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + sysIP_MULTICAST_VIF = 0xe + sysIP_ADD_SOURCE_MEMBERSHIP = 0x46 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x47 + sysIP_BLOCK_SOURCE = 0x48 + sysIP_UNBLOCK_SOURCE = 0x49 + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 
+) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go new file mode 100644 index 0000000000..c0260f0ce3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_386.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + 
Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git 
a/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go new file mode 100644 index 0000000000..c0260f0ce3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + 
sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go new file mode 100644 index 0000000000..c0260f0ce3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + 
sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + 
sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + 
sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go new file mode 100644 index 0000000000..c0260f0ce3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog 
= 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go new file mode 100644 index 0000000000..f65bd9a7a6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go @@ -0,0 +1,148 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 
+} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { 
+ Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go 
b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go new file mode 100644 index 0000000000..9c967eaa64 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go @@ -0,0 +1,150 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv4 + +const ( + sysIP_TOS = 0x1 + sysIP_TTL = 0x2 + sysIP_HDRINCL = 0x3 + sysIP_OPTIONS = 0x4 + sysIP_ROUTER_ALERT = 0x5 + sysIP_RECVOPTS = 0x6 + sysIP_RETOPTS = 0x7 + sysIP_PKTINFO = 0x8 + sysIP_PKTOPTIONS = 0x9 + sysIP_MTU_DISCOVER = 0xa + sysIP_RECVERR = 0xb + sysIP_RECVTTL = 0xc + sysIP_RECVTOS = 0xd + sysIP_MTU = 0xe + sysIP_FREEBIND = 0xf + sysIP_TRANSPARENT = 0x13 + sysIP_RECVRETOPTS = 0x7 + sysIP_ORIGDSTADDR = 0x14 + sysIP_RECVORIGDSTADDR = 0x14 + sysIP_MINTTL = 0x15 + sysIP_NODEFRAG = 0x16 + sysIP_UNICAST_IF = 0x32 + + sysIP_MULTICAST_IF = 0x20 + sysIP_MULTICAST_TTL = 0x21 + sysIP_MULTICAST_LOOP = 0x22 + sysIP_ADD_MEMBERSHIP = 0x23 + sysIP_DROP_MEMBERSHIP = 0x24 + sysIP_UNBLOCK_SOURCE = 0x25 + sysIP_BLOCK_SOURCE = 0x26 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x27 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x28 + sysIP_MSFILTER = 0x29 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIP_MULTICAST_ALL = 0x31 + + sysICMP_FILTER = 0x1 + + sysSO_EE_ORIGIN_NONE = 0x0 + sysSO_EE_ORIGIN_LOCAL = 0x1 + sysSO_EE_ORIGIN_ICMP = 0x2 + sysSO_EE_ORIGIN_ICMP6 = 0x3 + sysSO_EE_ORIGIN_TXSTATUS = 0x4 + sysSO_EE_ORIGIN_TIMESTAMPING = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + sizeofSockExtendedErr = 0x10 + + sizeofIPMreq = 0x8 + sizeofIPMreqn = 0xc + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPFilter = 0x4 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + X__pad [8]uint8 +} + +type inetPktinfo struct { + Ifindex int32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type sockExtendedErr struct { + Errno uint32 + Origin uint8 + Type uint8 + Code uint8 + Pad uint8 + Info uint32 + Data uint32 +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqn struct { + Multiaddr [4]byte /* in_addr */ + Address [4]byte /* in_addr */ + Ifindex int32 +} + +type ipMreqSource struct { + Multiaddr uint32 + Interface uint32 + Sourceaddr uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpFilter struct { + Data uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go new file mode 100644 index 0000000000..fd3624d93c --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_netbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + 
sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x14 + sysIP_RECVTTL = 0x17 + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go new file mode 100644 index 0000000000..12f36be759 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_openbsd.go @@ -0,0 +1,30 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x1e + sysIP_RECVTTL = 0x1f + + sysIP_MULTICAST_IF = 0x9 + sysIP_MULTICAST_TTL = 0xa + sysIP_MULTICAST_LOOP = 0xb + sysIP_ADD_MEMBERSHIP = 0xc + sysIP_DROP_MEMBERSHIP = 0xd + + sizeofIPMreq = 0x8 +) + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} diff --git a/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/vendor/golang.org/x/net/ipv4/zsys_solaris.go new file mode 100644 index 0000000000..0a3875cc41 --- /dev/null +++ b/vendor/golang.org/x/net/ipv4/zsys_solaris.go @@ -0,0 +1,100 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv4 + +const ( + sysIP_OPTIONS = 0x1 + sysIP_HDRINCL = 0x2 + sysIP_TOS = 0x3 + sysIP_TTL = 0x4 + sysIP_RECVOPTS = 0x5 + sysIP_RECVRETOPTS = 0x6 + sysIP_RECVDSTADDR = 0x7 + sysIP_RETOPTS = 0x8 + sysIP_RECVIF = 0x9 + sysIP_RECVSLLA = 0xa + sysIP_RECVTTL = 0xb + + sysIP_MULTICAST_IF = 0x10 + sysIP_MULTICAST_TTL = 0x11 + sysIP_MULTICAST_LOOP = 0x12 + sysIP_ADD_MEMBERSHIP = 0x13 + sysIP_DROP_MEMBERSHIP = 0x14 + sysIP_BLOCK_SOURCE = 0x15 + sysIP_UNBLOCK_SOURCE = 0x16 + sysIP_ADD_SOURCE_MEMBERSHIP = 0x17 + sysIP_DROP_SOURCE_MEMBERSHIP = 0x18 + sysIP_NEXTHOP = 0x19 + + sysIP_PKTINFO = 0x1a + sysIP_RECVPKTINFO = 0x1a + sysIP_DONTFRAG = 0x1b + + sysIP_BOUND_IF = 0x41 + sysIP_UNSPEC_SRC = 0x42 + sysIP_BROADCAST_TTL = 0x43 + sysIP_DHCPINIT_IF = 0x45 + + sysIP_REUSEADDR = 0x104 + sysIP_DONTROUTE = 0x105 + sysIP_BROADCAST = 0x106 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet = 0x10 + sizeofInetPktinfo = 0xc + + sizeofIPMreq = 0x8 + sizeofIPMreqSource = 0xc + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type inetPktinfo struct { + Ifindex uint32 + Spec_dst [4]byte /* in_addr */ + Addr [4]byte /* in_addr */ +} + +type ipMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type ipMreqSource struct { + Multiaddr [4]byte /* in_addr */ + Sourceaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} diff --git a/vendor/golang.org/x/net/ipv6/batch.go 
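
An aside on the zsys_* tables above: the per-GOOS/GOARCH files generated by cgo -godefs differ mostly in ABI padding. On 64-bit Linux targets, groupReq carries an explicit Pad_cgo_0 [4]byte so the embedded 128-byte kernel sockaddr_storage sits at the same offset as in the C struct group_req (which is 8-byte aligned there), giving sizeofGroupReq = 0x88 versus 0x84 on 32-bit targets. A minimal standalone sketch verifying that arithmetic with unsafe.Sizeof — the struct names here are local mirrors for illustration, since the real ones are unexported in package ipv4:

package main

import (
	"fmt"
	"unsafe"
)

// Local mirror of the generated linux/amd64 layout (illustration only).
type kernelSockaddrStorage struct {
	Family  uint16
	X__data [126]int8 // 2 + 126 = 128 bytes total
}

type groupReq struct {
	Interface uint32
	Pad_cgo_0 [4]byte // present only in the 64-bit tables: reproduces the
	// C struct's 8-byte alignment of the embedded sockaddr_storage
	Group kernelSockaddrStorage
}

func main() {
	// 4 + 4 + 128 = 136 = 0x88, matching sizeofGroupReq in
	// zsys_linux_amd64.go; dropping the pad field gives 0x84, the
	// value in the 32-bit tables (386, arm, mips, mipsle, ppc).
	fmt.Printf("sizeof(groupReq) = %#x\n", unsafe.Sizeof(groupReq{}))
}
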
b/vendor/golang.org/x/net/ipv6/batch.go new file mode 100644 index 0000000000..4f5fe683d5 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/batch.go @@ -0,0 +1,119 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package ipv6 + +import ( + "net" + "runtime" + "syscall" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ReadBatch and WriteBatch methods of +// PacketConn are not implemented. + +// A Message represents an IO message. +// +// type Message struct { +// Buffers [][]byte +// OOB []byte +// Addr net.Addr +// N int +// NN int +// Flags int +// } +// +// The Buffers fields represents a list of contiguous buffers, which +// can be used for vectored IO, for example, putting a header and a +// payload in each slice. +// When writing, the Buffers field must contain at least one byte to +// write. +// When reading, the Buffers field will always contain a byte to read. +// +// The OOB field contains protocol-specific control or miscellaneous +// ancillary data known as out-of-band data. +// It can be nil when not required. +// +// The Addr field specifies a destination address when writing. +// It can be nil when the underlying protocol of the endpoint uses +// connection-oriented communication. +// After a successful read, it may contain the source address on the +// received packet. +// +// The N field indicates the number of bytes read or written from/to +// Buffers. +// +// The NN field indicates the number of bytes read or written from/to +// OOB. +// +// The Flags field contains protocol-specific information on the +// received message. +type Message = socket.Message + +// ReadBatch reads a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_PEEK. +// +// On a successful read it returns the number of messages received, up +// to len(ms). +// +// On Linux, a batch read will be optimized. +// On other platforms, this method will read only a single message. +func (c *payloadHandler) ReadBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.RecvMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.RecvMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} + +// WriteBatch writes a batch of messages. +// +// The provided flags is a set of platform-dependent flags, such as +// syscall.MSG_DONTROUTE. +// +// It returns the number of messages written on a successful write. +// +// On Linux, a batch write will be optimized. +// On other platforms, this method will write only a single message. 
+func (c *payloadHandler) WriteBatch(ms []Message, flags int) (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + switch runtime.GOOS { + case "linux": + n, err := c.SendMsgs([]socket.Message(ms), flags) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + default: + n := 1 + err := c.SendMsg(&ms[0], flags) + if err != nil { + n = 0 + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + return n, err + } +} diff --git a/vendor/golang.org/x/net/ipv6/control.go b/vendor/golang.org/x/net/ipv6/control.go new file mode 100644 index 0000000000..2da644413b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control.go @@ -0,0 +1,187 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +// Note that RFC 3542 obsoletes RFC 2292 but OS X Snow Leopard and the +// former still support RFC 2292 only. Please be aware that almost +// all protocol implementations prohibit using a combination of RFC +// 2292 and RFC 3542 for some practical reasons. + +type rawOpt struct { + sync.RWMutex + cflags ControlFlags +} + +func (c *rawOpt) set(f ControlFlags) { c.cflags |= f } +func (c *rawOpt) clear(f ControlFlags) { c.cflags &^= f } +func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 } + +// A ControlFlags represents per packet basis IP-level socket option +// control flags. +type ControlFlags uint + +const ( + FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet + FlagHopLimit // pass the hop limit on the received packet + FlagSrc // pass the source address on the received packet + FlagDst // pass the destination address on the received packet + FlagInterface // pass the interface index on the received packet + FlagPathMTU // pass the path MTU on the received packet path +) + +const flagPacketInfo = FlagDst | FlagInterface + +// A ControlMessage represents per packet basis IP-level socket +// options. +type ControlMessage struct { + // Receiving socket options: SetControlMessage allows to + // receive the options from the protocol stack using ReadFrom + // method of PacketConn. + // + // Specifying socket options: ControlMessage for WriteTo + // method of PacketConn allows to send the options to the + // protocol stack. + // + TrafficClass int // traffic class, must be 1 <= value <= 255 when specifying + HopLimit int // hop limit, must be 1 <= value <= 255 when specifying + Src net.IP // source address, specifying only + Dst net.IP // destination address, receiving only + IfIndex int // interface index, must be 1 <= value when specifying + NextHop net.IP // next hop address, specifying only + MTU int // path MTU, receiving only +} + +func (cm *ControlMessage) String() string { + if cm == nil { + return "" + } + return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU) +} + +// Marshal returns the binary encoding of cm. 
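
The Message/ReadBatch API vendored in batch.go above is the package's public batch-IO surface: ipv6.Message aliases socket.Message, and *ipv6.PacketConn exposes ReadBatch and WriteBatch (behind the go1.9 build tag shown). A minimal receive-side sketch; the loopback listener address is an assumption for illustration:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0") // assumed local endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)

	// One Message per datagram; Buffers supports vectored IO, e.g. a
	// header slice plus a payload slice.
	ms := make([]ipv6.Message, 8)
	for i := range ms {
		ms[i].Buffers = [][]byte{make([]byte, 1500)}
	}
	// Per the docs above: on Linux this is optimized into a single batch
	// read; on other platforms it degrades to reading one message.
	n, err := p.ReadBatch(ms, 0)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range ms[:n] {
		log.Printf("%d payload bytes from %v", m.N, m.Addr)
	}
}
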
+func (cm *ControlMessage) Marshal() []byte { + if cm == nil { + return nil + } + var l int + tclass := false + if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 { + tclass = true + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + hoplimit := false + if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 { + hoplimit = true + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + pktinfo := false + if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) { + pktinfo = true + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + nexthop := false + if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil { + nexthop = true + l += socket.ControlMessageSpace(ctlOpts[ctlNextHop].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + bb := b + if tclass { + bb = ctlOpts[ctlTrafficClass].marshal(bb, cm) + } + if hoplimit { + bb = ctlOpts[ctlHopLimit].marshal(bb, cm) + } + if pktinfo { + bb = ctlOpts[ctlPacketInfo].marshal(bb, cm) + } + if nexthop { + bb = ctlOpts[ctlNextHop].marshal(bb, cm) + } + } + return b +} + +// Parse parses b as a control message and stores the result in cm. +func (cm *ControlMessage) Parse(b []byte) error { + ms, err := socket.ControlMessage(b).Parse() + if err != nil { + return err + } + for _, m := range ms { + lvl, typ, l, err := m.ParseHeader() + if err != nil { + return err + } + if lvl != iana.ProtocolIPv6 { + continue + } + switch { + case typ == ctlOpts[ctlTrafficClass].name && l >= ctlOpts[ctlTrafficClass].length: + ctlOpts[ctlTrafficClass].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlHopLimit].name && l >= ctlOpts[ctlHopLimit].length: + ctlOpts[ctlHopLimit].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPacketInfo].name && l >= ctlOpts[ctlPacketInfo].length: + ctlOpts[ctlPacketInfo].parse(cm, m.Data(l)) + case typ == ctlOpts[ctlPathMTU].name && l >= ctlOpts[ctlPathMTU].length: + ctlOpts[ctlPathMTU].parse(cm, m.Data(l)) + } + } + return nil +} + +// NewControlMessage returns a new control message. +// +// The returned message is large enough for options specified by cf. +func NewControlMessage(cf ControlFlags) []byte { + opt := rawOpt{cflags: cf} + var l int + if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlTrafficClass].length) + } + if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlHopLimit].length) + } + if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPacketInfo].length) + } + if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 { + l += socket.ControlMessageSpace(ctlOpts[ctlPathMTU].length) + } + var b []byte + if l > 0 { + b = make([]byte, l) + } + return b +} + +// Ancillary data socket options +const ( + ctlTrafficClass = iota // header field + ctlHopLimit // header field + ctlPacketInfo // inbound or outbound packet path + ctlNextHop // nexthop + ctlPathMTU // path mtu + ctlMax +) + +// A ctlOpt represents a binding for ancillary data socket option. 
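
NewControlMessage and Parse above pair naturally with the batch API: allocate an OOB buffer sized for the requested flags, let the kernel fill it, then decode whatever came back. A sketch under the same assumptions as the previous example (local UDP listener; error paths abbreviated):

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0") // assumed local endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)

	cf := ipv6.FlagHopLimit | ipv6.FlagDst | ipv6.FlagInterface
	// Ancillary data only arrives if the receive options are switched on.
	if err := p.SetControlMessage(cf, true); err != nil {
		log.Println(err) // not supported on every platform
	}

	ms := []ipv6.Message{{
		Buffers: [][]byte{make([]byte, 1500)},
		OOB:     ipv6.NewControlMessage(cf), // sized for exactly these flags
	}}
	if _, err := p.ReadBatch(ms, 0); err != nil {
		log.Fatal(err)
	}
	// NN reports how many OOB bytes the kernel actually wrote.
	var cm ipv6.ControlMessage
	if err := cm.Parse(ms[0].OOB[:ms[0].NN]); err != nil {
		log.Fatal(err)
	}
	log.Printf("hoplim=%d dst=%v ifindex=%d", cm.HopLimit, cm.Dst, cm.IfIndex)
}
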
+type ctlOpt struct { + name int // option name, must be equal or greater than 1 + length int // option length + marshal func([]byte, *ControlMessage) []byte + parse func(*ControlMessage, []byte) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go new file mode 100644 index 0000000000..9fd9eb15e3 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func marshal2292NextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_2292NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} diff --git a/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go new file mode 100644 index 0000000000..eec529c205 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +func marshalTrafficClass(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_TCLASS, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.TrafficClass)) + } + return m.Next(4) +} + +func parseTrafficClass(cm *ControlMessage, b []byte) { + cm.TrafficClass = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalHopLimit(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_HOPLIMIT, 4) + if cm != nil { + socket.NativeEndian.PutUint32(m.Data(4), uint32(cm.HopLimit)) + } + return m.Next(4) +} + +func parseHopLimit(cm *ControlMessage, b []byte) { + cm.HopLimit = int(socket.NativeEndian.Uint32(b[:4])) +} + +func marshalPacketInfo(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PKTINFO, sizeofInet6Pktinfo) + if cm != nil { + pi := (*inet6Pktinfo)(unsafe.Pointer(&m.Data(sizeofInet6Pktinfo)[0])) + if ip := cm.Src.To16(); ip != nil && ip.To4() == nil { + copy(pi.Addr[:], ip) + } + if cm.IfIndex > 0 { + pi.setIfindex(cm.IfIndex) + } + } + return m.Next(sizeofInet6Pktinfo) +} + +func parsePacketInfo(cm *ControlMessage, b []byte) { + pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, pi.Addr[:]) + cm.IfIndex = int(pi.Ifindex) +} + +func marshalNextHop(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_NEXTHOP, sizeofSockaddrInet6) + if cm != nil { + sa := (*sockaddrInet6)(unsafe.Pointer(&m.Data(sizeofSockaddrInet6)[0])) + sa.setSockaddr(cm.NextHop, cm.IfIndex) + } + return m.Next(sizeofSockaddrInet6) +} + +func parseNextHop(cm *ControlMessage, b []byte) { +} + +func marshalPathMTU(b []byte, cm *ControlMessage) []byte { + m := socket.ControlMessage(b) + m.MarshalHeader(iana.ProtocolIPv6, sysIPV6_PATHMTU, sizeofIPv6Mtuinfo) + return m.Next(sizeofIPv6Mtuinfo) +} + +func parsePathMTU(cm *ControlMessage, b []byte) { + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if len(cm.Dst) < net.IPv6len { + cm.Dst = make(net.IP, net.IPv6len) + } + copy(cm.Dst, mi.Addr.Addr[:]) + cm.IfIndex = int(mi.Addr.Scope_id) + cm.MTU = int(mi.Mtu) +} diff --git a/vendor/golang.org/x/net/ipv6/control_stub.go b/vendor/golang.org/x/net/ipv6/control_stub.go new file mode 100644 index 0000000000..a045f28f74 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_stub.go @@ -0,0 +1,13 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/control_unix.go b/vendor/golang.org/x/net/ipv6/control_unix.go new file mode 100644 index 0000000000..66515060a8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_unix.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package ipv6 + +import "golang.org/x/net/internal/socket" + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + opt.Lock() + defer opt.Unlock() + if so, ok := sockOpts[ssoReceiveTrafficClass]; ok && cf&FlagTrafficClass != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagTrafficClass) + } else { + opt.clear(FlagTrafficClass) + } + } + if so, ok := sockOpts[ssoReceiveHopLimit]; ok && cf&FlagHopLimit != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagHopLimit) + } else { + opt.clear(FlagHopLimit) + } + } + if so, ok := sockOpts[ssoReceivePacketInfo]; ok && cf&flagPacketInfo != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(cf & flagPacketInfo) + } else { + opt.clear(cf & flagPacketInfo) + } + } + if so, ok := sockOpts[ssoReceivePathMTU]; ok && cf&FlagPathMTU != 0 { + if err := so.SetInt(c, boolint(on)); err != nil { + return err + } + if on { + opt.set(FlagPathMTU) + } else { + opt.clear(FlagPathMTU) + } + } + return nil +} diff --git a/vendor/golang.org/x/net/ipv6/control_windows.go b/vendor/golang.org/x/net/ipv6/control_windows.go new file mode 100644 index 0000000000..ef2563b3fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/control_windows.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "syscall" + + "golang.org/x/net/internal/socket" +) + +func setControlMessage(c *socket.Conn, opt *rawOpt, cf ControlFlags, on bool) error { + // TODO(mikio): implement this + return syscall.EWINDOWS +} diff --git a/vendor/golang.org/x/net/ipv6/defs_darwin.go b/vendor/golang.org/x/net/ipv6/defs_darwin.go new file mode 100644 index 0000000000..55ddc116fc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_darwin.go @@ -0,0 +1,112 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#define __APPLE_USE_RFC_3542 +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_2292NEXTHOP = C.IPV6_2292NEXTHOP + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req diff --git a/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go new file mode 100644 index 0000000000..a4c383a515 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_dragonfly.go @@ -0,0 +1,84 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/vendor/golang.org/x/net/ipv6/defs_freebsd.go new file mode 100644 index 0000000000..53e625389a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_freebsd.go @@ -0,0 +1,105 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR + + sysIPV6_BINDANY = C.IPV6_BINDANY + + sysIPV6_MSFILTER = C.IPV6_MSFILTER + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_linux.go b/vendor/golang.org/x/net/ipv6/defs_linux.go new file mode 100644 index 0000000000..3308cb2c38 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_linux.go @@ -0,0 +1,147 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/ipv6.h> +#include <linux/icmpv6.h> +#include <linux/filter.h> +#include <sys/socket.h> +*/ +import "C" + +const ( + sysIPV6_ADDRFORM = C.IPV6_ADDRFORM + sysIPV6_2292PKTINFO = C.IPV6_2292PKTINFO + sysIPV6_2292HOPOPTS = C.IPV6_2292HOPOPTS + sysIPV6_2292DSTOPTS = C.IPV6_2292DSTOPTS + sysIPV6_2292RTHDR = C.IPV6_2292RTHDR + sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_FLOWINFO = C.IPV6_FLOWINFO + + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_ADD_MEMBERSHIP = C.IPV6_ADD_MEMBERSHIP + sysIPV6_DROP_MEMBERSHIP = C.IPV6_DROP_MEMBERSHIP + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_MSFILTER = C.MCAST_MSFILTER + sysIPV6_ROUTER_ALERT = C.IPV6_ROUTER_ALERT + sysIPV6_MTU_DISCOVER = C.IPV6_MTU_DISCOVER + sysIPV6_MTU = C.IPV6_MTU + sysIPV6_RECVERR = C.IPV6_RECVERR + sysIPV6_V6ONLY = C.IPV6_V6ONLY + sysIPV6_JOIN_ANYCAST = C.IPV6_JOIN_ANYCAST + sysIPV6_LEAVE_ANYCAST = C.IPV6_LEAVE_ANYCAST + + //sysIPV6_PMTUDISC_DONT = C.IPV6_PMTUDISC_DONT + //sysIPV6_PMTUDISC_WANT = C.IPV6_PMTUDISC_WANT + //sysIPV6_PMTUDISC_DO = C.IPV6_PMTUDISC_DO + //sysIPV6_PMTUDISC_PROBE = C.IPV6_PMTUDISC_PROBE + //sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE + //sysIPV6_PMTUDISC_OMIT = C.IPV6_PMTUDISC_OMIT + + sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR + sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + sysIPV6_XFRM_POLICY = C.IPV6_XFRM_POLICY + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_TCLASS = C.IPV6_TCLASS + + sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES + + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + + sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT + + sysIPV6_ORIGDSTADDR = C.IPV6_ORIGDSTADDR + sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR + sysIPV6_TRANSPARENT = C.IPV6_TRANSPARENT + sysIPV6_UNICAST_IF = C.IPV6_UNICAST_IF + + sysICMPV6_FILTER = C.ICMPV6_FILTER + + sysICMPV6_FILTER_BLOCK = C.ICMPV6_FILTER_BLOCK + sysICMPV6_FILTER_PASS = C.ICMPV6_FILTER_PASS + sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS + sysICMPV6_FILTER_PASSONLY = C.ICMPV6_FILTER_PASSONLY + + sysSOL_SOCKET =
C.SOL_SOCKET + sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER + + sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + sizeofIPv6FlowlabelReq = C.sizeof_struct_in6_flowlabel_req + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter + + sizeofSockFprog = C.sizeof_struct_sock_fprog +) + +type kernelSockaddrStorage C.struct___kernel_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6FlowlabelReq C.struct_in6_flowlabel_req + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter + +type sockFProg C.struct_sock_fprog + +type sockFilter C.struct_sock_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/vendor/golang.org/x/net/ipv6/defs_netbsd.go new file mode 100644 index 0000000000..be9ceb9cc0 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_netbsd.go @@ -0,0 +1,80 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/vendor/golang.org/x/net/ipv6/defs_openbsd.go
new file mode 100644 index 0000000000..177ddf87d2 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_openbsd.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/param.h> +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + sysIPV6_PORTRANGE = C.IPV6_PORTRANGE + sysICMP6_FILTER = C.ICMP6_FILTER + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + + sysIPV6_PATHMTU = C.IPV6_PATHMTU + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + sysIPV6_RTHDR = C.IPV6_RTHDR + + sysIPV6_AUTH_LEVEL = C.IPV6_AUTH_LEVEL + sysIPV6_ESP_TRANS_LEVEL = C.IPV6_ESP_TRANS_LEVEL + sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL + sysIPSEC6_OUTSA = C.IPSEC6_OUTSA + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + + sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL + sysIPV6_IPCOMP_LEVEL = C.IPV6_IPCOMP_LEVEL + + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_PIPEX = C.IPV6_PIPEX + + sysIPV6_RTABLE = C.IPV6_RTABLE + + sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT + sysIPV6_PORTRANGE_HIGH = C.IPV6_PORTRANGE_HIGH + sysIPV6_PORTRANGE_LOW = C.IPV6_PORTRANGE_LOW + + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/defs_solaris.go b/vendor/golang.org/x/net/ipv6/defs_solaris.go new file mode 100644 index 0000000000..0f8ce2b46a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/defs_solaris.go @@ -0,0 +1,114 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build ignore + +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package ipv6 + +/* +#include <sys/socket.h> + +#include <netinet/in.h> +#include <netinet/icmp6.h> +*/ +import "C" + +const ( + sysIPV6_UNICAST_HOPS = C.IPV6_UNICAST_HOPS + sysIPV6_MULTICAST_IF = C.IPV6_MULTICAST_IF + sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS + sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP + sysIPV6_JOIN_GROUP = C.IPV6_JOIN_GROUP + sysIPV6_LEAVE_GROUP = C.IPV6_LEAVE_GROUP + + sysIPV6_PKTINFO = C.IPV6_PKTINFO + + sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT + sysIPV6_NEXTHOP = C.IPV6_NEXTHOP + sysIPV6_HOPOPTS = C.IPV6_HOPOPTS + sysIPV6_DSTOPTS = C.IPV6_DSTOPTS + + sysIPV6_RTHDR = C.IPV6_RTHDR + sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS + + sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO + sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT + sysIPV6_RECVHOPOPTS = C.IPV6_RECVHOPOPTS + + sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR + + sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS + + sysIPV6_CHECKSUM = C.IPV6_CHECKSUM + sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS + sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU + sysIPV6_DONTFRAG = C.IPV6_DONTFRAG + sysIPV6_SEC_OPT = C.IPV6_SEC_OPT + sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES + sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU + sysIPV6_PATHMTU = C.IPV6_PATHMTU + sysIPV6_TCLASS = C.IPV6_TCLASS + sysIPV6_V6ONLY = C.IPV6_V6ONLY + + sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS + + sysMCAST_JOIN_GROUP = C.MCAST_JOIN_GROUP + sysMCAST_LEAVE_GROUP = C.MCAST_LEAVE_GROUP + sysMCAST_BLOCK_SOURCE = C.MCAST_BLOCK_SOURCE + sysMCAST_UNBLOCK_SOURCE = C.MCAST_UNBLOCK_SOURCE + sysMCAST_JOIN_SOURCE_GROUP = C.MCAST_JOIN_SOURCE_GROUP + sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP + + sysIPV6_PREFER_SRC_HOME = C.IPV6_PREFER_SRC_HOME + sysIPV6_PREFER_SRC_COA = C.IPV6_PREFER_SRC_COA + sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC + sysIPV6_PREFER_SRC_TMP = C.IPV6_PREFER_SRC_TMP + sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA + sysIPV6_PREFER_SRC_CGA = C.IPV6_PREFER_SRC_CGA + + sysIPV6_PREFER_SRC_MIPMASK = C.IPV6_PREFER_SRC_MIPMASK + sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT + sysIPV6_PREFER_SRC_TMPMASK = C.IPV6_PREFER_SRC_TMPMASK + sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT + sysIPV6_PREFER_SRC_CGAMASK = C.IPV6_PREFER_SRC_CGAMASK + sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT + + sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK + + sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT + + sysIPV6_BOUND_IF = C.IPV6_BOUND_IF + sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC + + sysICMP6_FILTER = C.ICMP6_FILTER + + sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage + sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + sizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + sizeofIPv6Mtuinfo = C.sizeof_struct_ip6_mtuinfo + + sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + sizeofGroupReq = C.sizeof_struct_group_req + sizeofGroupSourceReq = C.sizeof_struct_group_source_req + + sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +type sockaddrStorage C.struct_sockaddr_storage + +type sockaddrInet6 C.struct_sockaddr_in6 + +type inet6Pktinfo C.struct_in6_pktinfo + +type ipv6Mtuinfo C.struct_ip6_mtuinfo + +type ipv6Mreq C.struct_ipv6_mreq + +type groupReq C.struct_group_req + +type groupSourceReq C.struct_group_source_req + +type icmpv6Filter C.struct_icmp6_filter diff --git a/vendor/golang.org/x/net/ipv6/dgramopt.go b/vendor/golang.org/x/net/ipv6/dgramopt.go new file mode 100644 index 0000000000..703dafe84a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/dgramopt.go @@ -0,0 +1,302 @@ +//
Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/bpf" +) + +// MulticastHopLimit returns the hop limit field value for outgoing +// multicast packets. +func (c *dgramOpt) MulticastHopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetMulticastHopLimit sets the hop limit field value for future +// outgoing multicast packets. +func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} + +// MulticastInterface returns the default interface for multicast +// packet transmissions. +func (c *dgramOpt) MulticastInterface() (*net.Interface, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return nil, errOpNoSupport + } + return so.getMulticastInterface(c.Conn) +} + +// SetMulticastInterface sets the default interface for future +// multicast packet transmissions. +func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastInterface] + if !ok { + return errOpNoSupport + } + return so.setMulticastInterface(c.Conn, ifi) +} + +// MulticastLoopback reports whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) MulticastLoopback() (bool, error) { + if !c.ok() { + return false, syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return false, errOpNoSupport + } + on, err := so.GetInt(c.Conn) + if err != nil { + return false, err + } + return on == 1, nil +} + +// SetMulticastLoopback sets whether transmitted multicast packets +// should be copied and sent back to the originator. +func (c *dgramOpt) SetMulticastLoopback(on bool) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoMulticastLoopback] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, boolint(on)) +} + +// JoinGroup joins the group address group on the interface ifi. +// By default all sources that can cast data to group are accepted. +// It's possible to mute and unmute data transmission from a specific +// source by using ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup. +// JoinGroup uses the system assigned multicast interface when ifi is +// nil, although this is not recommended because the assignment +// depends on platforms and sometimes it might require routing +// configuration. +func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// LeaveGroup leaves the group address group on the interface ifi +// regardless of whether the group is any-source group or +// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + return so.setGroup(c.Conn, ifi, grp) +} + +// JoinSourceSpecificGroup joins the source-specific group comprising +// group and source on the interface ifi. +// JoinSourceSpecificGroup uses the system assigned multicast +// interface when ifi is nil, although this is not recommended because +// the assignment depends on platforms and sometimes it might require +// routing configuration. +func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoJoinSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// LeaveSourceSpecificGroup leaves the source-specific group on the +// interface ifi. +func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoLeaveSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// ExcludeSourceSpecificGroup excludes the source-specific group from +// the already joined any-source groups by JoinGroup on the interface +// ifi. +func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoBlockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// IncludeSourceSpecificGroup includes the excluded source-specific +// group by ExcludeSourceSpecificGroup again on the interface ifi. +func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoUnblockSourceGroup] + if !ok { + return errOpNoSupport + } + grp := netAddrToIP16(group) + if grp == nil { + return errMissingAddress + } + src := netAddrToIP16(source) + if src == nil { + return errMissingAddress + } + return so.setSourceGroup(c.Conn, ifi, grp, src) +} + +// Checksum reports whether the kernel will compute, store or verify a +// checksum for both incoming and outgoing packets. If on is true, it +// returns an offset in bytes into the data of where the checksum +// field is located. +func (c *dgramOpt) Checksum() (on bool, offset int, err error) { + if !c.ok() { + return false, 0, syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return false, 0, errOpNoSupport + } + offset, err = so.GetInt(c.Conn) + if err != nil { + return false, 0, err + } + if offset < 0 { + return false, 0, nil + } + return true, offset, nil +} + +// SetChecksum enables the kernel checksum processing. If on is true, +// the offset should be an offset in bytes into the data of where the +// checksum field is located.
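As a usage sketch for the method below: the offset is measured in bytes from the start of the transport-layer payload, so for a raw ip6:ipv6-icmp connection, where the ICMPv6 checksum sits at offset 2, a caller would typically write (p is an *ipv6.PacketConn; note that some kernels manage ICMPv6 checksums themselves and may reject an explicit offset for that protocol):

	if err := p.SetChecksum(true, 2); err != nil {
		// error handling
	}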
+func (c *dgramOpt) SetChecksum(on bool, offset int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoChecksum] + if !ok { + return errOpNoSupport + } + if !on { + offset = -1 + } + return so.SetInt(c.Conn, offset) +} + +// ICMPFilter returns an ICMP filter. +func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) { + if !c.ok() { + return nil, syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return nil, errOpNoSupport + } + return so.getICMPFilter(c.Conn) +} + +// SetICMPFilter deploys the ICMP filter. +func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoICMPFilter] + if !ok { + return errOpNoSupport + } + return so.setICMPFilter(c.Conn, f) +} + +// SetBPF attaches a BPF program to the connection. +// +// Only supported on Linux. +func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoAttachFilter] + if !ok { + return errOpNoSupport + } + return so.setBPF(c.Conn, filter) +} diff --git a/vendor/golang.org/x/net/ipv6/doc.go b/vendor/golang.org/x/net/ipv6/doc.go new file mode 100644 index 0000000000..664a97dea1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/doc.go @@ -0,0 +1,243 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ipv6 implements IP-level socket options for the Internet +// Protocol version 6. +// +// The package provides IP-level socket options that allow +// manipulation of IPv6 facilities. +// +// The IPv6 protocol is defined in RFC 8200. +// Socket interface extensions are defined in RFC 3493, RFC 3542 and +// RFC 3678. +// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810. +// Source-specific multicast is defined in RFC 4607. +// +// On Darwin, this package requires OS X Mavericks version 10.9 or +// above, or equivalent. +// +// +// Unicasting +// +// The options for unicasting are available for net.TCPConn, +// net.UDPConn and net.IPConn which are created as network connections +// that use the IPv6 transport. When a single TCP connection carrying +// a data flow of multiple packets needs to indicate the flow is +// important, Conn is used to set the traffic class field on the IPv6 +// header for each packet. +// +// ln, err := net.Listen("tcp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer ln.Close() +// for { +// c, err := ln.Accept() +// if err != nil { +// // error handling +// } +// go func(c net.Conn) { +// defer c.Close() +// +// The outgoing packets will be labeled DiffServ assured forwarding +// class 1 low drop precedence, known as AF11 packets. +// +// if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil { +// // error handling +// } +// if _, err := c.Write(data); err != nil { +// // error handling +// } +// }(c) +// } +// +// +// Multicasting +// +// The options for multicasting are available for net.UDPConn and +// net.IPConn which are created as network connections that use the +// IPv6 transport. A few network facilities must be prepared before +// you begin multicasting, at a minimum joining network interfaces and +// multicast groups.
+// +// en0, err := net.InterfaceByName("en0") +// if err != nil { +// // error handling +// } +// en1, err := net.InterfaceByIndex(911) +// if err != nil { +// // error handling +// } +// group := net.ParseIP("ff02::114") +// +// First, an application listens to an appropriate address with an +// appropriate service port. +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// +// Second, the application joins multicast groups, starts listening to +// the groups on the specified network interfaces. Note that the +// service port for transport layer protocol does not matter with this +// operation as joining groups affects only network and link layer +// protocols, such as IPv6 and Ethernet. +// +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil { +// // error handling +// } +// +// The application might set per packet control message transmissions +// between the protocol stack within the kernel. When the application +// needs a destination address on an incoming packet, +// SetControlMessage of PacketConn is used to enable control message +// transmissions. +// +// if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil { +// // error handling +// } +// +// The application could identify whether the received packets are +// of interest by using the control message that contains the +// destination address of the received packet. +// +// b := make([]byte, 1500) +// for { +// n, rcm, src, err := p.ReadFrom(b) +// if err != nil { +// // error handling +// } +// if rcm.Dst.IsMulticast() { +// if rcm.Dst.Equal(group) { +// // joined group, do something +// } else { +// // unknown group, discard +// continue +// } +// } +// +// The application can also send both unicast and multicast packets. +// +// p.SetTrafficClass(0x0) +// p.SetHopLimit(16) +// if _, err := p.WriteTo(data[:n], nil, src); err != nil { +// // error handling +// } +// dst := &net.UDPAddr{IP: group, Port: 1024} +// wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1} +// for _, ifi := range []*net.Interface{en0, en1} { +// wcm.IfIndex = ifi.Index +// if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil { +// // error handling +// } +// } +// } +// +// +// More multicasting +// +// An application that uses PacketConn may join multiple multicast +// groups. For example, a UDP listener with port 1024 might join two +// different groups across over two different network interfaces by +// using: +// +// c, err := net.ListenPacket("udp6", "[::]:1024") +// if err != nil { +// // error handling +// } +// defer c.Close() +// p := ipv6.NewPacketConn(c) +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil { +// // error handling +// } +// +// It is possible for multiple UDP listeners that listen on the same +// UDP port to join the same multicast group. The net package will +// provide a socket that listens to a wildcard address with reusable +// UDP port when an appropriate multicast address prefix is passed to +// the net.ListenPacket or net.ListenUDP. 
+// +// c1, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c1.Close() +// c2, err := net.ListenPacket("udp6", "[ff02::]:1024") +// if err != nil { +// // error handling +// } +// defer c2.Close() +// p1 := ipv6.NewPacketConn(c1) +// if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// p2 := ipv6.NewPacketConn(c2) +// if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// +// Also it is possible for the application to leave or rejoin a +// multicast group on the network interface. +// +// if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil { +// // error handling +// } +// if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil { +// // error handling +// } +// +// +// Source-specific multicasting +// +// An application that uses PacketConn on MLDv2 supported platform is +// able to join source-specific multicast groups. +// The application may use JoinSourceSpecificGroup and +// LeaveSourceSpecificGroup for the operation known as "include" mode, +// +// ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")} +// ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")} +// if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil { +// // error handling +// } +// +// or JoinGroup, ExcludeSourceSpecificGroup, +// IncludeSourceSpecificGroup and LeaveGroup for the operation known +// as "exclude" mode. +// +// exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")} +// if err := p.JoinGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil { +// // error handling +// } +// if err := p.LeaveGroup(en0, &ssmgroup); err != nil { +// // error handling +// } +// +// Note that it depends on each platform implementation what happens +// when an application which runs on MLDv2 unsupported platform uses +// JoinSourceSpecificGroup and LeaveSourceSpecificGroup. +// In general the platform tries to fall back to conversations using +// MLDv1 and starts to listen to multicast traffic. +// In the fallback case, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup may return an error. +package ipv6 // import "golang.org/x/net/ipv6" + +// BUG(mikio): This package is not implemented on NaCl and Plan 9. diff --git a/vendor/golang.org/x/net/ipv6/endpoint.go b/vendor/golang.org/x/net/ipv6/endpoint.go new file mode 100644 index 0000000000..0624c17404 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/endpoint.go @@ -0,0 +1,128 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "time" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the JoinSourceSpecificGroup, +// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and +// IncludeSourceSpecificGroup methods of PacketConn are not +// implemented. + +// A Conn represents a network endpoint that uses IPv6 transport. +// It allows to set basic IP-level socket options such as traffic +// class and hop limit. 
+type Conn struct { + genericOpt +} + +type genericOpt struct { + *socket.Conn +} + +func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil } + +// PathMTU returns a path MTU value for the destination associated +// with the endpoint. +func (c *Conn) PathMTU() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoPathMTU] + if !ok { + return 0, errOpNoSupport + } + _, mtu, err := so.getMTUInfo(c.Conn) + if err != nil { + return 0, err + } + return mtu, nil +} + +// NewConn returns a new Conn. +func NewConn(c net.Conn) *Conn { + cc, _ := socket.NewConn(c) + return &Conn{ + genericOpt: genericOpt{Conn: cc}, + } +} + +// A PacketConn represents a packet network endpoint that uses IPv6 +// transport. It is used to control several IP-level socket options +// including IPv6 header manipulation. It also provides datagram +// based network I/O methods specific to the IPv6 and higher layer +// protocols such as OSPF, GRE, and UDP. +type PacketConn struct { + genericOpt + dgramOpt + payloadHandler +} + +type dgramOpt struct { + *socket.Conn +} + +func (c *dgramOpt) ok() bool { return c != nil && c.Conn != nil } + +// SetControlMessage allows to receive the per packet basis IP-level +// socket options. +func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return setControlMessage(c.dgramOpt.Conn, &c.payloadHandler.rawOpt, cf, on) +} + +// SetDeadline sets the read and write deadlines associated with the +// endpoint. +func (c *PacketConn) SetDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetDeadline(t) +} + +// SetReadDeadline sets the read deadline associated with the +// endpoint. +func (c *PacketConn) SetReadDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetReadDeadline(t) +} + +// SetWriteDeadline sets the write deadline associated with the +// endpoint. +func (c *PacketConn) SetWriteDeadline(t time.Time) error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.SetWriteDeadline(t) +} + +// Close closes the endpoint. +func (c *PacketConn) Close() error { + if !c.payloadHandler.ok() { + return syscall.EINVAL + } + return c.payloadHandler.Close() +} + +// NewPacketConn returns a new PacketConn using c as its underlying +// transport. +func NewPacketConn(c net.PacketConn) *PacketConn { + cc, _ := socket.NewConn(c.(net.Conn)) + return &PacketConn{ + genericOpt: genericOpt{Conn: cc}, + dgramOpt: dgramOpt{Conn: cc}, + payloadHandler: payloadHandler{PacketConn: c, Conn: cc}, + } +} diff --git a/vendor/golang.org/x/net/ipv6/gen.go b/vendor/golang.org/x/net/ipv6/gen.go new file mode 100644 index 0000000000..5885664fbc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/gen.go @@ -0,0 +1,199 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +//go:generate go run gen.go + +// This program generates system adaptation constants and types, +// internet protocol constants and tables by reading template files +// and IANA protocol registries. 
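The generator below is excluded from ordinary builds by the ignore build tag and wired up through its go:generate directive, so regeneration is normally just:

	go generate golang.org/x/net/ipv6

which runs cgo -godefs over the per-GOOS defs file to produce the matching zsys file, and rewrites iana.go from the IANA ICMPv6 parameters registry.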
+package main + +import ( + "bytes" + "encoding/xml" + "fmt" + "go/format" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "runtime" + "strconv" + "strings" +) + +func main() { + if err := genzsys(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if err := geniana(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} + +func genzsys() error { + defs := "defs_" + runtime.GOOS + ".go" + f, err := os.Open(defs) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return err + } + f.Close() + cmd := exec.Command("go", "tool", "cgo", "-godefs", defs) + b, err := cmd.Output() + if err != nil { + return err + } + b, err = format.Source(b) + if err != nil { + return err + } + zsys := "zsys_" + runtime.GOOS + ".go" + switch runtime.GOOS { + case "freebsd", "linux": + zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go" + } + if err := ioutil.WriteFile(zsys, b, 0644); err != nil { + return err + } + return nil +} + +var registries = []struct { + url string + parse func(io.Writer, io.Reader) error +}{ + { + "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", + parseICMPv6Parameters, + }, +} + +func geniana() error { + var bb bytes.Buffer + fmt.Fprintf(&bb, "// go generate gen.go\n") + fmt.Fprintf(&bb, "// Code generated by the command above; DO NOT EDIT.\n\n") + fmt.Fprintf(&bb, "package ipv6\n\n") + for _, r := range registries { + resp, err := http.Get(r.url) + if err != nil { + return err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("got HTTP status code %v for %v\n", resp.StatusCode, r.url) + } + if err := r.parse(&bb, resp.Body); err != nil { + return err + } + fmt.Fprintf(&bb, "\n") + } + b, err := format.Source(bb.Bytes()) + if err != nil { + return err + } + if err := ioutil.WriteFile("iana.go", b, 0644); err != nil { + return err + } + return nil +} + +func parseICMPv6Parameters(w io.Writer, r io.Reader) error { + dec := xml.NewDecoder(r) + var icp icmpv6Parameters + if err := dec.Decode(&icp); err != nil { + return err + } + prs := icp.escape() + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "const (\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value) + fmt.Fprintf(w, "// %s\n", pr.OrigName) + } + fmt.Fprintf(w, ")\n\n") + fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated) + fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n") + for _, pr := range prs { + if pr.Name == "" { + continue + } + fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName)) + } + fmt.Fprintf(w, "}\n") + return nil +} + +type icmpv6Parameters struct { + XMLName xml.Name `xml:"registry"` + Title string `xml:"title"` + Updated string `xml:"updated"` + Registries []struct { + Title string `xml:"title"` + Records []struct { + Value string `xml:"value"` + Name string `xml:"name"` + } `xml:"record"` + } `xml:"registry"` +} + +type canonICMPv6ParamRecord struct { + OrigName string + Name string + Value int +} + +func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord { + id := -1 + for i, r := range icp.Registries { + if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") { + id = i + break + } + } + if id < 0 { + return nil + } + prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records)) + sr := strings.NewReplacer( + "Messages", "", + "Message", "", + "ICMP", "", + "+", "P", + "-", "", + "/", "", + ".", "", + " ", "", + ) + for 
i, pr := range icp.Registries[id].Records { + if strings.Contains(pr.Name, "Reserved") || + strings.Contains(pr.Name, "Unassigned") || + strings.Contains(pr.Name, "Deprecated") || + strings.Contains(pr.Name, "Experiment") || + strings.Contains(pr.Name, "experiment") { + continue + } + ss := strings.Split(pr.Name, "\n") + if len(ss) > 1 { + prs[i].Name = strings.Join(ss, " ") + } else { + prs[i].Name = ss[0] + } + s := strings.TrimSpace(prs[i].Name) + prs[i].OrigName = s + prs[i].Name = sr.Replace(s) + prs[i].Value, _ = strconv.Atoi(pr.Value) + } + return prs +} diff --git a/vendor/golang.org/x/net/ipv6/genericopt.go b/vendor/golang.org/x/net/ipv6/genericopt.go new file mode 100644 index 0000000000..e9dbc2e189 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/genericopt.go @@ -0,0 +1,58 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "syscall" + +// TrafficClass returns the traffic class field value for outgoing +// packets. +func (c *genericOpt) TrafficClass() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetTrafficClass sets the traffic class field value for future +// outgoing packets. +func (c *genericOpt) SetTrafficClass(tclass int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoTrafficClass] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, tclass) +} + +// HopLimit returns the hop limit field value for outgoing packets. +func (c *genericOpt) HopLimit() (int, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return 0, errOpNoSupport + } + return so.GetInt(c.Conn) +} + +// SetHopLimit sets the hop limit field value for future outgoing +// packets. +func (c *genericOpt) SetHopLimit(hoplim int) error { + if !c.ok() { + return syscall.EINVAL + } + so, ok := sockOpts[ssoHopLimit] + if !ok { + return errOpNoSupport + } + return so.SetInt(c.Conn, hoplim) +} diff --git a/vendor/golang.org/x/net/ipv6/header.go b/vendor/golang.org/x/net/ipv6/header.go new file mode 100644 index 0000000000..e05cb08b21 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/header.go @@ -0,0 +1,55 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "encoding/binary" + "fmt" + "net" +) + +const ( + Version = 6 // protocol version + HeaderLen = 40 // header length +) + +// A Header represents an IPv6 base header. +type Header struct { + Version int // protocol version + TrafficClass int // traffic class + FlowLabel int // flow label + PayloadLen int // payload length + NextHeader int // next header + HopLimit int // hop limit + Src net.IP // source address + Dst net.IP // destination address +} + +func (h *Header) String() string { + if h == nil { + return "<nil>" + } + return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst) +} + +// ParseHeader parses b as an IPv6 base header.
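As context for the parser below: Version occupies the top nibble of b[0], TrafficClass spans the low nibble of b[0] and the high nibble of b[1], and FlowLabel is the remaining 20 bits of b[1:4]. An illustrative decode of a raw packet buffer b (a sketch; variable names are ours):

	h, err := ipv6.ParseHeader(b[:ipv6.HeaderLen])
	if err != nil {
		// error handling
	}
	fmt.Println(h) // e.g. ver=6 tclass=0x0 flowlbl=0x0 payloadlen=40 nxthdr=58 hoplim=64 src=... dst=...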
+func ParseHeader(b []byte) (*Header, error) { + if len(b) < HeaderLen { + return nil, errHeaderTooShort + } + h := &Header{ + Version: int(b[0]) >> 4, + TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4, + FlowLabel: int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]), + PayloadLen: int(binary.BigEndian.Uint16(b[4:6])), + NextHeader: int(b[6]), + HopLimit: int(b[7]), + } + h.Src = make(net.IP, net.IPv6len) + copy(h.Src, b[8:24]) + h.Dst = make(net.IP, net.IPv6len) + copy(h.Dst, b[24:40]) + return h, nil +} diff --git a/vendor/golang.org/x/net/ipv6/helper.go b/vendor/golang.org/x/net/ipv6/helper.go new file mode 100644 index 0000000000..259740132c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/helper.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "errors" + "net" +) + +var ( + errMissingAddress = errors.New("missing address") + errHeaderTooShort = errors.New("header too short") + errInvalidConnType = errors.New("invalid conn type") + errOpNoSupport = errors.New("operation not supported") + errNoSuchInterface = errors.New("no such interface") +) + +func boolint(b bool) int { + if b { + return 1 + } + return 0 +} + +func netAddrToIP16(a net.Addr) net.IP { + switch v := a.(type) { + case *net.UDPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + case *net.IPAddr: + if ip := v.IP.To16(); ip != nil && ip.To4() == nil { + return ip + } + } + return nil +} + +func opAddr(a net.Addr) net.Addr { + switch a.(type) { + case *net.TCPAddr: + if a == nil { + return nil + } + case *net.UDPAddr: + if a == nil { + return nil + } + case *net.IPAddr: + if a == nil { + return nil + } + } + return a +} diff --git a/vendor/golang.org/x/net/ipv6/iana.go b/vendor/golang.org/x/net/ipv6/iana.go new file mode 100644 index 0000000000..32db1aa949 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/iana.go @@ -0,0 +1,86 @@ +// go generate gen.go +// Code generated by the command above; DO NOT EDIT. 
+ +package ipv6 + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +const ( + ICMPTypeDestinationUnreachable ICMPType = 1 // Destination Unreachable + ICMPTypePacketTooBig ICMPType = 2 // Packet Too Big + ICMPTypeTimeExceeded ICMPType = 3 // Time Exceeded + ICMPTypeParameterProblem ICMPType = 4 // Parameter Problem + ICMPTypeEchoRequest ICMPType = 128 // Echo Request + ICMPTypeEchoReply ICMPType = 129 // Echo Reply + ICMPTypeMulticastListenerQuery ICMPType = 130 // Multicast Listener Query + ICMPTypeMulticastListenerReport ICMPType = 131 // Multicast Listener Report + ICMPTypeMulticastListenerDone ICMPType = 132 // Multicast Listener Done + ICMPTypeRouterSolicitation ICMPType = 133 // Router Solicitation + ICMPTypeRouterAdvertisement ICMPType = 134 // Router Advertisement + ICMPTypeNeighborSolicitation ICMPType = 135 // Neighbor Solicitation + ICMPTypeNeighborAdvertisement ICMPType = 136 // Neighbor Advertisement + ICMPTypeRedirect ICMPType = 137 // Redirect Message + ICMPTypeRouterRenumbering ICMPType = 138 // Router Renumbering + ICMPTypeNodeInformationQuery ICMPType = 139 // ICMP Node Information Query + ICMPTypeNodeInformationResponse ICMPType = 140 // ICMP Node Information Response + ICMPTypeInverseNeighborDiscoverySolicitation ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message + ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message + ICMPTypeVersion2MulticastListenerReport ICMPType = 143 // Version 2 Multicast Listener Report + ICMPTypeHomeAgentAddressDiscoveryRequest ICMPType = 144 // Home Agent Address Discovery Request Message + ICMPTypeHomeAgentAddressDiscoveryReply ICMPType = 145 // Home Agent Address Discovery Reply Message + ICMPTypeMobilePrefixSolicitation ICMPType = 146 // Mobile Prefix Solicitation + ICMPTypeMobilePrefixAdvertisement ICMPType = 147 // Mobile Prefix Advertisement + ICMPTypeCertificationPathSolicitation ICMPType = 148 // Certification Path Solicitation Message + ICMPTypeCertificationPathAdvertisement ICMPType = 149 // Certification Path Advertisement Message + ICMPTypeMulticastRouterAdvertisement ICMPType = 151 // Multicast Router Advertisement + ICMPTypeMulticastRouterSolicitation ICMPType = 152 // Multicast Router Solicitation + ICMPTypeMulticastRouterTermination ICMPType = 153 // Multicast Router Termination + ICMPTypeFMIPv6 ICMPType = 154 // FMIPv6 Messages + ICMPTypeRPLControl ICMPType = 155 // RPL Control Message + ICMPTypeILNPv6LocatorUpdate ICMPType = 156 // ILNPv6 Locator Update Message + ICMPTypeDuplicateAddressRequest ICMPType = 157 // Duplicate Address Request + ICMPTypeDuplicateAddressConfirmation ICMPType = 158 // Duplicate Address Confirmation + ICMPTypeMPLControl ICMPType = 159 // MPL Control Message + ICMPTypeExtendedEchoRequest ICMPType = 160 // Extended Echo Request + ICMPTypeExtendedEchoReply ICMPType = 161 // Extended Echo Reply +) + +// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2018-03-09 +var icmpTypes = map[ICMPType]string{ + 1: "destination unreachable", + 2: "packet too big", + 3: "time exceeded", + 4: "parameter problem", + 128: "echo request", + 129: "echo reply", + 130: "multicast listener query", + 131: "multicast listener report", + 132: "multicast listener done", + 133: "router solicitation", + 134: "router advertisement", + 135: "neighbor solicitation", + 136: "neighbor advertisement", + 137: "redirect message", + 138: "router renumbering", + 139: "icmp node information query", + 
140: "icmp node information response", + 141: "inverse neighbor discovery solicitation message", + 142: "inverse neighbor discovery advertisement message", + 143: "version 2 multicast listener report", + 144: "home agent address discovery request message", + 145: "home agent address discovery reply message", + 146: "mobile prefix solicitation", + 147: "mobile prefix advertisement", + 148: "certification path solicitation message", + 149: "certification path advertisement message", + 151: "multicast router advertisement", + 152: "multicast router solicitation", + 153: "multicast router termination", + 154: "fmipv6 messages", + 155: "rpl control message", + 156: "ilnpv6 locator update message", + 157: "duplicate address request", + 158: "duplicate address confirmation", + 159: "mpl control message", + 160: "extended echo request", + 161: "extended echo reply", +} diff --git a/vendor/golang.org/x/net/ipv6/icmp.go b/vendor/golang.org/x/net/ipv6/icmp.go new file mode 100644 index 0000000000..b7f48e27b8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/iana" + +// BUG(mikio): On Windows, methods related to ICMPFilter are not +// implemented. + +// An ICMPType represents a type of ICMP message. +type ICMPType int + +func (typ ICMPType) String() string { + s, ok := icmpTypes[typ] + if !ok { + return "" + } + return s +} + +// Protocol returns the ICMPv6 protocol number. +func (typ ICMPType) Protocol() int { + return iana.ProtocolIPv6ICMP +} + +// An ICMPFilter represents an ICMP message filter for incoming +// packets. The filter belongs to a packet delivery path on a host and +// it cannot interact with forwarding packets or tunnel-outer packets. +// +// Note: RFC 8200 defines a reasonable role model. A node means a +// device that implements IP. A router means a node that forwards IP +// packets not explicitly addressed to itself, and a host means a node +// that is not a router. +type ICMPFilter struct { + icmpv6Filter +} + +// Accept accepts incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Accept(typ ICMPType) { + f.accept(typ) +} + +// Block blocks incoming ICMP packets including the type field value +// typ. +func (f *ICMPFilter) Block(typ ICMPType) { + f.block(typ) +} + +// SetAll sets the filter action to the filter. +func (f *ICMPFilter) SetAll(block bool) { + f.setAll(block) +} + +// WillBlock reports whether the ICMP type will be blocked. +func (f *ICMPFilter) WillBlock(typ ICMPType) bool { + return f.willBlock(typ) +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/vendor/golang.org/x/net/ipv6/icmp_bsd.go new file mode 100644 index 0000000000..e1a791de46 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_bsd.go @@ -0,0 +1,29 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Filt { + if block { + f.Filt[i] = 0 + } else { + f.Filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_linux.go b/vendor/golang.org/x/net/ipv6/icmp_linux.go new file mode 100644 index 0000000000..647f6b44ff --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_linux.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.Data[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.Data[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.Data { + if block { + f.Data[i] = 1<<32 - 1 + } else { + f.Data[i] = 0 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/vendor/golang.org/x/net/ipv6/icmp_solaris.go new file mode 100644 index 0000000000..7c23bb1cf6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_solaris.go @@ -0,0 +1,27 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) block(typ ICMPType) { + f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31) +} + +func (f *icmpv6Filter) setAll(block bool) { + for i := range f.X__icmp6_filt { + if block { + f.X__icmp6_filt[i] = 0 + } else { + f.X__icmp6_filt[i] = 1<<32 - 1 + } + } +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0 +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_stub.go b/vendor/golang.org/x/net/ipv6/icmp_stub.go new file mode 100644 index 0000000000..c4b9be6dbf --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_stub.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +type icmpv6Filter struct { +} + +func (f *icmpv6Filter) accept(typ ICMPType) { +} + +func (f *icmpv6Filter) block(typ ICMPType) { +} + +func (f *icmpv6Filter) setAll(block bool) { +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + return false +} diff --git a/vendor/golang.org/x/net/ipv6/icmp_windows.go b/vendor/golang.org/x/net/ipv6/icmp_windows.go new file mode 100644 index 0000000000..443cd07367 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/icmp_windows.go @@ -0,0 +1,22 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
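The per-OS filter implementations above all encode the 256 possible ICMPv6 type values as an 8-word bitmap: typ>>5 selects one of eight uint32 words, and uint32(typ)&31 selects the bit within it. Note the inverted sense between platforms, which is why accept sets bits on the BSDs and Solaris (a set bit passes) but clears them on Linux (a set bit blocks, following ICMP6_FILTER semantics). The index math, as a one-off illustration:

    typ := 129 // ICMPv6 echo reply
    word, bit := typ>>5, uint32(typ)&31
    fmt.Println(word, bit) // 4 1: bit 1 of the fifth uint32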
+ +package ipv6 + +func (f *icmpv6Filter) accept(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) block(typ ICMPType) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) setAll(block bool) { + // TODO(mikio): implement this +} + +func (f *icmpv6Filter) willBlock(typ ICMPType) bool { + // TODO(mikio): implement this + return false +} diff --git a/vendor/golang.org/x/net/ipv6/payload.go b/vendor/golang.org/x/net/ipv6/payload.go new file mode 100644 index 0000000000..a8197f1695 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload.go @@ -0,0 +1,23 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo +// methods of PacketConn is not implemented. + +// A payloadHandler represents the IPv6 datagram payload handler. +type payloadHandler struct { + net.PacketConn + *socket.Conn + rawOpt +} + +func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil && c.Conn != nil } diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/vendor/golang.org/x/net/ipv6/payload_cmsg.go new file mode 100644 index 0000000000..4ee4b062ca --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg.go @@ -0,0 +1,35 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + return c.readFrom(b) +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + return c.writeTo(b, cm, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go new file mode 100644 index 0000000000..fdc6c39941 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go @@ -0,0 +1,55 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
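payloadHandler is the type backing ipv6.PacketConn's ReadFrom/WriteTo. A minimal receive sketch under the usual assumptions (a UDP socket on loopback; imports net, log, fmt, golang.org/x/net/ipv6):

    c, err := net.ListenPacket("udp6", "[::1]:0")
    if err != nil {
        log.Fatal(err)
    }
    defer c.Close()
    p := ipv6.NewPacketConn(c)
    // Opt in to per-datagram metadata; unsupported flags fail on some platforms.
    if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagDst|ipv6.FlagInterface, true); err != nil {
        log.Println(err)
    }
    b := make([]byte, 1500)
    n, cm, src, err := p.ReadFrom(b)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(n, cm, src) // cm is nil unless control messages were delivered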
+ +// +build !go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import "net" + +func (c *payloadHandler) readFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + c.rawOpt.RLock() + oob := NewControlMessage(c.rawOpt.cflags) + c.rawOpt.RUnlock() + var nn int + switch c := c.PacketConn.(type) { + case *net.UDPConn: + if n, nn, _, src, err = c.ReadMsgUDP(b, oob); err != nil { + return 0, nil, nil, err + } + case *net.IPConn: + if n, nn, _, src, err = c.ReadMsgIP(b, oob); err != nil { + return 0, nil, nil, err + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Err: errInvalidConnType} + } + if nn > 0 { + cm = new(ControlMessage) + if err = cm.Parse(oob[:nn]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + } + if cm != nil { + cm.Src = netAddrToIP16(src) + } + return +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + oob := cm.Marshal() + if dst == nil { + return 0, &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errMissingAddress} + } + switch c := c.PacketConn.(type) { + case *net.UDPConn: + n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr)) + case *net.IPConn: + n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr)) + default: + return 0, &net.OpError{Op: "write", Net: c.LocalAddr().Network(), Source: c.LocalAddr(), Addr: opAddr(dst), Err: errInvalidConnType} + } + return +} diff --git a/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go new file mode 100644 index 0000000000..8f6d02e2f8 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go @@ -0,0 +1,57 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
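The writeTo above shows where cm.Marshal() feeds the kernel's out-of-band buffer on the pre-Go-1.9 path. From the caller's side, per-packet options ride in an ipv6.ControlMessage (a sketch; p is an *ipv6.PacketConn as in the receive example above, and the destination address is made up):

    dst := &net.UDPAddr{IP: net.ParseIP("ff02::114"), Port: 9999}
    cm := &ipv6.ControlMessage{HopLimit: 1, TrafficClass: 0x20} // overrides for this one datagram
    if _, err := p.WriteTo([]byte("hello"), cm, dst); err != nil {
        log.Fatal(err)
    }
    // A nil cm is fine when no per-packet control is needed.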
+ +// +build go1.9 +// +build !nacl,!plan9,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (c *payloadHandler) readFrom(b []byte) (int, *ControlMessage, net.Addr, error) { + c.rawOpt.RLock() + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: NewControlMessage(c.rawOpt.cflags), + } + c.rawOpt.RUnlock() + switch c.PacketConn.(type) { + case *net.UDPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + case *net.IPConn: + if err := c.RecvMsg(&m, 0); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + default: + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: errInvalidConnType} + } + var cm *ControlMessage + if m.NN > 0 { + cm = new(ControlMessage) + if err := cm.Parse(m.OOB[:m.NN]); err != nil { + return 0, nil, nil, &net.OpError{Op: "read", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Err: err} + } + cm.Src = netAddrToIP16(m.Addr) + } + return m.N, cm, m.Addr, nil +} + +func (c *payloadHandler) writeTo(b []byte, cm *ControlMessage, dst net.Addr) (int, error) { + m := socket.Message{ + Buffers: [][]byte{b}, + OOB: cm.Marshal(), + Addr: dst, + } + err := c.SendMsg(&m, 0) + if err != nil { + err = &net.OpError{Op: "write", Net: c.PacketConn.LocalAddr().Network(), Source: c.PacketConn.LocalAddr(), Addr: opAddr(dst), Err: err} + } + return m.N, err +} diff --git a/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go new file mode 100644 index 0000000000..99a43542b4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/payload_nocmsg.go @@ -0,0 +1,41 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build nacl plan9 windows + +package ipv6 + +import ( + "net" + "syscall" +) + +// ReadFrom reads a payload of the received IPv6 datagram, from the +// endpoint c, copying the payload into b. It returns the number of +// bytes copied into b, the control message cm and the source address +// src of the received datagram. +func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) { + if !c.ok() { + return 0, nil, nil, syscall.EINVAL + } + if n, src, err = c.PacketConn.ReadFrom(b); err != nil { + return 0, nil, nil, err + } + return +} + +// WriteTo writes a payload of the IPv6 datagram, to the destination +// address dst through the endpoint c, copying the payload from b. It +// returns the number of bytes written. The control message cm allows +// the IPv6 header fields and the datagram path to be specified. The +// cm may be nil if control of the outgoing datagram is not required. +func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) { + if !c.ok() { + return 0, syscall.EINVAL + } + if dst == nil { + return 0, errMissingAddress + } + return c.PacketConn.WriteTo(b, dst) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt.go b/vendor/golang.org/x/net/ipv6/sockopt.go new file mode 100644 index 0000000000..cc3907df38 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt.go @@ -0,0 +1,43 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import "golang.org/x/net/internal/socket" + +// Sticky socket options +const ( + ssoTrafficClass = iota // header field for unicast packet, RFC 3542 + ssoHopLimit // header field for unicast packet, RFC 3493 + ssoMulticastInterface // outbound interface for multicast packet, RFC 3493 + ssoMulticastHopLimit // header field for multicast packet, RFC 3493 + ssoMulticastLoopback // loopback for multicast packet, RFC 3493 + ssoReceiveTrafficClass // header field on received packet, RFC 3542 + ssoReceiveHopLimit // header field on received packet, RFC 2292 or 3542 + ssoReceivePacketInfo // inbound or outbound packet path, RFC 2292 or 3542 + ssoReceivePathMTU // path mtu, RFC 3542 + ssoPathMTU // path mtu, RFC 3542 + ssoChecksum // packet checksum, RFC 2292 or 3542 + ssoICMPFilter // icmp filter, RFC 2292 or 3542 + ssoJoinGroup // any-source multicast, RFC 3493 + ssoLeaveGroup // any-source multicast, RFC 3493 + ssoJoinSourceGroup // source-specific multicast + ssoLeaveSourceGroup // source-specific multicast + ssoBlockSourceGroup // any-source or source-specific multicast + ssoUnblockSourceGroup // any-source or source-specific multicast + ssoAttachFilter // attach BPF for filtering inbound traffic +) + +// Sticky socket option value types +const ( + ssoTypeIPMreq = iota + 1 + ssoTypeGroupReq + ssoTypeGroupSourceReq +) + +// A sockOpt represents a binding for a sticky socket option. +type sockOpt struct { + socket.Option + typ int // hint for option value type; optional +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/vendor/golang.org/x/net/ipv6/sockopt_posix.go new file mode 100644 index 0000000000..0eac86eb8c --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_posix.go @@ -0,0 +1,87 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
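Unlike the per-packet control messages, the sticky options enumerated above persist on the socket until changed, and they surface as getter/setter pairs on ipv6.Conn and ipv6.PacketConn. A sketch for a connected stream socket (the address is hypothetical):

    conn, err := net.Dial("tcp6", "[::1]:8080")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()
    v6 := ipv6.NewConn(conn)
    if err := v6.SetTrafficClass(0x28); err != nil { // e.g. DSCP AF11
        log.Fatal(err)
    }
    if err := v6.SetHopLimit(64); err != nil {
        log.Fatal(err)
    }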
+ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + n, err := so.GetInt(c) + if err != nil { + return nil, err + } + return net.InterfaceByIndex(n) +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + var n int + if ifi != nil { + n = ifi.Index + } + return so.SetInt(c, n) +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, err + } + if n != sizeofICMPv6Filter { + return nil, errOpNoSupport + } + return (*ICMPFilter)(unsafe.Pointer(&b[0])), nil +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + b := (*[sizeofICMPv6Filter]byte)(unsafe.Pointer(f))[:sizeofICMPv6Filter] + return so.Set(c, b) +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + b := make([]byte, so.Len) + n, err := so.Get(c, b) + if err != nil { + return nil, 0, err + } + if n != sizeofIPv6Mtuinfo { + return nil, 0, errOpNoSupport + } + mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0])) + if mi.Addr.Scope_id == 0 { + return nil, int(mi.Mtu), nil + } + ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id)) + if err != nil { + return nil, 0, err + } + return ifi, int(mi.Mtu), nil +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + switch so.typ { + case ssoTypeIPMreq: + return so.setIPMreq(c, ifi, grp) + case ssoTypeGroupReq: + return so.setGroupReq(c, ifi, grp) + default: + return errOpNoSupport + } +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return so.setGroupSourceReq(c, ifi, grp, src) +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return so.setAttachFilter(c, f) +} diff --git a/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/vendor/golang.org/x/net/ipv6/sockopt_stub.go new file mode 100644 index 0000000000..1f4a273e44 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sockopt_stub.go @@ -0,0 +1,46 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
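getMTUInfo above is the plumbing behind the package's path MTU accessor; its Scope_id branch is why a link-local result can be tied back to a specific interface. From the public API (a sketch; p is assumed to be an *ipv6.PacketConn on a connected socket, and support varies by platform):

    mtu, err := p.PathMTU()
    if err != nil {
        log.Fatal(err) // fails where the underlying option is unavailable
    }
    fmt.Println("path MTU:", mtu)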
+ +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) getMulticastInterface(c *socket.Conn) (*net.Interface, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setMulticastInterface(c *socket.Conn, ifi *net.Interface) error { + return errOpNoSupport +} + +func (so *sockOpt) getICMPFilter(c *socket.Conn) (*ICMPFilter, error) { + return nil, errOpNoSupport +} + +func (so *sockOpt) setICMPFilter(c *socket.Conn, f *ICMPFilter) error { + return errOpNoSupport +} + +func (so *sockOpt) getMTUInfo(c *socket.Conn) (*net.Interface, int, error) { + return nil, 0, errOpNoSupport +} + +func (so *sockOpt) setGroup(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setSourceGroup(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setBPF(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq.go b/vendor/golang.org/x/net/ipv6/sys_asmreq.go new file mode 100644 index 0000000000..b0510c0b5d --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq.go @@ -0,0 +1,24 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var mreq ipv6Mreq + copy(mreq.Multiaddr[:], grp) + if ifi != nil { + mreq.setIfindex(ifi.Index) + } + b := (*[sizeofIPv6Mreq]byte)(unsafe.Pointer(&mreq))[:sizeofIPv6Mreq] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go new file mode 100644 index 0000000000..eece96187b --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go @@ -0,0 +1,17 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setIPMreq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf.go b/vendor/golang.org/x/net/ipv6/sys_bpf.go new file mode 100644 index 0000000000..b2dbcb2f28 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf.go @@ -0,0 +1,23 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
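setIPMreq above services the legacy RFC 3493 join path used where the protocol-independent group requests are unavailable; callers reach it through PacketConn.JoinGroup. A sketch (interface name and group address are made up; p as in the earlier examples):

    en0, err := net.InterfaceByName("en0")
    if err != nil {
        log.Fatal(err)
    }
    group := &net.UDPAddr{IP: net.ParseIP("ff02::114")}
    if err := p.JoinGroup(en0, group); err != nil {
        log.Fatal(err)
    }
    defer p.LeaveGroup(en0, group)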
+ +// +build linux + +package ipv6 + +import ( + "unsafe" + + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + prog := sockFProg{ + Len: uint16(len(f)), + Filter: (*sockFilter)(unsafe.Pointer(&f[0])), + } + b := (*[sizeofSockFprog]byte)(unsafe.Pointer(&prog))[:sizeofSockFprog] + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go new file mode 100644 index 0000000000..676bea555f --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bpf_stub.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !linux + +package ipv6 + +import ( + "golang.org/x/net/bpf" + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setAttachFilter(c *socket.Conn, f []bpf.RawInstruction) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_bsd.go b/vendor/golang.org/x/net/ipv6/sys_bsd.go new file mode 100644 index 0000000000..e416eaa1fe --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_bsd.go @@ -0,0 +1,57 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly netbsd openbsd + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: 
iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_darwin.go b/vendor/golang.org/x/net/ipv6/sys_darwin.go new file mode 100644 index 0000000000..e3d0443927 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_darwin.go @@ -0,0 +1,106 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "strconv" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlHopLimit: {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo}, + } + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292HOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_2292PKTINFO, Len: 4}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func init() { + // Seems like kern.osreldate is veiled on latest OS X. We use + // kern.osrelease instead. + s, err := syscall.Sysctl("kern.osrelease") + if err != nil { + return + } + ss := strings.Split(s, ".") + if len(ss) == 0 { + return + } + // The IP_PKTINFO and protocol-independent multicast API were + // introduced in OS X 10.7 (Darwin 11). But it looks like + // those features require OS X 10.8 (Darwin 12) or above. + // See http://support.apple.com/kb/HT1633. 
+ if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 { + return + } + ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass} + ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit} + ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo} + ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop} + ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU} + sockOpts[ssoTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}} + sockOpts[ssoReceiveTrafficClass] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}} + sockOpts[ssoReceiveHopLimit] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}} + sockOpts[ssoReceivePacketInfo] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}} + sockOpts[ssoReceivePathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}} + sockOpts[ssoPathMTU] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}} + sockOpts[ssoJoinGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoLeaveGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq} + sockOpts[ssoJoinSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoLeaveSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoBlockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} + sockOpts[ssoUnblockSourceGroup] = &sockOpt{Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq} +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/vendor/golang.org/x/net/ipv6/sys_freebsd.go new file mode 100644 index 0000000000..e9349dc2cc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_freebsd.go @@ -0,0 +1,92 @@ +// Copyright 2013 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "runtime" + "strings" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func init() { + if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" { + archs, _ := syscall.Sysctl("kern.supported_archs") + for _, s := range strings.Fields(archs) { + if s == "amd64" { + freebsd32o64 = true + break + } + } + } +} + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface
= uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Len = sizeofSockaddrInet6 + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_linux.go b/vendor/golang.org/x/net/ipv6/sys_linux.go new file mode 100644 index 0000000000..bc218103c1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_linux.go @@ -0,0 +1,74 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolReserved, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMPV6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + 
ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoAttachFilter: {Option: socket.Option{Level: sysSOL_SOCKET, Name: sysSO_ATTACH_FILTER, Len: sizeofSockFprog}}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = int32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Ifindex = int32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_solaris.go b/vendor/golang.org/x/net/ipv6/sys_solaris.go new file mode 100644 index 0000000000..d348b5f6e4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_solaris.go @@ -0,0 +1,74 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + "unsafe" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +var ( + ctlOpts = [ctlMax]ctlOpt{ + ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}, + ctlHopLimit: {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}, + ctlPacketInfo: {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}, + ctlNextHop: {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}, + ctlPathMTU: {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}, + } + + sockOpts = map[int]*sockOpt{ + ssoTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_TCLASS, Len: 4}}, + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoReceiveTrafficClass: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVTCLASS, Len: 4}}, + ssoReceiveHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVHOPLIMIT, Len: 4}}, + ssoReceivePacketInfo: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPKTINFO, Len: 4}}, + ssoReceivePathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_RECVPATHMTU, Len: 4}}, + ssoPathMTU: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_PATHMTU, Len: sizeofIPv6Mtuinfo}}, + ssoChecksum: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_CHECKSUM, Len: 4}}, + ssoICMPFilter: {Option: socket.Option{Level: iana.ProtocolIPv6ICMP, Name: sysICMP6_FILTER, Len: sizeofICMPv6Filter}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: 
sysMCAST_LEAVE_GROUP, Len: sizeofGroupReq}, typ: ssoTypeGroupReq}, + ssoJoinSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_JOIN_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoLeaveSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_LEAVE_SOURCE_GROUP, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoBlockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_BLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + ssoUnblockSourceGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysMCAST_UNBLOCK_SOURCE, Len: sizeofGroupSourceReq}, typ: ssoTypeGroupSourceReq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (pi *inet6Pktinfo) setIfindex(i int) { + pi.Ifindex = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} + +func (gr *groupReq) setGroup(grp net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) +} + +func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) { + sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], grp) + sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260)) + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], src) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go new file mode 100644 index 0000000000..add8ccc0b1 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd linux solaris + +package ipv6 + +import ( + "net" + "unsafe" + + "golang.org/x/net/internal/socket" +) + +var freebsd32o64 bool + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + var gr groupReq + if ifi != nil { + gr.Interface = uint32(ifi.Index) + } + gr.setGroup(grp) + var b []byte + if freebsd32o64 { + var d [sizeofGroupReq + 4]byte + s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))[:sizeofGroupReq] + } + return so.Set(c, b) +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + var gsr groupSourceReq + if ifi != nil { + gsr.Interface = uint32(ifi.Index) + } + gsr.setSourceGroup(grp, src) + var b []byte + if freebsd32o64 { + var d [sizeofGroupSourceReq + 4]byte + s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr)) + copy(d[:4], s[:4]) + copy(d[8:], s[4:]) + b = d[:] + } else { + b = (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))[:sizeofGroupSourceReq] + } + return so.Set(c, b) +} diff --git a/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go new file mode 100644 index 0000000000..581ee490ff --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go @@ -0,0 +1,21 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
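The group and group-source request marshaling above (including the freebsd32o64 re-padding for 32-bit binaries on 64-bit FreeBSD kernels) backs the source-specific multicast methods. A sketch (addresses hypothetical; en0 and p as in the earlier examples):

    group := &net.UDPAddr{IP: net.ParseIP("ff32::8000:1")} // SSM range ff3x::/32
    source := &net.UDPAddr{IP: net.ParseIP("2001:db8::1")}
    if err := p.JoinSourceSpecificGroup(en0, group, source); err != nil {
        log.Fatal(err)
    }
    defer p.LeaveSourceSpecificGroup(en0, group, source)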
+ +// +build !darwin,!freebsd,!linux,!solaris + +package ipv6 + +import ( + "net" + + "golang.org/x/net/internal/socket" +) + +func (so *sockOpt) setGroupReq(c *socket.Conn, ifi *net.Interface, grp net.IP) error { + return errOpNoSupport +} + +func (so *sockOpt) setGroupSourceReq(c *socket.Conn, ifi *net.Interface, grp, src net.IP) error { + return errOpNoSupport +} diff --git a/vendor/golang.org/x/net/ipv6/sys_stub.go b/vendor/golang.org/x/net/ipv6/sys_stub.go new file mode 100644 index 0000000000..b845388ea4 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_stub.go @@ -0,0 +1,13 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows + +package ipv6 + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{} +) diff --git a/vendor/golang.org/x/net/ipv6/sys_windows.go b/vendor/golang.org/x/net/ipv6/sys_windows.go new file mode 100644 index 0000000000..fc36b018bd --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/sys_windows.go @@ -0,0 +1,75 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ipv6 + +import ( + "net" + "syscall" + + "golang.org/x/net/internal/iana" + "golang.org/x/net/internal/socket" +) + +const ( + // See ws2tcpip.h. + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PKTINFO = 0x13 + + sizeofSockaddrInet6 = 0x1c + + sizeofIPv6Mreq = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofICMPv6Filter = 0 +) + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type icmpv6Filter struct { + // TODO(mikio): implement this +} + +var ( + ctlOpts = [ctlMax]ctlOpt{} + + sockOpts = map[int]*sockOpt{ + ssoHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_UNICAST_HOPS, Len: 4}}, + ssoMulticastInterface: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_IF, Len: 4}}, + ssoMulticastHopLimit: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_HOPS, Len: 4}}, + ssoMulticastLoopback: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_MULTICAST_LOOP, Len: 4}}, + ssoJoinGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_JOIN_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + ssoLeaveGroup: {Option: socket.Option{Level: iana.ProtocolIPv6, Name: sysIPV6_LEAVE_GROUP, Len: sizeofIPv6Mreq}, typ: ssoTypeIPMreq}, + } +) + +func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) { + sa.Family = syscall.AF_INET6 + copy(sa.Addr[:], ip) + sa.Scope_id = uint32(i) +} + +func (mreq *ipv6Mreq) setIfindex(i int) { + mreq.Interface = uint32(i) +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/vendor/golang.org/x/net/ipv6/zsys_darwin.go new file mode 100644 index 0000000000..6aab1dfab7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_darwin.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_darwin.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + 
sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + sysIPV6_2292PKTINFO = 0x13 + sysIPV6_2292HOPLIMIT = 0x14 + sysIPV6_2292NEXTHOP = 0x15 + sysIPV6_2292HOPOPTS = 0x16 + sysIPV6_2292DSTOPTS = 0x17 + sysIPV6_2292RTHDR = 0x18 + + sysIPV6_2292PKTOPTIONS = 0x19 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RECVTCLASS = 0x23 + sysIPV6_TCLASS = 0x24 + + sysIPV6_RTHDRDSTOPTS = 0x39 + + sysIPV6_RECVPKTINFO = 0x3d + + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_MSFILTER = 0x4a + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_BOUND_IF = 0x7d + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [128]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [128]byte + Pad_cgo_1 [128]byte +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go new file mode 100644 index 0000000000..d2de804d88 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go @@ -0,0 +1,88 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_dragonfly.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + 
sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go new file mode 100644 index 0000000000..919e572d4a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go @@ -0,0 +1,122 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go new file mode 100644 index 0000000000..cb8141f9c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + 
sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP = 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go new file mode 100644 index 0000000000..cb8141f9c6 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go @@ -0,0 +1,124 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_freebsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PREFER_TEMPADDR = 0x3f + + sysIPV6_BINDANY = 0x40 + + sysIPV6_MSFILTER = 0x4a + + sysMCAST_JOIN_GROUP = 0x50 + sysMCAST_LEAVE_GROUP 
= 0x51 + sysMCAST_JOIN_SOURCE_GROUP = 0x52 + sysMCAST_LEAVE_SOURCE_GROUP = 0x53 + sysMCAST_BLOCK_SOURCE = 0x54 + sysMCAST_UNBLOCK_SOURCE = 0x55 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Len uint8 + Family uint8 + X__ss_pad1 [6]int8 + X__ss_align int64 + X__ss_pad2 [112]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group sockaddrStorage + Source sockaddrStorage +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go new file mode 100644 index 0000000000..73aa8c6dfc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_386.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + 
sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 
0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go new file mode 100644 index 0000000000..73aa8c6dfc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + 
sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 
0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go new file mode 100644 index 0000000000..73aa8c6dfc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + 
sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + 
sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + 
sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go new file mode 100644 index 0000000000..73aa8c6dfc --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 
+ sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go new file mode 100644 index 0000000000..c9bf6a87ef --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go @@ -0,0 +1,170 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + 
sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x84 + sizeofGroupSourceReq = 0x104 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x8 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]uint8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [2]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + 
sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go new file mode 100644 index 0000000000..b64f0157d7 --- 
/dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type 
sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go new file mode 100644 index 0000000000..b64f0157d7 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go @@ -0,0 +1,172 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_linux.go + +package ipv6 + +const ( + sysIPV6_ADDRFORM = 0x1 + sysIPV6_2292PKTINFO = 0x2 + sysIPV6_2292HOPOPTS = 0x3 + sysIPV6_2292DSTOPTS = 0x4 + sysIPV6_2292RTHDR = 0x5 + sysIPV6_2292PKTOPTIONS = 0x6 + sysIPV6_CHECKSUM = 0x7 + sysIPV6_2292HOPLIMIT = 0x8 + sysIPV6_NEXTHOP = 0x9 + sysIPV6_FLOWINFO = 0xb + + sysIPV6_UNICAST_HOPS = 0x10 + sysIPV6_MULTICAST_IF = 0x11 + sysIPV6_MULTICAST_HOPS = 0x12 + sysIPV6_MULTICAST_LOOP = 0x13 + sysIPV6_ADD_MEMBERSHIP = 0x14 + sysIPV6_DROP_MEMBERSHIP = 0x15 + sysMCAST_JOIN_GROUP = 0x2a + sysMCAST_LEAVE_GROUP = 0x2d + sysMCAST_JOIN_SOURCE_GROUP = 0x2e + sysMCAST_LEAVE_SOURCE_GROUP = 0x2f + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_MSFILTER = 0x30 + sysIPV6_ROUTER_ALERT = 0x16 + sysIPV6_MTU_DISCOVER = 0x17 + sysIPV6_MTU = 0x18 + sysIPV6_RECVERR = 0x19 + sysIPV6_V6ONLY = 0x1a + sysIPV6_JOIN_ANYCAST = 0x1b + sysIPV6_LEAVE_ANYCAST = 0x1c + + sysIPV6_FLOWLABEL_MGR = 0x20 + sysIPV6_FLOWINFO_SEND = 0x21 + + sysIPV6_IPSEC_POLICY = 0x22 + sysIPV6_XFRM_POLICY = 0x23 + + sysIPV6_RECVPKTINFO = 0x31 + sysIPV6_PKTINFO = 0x32 + sysIPV6_RECVHOPLIMIT = 0x33 + sysIPV6_HOPLIMIT = 0x34 + sysIPV6_RECVHOPOPTS = 0x35 + sysIPV6_HOPOPTS = 0x36 + sysIPV6_RTHDRDSTOPTS = 0x37 + sysIPV6_RECVRTHDR = 0x38 + sysIPV6_RTHDR = 0x39 + sysIPV6_RECVDSTOPTS = 0x3a + sysIPV6_DSTOPTS = 0x3b + sysIPV6_RECVPATHMTU = 0x3c + sysIPV6_PATHMTU = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_RECVTCLASS = 0x42 + sysIPV6_TCLASS = 0x43 + + sysIPV6_ADDR_PREFERENCES = 0x48 + + sysIPV6_PREFER_SRC_TMP = 0x1 + sysIPV6_PREFER_SRC_PUBLIC = 0x2 + sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100 + sysIPV6_PREFER_SRC_COA = 0x4 + sysIPV6_PREFER_SRC_HOME = 0x400 + sysIPV6_PREFER_SRC_CGA = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x800 + + sysIPV6_MINHOPCOUNT = 0x49 + + sysIPV6_ORIGDSTADDR = 0x4a + sysIPV6_RECVORIGDSTADDR = 0x4a + sysIPV6_TRANSPARENT = 0x4b + sysIPV6_UNICAST_IF = 0x4c + + sysICMPV6_FILTER = 0x1 + + sysICMPV6_FILTER_BLOCK = 0x1 + sysICMPV6_FILTER_PASS = 0x2 + sysICMPV6_FILTER_BLOCKOTHERS = 0x3 + sysICMPV6_FILTER_PASSONLY = 0x4 + + sysSOL_SOCKET = 0x1 + sysSO_ATTACH_FILTER = 0x1a + + sizeofKernelSockaddrStorage = 0x80 + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + sizeofIPv6FlowlabelReq = 0x20 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x88 + sizeofGroupSourceReq = 0x108 + + sizeofICMPv6Filter = 0x20 + + sizeofSockFprog = 0x10 +) + +type kernelSockaddrStorage struct { + Family uint16 + X__data [126]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex int32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6FlowlabelReq struct { + Dst [16]byte /* in6_addr */ + Label uint32 + Action uint8 + Share uint8 + Flags uint16 + Expires uint16 + Linger uint16 + X__flr_pad uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Ifindex int32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage +} + +type groupSourceReq struct { + Interface 
uint32 + Pad_cgo_0 [4]byte + Group kernelSockaddrStorage + Source kernelSockaddrStorage +} + +type icmpv6Filter struct { + Data [8]uint32 +} + +type sockFProg struct { + Len uint16 + Pad_cgo_0 [6]byte + Filter *sockFilter +} + +type sockFilter struct { + Code uint16 + Jt uint8 + Jf uint8 + K uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go new file mode 100644 index 0000000000..bcada13b7a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_netbsd.go @@ -0,0 +1,84 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_netbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_IPSEC_POLICY = 0x1c + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + + sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go new file mode 100644 index 0000000000..86cf3c6379 --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_openbsd.go @@ -0,0 +1,93 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_openbsd.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x4 + sysIPV6_MULTICAST_IF = 0x9 + sysIPV6_MULTICAST_HOPS = 0xa + sysIPV6_MULTICAST_LOOP = 0xb + sysIPV6_JOIN_GROUP = 0xc + sysIPV6_LEAVE_GROUP = 0xd + sysIPV6_PORTRANGE = 0xe + sysICMP6_FILTER = 0x12 + + sysIPV6_CHECKSUM = 0x1a + sysIPV6_V6ONLY = 0x1b + + sysIPV6_RTHDRDSTOPTS = 0x23 + + sysIPV6_RECVPKTINFO = 0x24 + sysIPV6_RECVHOPLIMIT = 0x25 + sysIPV6_RECVRTHDR = 0x26 + sysIPV6_RECVHOPOPTS = 0x27 + sysIPV6_RECVDSTOPTS = 0x28 + + sysIPV6_USE_MIN_MTU = 0x2a + sysIPV6_RECVPATHMTU = 0x2b + + sysIPV6_PATHMTU = 0x2c + + sysIPV6_PKTINFO = 0x2e + sysIPV6_HOPLIMIT = 0x2f + sysIPV6_NEXTHOP = 0x30 + sysIPV6_HOPOPTS = 0x31 + sysIPV6_DSTOPTS = 0x32 + sysIPV6_RTHDR = 0x33 + + sysIPV6_AUTH_LEVEL = 0x35 + sysIPV6_ESP_TRANS_LEVEL = 0x36 + sysIPV6_ESP_NETWORK_LEVEL = 0x37 + sysIPSEC6_OUTSA = 0x38 + sysIPV6_RECVTCLASS = 0x39 + + sysIPV6_AUTOFLOWLABEL = 0x3b + sysIPV6_IPCOMP_LEVEL = 0x3c + + sysIPV6_TCLASS = 0x3d + sysIPV6_DONTFRAG = 0x3e + sysIPV6_PIPEX = 0x3f + + sysIPV6_RTABLE = 0x1021 + + sysIPV6_PORTRANGE_DEFAULT = 0x0 + sysIPV6_PORTRANGE_HIGH = 0x1 + sysIPV6_PORTRANGE_LOW = 0x2 + 
+ sizeofSockaddrInet6 = 0x1c + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x20 + + sizeofIPv6Mreq = 0x14 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type icmpv6Filter struct { + Filt [8]uint32 +} diff --git a/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/vendor/golang.org/x/net/ipv6/zsys_solaris.go new file mode 100644 index 0000000000..cf1837dd2a --- /dev/null +++ b/vendor/golang.org/x/net/ipv6/zsys_solaris.go @@ -0,0 +1,131 @@ +// Created by cgo -godefs - DO NOT EDIT +// cgo -godefs defs_solaris.go + +package ipv6 + +const ( + sysIPV6_UNICAST_HOPS = 0x5 + sysIPV6_MULTICAST_IF = 0x6 + sysIPV6_MULTICAST_HOPS = 0x7 + sysIPV6_MULTICAST_LOOP = 0x8 + sysIPV6_JOIN_GROUP = 0x9 + sysIPV6_LEAVE_GROUP = 0xa + + sysIPV6_PKTINFO = 0xb + + sysIPV6_HOPLIMIT = 0xc + sysIPV6_NEXTHOP = 0xd + sysIPV6_HOPOPTS = 0xe + sysIPV6_DSTOPTS = 0xf + + sysIPV6_RTHDR = 0x10 + sysIPV6_RTHDRDSTOPTS = 0x11 + + sysIPV6_RECVPKTINFO = 0x12 + sysIPV6_RECVHOPLIMIT = 0x13 + sysIPV6_RECVHOPOPTS = 0x14 + + sysIPV6_RECVRTHDR = 0x16 + + sysIPV6_RECVRTHDRDSTOPTS = 0x17 + + sysIPV6_CHECKSUM = 0x18 + sysIPV6_RECVTCLASS = 0x19 + sysIPV6_USE_MIN_MTU = 0x20 + sysIPV6_DONTFRAG = 0x21 + sysIPV6_SEC_OPT = 0x22 + sysIPV6_SRC_PREFERENCES = 0x23 + sysIPV6_RECVPATHMTU = 0x24 + sysIPV6_PATHMTU = 0x25 + sysIPV6_TCLASS = 0x26 + sysIPV6_V6ONLY = 0x27 + + sysIPV6_RECVDSTOPTS = 0x28 + + sysMCAST_JOIN_GROUP = 0x29 + sysMCAST_LEAVE_GROUP = 0x2a + sysMCAST_BLOCK_SOURCE = 0x2b + sysMCAST_UNBLOCK_SOURCE = 0x2c + sysMCAST_JOIN_SOURCE_GROUP = 0x2d + sysMCAST_LEAVE_SOURCE_GROUP = 0x2e + + sysIPV6_PREFER_SRC_HOME = 0x1 + sysIPV6_PREFER_SRC_COA = 0x2 + sysIPV6_PREFER_SRC_PUBLIC = 0x4 + sysIPV6_PREFER_SRC_TMP = 0x8 + sysIPV6_PREFER_SRC_NONCGA = 0x10 + sysIPV6_PREFER_SRC_CGA = 0x20 + + sysIPV6_PREFER_SRC_MIPMASK = 0x3 + sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1 + sysIPV6_PREFER_SRC_TMPMASK = 0xc + sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4 + sysIPV6_PREFER_SRC_CGAMASK = 0x30 + sysIPV6_PREFER_SRC_CGADEFAULT = 0x10 + + sysIPV6_PREFER_SRC_MASK = 0x3f + + sysIPV6_PREFER_SRC_DEFAULT = 0x15 + + sysIPV6_BOUND_IF = 0x41 + sysIPV6_UNSPEC_SRC = 0x42 + + sysICMP6_FILTER = 0x1 + + sizeofSockaddrStorage = 0x100 + sizeofSockaddrInet6 = 0x20 + sizeofInet6Pktinfo = 0x14 + sizeofIPv6Mtuinfo = 0x24 + + sizeofIPv6Mreq = 0x14 + sizeofGroupReq = 0x104 + sizeofGroupSourceReq = 0x204 + + sizeofICMPv6Filter = 0x20 +) + +type sockaddrStorage struct { + Family uint16 + X_ss_pad1 [6]int8 + X_ss_align float64 + X_ss_pad2 [240]int8 +} + +type sockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 + X__sin6_src_id uint32 +} + +type inet6Pktinfo struct { + Addr [16]byte /* in6_addr */ + Ifindex uint32 +} + +type ipv6Mtuinfo struct { + Addr sockaddrInet6 + Mtu uint32 +} + +type ipv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +type groupReq struct { + Interface uint32 + Pad_cgo_0 [256]byte +} + +type groupSourceReq struct { + Interface uint32 + Pad_cgo_0 [256]byte + Pad_cgo_1 [256]byte +} + +type icmpv6Filter struct { + X__icmp6_filt [8]uint32 +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go 
index ce3f27e028..30fb315d13 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -126,6 +126,8 @@ var brokenAuthHeaderProviders = []string{ "https://api.sipgate.com/v1/authorization/oauth", "https://api.medium.com/v1/tokens", "https://log.finalsurge.com/oauth/token", + "https://multisport.todaysplan.com.au/rest/oauth/access_token", + "https://whats.todaysplan.com.au/rest/oauth/access_token", } // brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. diff --git a/vendor/golang.org/x/sync/AUTHORS b/vendor/golang.org/x/sync/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/sync/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sync/CONTRIBUTORS b/vendor/golang.org/x/sync/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/sync/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 0000000000..533438d91c --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,67 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancelation for groups of goroutines working on subtasks of a common task. +package errgroup + +import ( + "sync" + + "golang.org/x/net/context" +) + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. +// +// A zero Group is valid and does not cancel on error. +type Group struct { + cancel func() + + wg sync.WaitGroup + + errOnce sync.Once + err error +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. +func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancel(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel() + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to return a non-nil error cancels the group; its error will be +// returned by Wait. +func (g *Group) Go(f func() error) { + g.wg.Add(1) + + go func() { + defer g.wg.Done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel() + } + }) + } + }() +} diff --git a/vendor/golang.org/x/sync/singleflight/singleflight.go b/vendor/golang.org/x/sync/singleflight/singleflight.go new file mode 100644 index 0000000000..9a4f8d59e0 --- /dev/null +++ b/vendor/golang.org/x/sync/singleflight/singleflight.go @@ -0,0 +1,111 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
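// Illustrative use of the errgroup package vendored above (a minimal,
// hypothetical sketch; fetchURL stands in for any per-item worker and is
// not part of the patch): fan one goroutine out per URL and cancel the
// rest on the first failure.
//
//	g, ctx := errgroup.WithContext(context.Background())
//	for _, url := range []string{"https://a.example", "https://b.example"} {
//		url := url // capture the loop variable for the goroutine
//		g.Go(func() error { return fetchURL(ctx, url) })
//	}
//	// Wait blocks for all workers and returns the first non-nil error;
//	// the derived ctx is canceled as soon as any worker fails.
//	if err := g.Wait(); err != nil {
//		log.Fatal(err)
//	}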
+ +// Package singleflight provides a duplicate function call suppression +// mechanism. +package singleflight // import "golang.org/x/sync/singleflight" + +import "sync" + +// call is an in-flight or completed singleflight.Do call +type call struct { + wg sync.WaitGroup + + // These fields are written once before the WaitGroup is done + // and are only read after the WaitGroup is done. + val interface{} + err error + + // These fields are read and written with the singleflight + // mutex held before the WaitGroup is done, and are read but + // not written after the WaitGroup is done. + dups int + chans []chan<- Result +} + +// Group represents a class of work and forms a namespace in +// which units of work can be executed with duplicate suppression. +type Group struct { + mu sync.Mutex // protects m + m map[string]*call // lazily initialized +} + +// Result holds the results of Do, so they can be passed +// on a channel. +type Result struct { + Val interface{} + Err error + Shared bool +} + +// Do executes and returns the results of the given function, making +// sure that only one execution is in-flight for a given key at a +// time. If a duplicate comes in, the duplicate caller waits for the +// original to complete and receives the same results. +// The return value shared indicates whether v was given to multiple callers. +func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) { + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + g.mu.Unlock() + c.wg.Wait() + return c.val, c.err, true + } + c := new(call) + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + g.doCall(c, key, fn) + return c.val, c.err, c.dups > 0 +} + +// DoChan is like Do but returns a channel that will receive the +// results when they are ready. +func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result { + ch := make(chan Result, 1) + g.mu.Lock() + if g.m == nil { + g.m = make(map[string]*call) + } + if c, ok := g.m[key]; ok { + c.dups++ + c.chans = append(c.chans, ch) + g.mu.Unlock() + return ch + } + c := &call{chans: []chan<- Result{ch}} + c.wg.Add(1) + g.m[key] = c + g.mu.Unlock() + + go g.doCall(c, key, fn) + + return ch +} + +// doCall handles the single call for a key. +func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) { + c.val, c.err = fn() + c.wg.Done() + + g.mu.Lock() + delete(g.m, key) + for _, ch := range c.chans { + ch <- Result{c.val, c.err, c.dups > 0} + } + g.mu.Unlock() +} + +// Forget tells the singleflight to forget about a key. Future calls +// to Do for this key will call the function rather than waiting for +// an earlier call to complete. +func (g *Group) Forget(key string) { + g.mu.Lock() + delete(g.m, key) + g.mu.Unlock() +} diff --git a/vendor/golang.org/x/sync/syncmap/map.go b/vendor/golang.org/x/sync/syncmap/map.go new file mode 100644 index 0000000000..80e15847ef --- /dev/null +++ b/vendor/golang.org/x/sync/syncmap/map.go @@ -0,0 +1,372 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package syncmap provides a concurrent map implementation. +// It is a prototype for a proposed addition to the sync package +// in the standard library. 
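// Illustrative use of the singleflight package above (a hypothetical
// sketch; loadUser stands in for any expensive lookup): concurrent cache
// misses for one key collapse into a single loader call.
//
//	var group singleflight.Group
//
//	func getUser(id string) (interface{}, error) {
//		v, err, shared := group.Do(id, func() (interface{}, error) {
//			return loadUser(id) // runs at most once per in-flight key
//		})
//		_ = shared // true when the result was handed to duplicate callers
//		return v, err
//	}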
+// (https://golang.org/issue/18177) +package syncmap + +import ( + "sync" + "sync/atomic" + "unsafe" +) + +// Map is a concurrent map with amortized-constant-time loads, stores, and deletes. +// It is safe for multiple goroutines to call a Map's methods concurrently. +// +// The zero Map is valid and empty. +// +// A Map must not be copied after first use. +type Map struct { + mu sync.Mutex + + // read contains the portion of the map's contents that are safe for + // concurrent access (with or without mu held). + // + // The read field itself is always safe to load, but must only be stored with + // mu held. + // + // Entries stored in read may be updated concurrently without mu, but updating + // a previously-expunged entry requires that the entry be copied to the dirty + // map and unexpunged with mu held. + read atomic.Value // readOnly + + // dirty contains the portion of the map's contents that require mu to be + // held. To ensure that the dirty map can be promoted to the read map quickly, + // it also includes all of the non-expunged entries in the read map. + // + // Expunged entries are not stored in the dirty map. An expunged entry in the + // clean map must be unexpunged and added to the dirty map before a new value + // can be stored to it. + // + // If the dirty map is nil, the next write to the map will initialize it by + // making a shallow copy of the clean map, omitting stale entries. + dirty map[interface{}]*entry + + // misses counts the number of loads since the read map was last updated that + // needed to lock mu to determine whether the key was present. + // + // Once enough misses have occurred to cover the cost of copying the dirty + // map, the dirty map will be promoted to the read map (in the unamended + // state) and the next store to the map will make a new dirty copy. + misses int +} + +// readOnly is an immutable struct stored atomically in the Map.read field. +type readOnly struct { + m map[interface{}]*entry + amended bool // true if the dirty map contains some key not in m. +} + +// expunged is an arbitrary pointer that marks entries which have been deleted +// from the dirty map. +var expunged = unsafe.Pointer(new(interface{})) + +// An entry is a slot in the map corresponding to a particular key. +type entry struct { + // p points to the interface{} value stored for the entry. + // + // If p == nil, the entry has been deleted and m.dirty == nil. + // + // If p == expunged, the entry has been deleted, m.dirty != nil, and the entry + // is missing from m.dirty. + // + // Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty + // != nil, in m.dirty[key]. + // + // An entry can be deleted by atomic replacement with nil: when m.dirty is + // next created, it will atomically replace nil with expunged and leave + // m.dirty[key] unset. + // + // An entry's associated value can be updated by atomic replacement, provided + // p != expunged. If p == expunged, an entry's associated value can be updated + // only after first setting m.dirty[key] = e so that lookups using the dirty + // map find the entry. + p unsafe.Pointer // *interface{} +} + +func newEntry(i interface{}) *entry { + return &entry{p: unsafe.Pointer(&i)} +} + +// Load returns the value stored in the map for a key, or nil if no +// value is present. +// The ok result indicates whether value was found in the map. 
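// Usage of the Map type (an illustrative sketch, not part of the vendored
// file); values travel as interface{}, so callers assert the concrete type:
//
//	var m syncmap.Map
//	m.Store("alice", 30)
//	if v, ok := m.Load("alice"); ok {
//		age := v.(int)
//		_ = age
//	}
//	m.Range(func(k, v interface{}) bool {
//		return true // keep iterating; false stops the walk
//	})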
+func (m *Map) Load(key interface{}) (value interface{}, ok bool) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + // Avoid reporting a spurious miss if m.dirty got promoted while we were + // blocked on m.mu. (If further loads of the same key will not miss, it's + // not worth copying the dirty map for this key.) + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + e, ok = m.dirty[key] + // Regardless of whether the entry was present, record a miss: this key + // will take the slow path until the dirty map is promoted to the read + // map. + m.missLocked() + } + m.mu.Unlock() + } + if !ok { + return nil, false + } + return e.load() +} + +func (e *entry) load() (value interface{}, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return nil, false + } + return *(*interface{})(p), true +} + +// Store sets the value for a key. +func (m *Map) Store(key, value interface{}) { + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok && e.tryStore(&value) { + return + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + // The entry was previously expunged, which implies that there is a + // non-nil dirty map and this entry is not in it. + m.dirty[key] = e + } + e.storeLocked(&value) + } else if e, ok := m.dirty[key]; ok { + e.storeLocked(&value) + } else { + if !read.amended { + // We're adding the first new key to the dirty map. + // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + } + m.mu.Unlock() +} + +// tryStore stores a value if the entry has not been expunged. +// +// If the entry is expunged, tryStore returns false and leaves the entry +// unchanged. +func (e *entry) tryStore(i *interface{}) bool { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + for { + if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) { + return true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return false + } + } +} + +// unexpungeLocked ensures that the entry is not marked as expunged. +// +// If the entry was previously expunged, it must be added to the dirty map +// before m.mu is unlocked. +func (e *entry) unexpungeLocked() (wasExpunged bool) { + return atomic.CompareAndSwapPointer(&e.p, expunged, nil) +} + +// storeLocked unconditionally stores a value to the entry. +// +// The entry must be known not to be expunged. +func (e *entry) storeLocked(i *interface{}) { + atomic.StorePointer(&e.p, unsafe.Pointer(i)) +} + +// LoadOrStore returns the existing value for the key if present. +// Otherwise, it stores and returns the given value. +// The loaded result is true if the value was loaded, false if stored. +func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) { + // Avoid locking if it's a clean hit. + read, _ := m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + actual, loaded, ok := e.tryLoadOrStore(value) + if ok { + return actual, loaded + } + } + + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if e, ok := read.m[key]; ok { + if e.unexpungeLocked() { + m.dirty[key] = e + } + actual, loaded, _ = e.tryLoadOrStore(value) + } else if e, ok := m.dirty[key]; ok { + actual, loaded, _ = e.tryLoadOrStore(value) + m.missLocked() + } else { + if !read.amended { + // We're adding the first new key to the dirty map. 
+ // Make sure it is allocated and mark the read-only map as incomplete. + m.dirtyLocked() + m.read.Store(readOnly{m: read.m, amended: true}) + } + m.dirty[key] = newEntry(value) + actual, loaded = value, false + } + m.mu.Unlock() + + return actual, loaded +} + +// tryLoadOrStore atomically loads or stores a value if the entry is not +// expunged. +// +// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and +// returns with ok==false. +func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) { + p := atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + + // Copy the interface after the first load to make this method more amenable + // to escape analysis: if we hit the "load" path or the entry is expunged, we + // shouldn't bother heap-allocating. + ic := i + for { + if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) { + return i, false, true + } + p = atomic.LoadPointer(&e.p) + if p == expunged { + return nil, false, false + } + if p != nil { + return *(*interface{})(p), true, true + } + } +} + +// Delete deletes the value for a key. +func (m *Map) Delete(key interface{}) { + read, _ := m.read.Load().(readOnly) + e, ok := read.m[key] + if !ok && read.amended { + m.mu.Lock() + read, _ = m.read.Load().(readOnly) + e, ok = read.m[key] + if !ok && read.amended { + delete(m.dirty, key) + } + m.mu.Unlock() + } + if ok { + e.delete() + } +} + +func (e *entry) delete() (hadValue bool) { + for { + p := atomic.LoadPointer(&e.p) + if p == nil || p == expunged { + return false + } + if atomic.CompareAndSwapPointer(&e.p, p, nil) { + return true + } + } +} + +// Range calls f sequentially for each key and value present in the map. +// If f returns false, range stops the iteration. +// +// Range does not necessarily correspond to any consistent snapshot of the Map's +// contents: no key will be visited more than once, but if the value for any key +// is stored or deleted concurrently, Range may reflect any mapping for that key +// from any point during the Range call. +// +// Range may be O(N) with the number of elements in the map even if f returns +// false after a constant number of calls. +func (m *Map) Range(f func(key, value interface{}) bool) { + // We need to be able to iterate over all of the keys that were already + // present at the start of the call to Range. + // If read.amended is false, then read.m satisfies that property without + // requiring us to hold m.mu for a long time. + read, _ := m.read.Load().(readOnly) + if read.amended { + // m.dirty contains keys not in read.m. Fortunately, Range is already O(N) + // (assuming the caller does not break out early), so a call to Range + // amortizes an entire copy of the map: we can promote the dirty copy + // immediately! 
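	// (Put differently: Range pays the O(N) promotion cost up front, so the
	// iteration itself, and any later Load of an existing key, is served by
	// the lock-free read map until the next brand-new key is stored.)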
+ m.mu.Lock() + read, _ = m.read.Load().(readOnly) + if read.amended { + read = readOnly{m: m.dirty} + m.read.Store(read) + m.dirty = nil + m.misses = 0 + } + m.mu.Unlock() + } + + for k, e := range read.m { + v, ok := e.load() + if !ok { + continue + } + if !f(k, v) { + break + } + } +} + +func (m *Map) missLocked() { + m.misses++ + if m.misses < len(m.dirty) { + return + } + m.read.Store(readOnly{m: m.dirty}) + m.dirty = nil + m.misses = 0 +} + +func (m *Map) dirtyLocked() { + if m.dirty != nil { + return + } + + read, _ := m.read.Load().(readOnly) + m.dirty = make(map[interface{}]*entry, len(read.m)) + for k, e := range read.m { + if !e.tryExpungeLocked() { + m.dirty[k] = e + } + } +} + +func (e *entry) tryExpungeLocked() (isExpunged bool) { + p := atomic.LoadPointer(&e.p) + for p == nil { + if atomic.CompareAndSwapPointer(&e.p, nil, expunged) { + return true + } + p = atomic.LoadPointer(&e.p) + } + return p == expunged +} diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 83b6bceab4..df52048773 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -7,7 +7,7 @@ package unix import ( - errorspkg "errors" + "errors" "fmt" ) @@ -60,26 +60,26 @@ func CapRightsSet(rights *CapRights, setrights []uint64) error { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") + return errors.New("bad rights size") } for _, right := range setrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") + return errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return err } if i >= n { - return errorspkg.New("index overflow") + return errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") + return errors.New("index mismatch") } rights.Rights[i] |= right if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") + return errors.New("index mismatch (after assign)") } } @@ -95,26 +95,26 @@ func CapRightsClear(rights *CapRights, clearrights []uint64) error { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") + return errors.New("bad rights size") } for _, right := range clearrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") + return errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return err } if i >= n { - return errorspkg.New("index overflow") + return errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") + return errors.New("index mismatch") } rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF) if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") + return errors.New("index mismatch (after assign)") } } @@ -130,22 +130,22 @@ func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return false, errorspkg.New("bad rights size") + return false, errors.New("bad rights size") } for _, right := range setrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return false, errorspkg.New("bad right version") + return false, errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return false, err } if i >= 
n { - return false, errorspkg.New("index overflow") + return false, errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return false, errorspkg.New("index mismatch") + return false, errors.New("index mismatch") } if (rights.Rights[i] & right) != right { return false, nil diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index 23590bda36..c28e42b009 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -42,6 +42,10 @@ func main() { log.Fatal(err) } + // Intentionally export __val fields in Fsid and Sigset_t + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) + // If we have empty Ptrace structs, we should delete them. Only s390x emits // nonempty Ptrace structs. ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) @@ -69,12 +73,9 @@ func main() { removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - // We refuse to export private fields on s390x - if goarch == "s390x" && goos == "linux" { - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\bX_\S+`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - } + // Remove padding, hidden, or unused fields + removeFieldsRegex = regexp.MustCompile(`\bX_\S+`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) // Remove the first line of warning from cgo b = b[bytes.IndexByte(b, '\n')+1:] diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 006e21f5d0..693be791cf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,7 +13,7 @@ package unix import ( - errorspkg "errors" + "errors" "syscall" "unsafe" ) @@ -98,7 +98,7 @@ type attrList struct { func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { if len(attrBuf) < 4 { - return nil, errorspkg.New("attrBuf too small") + return nil, errors.New("attrBuf too small") } attrList.bitmapCount = attrBitMapCount @@ -134,12 +134,12 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( for i := uint32(0); int(i) < len(dat); { header := dat[i:] if len(header) < 8 { - return attrs, errorspkg.New("truncated attribute header") + return attrs, errors.New("truncated attribute header") } datOff := *(*int32)(unsafe.Pointer(&header[0])) attrLen := *(*uint32)(unsafe.Pointer(&header[4])) if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { - return attrs, errorspkg.New("truncated results; attrBuf too small") + return attrs, errors.New("truncated results; attrBuf too small") } end := uint32(datOff) + attrLen attrs = append(attrs, dat[datOff:end]) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index a24ba5fbcf..04f38c53ee 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -944,15 +944,17 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return - } - // receive at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if len(p) == 0 
{ + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return + } + // receive at least one normal byte + if sockType != SOCK_DGRAM { + iov.Base = &dummy + iov.SetLen(1) + } } msg.Control = &oob[0] msg.SetControllen(len(oob)) @@ -996,15 +998,17 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return 0, err - } - // send at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if len(p) == 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return 0, err + } + // send at least one normal byte + if sockType != SOCK_DGRAM { + iov.Base = &dummy + iov.SetLen(1) + } } msg.Control = &oob[0] msg.SetControllen(len(oob)) @@ -1260,6 +1264,7 @@ func Getpgrp() (pid int) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 53d38a5342..d121106323 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -29,7 +29,15 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -40,7 +48,12 @@ package unix //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys Stat(path string, stat *Stat_t) (err error) + +func Stat(path string, stat *Stat_t) (err error) { + // Use fstatat, because Android's seccomp policy blocks stat. 
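	// (With AT_FDCWD and zero flags, fstatat resolves path relative to the
	// current working directory and follows symlinks, so the observable
	// behavior matches stat(2); only the syscall number differs.)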
+ return Fstatat(AT_FDCWD, path, stat, 0) +} + //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 3d5817f661..9e16cc9d14 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -36,7 +36,7 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) -//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 387e1cfcf9..614fcf0494 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -222,6 +222,7 @@ func Uname(uname *Utsname) error { //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrtable() (rtable int, err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) @@ -259,6 +260,7 @@ func Uname(uname *Utsname) error { //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -307,7 +309,6 @@ func Uname(uname *Utsname) error { // getlogin // getresgid // getresuid -// getrtable // getthrid // ktrace // lfs_bmapv @@ -343,7 +344,6 @@ func Uname(uname *Utsname) error { // semop // setgroups // setitimer -// setrtable // setsockopt // shmat // shmctl diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index 262dc520ea..b835bad0fe 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -9,6 +9,7 @@ package unix import ( "bytes" "runtime" + "sort" "sync" "syscall" "unsafe" @@ -51,6 +52,28 @@ func errnoErr(e syscall.Errno) error { return e } +// ErrnoName returns the error name for error number e. +func ErrnoName(e syscall.Errno) string { + i := sort.Search(len(errorList), func(i int) bool { + return errorList[i].num >= e + }) + if i < len(errorList) && errorList[i].num == e { + return errorList[i].name + } + return "" +} + +// SignalName returns the signal name for signal number s. +func SignalName(s syscall.Signal) string { + i := sort.Search(len(signalList), func(i int) bool { + return signalList[i].num >= s + }) + if i < len(signalList) && signalList[i].num == s { + return signalList[i].name + } + return "" +} + // clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. 
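// Usage of the ErrnoName/SignalName helpers added above (an illustrative
// sketch; the kill target is an arbitrary, presumably dead process group):
//
//	err := unix.Kill(-1234567, unix.SIGCONT)
//	if errno, ok := err.(syscall.Errno); ok {
//		fmt.Println(unix.ErrnoName(errno)) // e.g. "ESRCH"
//	}
//	fmt.Println(unix.SignalName(unix.SIGTERM)) // prints "SIGTERM"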
func clen(n []byte) int { i := bytes.IndexByte(n, 0) diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index dcba88424b..c5a9b73dc8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -1624,146 +1624,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", 
"connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, 
"SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 1a51c963c8..7de222b652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1624,146 +1624,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in 
path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, 
"ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT 
trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index fa135b17c1..33a42e78a2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -1624,146 +1624,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 
70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, 
"ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, 
"SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 6419c65e13..71805155bc 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1624,146 +1624,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too 
many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", 
"network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", 
"hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index 474441b808..46a082b6d5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -1437,142 +1437,150 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: 
"host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "no medium found", - 94: "unknown error: 94", - 95: "unknown error: 95", - 96: "unknown error: 96", - 97: "unknown error: 97", - 98: "unknown error: 98", - 99: "unknown error: 99", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, 
"ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOMEDIUM", "no medium found"}, + {94, "EUNUSED94", "unknown error: 94"}, + {95, "EUNUSED95", "unknown error: 95"}, + {96, "EUNUSED96", "unknown error: 96"}, + {97, "EUNUSED97", "unknown error: 97"}, + {98, "EUNUSED98", "unknown error: 98"}, + {99, "ELAST", "unknown error: 99"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread Scheduler", - 33: "checkPoint", - 34: "checkPointExit", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", 
"urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread Scheduler"}, + {33, "SIGCKPT", "checkPoint"}, + {34, "SIGCKPTEXIT", "checkPointExit"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index a8b05878e3..2947dc0382 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1619,138 +1619,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, 
"ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + 
{28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index cf5f01260e..c600d012d0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1620,138 +1620,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, 
"ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + 
{28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 9bbb90ad8a..e8240d2397 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1628,138 +1628,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, 
"ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + 
{28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 68de61b8c3..3c3d5e5e85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1560,6 +1601,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1944,6 +1999,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 
0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2133,171 +2268,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared 
libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not 
empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, 
"EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index a5748ae9e9..4229e971e7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + 
HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1561,6 +1602,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1945,6 +2000,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 
+ WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2134,171 +2269,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no 
buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, 
"ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, 
"EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 6d9fc7e943..d0c62aebd0 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 
0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1568,6 +1609,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1952,6 +2007,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2141,171 +2276,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid 
argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner 
died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib 
section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = 
[...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 0253ba34fd..018fe573c6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -447,6 +447,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -545,6 +546,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1551,6 +1592,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1935,6 +1990,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + 
WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2124,171 +2259,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI 
structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, 
"EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not 
supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty 
output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 4cef12d8e7..f523b6c33a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1562,6 +1603,20 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1949,6 +2004,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + 
WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2140,174 +2275,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section 
in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result 
out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX 
named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 4782b3efb6..7e2e296433 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1562,6 +1603,20 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1949,6 +2004,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 
0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2140,174 +2275,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not 
supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", 
"invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in 
progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 86eb3063cd..08f9638ac8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 
0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1562,6 +1603,20 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1949,6 +2004,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2140,174 +2275,182 @@ const ( ) // Error table 
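[Editorial note on the hunks below: the same mechanical change repeats in every generated zerrors_linux_*.go file in this diff. The sparse errors/signals string arrays, indexed by errno or signal number, become errorList/signalList struct tables that record the symbolic name (EPERM, SIGHUP, ...) alongside the description. Because the entries stay sorted by number, a caller can resolve a number to its name with a binary search; x/sys/unix exposes lookups of this kind as unix.ErrnoName and unix.SignalName. What follows is a minimal, self-contained sketch of that lookup, written against a small hand-written table rather than the full generated one; the helper name errnoName and the truncated table are illustrative assumptions, not code from this diff.

package main

import (
	"fmt"
	"sort"
	"syscall"
)

// A hand-written table with the same shape as the generated errorList:
// entries must be sorted by errno number so binary search works.
var errorList = [...]struct {
	num  syscall.Errno
	name string
	desc string
}{
	{1, "EPERM", "operation not permitted"},
	{2, "ENOENT", "no such file or directory"},
	{17, "EEXIST", "file exists"},
}

// errnoName returns the symbolic name for e, or "" if e is not in the
// table; this mirrors the behavior of unix.ErrnoName over the full table.
func errnoName(e syscall.Errno) string {
	i := sort.Search(len(errorList), func(i int) bool {
		return errorList[i].num >= e
	})
	if i < len(errorList) && errorList[i].num == e {
		return errorList[i].name
	}
	return ""
}

func main() {
	fmt.Println(errnoName(syscall.EEXIST))     // EEXIST
	fmt.Println(errnoName(syscall.Errno(999))) // "" (not in this toy table)
}

sort.Search keeps the lookup O(log n) even over the full generated tables, and returning the empty string for an unknown number matches the fact that not every errno value has a name on every architecture (note the gaps, e.g. 47-49 on MIPS, in the tables below). End of note; the diff resumes.]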
-var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not 
a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + 
{68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory 
page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 2168ece949..3a3fd1df16 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE 
= 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1562,6 +1603,20 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1949,6 +2004,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2140,174 +2275,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: 
"is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 
162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a 
corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O 
possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 18e48b4f2b..3e6c1783e6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x4000 IBSHIFT = 0x10 ICANON = 0x100 @@ -1618,6 +1659,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + 
SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -2006,6 +2061,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2195,172 +2330,180 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic 
links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 58: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format 
error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address 
required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined 
signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index c069f8fabc..4fabd8d610 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x4000 IBSHIFT = 0x10 ICANON = 0x100 @@ -1618,6 +1659,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -2006,6 +2061,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + 
WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2195,172 +2330,180 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 58: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is 
remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text 
file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused 
connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, 
} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3f0e27e27..685667adfa 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -446,6 +446,7 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 @@ -543,6 +544,46 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -1622,6 +1663,20 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -2006,6 +2061,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 
0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2195,171 +2330,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation 
on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, 
"EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, 
"EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 3eef63f5a6..cd93ce0d85 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -1584,137 +1584,145 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: 
"operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + 
{61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", 
"information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 40c870be15..071701c411 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -1574,137 +1574,145 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + 
{61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", 
"information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 43c4add50c..5fe56ae8c7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -1563,137 +1563,145 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + 
{61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", 
"information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index f47536dc43..0a1c3e7e8c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -1461,132 +1461,140 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, 
"EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ELAST", "not supported"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index c96ca653ac..0d50f6b093 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -1460,132 +1460,140 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, 
"EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ELAST", "not supported"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 4c027352e9..93e37c4b28 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1463,132 +1463,140 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, 
"EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ELAST", "not supported"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 09eedb0093..be42830cf3 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -1319,171 +1319,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "not owner", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "I/O error", - 6: "no such device or address", - 7: "arg list too long", - 8: "exec format error", - 9: "bad file number", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "not enough space", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "file table overflow", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "argument out of domain", - 34: "result too large", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "deadlock situation detected/avoided", - 46: "no record locks available", - 47: "operation canceled", - 48: "operation not supported", - 49: "disc quota exceeded", - 50: "bad exchange descriptor", - 51: "bad request descriptor", - 52: "message tables full", - 53: "anode table overflow", - 54: "bad request code", - 55: "invalid slot", - 56: "file locking deadlock", - 57: "bad font file format", - 58: "owner of the lock died", - 59: "lock is not recoverable", - 60: "not a stream device", - 61: "no data available", - 62: "timer expired", - 63: "out of stream resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "locked lock was unmapped ", - 73: "facility is not active", - 74: "multihop attempted", - 77: "not a data message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in more shared libraries than system limit", - 87: "can not exec a shared library directly", - 88: "illegal byte sequence", - 89: "operation not applicable", - 90: "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS", - 91: "error 91", - 92: "error 92", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "option not supported by protocol", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported on transport endpoint", - 123: "protocol family not supported", - 124: "address family not supported by protocol family", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 
128: "network is unreachable", - 129: "network dropped connection because of reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 143: "cannot send after socket shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale NFS file handle", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "not owner"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "I/O error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "arg list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file number"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "not enough space"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "file table overflow"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "deadlock situation detected/avoided"}, + {46, "ENOLCK", "no record locks available"}, + {47, "ECANCELED", "operation canceled"}, + {48, "ENOTSUP", "operation not supported"}, + {49, "EDQUOT", "disc quota exceeded"}, + {50, "EBADE", "bad exchange descriptor"}, + {51, "EBADR", "bad request descriptor"}, + {52, "EXFULL", "message tables full"}, + {53, "ENOANO", "anode table overflow"}, + {54, "EBADRQC", "bad request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock"}, + {57, "EBFONT", "bad font file format"}, + {58, "EOWNERDEAD", "owner of the lock died"}, + {59, "ENOTRECOVERABLE", "lock is not recoverable"}, + {60, "ENOSTR", "not a stream device"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of stream resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, 
+ {71, "EPROTO", "protocol error"}, + {72, "ELOCKUNMAPPED", "locked lock was unmapped "}, + {73, "ENOTACTIVE", "facility is not active"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "not a data message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in more shared libraries than system limit"}, + {87, "ELIBEXEC", "can not exec a shared library directly"}, + {88, "EILSEQ", "illegal byte sequence"}, + {89, "ENOSYS", "operation not applicable"}, + {90, "ELOOP", "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS"}, + {91, "ERESTART", "error 91"}, + {92, "ESTRPIPE", "error 92"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "option not supported by protocol"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "EOPNOTSUPP", "operation not supported on transport endpoint"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection because of reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {143, "ESHUTDOWN", "cannot send after socket shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale NFS file handle"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal Instruction", - 5: "trace/Breakpoint Trap", - 6: "abort", - 7: "emulation Trap", - 8: "arithmetic Exception", - 9: "killed", - 10: "bus Error", - 11: "segmentation Fault", - 12: "bad System Call", - 13: "broken Pipe", - 14: "alarm Clock", - 15: "terminated", - 16: "user Signal 1", - 17: "user Signal 2", - 18: "child Status Changed", - 19: "power-Fail/Restart", - 20: "window Size Change", - 21: "urgent Socket Condition", - 22: "pollable Event", - 23: "stopped (signal)", - 24: "stopped (user)", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual Timer Expired", - 29: "profiling Timer Expired", - 30: "cpu Limit Exceeded", - 31: "file Size Limit Exceeded", - 32: "no runnable lwp", - 33: 
"inter-lwp signal", - 34: "checkpoint Freeze", - 35: "checkpoint Thaw", - 36: "thread Cancellation", - 37: "resource Lost", - 38: "resource Control Exceeded", - 39: "reserved for JVM 1", - 40: "reserved for JVM 2", - 41: "information Request", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal Instruction"}, + {5, "SIGTRAP", "trace/Breakpoint Trap"}, + {6, "SIGABRT", "abort"}, + {7, "SIGEMT", "emulation Trap"}, + {8, "SIGFPE", "arithmetic Exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus Error"}, + {11, "SIGSEGV", "segmentation Fault"}, + {12, "SIGSYS", "bad System Call"}, + {13, "SIGPIPE", "broken Pipe"}, + {14, "SIGALRM", "alarm Clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user Signal 1"}, + {17, "SIGUSR2", "user Signal 2"}, + {18, "SIGCHLD", "child Status Changed"}, + {19, "SIGPWR", "power-Fail/Restart"}, + {20, "SIGWINCH", "window Size Change"}, + {21, "SIGURG", "urgent Socket Condition"}, + {22, "SIGIO", "pollable Event"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped (user)"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual Timer Expired"}, + {29, "SIGPROF", "profiling Timer Expired"}, + {30, "SIGXCPU", "cpu Limit Exceeded"}, + {31, "SIGXFSZ", "file Size Limit Exceeded"}, + {32, "SIGWAITING", "no runnable lwp"}, + {33, "SIGLWP", "inter-lwp signal"}, + {34, "SIGFREEZE", "checkpoint Freeze"}, + {35, "SIGTHAW", "checkpoint Thaw"}, + {36, "SIGCANCEL", "thread Cancellation"}, + {37, "SIGLOST", "resource Lost"}, + {38, "SIGXRES", "resource Control Exceeded"}, + {39, "SIGJVM1", "reserved for JVM 1"}, + {40, "SIGJVM2", "reserved for JVM 2"}, + {41, "SIGINFO", "information Request"}, } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index ef9602c1eb..433becfd02 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 63054b3585..33c02b2695 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1784,17 +1795,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1897,21 +1897,6 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Statfs(path string, buf *Statfs_t) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 8b10ee1445..f91b56c217 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 8c9e26a0a1..52d7595250 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 
8dc2b58f58..970a5c132d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1747,9 +1758,9 @@ func Shutdown(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(int64(r0)<<32 | int64(r1)) +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index e8beef850c..b989d0f282 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 899e4403a9..1f8d14cacc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 7a477cbdea..a9c7e520e4 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1747,9 +1758,9 @@ func Shutdown(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(int64(r1)<<32 | int64(r0)) +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 9dc4c7d6d4..3bb9a20992 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index f0d1ee125c..56116623d6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index c443baf63f..9696a0199d 100644 --- 
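Note: every zsyscall_linux_*.go file in this range (the s390x hunk follows below) gains the same generated PerfEventOpen wrapper around SYS_PERF_EVENT_OPEN. A hedged sketch of driving it, counting a software CPU-clock event for the calling process per the perf_event_open(2) calling convention; whether the kernel allows this also depends on settings such as perf_event_paranoid, so treat it as illustrative only:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	// Describe the event: a software CPU-clock counter, kernel excluded.
	attr := unix.PerfEventAttr{
		Type:   unix.PERF_TYPE_SOFTWARE,
		Config: unix.PERF_COUNT_SW_CPU_CLOCK,
		Bits:   unix.PerfBitExcludeKernel,
	}
	attr.Size = uint32(unsafe.Sizeof(attr))

	// pid=0, cpu=-1: measure this process on any CPU; no group, no flags.
	fd, err := unix.PerfEventOpen(&attr, 0, -1, -1, 0)
	if err != nil {
		fmt.Println("perf_event_open:", err)
		return
	}
	defer unix.Close(fd)

	// ... run the code under measurement here ...

	// The current counter value is read from the fd as a host-endian
	// uint64; LittleEndian below assumes a little-endian host.
	var buf [8]byte
	if _, err := unix.Read(fd, buf[:]); err != nil {
		fmt.Println("read:", err)
		return
	}
	fmt.Println("cpu-clock (ns):", binary.LittleEndian.Uint64(buf[:]))
}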
a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 5c09c07587..49b3b5e8a4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -780,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1254,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 54ccc935d4..c4c7d8540c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -780,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1254,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 59258b0a45..210285b0ba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ 
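Note: the OpenBSD zsyscall files (386 and amd64 above; arm follows below) gain Getrtable and Setrtable wrappers for the getrtable(2)/setrtable(2) syscalls, which read and set the routing table a process uses. A short, OpenBSD-only sketch, assuming the vendored package is imported as unix; illustrative only:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Read the routing table the current process is attached to.
	rtable, err := unix.Getrtable()
	if err != nil {
		fmt.Println("getrtable:", err)
		return
	}
	fmt.Println("current rtable:", rtable)

	// Re-attach to the same table; switching to another table
	// typically requires sufficient privileges.
	if err := unix.Setrtable(rtable); err != nil {
		fmt.Println("setrtable:", err)
	}
}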
b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -780,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1254,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 3c56b20762..a805313f2c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -96,24 +96,24 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - X__pad1 uint16 - _ [2]byte - X__st_ino uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - X__pad2 uint16 - _ [2]byte - Size int64 - Blksize int32 - Blocks int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Ino uint64 + Dev uint64 + _ uint16 + _ [2]byte + _ uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [2]byte + Size int64 + Blksize int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 } type Statfs_t struct { @@ -132,9 +132,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -171,7 +171,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -583,12 +583,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -683,7 +683,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname struct { @@ -739,7 +739,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x80045200 @@ -895,3 +895,352 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + 
Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + 
PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + 
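Note: the ztypes_linux_*.go hunks collapse the C bitfield at the top of struct perf_event_attr into the single Bits uint64 field, with the PerfBit* names aliasing CBitFieldMaskBit* single-bit masks (bit n is 1 << n); the same hunks also replace unusable X__-prefixed padding fields with blank identifiers (for example Stat_t.X__pad1 becomes _) and export Fsid.Val and Sigset_t.Val. A small sketch of composing and testing these flags; the constant names mirror the hunks above, the rest is illustrative:

package main

import "fmt"

// Mirror of the single-bit masks defined above: bit n is 1 << n.
const (
	CBitFieldMaskBit0 = 0x1 << iota
	CBitFieldMaskBit1
	CBitFieldMaskBit2
)

const (
	PerfBitDisabled uint64 = CBitFieldMaskBit0
	PerfBitInherit         = CBitFieldMaskBit1
	PerfBitPinned          = CBitFieldMaskBit2
)

func main() {
	// Compose a perf_event_attr Bits value by OR-ing the masks.
	bits := PerfBitDisabled | PerfBitInherit

	// Test an individual flag by AND-ing with its mask.
	fmt.Println("disabled:", bits&PerfBitDisabled != 0) // true
	fmt.Println("pinned:", bits&PerfBitPinned != 0)     // false
}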
Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 92f1c8fe4d..98aa92371b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad0 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -132,9 +132,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -171,7 +171,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -587,12 +587,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -698,7 +698,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -757,7 +757,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -913,3 +913,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = 
CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + 
CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + 
Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index cee5459ac1..ead8f252ec 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -96,25 +96,25 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - X__pad1 uint16 - _ [2]byte - X__st_ino uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - X__pad2 uint16 - _ [6]byte - Size int64 - Blksize int32 - _ [4]byte - Blocks int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Ino uint64 + Dev uint64 + _ uint16 + _ [2]byte + _ uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [6]byte + Size int64 + Blksize int32 + _ [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 } type Statfs_t struct { @@ -134,9 +134,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +173,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -587,12 +587,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -671,7 +671,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]uint8 + _ [8]uint8 } type Utsname struct { @@ -728,7 +728,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x80045200 @@ -884,3 +884,352 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = 
CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 
0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]uint8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + 
Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 5ce7cfe835..8c32c3bff0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -106,10 +106,10 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev uint64 - X__pad1 uint64 + _ uint64 Size int64 Blksize int32 - X__pad2 int32 + _ int32 Blocks int64 Atim Timespec Mtim Timespec @@ -133,9 +133,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -588,12 +588,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +676,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -736,7 +736,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -892,3 +892,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + 
PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 
= 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 2860b3e284..af291d948b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -133,9 +133,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - 
X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -586,12 +586,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +676,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname struct { @@ -733,7 +733,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x40045200 @@ -889,3 +889,352 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + 
PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 
0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 17b881cab2..324ee16fcf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -133,9 +133,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -588,12 +588,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -679,7 +679,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -738,7 +738,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -894,3 +894,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type 
uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + 
PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + 
Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index ec802dc892..08849df80a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -133,9 +133,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -588,12 +588,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -679,7 +679,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -738,7 +738,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -894,3 +894,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = 
CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + 
PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + 
Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 4a99648510..3682beb6ce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -133,9 +133,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -586,12 +586,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +676,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname struct { @@ -733,7 +733,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x40045200 @@ -889,3 +889,352 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + 
PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + 
CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 63a0dbe94e..bfa39ec9ce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad2 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -134,9 +134,9 @@ type Statfs_t struct { } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +173,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -589,12 +589,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -686,7 +686,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]uint8 + _ [0]uint8 _ [4]byte } @@ -709,10 +709,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - X_padFd int32 - Fd int32 - Pad int32 + Events uint32 + _ int32 + Fd int32 + Pad int32 } const ( @@ -746,7 +746,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -902,3 +902,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 
+ PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 
= 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index f20a63591b..72eab3d832 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad2 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -134,9 +134,9 @@ type Statfs_t struct { } type StatxTimestamp 
struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +173,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -589,12 +589,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -686,7 +686,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]uint8 + _ [0]uint8 _ [4]byte } @@ -709,10 +709,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - X_padFd int32 - Fd int32 - Pad int32 + Events uint32 + _ int32 + Fd int32 + Pad int32 } const ( @@ -746,7 +746,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -902,3 +902,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + 
PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 
0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 30dc10582f..c7935b760b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -172,7 +172,7 @@ type Dirent struct { } type Fsid struct { - _ [2]int32 + Val [2]int32 } type Flock_t struct { @@ -763,7 +763,7 @@ const ( ) type Sigset_t struct { - _ [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -919,3 +919,353 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + 
Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + 
PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev 
[8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 0000000000..c256483434 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,198 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +// +package registry + +import ( + "io" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. 
+func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. 
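//
// An illustrative, hedged sketch (editorial; the key path
// `Software\HypotheticalApp` is an assumption, not from this patch):
// creating a key with CreateKey, noting whether it already existed, then
// listing its subkeys with ReadSubKeyNames, where n <= 0 reads all names.
//
//	k, existed, err := registry.CreateKey(registry.CURRENT_USER, `Software\HypotheticalApp`, registry.ALL_ACCESS)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer k.Close()
//	_ = existed // true if the key was already present
//	names, err := k.ReadSubKeyNames(-1) // n <= 0: return every subkey name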
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 0000000000..0ac95ffe73 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package registry + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 0000000000..e66643cbaa --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 0000000000..71d4e15bab --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,384 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
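//
// A minimal, hypothetical sketch (editorial; the value name "Example" is an
// assumption): calling GetValue with a nil buffer first, which reports the
// required size via n under the RegQueryValueEx size-query behavior, then
// reading with a buffer of that size.
//
//	n, valtype, err := k.GetValue("Example", nil) // n is the required size
//	if err != nil {
//		log.Fatal(err)
//	}
//	buf := make([]byte, n)
//	n, valtype, err = k.GetValue("Example", buf)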
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (date []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + u := (*[1 << 29]uint16)(unsafe.Pointer(&r[0]))[:] + return syscall.UTF16ToString(u), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[:len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + return uint64(*(*uint32)(unsafe.Pointer(&data[0]))), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + return uint64(*(*uint64)(unsafe.Pointer(&data[0]))), QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
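//
// Editorial sketch (the value names "Timeout" and "Paths" are hypothetical):
// the typed getters return the stored type alongside ErrUnexpectedType on a
// mismatch, so a caller can recover what the value actually is.
//
//	if v, _, err := k.GetIntegerValue("Timeout"); err == nil {
//		fmt.Printf("timeout: %d\n", v)
//	}
//	if _, typ, err := k.GetStringsValue("Paths"); err == registry.ErrUnexpectedType {
//		fmt.Printf("Paths is stored as type %d, not MULTI_SZ\n", typ)
//	}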
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[:len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 0000000000..ceebdd7726 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,120 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), 
uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/time/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/time/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 0000000000..7228d97e96 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,384 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. 
+// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). 
+func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// contextContext is a temporary(?) copy of the context.Context type +// to support both Go 1.6 using golang.org/x/net/context and Go 1.7+ +// with the built-in context package. If people ever stop using Go 1.6 +// we can remove this. +type contextContext interface { + Deadline() (deadline time.Time, ok bool) + Done() <-chan struct{} + Err() error + Value(key interface{}) interface{} +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) wait(ctx contextContext) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. 
+func (lim *Limiter) waitN(ctx contextContext, n int) (err error) { + if n > lim.burst && lim.limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) + defer t.Stop() + select { + case <-t.C: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + lim.mu.Unlock() + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. 
+ delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + return d.Seconds() * float64(limit) +} diff --git a/vendor/golang.org/x/time/rate/rate_go16.go b/vendor/golang.org/x/time/rate/rate_go16.go new file mode 100644 index 0000000000..6bab1850f8 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package rate + +import "golang.org/x/net/context" + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.waitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + return lim.waitN(ctx, n) +} diff --git a/vendor/golang.org/x/time/rate/rate_go17.go b/vendor/golang.org/x/time/rate/rate_go17.go new file mode 100644 index 0000000000..f90d85f51e --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package rate + +import "context" + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.waitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + return lim.waitN(ctx, n) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 36846eb54e..d9374be0d6 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -222,8 +222,7 @@ type Bucket struct { // when no ACL is provided. DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"` - // Encryption: Encryption configuration used by default for newly - // inserted objects, when no encryption config is specified. + // Encryption: Encryption configuration for a bucket. Encryption *BucketEncryption `json:"encryption,omitempty"` // Etag: HTTP 1.1 Entity tag for the bucket. 
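For reference, the token-bucket limiter vendored above in `golang.org/x/time/rate` is easiest to follow from a small usage sketch. This is illustrative only and not part of the diff; the limiter parameters and the loop are hypothetical, while `NewLimiter`, `Wait`, and `Allow` are the package's real entry points as defined in `rate.go`:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 2 tokens per second, with a bucket (burst) size of 4.
	lim := rate.NewLimiter(rate.Limit(2), 4)

	start := time.Now()
	for i := 0; i < 8; i++ {
		// Wait blocks until a token is available or the context ends.
		if err := lim.Wait(context.Background()); err != nil {
			fmt.Println("wait:", err)
			return
		}
		fmt.Printf("event %d at %v\n", i, time.Since(start).Round(10*time.Millisecond))
	}
}
```

Because `advance` clamps the elapsed time so the bucket starts full, the first four events pass immediately; after the burst is drained, `durationFromTokens` turns each one-token deficit at 2 tokens/s into a 500ms delay, so the remaining events fire roughly twice per second.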
@@ -405,12 +404,11 @@ func (s *BucketCors) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } -// BucketEncryption: Encryption configuration used by default for newly -// inserted objects, when no encryption config is specified. +// BucketEncryption: Encryption configuration for a bucket. type BucketEncryption struct { // DefaultKmsKeyName: A Cloud KMS key that will be used to encrypt // objects inserted into this bucket, if no encryption method is - // specified. Limited availability; usable only by enabled projects. + // specified. DefaultKmsKeyName string `json:"defaultKmsKeyName,omitempty"` // ForceSendFields is a list of field names (e.g. "DefaultKmsKeyName") diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index f73b7d5528..b2590e97b0 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -29,7 +29,7 @@ import ( func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) + opts = append(cc.dopts.callOptions, opts...) if cc.dopts.unaryInt != nil { return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) @@ -37,21 +37,6 @@ func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply int return invoke(ctx, method, args, reply, cc, opts...) } -func combine(o1 []CallOption, o2 []CallOption) []CallOption { - // we don't use append because o1 could have extra capacity whose - // elements would be overwritten, which could cause inadvertent - // sharing (and race connditions) between concurrent calls - if len(o1) == 0 { - return o2 - } else if len(o2) == 0 { - return o1 - } - ret := make([]CallOption, len(o1)+len(o2)) - copy(ret, o1) - copy(ret[len(o1):], o2) - return ret -} - // Invoke sends the RPC request on the wire and returns after response is // received. This is typically called by generated code. // diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 6385407292..096d9434c2 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -443,8 +443,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if cc.dopts.copts.Dialer == nil { cc.dopts.copts.Dialer = newProxyDialer( func(ctx context.Context, addr string) (net.Conn, error) { - network, addr := parseDialTarget(addr) - return dialContext(ctx, network, addr) + return dialContext(ctx, "tcp", addr) }, ) } @@ -488,25 +487,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if cc.dopts.bs == nil { cc.dopts.bs = DefaultBackoffConfig } - if cc.dopts.resolverBuilder == nil { - // Only try to parse target when resolver builder is not already set. - cc.parsedTarget = parseTarget(cc.target) - grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - if cc.dopts.resolverBuilder == nil { - // If resolver builder is still nil, the parse target's scheme is - // not registered. Fallback to default resolver and set Endpoint to - // the original unparsed target. 
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) - cc.parsedTarget = resolver.Target{ - Scheme: resolver.GetDefaultScheme(), - Endpoint: target, - } - cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) - } - } else { - cc.parsedTarget = resolver.Target{Endpoint: target} - } + cc.parsedTarget = parseTarget(cc.target) creds := cc.dopts.copts.TransportCredentials if creds != nil && creds.Info().ServerName != "" { cc.authority = creds.Info().ServerName diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 0000000000..fdcbb9e0b7 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,190 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto + +/* +Package grpc_health_v1 is a generated protocol buffer package. + +It is generated from these files: + grpc_health_v1/health.proto + +It has these top-level messages: + HealthCheckRequest + HealthCheckResponse +*/ +package grpc_health_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return 
fileDescriptor0, []int{1} } + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_health_v1/health.proto", +} + +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 
0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0x36, 0x89, 0x0d, 0x1c, 0x82, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x53, 0x2b, 0x65, + 0x20, 0x60, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/health.go b/vendor/google.golang.org/grpc/health/health.go new file mode 100644 index 0000000000..30a78667e6 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/health.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_health_v1/health.proto + +// Package health provides some utility functions to health-check a server. The implementation +// is based on protobuf. Users need to write their own implementations if other IDLs are used. +package health + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "google.golang.org/grpc/status" +) + +// Server implements `service Health`. +type Server struct { + mu sync.Mutex + // statusMap stores the serving status of the services this Server monitors. + statusMap map[string]healthpb.HealthCheckResponse_ServingStatus +} + +// NewServer returns a new Server. +func NewServer() *Server { + return &Server{ + statusMap: make(map[string]healthpb.HealthCheckResponse_ServingStatus), + } +} + +// Check implements `service Health`. +func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + if in.Service == "" { + // check the server overall health status. + return &healthpb.HealthCheckResponse{ + Status: healthpb.HealthCheckResponse_SERVING, + }, nil + } + if status, ok := s.statusMap[in.Service]; ok { + return &healthpb.HealthCheckResponse{ + Status: status, + }, nil + } + return nil, status.Error(codes.NotFound, "unknown service") +} + +// SetServingStatus is called when need to reset the serving status of a service +// or insert a new service entry into the statusMap. +func (s *Server) SetServingStatus(service string, status healthpb.HealthCheckResponse_ServingStatus) { + s.mu.Lock() + s.statusMap[service] = status + s.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 775ee4d0d2..9efcffb3aa 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -36,12 +36,21 @@ func Register(b Builder) { } // Get returns the resolver builder registered with the given scheme. 
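The health-checking service vendored above (the generated `health.pb.go` plus the `health` package) is wired into a gRPC server as sketched below. This is not part of the diff; the listen address and the `example.Echo` service name are hypothetical, while `health.NewServer`, `SetServingStatus`, and `RegisterHealthServer` are the APIs defined in the vendored files:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // hypothetical address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()

	hs := health.NewServer()
	// Per-service status; per the Check implementation above, a request
	// with an empty service name reports overall server health (SERVING).
	hs.SetServingStatus("example.Echo", healthpb.HealthCheckResponse_SERVING)
	healthpb.RegisterHealthServer(s, hs)

	log.Fatal(s.Serve(lis))
}
```

Clients probe readiness by invoking the `Check` RPC (also shown above) with the service name; an unknown name yields `codes.NotFound`.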
-// -// If no builder is register with the scheme, nil will be returned. +// If no builder is register with the scheme, the default scheme will +// be used. +// If the default scheme is not modified, "passthrough" will be the default +// scheme, and the preinstalled dns resolver will be used. +// If the default scheme is modified, and a resolver is registered with +// the scheme, that resolver will be returned. +// If the default scheme is modified, and no resolver is registered with +// the scheme, nil will be returned. func Get(scheme string) Builder { if b, ok := m[scheme]; ok { return b } + if b, ok := m[defaultScheme]; ok { + return b + } return nil } @@ -51,11 +60,6 @@ func SetDefaultScheme(scheme string) { defaultScheme = scheme } -// GetDefaultScheme gets the default scheme that will be used. -func GetDefaultScheme() string { - return defaultScheme -} - // AddressType indicates the address type returned by name resolution. type AddressType uint8 diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 75b8ce1eb6..3e9c7594cc 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -48,9 +48,6 @@ func split2(s, sep string) (string, string, bool) { // parseTarget splits target into a struct containing scheme, authority and // endpoint. -// -// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: -// target}. func parseTarget(target string) (ret resolver.Target) { var ok bool ret.Scheme, ret.Endpoint, ok = split2(target, "://") @@ -71,9 +68,14 @@ func parseTarget(target string) (ret resolver.Target) { // If withResolverBuilder dial option is set, the specified resolver will be // used instead. func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + grpclog.Infof("dialing to target with scheme: %q", cc.parsedTarget.Scheme) + rb := cc.dopts.resolverBuilder if rb == nil { - return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + rb = resolver.Get(cc.parsedTarget.Scheme) + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } } ccr := &ccResolverWrapper{ diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 00a9976606..de254a031b 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -22,11 +22,9 @@ import ( "bytes" "compress/gzip" "encoding/binary" - "fmt" "io" "io/ioutil" "math" - "net/url" "strings" "sync" "time" @@ -57,29 +55,13 @@ type gzipCompressor struct { // NewGZIPCompressor creates a Compressor based on GZIP. func NewGZIPCompressor() Compressor { - c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) - return c -} - -// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead -// of assuming DefaultCompression. -// -// The error returned will be nil if the level is valid. 
-func NewGZIPCompressorWithLevel(level int) (Compressor, error) { - if level < gzip.DefaultCompression || level > gzip.BestCompression { - return nil, fmt.Errorf("grpc: invalid compression level: %d", level) - } return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) - if err != nil { - panic(err) - } - return w + return gzip.NewWriter(ioutil.Discard) }, }, - }, nil + } } func (c *gzipCompressor) Do(w io.Writer, p []byte) error { @@ -663,40 +645,6 @@ func setCallInfoCodec(c *callInfo) error { return nil } -// parseDialTarget returns the network and address to pass to dialer -func parseDialTarget(target string) (net string, addr string) { - net = "tcp" - - m1 := strings.Index(target, ":") - m2 := strings.Index(target, ":/") - - // handle unix:addr which will fail with url.Parse - if m1 >= 0 && m2 < 0 { - if n := target[0:m1]; n == "unix" { - net = n - addr = target[m1+1:] - return net, addr - } - } - if m2 >= 0 { - t, err := url.Parse(target) - if err != nil { - return net, target - } - scheme := t.Scheme - addr = t.Path - if scheme == "unix" { - net = scheme - if addr == "" { - addr = t.Host - } - return net, addr - } - } - - return net, target -} - // The SupportPackageIsVersion variables are referenced from generated protocol // buffer files to ensure compatibility with the gRPC version used. The latest // support package version is 5. @@ -712,6 +660,6 @@ const ( ) // Version is the current grpc version. -const Version = "1.11.3" +const Version = "1.11.0-dev" const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index c6b413b9d9..997b24fa3e 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -919,8 +919,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) - reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) if !ok { @@ -996,9 +995,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp sh.HandleRPC(stream.Context(), end) }() } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) ss := &serverStream{ - ctx: ctx, t: t, s: stream, p: &parser{r: stream}, @@ -1092,6 +1089,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ss.mu.Unlock() } return t.WriteStatus(ss.s, status.New(codes.OK, "")) + } func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { @@ -1173,40 +1171,6 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } } -// The key to save ServerTransportStream in the context. -type streamKey struct{} - -// NewContextWithServerTransportStream creates a new context from ctx and -// attaches stream to it. -// -// This API is EXPERIMENTAL. -func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// ServerTransportStream is a minimal interface that a transport stream must -// implement. This can be used to mock an actual transport stream for tests of -// handler code that use, for example, grpc.SetHeader (which requires some -// stream to be in context). 
-// -// See also NewContextWithServerTransportStream. -// -// This API is EXPERIMENTAL. -type ServerTransportStream interface { - Method() string - SetHeader(md metadata.MD) error - SendHeader(md metadata.MD) error - SetTrailer(md metadata.MD) error -} - -// serverStreamFromContext returns the server stream saved in ctx. Returns -// nil if the given context has no stream associated with it (which implies -// it is not an RPC invocation context). -func serverTransportStreamFromContext(ctx context.Context) ServerTransportStream { - s, _ := ctx.Value(streamKey{}).(ServerTransportStream) - return s -} - // Stop stops the gRPC server. It immediately closes all open // connections and listeners. // It cancels all active RPCs on the server side and the corresponding @@ -1327,8 +1291,8 @@ func SetHeader(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } - stream := serverTransportStreamFromContext(ctx) - if stream == nil { + stream, ok := transport.StreamFromContext(ctx) + if !ok { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetHeader(md) @@ -1337,11 +1301,15 @@ func SetHeader(ctx context.Context, md metadata.MD) error { // SendHeader sends header metadata. It may be called at most once. // The provided md and headers set by SetHeader() will be sent. func SendHeader(ctx context.Context, md metadata.MD) error { - stream := serverTransportStreamFromContext(ctx) - if stream == nil { + stream, ok := transport.StreamFromContext(ctx) + if !ok { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } - if err := stream.SendHeader(md); err != nil { + t := stream.ServerTransport() + if t == nil { + grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) + } + if err := t.WriteHeader(stream, md); err != nil { return toRPCErr(err) } return nil @@ -1353,19 +1321,9 @@ func SetTrailer(ctx context.Context, md metadata.MD) error { if md.Len() == 0 { return nil } - stream := serverTransportStreamFromContext(ctx) - if stream == nil { + stream, ok := transport.StreamFromContext(ctx) + if !ok { return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) } return stream.SetTrailer(md) } - -// Method returns the method string for the server context. The returned -// string is in the format of "/service/method". -func Method(ctx context.Context) (string, bool) { - s := serverTransportStreamFromContext(ctx) - if s == nil { - return "", false - } - return s.Method(), true -} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 9c61b09450..3a42dc6de0 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -46,7 +46,7 @@ func (se *statusError) Error() string { return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) } -func (se *statusError) GRPCStatus() *Status { +func (se *statusError) status() *Status { return &Status{s: (*spb.Status)(se)} } @@ -120,14 +120,14 @@ func FromProto(s *spb.Status) *Status { } // FromError returns a Status representing err if it was produced from this -// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a -// Status is returned with codes.Unknown and the original error message. +// package. Otherwise, ok is false and a Status is returned with codes.Unknown +// and the original error message. 
func FromError(err error) (s *Status, ok bool) { if err == nil { return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true } - if se, ok := err.(interface{ GRPCStatus() *Status }); ok { - return se.GRPCStatus(), true + if se, ok := err.(*statusError); ok { + return se.status(), true } return New(codes.Unknown, err.Error()), false } @@ -182,8 +182,8 @@ func Code(err error) codes.Code { if err == nil { return codes.OK } - if se, ok := err.(interface{ GRPCStatus() *Status }); ok { - return se.GRPCStatus().Code() + if se, ok := err.(*statusError); ok { + return se.status().Code() } return codes.Unknown } diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 75a4e8d45b..34d7414f91 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -104,7 +104,7 @@ type ClientStream interface { func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options - opts = combine(cc.dopts.callOptions, opts) + opts = append(cc.dopts.callOptions, opts...) if cc.dopts.streamInt != nil { return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) @@ -608,7 +608,6 @@ type ServerStream interface { // serverStream implements a server side Stream. type serverStream struct { - ctx context.Context t transport.ServerTransport s *transport.Stream p *parser @@ -629,7 +628,7 @@ type serverStream struct { } func (ss *serverStream) Context() context.Context { - return ss.ctx + return ss.s.Context() } func (ss *serverStream) SetHeader(md metadata.MD) error { @@ -732,5 +731,9 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { // MethodFromServerStream returns the method string for the input stream. // The returned string is in the format of "/service/method". func MethodFromServerStream(stream ServerStream) (string, bool) { - return Method(stream.Context()) + s, ok := transport.StreamFromContext(stream.Context()) + if !ok { + return "", ok + } + return s.Method(), ok } diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go index 1a5e96c5a1..9937fff999 100644 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -354,7 +354,8 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} } ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) + ctx = peer.NewContext(ctx, pr) + s.ctx = newContextWithStream(ctx, s) if ht.stats != nil { s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go index 97b214c640..eb73b722a9 100644 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -307,6 +307,10 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( pr.AuthInfo = t.authInfo } s.ctx = peer.NewContext(s.ctx, pr) + // Cache the current stream to the context so that the server application + // can find out. 
Required when the server wants to send some metadata + // back to the client (unary call only). + s.ctx = newContextWithStream(s.ctx, s) // Attach the received metadata to the context. if len(state.mdata) > 0 { s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go index e0c1e343e7..e68f89ec45 100644 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -366,14 +366,6 @@ func (s *Stream) SetHeader(md metadata.MD) error { return nil } -// SendHeader sends the given header metadata. The given metadata is -// combined with any metadata set by previous calls to SetHeader and -// then written to the transport stream. -func (s *Stream) SendHeader(md metadata.MD) error { - t := s.ServerTransport() - return t.WriteHeader(s, md) -} - // SetTrailer sets the trailer metadata which will be sent with the RPC status // by the server. This can be called multiple times. Server side only. func (s *Stream) SetTrailer(md metadata.MD) error { @@ -453,6 +445,21 @@ func (s *Stream) GoString() string { return fmt.Sprintf("", s, s.method) } +// The key to save transport.Stream in the context. +type streamKey struct{} + +// newContextWithStream creates a new context from ctx and attaches stream +// to it. +func newContextWithStream(ctx context.Context, stream *Stream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// StreamFromContext returns the stream saved in ctx. +func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { + s, ok = ctx.Value(streamKey{}).(*Stream) + return +} + // state of transport type transportState int diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE b/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE deleted file mode 100644 index 5119703339..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/LICENSE +++ /dev/null @@ -1,12 +0,0 @@ -Copyright (c) 2012-2015, Sergey Cherepanov -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/format.go b/vendor/gopkg.in/cheggaaa/pb.v1/format.go deleted file mode 100644 index 0723561c2e..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/format.go +++ /dev/null @@ -1,118 +0,0 @@ -package pb - -import ( - "fmt" - "time" -) - -type Units int - -const ( - // U_NO are default units, they represent a simple value and are not formatted at all. - U_NO Units = iota - // U_BYTES units are formatted in a human readable way (B, KiB, MiB, ...) - U_BYTES - // U_BYTES_DEC units are like U_BYTES, but base 10 (B, KB, MB, ...) - U_BYTES_DEC - // U_DURATION units are formatted in a human readable way (3h14m15s) - U_DURATION -) - -const ( - KiB = 1024 - MiB = 1048576 - GiB = 1073741824 - TiB = 1099511627776 - - KB = 1e3 - MB = 1e6 - GB = 1e9 - TB = 1e12 -) - -func Format(i int64) *formatter { - return &formatter{n: i} -} - -type formatter struct { - n int64 - unit Units - width int - perSec bool -} - -func (f *formatter) To(unit Units) *formatter { - f.unit = unit - return f -} - -func (f *formatter) Width(width int) *formatter { - f.width = width - return f -} - -func (f *formatter) PerSec() *formatter { - f.perSec = true - return f -} - -func (f *formatter) String() (out string) { - switch f.unit { - case U_BYTES: - out = formatBytes(f.n) - case U_BYTES_DEC: - out = formatBytesDec(f.n) - case U_DURATION: - out = formatDuration(f.n) - default: - out = fmt.Sprintf(fmt.Sprintf("%%%dd", f.width), f.n) - } - if f.perSec { - out += "/s" - } - return -} - -// Convert bytes to human readable string. Like 2 MiB, 64.2 KiB, 52 B -func formatBytes(i int64) (result string) { - switch { - case i >= TiB: - result = fmt.Sprintf("%.02f TiB", float64(i)/TiB) - case i >= GiB: - result = fmt.Sprintf("%.02f GiB", float64(i)/GiB) - case i >= MiB: - result = fmt.Sprintf("%.02f MiB", float64(i)/MiB) - case i >= KiB: - result = fmt.Sprintf("%.02f KiB", float64(i)/KiB) - default: - result = fmt.Sprintf("%d B", i) - } - return -} - -// Convert bytes to base-10 human readable string. 
Like 2 MB, 64.2 KB, 52 B -func formatBytesDec(i int64) (result string) { - switch { - case i >= TB: - result = fmt.Sprintf("%.02f TB", float64(i)/TB) - case i >= GB: - result = fmt.Sprintf("%.02f GB", float64(i)/GB) - case i >= MB: - result = fmt.Sprintf("%.02f MB", float64(i)/MB) - case i >= KB: - result = fmt.Sprintf("%.02f KB", float64(i)/KB) - default: - result = fmt.Sprintf("%d B", i) - } - return -} - -func formatDuration(n int64) (result string) { - d := time.Duration(n) - if d > time.Hour*24 { - result = fmt.Sprintf("%dd", d/24/time.Hour) - d -= (d / time.Hour / 24) * (time.Hour * 24) - } - result = fmt.Sprintf("%s%v", result, d) - return -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb.go deleted file mode 100644 index eddc807b1b..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb.go +++ /dev/null @@ -1,475 +0,0 @@ -// Simple console progress bars -package pb - -import ( - "fmt" - "io" - "math" - "strings" - "sync" - "sync/atomic" - "time" - "unicode/utf8" -) - -// Current version -const Version = "1.0.22" - -const ( - // Default refresh rate - 200ms - DEFAULT_REFRESH_RATE = time.Millisecond * 200 - FORMAT = "[=>-]" -) - -// DEPRECATED -// variables for backward compatibility, from now do not work -// use pb.Format and pb.SetRefreshRate -var ( - DefaultRefreshRate = DEFAULT_REFRESH_RATE - BarStart, BarEnd, Empty, Current, CurrentN string -) - -// Create new progress bar object -func New(total int) *ProgressBar { - return New64(int64(total)) -} - -// Create new progress bar object using int64 as total -func New64(total int64) *ProgressBar { - pb := &ProgressBar{ - Total: total, - RefreshRate: DEFAULT_REFRESH_RATE, - ShowPercent: true, - ShowCounters: true, - ShowBar: true, - ShowTimeLeft: true, - ShowElapsedTime: false, - ShowFinalTime: true, - Units: U_NO, - ManualUpdate: false, - finish: make(chan struct{}), - } - return pb.Format(FORMAT) -} - -// Create new object and start -func StartNew(total int) *ProgressBar { - return New(total).Start() -} - -// Callback for custom output -// For example: -// bar.Callback = func(s string) { -// mySuperPrint(s) -// } -// -type Callback func(out string) - -type ProgressBar struct { - current int64 // current must be first member of struct (https://code.google.com/p/go/issues/detail?id=5278) - previous int64 - - Total int64 - RefreshRate time.Duration - ShowPercent, ShowCounters bool - ShowSpeed, ShowTimeLeft, ShowBar bool - ShowFinalTime, ShowElapsedTime bool - Output io.Writer - Callback Callback - NotPrint bool - Units Units - Width int - ForceWidth bool - ManualUpdate bool - AutoStat bool - - // Default width for the time box. - UnitsWidth int - TimeBoxWidth int - - finishOnce sync.Once //Guards isFinish - finish chan struct{} - isFinish bool - - startTime time.Time - startValue int64 - - changeTime time.Time - - prefix, postfix string - - mu sync.Mutex - lastPrint string - - BarStart string - BarEnd string - Empty string - Current string - CurrentN string - - AlwaysUpdate bool -} - -// Start print -func (pb *ProgressBar) Start() *ProgressBar { - pb.startTime = time.Now() - pb.startValue = atomic.LoadInt64(&pb.current) - if pb.Total == 0 { - pb.ShowTimeLeft = false - pb.ShowPercent = false - pb.AutoStat = false - } - if !pb.ManualUpdate { - pb.Update() // Initial printing of the bar before running the bar refresher. 
- go pb.refresher() - } - return pb -} - -// Increment current value -func (pb *ProgressBar) Increment() int { - return pb.Add(1) -} - -// Get current value -func (pb *ProgressBar) Get() int64 { - c := atomic.LoadInt64(&pb.current) - return c -} - -// Set current value -func (pb *ProgressBar) Set(current int) *ProgressBar { - return pb.Set64(int64(current)) -} - -// Set64 sets the current value as int64 -func (pb *ProgressBar) Set64(current int64) *ProgressBar { - atomic.StoreInt64(&pb.current, current) - return pb -} - -// Add to current value -func (pb *ProgressBar) Add(add int) int { - return int(pb.Add64(int64(add))) -} - -func (pb *ProgressBar) Add64(add int64) int64 { - return atomic.AddInt64(&pb.current, add) -} - -// Set prefix string -func (pb *ProgressBar) Prefix(prefix string) *ProgressBar { - pb.prefix = prefix - return pb -} - -// Set postfix string -func (pb *ProgressBar) Postfix(postfix string) *ProgressBar { - pb.postfix = postfix - return pb -} - -// Set custom format for bar -// Example: bar.Format("[=>_]") -// Example: bar.Format("[\x00=\x00>\x00-\x00]") // \x00 is the delimiter -func (pb *ProgressBar) Format(format string) *ProgressBar { - var formatEntries []string - if utf8.RuneCountInString(format) == 5 { - formatEntries = strings.Split(format, "") - } else { - formatEntries = strings.Split(format, "\x00") - } - if len(formatEntries) == 5 { - pb.BarStart = formatEntries[0] - pb.BarEnd = formatEntries[4] - pb.Empty = formatEntries[3] - pb.Current = formatEntries[1] - pb.CurrentN = formatEntries[2] - } - return pb -} - -// Set bar refresh rate -func (pb *ProgressBar) SetRefreshRate(rate time.Duration) *ProgressBar { - pb.RefreshRate = rate - return pb -} - -// Set units -// bar.SetUnits(U_NO) - by default -// bar.SetUnits(U_BYTES) - for Mb, Kb, etc -func (pb *ProgressBar) SetUnits(units Units) *ProgressBar { - pb.Units = units - return pb -} - -// Set max width, if width is bigger than terminal width, will be ignored -func (pb *ProgressBar) SetMaxWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = false - return pb -} - -// Set bar width -func (pb *ProgressBar) SetWidth(width int) *ProgressBar { - pb.Width = width - pb.ForceWidth = true - return pb -} - -// End print -func (pb *ProgressBar) Finish() { - //Protect multiple calls - pb.finishOnce.Do(func() { - close(pb.finish) - pb.write(atomic.LoadInt64(&pb.current)) - pb.mu.Lock() - defer pb.mu.Unlock() - switch { - case pb.Output != nil: - fmt.Fprintln(pb.Output) - case !pb.NotPrint: - fmt.Println() - } - pb.isFinish = true - }) -} - -// IsFinished return boolean -func (pb *ProgressBar) IsFinished() bool { - pb.mu.Lock() - defer pb.mu.Unlock() - return pb.isFinish -} - -// End print and write string 'str' -func (pb *ProgressBar) FinishPrint(str string) { - pb.Finish() - if pb.Output != nil { - fmt.Fprintln(pb.Output, str) - } else { - fmt.Println(str) - } -} - -// implement io.Writer -func (pb *ProgressBar) Write(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// implement io.Reader -func (pb *ProgressBar) Read(p []byte) (n int, err error) { - n = len(p) - pb.Add(n) - return -} - -// Create new proxy reader over bar -// Takes io.Reader or io.ReadCloser -func (pb *ProgressBar) NewProxyReader(r io.Reader) *Reader { - return &Reader{r, pb} -} - -func (pb *ProgressBar) write(current int64) { - width := pb.GetWidth() - - var percentBox, countersBox, timeLeftBox, timeSpentBox, speedBox, barBox, end, out string - - // percents - if pb.ShowPercent { - var percent float64 - if pb.Total > 
0 { - percent = float64(current) / (float64(pb.Total) / float64(100)) - } else { - percent = float64(current) / float64(100) - } - percentBox = fmt.Sprintf(" %6.02f%%", percent) - } - - // counters - if pb.ShowCounters { - current := Format(current).To(pb.Units).Width(pb.UnitsWidth) - if pb.Total > 0 { - total := Format(pb.Total).To(pb.Units).Width(pb.UnitsWidth) - countersBox = fmt.Sprintf(" %s / %s ", current, total) - } else { - countersBox = fmt.Sprintf(" %s / ? ", current) - } - } - - // time left - pb.mu.Lock() - currentFromStart := current - pb.startValue - fromStart := time.Now().Sub(pb.startTime) - lastChangeTime := pb.changeTime - fromChange := lastChangeTime.Sub(pb.startTime) - pb.mu.Unlock() - - if pb.ShowElapsedTime { - timeSpentBox = fmt.Sprintf(" %s ", (fromStart/time.Second)*time.Second) - } - - select { - case <-pb.finish: - if pb.ShowFinalTime { - var left time.Duration - left = (fromStart / time.Second) * time.Second - timeLeftBox = fmt.Sprintf(" %s", left.String()) - } - default: - if pb.ShowTimeLeft && currentFromStart > 0 { - perEntry := fromChange / time.Duration(currentFromStart) - var left time.Duration - if pb.Total > 0 { - left = time.Duration(pb.Total-currentFromStart) * perEntry - left -= time.Since(lastChangeTime) - left = (left / time.Second) * time.Second - } else { - left = time.Duration(currentFromStart) * perEntry - left = (left / time.Second) * time.Second - } - if left > 0 { - timeLeft := Format(int64(left)).To(U_DURATION).String() - timeLeftBox = fmt.Sprintf(" %s", timeLeft) - } - } - } - - if len(timeLeftBox) < pb.TimeBoxWidth { - timeLeftBox = fmt.Sprintf("%s%s", strings.Repeat(" ", pb.TimeBoxWidth-len(timeLeftBox)), timeLeftBox) - } - - // speed - if pb.ShowSpeed && currentFromStart > 0 { - fromStart := time.Now().Sub(pb.startTime) - speed := float64(currentFromStart) / (float64(fromStart) / float64(time.Second)) - speedBox = " " + Format(int64(speed)).To(pb.Units).Width(pb.UnitsWidth).PerSec().String() - } - - barWidth := escapeAwareRuneCountInString(countersBox + pb.BarStart + pb.BarEnd + percentBox + timeSpentBox + timeLeftBox + speedBox + pb.prefix + pb.postfix) - // bar - if pb.ShowBar { - size := width - barWidth - if size > 0 { - if pb.Total > 0 { - curSize := int(math.Ceil((float64(current) / float64(pb.Total)) * float64(size))) - emptySize := size - curSize - barBox = pb.BarStart - if emptySize < 0 { - emptySize = 0 - } - if curSize > size { - curSize = size - } - - cursorLen := escapeAwareRuneCountInString(pb.Current) - if emptySize <= 0 { - barBox += strings.Repeat(pb.Current, curSize/cursorLen) - } else if curSize > 0 { - cursorEndLen := escapeAwareRuneCountInString(pb.CurrentN) - cursorRepetitions := (curSize - cursorEndLen) / cursorLen - barBox += strings.Repeat(pb.Current, cursorRepetitions) - barBox += pb.CurrentN - } - - emptyLen := escapeAwareRuneCountInString(pb.Empty) - barBox += strings.Repeat(pb.Empty, emptySize/emptyLen) - barBox += pb.BarEnd - } else { - pos := size - int(current)%int(size) - barBox = pb.BarStart - if pos-1 > 0 { - barBox += strings.Repeat(pb.Empty, pos-1) - } - barBox += pb.Current - if size-pos-1 > 0 { - barBox += strings.Repeat(pb.Empty, size-pos-1) - } - barBox += pb.BarEnd - } - } - } - - // check len - out = pb.prefix + timeSpentBox + countersBox + barBox + percentBox + speedBox + timeLeftBox + pb.postfix - if cl := escapeAwareRuneCountInString(out); cl < width { - end = strings.Repeat(" ", width-cl) - } - - // and print! 
- pb.mu.Lock() - pb.lastPrint = out + end - isFinish := pb.isFinish - pb.mu.Unlock() - switch { - case isFinish: - return - case pb.Output != nil: - fmt.Fprint(pb.Output, "\r"+out+end) - case pb.Callback != nil: - pb.Callback(out + end) - case !pb.NotPrint: - fmt.Print("\r" + out + end) - } -} - -// GetTerminalWidth - returns terminal width for all platforms. -func GetTerminalWidth() (int, error) { - return terminalWidth() -} - -func (pb *ProgressBar) GetWidth() int { - if pb.ForceWidth { - return pb.Width - } - - width := pb.Width - termWidth, _ := terminalWidth() - if width == 0 || termWidth <= width { - width = termWidth - } - - return width -} - -// Write the current state of the progressbar -func (pb *ProgressBar) Update() { - c := atomic.LoadInt64(&pb.current) - p := atomic.LoadInt64(&pb.previous) - if p != c { - pb.mu.Lock() - pb.changeTime = time.Now() - pb.mu.Unlock() - atomic.StoreInt64(&pb.previous, c) - } - pb.write(c) - if pb.AutoStat { - if c == 0 { - pb.startTime = time.Now() - pb.startValue = 0 - } else if c >= pb.Total && pb.isFinish != true { - pb.Finish() - } - } -} - -// String return the last bar print -func (pb *ProgressBar) String() string { - pb.mu.Lock() - defer pb.mu.Unlock() - return pb.lastPrint -} - -// Internal loop for refreshing the progressbar -func (pb *ProgressBar) refresher() { - for { - select { - case <-pb.finish: - return - case <-time.After(pb.RefreshRate): - pb.Update() - } - } -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go deleted file mode 100644 index d85dbc3b2b..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_appengine.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package pb - -import "errors" - -// terminalWidth returns width of the terminal, which is not supported -// and should always failed on appengine classic which is a sandboxed PaaS. -func terminalWidth() (int, error) { - return 0, errors.New("Not supported") -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go deleted file mode 100644 index 2c67e19471..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_win.go +++ /dev/null @@ -1,141 +0,0 @@ -// +build windows - -package pb - -import ( - "errors" - "fmt" - "os" - "sync" - "syscall" - "unsafe" -) - -var tty = os.Stdin - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - - // GetConsoleScreenBufferInfo retrieves information about the - // specified console screen buffer. - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx - procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") - - // GetConsoleMode retrieves the current input mode of a console's - // input buffer or the current output mode of a console screen buffer. - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - getConsoleMode = kernel32.NewProc("GetConsoleMode") - - // SetConsoleMode sets the input mode of a console's input buffer - // or the output mode of a console screen buffer. - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - setConsoleMode = kernel32.NewProc("SetConsoleMode") - - // SetConsoleCursorPosition sets the cursor position in the - // specified console screen buffer. 
- // https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx - setConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") -) - -type ( - // Defines the coordinates of the upper left and lower right corners - // of a rectangle. - // See - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms686311(v=vs.85).aspx - smallRect struct { - Left, Top, Right, Bottom int16 - } - - // Defines the coordinates of a character cell in a console screen - // buffer. The origin of the coordinate system (0,0) is at the top, left cell - // of the buffer. - // See - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682119(v=vs.85).aspx - coordinates struct { - X, Y int16 - } - - word int16 - - // Contains information about a console screen buffer. - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms682093(v=vs.85).aspx - consoleScreenBufferInfo struct { - dwSize coordinates - dwCursorPosition coordinates - wAttributes word - srWindow smallRect - dwMaximumWindowSize coordinates - } -) - -// terminalWidth returns width of the terminal. -func terminalWidth() (width int, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return 0, error(e) - } - return int(info.dwSize.X) - 1, nil -} - -func getCursorPos() (pos coordinates, err error) { - var info consoleScreenBufferInfo - _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0) - if e != 0 { - return info.dwCursorPosition, error(e) - } - return info.dwCursorPosition, nil -} - -func setCursorPos(pos coordinates) error { - _, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0) - if e != 0 { - return error(e) - } - return nil -} - -var ErrPoolWasStarted = errors.New("Bar pool was started") - -var echoLocked bool -var echoLockMutex sync.Mutex - -var oldState word - -func lockEcho() (shutdownCh chan struct{}, err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if echoLocked { - err = ErrPoolWasStarted - return - } - echoLocked = true - - if _, _, e := syscall.Syscall(getConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&oldState)), 0); e != 0 { - err = fmt.Errorf("Can't get terminal settings: %v", e) - return - } - - newState := oldState - const ENABLE_ECHO_INPUT = 0x0004 - const ENABLE_LINE_INPUT = 0x0002 - newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) - if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings: %v", e) - return - } - return -} - -func unlockEcho() (err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if !echoLocked { - return - } - echoLocked = false - if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(oldState), 0); e != 0 { - err = fmt.Errorf("Can't set terminal settings") - } - return -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go deleted file mode 100644 index 8e05770ceb..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pb_x.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly -// +build !appengine - -package pb - -import ( - "errors" - "fmt" - "os" - "os/signal" - "sync" - 
"syscall" - - "golang.org/x/sys/unix" -) - -var ErrPoolWasStarted = errors.New("Bar pool was started") - -var ( - echoLockMutex sync.Mutex - origTermStatePtr *unix.Termios - tty *os.File -) - -func init() { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - - var err error - tty, err = os.Open("/dev/tty") - if err != nil { - tty = os.Stdin - } -} - -// terminalWidth returns width of the terminal. -func terminalWidth() (int, error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - - fd := int(tty.Fd()) - - ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) - if err != nil { - return 0, err - } - - return int(ws.Col), nil -} - -func lockEcho() (shutdownCh chan struct{}, err error) { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if origTermStatePtr != nil { - return shutdownCh, ErrPoolWasStarted - } - - fd := int(tty.Fd()) - - origTermStatePtr, err = unix.IoctlGetTermios(fd, ioctlReadTermios) - if err != nil { - return nil, fmt.Errorf("Can't get terminal settings: %v", err) - } - - oldTermios := *origTermStatePtr - newTermios := oldTermios - newTermios.Lflag &^= syscall.ECHO - newTermios.Lflag |= syscall.ICANON | syscall.ISIG - newTermios.Iflag |= syscall.ICRNL - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newTermios); err != nil { - return nil, fmt.Errorf("Can't set terminal settings: %v", err) - } - - shutdownCh = make(chan struct{}) - go catchTerminate(shutdownCh) - return -} - -func unlockEcho() error { - echoLockMutex.Lock() - defer echoLockMutex.Unlock() - if origTermStatePtr == nil { - return nil - } - - fd := int(tty.Fd()) - - if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, origTermStatePtr); err != nil { - return fmt.Errorf("Can't set terminal settings: %v", err) - } - - origTermStatePtr = nil - - return nil -} - -// listen exit signals and restore terminal state -func catchTerminate(shutdownCh chan struct{}) { - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt, syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGKILL) - defer signal.Stop(sig) - select { - case <-shutdownCh: - unlockEcho() - case <-sig: - unlockEcho() - } -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool.go deleted file mode 100644 index f44baa01fc..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows - -package pb - -import ( - "io" - "sync" - "time" -) - -// Create and start new pool with given bars -// You need call pool.Stop() after work -func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) { - pool = new(Pool) - if err = pool.Start(); err != nil { - return - } - pool.Add(pbs...) - return -} - -// NewPool initialises a pool with progress bars, but -// doesn't start it. You need to call Start manually -func NewPool(pbs ...*ProgressBar) (pool *Pool) { - pool = new(Pool) - pool.Add(pbs...) - return -} - -type Pool struct { - Output io.Writer - RefreshRate time.Duration - bars []*ProgressBar - lastBarsCount int - shutdownCh chan struct{} - workerCh chan struct{} - m sync.Mutex - finishOnce sync.Once -} - -// Add progress bars. 
-func (p *Pool) Add(pbs ...*ProgressBar) { - p.m.Lock() - defer p.m.Unlock() - for _, bar := range pbs { - bar.ManualUpdate = true - bar.NotPrint = true - bar.Start() - p.bars = append(p.bars, bar) - } -} - -func (p *Pool) Start() (err error) { - p.RefreshRate = DefaultRefreshRate - p.shutdownCh, err = lockEcho() - if err != nil { - return - } - p.workerCh = make(chan struct{}) - go p.writer() - return -} - -func (p *Pool) writer() { - var first = true - defer func() { - if first == false { - p.print(false) - } else { - p.print(true) - p.print(false) - } - close(p.workerCh) - }() - - for { - select { - case <-time.After(p.RefreshRate): - if p.print(first) { - p.print(false) - return - } - first = false - case <-p.shutdownCh: - return - } - } -} - -// Restore terminal state and close pool -func (p *Pool) Stop() error { - p.finishOnce.Do(func() { - close(p.shutdownCh) - }) - - // Wait for the worker to complete - select { - case <-p.workerCh: - } - - return unlockEcho() -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go deleted file mode 100644 index 63598d378a..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_win.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build windows - -package pb - -import ( - "fmt" - "log" -) - -func (p *Pool) print(first bool) bool { - p.m.Lock() - defer p.m.Unlock() - var out string - if !first { - coords, err := getCursorPos() - if err != nil { - log.Panic(err) - } - coords.Y -= int16(p.lastBarsCount) - if coords.Y < 0 { - coords.Y = 0 - } - coords.X = 0 - - err = setCursorPos(coords) - if err != nil { - log.Panic(err) - } - } - isFinished := true - for _, bar := range p.bars { - if !bar.IsFinished() { - isFinished = false - } - bar.Update() - out += fmt.Sprintf("\r%s\n", bar.String()) - } - if p.Output != nil { - fmt.Fprint(p.Output, out) - } else { - fmt.Print(out) - } - p.lastBarsCount = len(p.bars) - return isFinished -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go b/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go deleted file mode 100644 index a8ae14d2f6..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/pool_x.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build linux darwin freebsd netbsd openbsd solaris dragonfly - -package pb - -import "fmt" - -func (p *Pool) print(first bool) bool { - p.m.Lock() - defer p.m.Unlock() - var out string - if !first { - out = fmt.Sprintf("\033[%dA", p.lastBarsCount) - } - isFinished := true - for _, bar := range p.bars { - if !bar.IsFinished() { - isFinished = false - } - bar.Update() - out += fmt.Sprintf("\r%s\n", bar.String()) - } - if p.Output != nil { - fmt.Fprint(p.Output, out) - } else { - fmt.Print(out) - } - p.lastBarsCount = len(p.bars) - return isFinished -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go b/vendor/gopkg.in/cheggaaa/pb.v1/reader.go deleted file mode 100644 index 9f3148b546..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/reader.go +++ /dev/null @@ -1,25 +0,0 @@ -package pb - -import ( - "io" -) - -// It's proxy reader, implement io.Reader -type Reader struct { - io.Reader - bar *ProgressBar -} - -func (r *Reader) Read(p []byte) (n int, err error) { - n, err = r.Reader.Read(p) - r.bar.Add(n) - return -} - -// Close the reader when it implements io.Closer -func (r *Reader) Close() (err error) { - if closer, ok := r.Reader.(io.Closer); ok { - return closer.Close() - } - return -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go b/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go deleted file mode 100644 index c617c55ec3..0000000000 --- 
a/vendor/gopkg.in/cheggaaa/pb.v1/runecount.go +++ /dev/null @@ -1,17 +0,0 @@ -package pb - -import ( - "github.com/mattn/go-runewidth" - "regexp" -) - -// Finds the control character sequences (like colors) -var ctrlFinder = regexp.MustCompile("\x1b\x5b[0-9]+\x6d") - -func escapeAwareRuneCountInString(s string) int { - n := runewidth.StringWidth(s) - for _, sm := range ctrlFinder.FindAllString(s, -1) { - n -= runewidth.StringWidth(sm) - } - return n -} diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go deleted file mode 100644 index 517ea8ed72..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd netbsd openbsd dragonfly -// +build !appengine - -package pb - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA -const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go b/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go deleted file mode 100644 index b10f61859c..0000000000 --- a/vendor/gopkg.in/cheggaaa/pb.v1/termios_sysv.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux solaris -// +build !appengine - -package pb - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS -const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/k8s.io/client-go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/client-go/util/homedir/homedir.go b/vendor/k8s.io/client-go/util/homedir/homedir.go deleted file mode 100644 index 816db57f59..0000000000 --- a/vendor/k8s.io/client-go/util/homedir/homedir.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package homedir - -import ( - "os" - "runtime" -) - -// HomeDir returns the home directory for the current user -func HomeDir() string { - if runtime.GOOS == "windows" { - - // First prefer the HOME environmental variable - if home := os.Getenv("HOME"); len(home) > 0 { - if _, err := os.Stat(home); err == nil { - return home - } - } - if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 { - homeDir := homeDrive + homePath - if _, err := os.Stat(homeDir); err == nil { - return homeDir - } - } - if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 { - if _, err := os.Stat(userProfile); err == nil { - return userProfile - } - } - } - return os.Getenv("HOME") -}
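The removed client-go helper resolves the home directory on Windows with the precedence HOME, then HOMEDRIVE+HOMEPATH, then USERPROFILE, accepting a candidate only if it exists on disk; on other platforms it is simply $HOME. A compact sketch of that fallback chain (the `exists` helper and function names are illustrative, not the client-go API):

package main

import (
	"fmt"
	"os"
	"runtime"
)

func exists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

// homeDir mirrors the deleted helper: on Windows, try HOME, then
// HOMEDRIVE+HOMEPATH (both parts required), then USERPROFILE, each
// verified with Stat; elsewhere, return $HOME directly.
func homeDir() string {
	if runtime.GOOS == "windows" {
		if home := os.Getenv("HOME"); home != "" && exists(home) {
			return home
		}
		drive, path := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH")
		if drive != "" && path != "" && exists(drive+path) {
			return drive + path
		}
		if profile := os.Getenv("USERPROFILE"); profile != "" && exists(profile) {
			return profile
		}
	}
	return os.Getenv("HOME")
}

func main() { fmt.Println(homeDir()) }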